# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/plots.ipynb (unless otherwise specified).
__all__ = ['river_reliability_diagram', 'class_wise_river_reliability_diagram', 'confidence_reliability_diagram',
'class_wise_confidence_reliability_diagram']
# Cell
from riverreliability import utils, metrics as rmetrics
import matplotlib.pyplot as plt
import matplotlib.axes
from matplotlib import cm
import numpy as np
from scipy.stats import beta
from scipy import interpolate
from .beta import get_beta_parameters, beta_avg_pdf
import sklearn.datasets
import sklearn.model_selection
import sklearn.svm
from sklearn.metrics import confusion_matrix, accuracy_score, balanced_accuracy_score
import sklearn.utils
# Internal Cell
def _decorate_ax(ax:matplotlib.axes.Axes):
"""Apply styling changes to a matplotlib axis.
Arguments:
ax -- matplotlib axis
"""
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.setp(ax.spines.values(), color=cm.tab20c(18))
plt.setp([ax.get_xticklines(), ax.get_yticklines()], color=cm.tab20c(18))
def _get_beta_pdf(dist):
"""Get pdf and beta parameters from `dist`.
`dist` is either:
- a tuple which contains a and b, in which case the exact pdf is sampled
- a vector containing samples from a beta pdf (with unknown a and b), in which case MLE is used to estimate a and b
Returns:
tuple containing a, b, pdf and linspace x over which the pdf was sampled
"""
if len(dist) == 2:
# dist contains the parameters of the beta distribution
a, b = dist
# sample the beta
x = np.linspace(0, 1, 500)
pdf = beta.pdf(x, a, b)
else:
# dist contains samples of the beta pdf
## sample from the beta distribution using the pdf probabilities
# make it impossible to sample 0 or 1
# in theory this should never happen, but approximations introduce errors
prob = dist.copy()
prob[0] = 0.0
prob[-1] = 0.0
x = np.linspace(0, 1, len(dist))
samples = np.random.choice(x, size=500, p=prob/prob.sum())
## fit a beta distribution to the samples
a, b, loc, scale = beta.fit(samples, floc=0, fscale=1)
pdf = dist
return a, b, pdf, x
def _pre_plot_checks(y_probs, y_preds, y_true, ax, ci=None, required_axes=None):
"""Perform some pre-plotting checks on input data, create required axes if necessary and compute number of classes."""
num_classes = len(sklearn.utils.multiclass.unique_labels(y_preds, y_true))
if (required_axes == 1) and (ax is None):
fig, ax = plt.subplots(subplot_kw={"aspect": 0.75}, dpi=100, tight_layout=True)
else:
required_axes = num_classes if required_axes is None else required_axes
if ax is None:
fig, ax = plt.subplots(1, required_axes, figsize=(3*required_axes, 3), subplot_kw={"aspect": 0.75}, constrained_layout=True, sharex=True, sharey=True, dpi=100)
if (required_axes != 1) and (len(ax) != required_axes):
raise ValueError(f"Wrong amount of axes provided: {required_axes} needed, but {len(ax)} provided.")
sklearn.utils.check_consistent_length(y_probs, y_preds, y_true)
if ci is not None:
ci = ci if isinstance(ci, list) else [ci]
for a in ci:
if (a > 1) or (a < 0):
raise ValueError(f"CI must be in [0, 1]")
return num_classes, ax
class clipped_cm:
def __init__(self, n, base_cm=cm.Greys, clip_range=(0.5, 1.0)):
self.n = n
self.space = np.linspace(*clip_range, n+1)
self.cm = [base_cm(p) for p in self.space]
def __call__(self, x):
return self.cm[int(x*self.n)]
# Internal Cell
def river_diagram(distributions:np.array, confidence_levels:np.array, ax:matplotlib.axes.Axes, ci:list):
ci = sorted(ci)[::-1]
_decorate_ax(ax)
ax.set_ylim(0, 1)
intervals = np.empty((len(confidence_levels), len(ci), 2), dtype=float)
means = np.empty((len(confidence_levels),), dtype=float)
for i, (cl, dist) in enumerate(zip(confidence_levels, distributions)):
if np.isnan(cl):
continue
a, b, pdf, _ = _get_beta_pdf(dist)
for j, l in enumerate(ci):
intervals[i, j] = beta.interval(l, a, b)
means[i] = a/(a+b)
x = np.linspace(min(confidence_levels), max(confidence_levels), 1000)
for i, l in enumerate(ci):
f0 = interpolate.PchipInterpolator(confidence_levels, intervals[:, i, 0])
f1 = interpolate.PchipInterpolator(confidence_levels, intervals[:, i, 1])
ax.fill_between(x, f0(x), f1(x), zorder=i, color=cm.Greys(0.2+i*0.1), label=f"{int(l*100):2d}% CI")
fm = interpolate.PchipInterpolator(confidence_levels, means)
ax.plot(x, fm(x), color="black", zorder=4, label="Mean")
ax.scatter(confidence_levels, means, s=20, color="black", zorder=4)
ax.plot([0,1], [0,1], color=cm.Greys(0.8), linestyle="--", zorder=5, label="Perfect calibration")
# Cell
def river_reliability_diagram(y_probs:np.array, y_preds:np.array, y_true:np.array, ax:matplotlib.axes.Axes=None, bins="fd", ci=[0.90, 0.95, 0.99], **bin_args):
"""Plot the posterior balanced accuracy-based reliability diagram.
Arguments:
y_probs -- Array containing prediction confidences
y_preds -- Array containing predicted labels (shape (N,))
y_true -- Array containing true labels (shape (N,))
ax -- Axes on which the diagram will be plotted (will be decorated by `_decorate_ax`)
bins -- Description of the number of bins into which to divide prediction confidences (see `numpy.histogram_bin_edges` for options)
ci -- Confidence interval levels to plot (provide a list of floats)
Returns:
Axes containing the plot
"""
num_classes, ax = _pre_plot_checks(y_probs, y_preds, y_true, ax, ci, required_axes=1)
# bin the probabilities
bin_indices, edges = utils.get_bin_indices(y_probs, bins, 0.0, 1.0, return_edges=True, **bin_args)
unique_bin_indices = sorted(np.unique(bin_indices))
confidence_levels = np.empty((len(unique_bin_indices),), dtype=np.float32) # store mean confidence
if len(np.unique(y_preds)) > 1:
# the beta distribution will be the average of the per-class distribution
n_samples = 10000
distributions = np.empty((len(unique_bin_indices), n_samples), dtype=np.float32) # store sampled pdf of the averaged beta distribution per bin
x = np.linspace(0, 1, n_samples)
else:
# the beta distributions will be exact
distributions = np.empty((len(unique_bin_indices), 2), dtype=int)
# compute beta distribution per bin
for i, bin_idx in enumerate(unique_bin_indices):
# select instances in this bin
selector = bin_indices == bin_idx
# set the confidence level to the average confidence reported in the bin
confidence_levels[i] = y_probs[selector].mean()
if len(np.unique(y_preds)) > 1:
# compute the average beta distribution
conf = confusion_matrix(y_true[selector], y_preds[selector])#, labels=np.arange(0, num_classes))
parameters = get_beta_parameters(conf)
distributions[i] = np.clip(beta_avg_pdf(x, parameters, fft=True), 0, None)
else:
# compute the exact beta distribution
correct = (y_true[selector] == y_preds[selector]).sum()
incorrect = len(y_true[selector]) - correct
distributions[i] = correct + 1, incorrect + 1
# plot the actual diagram
ax.set_xlabel("Confidence level")
ax.set_ylabel("Posterior balanced accuracy")
ci = [0.90, 0.95, 0.99] if ci is None else ci
river_diagram(distributions, confidence_levels, ax, ci=ci)
return ax
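# Illustrative usage sketch (added for clarity, not part of the generated notebook
# export): fit a probabilistic classifier on a toy dataset and plot the river
# reliability diagram for the confidence of the predicted class. `bins=10` is an
# arbitrary choice.
if __name__ == "__main__":
    X, y = sklearn.datasets.load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.5, random_state=0)
    clf = sklearn.svm.SVC(probability=True).fit(X_train, y_train)
    probs = clf.predict_proba(X_test)  # shape (n_samples, n_classes)
    y_preds = probs.argmax(axis=1)     # predicted labels
    y_confs = probs.max(axis=1)        # confidence of the predicted class
    ax = river_reliability_diagram(y_confs, y_preds, y_test, bins=10)
    plt.show()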
# Cell
def class_wise_river_reliability_diagram(y_probs:np.array, y_preds:np.array, y_true:np.array, axes:matplotlib.axes.Axes=None, bins="fd", metric=None, show_k_least_calibrated:int=None, ci=[0.90, 0.95, 0.99], **bin_args):
"""Plot the class-wise posterior balanced accuracy-based reliability diagram.
Arguments:
y_probs -- Array containing prediction confidences
y_preds -- Array containing predicted labels (shape (N,))
y_true -- Array containing true labels (shape (N,))
axes -- Axes on which the diagram will be plotted (will be decorated by `_decorate_ax`)
bins -- Description of the number of bins into which to divide prediction confidences (see `numpy.histogram_bin_edges` for options)
metric -- Optional calibration metric used to rank classes, called per class as metric(y_probs, y_preds, y_true)
show_k_least_calibrated -- If a metric is given, only the k least calibrated classes are plotted
ci -- Confidence interval levels to plot (provide a list of floats)
Returns:
Axes containing the plot
"""
num_classes, axes = _pre_plot_checks(y_probs, y_preds, y_true, axes, ci, show_k_least_calibrated)
if metric is None:
a = np.arange(num_classes)
else:
metric_values = []
for c in np.arange(num_classes):
selector = y_preds == c
metric_values.append(metric(y_probs[selector, c], y_preds[selector], y_true[selector]))
a = np.argsort(metric_values)[::-1][:show_k_least_calibrated]
for ax, c in zip(axes, a):
selector = y_preds == c
if metric is None:
ax.set_title(f"Class {c}")
else:
ax.set_title(f"Class {c} ({metric_values[c]:.3f})")
river_reliability_diagram(y_probs[selector, c], y_preds[selector], y_true[selector], ax, bins, ci=ci)
return axes
# Internal Cell
def bar_diagram(edges:np.array, bin_accuracies:np.array, bin_confidences:np.array, ax:matplotlib.axes.Axes, bin_sem:np.array=None):
"""Plot a bar plot confidence reliability diagram.
Arguments:
edges -- Edges of the probability bins
bin_accuracies -- Accuracy per bin
bin_confidences -- Average confidence of predictions in bin
ax -- Axes on which the diagram will be plotted (will be decorated by `_decorate_ax`)
"""
_decorate_ax(ax)
cmap = clipped_cm(len(bin_accuracies), clip_range=(0.2, 0.7))
ax.plot([0,1], [0,1], linestyle="--", color=cmap(1), alpha=0.9, linewidth=1)
width = (edges - np.roll(edges, 1))[1:]
for i, (xi, yi, bi) in enumerate(zip(edges, bin_accuracies, bin_confidences)):
if np.isnan(bi):
continue
if yi < 0:
continue
if bin_sem is not None:
sem = bin_sem[i]
else:
sem = 0.
# plot bin value
ax.bar(xi, yi, width=width[i], align="edge", color=cmap(1-bi), edgecolor="grey", yerr=sem, linewidth=1, zorder=0)
# plot gap to ideal value
ax.bar(
xi+width[i]/2, np.abs(bi-yi), bottom=min(bi, yi), width=width[i],
align="center", color=cmap(0), edgecolor="grey", linewidth=1, zorder=1
)
# Cell
def confidence_reliability_diagram(y_probs:np.array, y_preds:np.array, y_true:np.array, ax:matplotlib.axes.Axes=None, bins="fd", balanced:bool=True, **bin_args):
"""Plot a confidence reliability diagram.
Arguments:
y_probs -- Array containing prediction confidences
y_preds -- Array containing predicted labels (shape (N,))
y_true -- Array containing true labels (shape (N,))
ax -- Axes on which the diagram will be plotted (will be decorated by `_decorate_ax`)
bins -- Description of the number of bins into which to divide prediction confidences (see `numpy.histogram_bin_edges` for options)
balanced -- Flag for using balanced accuracy score
"""
num_classes, ax = _pre_plot_checks(y_probs, y_preds, y_true, ax, required_axes=1)
bin_indices, edges = utils.get_bin_indices(y_probs, bins, 0.0, 1.0, return_edges=True, **bin_args)
unique_bin_indices = sorted(np.unique(bin_indices))
mean_confidences = np.full((len(edges)-1,), dtype=np.float32, fill_value=np.nan)
bin_metric = np.full((len(edges)-1,), dtype=np.float32, fill_value=np.nan)
metric = balanced_accuracy_score if balanced else accuracy_score
ax.set_xlabel("Confidence level")
ax.set_ylabel("Balanced accuracy" if balanced else "Accuracy")
for bin_idx in unique_bin_indices:
selector = bin_indices == bin_idx
mean_confidences[bin_idx-1] = np.mean(y_probs[selector])
bin_metric[bin_idx-1] = metric(y_true[selector], y_preds[selector])
bar_diagram(edges, bin_metric, mean_confidences, ax)
# Cell
def class_wise_confidence_reliability_diagram(y_probs:np.array, y_preds:np.array, y_true:np.array, axes:matplotlib.axes.Axes, bins="fd", **bin_args):
"""Plot a class-wise confidence reliability diagram.
Arguments:
y_probs -- Array containing prediction confidences
y_preds -- Array containing predicted labels (shape (N,))
y_true -- Array containing true labels (shape (N,))
axes -- Axes (one per class) on which the diagrams will be plotted (each will be decorated by `_decorate_ax`)
bins -- Description of the number of bins into which to divide prediction confidences (see `numpy.histogram_bin_edges` for options)
"""
classes = np.unique(y_true)
for ax, c in zip(axes, range(len(classes))):
ax.set_title(f"Class {c}")
selector = y_preds == c
confidence_reliability_diagram(y_probs[selector, c], y_preds[selector], y_true[selector], ax, bins, balanced=False, **bin_args)
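# Illustrative usage sketch (continuing the example above; not part of the generated
# notebook export): the class-wise diagram needs one axis per class and the full
# probability matrix of shape (n_samples, n_classes).
if __name__ == "__main__":
    n_classes = probs.shape[1]
    fig, axes = plt.subplots(1, n_classes, figsize=(3 * n_classes, 3), sharex=True, sharey=True)
    class_wise_confidence_reliability_diagram(probs, y_preds, y_test, axes, bins=10)
    plt.show()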
|
import json
import os
import shutil
import subprocess
from ..util.constants import LOG
from .constants import LOCAL_REPO_DIR
from .google_benchmark.gbench2junit import GBenchToJUnit
class MicroBenchmarksRunner(object):
""" A runner for microbenchmark tests. It will run the microbenchmarks
based on a config object passed to it """
def __init__(self, config):
self.config = config
self.last_build = '000'
return
def run_benchmarks(self, enable_perf):
""" Runs all the microbenchmarks.
Parameters
----------
enable_perf : bool
Whether perf should be enabled for all the benchmarks.
Returns
-------
ret_val : int
the return value for the last failed benchmark. If no benchmarks
fail then it will return 0.
"""
if not len(self.config.benchmarks):
LOG.error('Invalid benchmarks were specified to execute. \
Try not specifying a benchmark and it will execute all.')
return 0
ret_val = 0
benchmark_fail_count = 0
# iterate over all benchmarks and run them
for benchmark_count, bench_name in enumerate(self.config.benchmarks):
LOG.info(f"Running '{bench_name}' with {self.config.num_threads} threads [{benchmark_count}/{len(self.config.benchmarks)}]")
benchmark_ret_val = self.run_single_benchmark(bench_name, enable_perf)
if benchmark_ret_val:
ret_val = benchmark_ret_val
benchmark_fail_count += 1
LOG.info("{PASSED}/{TOTAL} benchmarks passed".format(PASSED=len(self.config.benchmarks) -
benchmark_fail_count, TOTAL=len(self.config.benchmarks)))
return ret_val
def run_single_benchmark(self, bench_name, enable_perf):
""" Execute a single benchmark. The results will be stored in a JSON
file and an XML file.
Parameters
----------
bench_name : str
The name of the benchmark to run.
enable_perf : bool
Whether perf should be enabled for all the benchmarks.
Returns
-------
ret_val : int
The return value from the benchmark process. 0 if successful.
"""
output_file = "{}.json".format(bench_name)
cmd = self._build_benchmark_cmd(bench_name, output_file, enable_perf)
# Environment Variables
os.environ["TERRIER_BENCHMARK_THREADS"] = str(self.config.num_threads) # has to be a str
os.environ["TERRIER_BENCHMARK_LOGFILE_PATH"] = self.config.logfile_path
ret_val, err = self._execute_benchmark(cmd)
if ret_val == 0:
convert_result_xml(bench_name, output_file)
else:
LOG.error(f'Unexpected failure of {bench_name} [ret_val={ret_val}]')
LOG.error(err)
# return the process exit code
return ret_val
def _build_benchmark_cmd(self, bench_name, output_file, enable_perf):
""" Construct the command necessary to execute the microbenchmark test.
Parameters
----------
bench_name : str
The name of the benchmark to run.
output_file : str
The path of the file where the benchmark result should be stored.
enable_perf : bool
Whether perf should be enabled for all the benchmarks.
Returns
-------
cmd : str
The command to be executed in order to run the microbenchmark.
"""
benchmark_path = os.path.join(self.config.benchmark_path, bench_name)
cmd = f'{benchmark_path} ' + \
f' --benchmark_min_time={self.config.min_time} ' + \
f' --benchmark_format=json' + \
f' --benchmark_out={output_file}'
if enable_perf:
if not is_package_installed('perf'):
raise Exception('Missing perf binary. Please install package.')
perf_cmd = generate_perf_command(bench_name)
cmd = f'{perf_cmd} {cmd}'
if self.config.is_local:
pass
elif not is_package_installed('numactl', '--show'):
raise Exception('Missing numactl binary. Please install package')
else:
numa_cmd = generate_numa_command()
cmd = f'{numa_cmd} {cmd}'
return cmd
def _execute_benchmark(self, cmd):
""" Execute the microbenchmark command provided.
Parameters
----------
cmd : str
The command to be executed in order to run the microbenchmark.
Returns
-------
ret_code : int
The return value from the benchmark process. 0 if successful.
err : Error
The error that occurred. None if successful.
"""
LOG.debug(f'Executing command [num_threads={self.config.num_threads}]: {cmd}')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
pretty_format_json = json.dumps(json.loads(output.decode('utf8').replace("'", '"')), indent=4)
LOG.debug(f'OUTPUT: {pretty_format_json}')
return 0, None
except subprocess.CalledProcessError as err:
print(err)
return err.returncode, err
except Exception as err:
return 1, err
def create_local_dirs(self):
""" Create directories to be used as historical results for future
local runs.
This will create a directory for the build in the LOCAL_REPO_DIR.
Each time the microbenchmark script is run it will create another dir
by incrementing the last dir name created. If the script is run 3 times
the LOCAL_REPO_DIR will have directories named 001 002 003 each
containing the json Google benchmark result file.
"""
build_dirs = next(os.walk(LOCAL_REPO_DIR))[1]
last_build = max(build_dirs) if build_dirs else '000'
next_build = os.path.join(LOCAL_REPO_DIR, f'{(int(last_build) + 1):03}')
LOG.info(f'Creating new result directory in local data repository {next_build}')
os.mkdir(next_build)
self.last_build = os.path.basename(next_build)
for bench_name in self.config.benchmarks:
copy_benchmark_result(bench_name, next_build)
def is_package_installed(package_name, validation_command='--version'):
""" Check to see if package is installed.
Parameters
----------
package_name : str
The name of the executable to check.
validation_command : str, optional
The command to execute to check if the package has been installed.
(The default is '--version')
Returns
-------
is_installed : bool
Whether the package is installed.
"""
try:
subprocess.check_output(f'{package_name} {validation_command}', shell=True)
return True
except:
return False
def generate_perf_command(bench_name):
""" Create the command line string to execute perf.
Parameters
----------
bench_name : str
The name of the benchmark.
Returns
-------
perf_cmd : str
The command to execute perf data collection.
"""
perf_output_file = f'{bench_name}.perf'
LOG.debug(f'Enabling perf data collection [output={perf_output_file}]')
return f'perf record --output={perf_output_file}'
def generate_numa_command():
""" Create the command line string to execute numactl.
Returns
-------
numa_cmd : str
The command to execute using NUMA.
"""
# use all the cpus from the highest numbered numa node
nodes = subprocess.check_output("numactl --hardware | grep 'available: ' | cut -d' ' -f2", shell=True)
if not nodes or int(nodes) == 1:
return ''
highest_cpu_node = int(nodes) - 1
if highest_cpu_node > 0:
LOG.debug(f'Number of NUMA nodes = {int(nodes)}')
LOG.debug('Enabling NUMA support')
return f'numactl --cpunodebind={highest_cpu_node} --preferred={highest_cpu_node}'
def convert_result_xml(bench_name, bench_output_file):
""" Convert the gbench results to xml file named after the bench_name.
Parameters
----------
bench_name : str
The name of the microbenchmark.
bench_output_file : str
The path to the benchmark results file.
"""
xml_output_file = f'{bench_name}.xml'
GBenchToJUnit(bench_output_file).convert(xml_output_file)
def copy_benchmark_result(bench_name, build_dir):
""" Copy the benchmark result file.
This is used when running in local mode.
Parameters
----------
bench_name : str
The name of the microbenchmark.
build_dir : str
The path to the build directory.
"""
result_file = f'{bench_name}.json'
shutil.copy(result_file, build_dir)
LOG.debug(f'Copying result file {result_file} into {build_dir}')
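# Illustrative usage sketch (not part of this module). The config class below is
# hypothetical; its attribute names are simply the ones MicroBenchmarksRunner reads
# from `self.config` above, and the benchmark name and paths are placeholders.
class ExampleConfig:
    benchmarks = ['data_table_benchmark']   # hypothetical benchmark binary name
    num_threads = 4
    logfile_path = '/tmp/benchmark.log'
    min_time = 10                           # forwarded to --benchmark_min_time
    benchmark_path = '/opt/benchmarks/release'
    is_local = True                         # skip the numactl wrapper

if __name__ == '__main__':
    runner = MicroBenchmarksRunner(ExampleConfig())
    ret = runner.run_benchmarks(enable_perf=False)
    if ExampleConfig.is_local and ret == 0:
        runner.create_local_dirs()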
|
"""
********************************************************************************
all your parameters
********************************************************************************
"""
import sys
import os
import numpy as np
import tensorflow as tf
# network structure
in_dim = 3
out_dim = 3
width = 2 ** 8 # 2 ** 6 = 64, 2 ** 8 = 256
depth = 5
# training setting
n_epch = int(1e2)
n_btch = 0
c_tol = 1e-8
# dataset prep
N_trn = int(1e4)
N_val = int(5e3)
N_inf = int(1e3)
# optimization
w_init = "Glorot"
b_init = "zeros"
act = "tanh"
lr0 = 1e-2
gam = 1e-2
lrd_exp = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = lr0,
decay_steps = n_epch,
decay_rate = gam,
staircase = False
)
lrd_cos = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate = lr0,
decay_steps = n_epch,
alpha = gam
)
lr = 1e-3 # 1e-3 / lrd_exp / lrd_cos
opt = "Adam"
f_scl = "minmax"
laaf = True
inv = False
# system param
rho = 1.
nu = .01
# weight
w_nth = 1.
w_sth = 1.
w_est = 1.
w_wst = 1.
w_pde = 1.
# rarely change
f_mntr = 10
r_seed = 1234
def params():
print("python :", sys.version)
print("tensorflow:", tf.__version__)
print("rand seed :", r_seed)
os.environ["PYTHONHASHSEED"] = str(r_seed)
np.random.seed(r_seed)
tf.random.set_seed(r_seed)
return in_dim, out_dim, width, depth, \
w_init, b_init, act, \
lr, opt, \
f_scl, laaf, inv, \
rho, nu, \
w_nth, w_sth, w_est, w_wst, w_pde, \
f_mntr, r_seed, \
n_epch, n_btch, c_tol, \
N_trn, N_val, N_inf
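# Illustrative usage sketch (assumption, not part of the original script): unpack
# the tuple returned by params() and build an optimizer from `lr`, which may be a
# float or one of the schedules defined above (lrd_exp / lrd_cos).
if __name__ == "__main__":
    (in_dim, out_dim, width, depth,
     w_init, b_init, act,
     lr, opt,
     f_scl, laaf, inv,
     rho, nu,
     w_nth, w_sth, w_est, w_wst, w_pde,
     f_mntr, r_seed,
     n_epch, n_btch, c_tol,
     N_trn, N_val, N_inf) = params()
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    print("epochs:", n_epch, "learning rate:", lr)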
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os, shutil, logging, uuid
import os.path as osp, numpy as np
from array import array
import hgcalhistory
logger = logging.getLogger('hgcalhistory')
import ROOT
class Histogram2D(object):
def __init__(self):
super(Histogram2D, self).__init__()
self.x_bin_boundaries = None
self.y_bin_boundaries = None
self.data = None
def set_x_bin_boundaries(self, bounds):
self.x_bin_boundaries = bounds
self.x_bin_centers = (bounds[:-1] + bounds[1:]) / 2.
def set_y_bin_boundaries(self, bounds):
self.y_bin_boundaries = bounds
self.y_bin_centers = (bounds[:-1] + bounds[1:]) / 2.
@property
def n_bins_x(self):
return len(self.x_bin_boundaries) - 1
@property
def n_bins_y(self):
return len(self.y_bin_boundaries) - 1
@property
def n_bounds_x(self):
return len(self.x_bin_boundaries)
@property
def n_bounds_y(self):
return len(self.y_bin_boundaries)
def find_nearest_bin_x(self, x):
return (np.abs(self.x_bin_centers - x)).argmin()
def find_nearest_bin_y(self, y):
return (np.abs(self.y_bin_centers - y)).argmin()
def _prepare_data(self):
if self.data is None:
self.data = np.zeros((self.n_bins_x, self.n_bins_y))
def clear_data(self):
self.data = None
self._prepare_data()
def to_TH2(self):
TH2 = ROOT.TH2D(
'TH2_{0}'.format(uuid.uuid4()), '',
self.n_bins_x, array('d', self.x_bin_boundaries),
self.n_bins_y, array('d', self.y_bin_boundaries),
)
ROOT.SetOwnership(TH2, False)
for i_x in range(self.n_bins_x):
for i_y in range(self.n_bins_y):
TH2.SetBinContent(i_x+1, i_y+1, self.data[i_x][i_y])
return TH2
class Histogram2DFillable(Histogram2D):
def fill(self, x, y, value):
self._prepare_data()
i_x = self.find_nearest_bin_x(x)
i_y = self.find_nearest_bin_y(y)
self.data[i_x][i_y] += value
def get_value(self, x, y):
self._prepare_data()
i_x = self.find_nearest_bin_x(x)
i_y = self.find_nearest_bin_y(y)
return self.data[i_x][i_y]
def set_value(self, x, y, value):
self._prepare_data()
i_x = self.find_nearest_bin_x(x)
i_y = self.find_nearest_bin_y(y)
self.data[i_x][i_y] = value
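# Illustrative usage sketch (assumption, not part of the original module): uniform
# bin boundaries, a single fill, and conversion to a ROOT TH2 (requires ROOT).
if __name__ == '__main__':
    hist = Histogram2DFillable()
    hist.set_x_bin_boundaries(np.linspace(0., 10., 11))  # 10 x bins
    hist.set_y_bin_boundaries(np.linspace(-5., 5., 21))  # 20 y bins
    hist.fill(3.2, 0.4, 1.5)                             # add weight 1.5 near (3.2, 0.4)
    print('value near (3.2, 0.4):', hist.get_value(3.2, 0.4))
    th2 = hist.to_TH2()
    print('TH2 integral:', th2.Integral())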
|
import asyncio
import logging
import threading
import time
from watchgod import watch
WAIT_SEC = 2
class FileSystemWatcher:
def __init__(self, path_to_watch, reload_configuration):
self.active = True
self.change_detected = False
self.path_to_watch = path_to_watch
self.reload_configuration = reload_configuration
self.stop_event = asyncio.Event()
self.watch_thread = threading.Thread(target=self.watch, name="config-watcher")
self.watch_thread.start()
self.fs_watch_thread = threading.Thread(target=self.fs_watch, name="fs-watcher")
self.fs_watch_thread.start()
logging.info(f"watching dir {path_to_watch} for custom playbooks changes")
def fs_watch(self):
for _ in watch(self.path_to_watch, stop_event=self.stop_event):
self.mark_change()
def watch(self):
while self.active:
time.sleep(WAIT_SEC)
if self.change_detected:
time.sleep(
WAIT_SEC
) # once we detected a change, we wait a safety period to make sure all the changes under this 'bulk' are finished
self.change_detected = False
try:
self.reload_configuration(self.path_to_watch)
except Exception as e: # in case we have an error while trying to reload, we want the watch thread to stay alive
logging.exception("failed to reload configuration")
def stop_watcher(self):
self.active = False
self.stop_event.set()
def mark_change(self):
self.change_detected = True
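# Illustrative usage sketch (assumption, not part of the original module): the
# watched directory path is a placeholder and the reload callback only logs the change.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def reload_configuration(path):
        logging.info(f"reloading configuration from {path}")

    watcher = FileSystemWatcher("/tmp/playbooks", reload_configuration)
    try:
        time.sleep(60)  # let the watcher run for a while
    finally:
        watcher.stop_watcher()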
|
#Tests proper handling of Verifications with Transactions which don't exist.
#Types.
from typing import Dict, IO, Any
#SignedVerification class.
from python_tests.Classes.Consensus.Verification import SignedVerification
#Blockchain class.
from python_tests.Classes.Merit.Blockchain import Blockchain
#TestError Exception.
from python_tests.Tests.Errors import TestError
#Meros classes.
from python_tests.Meros.Meros import MessageType
from python_tests.Meros.RPC import RPC
#JSON standard lib.
import json
def VUnknown(
rpc: RPC
) -> None:
file: IO[Any] = open("python_tests/Vectors/Consensus/Verification/Parsable.json", "r")
vectors: Dict[str, Any] = json.loads(file.read())
#SignedVerification.
sv: SignedVerification = SignedVerification.fromJSON(vectors["verification"])
#Blockchain.
blockchain: Blockchain = Blockchain.fromJSON(
b"MEROS_DEVELOPER_NETWORK",
60,
int("FAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 16),
vectors["blockchain"]
)
file.close()
#Handshake with the node.
rpc.meros.connect(
254,
254,
len(blockchain.blocks)
)
sentLast: bool = False
hash: bytes = bytes()
while True:
msg: bytes = rpc.meros.recv()
if MessageType(msg[0]) == MessageType.Syncing:
rpc.meros.acknowledgeSyncing()
elif MessageType(msg[0]) == MessageType.GetBlockHash:
height: int = int.from_bytes(msg[1 : 5], byteorder = "big")
if height == 0:
rpc.meros.blockHash(blockchain.last())
else:
if height >= len(blockchain.blocks):
raise TestError("Meros asked for a Block Hash we do not have.")
rpc.meros.blockHash(blockchain.blocks[height].header.hash)
elif MessageType(msg[0]) == MessageType.BlockHeaderRequest:
hash = msg[1 : 49]
for block in blockchain.blocks:
if block.header.hash == hash:
rpc.meros.blockHeader(block.header)
break
if block.header.hash == blockchain.last():
raise TestError("Meros asked for a Block Header we do not have.")
elif MessageType(msg[0]) == MessageType.BlockBodyRequest:
hash = msg[1 : 49]
for block in blockchain.blocks:
if block.header.hash == hash:
rpc.meros.blockBody(block.body)
break
if block.header.hash == blockchain.last():
raise TestError("Meros asked for a Block Body we do not have.")
elif MessageType(msg[0]) == MessageType.ElementRequest:
rpc.meros.element(sv)
elif MessageType(msg[0]) == MessageType.TransactionRequest:
sentLast = True
rpc.meros.dataMissing()
elif MessageType(msg[0]) == MessageType.SyncingOver:
if sentLast:
break
else:
raise TestError("Unexpected message sent: " + msg.hex().upper())
#Verify the Verification and Block were not added.
if rpc.call("consensus", "getHeight", [sv.holder.hex()]) != 0:
raise TestError("Meros added an unknown Verification.")
if rpc.call("merit", "getHeight") != 2:
raise TestError("Meros added a block with an unknown Verification.")
|
# ADB File Explorer `tool`
# Copyright (C) 2022 Azat Aldeshov azata1919@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import datetime
import logging
import os
import shlex
from typing import List
from usb1 import USBContext
from core.configurations import Defaults
from core.managers import PythonADBManager
from data.models import Device, File, FileType
from helpers.converters import __converter_to_permissions_default__
from services.adb import ShellCommand
class FileRepository:
@classmethod
def file(cls, path: str) -> (File, str):
if not PythonADBManager.device:
return None, "No device selected!"
if not PythonADBManager.device.available:
return None, "Device not available!"
try:
path = PythonADBManager.clear_path(path)
mode, size, mtime = PythonADBManager.device.stat(path)
file = File(
name=os.path.basename(os.path.normpath(path)),
size=size,
date_time=datetime.datetime.utcfromtimestamp(mtime),
permissions=__converter_to_permissions_default__(list(oct(mode)[2:]))
)
if file.type == FileType.LINK:
args = ShellCommand.LS_LIST_DIRS + [path.replace(' ', r'\ ') + '/']
response = PythonADBManager.device.shell(shlex.join(args))
file.link_type = FileType.UNKNOWN
if response and response.startswith('d'):
file.link_type = FileType.DIRECTORY
elif response and 'Not a' in response:
file.link_type = FileType.FILE
file.path = path
return file, None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
@classmethod
def files(cls) -> (List[File], str):
if not PythonADBManager.device:
return None, "No device selected!"
if not PythonADBManager.device.available:
return None, "Device not available!"
files = []
try:
path = PythonADBManager.path()
response = PythonADBManager.device.list(path)
args = ShellCommand.LS_ALL_DIRS + [path.replace(' ', r'\ ') + "*/"]
dirs = PythonADBManager.device.shell(" ".join(args)).split()
for file in response:
if file.filename.decode() == '.' or file.filename.decode() == '..':
continue
permissions = __converter_to_permissions_default__(list(oct(file.mode)[2:]))
link_type = None
if permissions[0] == 'l':
link_type = FileType.FILE
if f"{path}{file.filename.decode()}/" in dirs:
link_type = FileType.DIRECTORY
files.append(
File(
name=file.filename.decode(),
size=file.size,
path=f"{path}{file.filename.decode()}",
link_type=link_type,
date_time=datetime.datetime.utcfromtimestamp(file.mtime),
permissions=permissions,
)
)
return files, None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return files, error
@classmethod
def rename(cls, file: File, name: str) -> (str, str):
if not PythonADBManager.device:
return None, "No device selected!"
if not PythonADBManager.device.available:
return None, "Device not available!"
if '/' in name or '\\' in name:
return None, "Invalid name"
try:
args = [ShellCommand.MV, file.path, file.location + name]
response = PythonADBManager.device.shell(shlex.join(args))
if response:
return None, response
return None, None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
@classmethod
def delete(cls, file: File) -> (str, str):
if not PythonADBManager.device:
return None, "No device selected!"
if not PythonADBManager.device.available:
return None, "Device not available!"
try:
args = [ShellCommand.RM, file.path]
if file.isdir:
args = ShellCommand.RM_DIR_FORCE + [file.path]
response = PythonADBManager.device.shell(shlex.join(args))
if response:
return None, response
return f"{'Folder' if file.isdir else 'File'} '{file.path}' has been deleted", None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
@classmethod
def download(cls, progress_callback: callable, source: str) -> (str, str):
destination = Defaults.device_downloads_path(PythonADBManager.get_device())
return cls.download_to(progress_callback, source, destination)
class UpDownHelper:
def __init__(self, callback: callable):
self.callback = callback
self.written = 0
self.total = 0
def call(self, path: str, written: int, total: int):
if self.total != total:
self.total = total
self.written = 0
self.written += written
self.callback(path, int(self.written / self.total * 100))
@classmethod
def download_to(cls, progress_callback: callable, source: str, destination: str) -> (str, str):
helper = cls.UpDownHelper(progress_callback)
destination = os.path.join(destination, os.path.basename(os.path.normpath(source)))
if PythonADBManager.device and PythonADBManager.device.available and source:
try:
PythonADBManager.device.pull(
device_path=source,
local_path=destination,
progress_callback=helper.call
)
return f"Download successful!\nDest: {destination}", None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
return None, None
@classmethod
def new_folder(cls, name) -> (str, str):
if not PythonADBManager.device:
return None, "No device selected!"
if not PythonADBManager.device.available:
return None, "Device not available!"
try:
args = [ShellCommand.MKDIR, f'{PythonADBManager.path()}{name}']
response = PythonADBManager.device.shell(shlex.join(args))
return None, response
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
@classmethod
def upload(cls, progress_callback: callable, source: str) -> (str, str):
helper = cls.UpDownHelper(progress_callback)
destination = PythonADBManager.path() + os.path.basename(os.path.normpath(source))
if PythonADBManager.device and PythonADBManager.device.available and PythonADBManager.path() and source:
try:
PythonADBManager.device.push(
local_path=source,
device_path=destination,
progress_callback=helper.call
)
return f"Upload successful!\nDest: {destination}", None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
return None, None
class DeviceRepository:
@classmethod
def devices(cls) -> (List[Device], str):
if PythonADBManager.device:
PythonADBManager.device.close()
errors = []
devices = []
for device in USBContext().getDeviceList(skip_on_error=True):
for setting in device.iterSettings():
if (setting.getClass(), setting.getSubClass(), setting.getProtocol()) == (0xFF, 0x42, 0x01):
try:
device_id = device.getSerialNumber()
PythonADBManager.connect(device_id)
device_name = " ".join(
PythonADBManager.device.shell(" ".join(ShellCommand.GETPROP_PRODUCT_MODEL)).split()
)
device_type = "device" if PythonADBManager.device.available else "unknown"
devices.append(Device(id=device_id, name=device_name, type=device_type))
PythonADBManager.device.close()
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
errors.append(str(error))
return devices, str("\n".join(errors))
@classmethod
def connect(cls, device_id: str) -> (str, str):
try:
if PythonADBManager.device:
PythonADBManager.device.close()
serial = PythonADBManager.connect(device_id)
if PythonADBManager.device.available:
device_name = " ".join(
PythonADBManager.device.shell(" ".join(ShellCommand.GETPROP_PRODUCT_MODEL)).split()
)
PythonADBManager.set_device(Device(id=serial, name=device_name, type="device"))
return "Connection established", None
return None, "Device not available"
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
@classmethod
def disconnect(cls) -> (str, str):
try:
if PythonADBManager.device:
PythonADBManager.device.close()
return "Disconnected", None
return None, None
except BaseException as error:
logging.error(f"Unexpected {error=}, {type(error)=}")
return None, error
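# Illustrative usage sketch (assumption, not part of the original module): list the
# attached ADB devices, connect to the first one, and print the files returned by
# the repository for the manager's current path.
if __name__ == '__main__':
    devices, errors = DeviceRepository.devices()
    if devices:
        message, error = DeviceRepository.connect(devices[0].id)
        print(message or error)
        files, error = FileRepository.files()
        for item in files or []:
            print(item.path, item.size)
        DeviceRepository.disconnect()
    elif errors:
        print(errors)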
|
import os
import gconf
from xdg.DesktopEntry import DesktopEntry
from fluxgui.exceptions import DirectoryCreationError
################################################################
# Color temperatures.
# The color options available in the color preferences dropdown in the
# "Preferences" GUI are defined in ./preferences.glade. Choosing a
# preference in the GUI returns a number, with 0 for the first choice,
# 1 for the second choice, etc.
default_temperature = '3400'
off_temperature = '6500'
temperatures = [
'2000', # The minimum supported by flux; see https://github.com/xflux-gui/xflux-gui/issues/51
'2300',
'2700',
'3400', # The 'default_temperature' needs to be one of the options!
'4200',
'5000',
# The "off temperature" is not one of the menu choices, but
# the previous code included it, so @ntc2 is leaving it in
# without understanding why ...
#
# TODO(ntc2): understand why this entry is in the list, and remove
# it if possible.
off_temperature ]
def key_to_temperature(key):
"""The inverse of 'temperature_to_key'.
"""
# The old version of this code supported a special key "off". We
# now map all unknown keys to "off", but I don't understand what
# the "off" value is for.
#
# TODO(ntc2): figure out what the "off" value is for.
try:
return temperatures[key]
except IndexError:
return off_temperature
def temperature_to_key(temperature):
"""Convert a temperature like "3400" to a Glade/GTK menu value like
"1" or "off".
"""
for i, t in enumerate(temperatures):
if t == temperature:
return i
# For invalid temperatures -- which should be impossible ? --
# return the number corresponding to the off temperature. Perhaps
# we could also return "off" here? But I have no idea how this
# code is even triggered.
return len(temperatures) - 1
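# Illustrative round-trip check (not part of the original module); unknown keys
# fall back to the off temperature.
if __name__ == '__main__':
    assert temperature_to_key('3400') == temperatures.index('3400')
    assert key_to_temperature(temperature_to_key('2700')) == '2700'
    assert key_to_temperature(99) == off_temperature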
################################################################
class Settings(object):
def __init__(self):
# You can use 'gconftool --dump /apps/fluxgui' to see current
# settings on command line.
self.client = GConfClient("/apps/fluxgui")
self._color = self.client.get_client_string("colortemp", 3400)
self._autostart = self.client.get_client_bool("autostart")
self._latitude = self.client.get_client_string("latitude")
self._longitude = self.client.get_client_string("longitude")
self._zipcode = self.client.get_client_string("zipcode")
self.has_set_prefs = True
if not self._latitude and not self._zipcode:
self.has_set_prefs = False
self._zipcode = '90210'
self.autostart=True
# After an upgrade to fluxgui where the color options change,
# the color setting may no longer be one of the menu
# options. In this case, we reset to the default night time
# temp.
if self._color not in temperatures:
self.color = default_temperature
else:
self.color = self._color
def xflux_settings_dict(self):
d = {
'color': self.color,
'latitude': self.latitude,
'longitude': self.longitude,
'zipcode': self.zipcode,
'pause_color': off_temperature
}
return d
def _get_color(self):
return str(self._color)
def _set_color(self, value):
self._color = value
self.client.set_client_string("colortemp", value)
def _get_latitude(self):
return str(self._latitude)
def _set_latitude(self, value):
self._latitude = value
self.client.set_client_string("latitude", value)
def _get_longitude(self):
return str(self._longitude)
def _set_longitude(self, value):
self._longitude = value
self.client.set_client_string("longitude", value)
def _get_zipcode(self):
return str(self._zipcode)
def _set_zipcode(self, value):
self._zipcode = value
self.client.set_client_string("zipcode", value)
def _get_autostart(self):
return bool(self._autostart)
def _set_autostart(self, value):
self._autostart = value
self.client.set_client_bool("autostart", self._autostart)
if self._autostart:
self._create_autostarter()
else:
self._delete_autostarter()
color=property(_get_color, _set_color)
latitude=property(_get_latitude, _set_latitude)
longitude=property(_get_longitude, _set_longitude)
zipcode=property(_get_zipcode, _set_zipcode)
autostart=property(_get_autostart, _set_autostart)
#autostart code copied from AWN
def _get_autostart_file_path(self):
autostart_dir = os.path.join(os.environ['HOME'], '.config',
'autostart')
return os.path.join(autostart_dir, 'fluxgui.desktop')
def _create_autostarter(self):
autostart_file = self._get_autostart_file_path()
autostart_dir = os.path.dirname(autostart_file)
if not os.path.isdir(autostart_dir):
#create autostart dir
try:
os.mkdir(autostart_dir)
except DirectoryCreationError as e:
print("Creation of autostart dir failed, please make it yourself: %s" % autostart_dir)
raise e
if not os.path.isfile(autostart_file):
#create autostart entry
starter_item = DesktopEntry(autostart_file)
starter_item.set('Name', 'f.lux indicator applet')
starter_item.set('Exec', 'fluxgui')
starter_item.set('Icon', 'fluxgui')
starter_item.set('X-GNOME-Autostart-enabled', 'true')
starter_item.write()
self.autostart = True
def _delete_autostarter(self):
autostart_file = self._get_autostart_file_path()
if os.path.isfile(autostart_file):
os.remove(autostart_file)
self.autostart = False
class GConfClient(object):
"""
Gets and sets gconf settings.
"""
def __init__(self, prefs_key):
self.client = gconf.client_get_default()
self.prefs_key = prefs_key
self.client.add_dir(self.prefs_key, gconf.CLIENT_PRELOAD_NONE)
def get_client_string(self, property_name, default=""):
client_string = self.client.get_string(self.prefs_key+"/"+property_name)
if client_string is None:
client_string = default
return client_string
def set_client_string(self, property_name, value):
self.client.set_string(self.prefs_key + "/" + property_name, str(value))
def get_client_bool(self, property_name, default=True):
try:
gconf_type = self.client.get(self.prefs_key + "/"
+ property_name).type
except AttributeError:
# key is not set
self.set_client_bool(property_name, default)
client_bool = default
return client_bool
client_bool = None
if gconf_type != gconf.VALUE_BOOL:
# previous release used strings for autostart, handle here
client_string = self.get_client_string(property_name).lower()
if client_string == '1':
self.set_client_bool(property_name, True)
client_bool = True
elif client_string == '0':
self.set_client_bool(property_name, False)
client_bool = False
else:
client_bool = self.client.get_bool(self.prefs_key
+ "/"+property_name)
return client_bool
def set_client_bool(self, property_name, value):
self.client.set_bool(self.prefs_key + "/" + property_name, bool(value))
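# Illustrative usage sketch (assumption, not part of the original module): read the
# stored preferences and print the dictionary handed to the xflux controller.
if __name__ == '__main__':
    settings = Settings()
    print(settings.xflux_settings_dict())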
|
from .load import *
|
import sys
import glob
import os
from functools import partial
import numpy as np
import pandas as pd
from PyQt5.QtCore import (Qt, QObject, QProcess, QSettings, QThread, QTimer,
pyqtSignal, pyqtSlot)
from PyQt5.QtMultimedia import QAudioFormat, QAudioOutput, QMediaPlayer
from PyQt5.QtWidgets import QMainWindow
from PyQt5 import QtGui as gui
from PyQt5 import QtCore
from PyQt5 import QtWidgets as widgets
from fbs_runtime.application_context.PyQt5 import ApplicationContext
from interfaces.audio import (
LazyMultiWavInterface,
LazySignalInterface,
LazyWavInterface,
ConcatenatedMultiChannelInterface,
ConcatenatedWavInterface,
)
from app.state import AppState, ViewState
from app.views import MainView
from app.settings import fonts, read_default
from app.style import qss
class Events(widgets.QWidget):
createSource = pyqtSignal(object)
sourcesChanged = pyqtSignal()
rangeChanged = pyqtSignal()
dataLoaded = pyqtSignal()
rangeSelected = pyqtSignal(object, object)
rangeHighlighted = pyqtSignal(object, object)
setPosition = pyqtSignal([object], [object, object])
zoomEvent = pyqtSignal([int], [int, float], [str])
triggerShortcut = pyqtSignal(str)
playAudio = pyqtSignal(int)
class App(widgets.QMainWindow):
"""Main App instance with logic for file read/write
"""
shortcut_codes = [
"A",
"Shift+A",
"D",
"Shift+D",
"E",
"F",
"M",
"Q",
"S",
"Shift+S",
"W",
"Shift+W",
"X",
"Z",
"Ctrl+W",
"Space",
"Escape"
]
def __init__(self):
super().__init__()
self.title = "SoundSep"
self.settings = QSettings("Theunissen Lab", "Sound Separation")
#self.sources = SourceManager(None)
self.state = AppState()
self.view_state = ViewState()
self.events = Events()
self.init_ui()
self.init_actions()
self.init_menus()
self.update_open_recent_actions()
self.display_main()
if self.settings.value("OPEN_RECENT", []):
self.load_dir(self.settings.value("OPEN_RECENT")[-1])
self.autosave_timer = QTimer(self)
self.autosave_timer.timeout.connect(self.autosave)
self.autosave_timer.start(read_default.AUTOSAVE_SECONDS * 1000)
def init_actions(self):
self.open_directory_action = widgets.QAction("Open Directory", self)
self.open_directory_action.triggered.connect(self.run_directory_loader)
self.open_recent_actions = []
for i in range(read_default.MAX_RECENT_FILES):
action = widgets.QAction("", self)
action.setVisible(False)
action.triggered.connect(partial(self.open_recent, i))
self.open_recent_actions.append(action)
self.load_sources_action = widgets.QAction("Load Sources", self)
self.load_sources_action.triggered.connect(self.load_sources)
self.save_action = widgets.QAction("Save", self)
self.save_action.triggered.connect(self.save)
self.save_as_action = widgets.QAction("Save As", self)
self.save_as_action.triggered.connect(partial(self.save, save_as=True))
self.quit_action = widgets.QAction("Close", self)
self.quit_action.triggered.connect(self.close)
self.export_action = widgets.QAction("Export Pickle", self)
self.export_action.triggered.connect(partial(self.export, "pkl"))
self.export_csv_action = widgets.QAction("Export CSV", self)
self.export_csv_action.triggered.connect(partial(self.export, "csv"))
self.save_shortcut = widgets.QShortcut(gui.QKeySequence.Save, self)
self.save_shortcut.activated.connect(self.save)
self.close_shortcut = widgets.QShortcut(gui.QKeySequence.Close, self)
self.close_shortcut.activated.connect(self.close)
self.help_action = widgets.QAction("Help", self)
self.help_action.triggered.connect(self.help)
for code in self.shortcut_codes:
shortcut = widgets.QShortcut(gui.QKeySequence(code), self)
shortcut.activated.connect(partial(self.pass_shortcut, code))
# self.show_pref_action = widgets.QAction("Amplitude Envelope Parameters", self)
# self.show_pref_action.triggered.connect(self.amp_env_pref_window.show)
def pass_shortcut(self, shortcut):
"""
For some reason I can't get shortcuts defined in the child widgets
to work. So I have to define the shortcuts in this app and then pass
them through an event.
"""
self.events.triggerShortcut.emit(shortcut)
def init_ui(self):
self.setWindowTitle(self.title)
def init_menus(self):
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu("&File")
fileMenu.addAction(self.open_directory_action)
self.openRecentMenu = fileMenu.addMenu("&Open Recent")
for i in range(read_default.MAX_RECENT_FILES):
self.openRecentMenu.addAction(self.open_recent_actions[i])
fileMenu.addSeparator()
fileMenu.addAction(self.load_sources_action)
fileMenu.addSeparator()
fileMenu.addAction(self.export_action)
fileMenu.addAction(self.export_csv_action)
fileMenu.addAction(self.save_action)
fileMenu.addAction(self.save_as_action)
fileMenu.addSeparator()
fileMenu.addAction(self.quit_action)
settingsMenu = mainMenu.addMenu("&Settings")
helpMenu = mainMenu.addMenu("&Help")
helpMenu.addAction(self.help_action)
# settingsMenu.addAction(self.show_pref_action)
def help(self):
url = QtCore.QUrl(read_default.GITHUB_LINK)
gui.QDesktopServices.openUrl(url)
def display_main(self):
self.main_view = MainView(self)
self.setCentralWidget(self.main_view)
self.resize(1024, 768)
self.show()
def update_open_recent_actions(self):
recently_opened = self.settings.value("OPEN_RECENT", [])
for i in range(read_default.MAX_RECENT_FILES):
if i < len(recently_opened):
self.open_recent_actions[i].setText(recently_opened[-(i + 1)])
self.open_recent_actions[i].setData(recently_opened[-(i + 1)])
self.open_recent_actions[i].setVisible(True)
else:
self.open_recent_actions[i].setText(None)
self.open_recent_actions[i].setData(None)
self.open_recent_actions[i].setVisible(False)
if not len(recently_opened):
self.openRecentMenu.setDisabled(True)
else:
self.openRecentMenu.setDisabled(False)
def run_directory_loader(self):
"""Dialog to read in a directory of wav files and intervals
At some point we may have the gui generate the intervals file if it doesn't
exist yet, but for now it must be precomputed.
The directory should contain the following files (one wav per channel):
- ch0.wav
- ch1.wav
- ch2.wav
...
- intervals.npy
- spectrograms.npy
"""
options = widgets.QFileDialog.Options()
selected_file = widgets.QFileDialog.getExistingDirectory(
self,
"Load directory",
self.settings.value("OPEN_RECENT", ["."])[-1],
options=options
)
if selected_file:
self.load_dir(selected_file)
def load_dir(self, dir):
if not os.path.isdir(dir):
widgets.QMessageBox.warning(
self,
"Error",
"{} is not a directory".format(dir),
)
return
# Update the open recent menu item
open_recent = self.settings.value("OPEN_RECENT", [])
try:
idx = open_recent.index(dir)
except ValueError:
open_recent.append(dir)
else:
open_recent.pop(idx)
open_recent.append(dir)
max_recent = read_default.MAX_RECENT_FILES
open_recent = open_recent[-max_recent:]
self.settings.setValue("OPEN_RECENT", open_recent)
self.update_open_recent_actions()
self.state.reset()
self._load_dir(dir)
def _load_dir(self, dir):
# This function can load from many different directory formats
# 1. toplevel/ch0/*.wav, toplevel/ch1/*.wav, ... (each wav has 1 channel, many wavs per channel)
# 2. toplevel/ch0.wav, toplevel/ch1.wav, ... (each wav has 1 channel, 1 wav per channel)
# 3. toplevel/wavs/*.wav (each wav has multiple channels, many wavs)
# 4. toplevel/lazy.npy (lazy loading from songephys project)
data_directory = os.path.join(dir, "outputs")
wav_files = glob.glob(os.path.join(dir, "ch[0-9]*.wav"))
wav_dir_files = glob.glob(os.path.join(dir, "ch[0-9]", "*.wav"))
multi_channel_wavs = glob.glob(os.path.join(dir, "wavs", "*.wav"))
lazy_file = os.path.join(dir, "lazy.npy")
if not os.path.exists(data_directory):
os.makedirs(data_directory)
self.save_file = os.path.join(data_directory, "save.npy")
if not len(wav_files) and not os.path.exists(lazy_file) and not wav_dir_files and not multi_channel_wavs:
widgets.QMessageBox.warning(
self,
"Error",
"No wav files found.",
)
return
elif not len(wav_files) and len(wav_dir_files):
sound_object = ConcatenatedMultiChannelInterface.create_from_directory(
dir,
force_equal_length=True
)
elif not len(wav_files) and not len(wav_dir_files) and len(multi_channel_wavs):
sound_object = ConcatenatedWavInterface(
multi_channel_wavs,
)
elif os.path.exists(lazy_file):
sound_object = LazySignalInterface(lazy_file)
# The absolute path of the file when loaded remotely may not exist
if not sound_object.validate_paths():
# First try the most likely path.
# This should work usually for songephys data...
suggested = lazy_file.split("bird")[0]
sound_object.set_data_path(suggested)
if not sound_object.validate_paths():
# Ask the user to point to the remote drive "bird" directory
widgets.QMessageBox.about(
self,
"Data directory not found",
"Path to audio data was not found for {}. If using a remote mounted drive. "
"select the data directory containing the folder \"birds\"".format(lazy_file)
)
options = widgets.QFileDialog.Options()
data_path = widgets.QFileDialog.getExistingDirectory(
self,
"Choose the data directory containing the folder \"birds\"",
suggested,
options=options
)
if not data_path:
return
sound_object.set_data_path(data_path)
elif len(wav_files) > 1:
sound_object = LazyMultiWavInterface.create_from_directory(dir, force_equal_length=True)
elif len(wav_files) == 1:
sound_object = LazyWavInterface(wav_files[0])
self.state.set("sources", [])
if os.path.exists(self.save_file):
loaded_data = np.load(self.save_file, allow_pickle=True)[()]
if "sources" in loaded_data:
self.state.set("sources", loaded_data["sources"])
if "autodetect" in loaded_data:
self.state.set("autodetected_periods", loaded_data["autodetect"])
# if "_VIEW_STATE" in loaded_data:
# self.view_state.update(loaded_data["_VIEW_STATE"])
self.state.set("sound_object", sound_object)
self.state.set("sound_file", dir)
self.events.dataLoaded.emit()
def load_sources(self):
options = widgets.QFileDialog.Options()
file_name, _ = widgets.QFileDialog.getOpenFileName(
self,
"Load sources",
self.save_file,
"*",
options=options)
if file_name:
loaded_data = np.load(file_name, allow_pickle=True)[()]
if "sources" in loaded_data:
self.state.set("sources", loaded_data["sources"])
self.events.sourcesChanged.emit()
def open_recent(self, i):
self.load_dir(self.settings.value("OPEN_RECENT")[-i])
def save(self, save_as=False):
if not save_as:
msg = ("Are you sure you want to save?\n"
"Saving will overwrite any previously saved data.")
reply = widgets.QMessageBox.question(
self,
'Save Confirmation',
msg,
widgets.QMessageBox.Yes,
widgets.QMessageBox.No)
if reply == widgets.QMessageBox.No:
return
save_data = {}
if self.state.has("sources"):
save_data["sources"] = self.state.get("sources")
# save_data["_VIEW_STATE"] = dict(self.view_state.__dict__)
if save_as:
options = widgets.QFileDialog.Options()
save_file, _ = widgets.QFileDialog.getSaveFileName(
self,
"Save intervals as",
self.save_file,
"*",
options=options)
else:
save_file = self.save_file
np.save(save_file, save_data)
widgets.QMessageBox.about(
self,
"Saved",
"Saved successfully.",
)
def autosave(self):
save_data = {}
if self.state.has("sources"):
save_data["sources"] = self.state.get("sources")
try:
np.save(self.save_file + ".autosave", save_data)
print("Autosaved to {}".format(self.save_file + ".autosave"))
except:
print("Warning: autosave failed!")
def export(self, fmt="csv"):
# Save the sources to a pandas dataframe
if not self.state.has("sources"):
widgets.QMessageBox.about(self, "!", "No sources to export")
rows = []
for source in self.state.get("sources"):
if isinstance(source.get("intervals"), pd.DataFrame):
df = source.get("intervals")
for i in np.arange(len(df)):
t0, t1 = df.iloc[i][["t_start", "t_stop"]]
rows.append([source["name"], source["channel"], t0, t1])
df = pd.DataFrame(rows, columns=["source_name", "source_channel", "t_start", "t_stop"])
options = widgets.QFileDialog.Options()
file_name, _ = widgets.QFileDialog.getSaveFileName(
self,
"Export data",
os.path.join(self.state.get("sound_file"), "intervals.{}".format(fmt)),
"*",
options=options)
if not file_name:
return
else:
if fmt == "pkl":
df.to_pickle(file_name)
elif fmt == "csv":
df.to_csv(file_name)
widgets.QMessageBox.about(
self,
"Exported",
"Exported successfully.",
)
if __name__ == '__main__':
from app.context import context
window = App()
window.setFont(fonts.default)
window.setStyle(widgets.QStyleFactory.create("Fusion"))
window.setStyleSheet(qss)
window.show()
exit_code = context.app.exec_() # 2. Invoke appctxt.app.exec_()
sys.exit(exit_code)
|
#
# PySNMP MIB module DT1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DT1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:39:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, NotificationType, IpAddress, Counter32, iso, experimental, enterprises, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ObjectIdentity, Unsigned32, ModuleIdentity, MibIdentifier, Gauge32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "NotificationType", "IpAddress", "Counter32", "iso", "experimental", "enterprises", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "MibIdentifier", "Gauge32", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
usr = MibIdentifier((1, 3, 6, 1, 4, 1, 429))
nas = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1))
dt1 = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1, 3))
dt1Id = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1, 3, 1))
dt1IdTable = MibTable((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1), )
if mibBuilder.loadTexts: dt1IdTable.setStatus('mandatory')
dt1IdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1, 1), ).setIndexNames((0, "DT1-MIB", "dt1IdIndex"))
if mibBuilder.loadTexts: dt1IdEntry.setStatus('mandatory')
dt1IdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1IdIndex.setStatus('mandatory')
dt1IdHardwareSerNum = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1IdHardwareSerNum.setStatus('mandatory')
dt1IdHardwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1IdHardwareRev.setStatus('mandatory')
dt1IdSoftwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 1, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1IdSoftwareRev.setStatus('mandatory')
dt1Cfg = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1, 3, 2))
dt1CfgTable = MibTable((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1), )
if mibBuilder.loadTexts: dt1CfgTable.setStatus('mandatory')
dt1CfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1), ).setIndexNames((0, "DT1-MIB", "dt1CfgIndex"))
if mibBuilder.loadTexts: dt1CfgEntry.setStatus('mandatory')
dt1CfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CfgIndex.setStatus('mandatory')
dt1CfgSpanATmgSrcPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notAllowed", 1), ("high", 2), ("mediumHigh", 3), ("medium", 4), ("low", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgSpanATmgSrcPrio.setStatus('mandatory')
dt1CfgSpanBTmgSrcPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notAllowed", 1), ("high", 2), ("mediumHigh", 3), ("medium", 4), ("low", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgSpanBTmgSrcPrio.setStatus('mandatory')
dt1CfgInternTmgSrcPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notAllowed", 1), ("high", 2), ("mediumHigh", 3), ("medium", 4), ("low", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgInternTmgSrcPrio.setStatus('mandatory')
dt1CfgTdmBusTmgSrcPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notAllowed", 1), ("high", 2), ("mediumHigh", 3), ("medium", 4), ("low", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CfgTdmBusTmgSrcPrio.setStatus('deprecated')
dt1CfgIdleDiscPatt = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgIdleDiscPatt.setStatus('mandatory')
dt1CfgNumT1TypeNacs = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notSupported", 1), ("single", 2), ("multiple", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgNumT1TypeNacs.setStatus('mandatory')
dt1CfgCallEventFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notSupported", 1), ("filterOutNone", 2), ("filterOutBoth", 3), ("filterOutSuccess", 4), ("filterOutFailure", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgCallEventFilter.setStatus('mandatory')
dt1CfgSetDs0OutofService = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgSetDs0OutofService.setStatus('mandatory')
dt1CfgWirelessMode = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("wireless", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CfgWirelessMode.setStatus('mandatory')
dt1Stat = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1, 3, 3))
dt1StatTable = MibTable((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1), )
if mibBuilder.loadTexts: dt1StatTable.setStatus('mandatory')
dt1StatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1), ).setIndexNames((0, "DT1-MIB", "dt1StatIndex"))
if mibBuilder.loadTexts: dt1StatEntry.setStatus('mandatory')
dt1StatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatIndex.setStatus('mandatory')
dt1StatCurrentTmgSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("spanLineA", 1), ("spanLineB", 2), ("internalClock", 3), ("tdmBusClock", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatCurrentTmgSrc.setStatus('mandatory')
dt1StatSelfTest = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatSelfTest.setStatus('mandatory')
dt1StatUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatUpTime.setStatus('mandatory')
dt1StatCallEventCode = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))).clone(namedValues=NamedValues(("notSupported", 1), ("setup", 2), ("usrSetup", 3), ("telcoDisconnect", 4), ("usrDisconnect", 5), ("noFreeModem", 6), ("modemsNotAllowed", 7), ("modemsRejectCall", 8), ("modemSetupTimeout", 9), ("noFreeIGW", 10), ("igwRejectCall", 11), ("igwSetupTimeout", 12), ("noFreeTdmts", 13), ("bcReject", 14), ("ieReject", 15), ("chidReject", 16), ("progReject", 17), ("callingPartyReject", 18), ("calledPartyReject", 19), ("blocked", 20), ("analogBlocked", 21), ("digitalBlocked", 22), ("outOfService", 23), ("busy", 24), ("congestion", 25), ("protocolError", 26), ("noFreeBchannel", 27), ("inOutCallCollision", 28), ("inCallArrival", 29), ("outCallArrival", 30), ("inCallConnect", 31), ("outCallConnect", 32)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatCallEventCode.setStatus('mandatory')
dt1StatCallEventQ931Value = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 3, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1StatCallEventQ931Value.setStatus('mandatory')
dt1Cmd = MibIdentifier((1, 3, 6, 1, 4, 1, 429, 1, 3, 4))
dt1CmdTable = MibTable((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1), )
if mibBuilder.loadTexts: dt1CmdTable.setStatus('mandatory')
dt1CmdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1), ).setIndexNames((0, "DT1-MIB", "dt1CmdIndex"))
if mibBuilder.loadTexts: dt1CmdEntry.setStatus('mandatory')
dt1CmdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CmdIndex.setStatus('mandatory')
dt1CmdMgtStationId = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CmdMgtStationId.setStatus('mandatory')
dt1CmdReqId = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CmdReqId.setStatus('mandatory')
dt1CmdFunction = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("noCommand", 1), ("saveToNVRAM", 2), ("restoreFromNVRAM", 3), ("restoreFromDefault", 4), ("nonDisruptSelfTest", 5), ("disruptSelfTest", 6), ("softwareReset", 7), ("resetToHiPrioTimingSrc", 8), ("forceTdmBusMastership", 9), ("enterSpanToSpanLoopback", 10), ("exitSpanToSpanLoopback", 11), ("restoreDefaultUIPassword", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CmdFunction.setStatus('mandatory')
dt1CmdForce = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("force", 1), ("noForce", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CmdForce.setStatus('mandatory')
dt1CmdParam = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1CmdParam.setStatus('mandatory')
dt1CmdResult = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("success", 2), ("inProgress", 3), ("notSupported", 4), ("unAbleToRun", 5), ("aborted", 6), ("failed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CmdResult.setStatus('mandatory')
dt1CmdCode = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 6, 8, 12, 20, 22, 25, 58, 73))).clone(namedValues=NamedValues(("noError", 1), ("unable", 2), ("unrecognizedCommand", 6), ("slotEmpty", 8), ("noResponse", 12), ("unsupportedCommand", 20), ("deviceDisabled", 22), ("testFailed", 25), ("userInterfaceActive", 58), ("pendingSoftwareDownload", 73)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1CmdCode.setStatus('mandatory')
dt1TrapEnaTable = MibTable((1, 3, 6, 1, 4, 1, 429, 1, 3, 5), )
if mibBuilder.loadTexts: dt1TrapEnaTable.setStatus('mandatory')
dt1TrapEnaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1), ).setIndexNames((0, "DT1-MIB", "dt1TrapEnaIndex"))
if mibBuilder.loadTexts: dt1TrapEnaEntry.setStatus('mandatory')
dt1TrapEnaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dt1TrapEnaIndex.setStatus('mandatory')
dt1TrapEnaTxTmgSrcSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaTxTmgSrcSwitch.setStatus('mandatory')
dt1TrapEnaCallEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaCallEvent.setStatus('mandatory')
dt1TrapEnaCallArriveEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaCallArriveEvent.setStatus('mandatory')
dt1TrapEnaCallConnEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaCallConnEvent.setStatus('mandatory')
dt1TrapEnaCallTermEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaCallTermEvent.setStatus('mandatory')
dt1TrapEnaCallFailEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 429, 1, 3, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enableTrap", 1), ("disableAll", 2), ("enableLog", 3), ("enableAll", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dt1TrapEnaCallFailEvent.setStatus('mandatory')
mibBuilder.exportSymbols("DT1-MIB", dt1IdIndex=dt1IdIndex, dt1TrapEnaCallEvent=dt1TrapEnaCallEvent, dt1StatCallEventQ931Value=dt1StatCallEventQ931Value, dt1StatTable=dt1StatTable, dt1TrapEnaTxTmgSrcSwitch=dt1TrapEnaTxTmgSrcSwitch, dt1StatUpTime=dt1StatUpTime, usr=usr, dt1CmdParam=dt1CmdParam, nas=nas, dt1CmdTable=dt1CmdTable, dt1TrapEnaTable=dt1TrapEnaTable, dt1CfgSpanATmgSrcPrio=dt1CfgSpanATmgSrcPrio, dt1StatSelfTest=dt1StatSelfTest, dt1TrapEnaCallArriveEvent=dt1TrapEnaCallArriveEvent, dt1StatIndex=dt1StatIndex, dt1CmdEntry=dt1CmdEntry, dt1StatCallEventCode=dt1StatCallEventCode, dt1TrapEnaCallTermEvent=dt1TrapEnaCallTermEvent, dt1IdSoftwareRev=dt1IdSoftwareRev, dt1CfgTdmBusTmgSrcPrio=dt1CfgTdmBusTmgSrcPrio, dt1CmdReqId=dt1CmdReqId, dt1CmdCode=dt1CmdCode, dt1CfgSpanBTmgSrcPrio=dt1CfgSpanBTmgSrcPrio, dt1TrapEnaIndex=dt1TrapEnaIndex, dt1IdEntry=dt1IdEntry, dt1CfgCallEventFilter=dt1CfgCallEventFilter, dt1StatEntry=dt1StatEntry, dt1=dt1, dt1CfgWirelessMode=dt1CfgWirelessMode, dt1Cmd=dt1Cmd, dt1CfgIndex=dt1CfgIndex, dt1CfgEntry=dt1CfgEntry, dt1TrapEnaCallFailEvent=dt1TrapEnaCallFailEvent, dt1IdHardwareSerNum=dt1IdHardwareSerNum, dt1CfgIdleDiscPatt=dt1CfgIdleDiscPatt, dt1IdTable=dt1IdTable, dt1Id=dt1Id, dt1TrapEnaEntry=dt1TrapEnaEntry, dt1CmdForce=dt1CmdForce, dt1CfgSetDs0OutofService=dt1CfgSetDs0OutofService, dt1StatCurrentTmgSrc=dt1StatCurrentTmgSrc, dt1CmdIndex=dt1CmdIndex, dt1Cfg=dt1Cfg, dt1CmdMgtStationId=dt1CmdMgtStationId, dt1CfgTable=dt1CfgTable, dt1Stat=dt1Stat, dt1CfgInternTmgSrcPrio=dt1CfgInternTmgSrcPrio, dt1IdHardwareRev=dt1IdHardwareRev, dt1TrapEnaCallConnEvent=dt1TrapEnaCallConnEvent, dt1CfgNumT1TypeNacs=dt1CfgNumT1TypeNacs, dt1CmdResult=dt1CmdResult, dt1CmdFunction=dt1CmdFunction)
|
'''
run.py
Run file for photo service.
Author: Nicolas Inden
eMail: nico@smashnet.de
GPG-Key-ID: B2F8AA17
GPG-Fingerprint: A757 5741 FD1E 63E8 357D 48E2 3C68 AE70 B2F8 AA17
License: MIT License
'''
import os, os.path
import sys
import cherrypy
import redis
import sqlite3
import common
import config
from service_routing import PhotoServiceRouting
from delete_files_task_processor import DeleteFilesTaskProcessor
def init_service():
## Init local data storage
## Create directories if not existing yet
if not os.path.exists(config.PHOTO_DIR):
os.makedirs(config.PHOTO_DIR)
## Init redis communication
common.myRedis = redis.Redis(host='redis', port=6379, db=0)
## Listen on redis channel _delete-files_ for new tasks
common.deleteFilesTaskThread = DeleteFilesTaskProcessor()
common.deleteFilesTaskThread.daemon = True
common.deleteFilesTaskThread.start()
## Init DB and create tables if not yet existing
with sqlite3.connect(config.DB_STRING) as con:
con.execute("CREATE TABLE IF NOT EXISTS general (key, value)")
con.execute("CREATE TABLE IF NOT EXISTS files (fileid, filename, extension, content_type, md5, uploader, timestamp_date_time_original timestamp, timestamp_uploaded timestamp)")
## Check DB version
with sqlite3.connect(config.DB_STRING) as con:
r = con.execute("SELECT value FROM general WHERE key='version' LIMIT 1")
res = r.fetchall()
if len(res) == 0:
con.execute("INSERT INTO general VALUES (?, ?)", ["version", config.VERSION])
elif config.VERSION == res[0][0]:
# Program and DB run same version, everything OK!
pass
else:
# Different versions! Please migrate!
# TODO
print("Running ? v? with DB v?! Exiting...", (config.NAME, config.VERSION, res[0][0]))
sys.exit(100)
def cleanup():
common.deleteFilesTaskThread.join(timeout=1.0)
return
if __name__ == '__main__':
service_routing = PhotoServiceRouting()
conf = {
'/': {
'tools.sessions.on': False,
'request.dispatch': service_routing.getRoutesDispatcher()
}
}
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.server.socket_port = 8080
cherrypy.engine.subscribe('start', init_service)
cherrypy.engine.subscribe('stop', cleanup)
cherrypy.quickstart(None, '/photo-service', conf)
|
n = float(input("digite um valor: "))
v = []
i = 0
v.append(n)
print("N["+str(i)+"] = "+str(v[i]))
while i < 9:
n *= 2
v.append(n)
i += 1
print("N["+str(i)+"] = "+str(v[i]))
|
from .stopword_modification import StopwordModification
from .repeat_modification import RepeatModification
from .input_column_modification import InputColumnModification
from .max_word_index_modification import MaxWordIndexModification
from .min_word_length import MinWordLength
|
import numpy as np
from skimage.measure import shannon_entropy
from tqdm import tqdm
def read_label_string():
label_path = "./cifar100_text.txt"
    with open(label_path, "r") as f:
        labels = f.read()
    labels = labels.split(",")
    labels = [label.strip() for label in labels]
# labels = [i.split(' ')[0] for i in labels]
return labels
def get_vec_from_names(names):
label_arr = []
label_path = "./glove.6B.50d.txt"
    with open(label_path, "r") as f:
        labels = f.read()
labels = labels.split("\n")
# Now construct a dictionary
word_dict = {}
for i, row in enumerate(tqdm(labels)):
row = row.split(' ')
word_dict[row[0]] = np.asarray(row[1:]).astype(np.float32)
# Now retrieve vectors from names
for name in names:
name = name.split(' ')[0].split('-')[0]
label_arr.append(word_dict[name])
return np.asarray(label_arr)
cifar100_names = read_label_string()
cifar100_labels = get_vec_from_names(cifar100_names)
np.save('./cifar100_glove.npy', cifar100_labels.astype(np.float32))
entropy_dict = {}
entropy_arr = []
for i in range(10):
value = shannon_entropy(cifar100_labels[i])
entropy_dict[cifar100_names[i]] = value
entropy_arr.append(value)
entropy_arr = np.asarray(entropy_arr)
print('mean entropy: ', np.mean(entropy_arr))
print('std entropy: ', np.std(entropy_arr))
|
from tonks.vision.models.multi_task_resnet import ResnetForMultiTaskClassification
|
import base64  # needed by get_secret() when the secret is returned as SecretBinary
import json, os
import logging
import boto3
from datetime import *
import pprint, re
from elasticsearch import Elasticsearch, helpers
from opensearchpy import OpenSearch, helpers, RequestsHttpConnection
import requests
from requests_aws4auth import AWS4Auth
import urllib.parse
from botocore.exceptions import ClientError
import shlex, subprocess
from urllib.parse import unquote_plus
import elasticsearch
# import PyPDF2
from io import BytesIO
import io
from pptx import Presentation
import fitz
from requests.auth import HTTPBasicAuth
from docx import Document
import pandas as pd
logging.getLogger().setLevel(logging.INFO)
logging.info(f'date={date.today()}')
cfn = boto3.resource('cloudformation')
def lambda_handler(event, context):
logging.info('lambda_handler starts...')
print("Lambda function ARN:", context.invoked_function_arn)
runtime_region = os.environ['AWS_REGION']
context_arn=context.invoked_function_arn
u_id=context_arn.split('-')[-1]
print('u_id',u_id)
print('***********************************************')
s3 = boto3.client('s3')
data={}
doc_list=[]
check=0
secret_data_internal = get_secret(
'nasuni-labs-internal-'+u_id, runtime_region)
secret_nct_nce_admin = get_secret('nasuni-labs-os-admin',runtime_region)
role = secret_data_internal['discovery_lambda_role_arn']
username=secret_nct_nce_admin['nac_es_admin_user']
role_data = '{"backend_roles":["' +role + '"],"hosts": [],"users": ["'+username+'"]}'
print('role_data',role_data)
with open("/tmp/"+"/data.json", "w") as write_file:
write_file.write(role_data)
link=secret_nct_nce_admin['nac_kibana_url']
link=link[:link.index('_')]
password=secret_nct_nce_admin['nac_es_admin_password']
data_file_obj = '/tmp/data.json'
merge_link = '\"https://'+link+'_opendistro/_security/api/rolesmapping/all_access\"'
url = 'https://' + link + '_opendistro/_security/api/rolesmapping/all_access/'
headers = {'content-type': 'application/json'}
response = requests.put(url, auth=HTTPBasicAuth(username, password), headers=headers, data=role_data)
print(response.text)
#Deletion of folder from s3
for record in event['Records']:
print(record)
data['dest_bucket'] = record['s3']['bucket']['name']
data['object_key'] = unquote_plus(record['s3']['object']['key'])
data['size'] = str(record['s3']['object'].get('size', -1))
file_name=os.path.basename(data['object_key'])
data['event_name'] = record['eventName']
data['event_time'] = record['eventTime']
data['awsRegion'] = record['awsRegion']
data['extension'] = file_name[file_name.index('.')+1:]
data['volume_name'] = secret_data_internal['volume_name']
#data['root_handle'] = secret_data_internal['root_handle'].replace('.','_').lower()
data['root_handle'] = re.sub('[!@#$%^&*()+?=,<>/.]', '-', secret_data_internal['root_handle']).lower()
data['source_bucket'] = secret_data_internal['discovery_source_bucket']
print("data['object_key']",data['object_key'])
print("data['dest_bucket']",data['dest_bucket'])
obj1 = s3.get_object(Bucket=data['dest_bucket'], Key=data['object_key'])
if data['extension'] in ['csv','txt']:
data['content'] = obj1['Body'].read().decode('utf-8')
elif data['extension'] == 'pdf':
file_content = obj1['Body'].read()
text = ""
with fitz.open(stream=file_content, filetype="pdf") as doc:
# iterating through pdf file pages
for page in doc:
# fetching & appending text to text variable of each page
text += page.getText()
            print('pdf data printing', text)
data['content'] = text
elif data['extension'] in ['docx','doc']:
fs = obj1['Body'].read()
sentence = str(parseDocx(fs))
            print('docx data printing', sentence)
data['content'] = sentence
elif data['extension'] in ['xlsx','xls']:
file_content = obj1['Body'].read()
read_excel_data = io.BytesIO(file_content)
df = pd.read_excel(read_excel_data)
df = df.to_string()
            print('xlsx data printing', df)
data['content'] = df
elif data['extension'] == 'pptx':
print('data[extension] elif',data['extension'])
pptx_content = obj1['Body'].read()
ppt = Presentation(io.BytesIO(pptx_content))
pptx_data=''
for slide in ppt.slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
pptx_data+=run.text
print(pptx_data)
data['content'] = pptx_data
if secret_data_internal['web_access_appliance_address']!='not_found':
data['access_url']='https://'+secret_data_internal['web_access_appliance_address']+'/fs/view/'+data['volume_name']+'/'+file_name
else:
data['access_url']=secret_data_internal['web_access_appliance_address']
print('data',data)
print('secret_data_internal',secret_data_internal)
es_obj = launch_es(secret_nct_nce_admin['nac_es_url'],data['awsRegion'])
check=connect_es(es_obj,data['root_handle'], data)
#Deletion of folder from s3
if check == 0:
            print('Insertion into ES succeeded. Hence deleting the s3 bucket folder.')
del_s3_folder(data['object_key'],data['dest_bucket'])
else:
            print('Not deleting the s3 bucket folder because not all data got loaded into ES.')
logging.info('lambda_handler ends...')
def parseDocx(data):
data = io.BytesIO(data)
document = Document(docx = data)
content = ''
for para in document.paragraphs:
data = para.text
content+= data
return content
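# Hypothetical usage sketch (not in the original): parseDocx expects the raw bytes of a
# .docx file and returns its paragraph text concatenated into one string, e.g.
#
#     with open("report.docx", "rb") as fh:   # "report.docx" is a placeholder path
#         text = parseDocx(fh.read())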
def del_s3_folder(full_path,dest_bucket):
print("Full Path:-",full_path)
path=os.path.dirname(full_path)
print("Folder Path:-",path)
s3 = boto3.resource('s3')
bucket = s3.Bucket(dest_bucket)
bucket.objects.filter(Prefix=path).delete()
def launch_es(es_url,region):
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
# es = Elasticsearch(hosts=[{'host': es_url, 'port': 443}], http_auth=awsauth, use_ssl=True, verify_certs=True)
# es = Elasticsearch(hosts=[{'host': es_url, 'port': 443}], http_auth=awsauth, verify_certs=True)
es = OpenSearch(hosts=[{'host': es_url, 'port': 443}], http_auth=awsauth, use_ssl=True, verify_certs=True,connection_class = RequestsHttpConnection)
return es
def connect_es(es,index, data):
#CTPROJECT-125
try:
flag = 0
for elem in es.cat.indices(format="json"):
query = {"query": {"match_all": {}}}
resp = es.search(index=elem['index'], body=query)
for i in resp['hits']['hits']:
idx_content = i['_source'].get('content', 0)
idx_object_key = i['_source'].get('object_key', 0)
if idx_content == data['content'] and idx_object_key == data['object_key']:
flag = 1
print("Indexing is doing when the idx_content and idx_object_key has matched", resp)
es.index(index=i['_index'], doc_type="_doc", id=i['_id'], body=data)
break
if flag == 0:
doc_list = []
doc_list += [data]
logging.info("\nAttempting to index the list of docs using helpers.bulk()")
# use the helpers library's Bulk API to index list of Elasticsearch docs
resp = helpers.bulk(es, doc_list, index=data['root_handle'], doc_type="_doc")
# print the response returned by Elasticsearch
print("helpers.bulk() RESPONSE:", resp)
print("helpers.bulk() RESPONSE:", json.dumps(resp, indent=4))
return 0
except Exception as e:
logging.error('ERROR: {0}'.format(str(e)))
logging.error('ERROR: Unable to index line:"{0}"'.format(str(data['object_key'])))
print(e)
return 1
def get_secret(secret_name,region_name):
secret = ''
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name,
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
print("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
print("The request had invalid params:", e)
elif e.response['Error']['Code'] == 'DecryptionFailure':
print("The requested secret can't be decrypted using the provided KMS key:", e)
elif e.response['Error']['Code'] == 'InternalServiceError':
print("An error occurred on service side:", e)
else:
# Secrets Manager decrypts the secret value using the associated KMS CMK
# Depending on whether the secret was a string or binary, only one of these fields will be populated
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(secret)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zparteka
"""
def read_data(infile):
masks = {}
with open(infile, 'r') as f:
data = f.readlines()
for i in range(len(data)):
if data[i].strip().startswith("mask"):
mask = data[i].strip()[7:]
counter = i + 1
masks[mask] = []
while data[counter].startswith("mem"):
line = data[counter].strip().split(' = ')
masks[mask].append((int(line[0][4:-1]), int(line[1])))
if counter == len(data) - 1:
return masks
else:
counter += 1
def masking(masks):
numbers = {}
for i in masks.keys():
for j in masks[i]:
mask = format(j[1], "036b")
for n in range(len(mask)):
if i[n] != "X" and mask[n] != i[n]:
mask = mask[:n] + i[n] + mask[n + 1:]
numbers[j[0]] = int(mask, 2)
return sum(list(numbers.values()))
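# Hypothetical worked example (not part of the original solution): in part 1 the mask
# overwrites bits where it holds '0' or '1' and leaves 'X' positions unchanged, so the
# Advent of Code 2020 day 14 example mask (29 leading X's followed by "1XXXX0X")
# applied to the value 11 (0b1011) stores 73 (0b1001001).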
def part2(masks):
numbers = {}
number_counter = 0
for i in masks.keys():
print(i)
for j in masks[i]:
print(j[0])
mask = format(j[0], "036b")
counter = 0
for n in range(len(mask)):
if i[n] == '0':
continue
elif i[n] == '1':
mask = mask[:n] + '1' + mask[n + 1:]
if i[n] == 'X':
counter += 1
mask = mask[:n] + 'X' + mask[n + 1:]
arr = [None] * counter
ready = []
generate_binary(counter, arr, 0, ready)
for variation in ready:
print(variation)
counter = 0
fmask = mask
for k in range(len(fmask)):
if fmask[k] == "X":
fmask = fmask[:k] + str(variation[counter]) + fmask[k + 1:]
counter += 1
numbers[int(fmask, 2)] = j[1]
number_counter += 1
print(number_counter)
print(len(numbers.keys()))
return sum(list(numbers.values()))
def generate_binary(n, arr, i, ready):
barr = arr[:]
if i == n:
ready.append(barr)
return
barr[i] = 0
generate_binary(n, barr, i + 1, ready)
barr[i] = 1
generate_binary(n, barr, i + 1, ready)
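# Hypothetical usage sketch (not in the original): generate_binary enumerates every
# 0/1 assignment of length n, which part2 uses to expand the floating 'X' bits of a
# mask into all concrete memory addresses.
#
#     combos = []
#     generate_binary(2, [None] * 2, 0, combos)
#     # combos == [[0, 0], [0, 1], [1, 0], [1, 1]]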
def main():
masks = read_data("input_day14")
print(part2(masks))
if __name__ == '__main__':
main()
|
class ProcessingHelper():
def __init__(self, df):
self.dframe = df
|
from django.shortcuts import render
from django.views.generic import TemplateView, View, ListView, DetailView
from league.models import Schedule, Standings, Season, Player, Team, STANDINGS_ORDER
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
class StandingsFull(ListView):
template_name = 'standings.html'
model = Standings
context_object_name = 'standings'
def get_context_data(self, **kwargs):
context = super(StandingsFull, self).get_context_data(**kwargs)
season_name = ''
if self.kwargs.get('season'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = ": {} {}".format(season.league, season.name)
context['table_name'] = season_name
context['slug'] = self.kwargs.get('season')
return context
def get_queryset(self, *args, **kwargs):
qs = self.model.objects.all()
if self.kwargs.get('season'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = season.name
order = STANDINGS_ORDER[season.standings_order][1]
qs = self.model.objects.filter(season=season_pk).order_by(*order)
return qs
class TeamDetails(DetailView):
template_name = 'team.html'
model = Team
context_object_name = 'team'
slug_url_kwarg = 'team'
def get_context_data(self, **kwargs):
context = super(TeamDetails, self).get_context_data(**kwargs)
if self.kwargs.get('season') and self.kwargs.get('team'):
season_pk = Season.objects.get(slug=self.kwargs['season']).pk
team_pk = self.model.objects.get(slug=self.kwargs.get('team')).pk
team = Standings.objects.get(season=season_pk, team=team_pk)
roster = Player.objects.filter(season=season_pk, team=team_pk)
schedule = Schedule.objects.filter(Q(home_team=team_pk) | Q(away_team=team_pk), season = season_pk ).order_by('date')
context['team_more'] = team
context['team_roster'] = roster
context['team_schedule'] = schedule
return context
class ScheduleFull(ListView):
template_name = 'schedule.html'
model = Schedule
context_object_name = 'schedule'
def get_context_data(self, **kwargs):
context = super(ScheduleFull, self).get_context_data(**kwargs)
season_name = ''
if self.kwargs.get('season'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = ": {} {}".format(season.league, season.name)
context['page_name'] = _('Schedule')
context['season'] = season
context['slug'] = self.kwargs.get('season')
return context
def get_queryset(self, *args, **kwargs):
qs = self.model.objects.all()
if self.kwargs.get('season'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = season.name
qs = self.model.objects.filter(season=season_pk).order_by('date')
return qs
class TeamSchedule(ListView):
template_name = 'schedule.html'
model = Schedule
context_object_name = 'schedule'
def get_context_data(self, **kwargs):
context = super(TeamSchedule, self).get_context_data(**kwargs)
        context['page_name'] = _('Archive')
if self.kwargs.get('team'):
team = Team.objects.get(slug=self.kwargs['team'])
context['team'] = team
if self.kwargs.get('season'):
season = Season.objects.get(slug=self.kwargs['season'])
context['season'] = season
context['page_name'] = _('Schedule')
return context
def get_queryset(self, *args, **kwargs):
qs = self.model.objects.all().order_by('date')
if self.kwargs.get('team'):
team_pk = Team.objects.get(slug=self.kwargs.get('team')).pk
qs = self.model.objects.filter(Q(home_team=team_pk) | Q(away_team=team_pk)).order_by('date')
if self.kwargs.get('season') and self.kwargs.get('team'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = season.name
team_pk = Team.objects.get(slug=self.kwargs.get('team')).pk
qs = self.model.objects.filter(Q(home_team=team_pk) | Q(away_team=team_pk), season = season_pk ).order_by('date')
return qs
class TeamRoster(ListView):
template_name = 'roster.html'
model = Player
context_object_name = 'roster'
def get_context_data(self, **kwargs):
context = super(TeamRoster, self).get_context_data(**kwargs)
context['page_name'] = _('Roster')
if self.kwargs.get('team'):
team = Team.objects.get(slug=self.kwargs['team'])
context['team'] = team
if self.kwargs.get('season'):
team = Team.objects.get(slug=self.kwargs['team'])
season = Season.objects.get(slug=self.kwargs['season'])
context['roster_image'] = Standings.objects.get(team = team.pk, season = season.pk).roster_image
context['season'] = season
return context
def get_queryset(self, *args, **kwargs):
qs = self.model.objects.all().order_by('jersey')
if self.kwargs.get('season') and self.kwargs.get('team'):
season = Season.objects.get(slug=self.kwargs['season'])
season_pk = season.pk
season_name = season.name
team_pk = Team.objects.get(slug=self.kwargs.get('team')).pk
qs = self.model.objects.filter(team = team_pk, season = season_pk ).order_by('jersey')
return qs
# Create your views here.
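# Hypothetical routing sketch (an assumption; the project's actual urls.py is not shown):
# these class-based views expect "season" and/or "team" slugs as URL keyword arguments, e.g.
#
#     path('standings/<slug:season>/', StandingsFull.as_view(), name='standings'),
#     path('team/<slug:season>/<slug:team>/', TeamDetails.as_view(), name='team'),
#     path('roster/<slug:season>/<slug:team>/', TeamRoster.as_view(), name='roster'),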
|
import os
import tkMessageBox
def default():
if (not tkMessageBox.askyesno("Default Settings", "Do you really want to force Default Settings?")):
return
os.popen('iptables -F',"r")
os.popen('iptables -A INPUT -p tcp -s 0/0 --dport 1:1024 -j DROP',"r")
os.popen('iptables -A INPUT -p udp -s 0/0 --dport 1:1024 -j DROP',"r")
os.popen(' iptables -A INPUT -p ICMP -s 0/0 -j DROP',"r")
tkMessageBox.showinfo("Default Settings","Default Settings has been applied.")
|
#SERVER Configuration
KoboENV = "production"
KoboConfig = {
'local': {
'USERNAME': '',
'PASSWORD': '',
'PROTOCOL': 'http',
'SCHEME': 'kobo',
'IP': '127.0.0.1',
'HOST': '',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': ''
},
'development': {
'USERNAME': 'admin',
'PASSWORD': 'admin',
'PROTOCOL': 'https',
'SCHEME': 'kobo',
'IP': '',
'HOST': 'kc.aqm.space',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': '12'
},
'uat': {
'USERNAME': '',
'PASSWORD': '',
'PROTOCOL': 'http',
'SCHEME': 'kobo',
'IP': '',
'HOST': '',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': ''
},
'production': {
'USERNAME': 'admin',
'PASSWORD': 'adm(1)n@AWH',
'PROTOCOL': 'https',
'SCHEME': 'kobo',
'IP': '',
'HOST': 'kc.aqm.space',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': '13'
}
}
# FORM: 33 for testing 2 set
# FORM: 12 for actual testing
# FORM: 13 for production
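# Hypothetical helper (an assumption, not part of the original configuration): a sketch
# of how a consumer might turn the active KoboConfig entry into an API base URL, e.g.
# "https://kc.aqm.space/api/v1" for the "production" entry above.
def kobo_api_base_url(env=KoboENV, config=KoboConfig):
    entry = config[env]
    host = entry['HOST'] or entry['IP']
    port = ':' + entry['PORT'] if entry['PORT'] else ''
    return "{}://{}{}/api/{}".format(entry['PROTOCOL'], host, port, entry['VERSION'])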
|
#author : eric mourgya
#
import commands
from flask import jsonify
from flask import Flask, Response, request, redirect, session, url_for, abort
from flask.ext.login import LoginManager, UserMixin,login_required, login_user, logout_user
#@app.after_request
#def treat_as_plain_text(response):
# response.headers["content-type"] = "text/plain; charset=utf-8"
# return response
app = Flask(__name__)
app.secret_key="gloubiboulga"
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
class User(UserMixin):
def __init__(self, id):
self.id = id
self.name = "user" + str(id)
self.password = self.name + "_secret"
def __repr__(self):
return "%d/%s/%s" % (self.id, self.name, self.password)
@app.route('/', methods = ['GET'])
@login_required
def help():
"""Welcome page and help page."""
func_list = {}
for rule in app.url_map.iter_rules():
if rule.endpoint != 'static':
func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__
return jsonify(func_list)
def cmdline(cmd):
# make and exec of cmd command on system
status, output = commands.getstatusoutput(cmd)
if status != 0:
error_str= cmd + ": command failed! : " +status+" "+output
print error_str
return error_str
else:
print cmd + "done"
return output
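# Hypothetical example (not in the original): cmdline("echo hello") returns "hello";
# on a non-zero exit status it returns the "<cmd>: command failed! : <status> <output>" string instead.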
@app.route('/show/discovery')
@login_required
def showdiscovery():
"""------------------------Show discovery portals."""
cmdshow="iscsiadm -m discovery -P1"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/nodes')
@login_required
def shownodes():
"""Show nodes."""
cmdshow="iscsiadm -m node -P1"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/disks')
@login_required
def showdisk():
"""Show discovery disk."""
cmdshow="iscsiadm -m session -P3"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/lsblk')
@login_required
def showlsblk():
"""Show discovery sessions and disks."""
cmdshow="lsblk"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/sessiondetail')
@login_required
def showsessiondetail():
"""Show session in detail without disk."""
cmdshow="iscsiadm -m session -P1"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/session')
@login_required
def showsession():
"""Show session ids"""
cmdshow="iscsiadm -m session"
res=cmdline(cmdshow)
return Response(response=res,status=200,mimetype="text/plain")
@app.route('/show/specifiquesession',methods=["GET", "POST"])
@login_required
def showspecifiquesession():
"""show specifique session"""
if request.method == 'POST':
session=request.form['session']
cmdres="iscsiadm -m session -r"+session +" -P3"
res=cmdline(cmdres)
return Response(response=res,status=200,mimetype="text/plain")
else:
return Response('''
<form action="" method="post">
<p><input placeholder="session id" type=text name=session>
<p><input type=submit value=submit>
</form>
''')
@app.route('/rescan/session',methods=["GET", "POST"])
@login_required
def rescansession():
"""rescan a specifique session"""
if request.method == 'POST':
        session_id=request.form['session']
        cmdres="iscsiadm -m session -r"+session_id+" -R"
res=cmdline(cmdres)
return redirect(url_for('showspecifiquesession'),code=302)
else:
return Response('''
<form action="" method="post">
<p><input placeholder="session id" type=text name=session>
<p><input type=submit value=submit>
</form>
''')
@app.route('/make/discovery',methods=["GET", "POST"])
@login_required
def makediscovery():
"""make a discovery
"""
if request.method == 'POST':
ipaddr=request.form['ip']
print ipaddr
cmdres="iscsiadm -m discovery -t sendtargets -p "+ipaddr+":3260 -P 1"
res=cmdline(cmdres)
return Response(response=res,status=200,mimetype="text/plain")
else:
return Response('''
<form action="" method="post">
<p><input placeholder="portal ip" type=text name=ip>
<p><input type=submit value=submit>
</form>
''')
@app.route('/make/nodelogin',methods=["GET", "POST"])
@login_required
def makenodelogin():
"""make a node login
"""
if request.method == 'POST':
ipaddr=request.form['ip']
iqn=request.form['iqn']
cmdres="iscsiadm -m node "+ iqn + "-p " +ipaddr + "-o update -n node.startup -v automatic"
res=cmdline(cmdres)
return Response(response=res,status=200,mimetype="text/plain")
else:
return Response('''
<form action="" method="post">
<p><input placeholder="portal ip" type=text name=ip>
<p><input placeholder="portal iqn" type=text name=iqn>
<p><input type=submit value=submit>
</form>
''')
@app.route('/make/sessionlogin',methods=["GET", "POST"])
@login_required
def makesessionlogin():
"""make a session login
"""
if request.method == 'POST':
ipaddr=request.form['ip']
iqn=request.form['iqn']
cmdres="iscsiadm -m node "+ iqn + "-p " +ipaddr + "-l"
res=cmdline(cmdres)
return Response(response=res,status=200,mimetype="text/plain")
else:
return Response('''
<form action="" method="post">
<p><input placeholder="portal ip" type=text name=ip>
<p><input placeholder="portal iqn" type=text name=iqn>
<p><input type=submit value=submit>
</form>
''')
@app.route("/login", methods=["GET", "POST"])
def login():
"""login page"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if password == username + "_secret":
            id = username.split('user')[1]
user = User(id)
login_user(user)
return redirect(url_for('help'))
else:
return abort(401)
else:
return Response('''
<form action="" method="post">
<p><input placeholder="Username" type=text name=username>
<p><input placeholder="Password" type=password name=password>
<p><input type=submit value=Login>
</form>
''')
@app.route("/logout")
@login_required
def logout():
"""logout page """
logout_user()
return Response('<p>Logged out</p>')
@app.errorhandler(401)
def page_not_found(e):
return Response('<p>Login failed</p>')
@login_manager.user_loader
def load_user(userid):
return User(userid)
app.run(debug=True,port=5001)
|
from django.test import TestCase
from oscar.core import compat
class TestCustomUserModel(TestCase):
def test_can_be_created_without_error(self):
klass = compat.get_user_model()
try:
klass.objects.create_user('_', 'a@a.com', 'pa55w0rd')
except Exception, e:
self.fail("Unable to create user model: %s" % e)
|
class NoConfigException(Exception):
pass
class ConfigFormatException(Exception):
pass
class FeedNotFoundException(Exception):
pass
|
"""
================================
Make an MNE-Report with a Slider
================================
In this example, MEG evoked data are plotted in an html slider.
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
###############################################################################
# Do standard folder parsing (this can take a couple of minutes):
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample',
raw_psd=False) # use False for speed here
report.parse_folder(meg_path, on_error='ignore', mri_decim=10)
###############################################################################
# Add a custom section with an evoked slider:
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='png') # can also use 'svg'
# to save report
report.save('my_report.html', overwrite=True)
|
"""Module containing graphQL client."""
import aiohttp
import requests
class GraphqlClient:
"""Class which represents the interface to make graphQL requests through."""
def __init__(self, endpoint: str, headers: dict = None):
"""Insantiate the client."""
self.endpoint = endpoint
self.headers = headers or {}
def __request_body(
self, query: str, variables: dict = None, operation_name: str = None
) -> dict:
json = {"query": query}
if variables:
json["variables"] = variables
if operation_name:
json["operationName"] = operation_name
return json
def __request_headers(self, headers: dict = None) -> dict:
return {**self.headers, **headers} if headers else self.headers
def execute(
self,
query: str,
variables: dict = None,
operation_name: str = None,
headers: dict = None,
):
"""Make synchronous request to graphQL server."""
request_body = self.__request_body(
query=query, variables=variables, operation_name=operation_name
)
result = requests.post(
self.endpoint, json=request_body, headers=self.__request_headers(headers),
)
result.raise_for_status()
return result.json()
async def execute_async(
self,
query: str,
variables: dict = None,
operation_name: str = None,
headers: dict = None,
):
"""Make asynchronous request to graphQL server."""
request_body = self.__request_body(
query=query, variables=variables, operation_name=operation_name
)
async with aiohttp.ClientSession() as session:
async with session.post(
self.endpoint,
json=request_body,
headers=self.__request_headers(headers),
) as response:
return await response.json()
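# Hypothetical usage sketch (not part of the original module), assuming a reachable
# GraphQL endpoint at the placeholder URL below:
#
#     client = GraphqlClient(endpoint="https://example.com/graphql")
#     data = client.execute(
#         query="query($id: ID!) { user(id: $id) { name } }",
#         variables={"id": "1"},
#         headers={"Authorization": "Bearer <token>"},
#     )
#     # execute_async() accepts the same arguments and must be awaited inside an event loop.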
|
import uncompyle2
with open("sql_quality_check.py", "wb") as fileobj:
uncompyle2.uncompyle_file("/Users/Minat_Verma/Desktop/sql_quality_check.pyc", fileobj)
|
# -*- coding: utf-8 -*-
import glm
import material
from random import random
from math import pi
class Triangle(object):
"""
Triangle ( vertex1 (vec3), vertex2 (vec3), vertex3 (vec3) )
Creates triangles on the scene.
"""
def __init__(self, *args, **kwargs):
if kwargs:
self.vertex1 = kwargs.get('vertex1', glm.vec3(.0, .0, .0))
self.vertex2 = kwargs.get('vertex2', glm.vec3(.0, .0, .0))
self.vertex3 = kwargs.get('vertex3', glm.vec3(.0, .0, .0))
self.material = kwargs.get('material', material.Material())
elif args:
self.vertex1, self.vertex2, self.vertex3, self.material = args
else:
self.vertex1 = glm.vec3(.0, .0, .0)
self.vertex2 = glm.vec3(.0, .0, .0)
self.vertex3 = glm.vec3(.0, .0, .0)
self.material = material.Material()
def intersect(self, my_ray, inter_rec):
a = self.vertex1.x - self.vertex2.x
b = self.vertex1.y - self.vertex2.y
c = self.vertex1.z - self.vertex2.z
d = self.vertex1.x - self.vertex3.x
e = self.vertex1.y - self.vertex3.y
f = self.vertex1.z - self.vertex3.z
g = my_ray.direction.x
h = my_ray.direction.y
i = my_ray.direction.z
j = self.vertex1.x - my_ray.origin.x
k = self.vertex1.y - my_ray.origin.y
l = self.vertex1.z - my_ray.origin.z
ei_minus_hf = float(e * i - h * f)
gf_minus_di = float(g * f - d * i)
dh_minus_eg = float(d * h - e * g)
ak_minus_jb = float(a * k - j * b)
jc_minus_al = float(j * c - a * l)
bl_minus_kc = float(b * l - k * c)
M = a * (ei_minus_hf) + b * (gf_minus_di) + c * (dh_minus_eg)
t = -(f * (ak_minus_jb) + e * (jc_minus_al) + d * (bl_minus_kc)) / M
if t < 0.0:
return False
gamma = (i * (ak_minus_jb) + h * (jc_minus_al) + g * (bl_minus_kc)) / M
if gamma < 0.0 or gamma > 1.0:
return False
beta = (j * (ei_minus_hf) + k * (gf_minus_di) + l * (dh_minus_eg)) / M
if beta < 0.0 or beta > 1.0 - gamma:
return False
inter_rec.t = t
inter_rec.position = my_ray.origin + inter_rec.t * my_ray.direction
inter_rec.normal = glm.normalize(glm.cross(self.vertex2 - self.vertex1, self.vertex3 - self.vertex1))
if glm.dot(inter_rec.normal, my_ray.direction) > 0:
inter_rec.normal = -inter_rec.normal
inter_rec.material = self.material
return True
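# Hypothetical usage sketch (the ray and intersection-record classes below are assumptions;
# the original project presumably supplies its own):
#
#     class Ray(object):                      # stand-in with the attributes intersect() reads
#         def __init__(self, origin, direction):
#             self.origin, self.direction = origin, direction
#
#     class InterRec(object):                 # filled in by intersect() on a hit
#         t = position = normal = material = None
#
#     tri = Triangle(vertex1=glm.vec3(0, 0, 0), vertex2=glm.vec3(1, 0, 0), vertex3=glm.vec3(0, 1, 0))
#     rec = InterRec()
#     hit = tri.intersect(Ray(glm.vec3(0.2, 0.2, 1.0), glm.vec3(0, 0, -1.0)), rec)
#     # hit is True and rec.t == 1.0: the ray, cast straight down, hits the triangle at (0.2, 0.2, 0).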
|
import torch
import math
from time import time
import torch.distributions as tdis
import matplotlib.pyplot as plt
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, RandomSampler, BatchSampler, DataLoader
def get_model(in_size):
return nn.Sequential(
nn.Linear(in_size, 36),
#nn.Dropout(p=0.4),
nn.ReLU(),
nn.Linear(36, 18),
#nn.Dropout(p=0.25),
nn.ReLU(),
nn.Linear(18, 1),
nn.ReLU()
)
def InfoNES(X, Y, q=2.4, q1=0.6, batch_size=512, num_epochs=300, dev=torch.device("cpu"), model=None,lrate=0.01):
A = torch.tensor([0.0001] * batch_size).to(dev)
B = torch.tensor([0.0001] * (batch_size * batch_size)).to(dev)
if not model:
model = get_model(X.shape[1]+Y.shape[1])
# Move data to device
X = X.to(dev)
Y = Y.to(dev)
Y += torch.randn_like(Y) * 1e-4
model = model.to(dev)
opt = optim.Adam(model.parameters(), lr=lrate)
td = TensorDataset(X, Y)
result = []
epoch = 0
ref_time = time()
while epoch < num_epochs:
if epoch == 50 and result[-1] < 0.001:
# Start from the beginning
model = get_model(X.shape[1]+Y.shape[1]).to(dev)
opt = optim.Adam(model.parameters(), lr=lrate)
epoch = 0
print("Did not converge in 50 epochs")
if epoch % 200 == 0 and epoch > 0:
print("MI at", epoch, "-", result[-1], "elapsed", time() - ref_time, "seconds")
ref_time = time()
for x, y in DataLoader(td, batch_size, shuffle=True, drop_last=True):
opt.zero_grad()
fxy = model(torch.cat([x, y], 1)).flatten()
topin = torch.max(A,(1 + (1-q) * fxy))
top = torch.pow(topin, (1 / (1-q)))
xiyj = torch.cat([x.repeat_interleave(batch_size,dim=0),y.repeat(batch_size,1)], 1)
bottomin = torch.max(B,(1 + (1-q) * model(xiyj)).flatten())
bottom = torch.pow(bottomin, (1 / (1-q))).reshape(batch_size,batch_size).mean(dim=1)
tb = top/bottom
loss = -((torch.pow(tb, (1-q1)) - 1) / (1-q1)).mean()
if math.isnan(loss.item()):
break
result.append(-loss.item())
loss.backward(retain_graph=True)
opt.step()
epoch += 1
r = torch.tensor(result[-50:]).mean()
plt.plot(result,label="q-exp=2.4,q-log=0.6")
plt.title('Qabe')
plt.xlabel('Number of Epochs')
    plt.ylabel('Mutual Information')
plt.legend(loc='lower right')
print(r)
return r
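# Hypothetical usage sketch (not part of the original module): estimating the mutual
# information of two correlated 1-D Gaussians; the closed-form MI,
# -0.5 * log(1 - rho**2) ≈ 0.144 nats at rho = 0.5, gives a rough reference point.
#
#     rho = 0.5
#     x = torch.randn(20000, 1)
#     y = rho * x + (1.0 - rho ** 2) ** 0.5 * torch.randn(20000, 1)
#     mi_estimate = InfoNES(x, y, num_epochs=300)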
|
import argparse
import json
import os
import string
import time
import uuid
import boto3
greengrass_client = boto3.client("greengrassv2")
s3_client = boto3.client("s3")
sts_client = boto3.client("sts")
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
for value in values:
key, value = value.split("=")
getattr(namespace, self.dest)[key] = value
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--components-directory", type=str, required=True)
parser.add_argument("-m", "--model-uri", type=str, required=True)
parser.add_argument("-r", "--role-arn", type=str, required=True)
parser.add_argument("-g", "--use-gpu", type=str, required=True)
parser.add_argument("-b", "--bucket", type=str, required=True)
parser.add_argument("-c", "--components", nargs="+", required=True)
parser.add_argument("-v", "--variables", nargs="*", action=ParseKwargs)
def compile_and_package_model(next_version, role_arn, bucket_name, model_uri, use_gpu):
client = boto3.client("sagemaker")
compilation_job_name = str(uuid.uuid4())
packaging_job_name = str(uuid.uuid4())
output_config = {
"S3OutputLocation": "s3://{}/compiled/".format(bucket_name),
"TargetPlatform": {"Os": "LINUX", "Arch": "X86_64"},
}
model_name = "gluoncv-model"
if int(use_gpu):
model_name = "gluoncv-gpu-model"
output_config = {
"S3OutputLocation": "s3://{}/compiled/".format(bucket_name),
"TargetPlatform": {
"Os": "LINUX",
"Arch": "X86_64",
"Accelerator": "NVIDIA",
},
"CompilerOptions": json.dumps(
{"cuda-ver": "10.2", "trt-ver": "7.2.1", "gpu-code": "sm_61"}
),
}
response = client.create_compilation_job(
CompilationJobName=compilation_job_name,
RoleArn=role_arn,
InputConfig={
"S3Uri": model_uri,
"DataInputConfig": json.dumps({"data": [1, 3, 512, 512]}),
"Framework": "MXNET",
},
OutputConfig=output_config,
StoppingCondition={"MaxRuntimeInSeconds": 900, "MaxWaitTimeInSeconds": 900},
)
finished = False
while not finished:
response = client.describe_compilation_job(
CompilationJobName=compilation_job_name
)
finished = response["CompilationJobStatus"] in [
"COMPLETED",
"FAILED",
"STOPPED",
]
if finished:
break
time.sleep(10)
client.create_edge_packaging_job(
EdgePackagingJobName=packaging_job_name,
CompilationJobName=compilation_job_name,
ModelName=model_name,
ModelVersion=next_version,
RoleArn=role_arn,
OutputConfig={"S3OutputLocation": "s3://{}/packaged/".format(bucket_name)},
)
finished = False
while not finished:
response = client.describe_edge_packaging_job(
EdgePackagingJobName=packaging_job_name
)
finished = response["EdgePackagingJobStatus"] in [
"COMPLETED",
"FAILED",
"STOPPED",
]
if finished:
return "{}{}-{}.tar.gz".format(
response["OutputConfig"]["S3OutputLocation"],
response["ModelName"],
response["ModelVersion"],
)
break
time.sleep(10)
def generate_recipe(component_name, version, model_uri):
component_variables = args.variables.copy()
component_variables["component_version_number"] = version
component_variables["component_name"] = component_name
component_variables["packaged_model_uri"] = model_uri
component_variables["packaged_model_filename"] = model_uri.split("/")[-1]
# substitute variables, and generate new recipe file
with open(
"{}/recipe-template.yml".format(args.components_directory), "r"
) as input_recipe:
src = string.Template(input_recipe.read())
result = src.substitute(component_variables)
with open(
"{}/{}.yml".format(args.components_directory, component_name), "w"
) as output_recipe:
output_recipe.write(result)
def create_component_version(component_name):
with open(
"{}/{}.yml".format(args.components_directory, component_name), "r"
) as recipe_file:
recipe = recipe_file.read().encode()
greengrass_client.create_component_version(inlineRecipe=recipe)
def get_next_component_version(component_name):
versions = greengrass_client.list_component_versions(
arn="arn:aws:greengrass:{}:{}:components:{}".format(
os.environ["AWS_REGION"],
sts_client.get_caller_identity()["Account"],
component_name,
)
)["componentVersions"]
print(versions)
if len(versions) > 0:
current_version = versions[0]["componentVersion"]
else:
return "1.0.0"
current_versions = current_version.split(".")
major = int(current_versions[0])
minor = int(current_versions[1])
micro = int(current_versions[2])
return "{}.{}.{}".format(major, minor, micro + 1)
if __name__ == "__main__":
args = parser.parse_args()
print(args)
for component in args.components:
next_component_version = get_next_component_version(component)
packaged_model_uri = compile_and_package_model(
next_component_version,
args.role_arn,
args.bucket,
args.model_uri,
args.use_gpu,
)
generate_recipe(component, next_component_version, packaged_model_uri)
create_component_version(component)
|
import time
from behave import given, when, then
from selenium.webdriver.chrome.webdriver import WebDriver
@given(u'I navigate to the Manager Home Page')
def step_impl(context):
context.driver.get('http://127.0.0.1:5000/home')
@when(u'I Login In')
def step_impl(context):
context.employee_home_page.login().click()
time.sleep(2)
@then(u'I should be on Manager Login Page')
def step_impl(context):
text = context.driver.title
print(text)
assert text == 'Login'
@when(u'I submit email')
def step_impl(context):
context.login_home_page.email().send_keys('admin@gmail.com')
time.sleep(2)
@when(u'I submit password')
def step_impl(context):
context.login_home_page.password().send_keys('password')
time.sleep(2)
@when(u'I click on enter')
def step_impl(context):
context.login_home_page.submit().click()
time.sleep(5)
@when(u'I submit email1')
def step_impl(context):
context.login_home_page.email().send_keys('admin@gmail1.com')
time.sleep(2)
@when('I click on Statistics')
def step_impl(context):
context.manager_home_page.statistics().click()
time.sleep(5)
@then(u'I should be on Statistics Page')
def step_impl(context):
text = context.driver.title
print(text)
assert text == 'Statistics'
@when(u'I click on Display')
def step_impl(context):
context.manager_home_page.display().click()
time.sleep(5)
@then(u'I should be on Reimbursements Page')
def step_impl(context):
text = context.driver.title
print(text)
assert text == 'Reimbursements'
|
from abc import abstractmethod
import numpy as np
from scipy.optimize import minimize
from ..opt_control_defaults import lbfgsb_control_defaults
from ..output import (
add_g_to_retlist,
add_llik_to_retlist,
add_posterior_to_retlist,
df_ret_str,
g_in_output,
g_ret_str,
llik_in_output,
llik_ret_str,
posterior_in_output,
)
from ..workhorse_parametric import check_g_init, handle_optmethod_parameter
from .base import BaseEBNM
class ParametricEBNM(BaseEBNM):
@property
def _pointmass(self) -> bool:
return True
@property
@abstractmethod
def _class_name(self) -> str:
pass
@property
def _scale_name(self) -> str:
return "scale"
@property
def _mode_name(self) -> str:
return "mean"
def _fit(self, x, s, output, control):
self._checkg(self.g_init, self.fix_g, self.mode, self.scale, self._pointmass)
par_init = self._initpar(self.g_init, self.mode, self.scale, self._pointmass, x, s)
if self.fix_g:
fix_par = np.array([True, True, True])
else:
fix_par = np.array(
[not self._pointmass, self.scale != "estimate", self.mode != "estimate"]
)
optmethod = handle_optmethod_parameter(self.optmethod, fix_par)
x_optset = x
s_optset = s
if np.any(np.isinf(s)):
x_optset = x[np.isfinite(s)]
s_optset = s[np.isfinite(s)]
optres = self._mle_parametric(
x_optset,
s_optset,
par_init,
fix_par,
optmethod=optmethod["fn"],
use_grad=optmethod["use_grad"],
use_hess=optmethod["use_hess"],
control=control,
)
retlist = dict()
if posterior_in_output(output):
posterior = self._summres(x, s, optres["par"], output)
retlist = add_posterior_to_retlist(retlist, posterior, output)
if g_in_output(output):
fitted_g = self._partog(par=optres["par"])
retlist = add_g_to_retlist(retlist, fitted_g)
if llik_in_output(output):
loglik = optres["val"]
retlist = add_llik_to_retlist(retlist, loglik)
if self.include_posterior_sampler:
def post_sampler(nsamp):
return self._postsamp(x, s, optres["par"], nsamp)
self.posterior_sampler_ = post_sampler
if g_ret_str() in retlist:
self.fitted_g_ = retlist[g_ret_str()]
if df_ret_str() in retlist:
self.posterior_ = retlist[df_ret_str()]
if llik_ret_str() in retlist:
self.log_likelihood_ = retlist[llik_ret_str()]
def _mle_parametric(self, x, s, par_init, fix_par, optmethod, use_grad, use_hess, control):
scale_factor = 1 / np.median(s[s > 0])
x = x * scale_factor
s = s * scale_factor
par_init = self._scalepar(par_init, scale_factor)
precomp = self._precomp(x, s, par_init, fix_par)
fn_params = dict(precomp, x=x, s=s, par_init=par_init, fix_par=fix_par)
p = np.array(list(par_init.values()))[~np.array(fix_par)]
if (not fix_par[1]) and np.isinf(p[0]):
p[0] = np.sign(p[0]) * np.log(len(x))
if all(fix_par):
optpar = par_init
optval = self._nllik(par=None, calc_grad=False, calc_hess=False, **fn_params)
elif optmethod == "lbfgsb":
control = dict(lbfgsb_control_defaults(), **control)
def fn(par, kwargs):
return self._nllik(par, calc_grad=False, calc_hess=False, **kwargs)
if use_grad:
def gr(par, kwargs):
return self._nllik(par, calc_grad=True, calc_hess=False, **kwargs)
else:
gr = None
optres = minimize(
fun=fn,
x0=p,
jac=gr,
args=(fn_params,),
options=control,
method="L-BFGS-B",
)
optpar = optres.x
optval = optres.fun
else:
raise NotImplementedError
retpar = par_init
retpar_values = np.array(list(retpar.values()))
if isinstance(optpar, dict):
retpar_values[~fix_par] = np.array(list(optpar.values()))[~fix_par]
else:
retpar_values[~fix_par] = optpar
retpar = dict(zip(list(retpar), retpar_values))
retpar = self._scalepar(par=retpar, scale_factor=1 / scale_factor)
optval = optval - sum(np.isfinite(x) * np.log(scale_factor))
retlist = self._postcomp(
optpar=retpar,
optval=optval,
x=x,
s=s,
par_init=par_init,
fix_par=fix_par,
scale_factor=scale_factor,
**precomp,
)
return retlist
def _checkg(self, g_init, fix_g, mode, scale, pointmass):
return check_g_init(
g_init=g_init,
fix_g=fix_g,
mode=mode,
scale=scale,
pointmass=pointmass,
class_name=self._class_name,
scale_name=self._scale_name,
mode_name=self._mode_name,
)
@abstractmethod
def _initpar(self, g_init, mode, scale, pointmass, x, s):
pass
@abstractmethod
def _scalepar(self, par, scale_factor):
pass
@abstractmethod
def _precomp(self, x, s, par_init, fix_par):
pass
@abstractmethod
def _nllik(self, par, x, s, par_init, fix_par, calc_grad, calc_hess, **kwargs):
pass
@abstractmethod
def _postcomp(self, optpar, optval, x, s, par_init, fix_par, scale_factor, **kwargs):
pass
@abstractmethod
def _summres(self, x, s, optpar, output):
pass
@abstractmethod
def _partog(self, par):
pass
@abstractmethod
def _postsamp(self, x, s, optpar, nsamp):
pass
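# Illustrative sketch (not part of this module): a concrete prior family only
# needs to fill in the abstract hooks declared above; optimisation and output
# assembly are inherited from ParametricEBNM._fit / _mle_parametric. The names
# below are placeholders, not a real implementation.
#
# class MyPriorEBNM(ParametricEBNM):
#     _class_name = "my_prior"
#
#     def _initpar(self, g_init, mode, scale, pointmass, x, s): ...
#     def _scalepar(self, par, scale_factor): ...
#     def _precomp(self, x, s, par_init, fix_par): ...
#     def _nllik(self, par, x, s, par_init, fix_par, calc_grad, calc_hess, **kwargs): ...
#     def _postcomp(self, optpar, optval, x, s, par_init, fix_par, scale_factor, **kwargs): ...
#     def _summres(self, x, s, optpar, output): ...
#     def _partog(self, par): ...
#     def _postsamp(self, x, s, optpar, nsamp): ...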
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Written as part of https://www.scrapehero.com/how-to-scrape-amazon-product-reviews-using-python/
import json
import requests
import urllib
import re
import shutil
def getInfo(url):
file = open("list.txt", "w")
while True:
print(url)
file.write(url+'\n')
requests.packages.urllib3.disable_warnings(category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
page = requests.get(url, verify=False)
page_response = page.text
regex = r"\(scan: ([^\)]*)\)\n[^\n]*\n <a href=\"([^\"]*)\">Download</a>"
matches = re.findall(regex, page_response, re.MULTILINE)
#print(matches)
#download_file(matches[0][1], matches[0][0])
regex = r"<a class=\"float-right\" href=\"([^\"]*)\"> Next"
matches = re.findall(regex, page_response, re.MULTILINE)
#print(matches)
if len(matches) == 0:
break
else:
url = 'https://szukajwarchiwach.pl' + matches[0]
file.close()
def download_file(url, local_filename):
url = 'https://szukajwarchiwach.pl' + url
#local_filename = url.split('/')[-1]
requests.packages.urllib3.disable_warnings(category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
r = requests.get(url, stream=True, verify=False)
with open(local_filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return local_filename
getInfo('https://szukajwarchiwach.pl/53/1318/0/-/24/skan/full/4tU7herNt3Ia-i1Xoweoug')
|
from fastapi import FastAPI
# from pydantic import BaseModel
# from typing import Optional
import random
import mechanize
from bs4 import BeautifulSoup
import html5lib
import requests
import json
rockauto_api = FastAPI()
@rockauto_api.get("/")
async def root():
return {"message": "Hello World"}
@rockauto_api.get("/makes")
async def get_makes():
makes_list = []
browser = mechanize.Browser()
page_content = browser.open('https://www.rockauto.com/en/catalog/').read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('div', attrs={'class', 'ranavnode'})
soup_filter = []
# Find US Market Only
for x in soup:
if 'US' in next(x.children)['value']:
soup_filter.append( x.find('a', attrs={'class', 'navlabellink'}) )
# Get [Make, Year, Model, Link]
for x in soup_filter:
makes_list.append( {'make': x.get_text(), 'link': 'https://www.rockauto.com' + str( x.get('href') ) })
return makes_list
@rockauto_api.get("/years/{search_vehicle}")
async def get_years( search_make: str, search_link: str ):
years_list = []
browser = mechanize.Browser()
page_content = browser.open( search_link ).read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('div', attrs={'class', 'ranavnode'})[1:]
soup_filter = []
# Find US Market Only
for x in soup:
if 'US' in next(x.children)['value']:
soup_filter.append( x.find('a', attrs={'class', 'navlabellink'}) )
# Get [Make, Year, Model, Link]
for x in soup_filter:
years_list.append( {'make': search_make, 'year': x.get_text(), 'link': 'https://www.rockauto.com' + str( x.get('href') ) })
return years_list
@rockauto_api.get("/years/{search_vehicle}")
async def get_models( search_make: str, search_year: str, search_link: str ):
models_list = []
browser = mechanize.Browser()
page_content = browser.open( search_link ).read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('div', attrs={'class', 'ranavnode'})[2:]
soup_filter = []
# Find US Market Only
for x in soup:
if 'US' in next(x.children)['value']:
soup_filter.append( x.find('a', attrs={'class', 'navlabellink'}) )
# Get [Make, Year, Model, Link]
for x in soup_filter:
models_list.append( {'make': search_make, 'year': search_year, 'model': x.get_text(), 'link': 'https://www.rockauto.com' + str( x.get('href') ) })
return models_list
@rockauto_api.get("/engines/{search_vehicle}")
async def get_engines( search_make: str, search_year: str, search_model: str, search_link: str ):
engines_list = []
browser = mechanize.Browser()
page_content = browser.open( search_link ).read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('div', attrs={'class', 'ranavnode'})[3:]
soup_filter = []
# Find US Market Only
for x in soup:
if 'US' in next(x.children)['value']:
soup_filter.append( x.find('a', attrs={'class', 'navlabellink'}) )
# Get [Make, Year, Model, Link]
for x in soup_filter:
engines_list.append( {'make': search_make, 'year': search_year, 'model': search_model, 'engine': x.get_text(), 'link': 'https://www.rockauto.com' + str( x.get('href') ) })
return engines_list
@rockauto_api.get("/categories/{search_vehicle}")
async def get_categories( search_make: str, search_year: str, search_model: str, search_engine: str, search_link: str ):
browser = mechanize.Browser()
page_content = browser.open( search_link ).read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('a', attrs={'class', 'navlabellink'})[4:]
return [
{
'make': search_make,
'year': search_year,
'model': search_model,
'engine': search_engine,
'category': x.get_text(),
'link': 'https://www.rockauto.com' + str(x.get('href')),
}
for x in soup
]
@rockauto_api.get("/sub_categories/{search_vehicle}")
async def get_sub_categories( search_make: str, search_year: str, search_model: str, search_engine: str, search_category: str, search_link: str ):
browser = mechanize.Browser()
page_content = browser.open( search_link ).read()
browser.close()
soup = BeautifulSoup(page_content, features='html5lib').find_all('a', attrs={'class', 'navlabellink'})[5:]
return [
{
'make': search_make,
'year': search_year,
'model': search_model,
'engine': search_engine,
'category': search_category,
'sub_category': x.get_text(),
'link': 'https://www.rockauto.com' + str(x.get('href')),
}
for x in soup
]
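# Usage sketch (hedged: the module file name "main.py" is an assumption, and the
# value passed as `search_link` comes from the previous response):
#
#   uvicorn main:rockauto_api --reload
#   curl http://127.0.0.1:8000/makes
#   curl "http://127.0.0.1:8000/years/acura?search_make=Acura&search_link=<link from /makes>"
#
# Each endpoint returns the next level of catalogue links, so the `link` field of
# one response feeds the `search_link` query parameter of the next call.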
|
"""
app.recipe_users.utils
----------------------
Utils file to handle recipe users utilities functions
"""
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
"""
Manager class to perform users functions
"""
def create_user(
self,
email: str,
password: str = None,
**more_args
):
"""
Manager function to create a user
:param email: Email of the user
        :param password: Password of the user; may be None as well
        :param more_args: Additional arguments in case we modify the user creation
            functionality
:return: created user
"""
if email is None:
exception_message = "An email must be pass"
raise ValueError(exception_message)
new_user_creation_dict = {
'email': self.normalize_email(email),
**more_args
}
new_user = self.model(**new_user_creation_dict)
new_user.set_password(password)
new_user.save(using=self._db)
return new_user
def create_superuser(
self,
email: str,
password: str = None,
**more_args
):
"""
Create a new super user for the app
:param email: Email of the user
        :param password: Password of the user; may be None as well
        :param more_args: Additional arguments in case we modify the user creation
            functionality
:return: New super user instance
"""
new_superuser = self.create_user(email, password, **more_args)
new_superuser.is_staff = True
new_superuser.is_superuser = True
new_superuser.save(using=self._db)
return new_superuser
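# Illustrative sketch (hypothetical model, not part of this file): this manager is
# intended to back a custom user model that authenticates by email, e.g. in the
# app's models.py:
#
# from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
# from django.db import models
#
# class User(AbstractBaseUser, PermissionsMixin):
#     email = models.EmailField(max_length=255, unique=True)
#     is_staff = models.BooleanField(default=False)
#
#     objects = UserManager()
#     USERNAME_FIELD = 'email'
#
# User.objects.create_user(email='someone@example.com', password='secret')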
|
import tkinter
import tkinter.messagebox
class MyGUI:
def __init__(self) -> None:
# Create the main window
self.window = tkinter.Tk()
self.button = tkinter.Button(self.window, text="Click Me", command=self.do_something)
self.quit_button = tkinter.Button(self.window, text="Quit", command=self.window.destroy)
self.button.pack()
self.quit_button.pack()
tkinter.mainloop()
def do_something(self):
tkinter.messagebox.showinfo("Response", "Thanks for the click.")
gui = MyGUI()
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from pyro_models.load import load
from pyro_models.utils import data
__all__ = [
'load',
'data'
]
|
from inspect import getsourcefile
from os.path import abspath
print(abspath(getsourcefile(lambda:0)))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class Provider(pulumi.ProviderResource):
def __init__(__self__, resource_name, opts=None, azure_auth=None, basic_auth=None, config_file=None, host=None, profile=None, token=None, __props__=None, __name__=None, __opts__=None):
"""
The provider type for the databricks package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_file: Location of the Databricks CLI credentials file, that is created by `databricks configure --token` command. By default,
it is located in ~/.databrickscfg. Check https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication for
               docs. Config file credentials will only be used when host/token are not provided.
:param pulumi.Input[str] profile: Connection profile specified within ~/.databrickscfg. Please check
https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles for documentation.
The **azure_auth** object supports the following:
* `azureRegion` (`pulumi.Input[str]`)
* `client_id` (`pulumi.Input[str]`)
* `clientSecret` (`pulumi.Input[str]`)
* `managedResourceGroup` (`pulumi.Input[str]`)
* `resourceGroup` (`pulumi.Input[str]`)
* `subscriptionId` (`pulumi.Input[str]`)
* `tenant_id` (`pulumi.Input[str]`)
* `workspace_name` (`pulumi.Input[str]`)
The **basic_auth** object supports the following:
* `password` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_auth'] = pulumi.Output.from_input(azure_auth).apply(json.dumps) if azure_auth is not None else None
__props__['basic_auth'] = pulumi.Output.from_input(basic_auth).apply(json.dumps) if basic_auth is not None else None
__props__['config_file'] = config_file
if host is None:
host = utilities.get_env('DATABRICKS_HOST')
__props__['host'] = host
__props__['profile'] = profile
if token is None:
token = utilities.get_env('DATABRICKS_TOKEN')
__props__['token'] = token
super(Provider, __self__).__init__(
'databricks',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from unittest import TestCase
from pybridair.auth import *
from pybridair.data import *
auth = BridAuth('10.101.30.42')
class TestGetStatus(TestCase):
"""
Test pybrid.data.get_status function against live Brid device
"""
def test_get_current_air(self):
"""
Tests pybrid.data.get_status function against live Brid device
"""
current_status = get_status(auth)
data_points = ['Time', 'Sensors', 'Settings', 'Filters']
for data_point in current_status.keys():
self.assertIn(data_point, data_points)
class TestGetHistory(TestCase):
"""
    Test pybrid.data.get_history function against live Brid device
"""
def test_get_history(self):
"""
Tests pybrid.data.get_history function against live Brid device
"""
history = get_history(auth)
data_points = ['t', 'V', 'T', 'H', 'C']
for data_point in history[0].keys():
self.assertIn(data_point, data_points)
|
import struct
import pyb
CONFIG_MODE = 0x00
ACCONLY_MODE = 0x01
MAGONLY_MODE = 0x02
GYRONLY_MODE = 0x03
ACCMAG_MODE = 0x04
ACCGYRO_MODE = 0x05
MAGGYRO_MODE = 0x06
AMG_MODE = 0x07
IMUPLUS_MODE = 0x08
COMPASS_MODE = 0x09
M4G_MODE = 0x0a
NDOF_FMC_OFF_MODE = 0x0b
NDOF_MODE = 0x0c
AXIS_P0 = bytes([0x21, 0x04])
AXIS_P1 = bytes([0x24, 0x00])
AXIS_P2 = bytes([0x24, 0x06])
AXIS_P3 = bytes([0x21, 0x02])
AXIS_P4 = bytes([0x24, 0x03])
AXIS_P5 = bytes([0x21, 0x01])
AXIS_P6 = bytes([0x21, 0x07])
AXIS_P7 = bytes([0x24, 0x05])
_MODE_REGISTER = 0x3d
_POWER_REGISTER = 0x3e
_AXIS_MAP_CONFIG = 0x41
class BNO055:
def __init__(self, i2c, address=0x28, mode = NDOF_MODE, axis = AXIS_P4):
self.i2c = i2c
self.address = address
if self.read_id() != bytes([0xA0, 0xFB, 0x32, 0x0F]):
raise RuntimeError('Failed to find expected ID register values. Check wiring!')
self.operation_mode(CONFIG_MODE)
self.system_trigger(0x20)# reset
pyb.delay(700)
self.power_mode(0x00)#POWER_NORMAL
self.axis(axis)
self.page(0)
pyb.delay(10)
self.operation_mode(mode)
self.system_trigger(0x80) # external oscillator
pyb.delay(200)
def read_registers(self, register, size=1):
return(self.i2c.readfrom_mem(self.address, register, size))
def write_registers(self, register, data):
self.i2c.writeto_mem(self.address, register, data)
    def operation_mode(self, mode=None):
        # CONFIG_MODE is 0x00, so compare against None rather than relying on truthiness
        if mode is not None:
            self.write_registers(_MODE_REGISTER, bytes([mode]))
        else:
            return(self.read_registers(_MODE_REGISTER, 1)[0])
def system_trigger(self, data):
self.write_registers(0x3f, bytes([data]))
    def power_mode(self, mode=None):
        # the normal power mode is 0x00, so compare against None rather than truthiness
        if mode is not None:
            self.write_registers(_POWER_REGISTER, bytes([mode]))
        else:
            return(self.read_registers(_POWER_REGISTER, 1))
    def page(self, num=None):
        # page selection lives in the PAGE_ID register (0x07); num=0 must still be written
        if num is not None:
            self.write_registers(0x07, bytes([num]))
        else:
            return(self.read_registers(0x07))
def temperature(self):
return(self.read_registers(0x34, 1)[0])
def read_id(self):
return(self.read_registers(0x00, 4))
def axis(self, placement=None):
if placement:
self.write_registers(_AXIS_MAP_CONFIG, placement)
else:
return(self.read_registers(_AXIS_MAP_CONFIG, 2))
def quaternion(self):
data = struct.unpack("<hhhh", self.read_registers(0x20, 8))
return [d/(1<<14) for d in data] #[w, x, y, z]
def euler(self):
data = struct.unpack("<hhh", self.read_registers(0x1A, 6))
return [d/16 for d in data] # [yaw, roll, pitch]
def accelerometer(self):
data = struct.unpack("<hhh", self.read_registers(0x08, 6))
return [d/100 for d in data] #[x, y, z]
def magnetometer(self):
data = struct.unpack("<hhh", self.read_registers(0x0E, 6))
return [d/16 for d in data] # [x, y, z]
def gyroscope(self):
data = struct.unpack("<hhh", self.read_registers(0x14, 6))
return [d/900 for d in data] #[x, y, z]
def linear_acceleration(self):
data = struct.unpack("<hhh", self.read_registers(0x28, 6))
return [d/100 for d in data] #[x, y, z]
def gravity(self):
data = struct.unpack("<hhh", self.read_registers(0x2e, 6))
return [d/100 for d in data] #[x, y, z]
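# Usage sketch (assumptions: a pyboard with the sensor wired to hardware I2C bus 1
# and responding at the default address 0x28; adjust the bus for your wiring):
#
# from machine import I2C
# imu = BNO055(I2C(1))
# print(imu.temperature(), imu.euler(), imu.accelerometer())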
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .SwitchNorm import SwitchNorm2d
def make_layers(
cfg, block, in_chns=None, norm=True, skip=False, downsampling="maxpooling", upsampling="transconv"
):
layers = []
cur_chns = in_chns
for ii, v in enumerate(cfg):
if v == "M":
if downsampling == "maxpooling":
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
elif downsampling == "strideconv":
layers += [nn.Conv2d(cur_chns, cfg[ii + 1], kernel_size=3, stride=2, padding=1)]
cur_chns = cfg[ii + 1]
else:
raise ValueError(f"unsupported down-sampling method")
elif v == "U":
if upsampling == "transconv":
layers += [nn.ConvTranspose2d(cur_chns, cfg[ii + 1], kernel_size=4, stride=2, padding=1)]
cur_chns = cfg[ii + 1]
elif upsampling == "bilinear":
layers += [nn.Upsample(scale_factor=2, mode="bilinear")]
else:
raise ValueError(f"unsupported up-sampling method")
elif v == "D":
layers += [nn.Dropout2d(p=0.3)]
else:
if ii == 0:
layers += [DoubleConv(cur_chns, v, norm=norm)]
else:
in_v = 2 * v if skip else cur_chns
layers += [block(in_v, v, norm=norm)]
cur_chns = v
return nn.Sequential(*layers)
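# Example (hypothetical cfg, shown only to illustrate the grammar accepted above):
# integers add conv blocks, "M" downsamples, "U" upsamples and "D" inserts dropout.
#
#   enc = make_layers([32, "M", 64, "M", 128], DoubleConv, in_chns=3)
#   y = enc(torch.randn(1, 3, 64, 64))   # -> shape (1, 128, 16, 16)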
class Encoder(nn.Module):
def __init__(self, cfg, block, in_chns, norm=True, downsampling="maxpooling"):
super(Encoder, self).__init__()
self.cfg = cfg
self.layers = make_layers(self.cfg, block, in_chns, norm=norm, downsampling=downsampling)
def forward(self, xt, ht, cell_list, delta_t, split=False):
kk = 0
out = []
if split:
xt_out = []
for v, module in zip(self.cfg, self.layers.children()):
xt = module(xt)
if isinstance(v, int):
if split:
xt_out.append(xt)
xt = cell_list[kk](xt, ht[kk], delta_t)
kk += 1
out.append(xt)
if split:
return out, xt_out
else:
return out
class Decoder(nn.Module):
def __init__(self, cfg, in_chns, skip=True, norm=True, upsampling="transconv"):
super(Decoder, self).__init__()
self.cfg = ["U" if v == "M" else v for v in cfg[::-1]]
self.cfg = self.cfg[1:]
self.skip = skip
self.layers = make_layers(self.cfg, SingleConv, in_chns, norm=norm, skip=skip, upsampling=upsampling)
def forward(self, x):
out = []
kk = -1
for ii, (v, module) in enumerate(zip(self.cfg, self.layers.children())):
if ii == 0:
y = module(x[kk])
kk -= 1
elif isinstance(v, int):
if self.skip:
y = torch.cat((y, x[kk]), dim=1)
kk -= 1
y = module(y)
out.append(y)
else:
y = module(y)
return out
class DoubleConv(nn.Module):
def __init__(self, in_chns, out_chns, norm=True):
super(DoubleConv, self).__init__()
self.layers = nn.Sequential(
SingleConv(in_chns, out_chns, norm=norm), SingleConv(out_chns, out_chns, norm=norm)
)
def forward(self, x):
return self.layers(x)
class DoublePreActConv(nn.Module):
def __init__(self, in_chns, out_chns, norm=True):
super(DoublePreActConv, self).__init__()
self.layers = nn.Sequential(
SinglePreActConv(in_chns, out_chns, norm=norm), SinglePreActConv(out_chns, out_chns, norm=norm)
)
def forward(self, x):
return self.layers(x)
class ResDoubleConv(nn.Module):
def __init__(self, in_chns, out_chns, norm=True):
super(ResDoubleConv, self).__init__()
self.block = DoublePreActConv(in_chns, out_chns, norm=norm)
def forward(self, x):
y = self.block(x)
delta_c = y.size(1) - x.size(1)
x_skip = F.pad(x, (0, 0, 0, 0, 0, delta_c)) if delta_c > 0 else x
return y + x_skip
class SingleConv(nn.Module):
def __init__(self, in_chns, out_chns, kernel_size=3, stride=1, padding=1, norm=True, act=True):
super(SingleConv, self).__init__()
layers = [
nn.Conv2d(
in_chns, out_chns, kernel_size=kernel_size, stride=stride, padding=padding, bias=not norm
),
]
if norm:
layers.append(SwitchNorm2d(out_chns))
if act:
layers.append(nn.ReLU(inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class SingleDeconv(nn.Module):
def __init__(self, in_chns, out_chns, kernel_size=4, stride=2, padding=1, norm=True, act=True):
super(SingleDeconv, self).__init__()
layers = [
nn.ConvTranspose2d(
in_chns, out_chns, kernel_size=kernel_size, stride=stride, padding=padding, bias=not norm
),
]
if norm:
layers.append(SwitchNorm2d(out_chns))
if act:
layers.append(nn.ReLU(inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class SinglePreActConv(nn.Module):
def __init__(self, in_chns, out_chns, norm=True):
super(SinglePreActConv, self).__init__()
if norm:
layers = [SwitchNorm2d(in_chns), nn.ReLU(inplace=True)]
else:
layers = [nn.ReLU(inplace=True)]
layers.append(nn.Conv2d(in_chns, out_chns, kernel_size=3, padding=1, bias=not norm))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class DSVBlock(nn.Module):
def __init__(self, in_chns, out_chns, scale_factor):
super(DSVBlock, self).__init__()
layers = [
nn.Conv2d(in_chns, out_chns, kernel_size=1, stride=1, padding=0),
]
if scale_factor > 1:
layers += [nn.Upsample(scale_factor=scale_factor, mode="bilinear", align_corners=True)]
self.block = nn.Sequential(*layers)
def forward(self, x):
return self.block(x)
class DSVLayer(nn.Module):
def __init__(self, nb_filter, out_chns, num_layers=1):
super(DSVLayer, self).__init__()
self.num_layers = num_layers
layers = []
for ii in range(self.num_layers):
layers.append(DSVBlock(nb_filter[ii], out_chns, scale_factor=2 ** ii))
self.block = nn.ModuleList(layers)
def forward(self, x):
out = []
for ii in range(1, self.num_layers + 1):
out.append(self.block[ii - 1](x[-ii]))
y = out[0]
for ii in range(1, self.num_layers):
y += out[ii]
return y
def init_weights(net, init_type="normal"):
if init_type == "normal":
net.apply(weights_init_normal)
elif init_type == "xavier":
net.apply(weights_init_xavier)
elif init_type == "kaiming":
net.apply(weights_init_kaiming)
elif init_type == "orthogonal":
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError("initialization method [%s] is not implemented" % init_type)
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("Linear") != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1:
nn.init.xavier_normal_(m.weight.data, gain=1)
elif classname.find("Linear") != -1:
nn.init.xavier_normal_(m.weight.data, gain=1)
elif classname.find("BatchNorm") != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1:
nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
elif classname.find("Linear") != -1:
nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
elif classname.find("BatchNorm") != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1:
nn.init.orthogonal_(m.weight.data, gain=1)
elif classname.find("Linear") != -1:
nn.init.orthogonal_(m.weight.data, gain=1)
elif classname.find("BatchNorm") != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
|
# minimal potential length of requirement
test_text = 'При первом открытии а п.1.23приложения "Х" должен появляться новый экран. ' \
'1.1 Если экран успешно открылся, то начнет загружаться новая страница. ' \
'1.2 Если появилась ошибка, то стоит отобразить юзеру модальное окно с текстом ошибки.' \
'Цвет модального окна: #232832.' \
'1.2.1 В модальном окне при нажатии на "Отмена" выполнение перейдет к п. 1.1. '
# todo for future functionality: add notification if requirement
# is more than X symbols, offer to divide into 2 reqs [i.e. if more than 90 symbols]
# todo implement main algo for dividing input name to N sentences
# todo divided sentences should have id , because of further sequential or parallel processing
# [labeling -> semantics handling -> structure handling, etc.
class ReqSeparator:
"""Separates input name to N sentences for further cleaning and improving each requirement."""
    def __init__(self, text):
        print('\n===============')
        self.text = text.strip()
        # keep sentences per instance (a class-level list would be shared between instances)
        self.sentences = []
def get_text(self):
return self.text
    def divide_reqs(self):
        """Split the text into consecutive chunks of at most max_req_len characters."""
        max_req_len = 90
        l_chr_index = 0
        while l_chr_index < len(self.text):
            r_chr_index = min(l_chr_index + max_req_len, len(self.text))
            self.sentences.append(self.text[l_chr_index:r_chr_index])
            # advance the window by one full chunk; the original doubled the right
            # index each pass and never terminated on most inputs
            l_chr_index = r_chr_index
        print(self.sentences)
ReqSeparator(test_text).divide_reqs()
|
import numpy as np
import tensorflow as tf
from gym.spaces import Box, Discrete
EPS = 1e-8
def placeholder(dim=None):
return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))
def placeholders(*args):
return [placeholder(dim) for dim in args]
def placeholder_from_space(space):
if space is None:
return tf.placeholder(dtype=tf.float32,shape=(None,))
if isinstance(space, Box):
return tf.placeholder(dtype=tf.float32, shape=(None,space.shape[0]))
elif isinstance(space, Discrete):
return tf.placeholder(dtype=tf.int32, shape=(None,1))
raise NotImplementedError
def placeholders_from_space(*args):
return [placeholder_from_space(dim) for dim in args]
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)
def get_vars(scope):
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def mlp_ensemble_with_prior(x, hidden_sizes=(32,), ensemble_size=10, prior_scale=1.0, activation=None, output_activation=None):
# An ensemble of Prior nets.
priors = []
for _ in range(ensemble_size):
x_proxy = x
for h in hidden_sizes[:-1]:
x_proxy = tf.layers.dense(x_proxy, units=h, activation=activation, kernel_initializer=tf.variance_scaling_initializer(2.0))
priors.append(tf.stop_gradient(tf.layers.dense(x_proxy, units=hidden_sizes[-1], activation=output_activation, kernel_initializer=tf.variance_scaling_initializer(2.0)))) # outputs: 10 x shape(?, 4)
prior_nets = priors # 10 x shape(?, 4)
# An ensemble of Q nets.
qs = []
for _ in range(ensemble_size):
x_proxy = x
for h in hidden_sizes[:-1]:
x_proxy = tf.layers.dense(x_proxy, units=h, activation=activation, kernel_initializer=tf.variance_scaling_initializer(2.0))
qs.append(tf.layers.dense(x_proxy, units=hidden_sizes[-1], activation=output_activation, kernel_initializer=tf.variance_scaling_initializer(2.0)))
q_nets = qs
# An ensemble of Q models.
q_models = [q_nets[i] + prior_scale * prior_nets[i] for i in range(ensemble_size)]
return q_models
"""
Policies
"""
def softmax_policy(alpha, v_x, act_dim):
pi_log = tf.nn.log_softmax(v_x/alpha, axis=1)
mu = tf.argmax(pi_log, axis=1)
# tf.random.multinomial( logits, num_samples, seed=None, name=None, output_dtype=None )
# logits: 2-D Tensor with shape [batch_size, num_classes]. Each slice [i, :] represents the unnormalized log-probabilities for all classes.
# num_samples: 0-D. Number of independent samples to draw for each row slice.
pi = tf.squeeze(tf.random.multinomial(pi_log, num_samples=1), axis=1)
# logp_pi = tf.reduce_sum(tf.one_hot(mu, depth=act_dim) * pi_log, axis=1) # use max Q(s,a)
logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * pi_log, axis=1)
# logp_pi = tf.reduce_sum(tf.exp(pi_log)*pi_log, axis=1) # exact entropy
return mu, pi, logp_pi
"""
Actor-Critics
"""
def mlp_actor_critic(x, a, alpha, hidden_sizes=(400,300), ensemble_size=10, activation=tf.nn.relu,
output_activation=None, policy=softmax_policy, action_space=None):
if x.shape[1] == 128: # for Breakout-ram-v4
x = (x - 128.0) / 128.0 # x: shape(?,128)
act_dim = action_space.n
a_one_hot = tf.squeeze(tf.one_hot(a, depth=act_dim), axis=1) # shape(?,4)
#vfs
# vf_mlp = lambda x: mlp(x, list(hidden_sizes) + [act_dim], activation, None) # return: shape(?,4)
vf_mlp = lambda x: mlp_ensemble_with_prior(x, list(hidden_sizes) + [act_dim], ensemble_size=ensemble_size, activation=activation, output_activation=output_activation)
with tf.variable_scope('q1'):
vx1_a = vf_mlp(x)
q1 = [tf.reduce_sum(vx1_a[i]*a_one_hot, axis=1) for i in range(ensemble_size)]
with tf.variable_scope('q1', reuse=True):
vx1_b= vf_mlp(x)
# policy
mu, pi, logp_pi = [], [], []
for i in range(ensemble_size):
mu_pi_logpi = policy(alpha, vx1_b[i], act_dim)
mu.append(mu_pi_logpi[0])
pi.append(mu_pi_logpi[1])
logp_pi.append(mu_pi_logpi[2])
# mu_one_hot = tf.one_hot(mu, depth=act_dim)
pi_one_hot = [tf.one_hot(pi[i], depth=act_dim) for i in range(ensemble_size)]
# q1_pi = tf.reduce_sum(v_x*mu_one_hot, axis=1) # use max Q(s,a)
q1_pi = [tf.reduce_sum(vx1_b[i] * pi_one_hot[i], axis=1) for i in range(ensemble_size)]
with tf.variable_scope('q2'):
vx2_a = vf_mlp(x)
q2 = [tf.reduce_sum(vx2_a[i]*a_one_hot, axis=1) for i in range(ensemble_size)]
with tf.variable_scope('q2', reuse=True):
vx2_b = vf_mlp(x)
# q2_pi = tf.reduce_sum(vf_mlp(x)*mu_one_hot, axis=1) # use max Q(s,a)
q2_pi = [tf.reduce_sum(vx2_b[i] * pi_one_hot[i], axis=1) for i in range(ensemble_size)]
# 10 x shape(?,)
return mu, pi, logp_pi, q1, q1_pi, q2, q2_pi
|
from predict_flask import classify
body = "Al-Sisi has denied Israeli reports stating that he offered to extend the Gaza Strip."
head = "Apple installing safes in-store to protect gold Watch Edition"
print (classify([head, body]))
|
from django.urls import path, re_path
from apps.user import views
urlpatterns = [
    path('register/', views.RegisterView.as_view(), name='register'),  # user registration page
    path('login/', views.LoginView.as_view(), name='login'),  # user login page
    re_path('active/(?P<token>.*)$', views.ActiveView.as_view(), name='active'),  # user account activation route
    path('logout/', views.LogoutView.as_view(), name='logout'),  # user logout route
    re_path(r'^$', views.UserInfoView.as_view(), name='users'),  # user center - info page
    re_path(r'^order/(?P<page>\d+)$', views.UserOrderView.as_view(), name='order'),  # user center - orders page
    path(r'address/', views.AddressView.as_view(), name='address'),  # user center - address page
path(r'get_valid_img/', views.get_valid_img),
]
|
#!/usr/bin/env python
import json
import sys
release_type="patch"
if len(sys.argv) > 1:
release_type = sys.argv[1]
default_version_data = \
"""
{
"version": { "major": 0, "minor": 0, "patch": 0 }
}
"""
# supported types
types = ['major', 'minor', 'patch']
def get_version(release_type_):
if release_type_ not in types:
print("Invalid release type: {}!".format(release_type_))
exit(1)
try:
with open("version.json", "r") as fh:
version = json.load(fh)
except FileNotFoundError:
version = json.loads(default_version_data)
#print("Version file not found, starting from {}!".format(as_string(version)))
#print(version)
except IOError:
print("IO error, don't know how to deal with this!")
exit(1)
if release_type.lower() == "patch":
version['version']['patch'] = version['version']['patch'] + 1
elif release_type.lower() == "minor":
version['version']['minor'] = version['version']['minor'] + 1
# Patch version MUST be reset to 0 when minor version is incremented.
version['version']['patch'] = 0
elif release_type.lower() == "major":
version['version']['major'] = version['version']['major'] + 1
# Minor and patch version MUST be reset to 0 when major version is incremented.
version['version']['minor'] = 0
version['version']['patch'] = 0
# write the version file
try:
with open("version.json", "w") as fh:
json.dump(version, fh)
except IOError:
print("IO error, don't know how to deal with this!")
exit(1)
return version
def as_string(version_):
return "{0}.{1}.{2}".format(
version_['version']['major'],
version_['version']['minor'],
version_['version']['patch']
)
v = get_version(release_type_=release_type)
print(as_string(version_=v))
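# Example invocation (the file name bump_version.py is an assumption):
#
#   $ python bump_version.py         # bump the patch number
#   $ python bump_version.py minor   # bump minor, reset patch to 0
#   $ python bump_version.py major   # bump major, reset minor and patch to 0
#
# The current version is kept in version.json next to the script and the new
# version string is printed to stdout.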
|
from flee import flee
from flee.datamanager import handle_refugee_data
from flee.datamanager import DataTable # DataTable.subtract_dates()
from flee import InputGeography
import numpy as np
import flee.postprocessing.analysis as a
import sys
import argparse
import time
def date_to_sim_days(date):
return DataTable.subtract_dates(date, "2010-01-01")
def test_par_seq(end_time=10, last_physical_day=10,
parallel_mode="advanced", latency_mode="high_latency",
inputdir="test_data/test_input_csv",
initialagents=100000,
newagentsperstep=1000):
t_exec_start = time.time()
e = flee.Ecosystem()
e.parallel_mode = parallel_mode
e.latency_mode = latency_mode
ig = InputGeography.InputGeography()
ig.ReadLocationsFromCSV("%s/locations.csv" % inputdir)
ig.ReadLinksFromCSV("%s/routes.csv" % inputdir)
ig.ReadClosuresFromCSV("%s/closures.csv" % inputdir)
e, lm = ig.StoreInputGeographyInEcosystem(e)
#print("Network data loaded")
#d = handle_refugee_data.RefugeeTable(csvformat="generic", data_directory="test_data/test_input_csv/refugee_data", start_date="2010-01-01", data_layout="data_layout.csv")
output_header_string = "Day,"
camp_locations = e.get_camp_names()
ig.AddNewConflictZones(e, 0)
# All initial refugees start in location A.
e.add_agents_to_conflict_zones(initialagents)
for l in camp_locations:
output_header_string += "%s sim,%s data,%s error," % (
lm[l].name, lm[l].name, lm[l].name)
output_header_string += "Total error,refugees in camps (UNHCR),total refugees (simulation),raw UNHCR refugee count,refugees in camps (simulation),refugee_debt"
if e.getRankN(0):
print(output_header_string)
# Set up a mechanism to incorporate temporary decreases in refugees
# raw (interpolated) data from TOTAL UNHCR refugee count only.
refugees_raw = 0
t_exec_init = time.time()
if e.getRankN(0):
my_file = open('perf.log', 'w', encoding='utf-8')
print("Init time,{}".format(t_exec_init - t_exec_start), file=my_file)
for t in range(0, end_time):
if t > 0:
ig.AddNewConflictZones(e, t)
# Determine number of new refugees to insert into the system.
new_refs = newagentsperstep
refugees_raw += new_refs
# Insert refugee agents
e.add_agents_to_conflict_zones(new_refs)
e.refresh_conflict_weights()
t_data = t
e.enact_border_closures(t)
e.evolve()
# Calculation of error terms
errors = []
abs_errors = []
camps = []
for i in camp_locations:
camps += [lm[i]]
# calculate retrofitted time.
refugees_in_camps_sim = 0
for c in camps:
refugees_in_camps_sim += c.numAgents
output = "%s" % t
for i in range(0, len(camp_locations)):
output += ",%s" % (lm[camp_locations[i]].numAgents)
if refugees_raw > 0:
output += ",%s,%s" % (e.numAgents(), refugees_in_camps_sim)
else:
output += ",0,0"
if e.getRankN(t):
print(output)
t_exec_end = time.time()
if e.getRankN(0):
my_file = open('perf.log', 'a', encoding='utf-8')
print("Time in main loop,{}".format(
t_exec_end - t_exec_init), file=my_file)
if __name__ == "__main__":
end_time = 10
last_physical_day = 10
parser = argparse.ArgumentParser(
description='Run a parallel Flee benchmark.')
parser.add_argument("-p", "--parallelmode", type=str, default="advanced",
help="Parallelization mode (advanced, classic, cl-hilat OR adv-lowlat)")
parser.add_argument("-N", "--initialagents", type=int, default=100000,
help="Number of agents at the start of the simulation.")
parser.add_argument("-d", "--newagentsperstep", type=int, default=1000,
help="Number of agents added per time step.")
parser.add_argument("-t", "--simulationperiod", type=int, default=10,
help="Duration of the simulation in days.")
parser.add_argument("-i", "--inputdir", type=str, default="test_data/test_input_csv",
help="Directory with parallel test input. Must have locations named 'A','D','E' and 'F'.")
args = parser.parse_args()
end_time = args.simulationperiod
last_physical_day = args.simulationperiod
inputdir = args.inputdir
initialagents = args.initialagents
newagentsperstep = args.newagentsperstep
if args.parallelmode in ["advanced", "adv-lowlat"]:
parallel_mode = "loc-par"
else:
parallel_mode = "classic"
if args.parallelmode in ["advanced", "cl-hilat"]:
latency_mode = "high_latency"
else:
latency_mode = "low_latency"
print("MODE: ", args, file=sys.stderr)
test_par_seq(end_time, last_physical_day,
parallel_mode, latency_mode,
inputdir, initialagents,
newagentsperstep)
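# Example invocation (a sketch: the script name and the use of mpirun for the
# parallel modes are assumptions; adjust to your environment):
#
#   mpirun -np 4 python3 run_par_benchmark.py -p advanced -N 100000 -d 1000 -t 10 \
#       -i test_data/test_input_csv
#
# Per-step camp counts go to stdout and init/loop timings are written to perf.log.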
|
#!/usr/bin/env python
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# NASA Jet Propulsion Laboratory
# California Institute of Technology
# (C) 2008 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
import json
from kombu.common import maybe_declare
from kombu.utils.debug import setup_logging
from kombu import Connection, Exchange, Queue
from datetime import datetime
import logging
logger = logging.getLogger()
class KombuMessenger(object):
"""
Sends messages via Kombu.
"""
def __init__(self, queueHost, queueName, id, hostname, pid, type):
"""
Initializer.
"""
self._queueHost = queueHost
self._queueName = queueName
self._id = id
self._hostname = hostname
self._pid = pid
self._type = type
self._connection = Connection(
'pyamqp://guest:guest@%s:5672//' % self._queueHost)
self._connection.ensure_connection()
self._exchange = Exchange(self._queueName, type='direct')
self._queue = Queue(self._queueName, self._exchange,
routing_key=self._queueName)
self._producer = self._connection.Producer()
self._publish = self._connection.ensure(
self._producer, self._producer.publish, max_retries=3)
# end def
def __del__(self):
"""
Finalizer.
"""
self._connection.close()
# end def
def __str__(self):
"""
Gets the string representation of this object.
@return: the string representation of this object.
@rtype: str
"""
return 'connection: "%s", id: "%s", queueName: "%s", hostname: "%s", pid: "%s", type: "%s"' % (self._connection, self._id, self._queueName, self._hostname, self._pid, self._type)
# end def
def send(self, chunk):
"""
Send stream chunk with JSON descriptor.
"""
context = {
'id': self._id,
'datetime': datetime.isoformat(datetime.now()),
'hostname': self._hostname,
'pid': self._pid,
'type': self._type,
'chunk': chunk
}
#contextStr = json.dumps(context)
self._publish(context, routing_key=self._queueName,
declare=[self._queue])
# with self._connection.Producer() as producer:
# publish = self._connection.ensure(producer, producer.publish, max_retries=3)
# publish(context, routing_key=self._queueName, declare=[self._queue])
# print 'channel.basic_publish(): %s' % contextStr
# end def
# end class
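# Usage sketch (assumes a RabbitMQ broker reachable on the given host with the
# guest/guest credentials hard-coded in the connection URL above):
#
# import os, socket
# messenger = KombuMessenger(queueHost='localhost', queueName='stream',
#                            id='job-1', hostname=socket.gethostname(),
#                            pid=os.getpid(), type='stdout')
# messenger.send('first chunk of output')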
|
from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "or_123"
class TestOrder(object):
async def test_is_listable(self, request_mock):
resources = await stripe.Order.list()
request_mock.assert_requested("get", "/v1/orders")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Order)
async def test_is_retrievable(self, request_mock):
resource = await stripe.Order.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Order)
async def test_is_creatable(self, request_mock):
resource = await stripe.Order.create(currency="usd")
request_mock.assert_requested("post", "/v1/orders")
assert isinstance(resource, stripe.Order)
async def test_is_saveable(self, request_mock):
resource = await stripe.Order.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
await resource.save()
request_mock.assert_requested(
"post", "/v1/orders/%s" % TEST_RESOURCE_ID
)
async def test_is_modifiable(self, request_mock):
resource = await stripe.Order.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Order)
async def test_can_pay(self, request_mock):
order = await stripe.Order.retrieve(TEST_RESOURCE_ID)
resource = await order.pay(source="src_123")
request_mock.assert_requested(
"post",
"/v1/orders/%s/pay" % TEST_RESOURCE_ID,
{"source": "src_123"},
)
assert isinstance(resource, stripe.Order)
assert resource is order
async def test_can_pay_classmethod(self, request_mock):
resource = await stripe.Order.pay(TEST_RESOURCE_ID, source="src_123")
request_mock.assert_requested(
"post",
"/v1/orders/%s/pay" % TEST_RESOURCE_ID,
{"source": "src_123"},
)
assert isinstance(resource, stripe.Order)
async def test_can_return(self, request_mock):
order = await stripe.Order.retrieve(TEST_RESOURCE_ID)
resource = await order.return_order()
request_mock.assert_requested(
"post", "/v1/orders/%s/returns" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.OrderReturn)
async def test_can_return_classmethod(self, request_mock):
resource = await stripe.Order.return_order(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/orders/%s/returns" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.OrderReturn)
|
# Copyright (c) 2009 by Yaco S.L.
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import sys
import cairo
import pycha.stackedbar
def stackedBarChart(output, chartFactory):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 400, 200)
dataSet = (
('internal', [(0, 8), (1, 10), (2, 5), (3, 6)]),
('external', [(0, 5), (1, 2), (2, 4), (3, 8)]),
)
options = {
'background': {
'chartColor': '#ffeeff',
'baseColor': '#ffffff',
'lineColor': '#444444',
},
'colorScheme': {
'name': 'gradient',
'args': {
'initialColor': 'red',
},
},
'legend': {
'hide': True,
},
'padding': {
'left': 75,
'bottom': 55,
},
'title': 'Sample Chart'
}
chart = chartFactory(surface, options)
chart.addDataset(dataSet)
chart.render()
surface.write_to_png(output)
if __name__ == '__main__':
if len(sys.argv) > 1:
output = sys.argv[1]
else:
output = 'stackedbarchart.png'
stackedBarChart('v' + output, pycha.stackedbar.StackedVerticalBarChart)
stackedBarChart('h' + output, pycha.stackedbar.StackedHorizontalBarChart)
|
from datetime import datetime
from time import mktime
import feedparser
import requests
from analyzer.tasks import parse_html_entry
from celery import shared_task
from celery.utils.log import get_task_logger
from django.db.models import Q
from django.utils.timezone import make_aware
from crawler.models import RSSEntry, RSSFeed
logger = get_task_logger(__name__)
def request_article(rss_entry, session=requests.Session(), timeout=8.0):
"""Perform the GET request and persist the HTML and meta in the database."""
resp = None
try:
resp = session.get(rss_entry.link, timeout=timeout)
except requests.exceptions.Timeout:
# connection to server timed out or server did not send data in time
logger.warn(f"Request timeout for `{rss_entry}`")
except requests.exceptions.RequestException as e:
# arbitrary requests related exception
logger.warn(f"Could not GET `{rss_entry}`: `{e}`")
if resp is None:
# request response not instantiated
return
# response can be an http error, so we want to track all the meta
content_type = resp.headers.get("Content-Type", "")
raw_html = None
if "text" in content_type:
raw_html = resp.text
else:
logger.warn(f"Unsupported Content-Type `{content_type}` from `{rss_entry}`")
raw_html = ""
rss_entry, _ = RSSEntry.objects.update_or_create(
pk=rss_entry.pk,
defaults={
"raw_html": raw_html,
"resolved_url": resp.url,
"status_code": resp.status_code,
"requested_at": make_aware(datetime.now()),
"headers": dict(resp.headers),
},
)
return rss_entry
@shared_task
def retrieve_feed_entries(rss_feed_id):
"""Given an RSSFeed, iterate through all RSSEntries and download the HTML."""
try:
rss_feed = RSSFeed.objects.get(pk=rss_feed_id)
data = feedparser.parse(rss_feed.url)
except Exception as e:
logger.warn(f"Failed to parse feed `{rss_feed}`: `{e}`")
data = {}
session = requests.Session()
# Parse all of the RSS entries and create model instances
for entry in data.get("entries", []):
if any(req_key not in entry for req_key in ["link", "title"]):
logger.warn(f"Entry missing 'link' or 'title' `{entry}` from `{rss_feed}`")
continue
pub_date = None # may not always exist
if "published_parsed" in entry:
# https://stackoverflow.com/a/1697907/1942263
pub_date = datetime.utcfromtimestamp(mktime(entry["published_parsed"]))
pub_date = make_aware(pub_date)
description = entry.get("description", "")
if not description and "summary" in entry:
description = entry["summary"]
rss_entry, _created = RSSEntry.objects.update_or_create(
link=entry["link"],
defaults={
"feed": rss_feed,
"title": entry["title"],
"description": description,
"pub_date": pub_date,
},
)
if not rss_entry.raw_html:
rss_entry = request_article(rss_entry, session=session)
if not hasattr(rss_entry, "article"):
if "html" in rss_entry.headers.get("Content-Type"):
parse_html_entry.delay(rss_entry.pk)
@shared_task
def dispatch_crawl_entries():
"""Iterate through valid RSSEntries that don't have html and retry query"""
missed_entries = RSSEntry.objects.filter(
Q(raw_html__isnull=True) # raw_html is null AND
& (
# Content-Type header is null OR
Q(**{"headers__Content-Type__isnull": True})
|
# Content-Type header is textual
Q(**{"headers__Content-Type__icontains": "text/html"})
)
)
session = requests.Session()
for rss_entry in missed_entries.iterator():
request_article(rss_entry, session=session)
@shared_task
def dispatch_crawl_feeds():
"""Iterate through all of the RSSFeeds and dispatch a task for each one."""
for rss_feed in RSSFeed.objects.all().iterator():
retrieve_feed_entries.delay(rss_feed.pk)
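# Hypothetical scheduling sketch (not part of this app's settings): a celery beat
# entry like the one below would run dispatch_crawl_feeds periodically; the task
# path assumes this module lives at crawler.tasks.
#
# CELERY_BEAT_SCHEDULE = {
#     "crawl-feeds-hourly": {
#         "task": "crawler.tasks.dispatch_crawl_feeds",
#         "schedule": 3600,
#     },
# }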
|
expected_output = {
"segment_routing": {"sid": {"12345": {"state": "S", "state_info": "Shared"}}}
}
|
# from core.Backup import Backup
# from core.Jobs import Jobs
# jobs = Jobs()
# for job in range(len(jobs)):
# try:
# job = jobs.next()
# bu = Backup(job)
# print(bu.command())
# except Exception as err:
# print(err)
# from core.Notifier import Notifier
# email = Notifier()
# email.from_address("mdognini@eurokemical.it") \
# .to_address("mdognini@eurokemical.it") \
# .subject("pyuthon") \
# .body("il messaggio inviazio") \
# .send()
from decouple import config
from configs.conf import ERROR_LOG
import os
from configs.conf import INCLUDE
print(INCLUDE)
|
from __future__ import unicode_literals
import os
import sys
import urllib
import datetime
import logging
import subprocess
import eeUtil
import urllib.request
import requests
from bs4 import BeautifulSoup
import copy
import numpy as np
import ee
import time
from string import ascii_uppercase
import json
# url for historical air quality data
SOURCE_URL_HISTORICAL = 'https://portal.nccs.nasa.gov/datashare/gmao/geos-cf/v1/das/Y{year}/M{month}/D{day}/GEOS-CF.v01.rpl.chm_tavg_1hr_g1440x721_v1.{year}{month}{day}_{time}z.nc4'
# url for forecast air quality data
SOURCE_URL_FORECAST = 'https://portal.nccs.nasa.gov/datashare/gmao/geos-cf/v1/forecast/Y{start_year}/M{start_month}/D{start_day}/H12/GEOS-CF.v01.fcst.chm_tavg_1hr_g1440x721_v1.{start_year}{start_month}{start_day}_12z+{year}{month}{day}_{time}z.nc4'
# subdataset to be converted to tif
# should be of the format 'NETCDF:"filename.nc":variable'
SDS_NAME = 'NETCDF:"{fname}":{var}'
# list variables (as named in netcdf) that we want to pull
VARS = ['NO2', 'O3', 'PM25_RH35_GCC']
# define unit conversion factors for each compound
CONVERSION_FACTORS = {
'NO2': 1e9, # mol/mol to ppb
'O3': 1e9, # mol/mol to ppb
'PM25_RH35_GCC': 1, # keep original units
}
# define metrics to calculate for each compound
# each metric is the name of a function defined in this script
# available metrics: daily_avg, daily_max
METRIC_BY_COMPOUND = {
'NO2': 'daily_avg',
'O3': 'daily_max',
'PM25_RH35_GCC': 'daily_avg',
}
# nodata value for netcdf
NODATA_VALUE = 9.9999999E14
# name of data directory in Docker container
DATA_DIR = 'data'
# name of collection in GEE where we will upload the final data
COLLECTION = '/projects/resource-watch-gee/cit_002_gmao_air_quality'
# generate name for dataset's parent folder on GEE which will be used to store
# several collections - one collection per variable
PARENT_FOLDER = COLLECTION + '_{period}_{metric}'
# generate generic string that can be formatted to name each variable's GEE collection
EE_COLLECTION_GEN = PARENT_FOLDER + '/{var}'
# generate generic string that can be formatted to name each variable's asset name
FILENAME = PARENT_FOLDER.split('/')[-1] + '_{var}_{date}'
# specify Google Cloud Storage folder name
GS_FOLDER = COLLECTION[1:]
# do you want to delete everything currently in the GEE collection when you run this script?
CLEAR_COLLECTION_FIRST = False
# how many assets can be stored in the GEE collection before the oldest ones are deleted?
MAX_ASSETS = 100
# date format to use in GEE
DATE_FORMAT = '%Y-%m-%d'
# Resource Watch dataset API IDs
# Important! Before testing this script:
# Please change these IDs OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on different datasets on Resource Watch
DATASET_IDS = {
'NO2':'ecce902d-a322-4d13-a3d6-e1a36fc5573e',
'O3':'ebc079a1-51d8-4622-ba25-d8f3b4fcf8b3',
'PM25_RH35_GCC':'645fe192-28db-4949-95b9-79d898f4226b',
}
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
'''
FUNCTIONS FOR RASTER DATASETS
The functions below must go in every near real-time script for a RASTER dataset.
Their format should not need to be changed.
'''
def getLastUpdate(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will get the current 'last update date' from the API
and return it as a datetime
INPUT dataset: Resource Watch API dataset ID (string)
RETURN lastUpdateDT: current 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
# find the 'last update date'
lastUpdateString=r.json()['data']['attributes']['dataLastUpdated']
# split this date into two pieces at the seconds decimal so that the datetime module can read it:
# ex: '2020-03-11T00:00:00.000Z' will become '2020-03-11T00:00:00' (nofrag) and '000Z' (frag)
nofrag, frag = lastUpdateString.split('.')
# generate a datetime object
nofrag_dt = datetime.datetime.strptime(nofrag, "%Y-%m-%dT%H:%M:%S")
# add back the microseconds to the datetime
lastUpdateDT = nofrag_dt.replace(microsecond=int(frag[:-1])*1000)
return lastUpdateDT
def getLayerIDs(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will return a list of all the layer IDs associated with it
INPUT dataset: Resource Watch API dataset ID (string)
RETURN layerIDs: Resource Watch API layer IDs for the input dataset (list of strings)
'''
# generate the API url for this dataset - this must include the layers
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}?includes=layer'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
#get a list of all the layers
layers = r.json()['data']['attributes']['layer']
# create an empty list to store the layer IDs
layerIDs =[]
# go through each layer and add its ID to the list
for layer in layers:
# only add layers that have Resource Watch listed as its application
if layer['attributes']['application']==['rw']:
layerIDs.append(layer['id'])
return layerIDs
def flushTileCache(layer_id):
"""
Given the API ID for a GEE layer on Resource Watch,
this function will clear the layer cache.
If the cache is not cleared, when you view the dataset on Resource Watch, old and new tiles will be mixed together.
INPUT layer_id: Resource Watch API layer ID (string)
"""
# generate the API url for this layer's cache
apiUrl = 'http://api.resourcewatch.org/v1/layer/{}/expire-cache'.format(layer_id)
# create headers to send with the request to clear the cache
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# clear the cache for the layer
    # sometimes this fails, so we will try multiple times if it does
# specify that we are on the first try
try_num=1
tries = 4
while try_num<tries:
try:
# try to delete the cache
r = requests.delete(url = apiUrl, headers = headers, timeout=1000)
# if we get a 200, the cache has been deleted
# if we get a 504 (gateway timeout) - the tiles are still being deleted, but it worked
if r.ok or r.status_code==504:
logging.info('[Cache tiles deleted] for {}: status code {}'.format(layer_id, r.status_code))
return r.status_code
# if we don't get a 200 or 504:
else:
# if we are not on our last try, wait 60 seconds and try to clear the cache again
if try_num < (tries-1):
logging.info('Cache failed to flush: status code {}'.format(r.status_code))
time.sleep(60)
logging.info('Trying again.')
# if we are on our last try, log that the cache flush failed
else:
logging.error('Cache failed to flush: status code {}'.format(r.status_code))
logging.error('Aborting.')
try_num += 1
except Exception as e:
logging.error('Failed: {}'.format(e))
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def getCollectionName(period, var):
'''
get GEE collection name
INPUT period: period to be used in asset name, historical or forecast (string)
var: variable to be used in asset name (string)
RETURN GEE collection name for input date (string)
'''
return EE_COLLECTION_GEN.format(period=period, metric=METRIC_BY_COMPOUND[var], var=var)
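# Illustration: with the templates defined at the top of this script,
# getCollectionName('historical', 'NO2') evaluates to
# '/projects/resource-watch-gee/cit_002_gmao_air_quality_historical_daily_avg/NO2'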
def getAssetName(date, period, var):
'''
get asset name
INPUT date: date in the format of the DATE_FORMAT variable (string)
period: period to be used in asset name, historical or forecast (string)
var: variable to be used in asset name (string)
RETURN GEE asset name for input date (string)
'''
collection = getCollectionName(period, var)
return os.path.join(collection, FILENAME.format(period=period, metric=METRIC_BY_COMPOUND[var], var=var, date=date))
def getTiffName(file, period, var):
'''
generate names for tif files that we are going to create from netcdf
INPUT file: netcdf filename (string)
period: period to be used in tif name, historical or forecast(string)
var: variable to be used in tif name (string)
RETURN name: file name to save tif file created from netcdf (string)
'''
# get year, month, day, and time from netcdf filename
year = file.split('/')[1][-18:-14]
month = file.split('/')[1][-14:-12]
day = file.split('/')[1][-12:-10]
time = file.split('/')[1][-9:-5]
# generate date string to be used in tif file name
date = year+'-'+month+'-'+day +'_'+time
# generate name for tif file
name = os.path.join(DATA_DIR, FILENAME.format(period=period, metric=METRIC_BY_COMPOUND[var], var=var, date=date))+'.tif'
return name
def getDateTimeString(filename):
'''
get date from filename (last 10 characters of filename after removing extension)
INPUT filename: file name that ends in a date of the format YYYY-MM-DD (string)
RETURN date in the format YYYY-MM-DD (string)
'''
return os.path.splitext(os.path.basename(filename))[0][-10:]
def getDate_GEE(filename):
'''
get date from Google Earth Engine asset name (last 10 characters of filename after removing extension)
INPUT filename: asset name that ends in a date of the format YYYY-MM-DD (string)
RETURN date in the format YYYY-MM-DD (string)
'''
return os.path.splitext(os.path.basename(filename))[0][-10:]
def list_available_files(url, file_start=''):
'''
get the files available for a given day using a source url formatted with date
INPUT url: source url for the given day's data folder (string)
file_start: a string that is present in the beginning of every source netcdf filename for this data (string)
RETURN list of files available for the given url (list of strings)
'''
# open and read the url
page = requests.get(url).text
# use BeautifulSoup to read the content as a nested data structure
soup = BeautifulSoup(page, 'html.parser')
# Extract all the <a> tags within the html content to find the files available for download marked with these tags.
# Get only the files that start with a certain word present in the beginning of every source netcdf filename
return [node.get('href') for node in soup.find_all('a') if type(node.get('href'))==str and node.get('href').startswith(file_start)]
def getNewDatesHistorical(existing_dates):
'''
Get new dates we want to try to fetch historical data for
INPUT existing_dates: list of dates that we already have in GEE, in the format of the DATE_FORMAT variable (list of strings)
RETURN new_dates: list of new dates we want to try to get, in the format of the DATE_FORMAT variable (list of strings)
'''
# create empty list to store dates we should process
new_dates = []
# start with today's date and time
date = datetime.datetime.utcnow()
# generate date string in same format used in GEE collection
date_str = datetime.datetime.strftime(date, DATE_FORMAT)
# find date beyond which we don't want to go back since that will exceed the maximum allowable assets in GEE
last_date = date - datetime.timedelta(days=MAX_ASSETS)
# while the date string is not in our list of existing dates and we have not gone back beyond the maximum allowable date:
while (date_str not in existing_dates) and (date!=last_date):
# general source url for the given date's data folder
url = SOURCE_URL_HISTORICAL.split('/GEOS')[0].format(year=date.year, month='{:02d}'.format(date.month), day='{:02d}'.format(date.day))
# get the list of files available for the given date
files = list_available_files(url, file_start='GEOS-CF.v01.rpl.chm_tavg')
# if the first 12 hourly files are available for a day, we can process this data - add it to the list
# note: we are centering the averages about midnight each day, so we just need 12 hours from the most recent day and 12 hours from the previous day
if len(files) >= 12:
new_dates.append(date_str)
# go back one more day
date = date - datetime.timedelta(days=1)
# generate new string in same format used in GEE collection
date_str = datetime.datetime.strftime(date, DATE_FORMAT)
#repeat until we reach something in our existing dates
#reverse order so we pull oldest date first
new_dates.reverse()
return new_dates
def getNewDatesForecast(existing_dates):
'''
Get new dates we want to try to fetch forecasted data for
INPUT existing_dates: list of dates that we already have in GEE, in the format of the DATE_FORMAT variable (list of strings)
RETURN new_dates: list of new dates we want to try to get, in the format of the DATE_FORMAT variable (list of strings)
'''
if existing_dates:
# get start date of last forecast
first_date_str = existing_dates[0]
# convert date string to datetime object
existing_start_date = datetime.datetime.strptime(first_date_str, DATE_FORMAT)
else:
# if we don't have existing data, just choose an old date so that we keep checking back until that date
# let's assume we will probably have a forecast in the last 30 days, so we will check back that far for
# forecasts until we find one
existing_start_date = datetime.datetime.utcnow() - datetime.timedelta(days=30)
#create empty list to store dates we should process
new_dates = []
# start with today's date and time
date = datetime.datetime.utcnow()
# while the date is newer than the most recent forecast that we pulled:
while date > existing_start_date:
# general source url for this day's forecast data folder
url = SOURCE_URL_FORECAST.split('/GEOS')[0].format(start_year=date.year, start_month='{:02d}'.format(date.month), start_day='{:02d}'.format(date.day))
# check the files available for this day:
files = list_available_files(url, file_start='GEOS-CF.v01.fcst.chm_tavg')
# if all 120 files are available (5 days x 24 hours/day), we can process this data
if len(files) == 120:
#add the next five days forecast to the new dates
for i in range(5):
date = date + datetime.timedelta(days=1)
# generate a string from the date
date_str = datetime.datetime.strftime(date, DATE_FORMAT)
new_dates.append(date_str)
# once we have found the most recent forecast we can break from the while loop because we only want to process the most recent forecast
break
# if there was no forecast for this day, go back one more day
date = date - datetime.timedelta(days=1)
# repeat until we reach the forecast we already have
return new_dates
def convert(files, var, period):
'''
Convert netcdf files to tifs
INPUT files: list of file names for netcdfs that have already been downloaded (list of strings)
var: variable which we are converting files for (string)
period: period we are converting data for, historical or forecast (string)
RETURN tifs: list of file names for tifs that have been generated (list of strings)
'''
# make an empty list to store the names of tif files that we create
tifs = []
for f in files:
logging.info('Converting {} to tiff'.format(f))
# generate the subdatset name for current netcdf file for a particular variable
sds_path = SDS_NAME.format(fname=f, var=var)
# only one band available in each file, so we will pull band 1
band = 1
# generate a name to save the tif file we will translate the netcdf file into
tif = getTiffName(file=f, period=period, var=var)
# translate the netcdf into a tif
cmd = ['gdal_translate', '-b', str(band), '-q', '-a_nodata', str(NODATA_VALUE), '-a_srs', 'EPSG:4326', sds_path, tif]
subprocess.call(cmd)
# add the new tif files to the list of tifs
tifs.append(tif)
return tifs
def fetch(new_dates, unformatted_source_url, period):
'''
Fetch files by datestamp
INPUT new_dates: list of dates we want to try to fetch, in the format YYYY-MM-DD (list of strings)
unformatted_source_url: url for air quality data (string)
period: period for which we want to get the data, historical or forecast (string)
RETURN files: list of file names for netcdfs that have been downloaded (list of strings)
files_by_date: dictionary of file names along with the date for which they were downloaded (dictionary of strings)
'''
# make an empty list to store names of the files we downloaded
files = []
# create a list of hours to pull (24 hours per day, on the half-hour)
# starts after noon on previous day through noon of current day
hours = ['1230', '1330', '1430', '1530', '1630', '1730', '1830', '1930', '2030', '2130', '2230', '2330',
'0030', '0130', '0230', '0330', '0430', '0530', '0630', '0730', '0830', '0930', '1030', '1130']
# create an empty dictionary to store downloaded file names as value and corresponding dates as key
files_by_date = {}
# Loop over all hours of the new dates, check if there is data available, and download netcdfs
for date in new_dates:
# make an empty list to store names of the files we downloaded
# this list will be used to insert values to the "files_by_date" dictionary
files_for_current_date = []
# convert the first new date to a datetime object and go back one day (this is used as the forecast start date)
first_date = datetime.datetime.strptime(new_dates[0], DATE_FORMAT) - datetime.timedelta(days=1)
# generate a string from the datetime object
first_date = datetime.datetime.strftime(first_date, DATE_FORMAT)
# loop through each hour we want to pull data for
for hour in hours:
# for the first half of the hours, get data from previous day
if hours.index(hour) < 12:
# convert date string to datetime object and go back one day
prev_date = datetime.datetime.strptime(date, DATE_FORMAT) - datetime.timedelta(days=1)
# generate a string from the datetime object
fetching_date = datetime.datetime.strftime(prev_date, DATE_FORMAT)
# for the second half, use the current day
else:
fetching_date = date
# Set up the url of the filename to download historical data
if period=='historical':
url = unformatted_source_url.format(year=int(fetching_date[:4]), month='{:02d}'.format(int(fetching_date[5:7])), day='{:02d}'.format(int(fetching_date[8:])), time=hour)
# Set up the url of the filename to download forecast data
elif period=='forecast':
url = unformatted_source_url.format(start_year=int(first_date[:4]), start_month='{:02d}'.format(int(first_date[5:7])), start_day='{:02d}'.format(int(first_date[8:])),year=int(fetching_date[:4]), month='{:02d}'.format(int(fetching_date[5:7])), day='{:02d}'.format(int(fetching_date[8:])), time=hour)
# Create a file name to store the netcdf in after download
f = DATA_DIR+'/'+url.split('/')[-1]
# try to download the data
tries = 0
while tries <3:
try:
logging.info('Retrieving {}'.format(f))
# download files from url and put in specified file location (f)
urllib.request.urlretrieve(url, f)
# if successful, add the file to the list of files we have downloaded
files.append(f)
files_for_current_date.append(f)
break
# if unsuccessful, log that the file was not downloaded
except Exception as e:
logging.info('Unable to retrieve data from {}'.format(url))
logging.info(e)
tries+=1
logging.info('try {}'.format(tries))
if tries==3:
logging.error('Unable to retrieve data from {}'.format(url))
exit()
# populate dictionary of file names along with the date for which they were downloaded
files_by_date[date]=files_for_current_date
return files, files_by_date
def daily_avg(date, var, period, tifs_for_date):
'''
Calculate a daily average tif file from all the hourly tif files
INPUT date: date we want to calculate the daily average for, in the format YYYY-MM-DD (string)
var: variable for which we are taking daily averages (string)
period: period for which we are calculating metric, historical or forecast (string)
tifs_for_date: list of file names for tifs that were created from downloaded netcdfs (list of strings)
RETURN result_tif: file name for tif file created after averaging all the input tifs (string)
'''
# create a list to store the tifs and variable names to be used in gdal_calc
gdal_tif_list=[]
# set up calc input for gdal_calc
calc = '--calc="('
# go through each hour in the day to be averaged
for i in range(len(tifs_for_date)):
# generate a letter variable for that tif to use in gdal_calc (A, B, C...)
letter = ascii_uppercase[i]
# add each letter to the list to be used in gdal_calc
gdal_tif_list.append('-'+letter)
# pull the tif name
tif = tifs_for_date[i]
# add each tif name to the list to be used in gdal_calc
gdal_tif_list.append('"'+tif+'"')
# add the variable to the calc input for gdal_calc
if i==0:
# for first tif, it will be like: --calc="(A
calc= calc +letter
else:
# for second tif and onwards, keep adding each letter like: --calc="(A+B
calc = calc+'+'+letter
# calculate the number of tifs we are averaging
num_tifs = len(tifs_for_date)
# finish creating calc input
# since we are trying to find average, the algorithm is: (sum all tifs/number of tifs)*(conversion factor for corresponding variable)
calc= calc + ')*{}/{}"'.format(CONVERSION_FACTORS[var], num_tifs)
# generate a file name for the daily average tif
result_tif = DATA_DIR+'/'+FILENAME.format(period=period, metric=METRIC_BY_COMPOUND[var], var=var, date=date)+'.tif'
# create the gdal command to calculate the average by putting it all together
cmd = ('gdal_calc.py {} --outfile="{}" {}').format(' '.join(gdal_tif_list), result_tif, calc)
# using gdal from command line from inside python
subprocess.check_output(cmd, shell=True)
return result_tif
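# Illustrative example (assumed file names and a hypothetical conversion factor of 1e9):
# for three hourly tifs, daily_avg builds a gdal_calc.py command of the form:
# gdal_calc.py -A "t00.tif" -B "t01.tif" -C "t02.tif" --outfile="avg.tif" --calc="(A+B+C)*1e9/3"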
def daily_max(date, var, period, tifs_for_date):
'''
Calculate a daily maximum tif file from all the hourly tif files
INPUT date: date we want to calculate the daily maximum for, in the format YYYY-MM-DD (string)
var: variable for which we are taking the daily maximum (string)
period: period for which we are calculating metric, historical or forecast (string)
tifs_for_date: list of file names for tifs that were created from downloaded netcdfs (list of strings)
RETURN result_tif: file name for tif file created after finding the max from all the input tifs (string)
'''
# create a list to store the tifs and variable names to be used in gdal_calc
gdal_tif_list=[]
# go through each hour in the day to find the maximum
for i in range(len(tifs_for_date)):
# generate a letter variable for that tif to use in gdal_calc
letter = ascii_uppercase[i]
# add each letter to the list of tifs to be used in gdal_calc
gdal_tif_list.append('-'+letter)
# pull the tif name
tif = tifs_for_date[i]
# add each tif name to the list to be used in gdal_calc
gdal_tif_list.append('"'+tif+'"')
#add the variable to the calc input for gdal_calc
if i==0:
calc= letter
else:
# set up calc input for gdal_calc to find the maximum from all tifs
calc = 'maximum('+calc+','+letter+')'
# finish creating calc input
calc= '--calc="'+calc + '*{}"'.format(CONVERSION_FACTORS[var])
#generate a file name for the daily maximum tif
result_tif = DATA_DIR+'/'+FILENAME.format(period=period, metric=METRIC_BY_COMPOUND[var], var=var, date=date)+'.tif'
# create the gdal command to calculate the maximum by putting it all together
cmd = ('gdal_calc.py {} --outfile="{}" {}').format(' '.join(gdal_tif_list), result_tif, calc)
# using gdal from command line from inside python
subprocess.check_output(cmd, shell=True)
return result_tif
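# Illustrative example (assumed file names and a hypothetical conversion factor of 1e9):
# for three hourly tifs, daily_max nests the expression as maximum(maximum(A,B),C), e.g.:
# gdal_calc.py -A "t00.tif" -B "t01.tif" -C "t02.tif" --outfile="max.tif" --calc="maximum(maximum(A,B),C)*1e9"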
def processNewData(var, all_files, files_by_date, period, assets_to_delete):
'''
Process and upload clean new data
INPUT var: variable that we are processing data for (string)
all_files: list of file names for netcdfs that have been downloaded (list of strings)
files_by_date: dictionary of netcdf file names along with the date for which they were downloaded (dictionary of strings)
period: period for which we want to process the data, historical or forecast (string)
assets_to_delete: list of old assets to delete (list of strings)
RETURN assets: list of GEE asset names for the new data that was uploaded (list of strings)
'''
# if the list of files is empty, do nothing; otherwise, process the data
if all_files:
# create an empty list to store the names of the tifs we generate
tifs = []
# create an empty list to store the names we want to use for the GEE assets
assets=[]
# create an empty list to store the list of dates from the averaged or maximum tifs
dates = []
# create an empty list to store the list of datetime objects from the averaged or maximum tifs
datestamps = []
# loop over each downloaded netcdf file
for date, files in files_by_date.items():
logging.info('Converting files')
# Convert new files from netcdf to tif files
hourly_tifs = convert(files, var, period)
# take relevant metric (daily average or maximum) of hourly tif files for days we have pulled
metric = METRIC_BY_COMPOUND[var]
tif = globals()[metric](date, var, period, hourly_tifs)
# add the averaged or maximum tif file to the list of files to upload to GEE
tifs.append(tif)
# Get a list of the names we want to use for the assets once we upload the files to GEE
assets.append(getAssetName(date, period, var))
# get new list of date strings (in case order is different) from the processed tifs
dates.append(getDateTimeString(tif))
# generate datetime objects for each tif date
datestamps.append(datetime.datetime.strptime(date, DATE_FORMAT))
# delete old assets (none for historical)
for asset in assets_to_delete:
ee.data.deleteAsset(asset)
logging.info(f'Deleting {asset}')
logging.info('Uploading files:')
for asset in assets:
logging.info(os.path.split(asset)[1])
# Upload new files (tifs) to GEE
eeUtil.uploadAssets(tifs, assets, GS_FOLDER, datestamps, timeout=3000)
return assets
#if no new assets, return empty list
else:
return []
def checkCreateCollection(VARS, period):
'''
List assets in collection if it exists, else create new collection
INPUT VARS: list of variables (as named in the netcdf) that we want to check collections for (list of strings)
period: period we are checking assets for, historical or forecast (string)
RETURN existing_dates_all_vars: list of dates, in the format of the DATE_FORMAT variable, that exist for all variable collections in GEE (list of strings)
existing_dates_by_var: list of dates, in the format of the DATE_FORMAT variable, that exist for each individual variable collection in GEE (list containing list of strings for each variable)
'''
# create a master list (not variable-specific) to store the dates for which all variables already have data
existing_dates = []
# create an empty list to store the dates that we currently have for each AQ variable
# will be used in case the previous script run crashed before completing the data upload for every variable.
existing_dates_by_var = []
# loop through each variable that we want to pull
for var in VARS:
# For one of the variables, get the date of the most recent dataset
# All variables come from the same file
# If we have one for a particular date, we should have them all
collection = getCollectionName(period, var)
# Check if folder to store GEE collections exists. If not, create it.
# we will make one collection per variable, all stored in the parent folder for the dataset
parent_folder = PARENT_FOLDER.format(metric=METRIC_BY_COMPOUND[var], period=period)
if not eeUtil.exists(parent_folder):
logging.info('{} does not exist, creating'.format(parent_folder))
eeUtil.createFolder(parent_folder)
# If the GEE collection for a particular variable exists, get a list of existing assets
if eeUtil.exists(collection):
existing_assets = eeUtil.ls(collection)
# get a list of the dates from these existing assets
dates = [getDate_GEE(a) for a in existing_assets]
# append this list of dates to our list of dates by variable
existing_dates_by_var.append(dates)
# for each of the dates that we have for this variable, append the date to the master list
# list of which dates we already have data for (if it isn't already in the list)
for date in dates:
if date not in existing_dates:
existing_dates.append(date)
#If the GEE collection does not exist, append an empty list to our list of dates by variable
else:
existing_dates_by_var.append([])
# create a collection for this variable
logging.info('{} does not exist, creating'.format(collection))
eeUtil.createFolder(collection, True)
'''
We want to make sure all variables correctly uploaded the data on the last run. To do this, we will
check that we have the correct number of appearances of the data in our GEE collection. If we do
not, we will want to re-upload this date's data.
'''
# Create a copy of the master list of dates that will store the dates that were properly uploaded for all variables.
existing_dates_all_vars = copy.copy(existing_dates)
for date in existing_dates:
#check how many times each date appears in our list of dates by variable
date_count = sum(x.count(date) for x in existing_dates_by_var)
# If this count is less than the number of variables we have, one of the variables did not finish
# upload for this date, and we need to re-upload this file.
if date_count < len(VARS):
#remove this from the list of existing dates for all variables
existing_dates_all_vars.remove(date)
return existing_dates_all_vars, existing_dates_by_var
def deleteExcessAssets(collection, all_assets, max_assets):
'''
Delete oldest assets, if more than specified in max_assets variable
INPUT collection: GEE collection in which the asset is located (string)
all_assets: list of all the assets currently in the GEE collection (list of strings)
max_assets: maximum number of assets allowed in the collection (int)
'''
# if we have more assets than allowed,
if len(all_assets) > max_assets:
# sort the list of dates so that the oldest is first
all_assets.sort()
logging.info('Deleting excess assets.')
# go through each asset, starting with the oldest, and delete until we only have the max number of assets left
for asset in all_assets[:-max_assets]:
eeUtil.removeAsset(collection +'/'+ asset)
def get_most_recent_date(all_assets):
'''
Get most recent data we have assets for
INPUT all_assets: list of all the assets currently in the GEE collection (list of strings)
RETURN most_recent_date: most recent date in GEE collection (datetime)
'''
# sort these dates oldest to newest
all_assets.sort()
# get the most recent date (last in the list) and turn it into a datetime
most_recent_date = datetime.datetime.strptime(all_assets[-1][-10:], DATE_FORMAT)
return most_recent_date
def clearCollectionMultiVar(period):
'''
Clear the GEE collection for all variables
INPUT period: period we are clearing collection for, historical or forecast (string)
'''
logging.info('Clearing collections.')
for var_num in range(len(VARS)):
# get name of variable we are clearing GEE collections for
var = VARS[var_num]
# get name of GEE collection for variable
collection = getCollectionName(period, var)
# if the collection exists,
if eeUtil.exists(collection):
# remove the / from the beginning of the collection name to be used in ee module
if collection[0] == '/':
collection = collection[1:]
# pull the image collection
a = ee.ImageCollection(collection)
# check how many assets are in the collection
collection_size = a.size().getInfo()
# if there are assets in the collection
if collection_size > 0:
# create a list of assets in the collection
asset_list = a.toList(collection_size)
# delete each asset
for item in asset_list.getInfo():
ee.data.deleteAsset(item['id'])
def listAllCollections(var, period):
'''
Get list of all assets in a collection
INPUT var: variable we are checking collection for (string)
period: period we are checking collection for, historical or forecast (string)
RETURN all_assets: list of all the assets currently in the collection (list of strings)
'''
all_assets = []
collection = getCollectionName(period, var)
if eeUtil.exists(collection):
if collection[0] == '/':
collection = collection[1:]
a = ee.ImageCollection(collection)
collection_size = a.size().getInfo()
if collection_size > 0:
asset_list = a.toList(collection_size)
for item in asset_list.getInfo():
all_assets.append(item['id'])
return all_assets
def initialize_ee():
'''
Initialize eeUtil and ee modules
'''
# get GEE credentials from env file
GEE_JSON = os.environ.get("GEE_JSON")
_CREDENTIAL_FILE = 'credentials.json'
GEE_SERVICE_ACCOUNT = os.environ.get("GEE_SERVICE_ACCOUNT")
with open(_CREDENTIAL_FILE, 'w') as f:
f.write(GEE_JSON)
auth = ee.ServiceAccountCredentials(GEE_SERVICE_ACCOUNT, _CREDENTIAL_FILE)
ee.Initialize(auth)
def create_headers():
'''
Create headers to perform authorized actions on API
'''
return {
'Content-Type': "application/json",
'Authorization': "{}".format(os.getenv('apiToken')),
}
def pull_layers_from_API(dataset_id):
'''
Pull dictionary of current layers from API
INPUT dataset_id: Resource Watch API dataset ID (string)
RETURN layer_dict: dictionary of layers (dictionary of strings)
'''
# generate url to access layer configs for this dataset in back office
rw_api_url = 'https://api.resourcewatch.org/v1/dataset/{}/layer'.format(dataset_id)
# request data
r = requests.get(rw_api_url)
# convert response into json and make dictionary of layers
layer_dict = json.loads(r.content.decode('utf-8'))['data']
return layer_dict
def update_layer(var, period, layer, new_date):
'''
Update layers in Resource Watch back office.
INPUT var: variable for which we are updating layers (string)
period: period we are updating layers for, historical or forecast (string)
layer: layer that will be updated (string)
new_date: date of asset to be shown in this layer, in the format of the DATE_FORMAT variable (string)
'''
# get name of asset - drop first / in string or asset won't be pulled into RW
asset = getAssetName(new_date, period, var)[1:]
# get the date of the asset currently being shown in this layer
old_date = getDate_GEE(layer['attributes']['layerConfig']['assetId'])
# convert to datetime
old_date_dt = datetime.datetime.strptime(old_date, DATE_FORMAT)
# convert the old date to the text format used in the layer name
old_date_text = old_date_dt.strftime("%B %-d, %Y")
# get text for new date
new_date_dt = datetime.datetime.strptime(new_date, DATE_FORMAT)
new_date_text = new_date_dt.strftime("%B %-d, %Y")
# replace date in layer's title with new date
layer['attributes']['name'] = layer['attributes']['name'].replace(old_date_text, new_date_text)
# replace the asset id in the layer def with new asset id
layer['attributes']['layerConfig']['assetId'] = asset
# replace the asset id in the interaction config with new asset id
old_asset = getAssetName(old_date, period, var)[1:]
layer['attributes']['interactionConfig']['config']['url'] = layer['attributes']['interactionConfig']['config']['url'].replace(old_asset,asset)
# send patch to API to replace layers
# generate url to patch layer
rw_api_url_layer = "https://api.resourcewatch.org/v1/dataset/{dataset_id}/layer/{layer_id}".format(
dataset_id=layer['attributes']['dataset'], layer_id=layer['id'])
# create payload with new title and layer configuration
payload = {
'application': ['rw'],
'layerConfig': layer['attributes']['layerConfig'],
'name': layer['attributes']['name'],
'interactionConfig': layer['attributes']['interactionConfig']
}
# patch API with updates
r = requests.request('PATCH', rw_api_url_layer, data=json.dumps(payload), headers=create_headers())
# check response
if r.ok:
logging.info('Layer replaced: {}'.format(layer['id']))
else:
logging.error('Error replacing layer: {} ({})'.format(layer['id'], r.status_code))
def updateResourceWatch(new_dates_historical, new_dates_forecast):
'''
This function should update Resource Watch to reflect the new data.
This may include updating the 'last update date', flushing the tile cache, and updating any dates on layers
INPUT new_dates_historical: list of dates for historical assets added to GEE, in the format of the DATE_FORMAT variable (list of strings)
new_dates_forecast: list of dates for forecast assets added to GEE, in the format of the DATE_FORMAT variable (list of strings)
'''
# Update the dates on layer legends
if new_dates_historical and new_dates_forecast:
logging.info('Updating Resource Watch Layers')
for var, ds_id in DATASET_IDS.items():
logging.info('Updating {}'.format(var))
# pull dictionary of current layers from API
layer_dict = pull_layers_from_API(ds_id)
# go through each layer, pull the definition and update
for layer in layer_dict:
# check which point on the timeline this is
order = layer['attributes']['layerConfig']['order']
# if this is the first point on the timeline, we want to replace it with the most recent historical data
if order==0:
# get date of most recent asset added
date = new_dates_historical[-1]
# replace layer asset and title date with new
update_layer(var, 'historical', layer, date)
# otherwise, we want to replace it with the appropriate forecast data
else:
# forecast layers start at order 1, and we will want this point on the timeline to be the first forecast asset
# order 2 will be the second asset, and so on
# get date of appropriate asset
date = new_dates_forecast[order-1]
# replace layer asset and title date with new
update_layer(var, 'forecast', layer, date)
elif not new_dates_historical and not new_dates_forecast:
logging.info('Layers do not need to be updated.')
else:
if not new_dates_historical:
logging.error('Historical data was not updated, but forecast was.')
if not new_dates_forecast:
logging.error('Forecast data was not updated, but historical was.')
# Update Last Update Date and flush tile cache on RW
for var_num in range(len(VARS)):
var = VARS[var_num]
# specify GEE collection name
collection = getCollectionName('historical', var)
# get a list of assets in the collection
existing_assets = eeUtil.ls(collection)
try:
# Get the most recent date from the data in the GEE collection
most_recent_date = get_most_recent_date(existing_assets)
# Get the current 'last update date' from the dataset on Resource Watch
current_date = getLastUpdate(DATASET_IDS[var])
# If the most recent date from the GEE collection does not match the 'last update date' on the RW API, update it
if current_date != most_recent_date: #comment for testing
logging.info('Updating last update date and flushing cache.')
# Update dataset's last update date on Resource Watch
lastUpdateDate(DATASET_IDS[var], most_recent_date)
# get layer ids and flush tile cache for each
layer_ids = getLayerIDs(DATASET_IDS[var])
for layer_id in layer_ids:
flushTileCache(layer_id)
except KeyError:
continue
def delete_local(ext=None):
'''
This function will delete local files in the Docker container with a specific extension, if specified.
If no extension is specified, all local files will be deleted.
INPUT ext: optional, file extension for files you want to delete, ex: '.tif' (string)
'''
try:
if ext:
files = [file for file in os.listdir(DATA_DIR) if file.endswith(ext)]
else:
files = os.listdir(DATA_DIR)
for f in files:
logging.info('Removing {}'.format(f))
os.remove(DATA_DIR+'/'+f)
except NameError:
logging.info('No local files to clean.')
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logging.info('STARTING')
# Initialize eeUtil and ee modules
eeUtil.initJson()
initialize_ee()
'''
Process Historical Data
'''
logging.info('Starting Historical Data Processing')
period = 'historical'
# Clear collection in GEE if desired
if CLEAR_COLLECTION_FIRST:
clearCollectionMultiVar(period)
# Check if collection exists. If not, create it.
# Return a list of dates that exist for all variables collections in GEE (existing_dates),
# as well as a list of which dates exist for each individual variable (existing_dates_by_var).
# The latter will be used to determine if the previous script run crashed before completing the data upload for every variable.
logging.info('Getting existing dates.')
existing_dates, existing_dates_by_var = checkCreateCollection(VARS, period)
# Get a list of the dates that are available, minus the ones we have already uploaded correctly for all variables.
logging.info('Getting new dates to pull.')
new_dates_historical = getNewDatesHistorical(existing_dates)
# Fetch new files
logging.info('Fetching files for {}'.format(new_dates_historical))
files, files_by_date = fetch(new_dates_historical, SOURCE_URL_HISTORICAL, period='historical')
# Process historical data, one variable at a time
for var_num in range(len(VARS)):
logging.info('Processing {}'.format(VARS[var_num]))
# get variable name
var = VARS[var_num]
# Process new data files, don't delete any historical assets
new_assets_historical = processNewData(var, files, files_by_date, period='historical', assets_to_delete=[])
logging.info('Previous assets for {}: {}, new: {}, max: {}'.format(var, len(existing_dates_by_var[var_num]), len(new_dates_historical), MAX_ASSETS))
# Delete extra assets, past our maximum number allowed that we have set
# get list of existing assets in current variable's GEE collection
existing_assets = eeUtil.ls(getCollectionName(period, var))
# make list of all assets by combining existing assets with new assets
all_assets_historical = np.sort(np.unique(existing_assets + [os.path.split(asset)[1] for asset in new_assets_historical]))
# delete the excess assets
deleteExcessAssets(getCollectionName(period, var), all_assets_historical, MAX_ASSETS)
logging.info('SUCCESS for {}'.format(var))
# Delete local tif files because we will run out of space
delete_local(ext='.tif')
# Delete local netcdf files
delete_local()
'''
Process Forecast Data
'''
logging.info('Starting Forecast Data Processing')
period = 'forecast'
# Clear collection in GEE if desired
if CLEAR_COLLECTION_FIRST:
clearCollectionMultiVar(period)
# Check if collection exists. If not, create it.
# Return a list of dates that exist for all variables collections in GEE (existing_dates),
# as well as a list of which dates exist for each individual variable (existing_dates_by_var).
# The latter will be used to determine if the previous script run crashed before completing the data upload for every variable.
logging.info('Getting existing dates.')
existing_dates, existing_dates_by_var = checkCreateCollection(VARS, period)
# Get a list of the dates that are available, minus the ones we have already uploaded correctly for all variables.
logging.info('Getting new dates to pull.')
new_dates_forecast = getNewDatesForecast(existing_dates)
# Fetch new files
logging.info('Fetching files for {}'.format(new_dates_forecast))
files, files_by_date = fetch(new_dates_forecast, SOURCE_URL_FORECAST, period='forecast')
# Process forecast data, one variable at a time
for var_num in range(len(VARS)):
logging.info('Processing {}'.format(VARS[var_num]))
# get variable name
var = VARS[var_num]
# Process new data files, delete all forecast assets currently in collection
new_assets_forecast = processNewData(var, files, files_by_date, period='forecast', assets_to_delete=listAllCollections(var, period))
logging.info('New assets for {}: {}, max: {}'.format(var, len(new_dates_forecast), MAX_ASSETS))
logging.info('SUCCESS for {}'.format(var))
# Delete local tif files because we will run out of space
delete_local(ext='.tif')
# Delete local netcdf files
delete_local()
# Update Resource Watch
updateResourceWatch(new_dates_historical, new_dates_forecast)
logging.info('SUCCESS')
|
#!/usr/bin/env python3
import logging
import time
from acceptance.common.log import LogExec, initLog
from acceptance.common import test
test.TEST_NAME = "leader_failure"
logger = logging.getLogger(__name__)
class Test(test.Base):
"""
Test that we can kill the patroni leader node and the cluster will fail over to the second node.
"""
@Test.subcommand("run")
class TestRun(test.Base):
util = test.Util(None)
@LogExec(logger, "run")
def main(self):
self.util = test.Util(self.dc)
initial_leader, initial_replica = self.util.initial_check()
self.test_switch_leader(initial_leader, 2)
time.sleep(10) # let the cluster stabilize again.
self.test_switch_leader(initial_replica, 3)
def test_switch_leader(self, current_leader: str, expected_val):
modify_idx, _ = self.util.leader_info()
old_leader = current_leader
logger.info("Killing %s", old_leader)
self.dc('kill', old_leader)
new_leader = self.util.wait_for_leader(modify_idx)
self.util.test_write_to(new_leader, expected_val)
logger.info("Restart %s", old_leader)
self.dc('up', '-d', old_leader)
self.util.test_read_from(self.util.wait_for_replica(), str(expected_val))
if __name__ == "__main__":
initLog()
Test()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from pprint import pprint as print
import os
import sys  # used below for sys.float_info and sys.argv
import re
import datetime
import struct
from math import ceil
from ctypes import *
from functools import wraps, partial
from itertools import chain, islice
from bisect import bisect
try:
import numpy as np
except ImportError:
class np: nan = float("nan")
from savReaderWriter import *
from error import *
from helpers import *
from py3k import *
# TODO:
# pytables integration
# numba.jit
# function to easily read mmapped array back in
class SavReaderNp(SavReader):
"""
Read SPSS .sav file data into a numpy array (either in-memory or mmap)
Parameters
----------
savFileName : str
The file name of the spss data file
recodeSysmisTo : value
Indicates to which value missing values should be recoded
rawMode : bool
Set to ``True`` to get faster processing speeds. ``rawMode=False``
indicates:
* that trailing blanks will be stripped off of string values
* that datetime variables (if present) will be converted into
``datetime.datetime`` objects,
* that SPSS `$sysmis` values will be converted into
`recodeSysmisTo` (default ``np.nan``, except for datetimes).
ioUtf8 : bool
Indicates the mode in which text is communicated to and from
the I/O module. Valid values are True (UTF-8 mode, aka
Unicode mode) and False (Codepage mode). Cf. `SET UNICODE=ON/OFF`
ioLocale : locale str
indicates the locale of the I/O module. Cf. `SET LOCALE`.
(default = None, which corresponds to `locale.setlocale(locale.LC_ALL, "")`).
For example, `en_US.UTF-8`.
Examples
--------
Typical use::
# memmapped array, omit filename to use in-memory array
reader_np = SavReaderNp("Employee data.sav")
array = reader_np.to_structured_array("/tmp/test.dat")
reader_np.close()
Note. The sav-to-array conversion is MUCH faster when uncompressed .sav
files are used. These are created with the SPSS command::
SAVE OUTFILE = 'some_file.sav' /UNCOMPRESSED.
This is NOT the default in SPSS.
See also
--------
savReaderWriter.SavWriter : use `_uncompressed.sav` savFileName
suffix to write uncompressed files"""
def __init__(self, savFileName, recodeSysmisTo=np.nan, rawMode=False,
ioUtf8=False, ioLocale=None):
super(SavReaderNp, self).__init__(savFileName,
ioUtf8=ioUtf8, ioLocale=ioLocale)
self.savFileName = savFileName
self.recodeSysmisTo = recodeSysmisTo
self.rawMode = rawMode
self.ioUtf8 = ioUtf8
self.ioLocale = ioLocale
self.caseBuffer = self.getCaseBuffer()
self.unpack = self.getStruct(self.varTypes, self.varNames).unpack_from
self._init_funcs()
self.gregorianEpoch = datetime.datetime(1582, 10, 14, 0, 0, 0)
self.do_convert_datetimes = True
self.nrows, self.ncols = self.shape
if self._is_uncompressed:
self.sav = open(self.savFileName, "rb")
self.__iter__ = self._uncompressed_iter
self.to_ndarray = self._uncompressed_to_ndarray
self.to_structured_array = self._uncompressed_to_structured_array
def _items(self, start, stop, step):
"""Helper function for __getitem__"""
for case in xrange(start, stop, step):
self.seekNextCase(self.fh, case)
self.wholeCaseIn(self.fh, byref(self.caseBuffer))
record = np.fromstring(self.caseBuffer, self.struct_dtype)
yield record
def convert_datetimes(func):
"""Decorator to convert all the SPSS datetimes into datetime.datetime
values. Missing datetimes are converted into the value
`datetime.datetime(1, 1, 1, 0, 0, 0)`"""
@wraps(func)
def _convert_datetimes(self, *args):
#print("@convert_datetimes called by: %s" % func.__name__)
array = func(self, *args)
if (self.rawMode or not self.datetimevars or not \
self.do_convert_datetimes):
return array
# calculate count so fromiter can pre-allocate
count = self.nrows if not args else -1
if len(args) == 1 and isinstance(args[0], slice):
start, stop, step = args[0].indices(self.nrows)
count = (stop - start) // step
# now fill the array with datetimes
dt_array = array.astype(self.datetime_dtype)
for varName in self.uvarNames:
if varName not in self.datetimevars:
continue
datetimes = (self.spss2datetimeDate(dt) for dt in array[varName])
dt_array[varName] = np.fromiter(datetimes, "datetime64[us]", count)
return dt_array
return _convert_datetimes
def convert_missings(func):
"""Decorator to recode numerical missing values into `recodeSysmisTo`
(default: `np.nan`), unless they are datetimes"""
@wraps(func)
def _convert_missings(self, *args):
array = func(self, *args)
cutoff = -sys.float_info.max
sysmis = self.recodeSysmisTo
is_to_structured_array = func.__name__.endswith('to_structured_array')
if self.rawMode:
return array
elif self.is_homogeneous and not is_to_structured_array:
array[:] = np.where(array <= cutoff, sysmis, array)
else:
for v in self.uvarNames:
if v in self.datetimevars or self.uvarTypes[v]:
continue
array[v] = np.where(array[v] <= cutoff, sysmis, array[v])
if hasattr(array, "flush"): # memmapped
array.flush()
return array
return _convert_missings
@convert_datetimes
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y], where y may be int or slice
Parameters
----------
key : int, slice
Returns
-------
record : numpy.ndarray
Raises
-------
IndexError, TypeError
"""
is_slice = isinstance(key, slice)
is_index = isinstance(key, int)
if is_slice:
start, stop, step = key.indices(self.nrows)
records = (item for item in self._items(start, stop, step))
count = (stop - start) // step
record = np.fromiter(iter(records), self.struct_dtype, count)
elif is_index:
if key >= self.nrows or key < -self.nrows:
raise IndexError("index out of bounds")
key = self.nrows + key if key < 0 else key
self.seekNextCase(self.fh, key)
self.wholeCaseIn(self.fh, self.caseBuffer)
record = np.fromstring(self.caseBuffer, self.struct_dtype)
else:
raise TypeError("slice or int required")
# rewind for possible subsequent call to __iter__
self.seekNextCase(self.fh, 0)
return record
def __iter__(self):
"""x.__iter__() <==> iter(x). Yields records as a tuple.
If `rawMode=True`, trailing spaces of strings are not removed
and SPSS dates are not converted into `datetime` dates
Returns
-------
record : tuple
Raises
-------
SPSSIOError
"""
varNames = self.uvarNames
varTypes = self.uvarTypes
datetimevars = self.datetimevars
shortcut = self.rawMode or not self.do_convert_datetimes or \
not datetimevars
for row in xrange(self.nrows):
self.wholeCaseIn(self.fh, self.caseBuffer)
record = self.unpack(self.caseBuffer)
if shortcut:
yield record
continue
yield tuple([self.spss2datetimeDate(value) if v in datetimevars else
value.rstrip() if varTypes[v] else value for value, v
in izip(record, varNames)])
def _init_funcs(self):
"""Helper to initialize C functions of the SPSS I/O module: set their
argtypes and _errcheck attributes"""
self.seekNextCase = self.spssio.spssSeekNextCase
self.seekNextCase.argtypes = [c_int, c_long]
self.seekNextCase._errcheck = self._errcheck
self.record_size = sizeof(self.caseBuffer)
self.wholeCaseIn = self.spssio.spssWholeCaseIn
self.wholeCaseIn.argtypes = [c_int, POINTER(c_char * self.record_size)]
self.wholeCaseIn._errcheck = self._errcheck
def _errcheck(self, retcode, func, arguments):
"""Checks for return codes > 0 when calling C functions of the
SPSS I/O module"""
if retcode > 0:
error = retcodes.get(retcode, retcode)
msg = "function %r with arguments %r throws error: %s"
msg = msg % (func.__name__, arguments, error)
raise SPSSIOError(msg, retcode)
@memoized_property
def uvarNames(self):
"""Returns a list of variable names, as unicode strings"""
if self.ioUtf8: return self.varNames
return [v.decode(self.encoding) for v in self.varNames]
@memoized_property
def uvarTypes(self):
"""Returns a dictionary of variable names, as unicode strings (keys)
and variable types (values, int). Variable type == 0 indicates
numerical values, other values indicate the string length in bytes"""
if self.ioUtf8: return self.varTypes
return {v.decode(self.encoding): t for v, t in self.varTypes.items()}
@memoized_property
def uformats(self):
"""Returns a dictionary of variable names (keys) and SPSS formats
(values), both as unicode strings"""
if self.ioUtf8: return self.formats
encoding = self.encoding
return {v.decode(encoding): fmt.decode(encoding) for
v, fmt in self.formats.items()}
@memoized_property
def datetimevars(self):
"""Returns a list of the datetime variable nanes (as unicode strings)
in the dataset, if any"""
return [varName for varName in self.uvarNames if
re.search("date|time", self.uformats[varName], re.I)]
@memoized_property
def _titles(self):
"""Helper function that uses varLabels to get the titles for a dtype.
If no varLabels are available, varNames are used instead"""
titles = [self.varLabels[v] if self.varLabels[v] else
bytez("col_%03d" % col) for col, v in
enumerate(self.varNames)]
return [title.decode(self.encoding) if not
isinstance(title, unicode) else title for title in titles]
@memoized_property
def is_homogeneous(self):
"""Returns boolean that indicates whether the dataset contains only
numerical variables (datetimes excluded). If `rawMode=True`, datetimes
are also considered numeric. A dataset with string variables of equal
length is not considered to be homogeneous"""
is_all_numeric = bool( not max(list(self.varTypes.values())) )
if self.rawMode:
return is_all_numeric
return is_all_numeric and not self.datetimevars
@memoized_property
def struct_dtype(self):
"""Get the dtype that is used to unpack the binary record
Returns
-------
struct dtype : numpy.dtype (complex dtype if heterogeneous data,
simple dtype otherwise). A complex dtype uses `varNames` as
names and `varLabels` (if any) as titles (fields)."""
if self.is_homogeneous:
byteorder = u"<" if self.byteorder == u"little" else u">"
return np.dtype(byteorder + u"d")
fmt8 = lambda varType: int(ceil(varType / 8.) * 8)
varTypes = [self.varTypes[varName] for varName in self.varNames]
byteorder = u"<" if self.byteorder == "little" else u">"
formats = [u"a%d" % fmt8(t) if t else u"%sd" %
byteorder for t in varTypes]
obj = dict(names=self.uvarNames, formats=formats, titles=self._titles)
return np.dtype(obj)
@memoized_property
def trunc_dtype(self):
"""Returns the numpy dtype using the SPSS display formats
The following spss-format to numpy-dtype conversions are made:
+------------+------------------+
| spss | numpy |
+============+==================+
| <= `F2` | `float16` (`f2`) |
+------------+------------------+
| `F3`-`F5` | `float32` (`f4`) |
+------------+------------------+
| >= `F5` | `float64` (`f8`) |
+------------+------------------+
| (datetime) | `float64` (`f8`)*|
+------------+------------------+
| A1 >= | `S1` >= (`a1`) |
+------------+------------------+
*) Subsequently converted to `datetime.datetime` unless
`rawMode=True`. Examples of SPSS datetime display formats are `SDATE`,
`EDATE`, `ADATE`, `JDATE` and `TIME`.
Note that all numerical values are stored in SPSS files as double
precision floats. The SPSS display formats are used to create a more
compact dtype. Datetime formats are never shrunk to a more compact
format. In the table above, only F and A formats are displayed, but
other numerical (e.g. `DOLLAR`) or string (`AHEX`) are treated the
same way, e.g. `DOLLAR5.2` will become `float64`.
Returns
-------
truncated dtype : numpy.dtype (complex dtype)
See also
--------
:ref:`formats` : overview of SPSS display formats
:ref:`dateformats` : overview of SPSS datetime formats
"""
#if self.is_homogeneous:
# return self.struct_dtype
dst_fmts = [u"f2", u"f4", u"f8", u"f8"]
get_dtype = lambda src_fmt: dst_fmts[bisect([2, 5, 8], src_fmt)]
widths = [int(re.search(u"\d+", self.uformats[v]).group(0))
for v in self.uvarNames]
formats = [u'a%s' % widths[i] if self.uvarTypes[v] else u"f8" if
v in self.datetimevars else get_dtype(widths[i]) for
i, v in enumerate(self.uvarNames)]
obj = dict(names=self.uvarNames, formats=formats, titles=self._titles)
return np.dtype(obj)
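# Worked example (illustrative, based on the mapping above): a variable with SPSS
# format F8.2 becomes 'f8', F4.0 becomes 'f4', A25 becomes 'a25', and EDATE40
# (a datetime display format) stays 'f8' so it can later be converted to datetime64.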
@memoized_property
def datetime_dtype(self):
"""Return the modified dtype in order to accomodate `datetime.datetime`
values that were originally datetimes, stored as floats, in the SPSS
file
Returns
-------
datetime dtype : numpy.dtype (complex dtype)
"""
if not self.datetimevars:
return self.trunc_dtype
formats = ["datetime64[us]" if name in self.datetimevars else
fmt for (title, name), fmt in self.trunc_dtype.descr]
obj = dict(names=self.uvarNames, formats=formats, titles=self._titles)
return np.dtype(obj)
@memoize
def spss2datetimeDate(self, spssDateValue):
"""Convert an SPSS datetime into a ``datetime.datetime`` object
Parameters
----------
spssDateValue : float, int
Returns
-------
datetime : datetime.datetime; errors and missings are returned as
``datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)``
See also
--------
savReaderWriter.SavReader.spss2strDate : convert SPSS datetime into
a datetime string
:ref:`dateformats` : overview of SPSS datetime formats
"""
try:
theDate = self.gregorianEpoch + \
datetime.timedelta(seconds=spssDateValue)
#theDate = np.datetime64(theDate)
return theDate
except (OverflowError, TypeError, ValueError):
return datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
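# Worked example (illustrative): SPSS stores datetimes as seconds since the Gregorian
# epoch 1582-10-14 00:00:00, so a stored value of 11654150400.0 converts to
# datetime.datetime(1952, 2, 3, 0, 0, 0).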
# ---- functions that deal with uncompressed .sav files ----
@memoized_property
def _is_uncompressed(self):
"""Returns True if the .sav file was not compressed at all, False
otherwise (i.e., neither standard, nor zlib compression was used)."""
return self.fileCompression == b"uncompressed"
def _uncompressed_iter(self):
"""Faster version of __iter__ that can only be used with
uncompressed .sav files"""
self.sav.seek(self._offset)
for case in xrange(self.nrows):
yield self.unpack(self.sav.read(self.record_size))
@property
def _offset(self):
"""Returns the position of the type 999 record, which indicates the
end of the metadata and the start of the case data"""
unpack_int = lambda value: struct.unpack("i", value)
i = 0
while True:
self.sav.seek(i)
try:
code = unpack_int(self.sav.read(4))
except struct.error:
pass
i += 1
end_of_metadata = code == (999,)
if end_of_metadata:
self.sav.read(4)
return self.sav.tell()
@convert_datetimes
@convert_missings
def _uncompressed_to_structured_array(self, filename=None):
"""Read an uncompressed .sav file and return as a structured array"""
if not self._is_uncompressed:
raise ValueError("Only uncompressed files can be used")
self.sav.seek(self._offset)
if filename:
array = np.memmap(filename, self.trunc_dtype, 'w+', shape=self.nrows)
array[:] = np.fromfile(self.sav, self.trunc_dtype, self.nrows)
else:
array = np.fromfile(self.sav, self.trunc_dtype, self.nrows)
return array
@convert_missings
def _uncompressed_to_ndarray(self, filename=None):
"""Read an uncompressed .sav file and return as an ndarray"""
if not self._is_uncompressed:
raise ValueError("Only uncompressed files can be used")
if not self.is_homogeneous:
raise ValueError("Need only floats and no datetimes in dataset")
self.sav.seek(self._offset)
count = np.prod(self.shape)
if filename:
array = np.memmap(filename, float, 'w+', shape=count)
array[:] = np.fromfile(self.sav, float, count)
else:
array = np.fromfile(self.sav, float, count)
return array.reshape(self.shape)
# ------------------------------------------------------------------------
@convert_datetimes
@convert_missings
def to_structured_array(self, filename=None):
"""Return the data in <savFileName> as a structured array, optionally
using <filename> as a memmapped file.
Parameters
----------
filename : str, optional
The filename for the memory mapped array. If omitted,
the array will be in-memory
Returns
-------
array : numpy.ndarray (if `filename=None`) or numpy.core.memmap.memmap
The array has a complex dtype, i.e. is a structured array. If
defined, `varLabels` may also be used to retrieve columns
Examples
--------
For example::
reader_np = SavReaderNp("./test_data/Employee data.sav")
array = reader_np.to_structured_array()
mean_salary = array["salary"].mean().round(2)
mean_salary == array["Current Salary"].mean().round(2) # True
first_record = array[0]
reader_np.close()
See also
--------
savReaderWriter.SavReaderNp.to_ndarray
"""
self.do_convert_datetimes = False # no date conversion in __iter__
if filename:
array = np.memmap(filename, self.trunc_dtype, 'w+', shape=self.nrows)
for row, record in enumerate(self):
array[row] = record
#array.flush()
else:
if self._is_uncompressed:
array = self._uncompressed_to_structured_array()
else:
array = np.fromiter(self, self.trunc_dtype, self.nrows)
self.do_convert_datetimes = True
return array
def all(self, asRecarray=True, filename=None):
"""This convenience function returns all the records.
Wrapper for to_structured_array; overrides the SavReader version
Parameters
----------
asRecarray : bool, default True
whether the array should be a recarray, using varNames
as the fieldnames.
filename : str or None, default None
The filename for the memory mapped array. If omitted,
the array will be in-memory
Returns
-------
records : numpy.core.records.recarray (if `asRecarray==True`) or
numpy.ndarray (structured array, if `asRecarray==False`)
See also
--------
savReaderWriter.SavReaderNp.to_structured_array"""
structured_array = self.to_structured_array(filename)
if asRecarray:
return structured_array.view(np.recarray)
return structured_array
@convert_missings
def to_ndarray(self, filename=None):
"""Converts a homogeneous, all-numeric SPSS dataset into an ndarray,
unless the numerical variables are actually datetimes
Parameters
----------
filename : str, optional
The filename for the memory mapped array. If omitted,
the array will be in-memory
Raises
------
ValueError : if the data are not homogeneous. If `rawMode=False`
(default) SPSS datetimes are not considered to be numerical,
even though they are stored as such in the .sav file
Returns
-------
array : numpy.ndarray (if `filename=None`) or numpy.core.memmap.memmap
The array has a simple dtype, i.e. is a regular ndarray
Examples
--------
For example::
import numpy.ma
reader_np = SavReaderNp("./test_data/all_numeric.sav")
array = reader_np.to_ndarray()
average = numpy.ma.masked_invalid(array).mean()
reader_np.close()
See also
--------
savReaderWriter.SavReaderNp.is_homogeneous : determines whether a
dataset is considered to be all-numeric
savReaderWriter.SavReaderNp.to_structured_array
"""
if not self.is_homogeneous:
raise ValueError("Need only floats and no datetimes in dataset")
elif filename:
array = np.memmap(filename, float, 'w+', shape=self.shape)
for row, record in enumerate(self):
array[row,:] = record
else:
values = chain.from_iterable(self)
count = np.prod(self.shape)
array = np.fromiter(values, float, count).reshape(self.shape)
return array
def to_array(self, filename=None):
"""Wrapper for to_ndarray and to_structured_array. Returns an ndarray if the
dataset is all-numeric homogeneous (and no datetimes), a structured
array otherwise
See also
--------
savReaderWriter.SavReaderNp.to_ndarray
savReaderWriter.SavReaderNp.to_structured_array"""
if self.is_homogeneous:
return self.to_ndarray(filename)
else:
return self.to_structured_array(filename)
if __name__ == "__main__":
import time
from contextlib import closing
savFileName = "./test_data/all_numeric_datetime_uncompressed.sav"
kwargs = dict( \
savFileName = savFileName,
varNames = ["v1", "v2"],
varTypes = {"v1": 0, "v2": 0},
formats = {"v1": "DOLLAR15.2", "v2": "EDATE40"} )
if not os.path.exists(savFileName):
with SavWriter(**kwargs) as writer:
for i in xrange(10 ** 2):
value = None if not i else 11654150400.
writer.writerow([i, value])
klass = globals()[sys.argv[1]]
start = time.time()
filename = "./test_data/Employee data.sav"
#filename = "./test_data/greetings.sav"
filename = "./test_data/all_numeric.sav"
#filename = "/home/albertjan/nfs/Public/somefile_uncompressed.sav"
#filename = '/home/antonia/Desktop/big.sav'
#filename = '/home/albertjan/nfs/Public/bigger.sav'
with closing(klass(filename, rawMode=False, ioUtf8=False)) as sav:
#print(sav.struct_dtype.descr)
array = sav.to_ndarray() #"/tmp/test.dat")
#array = sav.to_structured_array()
#print(sav.formats)
#array = sav.all() #"/tmp/test.dat")
#for record in sav:
#print(record)
#pass
print("%s version: %5.3f" % (sys.argv[1], (time.time() - start)))
|
# File: gitn_branch.py
# Author: kmnk <kmnknmk at gmail.com>
# License: MIT license
from gitn.util.gitn import Gitn
from denite.process import Process
import copy
import os
import re
from .gitn import Source as Base
HIGHLIGHT = {
'container': {
'name': 'gitn_branch_line',
'pattern': '\\v([* ]) (.+)',
},
'containees': [
{
'name': 'gitn_branch_current',
'pattern': '*',
'color': 'Todo',
},
{
'name': 'gitn_branch_default',
'pattern': ' [^ ]\+',
'color': 'Statement',
},
{
'name': 'gitn_branch_remotes',
'pattern': ' remotes\/origin\/[^ ]\+',
'color': 'Special',
},
{
'name': 'gitn_branch_origin',
'pattern': ' origin\/[^ ]\+',
'color': 'Statement',
},
{
'name': 'gitn_branch_link',
'pattern': ' \->',
'color': 'Comment',
},
]
}
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'gitn_branch'
self.kind = 'gitn_branch'
self.vars = {
'command': ['git'],
'action': ['branch'],
'default_opts': ['--list'],
'separator': ['--'],
}
def on_init(self, context):
self.__proc = None
def on_close(self, context):
if self.__proc:
self.__proc.kill()
self.__proc = None
def highlight(self):
Gitn.highlight(self.vim, HIGHLIGHT)
def define_syntax(self):
self.vim.command(
'syntax region ' + self.syntax_name + ' start=// end=/$/ '
'contains=gitn_branch_line,deniteMatched contained')
def gather_candidates(self, context):
if self.__proc:
return self.__async_gather_candidates(context, 0.5)
opts = copy.copy(self.vars['default_opts'])
if len(context['args']) > 0:
args = context['args']
if 'all' in args:
opts += ['--all']
commands = []
commands += self.vars['command']
commands += self.vars['action']
commands += opts
commands += self.vars['separator']
self.__proc = Process(commands, context, self.vim.call('expand', context['path']))
return self.__async_gather_candidates(context, 2.0)
def __async_gather_candidates(self, context, timeout):
outs, errs = self.__proc.communicate(timeout=timeout)
context['is_async'] = not self.__proc.eof()
if self.__proc.eof():
self.__proc = None
candidates = []
for line in outs:
result = self.__parse_branch(line, context)
if result:
[name, ref_name, is_current, is_remote, is_tracked] = result
candidates.append({
'word': '{0} {1}{2}'.format(
'*' if is_current else ' ',
ref_name + ' -> ' if ref_name != '' else '',
name),
'action__name': name,
})
return candidates
def __parse_branch(self, line, context):
name = ''
ref_name = ''
current = ''
is_current = False
is_remote = False
is_tracked = True
        m = None
if not m:
m = re.search(r'^([* ]) ([^ ]+)$', line)
if m: [current, name] = m.groups()
if not m:
m = re.search(r'^([* ]) ([^ ]+) -> ([^ ]+)$', line)
if m: [current, ref_name, name] = m.groups()
if not m:
m = re.search(r'^([* ]) ([(][^)]+[)])$', line)
if m:
[current, name] = m.groups()
is_tracked = False
is_current = current == '*'
        # is_remote is not derived from the branch line here; it stays False
return [name, ref_name, is_current, is_remote, is_tracked]
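# Parsing sketch (illustrative comment, not part of the plugin): typical
# `git branch --list` lines map onto the three regexes above roughly as
#   "* master"                                -> name='master', is_current=True
#   "  remotes/origin/HEAD -> origin/master"  -> ref_name='remotes/origin/HEAD', name='origin/master'
#   "* (HEAD detached at 1a2b3c)"             -> name='(HEAD detached at 1a2b3c)', is_tracked=False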
|
"""Multiple functions used in the pipeline"""
import os
import json
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from rssnet.loaders.dataloaders import Rescale, Flip
from rssnet.utils import RSSNET_HOME
def get_class_weights(path_to_weights, signal_type):
"""Load class weights for custom loss
PARAMETERS
----------
path_to_weights: str
signal_type: str
Supported: 'range_doppler', 'range_angle'
RETURNS
-------
weights: torch tensor
Weights by class to use in the CE loss
"""
if signal_type in ('range_angle', 'rdra2ra', 'rad2ra'):
file_name = 'ra_weights.json'
elif signal_type in ('range_doppler', 'rdra2rd', 'rad2rd'):
file_name = 'rd_weights.json'
else:
raise ValueError('Signal type {} is not supported.'.format(signal_type))
with open(os.path.join(path_to_weights, file_name), 'r') as fp:
weights = json.load(fp)
weights = np.array([weights['background'], weights['pedestrian'],
weights['cyclist'], weights['car']])
weights = torch.from_numpy(weights)
return weights
def transform_masks_viz(masks, nb_classes):
masks = masks.unsqueeze(1)
masks = (masks.float()/nb_classes)
return masks
def get_metrics(metrics, loss):
"""Structure metrics and results in a dict"""
metrics_values = dict()
metrics_values['loss'] = loss.item()
prec, prec_by_class = metrics.get_pixel_prec_class() # harmonic_mean=True)
recall, recall_by_class = metrics.get_pixel_recall_class() # harmonic_mean=True)
miou, miou_by_class = metrics.get_miou_class() # harmonic_mean=True)
dice, dice_by_class = metrics.get_dice_class()
metrics_values['prec'] = prec
metrics_values['prec_by_class'] = prec_by_class.tolist()
metrics_values['recall'] = recall
metrics_values['recall_by_class'] = recall_by_class.tolist()
metrics_values['miou'] = miou
metrics_values['miou_by_class'] = miou_by_class.tolist()
metrics_values['dice'] = dice
metrics_values['dice_by_class'] = dice_by_class.tolist()
return metrics_values
def normalize(data, signal_type, carrada_path, norm_type='local'):
"""Function to normalize the input data.
    Note that the 'train' and 'tvt' norm methods require specific files
containing statistics on the dataset.
PARAMETERS
----------
data: torch tensor
Matrix to normalize
signal_type: str
carrada_path: str
        Path to the files containing statistics on the dataset
norm_type: str
Supported: 'local', 'train', 'tvt'
RETURNS
-------
norm_data: torch tensor
"""
if signal_type in ('range_doppler', 'range_angle', 'rad2rd', 'rad2ra') and \
            norm_type == 'local':
min_value = torch.min(data)
max_value = torch.max(data)
norm_data = torch.div(torch.sub(data, min_value), torch.sub(max_value, min_value))
return norm_data
if signal_type in ('rdra2rd', 'rdra2ra'):
if norm_type == 'train':
with open(os.path.join(carrada_path, 'rd_stats.json'), 'r') as fp:
rd_stats = json.load(fp)
with open(os.path.join(carrada_path, 'ra_stats.json'), 'r') as fp:
ra_stats = json.load(fp)
elif norm_type == 'tvt':
with open(os.path.join(carrada_path, 'rd_stats_all.json'), 'r') as fp:
rd_stats = json.load(fp)
with open(os.path.join(carrada_path, 'ra_stats_all.json'), 'r') as fp:
ra_stats = json.load(fp)
# Normalize representation independently
if norm_type in ('train', 'tvt'):
for i in range(data.shape[1]):
if i%2 == 0:
# range-Doppler
min_value = torch.tensor(rd_stats['min_val'])
max_value = torch.tensor(rd_stats['max_val'])
else:
# range-angle
min_value = torch.tensor(ra_stats['min_val'])
max_value = torch.tensor(ra_stats['max_val'])
data[:, i, :, :] = torch.div(torch.sub(data[:, i, :, :], min_value),
torch.sub(max_value, min_value))
        elif norm_type == 'local':
for i in range(data.shape[1]):
min_value = torch.min(data[:, i, :, :])
max_value = torch.max(data[:, i, :, :])
data[:, i, :, :] = torch.div(torch.sub(data[:, i, :, :], min_value),
torch.sub(max_value, min_value))
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
return data
elif signal_type == 'range_doppler':
if norm_type == 'train':
with open(os.path.join(carrada_path, 'rd_stats.json'), 'r') as fp:
rd_stats = json.load(fp)
elif norm_type == 'tvt':
with open(os.path.join(carrada_path, 'rd_stats_all.json'), 'r') as fp:
rd_stats = json.load(fp)
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
min_value = torch.tensor(rd_stats['min_val'])
max_value = torch.tensor(rd_stats['max_val'])
norm_data = torch.div(torch.sub(data, min_value),
torch.sub(max_value, min_value))
return norm_data
elif signal_type == 'range_angle':
if norm_type == 'train':
with open(os.path.join(carrada_path, 'ra_stats.json'), 'r') as fp:
ra_stats = json.load(fp)
elif norm_type == 'tvt':
with open(os.path.join(carrada_path, 'ra_stats_all.json'), 'r') as fp:
ra_stats = json.load(fp)
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
min_value = torch.tensor(ra_stats['min_val'])
max_value = torch.tensor(ra_stats['max_val'])
norm_data = torch.div(torch.sub(data, min_value),
torch.sub(max_value, min_value))
return norm_data
elif signal_type in ('rad2rd', 'rad2ra'):
if norm_type == 'train':
with open(os.path.join(carrada_path, 'rad_stats.json'), 'r') as fp:
rad_stats = json.load(fp)
elif norm_type == 'tvt':
with open(os.path.join(carrada_path, 'rad_stats_all.json'), 'r') as fp:
rad_stats = json.load(fp)
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
min_value = torch.tensor(rad_stats['min_val'])
max_value = torch.tensor(rad_stats['max_val'])
norm_data = torch.div(torch.sub(data, min_value),
torch.sub(max_value, min_value))
return norm_data
else:
raise TypeError('Signal {} is not supported.'.format(signal_type))
def define_loss(signal_type, custom_loss, device):
"""Define loss for training pipeline
PARAMETERS
----------
signal_type: str
custom_loss: str
Name of the custom loss to use.
Default: use Cross Entropy
device: str
Supported: 'cuda', 'cpu'
RETURNS
-------
loss: torch function
"""
if custom_loss == 'wce':
path_to_weights = os.path.join(RSSNET_HOME, 'model_configs')
weights = get_class_weights(path_to_weights, signal_type)
loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
else:
loss = nn.CrossEntropyLoss()
return loss
def get_transformations(transform_names, split='train', sizes=None):
"""Get transformation functions to apply to the input data
PARAMETERS
----------
transform_names: str
List of the transformation separated by comma
split: str
Split currently processed. Default: 'train'
sizes: tuple of ints or int
        Sizes for the Rescale transformation
RETURNS
-------
transformations: list of functions
"""
transformations = list()
if 'rescale' in transform_names:
transformations.append(Rescale(sizes))
if 'flip' in transform_names and split == 'train':
transformations.append(Flip(0.5))
return transformations
def mask_to_img(mask):
mask_img = np.zeros((mask.shape[0],
mask.shape[1], 3), dtype=np.uint8)
mask_img[mask == 1] = [255, 0, 0]
mask_img[mask == 2] = [0, 255, 0]
mask_img[mask == 3] = [0, 0, 255]
mask_img = Image.fromarray(mask_img)
return mask_img
def get_qualitatives(outputs, masks, paths, seq_name, quali_iter):
"""Method to get qualitative results
PARAMETERS
----------
outputs: torch tensor
Predicted masks
masks: torch tensor
Ground truth masks
seq_name: str
quali_iter: int
Current iteration on the dataset
RETURNS
-------
quali_iter: str
"""
folder_path = os.path.join(paths['logs'], seq_name[0])
os.makedirs(folder_path, exist_ok=True)
outputs = torch.argmax(outputs, axis=1).cpu().numpy()
masks = torch.argmax(masks, axis=1).cpu().numpy()
for i in range(outputs.shape[0]):
mask_img = mask_to_img(masks[i])
mask_img.save(os.path.join(folder_path, 'mask_{}.png'.format(quali_iter)))
output_img = mask_to_img(outputs[i])
output_img.save(os.path.join(folder_path, 'output_{}.png'.format(quali_iter)))
quali_iter += 1
return quali_iter
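if __name__ == '__main__':
    # Minimal sanity check (added sketch, not part of the original pipeline):
    # 'local' normalization rescales a tensor into [0, 1] and needs no
    # dataset statistics files, so carrada_path can stay empty here.
    _dummy = torch.rand(4, 2, 8, 8) * 100
    _norm = normalize(_dummy, 'range_doppler', carrada_path='', norm_type='local')
    print(float(_norm.min()), float(_norm.max()))  # expected ~0.0 and ~1.0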
|
# -*- coding: UTF-8 -*-
from assopy import models
from conference import models as cmodels
from django.db.models import Q
from collections import defaultdict
from decimal import Decimal
def _orders(**kw):
qs = models.Order.objects.filter(_complete=True)
if 'year' in kw:
qs = qs.filter(created__year=kw['year'])
if 'from' in kw:
qs = qs.filter(created__gte=kw['from'])
if 'to' in kw:
qs = qs.filter(created__lt=kw['to'])
return qs
def movimento_cassa(**kw):
"""
Elenca i movimenti di cassa in/out nel periodo specificato. Tramite **kw si
può specificare
- year
- from (data inizio)
- to (data fine)
Il dizionario in output ha tre chiavi:
- in
- out
sono i movimenti in ingresso/uscita, nella forma:
codice biglietto|discount -> quantità, totale
- error
sono gli ordini con righe d'ordine incongruenti, senza un biglietto
ma con importo maggiore di zero.
"""
items = models.OrderItem.objects\
.filter(order__in=_orders(**kw))\
.values('code', 'ticket', 'price', 'order__code')
fares = dict(cmodels.Fare.objects\
.values_list('code', 'description'))
output = {
'in': defaultdict(lambda: [0, 0]),
'out': defaultdict(lambda: [0, 0]),
'errors': defaultdict(lambda: [0, 0]),
}
for row in items:
if row['price'] < 0:
if row['code'].startswith('_'):
k = output['out'][(row['code'], fares.get(row['code']))]
else:
k = output['out'][('OTHER', '')]
elif not row['ticket']:
k = output['errors'][(row['order__code'], '')]
else:
k = output['in'][(row['code'], fares.get(row['code']))]
k[0] += 1
k[1] += row['price']
totals = {
'in': 0,
'out': 0,
'errors': 0,
}
for k, v in output.items():
data = sorted(v.items())
output[k] = data
totals[k] = sum([x[1][1] for x in data])
output['totals'] = totals
return output
movimento_cassa.description = "List of cash movements"
movimento_cassa.template = '''
<table>
<tr>
<th>Code</th>
<th>Qty</th>
<th style="width: 70px;">Price</th>
</tr>
{% for code, row in data.in %}
<tr>
<td title="{{ code.1 }}">{{ code.0 }}</td>
<td>{{ row.0 }}</td>
<td style="text-align: right;">€ {{ row.1|floatformat:"2" }}</td>
</tr>
{% endfor %}
<tr>
<th colspan="2">Total</th>
<td style="text-align: right;">€ {{ data.totals.in }}</td>
</tr>
{% for code, row in data.out %}
<tr>
<td title="{{ code.1 }}">{{ code.0 }}</td>
<td>{{ row.0 }}</td>
<td style="text-align: right; color: red;">€ {{ row.1|floatformat:"2" }}</td>
</tr>
{% endfor %}
<tr>
<th colspan="2">Total</th>
<td style="text-align: right;">€ {{ data.totals.out }}</td>
</tr>
</table>
{% if data.errors %}
<h3>Errors</h3>
<table>
<tr>
        <th>Order</th>
<th style="width: 70px;">Price</th>
</tr>
{% for code, row in data.errors %}
<tr>
        <td>{{ code.0 }}</td>
        <td style="text-align: right;">€ {{ row.1|floatformat:"2" }}</td>
</tr>
{% endfor %}
<tr>
<th>Total</th>
<td style="text-align: right;">€ {{ data.totals.errors }}</td>
</tr>
</table>
{% endif %}
'''
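# Usage sketch (illustrative, assumes a configured Django/assopy environment):
#
#   data = movimento_cassa(year=2015)
#   data['in']     -> [((fare_code, description), [qty, total]), ...]
#   data['out']    -> same shape, for outgoing movements
#   data['totals'] -> {'in': ..., 'out': ..., 'errors': ...}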
def prezzo_biglietti_ricalcolato(**kw):
"""
Ricalcola il ricavo dei biglietti eliminando quelli gratuiti e
ridistribuendo il prezzo sui rimanenti.
"""
    # only orders that involve purchases of "conference"
    # tickets are of interest here
orders = models.Order.objects\
.filter(id__in=_orders(**kw), orderitem__ticket__fare__ticket_type='conference')\
.values('id')\
.distinct()
qs = models.OrderItem.objects\
.filter(order__in=orders)\
.values_list('ticket__fare__code', 'ticket__fare__name', 'price', 'order')
fares = set(cmodels.Fare.objects\
.filter(ticket_type='conference')\
.values_list('code', flat=True))
def _calc_prices(order_id, items):
"""
Elimina gli item degli sconti e riduce in maniera proporzionale
il valore dei restanti.
"""
prices = set()
discount = Decimal('0')
total = Decimal('0')
for item in items:
if item['price'] > 0:
prices.add(item['price'])
total += item['price']
else:
discount += item['price'] * -1
for ix, item in reversed(list(enumerate(items))):
if item['price'] > 0:
item['price'] = item['price'] * (total - discount) / total
else:
del items[ix]
grouped = defaultdict(list)
for fcode, fname, price, oid in qs:
if fcode in fares or price < 0:
grouped[oid].append({
'code': fcode,
'name': fname,
'price': price,
})
for oid, items in grouped.items():
_calc_prices(oid, items)
    # after running _calc_prices the resulting prices no longer match
    # the ordinary fares, so group the resulting OrderItems
    # by fare code and new price
tcp = {}
for rows in grouped.values():
for item in rows:
code = item['code']
if code not in tcp:
tcp[code] = {
'code': code,
'name': item['name'],
'prices': {}
}
price = item['price']
if price not in tcp[code]['prices']:
tcp[code]['prices'][price] = { 'price': price, 'count': 0 }
tcp[code]['prices'][price]['count'] += 1
# Replace prices dicts with sorted lists
for code in tcp.keys():
prices_list = [entry
for price, entry in sorted(tcp[code]['prices'].items(),
reverse=True)]
tcp[code]['prices'] = prices_list
# Create list sorted by fare code
ticket_sales = [entry for code, entry in sorted(tcp.items())]
return ticket_sales
prezzo_biglietti_ricalcolato.template = '''
<table>
<tr>
<th>Code</th>
<th>Qty</th>
<th style="width: 70px;">Price</th>
</tr>
{% for ticket in data %}
{% for p in ticket.prices %}
<tr>
{% if forloop.counter == 1 %}
<td title="{{ ticket.name }}" rowspan="{{ ticket.prices|length }}">{{ ticket.code }}</td>
{% endif %}
<td>{{ p.count }}</td>
<td>€ {{ p.price|floatformat:"2" }}</td>
</tr>
{% endfor %}
{% endfor %}
</table>
'''
|
import tweepy as tw
import json
with open("key.json", "r") as f:
key = json.load(f)
auth = tw.OAuthHandler(key['API_KEY'], key['API_SECRET_KEY'])
auth.set_access_token(key['ACCESS_TOKEN'], key['ACCESS_TOKEN_SECRET'])
api = tw.API(auth, wait_on_rate_limit=True)
#api.update_status("Hello World!")
search_words = "Canada"
date_since = '2019-01-01'
tweets = tw.Cursor(api.search, q=search_words, lang='en', since=date_since).items(5)
for tweet in tweets:
print(tweet.text)
|
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for NS(Mutable)IndexSet
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the count of values for an NSIndexSet, so they need not
# obey the interface specification for synthetic children providers
class NSIndexSetClass_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
# NS(Mutable)IndexSet works in one of two modes: when having a compact block of data (e.g. a Range)
# the count is stored in the set itself, 3 pointers into it
# otherwise, it will store a pointer to an additional data structure (2 pointers into itself) and this
# additional structure will contain the count two pointers deep
# to distinguish the two modes, one reads two pointers deep into the object data: if only the MSB
# is set, then we are in mode 1, using that area to store flags, otherwise, the read pointer is the
# location to go look for count in mode 2
def count(self):
logger = lldb.formatters.Logger.Logger()
mode_chooser_vo = self.valobj.CreateChildAtOffset("mode_chooser",
2*self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
mode_chooser = mode_chooser_vo.GetValueAsUnsigned(0)
if self.sys_params.is_64_bit:
mode_chooser = mode_chooser & 0xFFFFFFFFFFFFFF00
else:
mode_chooser = mode_chooser & 0xFFFFFF00
if mode_chooser == 0:
mode = 1
else:
mode = 2
if mode == 1:
count_vo = self.valobj.CreateChildAtOffset("count",
3*self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
else:
count_ptr = mode_chooser_vo.GetValueAsUnsigned(0)
count_vo = self.valobj.CreateValueFromAddress("count",
count_ptr+2*self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
return count_vo.GetValueAsUnsigned(0)
class NSIndexSetUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
def count(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
expr = "(int)[" + stream.GetData() + " count]"
num_children_vo = self.valobj.CreateValueFromExpression("count",expr)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSIndexSet>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data,wrapper =lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj,statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSIndexSet' or name_string == 'NSMutableIndexSet':
wrapper = NSIndexSetClass_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun',valobj)
else:
wrapper = NSIndexSetUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('unknown_class',valobj.GetName() + " seen as " + name_string)
return wrapper;
def NSIndexSet_SummaryProvider (valobj,dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj);
if provider != None:
if isinstance(provider,lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.count();
except:
summary = None
logger >> "got summary " + str(summary)
if summary == None:
summary = '<variable is not NSIndexSet>'
if isinstance(summary, basestring):
return summary
else:
summary = str(summary) + (' objects' if summary != 1 else ' object')
return summary
return 'Summary Unavailable'
def __lldb_init_module(debugger,dict):
debugger.HandleCommand("type summary add -F NSIndexSet.NSIndexSet_SummaryProvider NSIndexSet NSMutableIndexSet")
|
import numpy as np
from gym.envs.mujoco import HalfCheetahEnv
import inspect
def get_all_function_arguments(function, locals):
kwargs_dict = {}
for arg in inspect.getfullargspec(function).kwonlyargs:
if arg not in ["args", "kwargs"]:
kwargs_dict[arg] = locals[arg]
args = [locals[arg] for arg in inspect.getfullargspec(function).args]
if "args" in locals:
args += locals["args"]
if "kwargs" in locals:
kwargs_dict.update(locals["kwargs"])
return args, kwargs_dict
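# Illustrative helper check (added sketch, not in the original file): collect a
# call's positional, *args and keyword arguments back out of locals().
def _demo_collect(a, b, *args, c=3, **kwargs):
    return get_all_function_arguments(_demo_collect, locals())

# _demo_collect(1, 2, 10, c=4, d=5) -> ([1, 2, 10], {'c': 4, 'd': 5})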
class HalfCheetahWrapper(HalfCheetahEnv):
"""HalfCheetah Wrapper that wraps Mujoco Halfcheetah-v2 env
with an additional defined reward function for model-based RL.
This is currently used for MBMPO.
"""
def __init__(self, *args, **kwargs):
HalfCheetahEnv.__init__(self, *args, **kwargs)
def reward(self, obs, action, obs_next):
if obs.ndim == 2 and action.ndim == 2:
assert obs.shape == obs_next.shape
forward_vel = obs_next[:, 8]
ctrl_cost = 0.1 * np.sum(np.square(action), axis=1)
reward = forward_vel - ctrl_cost
return np.minimum(np.maximum(-1000.0, reward), 1000.0)
else:
forward_vel = obs_next[8]
ctrl_cost = 0.1 * np.square(action).sum()
reward = forward_vel - ctrl_cost
return np.minimum(np.maximum(-1000.0, reward), 1000.0)
if __name__ == "__main__":
env = HalfCheetahWrapper()
env.reset()
for _ in range(1000):
env.step(env.action_space.sample())
|
import xlwt
from django.http import HttpResponse
from users.models import Usuario
from mrp.models import Report, ReportItem, ReportPeriod
from decimal import Decimal
from .functions import dollarFormat
def export_report_xls(request, report_id):
report = Report.objects.get(pk=report_id)
items = ReportItem.objects.filter(report=report_id)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = f'attachment; filename="{report.title}.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Report')
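    # xlwt column widths are measured in 1/256ths of a character, so 256 * 20 is roughly 20 characters wide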
COLS_WIDTH = 256 * 20
ws.col(2).width = COLS_WIDTH
ws.col(3).width = COLS_WIDTH
ws.col(4).width = COLS_WIDTH
ws.col(5).width = COLS_WIDTH
ws.col(6).width = COLS_WIDTH
for count in range(0, 62):
ws.row(count).height_mismatch = True
ws.row(count).height = 320
# Create Custom color for cells
xlwt.add_palette_colour("custom_colour", 0x21)
wb.set_colour_RGB(0x21, 211, 247, 253)
th_style = xlwt.easyxf('pattern: pattern solid, fore_color custom_colour; border: top thin, right thin, bottom thin, left thin; font: bold 1; align: vert centre, horiz center;')
td_style = xlwt.easyxf('border: top thin, right thin, bottom thin, left thin; font: bold 1; align: vert centre, horiz center')
current_table = 2
total_carrying_cost = 0
total_order_cost = 0
for item in items:
ws.write(current_table + 0, 2, 'Item Number', th_style)
ws.write(current_table + 0, 3, item.part_number, td_style)
ws.write(current_table + 0, 5, 'Yield (%)', th_style)
ws.write(current_table + 0, 6, item.yield_percent, td_style)
ws.write(current_table + 2, 2, 'Lead Time', th_style)
ws.write(current_table + 2, 3, item.lead_time, td_style)
ws.write(current_table + 2, 5, 'Order Cost', th_style)
ws.write(current_table + 2, 6, item.order_cost, td_style)
ws.write(current_table + 4, 2, 'Parent', th_style)
ws.write(current_table + 4, 3, item.parent, td_style)
ws.write(current_table + 4, 5, 'Carrying Cost', th_style)
ws.write(current_table + 4, 6, item.carrying_cost, td_style)
ws.write(current_table + 6, 2, 'Quantity', th_style)
ws.write(current_table + 6, 3, item.qty, td_style)
ws.write(current_table + 6, 5, 'Lot Size', th_style)
ws.write(current_table + 6, 6, item.lot_size, td_style)
ws.write(current_table + 7, 5, 'Factor', th_style)
ws.write(current_table + 7, 4, '')
ws.write(current_table + 8, 2, 'Safety Stock', th_style)
ws.write(current_table + 8, 3, item.safe_stock, td_style)
ws.write(current_table + 9, 5, 'On Hand', th_style)
ws.write(current_table + 10, 2, 'Period', th_style)
ws.write(current_table + 10, 3, 'Gross Requirement', th_style)
ws.write(current_table + 10, 4, 'Receipts', th_style)
ws.write(current_table + 10, 5, item.on_hand, td_style)
ws.write(current_table + 10, 6, 'Net Requirement', th_style)
periods = ReportPeriod.objects.filter(item=item.id)
total_inventory = 0
net_req_count = 0
for period in periods:
row = period.period - 1
ws.write(current_table + row + 11, 2, period.period, td_style)
ws.write(current_table + row + 11, 3, period.gross_requirement, td_style)
ws.write(current_table + row + 11, 4, period.receipt, td_style)
ws.write(current_table + row + 11, 5, period.on_hand, td_style)
ws.write(current_table + row + 11, 6, period.net_requirement, td_style)
if period.on_hand > 0:
total_inventory += period.on_hand
if period.net_requirement > 0: net_req_count += 1
average_inventory = total_inventory / len(periods)
carrying_cost = Decimal(average_inventory) * item.carrying_cost
order_cost = net_req_count * item.order_cost
total_cost = carrying_cost + order_cost
ws.write(current_table + row + 12, 2, "Total Inventory", th_style)
ws.write(current_table + row + 12, 3, "Average Inventory", th_style)
ws.write(current_table + row + 12, 4, "Carrying Cost", th_style)
ws.write(current_table + row + 12, 5, "Order Cost", th_style)
ws.write(current_table + row + 12, 6, "Total Cost", th_style)
ws.write(current_table + row + 13, 2, total_inventory, td_style)
ws.write(current_table + row + 13, 3, average_inventory, td_style)
ws.write(current_table + row + 13, 4, dollarFormat(carrying_cost), td_style)
ws.write(current_table + row + 13, 5, dollarFormat(order_cost), td_style)
ws.write(current_table + row + 13, 6, dollarFormat(total_cost), td_style)
total_carrying_cost += carrying_cost
total_order_cost += order_cost
current_table += 25
final_total_cost = total_carrying_cost + total_order_cost
ws.write(current_table + row - 9, 4, "Total Carrying Cost", th_style)
ws.write(current_table + row - 9, 5, "Total Order Cost", th_style)
ws.write(current_table + row - 9, 6, "Total Cost", th_style)
ws.write(current_table + row - 8, 4, dollarFormat(total_carrying_cost), td_style)
ws.write(current_table + row - 8, 5, dollarFormat(total_order_cost), td_style)
ws.write(current_table + row - 8, 6, dollarFormat(final_total_cost), td_style)
wb.save(response)
return response
def export_users_xls(request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="users.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Users')
# Sheet header, first row
row_num = 0
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = ['First name', 'Last name', 'Email address', ]
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num], font_style)
# Sheet body, remaining rows
font_style = xlwt.XFStyle()
rows = Usuario.objects.all().values_list('first_name', 'last_name', 'email')
for row in rows:
row_num += 1
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
wb.save(response)
return response
def export_styling_xls(request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="users.xls"'
wb = xlwt.Workbook(encoding='utf-8')
    ws = wb.add_sheet('Styling Data')  # this will make a sheet named "Styling Data"
xlwt.add_palette_colour("custom_colour", 0x21)
wb.set_colour_RGB(0x21, 211, 247, 253)
styles = dict(
bold = 'font: bold 1',
italic = 'font: italic 1',
# Wrap text in the cell
wrap_bold = 'font: bold 1; align: wrap 1;',
# White text on a blue background
reversed = 'pattern: pattern solid, fore_color blue; font: color white;',
# Light orange checkered background
light_orange_bg = 'pattern: pattern fine_dots, fore_color white, back_color orange;',
# Heavy borders
bordered = 'border: top thin, right thin, bottom thin, left thin;',
# 16 pt red text
big_red = 'font: height 320, color red;',
custom_color = 'pattern: pattern solid, fore_color custom_colour;',
)
for idx, k in enumerate(sorted(styles)):
style = xlwt.easyxf(styles[k])
ws.write(idx, 0, k)
ws.write(idx, 1, styles[k], style)
wb.save(response)
return response
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from cliff import command
import url_access_checker.api as api
import url_access_checker.errors as errors
from url_access_checker.network import manage_network
LOG = logging.getLogger(__name__)
class CheckUrls(command.Command):
"""Check if it is possible to retrieve urls."""
def get_parser(self, prog_name):
parser = super(CheckUrls, self).get_parser(prog_name)
parser.add_argument('urls', type=str, nargs='+',
help='List of urls to check')
return parser
def take_action(self, parsed_args):
LOG.info('Starting url access check for {0}'.format(parsed_args.urls))
try:
api.check_urls(parsed_args.urls)
except errors.UrlNotAvailable as e:
sys.stdout.write(str(e))
raise e
class CheckUrlsWithSetup(CheckUrls):
def get_parser(self, prog_name):
parser = super(CheckUrlsWithSetup, self).get_parser(
prog_name)
parser.add_argument('-i', type=str, help='Interface', required=True)
parser.add_argument('-a', type=str, help='Addr/Mask pair',
required=True)
parser.add_argument('-g', type=str, required=True,
help='Gateway to be used as default')
parser.add_argument('--vlan', type=int, help='Vlan tag')
return parser
def take_action(self, pa):
with manage_network(pa.i, pa.a, pa.g, pa.vlan):
return super(
CheckUrlsWithSetup, self).take_action(pa)
|
""" Cisco_IOS_XR_platform_pifib_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR platform\-pifib package operational data.
This YANG module augments the
Cisco\-IOS\-XR\-lpts\-pre\-ifib\-oper
module with state data.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class UsageAddressFamilyEnum(Enum):
"""
UsageAddressFamilyEnum
Usage address family
.. data:: ipv4 = 0
Ipv4 af
.. data:: ipv6 = 1
Ipv6 af
"""
ipv4 = 0
ipv6 = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_platform_pifib_oper as meta
return meta._meta_table['UsageAddressFamilyEnum']
|
__name__ = ["phonology"]
__version__ = "1.5.4"
##################################################################
#
#
# Phonology - Consonants
def gen_nasal_stops_():
return [ "m", "n", "ŋ" ]
nasal_stop = gen_nasal_stops_()
def gen_non_nasal_stops_():
return [ "b", "d", "ɡ" , "p", "t", "k", "q" ]
non_nasal_stop = gen_non_nasal_stops_()
def gen_glottal_stop_():
return [ "ʔ" ]
glottal_stop = gen_glottal_stop_()
def gen_stops_():
stop_ = list()
stop_.extend(nasal_stop)
stop_.extend(non_nasal_stop)
return stop_
stop = gen_stops_()
def gen_all_stops_():
all_stops_ = list()
all_stops_.extend(stop)
all_stops_.extend(glottal_stop)
return all_stops_
all_stops = gen_all_stops_()
def gen_affricates_():
return [ "d͡z", "d͡ʒ" , "t͡s", "t͡ʃ" ]
affricate = gen_affricates_()
def sibilant_fricatives_():
return [ "z", "ʒ" , "s", "ʃ" , "ɬ" , "ɮ" ]
sibilant_fricative = sibilant_fricatives_()
def non_sibilant_fricatives_():
return [ "f", "v", "θ", "ð", "x", "χ", "ħ", "ʕ", "h" ]
non_sibilant_fricative = non_sibilant_fricatives_()
def gen_rhotic_fricatives_():
return [ "ɣ", "ʁ", "ɹ̠" ]
rhotic_fricative = gen_rhotic_fricatives_()
def gen_fric_():
""" 18 consonants """
fric_ = list()
fric_.extend(sibilant_fricative)
fric_.extend(non_sibilant_fricative)
fric_.extend(rhotic_fricative)
return fric_
fricative = gen_fric_()
def gen_semi_vowel_():
return [ "w", "ʋ", "ð̞", "j" ]
semi_vowel = gen_semi_vowel_()
rhotic_approximant = [ "ɰ", "ɹ̠" ]
onset_approximant_ext = [ "l" , "ʍ" , "ʕ̞" ]
def gen_onset_appr_():
on_appr_ = list()
on_appr_.extend(semi_vowel)
on_appr_.extend(rhotic_approximant)
on_appr_.extend(onset_approximant_ext)
return on_appr_
onset_approximant = gen_onset_appr_()
coda_approximant_ext = [ "ɫ" ]
def gen_coda_appr_():
co_appr_ = list()
co_appr_.extend(semi_vowel)
co_appr_.extend(coda_approximant_ext)
return co_appr_
coda_approximant = gen_coda_appr_()
def gen_trill_():
return [ "r", "ʀ" ]
trill = gen_trill_()
def gen_ejectives_():
return [ "pʼ" , "ť" , "kʼ" , "qʼ" , "tsʼ" , "t͡ʃʼ" ,
"fʼ" , "θʼ" , "sʼ" , "ʃʼ" , "x’" , "χ’" ]
ejective = gen_ejectives_()
def gen_pharyngeal_approximant_():
return [ "ʕ̞" ]
pharyngeal_approximant = gen_pharyngeal_approximant_()
# Affricate - Fricative matching dictionaries
# Remember Affricates are made up of
# a Stop followed by a Fricative.
def gen_affr_fric_match_():
return { "d͡z": "z", "d͡ʒ": "ʒ", "t͡s": "s", "t͡ʃ": "ʃ" }
affr_fric_match = gen_affr_fric_match_()
def gen_stop_affr_match_():
return { "d͡z": "d", "d͡ʒ": "d", "t͡s": "t", "t͡ʃ": "t" }
stop_affr_match = gen_stop_affr_match_()
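# Illustrative check (added sketch, not part of the original module): an
# affricate is a stop followed by a fricative, so the two lookup tables
# above decompose each affricate into its components.
if __name__ == "__main__":
    for affr_ in affricate:
        print(affr_, "=", stop_affr_match[affr_], "+", affr_fric_match[affr_])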
|
import re
from typing import Any
from core import Program
from core.commons import log_objects
from core.interfaces.singleton import Singleton
from cryptography.fernet import Fernet
class Hasher(metaclass=Singleton):
__metaclass__ = Singleton
WRAP_REGEX: str = '<HASHED_DATA_SDBA>{(.+?)}</HASHED_DATA_SDBA>'
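    # WRAP_REGEX doubles as a format template (with "{(.+?)}" swapped for "{}") and, with the braces stripped, as the search pattern used by decode_data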
def __init__(self) -> None:
self.__crypto_key = Program.get_crypto_key()
def _unhash(self, estr: str) -> str:
"""
_unhash
Args:
estr (str):
Returns:
            str
"""
f = Fernet(self.__crypto_key)
decrypted = f.decrypt(estr.encode(Program.ENCODING))
return decrypted.decode(Program.ENCODING)
def _hash(self, estr: str) -> str:
"""
_hash
Args:
estr (str):
Returns:
str
"""
message = str(estr).encode()
f = Fernet(self.__crypto_key)
encrypted_str: str = f.encrypt(message).decode(Program.ENCODING)
return self.WRAP_REGEX.replace("{(.+?)}", "{}").format(encrypted_str)
def decode_data(self, strdata: Any) -> Any:
"""
decode_data
Args:
strdata (Any):
Returns:
Any
"""
hasher_tag: str = self.WRAP_REGEX.split('{')[0]
def decode(estr: str) -> str:
try:
if hasher_tag not in str(estr):
return estr
else:
                    pattern: str = self.WRAP_REGEX.replace("{", "").replace("}", "")
                    m = re.search(pattern, estr)
                    if m:
                        found = m.group(1)
                        return self._unhash(found)
                    elif estr == self.WRAP_REGEX.replace("{(.+?)}", ""):
                        return ""
                    else:
                        return estr
            except Exception as ex:
                log_objects(ex)
                # fall back to the original value if decryption fails
                return estr
try:
if isinstance(strdata, dict):
ndct = {}
for k, v in strdata.items():
if isinstance(v, dict):
ndct[k] = v
else:
ndct[k] = decode(v)
return ndct
else:
return decode(strdata)
except Exception as e:
log_objects(e)
    def encode_data(self, strdata: Any) -> Any:
        """
        encode_data
        Args:
            strdata (Any):
        Returns:
            Any
        """
hasher_tag: str = self.WRAP_REGEX.split('{')[0]
def encode(estr: str) -> str:
if hasher_tag in str(estr):
return estr
else:
return self._hash(estr)
if isinstance(strdata, dict):
ndct = {}
for k, v in strdata.items():
if isinstance(v, dict):
ndct[k] = v
else:
ndct[k] = encode(v)
return ndct
else:
return encode(strdata)
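# Usage sketch (illustrative; assumes Program.get_crypto_key() returns a valid
# Fernet key in this environment, which the original module relies on anyway):
if __name__ == "__main__":
    hasher = Hasher()
    wrapped = hasher.encode_data({"user": "alice", "token": "secret"})
    print(wrapped)                      # values wrapped in <HASHED_DATA_SDBA> tags
    print(hasher.decode_data(wrapped))  # round-trips back to the plain values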
|
#!/usr/bin/env python3
""" Video output writer for faceswap.py converter """
import os
from collections import OrderedDict
from math import ceil
import imageio
import imageio_ffmpeg as im_ffm
from ffmpy import FFmpeg, FFRuntimeError
from ._base import Output, logger
class Writer(Output):
""" Video output writer using imageio """
def __init__(self, output_folder, total_count, frame_ranges, source_video, **kwargs):
super().__init__(output_folder, **kwargs)
logger.debug("total_count: %s, frame_ranges: %s, source_video: '%s'",
total_count, frame_ranges, source_video)
self.source_video = source_video
self.frame_ranges = frame_ranges
self.frame_order = self.set_frame_order(total_count)
self.output_dimensions = None # Fix dims of 1st frame in case of different sized images
self.writer = None # Need to know dimensions of first frame, so set writer then
@property
def video_file(self):
""" Return full path to video output """
filename = os.path.basename(self.source_video)
filename = os.path.splitext(filename)[0]
filename = "{}_converted.{}".format(filename, self.config["container"])
retval = os.path.join(self.output_folder, filename)
logger.debug(retval)
return retval
@property
def video_tmp_file(self):
""" Temporary video file, prior to muxing final audio """
path, filename = os.path.split(self.video_file)
retval = os.path.join(path, "__tmp_{}".format(filename))
logger.debug(retval)
return retval
@property
def valid_tune(self):
""" Return whether selected tune is valid for selected codec """
return {"libx264": ["film", "animation", "grain", "stillimage", "fastdecode",
"zerolatency"],
"libx265": ["grain", "fastdecode", "zerolatency"]}
@property
def video_fps(self):
""" Return the fps of source video """
reader = imageio.get_reader(self.source_video, "ffmpeg")
retval = reader.get_meta_data()["fps"]
reader.close()
logger.debug(retval)
return retval
@property
def output_params(self):
""" FFMPEG Output parameters """
codec = self.config["codec"]
tune = self.config["tune"]
# Force all frames to the same size
output_args = ["-vf", "scale={}".format(self.output_dimensions)]
output_args.extend(["-c:v", codec])
output_args.extend(["-crf", str(self.config["crf"])])
output_args.extend(["-preset", self.config["preset"]])
if tune is not None and tune in self.valid_tune[codec]:
output_args.extend(["-tune", tune])
if codec == "libx264" and self.config["profile"] != "auto":
output_args.extend(["-profile:v", self.config["profile"]])
if codec == "libx264" and self.config["level"] != "auto":
output_args.extend(["-level", self.config["level"]])
logger.debug(output_args)
return output_args
def set_frame_order(self, total_count):
""" Return the full list of frames to be converted in order """
if self.frame_ranges is None:
retval = list(range(1, total_count + 1))
else:
retval = list()
for rng in self.frame_ranges:
retval.extend(list(range(rng[0], rng[1] + 1)))
logger.debug("frame_order: %s", retval)
return retval
def get_writer(self):
""" Add the requested encoding options and return the writer """
logger.debug("writer config: %s", self.config)
return imageio.get_writer(self.video_tmp_file,
fps=self.video_fps,
ffmpeg_log_level="error",
quality=None,
macro_block_size=8,
output_params=self.output_params)
def write(self, filename, image):
""" Frames come from the pool in arbitrary order, so cache frames
for writing out in correct order """
logger.trace("Received frame: (filename: '%s', shape: %s", filename, image.shape)
if not self.output_dimensions:
logger.info("Outputting to: '%s'", self.video_file)
self.set_dimensions(image.shape[:2])
self.writer = self.get_writer()
self.cache_frame(filename, image)
self.save_from_cache()
def set_dimensions(self, frame_dims):
""" Set the dimensions based on a given frame frame. This protects against different
sized images coming in and ensure all images go out at the same size for writers
that require it and mapped to a macro block size 16"""
logger.debug("input dimensions: %s", frame_dims)
self.output_dimensions = "{}:{}".format(
int(ceil(frame_dims[1] / 16) * 16),
int(ceil(frame_dims[0] / 16) * 16))
logger.debug("Set dimensions: %s", self.output_dimensions)
def save_from_cache(self):
""" Save all the frames that are ready to be output from cache """
while self.frame_order:
if self.frame_order[0] not in self.cache:
logger.trace("Next frame not ready. Continuing")
break
save_no = self.frame_order.pop(0)
save_image = self.cache.pop(save_no)
logger.trace("Rendering from cache. Frame no: %s", save_no)
self.writer.append_data(save_image[:, :, ::-1])
logger.trace("Current cache size: %s", len(self.cache))
def close(self):
""" Close the ffmpeg writer and mux the audio """
self.writer.close()
self.mux_audio()
def mux_audio(self):
""" Mux audio
ImageIO is a useful lib for frames > video as it also packages the ffmpeg binary
however muxing audio is non-trivial, so this is done afterwards with ffmpy.
A future fix could be implemented to mux audio with the frames """
if self.config["skip_mux"]:
logger.info("Skipping audio muxing due to configuration settings.")
self._rename_tmp_file()
return
logger.info("Muxing Audio...")
if self.frame_ranges is not None:
logger.warning("Muxing audio is not currently supported for limited frame ranges."
"The output video has been created but you will need to mux audio "
"yourself")
self._rename_tmp_file()
return
exe = im_ffm.get_ffmpeg_exe()
inputs = OrderedDict([(self.video_tmp_file, None), (self.source_video, None)])
outputs = {self.video_file: "-map 0:v:0 -map 1:a:0 -c: copy"}
ffm = FFmpeg(executable=exe,
global_options="-hide_banner -nostats -v 0 -y",
inputs=inputs,
outputs=outputs)
logger.debug("Executing: %s", ffm.cmd)
# Sometimes ffmpy exits for no discernible reason, but then works on a later attempt,
# so take 5 shots at this
attempts = 5
for attempt in range(attempts):
logger.debug("Muxing attempt: %s", attempt + 1)
try:
ffm.run()
except FFRuntimeError as err:
logger.debug("ffmpy runtime error: %s", str(err))
if attempt != attempts - 1:
continue
logger.error("There was a problem muxing audio. The output video has been "
"created but you will need to mux audio yourself either with the "
"EFFMpeg tool or an external application.")
os.rename(self.video_tmp_file, self.video_file)
break
logger.debug("Removing temp file")
if os.path.isfile(self.video_tmp_file):
os.remove(self.video_tmp_file)
def _rename_tmp_file(self):
""" Rename the temporary video file if not muxing audio. """
os.rename(self.video_tmp_file, self.video_file)
logger.debug("Removing temp file")
if os.path.isfile(self.video_tmp_file):
os.remove(self.video_tmp_file)
|
def lsb(p):
res = 0
for byte in p[::-1]:
res = res << 8
res += byte
return res
def signed2lsb(v, n=2):
v = int(v)
res = list(v.to_bytes(length=n, byteorder='little', signed=True))
# print(f'V: {v} to', res)
return res
def lsb2signed(p):
return int.from_bytes(bytes(p), 'little', signed=True)
def parse_voltage(b):
return lsb(b) * 3.3 / 4095
charger_state = {
0: "DISCHARGING",
2: "DOCKING_CHARGED",
6: "DOCKING_CHARGING",
18: "ADAPTER_CHARGED",
22: "ADAPTER_CHARGING",
}
ir_bit_state = {
0x01: "NEAR_LEFT",
0x02: "NEAR_CENTER",
0x04: "NEAR_RIGHT",
0x08: "FAR_CENTER",
0x10: "FAR_LEFT",
0x20: "FAR_RIGHT",
}
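if __name__ == "__main__":
    # Illustrative round trip (added sketch, not in the original file):
    # 0x0102 stored little-endian is [0x02, 0x01], and the signed helpers
    # invert each other.
    assert lsb([0x02, 0x01]) == 0x0102
    assert lsb2signed(signed2lsb(-42)) == -42
    print(parse_voltage([0xFF, 0x0F]))  # 4095 counts -> 3.3 V full scale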
|
"""Giza URL Configuration"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^$',
views.show_giza,
name="show giza"
),
url(
r'^all/$',
views.show_all_giza,
name="show all giza"
),
url(
r'^new/$',
views.new_giza,
name='new giza'
),
url(
r'^new/(?P<email>.*)/$',
views.new_giza,
name='new giza with email'
),
url(
r'^(?P<id>\d+)/edit/$',
views.edit_giza,
name='edit giza'
),
url(
r'^(?P<id>\d+)/delete/$',
views.delete_giza,
name='delete giza'
),
url(
r'^search/(?P<search_type>.*)/(?P<search_word>.*)/$',
views.search_giza,
name='search giza'
)
]
|
# -*- coding: utf-8 -*-
"""HAR_Opportunity.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qfhns0ykD6eLkoWICPu6WbdD4r7V-6Uf
# Introduction
This notebook presents several machine learning models using CNN and LSTM for HAR. To obtain a detailed description of the architecture, please refer to the dissertation, **"RECOGNISING HUMAN ACTIVITIES AUTONOMOUSLY THROUGH FUSION OF SENSOR DATA"**.
## Dataset
As a dataset, the [OPPORTUNITY Activity Recognition Data Set](http://archive.ics.uci.edu/ml/datasets/OPPORTUNITY+Activity+Recognition) is used. To prepare this dataset for the program, the following code is uncommented and executed.
"""
# Download dataset zip file and place data files in training and test set directories
zipfile_dataset_opportunity = "OpportunityUCIDataset.zip"
url_dataset_opportunity = "https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip"
#!wget $url_dataset_opportunity
#!unzip $zipfile_dataset_opportunity
#!ls OpportunityUCIDataset/dataset/
# Deploy dataset files into training and test directories
#!mkdir -p ../data/test
#!mkdir ../data/train
#%cd OpportunityUCIDataset/dataset/
#!cp S[1-3]-Drill.dat S1-ADL[1-5].dat S2-ADL[1-3].dat S3-ADL[1-3].dat ../../../data/train/
#!cp S[23]-ADL[23].dat ../../../data/test/
#%cd ../../
#!ls ../data/train/
#!ls ../data/test/
"""# 1.Parameters
Adjustable flags and parameters are listed. Hyperparameters for each ML model are in "[F] ML models" section.
|Name|Type|Explanation|
|-|-|-|
|flag_delete_null|Flag|Whether to delete the Null class or not|
|flag_label|Flag|Activity type (gesture or locomotion)|
|flag_(ML model name)|Flag|Whether to execute the model or not|
|flag_experiment|Flag|Whether to run repeated evaluation for summary statistics or not|
|flag_model_load|Flag|Whether to load the model from the file or not|
|flag_model_save|Flag|Whether to save the model to the file after training or not|
|flag_EarlyStopping|Flag|Enable Early stopping|
|flag_es_monitor|Flag|Monitor type for Early stopping|
|ratio_train|Parameter|The ratio between training and validation sets|
|seed|Parameter|Fix the seed for reproducibility|
|flag_scaling|Flag|Scaling technique|
|window_size|Parameter|The length of the sliding window|
|window_step|Parameter|The step of the sliding window|
|flag_sw_label|Flag|Class label of the sliding window|
|flag_balance_*|Flag|Enable data balancing|
|flag_data_dist|Flag|Display dataset distribution|
|flag_interpolate|Flag|Enable interpolation|
|flag_plot_model|Flag|Whether to plot model graphs or not|
|flag_savefig|Flag|Whether to save graphs or not|
|flag_summary|Flag|Show summary of the dataset|
|flag_cm_norm|Flag|Whether to normalise the confusion matrix or not|
|flag_TensorBoard|Flag|Whether to save the TensorBoard data or not|
"""
### [Note]
#
# Hyperparameters for each ML model are in "[F] ML models" section
#
### ---------- ---------- ---------- ---------- ----------
### Flags
flag_delete_null = False
# Label
flag_label = "ML_Both_Arms"
#flag_label = "Locomotion"
# ML
flag_CNN_1d = True
flag_LSTM_Mto1 = True
flag_CNN1D_LSTM = True
flag_ConvLSTM = True
flag_Ensemble = True
flag_experiment = True
flag_model_load = False
flag_model_save = True
flag_EarlyStopping = True
flag_es_monitor = "val_loss"
#flag_es_monitor = "val_accuracy"
### ---------- ---------- ---------- ---------- ----------
### Pre-processing
# Ratio of training dataset to be split
ratio_train = 0.85
# Randam seed for reproducibility
seed = 7
# scaling
flag_scaling = "Std" # for Gaussian
#flag_scaling = "Norm" # (0 - 1)
# Sliding window
window_size = 15
window_step = 8
flag_sw_label = "last"
#flag_sw_label = "mode"
# Data balancing
flag_balance_under1 = False
flag_balance_under2 = False
flag_balance_under3 = False
flag_balance_over1 = False
flag_balance_over2 = False
flag_balance_over3 = False
### ---------- ---------- ---------- ---------- ----------
### Evaluation
flag_data_dist = False
flag_interpolate = True
flag_plot_model = True
flag_savefig = True
flag_summary = True
flag_cm_norm = True
flag_TensorBoard = False
### ---------- ---------- ---------- ---------- ----------
### Directories
dir_log = 'log'
dir_model = 'model'
### ---------- ---------- ---------- ---------- ----------
### Names
# models
modelname_cnn_1d = 'CNN_1D'
modelname_lstm_Mto1 = 'LSTM_Mto1'
modelname_cnn1d_lstm = 'CNN1D_LSTM'
modelname_convlstm = 'ConvLSTM'
modelname_ensemble = 'Ensemble'
modelname_lstm_Mto1_null = 'LSTM_Mto1_null'
# Label list
labels_Loco = ['(Null)',
'Stand',
'Walk',
'Sit',
'Lie']
labels_ML = ['(Null)',
'Open Door 1', 'Open Door 2',
'Close Door 1', 'Close Door 2',
'Open Fridge', 'Close Fridge',
'Open Dishwasher', 'Close Dishwasher',
'Open Drawer 1', 'Close Drawer 1',
'Open Drawer 2', 'Close Drawer 2',
'Open Drawer 3', 'Close Drawer 3',
'Clean Table', 'Drink from Cup', 'Toggle Switch']
### ---------- ---------- ---------- ---------- ----------
"""# 2.Setup
## Import libraries
"""
# Pre-process
import os
import glob
import numpy as np
import random as rn
import pandas as pd
from numpy.lib.stride_tricks import as_strided
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tensorflow.keras.utils import to_categorical
import collections
# Evaluation
from sklearn.metrics import f1_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import time
from datetime import datetime
from numpy import mean, std
from matplotlib import pyplot
import seaborn as sns
sns.set()
# NNs
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# CNN
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv1D, MaxPool1D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.layers import Input, BatchNormalization
from tensorflow.keras.optimizers import Adam
# LSTM
from tensorflow.keras.layers import LSTM, TimeDistributed
from tensorflow.keras import regularizers
# ConvLSTM
from tensorflow.keras.layers import ConvLSTM2D
# Ensemble
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Concatenate
# Set random seed (for reproducibility)
# Hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
# Built-in random
rn.seed(seed)
# Numpy.random
np.random.seed(seed)
# Tensorflow
tf.random.set_seed(seed)
"""## [F] Pre-processing"""
def read_files(files):
for i, file in enumerate(files):
print(f'[{i+1}] Reading file: {file}')
d = pd.read_csv(file, header=None, sep=' ')
# Truncate last residual records for sliding window
mod = (len(d) - window_size) % window_step
d = d[:len(d)-mod]
# Count records with NaN
d_nan = d.isnull().sum()
# Convert NaN
# Linear interpolation
if flag_interpolate:
d.interpolate(inplace=True)
# Convert remaining NaNs into 0
if flag_interpolate:
d.replace(np.nan, 0, inplace=True)
if i == 0:
dataset = d
dataset_nan = d_nan
else:
dataset = pd.concat([dataset, d])
dataset_nan = dataset_nan + d_nan
return dataset, dataset_nan
# Adjust label values (0 to num_classes)
def adjust_idx_labels(data_y):
if flag_label == 'Locomotion':
data_y[data_y == 4] = 3
data_y[data_y == 5] = 4
elif flag_label == 'ML_Both_Arms':
data_y[data_y == 406516] = 1
data_y[data_y == 406517] = 2
data_y[data_y == 404516] = 3
data_y[data_y == 404517] = 4
data_y[data_y == 406520] = 5
data_y[data_y == 404520] = 6
data_y[data_y == 406505] = 7
data_y[data_y == 404505] = 8
data_y[data_y == 406519] = 9
data_y[data_y == 404519] = 10
data_y[data_y == 406511] = 11
data_y[data_y == 404511] = 12
data_y[data_y == 406508] = 13
data_y[data_y == 404508] = 14
data_y[data_y == 408512] = 15
data_y[data_y == 407521] = 16
data_y[data_y == 405506] = 17
return data_y
def sliding_window(data, w_size, w_step):
shape = np.array(data.shape)
# Compute new shape & strides based on window size & step
newshape = ((shape - w_size) // w_step) + 1
newshape = np.append(newshape, [w_size[0], data.shape[1]])
# Original strides * window step
newstrides = np.array(data.strides) * w_step
# For window size & features, set original strides
newstrides = np.append(newstrides, data.strides)
# Create a view for new shape & stride
data_strided = as_strided(data, shape=newshape, strides=newstrides)
# Flatten strided shape
newshape_flatten = [i for i in newshape if i != 1]
return data_strided.reshape(newshape_flatten)
def opp_sliding_window(X, Y):
X = sliding_window(X, (window_size, X.shape[1]), (window_step, 1))
Y = sliding_window(Y, (window_size, Y.shape[1]), (window_step, 1))
return X, Y
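# Shape sketch (added illustration, not in the original notebook): with
# window_size=15 and window_step=8, a (100, 113) sensor matrix yields
# floor((100 - 15) / 8) + 1 = 11 windows, i.e. X -> (11, 15, 113) and a
# (100, 1) label column -> (11, 15).
_demo_X, _demo_Y = opp_sliding_window(np.zeros((100, 113)), np.zeros((100, 1)))
print('sliding window demo shapes:', _demo_X.shape, _demo_Y.shape)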
"""## [F] ML models"""
# Train
epochs = 100
batch_size = 100
repeats = 10
# EarlyStopping
es_patience = 5
"""### CNN"""
# Layer
cnn_padding ='same'
cnn_activation = 'relu'
cnn_units = 128
cnn_dropout = 0.5
cnn_pool_size = 2
## 1D Conv
cnn_1d_filters = 64
cnn_1d_kernel_size = 5
def build_model_cnn_1d():
model = Sequential(name=modelname_cnn_1d)
# Conv layer 1
model.add(Conv1D(
input_shape = cnn_1d_input_shape,
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 2
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 3
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 4
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Maxpool layer
# model.add(MaxPool1D(
# pool_size = cnn_pool_size))
model.add(Flatten())
# Dense layer 1
model.add(Dense(
units = cnn_units,
activation = 'relu'))
# Dropout
model.add(Dropout(cnn_dropout))
# Dense layer 2
model.add(Dense(
units = cnn_units,
activation = 'relu'))
# Dropout
model.add(Dropout(cnn_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### LSTM"""
# Layer
lstm_units = 128
lstm_dropout = 0.5
lstm_weight_decay = 1e-4
# LSTM (Many-to-One, stateless)
def build_model_lstm_Mto1():
model = Sequential(name=modelname_lstm_Mto1)
# LSTM layer
model.add(LSTM(
input_shape = lstm_input_shape,
units = lstm_units,
# kernel_regularizer = regularizers.l2(lstm_weight_decay),
return_sequences = False)) # final layer of LSTM (only final output)
# Dropout
model.add(Dropout(lstm_dropout))
# Dense layer
model.add(Dense(
units = lstm_units,
activation = 'relu'))
# Dropout
model.add(Dropout(lstm_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### CNN-LSTM"""
# Data
cnn_lstm_steps = 3
cnn_lstm_length = int(window_size / cnn_lstm_steps)
# Layer
cnn_lstm_padding = 'same'
cnn_lstm_activation = 'relu'
cnn_lstm_dropout = 0.5
cnn_lstm_pool_size = 2
## CNN
cnn_lstm_filters = 64
cnn1d_lstm_kernel_size = 3
# LSTM
cnn_lstm_units = 128
def build_model_cnn1d_lstm():
model = Sequential(name=modelname_cnn1d_lstm)
## CNN (with TimeDistributed)
# Conv layer 1
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation),
input_shape = cnn1d_lstm_input_shape))
# Conv layer 2
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Conv layer 3
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Conv layer 4
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Dropout
model.add(TimeDistributed(Dropout(cnn_lstm_dropout)))
# Maxpool layer
model.add(TimeDistributed(MaxPool1D(
pool_size = cnn_lstm_pool_size)))
model.add(TimeDistributed(Flatten()))
## LSTM
# LSTM layer 1
model.add(LSTM(
units = cnn_lstm_units,
return_sequences = True))
# Dropout
model.add(Dropout(cnn_lstm_dropout))
# LSTM layer 2
model.add(LSTM(
units = cnn_lstm_units,
return_sequences = False))
# Dropout
model.add(Dropout(cnn_lstm_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### ConvLSTM"""
# Data
convlstm_steps = 3
convlstm_length = int(window_size / convlstm_steps)
# Layer
convlstm_padding = 'same'
convlstm_activation = 'relu'
convlstm_dropout = 0.5
convlstm_pool_size = 2
## CNN
convlstm_filters = 64
convlstm_kernel_size = (1, 3)
convlstm_units = 128
def build_model_convlstm():
model = Sequential(name=modelname_convlstm)
# Conv LSTM layer 1
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
input_shape = convlstm_input_shape,
return_sequences = True))
# return_sequences = False)) # final layer of LSTM (only final output)
# Conv LSTM layer 2
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = True))
# Conv LSTM layer 3
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = True))
# Conv LSTM layer 4
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = False))
# Dropout
model.add(Dropout(convlstm_dropout))
model.add(Flatten())
# Dense layer
model.add(Dense(
units = convlstm_units,
activation = convlstm_activation))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### Ensemble"""
# Layer
ensemble_units = 10
ensemble_activation = 'relu'
def build_model_ensemble(inputs, outputs):
ensemble_merge = Concatenate(axis=1)(outputs)
# Dense layer
ensemble_hidden = Dense(
units = ensemble_units,
activation = ensemble_activation)(ensemble_merge)
# Output layer
ensemble_output = Dense(
units = num_classes,
activation = 'softmax')(ensemble_hidden)
model = Model(
inputs = inputs,
outputs = ensemble_output,
name = modelname_ensemble)
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""## [F] Evaluation"""
# Train and evaluate a model (once)
def evaluate_model(model_name, X_train, y_train, X_val, y_val, X_test, y_test):
# Build model
if model_name == modelname_cnn_1d:
model = build_model_cnn_1d()
elif model_name == modelname_lstm_Mto1:
model = build_model_lstm_Mto1()
elif model_name == modelname_cnn1d_lstm:
model = build_model_cnn1d_lstm()
elif model_name == modelname_convlstm:
model = build_model_convlstm()
else:
print("Error: specify correct model name")
return -1
# Train
history = model.fit(
x = X_train,
y = y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 0,
callbacks = [cb],
validation_data = (X_val, y_val)
)
num_epochs = len(history.history['loss'])
## Evaluate
# Accuracy
_, accuracy = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
# F1
y_pred = model.predict(X_test)
f1 = f1_score(y_test.argmax(axis=-1), y_pred.argmax(axis=-1), average='weighted')
return accuracy, f1, num_epochs
# Repeat experiment
def run_experiment(model_name, X_train, X_val, X_test, y_train, y_val, y_test, repeats=10):
print(f'Model: {model_name}')
scores_acc = []
scores_f1 = []
scores_epoch = []
for r in range(repeats):
acc, f1, epoch = evaluate_model(model_name, X_train, y_train, X_val, y_val, X_test, y_test)
print(f'[#{r+1:>2d}] Accuracy: {acc:.3f}, F1 score(weighted): {f1:.3f}, epoch: {epoch}')
scores_acc.append(acc)
scores_f1.append(f1)
scores_epoch.append(epoch)
# Summarise mean and standard deviation
print(f'Accuracy: {mean(scores_acc):.3f} (+/- {std(scores_acc):.3f})')
print(f'F1 score(weighted): {mean(scores_f1):.3f} (+/- {std(scores_f1):.3f})')
print(f'epoch: {mean(scores_epoch):.1f} (+/- {std(scores_epoch):.3f})')
# Boxplot of scores
metrics_list = ['Accuracy', 'F1 score']
all_scores = []
all_scores.append(scores_acc)
all_scores.append(scores_f1)
plt.boxplot(all_scores, labels=metrics_list)
if flag_savefig:
plt.savefig("boxplot_" + model_name + ".png")
plt.show()
# Plot a histogram of each variable in the dataset
def plot_variable_distributions(X, start=0, end=None, xlim=None):
if end is None:
end = X.shape[1]-1
print(X.shape)
num_features = end - start +1
print(f'# of plots: {num_features} ({start} - {end})')
plt.figure(figsize=(10, 2*num_features), tight_layout=True)
xaxis = None
for i, f in enumerate(range(start, end+1)):
print(i)
if xlim is None:
ax = plt.subplot(num_features, 1, i+1, title='Feature: ' + str(f))
else:
ax = plt.subplot(num_features, 1, i+1, sharex=xaxis, title='Feature: ' + str(f))
ax.set_xlim(xlim)
if i == 0:
xaxis = ax
plt.hist(X[:, f], bins=100)
plt.show()
# Plot graphs for loss and accuracy
def plot_acc_graph(history):
# Set figure size
fig = plt.figure(figsize=(15, 6))
plt.subplots_adjust(wspace=0.2)
# Loss
plt.subplot(1,2,1)
plt.plot(history.history['loss'],
label='Train',
color='black')
plt.plot(history.history['val_loss'],
label='Val',
color='red')
#plt.ylim(0, 1)
plt.legend()
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
# Accuracy
plt.subplot(1,2,2)
plt.plot(history.history['accuracy'],
label='Train',
color='black')
plt.plot(history.history['val_accuracy'],
label='Val',
color='red')
plt.ylim(0, 1)
plt.legend()
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
if flag_savefig:
plt.savefig("acc_graph_" + history.model.name + ".png")
plt.show()
# Print execution time
def print_execution_time(time_start):
time_elapsed = time.perf_counter() - time_start
min, sec = divmod(time_elapsed, 60)
hour, min = divmod(min, 60)
print(f"Execution time: {hour:.0f} hour {min:.0f} min {sec:.0f} sec")
"""# 3.Pre-processing"""
# For CSF3 (UoM) setting
import platform
system_name = platform.system()
print('system_name: ' + system_name)
if system_name == "Linux":
flag_summary = False
flag_cm_norm = False
flag_plot_model = False
"""## Read files"""
# dataset files (train & test)
files_train = glob.glob('../data/train/*.dat')
files_test = glob.glob('../data/test/*.dat')
# Read datafiles (if not yet)
if not 'R_dataset_train' in locals():
R_dataset_train, nan_train = read_files(files_train)
R_dataset_test, nan_test = read_files(files_test)
# Discard null action records
if flag_delete_null:
if flag_label == 'Locomotion':
dataset_train = R_dataset_train[R_dataset_train.iloc[:, 243] != 0]
dataset_test = R_dataset_test[R_dataset_test.iloc[:, 243] != 0]
elif flag_label == 'ML_Both_Arms':
dataset_train = R_dataset_train[R_dataset_train.iloc[:, 249] != 0]
dataset_test = R_dataset_test[R_dataset_test.iloc[:, 249] != 0]
else:
dataset_train = R_dataset_train
dataset_test = R_dataset_test
# Balancing data 1 (After reading files)
if flag_balance_under1:
if flag_label == 'Locomotion':
idx_label = 243
elif flag_label == 'ML_Both_Arms':
idx_label = 249
min_train = dataset_train.iloc[:, idx_label].value_counts().min()
dataset_train_np = dataset_train.to_numpy()
for i in dataset_train.iloc[:, idx_label].unique():
dataset_train_np = np.delete(dataset_train_np, np.where(dataset_train_np[:, idx_label] == i)[0][min_train:], axis=0)
dataset_train = pd.DataFrame(dataset_train_np)
"""## Divide X / Y
(features and labels)
"""
## Features (X)
# Strip unnecessary columns
# (following opportunity challenge specification)
X_train = pd.concat([
dataset_train.iloc[:, 1:46], # (included:excluded)
dataset_train.iloc[:, 50:59],
dataset_train.iloc[:, 63:72],
dataset_train.iloc[:, 76:85],
dataset_train.iloc[:, 89:98],
dataset_train.iloc[:, 102:134]],
axis=1)
X_test = pd.concat([
dataset_test.iloc[:, 1:46],
dataset_test.iloc[:, 50:59],
dataset_test.iloc[:, 63:72],
dataset_test.iloc[:, 76:85],
dataset_test.iloc[:, 89:98],
dataset_test.iloc[:, 102:134]],
axis=1)
## Labels (Y)
# from last 7 columns
if flag_label == 'Locomotion':
y_train = dataset_train.iloc[:,243]
y_test = dataset_test.iloc[:,243]
elif flag_label == 'ML_Both_Arms':
y_train = dataset_train.iloc[:,249]
y_test = dataset_test.iloc[:,249]
y_train = y_train.rename('Label')
y_test = y_test.rename('Label')
num_features = len(X_train.columns)
# Input shape of NNs
cnn_1d_input_shape = (window_size, num_features)
lstm_input_shape = (window_size, num_features)
cnn1d_lstm_input_shape = (None, cnn_lstm_length, num_features)
convlstm_input_shape = (convlstm_steps, 1, convlstm_length, num_features)
"""## (Distributions)"""
if flag_data_dist:
plot_variable_distributions(X_train.to_numpy(), start=0, end=29)
plot_variable_distributions(X_train.to_numpy(), start=30, end=59)
plot_variable_distributions(X_train.to_numpy(), start=60, end=89)
plot_variable_distributions(X_train.to_numpy(), start=90, end=112)
"""## Encode labels"""
## Encode label (one-hot)
# Adjust label values for to_categorical()
y_train = adjust_idx_labels(y_train)
y_test = adjust_idx_labels(y_test)
# Convert class vector (int) to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_train = pd.DataFrame(y_train)
y_test = pd.DataFrame(y_test)
"""## Split train / val"""
# Split into train and val (No shuffle)
X_train, X_val, y_train, y_val = \
train_test_split(X_train, y_train,
train_size=ratio_train,
random_state=seed,
shuffle=False)
# Balancing data 2 (After splitting train, val, and test)
if flag_balance_under2:
min_train = y_train.value_counts().min()
X_train = X_train.to_numpy()
y_train = y_train.to_numpy()
y_train_n = y_train.argmax(axis=-1)
for i in range(len(np.unique(y_train_n))):
X_train = np.delete(X_train, np.where(y_train_n == i)[0][min_train:], axis=0)
y_train_n = np.delete(y_train_n, np.where(y_train_n == i)[0][min_train:], axis=0)
y_train = to_categorical(y_train_n)
X_train = pd.DataFrame(X_train)
y_train = pd.DataFrame(y_train)
# The number of classes
num_classes = len(y_train.columns)
# label list (for classification_report, confusion_matrix)
if flag_label == 'Locomotion':
labels_cr = labels_Loco
labels_cm = labels_Loco
elif flag_label == 'ML_Both_Arms':
labels_cr = labels_ML
labels_cm = labels_ML
if flag_delete_null:
labels_cr = np.delete(labels_cr, 0)
# labels_cm = np.delete(labels_cm, 0)
# confusion_matrix
labels = np.arange(0, num_classes)
"""## Scaling
"""
if flag_scaling == "Norm":
scaler = MinMaxScaler()
elif flag_scaling == "Std":
scaler = StandardScaler()
# Fit the scaler on the training data (to avoid data leakage)
scaler.fit(X_train)
# Scale (to numpy)
X_train = scaler.transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
# Convert to numpy
y_train = y_train.to_numpy()
y_val = y_val.to_numpy()
y_test = y_test.to_numpy()
"""## (Distributions)"""
if flag_data_dist:
plot_variable_distributions(X_train, start=0, end=29)
plot_variable_distributions(X_train, start=30, end=59)
plot_variable_distributions(X_train, start=60, end=89)
plot_variable_distributions(X_train, start=90, end=112)
"""## Sliding window"""
X_train_sw, y_train_sw = opp_sliding_window(X_train, y_train)
X_val_sw, y_val_sw = opp_sliding_window(X_val, y_val)
X_test_sw, y_test_sw = opp_sliding_window(X_test, y_test)
if flag_sw_label == "last":
# last class of each sliding window
y_train_sw_label = np.asarray([[i[-1]] for i in y_train_sw]).reshape(-1, y_train_sw.shape[-1])
y_val_sw_label = np.asarray([[i[-1]] for i in y_val_sw]).reshape(-1, y_val_sw.shape[-1])
y_test_sw_label = np.asarray([[i[-1]] for i in y_test_sw]).reshape(-1, y_test_sw.shape[-1])
elif flag_sw_label == "mode":
# mode in each sliding window
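    # e.g., a window whose one-hot rows decode to classes [2, 2, 3, 2]
    # gets the single label 2 (the most frequent class in that window).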
y_train_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_train_sw])
y_train_sw_label = to_categorical(y_train_sw_mode)
y_val_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_val_sw])
y_val_sw_label = to_categorical(y_val_sw_mode)
y_test_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_test_sw])
y_test_sw_label = to_categorical(y_test_sw_mode)
# For evaluation
y_test_classes_sw_label = y_test_sw_label.argmax(axis=-1)
# Balancing data 3 (After sliding window)
if flag_balance_under3:
y_train_sw_n = y_train_sw_label.argmax(axis=-1)
min_train = pd.DataFrame(y_train_sw_label).value_counts().min()
for i in range(num_classes):
X_train_sw = np.delete(X_train_sw, np.where(y_train_sw_n == i)[0][min_train:], axis=0)
y_train_sw_n = np.delete(y_train_sw_n, np.where(y_train_sw_n == i)[0][min_train:], axis=0)
y_train_sw_label = to_categorical(y_train_sw_n)
elif flag_balance_over3:
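    # Oversampling sketch: pre-allocate room for num_labels * max_train windows,
    # copy the originals in, then top each minority class up to max_train by
    # re-sampling its own windows with replacement (pure duplication, no
    # synthetic samples).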
y_train_sw_n = y_train_sw_label.argmax(axis=-1)
max_train = pd.DataFrame(y_train_sw_n)[0].value_counts().max()
num_labels = np.unique(y_train_sw_n).size
X_train_sw_balanced = np.empty((num_labels * max_train, X_train_sw.shape[1], X_train_sw.shape[2]))
y_train_sw_balanced = np.empty((num_labels * max_train, y_train_sw.shape[1], y_train_sw.shape[2]))
y_train_sw_label_balanced = np.empty((num_labels * max_train, y_train_sw_label.shape[1]))
X_train_sw_balanced[:X_train_sw.shape[0]] = X_train_sw
y_train_sw_balanced[:y_train_sw.shape[0]] = y_train_sw
y_train_sw_label_balanced[:y_train_sw_label.shape[0]] = y_train_sw_label
l = X_train_sw.shape[0]
for c in np.unique(y_train_sw_n):
num = np.count_nonzero(y_train_sw_n == c)
if max_train > num:
num_diff = max_train - num
idx_c = np.where(y_train_sw_n == c)[0]
idx_add = np.random.choice(idx_c, num_diff, replace=True)
for i in idx_add:
X_train_sw_balanced[l] = X_train_sw[i]
y_train_sw_balanced[l] = y_train_sw[i]
y_train_sw_label_balanced[l] = y_train_sw_label[i]
l += 1
X_train_sw = X_train_sw_balanced
y_train_sw = y_train_sw_balanced
y_train_sw_label = y_train_sw_label_balanced
"""## [Summary]"""
if flag_summary:
# The number of samples (train, val, test)
num_samples = len(X_train) + len(X_val) + len(X_test)
num_train = X_train.shape[0]
num_val = X_val.shape[0]
num_test = X_test.shape[0]
num_classes_train = y_train.shape[-1]
num_classes_val = y_val.shape[-1]
num_classes_test = y_test.shape[-1]
y_counts = pd.concat([np.flip(pd.DataFrame(y_train).value_counts(sort=False)),
np.flip(pd.DataFrame(y_val).value_counts(sort=False)),
np.flip(pd.DataFrame(y_test).value_counts(sort=False))],
axis=1)
y_counts = y_counts.style.hide_index().highlight_null('red').set_precision(0)
y_counts_sw = pd.concat([np.flip(pd.DataFrame(y_train_sw_label).value_counts(sort=False)),
np.flip(pd.DataFrame(y_val_sw_label).value_counts(sort=False)),
np.flip(pd.DataFrame(y_test_sw_label).value_counts(sort=False))],
axis=1)
y_counts_sw = y_counts_sw.style.hide_index().highlight_null('red').set_precision(0)
print('[# of samples]')
print(f'Total: {num_samples:>7,}')
print(f'Train: {num_train:>7,}')
print(f'Val: {num_val:>7,}')
print(f'Test: {num_test:>7,}')
print()
print('[After sliding window]')
print(f'Train: {X_train_sw.shape[0]:>7,}')
print(f'Val: {X_val_sw.shape[0]:>7,}')
print(f'Test: {X_test_sw.shape[0]:>7,}')
print()
print('[# of features]')
print(f'{num_features}')
print()
print('[# of classes]')
print(f'Total: {num_classes}')
print(f'Train: {num_classes_train}')
print(f'Val: {num_classes_val}')
print(f'Test: {num_classes_test}')
print()
print('[Original data]')
print('Train Val Test')
display(y_counts)
print()
print('[After sliding window]')
print('Train Val Test')
display(y_counts_sw)
"""# 4.Train & Test
"""
## callbacks
cb = []
# EarlyStopping
if flag_EarlyStopping:
es = EarlyStopping(monitor=flag_es_monitor, patience=es_patience)
cb.append(es)
# TensorBoard
if flag_TensorBoard:
log_dir = dir_log + '/tb/' + datetime.utcnow().strftime('%Y%m%d-%H%M%S')
tb = TensorBoard(log_dir=log_dir, histogram_freq=1)
cb.append(tb)
# Train, val, test sets
X_train = X_train_sw
X_val = X_val_sw
X_test = X_test_sw
y_train = y_train_sw_label
y_val = y_val_sw_label
y_test = y_test_sw_label
y_test_classes = y_test_classes_sw_label
"""## CNN
"""
if flag_CNN_1d:
if flag_model_load:
model_cnn_1d = load_model(dir_model + "/" + modelname_cnn_1d + ".h5")
else:
model_cnn_1d = build_model_cnn_1d()
model_cnn_1d.summary()
if flag_plot_model:
plot_model(model_cnn_1d, show_shapes=True, to_file='model_' + model_cnn_1d.name + '.png')
if flag_CNN_1d:
if not flag_model_load:
time_start = time.perf_counter()
history_cnn_1d = model_cnn_1d.fit(
x = X_train,
y = y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
callbacks = [cb],
validation_data = (X_val, y_val)
)
print_execution_time(time_start)
if flag_CNN_1d:
if not flag_model_load:
plot_acc_graph(history_cnn_1d)
if flag_CNN_1d:
if flag_model_save:
time_current = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
model_cnn_1d.save(dir_model + '/' + model_cnn_1d.name + '_' + time_current + ".h5", overwrite=False)
if flag_CNN_1d:
y_pred_classes = model_cnn_1d.predict(X_test).argmax(axis=-1)
f1 = f1_score(y_test_classes, y_pred_classes, average='weighted')
print(f'F1 score(weighted) {f1:.3f}\n')
print(classification_report(y_test_classes, y_pred_classes, target_names=labels_cr))
if flag_cm_norm:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels, normalize='true')
cm = cm * 100
else:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels)
df_cm = pd.DataFrame(cm, index=labels_cm, columns=labels_cm)
plt.figure(figsize=(10,10))
sns.heatmap(df_cm, square=True, annot=True, cbar=False, fmt='.0f', cmap='Greens')
plt.xlabel('Predicted')
plt.ylabel('True')
if flag_savefig:
plt.savefig("confusion_matrix_" + model_cnn_1d.name + ".png")
plt.plot()
"""## LSTM"""
if flag_LSTM_Mto1:
if flag_model_load:
model_lstm_Mto1 = load_model(dir_model + "/" + modelname_lstm_Mto1 + ".h5")
else:
model_lstm_Mto1 = build_model_lstm_Mto1()
model_lstm_Mto1.summary()
if flag_plot_model:
plot_model(model_lstm_Mto1, show_shapes=True, to_file='model_' + model_lstm_Mto1.name + '.png')
if flag_LSTM_Mto1:
if not flag_model_load:
time_start = time.perf_counter()
history_lstm_Mto1 = model_lstm_Mto1.fit(
X_train,
y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
callbacks = [cb],
validation_data = (X_val, y_val)
)
print_execution_time(time_start)
if flag_LSTM_Mto1:
if not flag_model_load:
plot_acc_graph(history_lstm_Mto1)
if flag_LSTM_Mto1:
if flag_model_save:
time_current = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
model_lstm_Mto1.save(dir_model + '/' + model_lstm_Mto1.name + '_' + time_current + ".h5", overwrite=False)
if flag_LSTM_Mto1:
y_pred_classes = model_lstm_Mto1.predict(X_test).argmax(axis=-1)
f1 = f1_score(y_test_classes, y_pred_classes, average='weighted')
print(f'F1 score(weighted) {f1:.3f}\n')
print(classification_report(y_test_classes, y_pred_classes, target_names=labels_cr))
if flag_cm_norm:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels, normalize='true')
cm = cm * 100
else:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels)
df_cm = pd.DataFrame(cm, index=labels_cm, columns=labels_cm)
plt.figure(figsize=(10,10))
sns.heatmap(df_cm, square=True, annot=True, cbar=False, fmt='.0f', cmap='Greens')
plt.xlabel('Predicted')
plt.ylabel('True')
if flag_savefig:
plt.savefig("confusion_matrix_" + model_lstm_Mto1.name + ".png")
plt.plot()
"""## CNN-LSTM"""
# Reshape data into 4D for CNN1D-LSTM
X_train_cnn1d_lstm = X_train.reshape((X_train.shape[0], cnn_lstm_steps, cnn_lstm_length, X_train.shape[-1]))
X_val_cnn1d_lstm = X_val.reshape((X_val.shape[0], cnn_lstm_steps, cnn_lstm_length, X_val.shape[-1]))
X_test_cnn1d_lstm = X_test.reshape((X_test.shape[0], cnn_lstm_steps, cnn_lstm_length, X_test.shape[-1]))
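# Illustrative example (numbers are assumptions): if window_size were 24 with
# cnn_lstm_steps = 3, each (24, num_features) window becomes a sequence of
# 3 sub-windows of shape (8, num_features) that the TimeDistributed Conv1D
# stack processes independently before the LSTM layers aggregate them.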
if flag_CNN1D_LSTM:
if flag_model_load:
model_cnn1d_lstm = load_model(dir_model + "/" + modelname_cnn1d_lstm + ".h5")
else:
model_cnn1d_lstm = build_model_cnn1d_lstm()
model_cnn1d_lstm.summary()
if flag_plot_model:
plot_model(model_cnn1d_lstm, show_shapes=True, to_file='model_' + model_cnn1d_lstm.name + '.png')
if flag_CNN1D_LSTM:
if not flag_model_load:
time_start = time.perf_counter()
history_cnn1d_lstm = model_cnn1d_lstm.fit(
X_train_cnn1d_lstm,
y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
callbacks = [cb],
validation_data = (X_val_cnn1d_lstm, y_val)
)
print_execution_time(time_start)
if flag_CNN1D_LSTM:
if not flag_model_load:
plot_acc_graph(history_cnn1d_lstm)
if flag_CNN1D_LSTM:
if flag_model_save:
time_current = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
model_cnn1d_lstm.save(dir_model + '/' + model_cnn1d_lstm.name + '_' + time_current + ".h5", overwrite=False)
if flag_CNN1D_LSTM:
y_pred_classes = model_cnn1d_lstm.predict(X_test_cnn1d_lstm).argmax(axis=-1)
f1 = f1_score(y_test_classes, y_pred_classes, average='weighted')
print(f'F1 score(weighted) {f1:.3f}\n')
print(classification_report(y_test_classes, y_pred_classes, target_names=labels_cr))
if flag_cm_norm:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels, normalize='true')
cm = cm * 100
else:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels)
df_cm = pd.DataFrame(cm, index=labels_cm, columns=labels_cm)
plt.figure(figsize=(10,10))
sns.heatmap(df_cm, square=True, annot=True, cbar=False, fmt='.0f', cmap='Greens')
plt.xlabel('Predicted')
plt.ylabel('True')
if flag_savefig:
plt.savefig("confusion_matrix_" + model_cnn1d_lstm.name + ".png")
plt.plot()
"""## ConvLSTM"""
# Reshape data into 5D for ConvLSTM
X_train_convlstm = X_train.reshape((X_train.shape[0], convlstm_steps, 1, convlstm_length, X_train.shape[-1]))
X_val_convlstm = X_val.reshape((X_val.shape[0], convlstm_steps, 1, convlstm_length, X_val.shape[-1]))
X_test_convlstm = X_test.reshape((X_test.shape[0], convlstm_steps, 1, convlstm_length, X_test.shape[-1]))
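# Same idea as the CNN1D-LSTM reshape above, plus a singleton "rows" axis,
# because ConvLSTM2D expects one (rows, cols, channels) frame per time step;
# here each frame is (1, convlstm_length, num_features).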
if flag_ConvLSTM:
if flag_model_load:
model_convlstm = load_model(dir_model + "/" + modelname_convlstm + ".h5")
else:
model_convlstm = build_model_convlstm()
model_convlstm.summary()
if flag_plot_model:
plot_model(model_convlstm, show_shapes=True, to_file='model_' + model_convlstm.name + '.png')
if flag_ConvLSTM:
if not flag_model_load:
time_start = time.perf_counter()
history_convlstm = model_convlstm.fit(
X_train_convlstm,
y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
callbacks = [cb],
validation_data = (X_val_convlstm, y_val)
)
print_execution_time(time_start)
if flag_ConvLSTM:
if not flag_model_load:
plot_acc_graph(history_convlstm)
if flag_ConvLSTM:
if flag_model_save:
time_current = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
model_convlstm.save(dir_model + '/' + model_convlstm.name + '_' + time_current + ".h5", overwrite=False)
if flag_ConvLSTM:
y_pred_classes = model_convlstm.predict(X_test_convlstm).argmax(axis=-1)
f1 = f1_score(y_test_classes, y_pred_classes, average='weighted')
print(f'F1 score(weighted) {f1:.3f}\n')
print(classification_report(y_test_classes, y_pred_classes, target_names=labels_cr))
if flag_cm_norm:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels, normalize='true')
cm = cm * 100
else:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels)
df_cm = pd.DataFrame(cm, index=labels_cm, columns=labels_cm)
plt.figure(figsize=(10,10))
sns.heatmap(df_cm, square=True, annot=True, cbar=False, fmt='.0f', cmap='Greens')
plt.xlabel('Predicted')
plt.ylabel('True')
if flag_savefig:
plt.savefig("confusion_matrix_" + model_convlstm.name + ".png")
plt.plot()
"""## Ensemble"""
if flag_Ensemble:
# build sub-models
sub_models = list()
X_train_ensemble = list()
X_val_ensemble = list()
X_test_ensemble = list()
if flag_CNN_1d:
sub_models.append(model_cnn_1d)
X_train_ensemble.append(X_train)
X_val_ensemble.append(X_val)
X_test_ensemble.append(X_test)
if flag_LSTM_Mto1:
sub_models.append(model_lstm_Mto1)
X_train_ensemble.append(X_train)
X_val_ensemble.append(X_val)
X_test_ensemble.append(X_test)
if flag_CNN1D_LSTM:
sub_models.append(model_cnn1d_lstm)
X_train_ensemble.append(X_train_cnn1d_lstm)
X_val_ensemble.append(X_val_cnn1d_lstm)
X_test_ensemble.append(X_test_cnn1d_lstm)
if flag_ConvLSTM:
sub_models.append(model_convlstm)
X_train_ensemble.append(X_train_convlstm)
X_val_ensemble.append(X_val_convlstm)
X_test_ensemble.append(X_test_convlstm)
# freeze parameters for sub-models
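    # Freezing keeps the pre-trained sub-model weights fixed so that only the
    # ensemble head defined in build_model_ensemble is trained; renaming the
    # layers avoids duplicate layer-name clashes when the sub-models are
    # merged into a single Model.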
for i in range(len(sub_models)):
for layer in sub_models[i].layers:
layer.trainable = False
layer._name = 'ensemble_' + sub_models[i].name + '_' + layer.name
inputs = [model.input for model in sub_models]
outputs = [model.output for model in sub_models]
model_ensemble = build_model_ensemble(inputs, outputs)
model_ensemble.summary()
if flag_plot_model:
plot_model(model_ensemble, show_shapes=True, to_file='model_' + model_ensemble.name + '.png')
if flag_Ensemble:
history_ensemble = model_ensemble.fit(
x = X_train_ensemble,
y = y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
callbacks = [cb],
validation_data = (X_val_ensemble, y_val)
)
if flag_Ensemble:
plot_acc_graph(history_ensemble)
if flag_Ensemble:
y_pred_classes = model_ensemble.predict(X_test_ensemble).argmax(axis=-1)
f1 = f1_score(y_test_classes, y_pred_classes, average='weighted')
print(f'F1 score(weighted) {f1:.3f}\n')
print(classification_report(y_test_classes, y_pred_classes, target_names=labels_cr))
if flag_cm_norm:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels, normalize='true')
cm = cm * 100
else:
cm = confusion_matrix(y_test_classes, y_pred_classes, labels=labels)
df_cm = pd.DataFrame(cm, index=labels_cm, columns=labels_cm)
plt.figure(figsize=(10,10))
sns.heatmap(df_cm, square=True, annot=True, cbar=False, fmt='.0f', cmap='Greens')
plt.xlabel('Predicted')
plt.ylabel('True')
if flag_savefig:
plt.savefig("confusion_matrix_" + model_ensemble.name + ".png")
plt.plot()
"""# 5.Experiment"""
if flag_experiment:
time_start = time.perf_counter()
print(f'repeats: {repeats}, epochs: {epochs}, batch_size: {batch_size}')
if flag_EarlyStopping:
print(f' EarlyStopping is set (patience: {es_patience})\n')
else:
print(f' no EarlyStopping\n')
## CNN 1D
run_experiment(modelname_cnn_1d,
X_train, X_val, X_test,
y_train, y_val, y_test,
repeats=repeats)
## LSTM Mto1
run_experiment(modelname_lstm_Mto1,
X_train, X_val, X_test,
y_train, y_val, y_test,
repeats=repeats)
## CNN1D-LSTM
run_experiment(modelname_cnn1d_lstm,
X_train_cnn1d_lstm, X_val_cnn1d_lstm, X_test_cnn1d_lstm,
y_train, y_val, y_test,
repeats=repeats)
## ConvLSTM
run_experiment(modelname_convlstm,
X_train_convlstm, X_val_convlstm, X_test_convlstm,
y_train, y_val, y_test,
repeats=repeats)
print_execution_time(time_start)
|
import shutil
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
BertModel,
BertTokenizer,
AdamW
)
import argparse
import os
from tqdm import tqdm
import requests
from torchtext.utils import download_from_url, extract_archive
from torchtext.datasets.text_classification import URLS
import sys
import logging
def run_pipeline(input_options):
"""
    This method downloads the dataset and extracts it along with the vocab file
:param input_options: Input arg parameters
"""
dataset_tar = download_from_url(
URLS["AG_NEWS"], root=input_options["output"])
extracted_files = extract_archive(dataset_tar)
if not os.path.isfile(input_options["VOCAB_FILE"]):
filePointer = requests.get(
input_options["VOCAB_FILE_URL"], allow_redirects=True)
if filePointer.ok:
with open(input_options["VOCAB_FILE"], "wb") as f:
f.write(filePointer.content)
else:
raise RuntimeError("Error in fetching the vocab file")
def PrintOptions(options):
"""
Logging for debugging
"""
for a in options.items():
print(a)
def run_pipeline_component(options):
"""
Method called from entry point to execute the pipeline
"""
print("Running data prep job from container")
logging.getLogger().setLevel(logging.INFO)
PrintOptions(options)
run_pipeline(
options
)
# if __name__ == "__main__":
# run_pipeline_component({
# "output": "./",
# "VOCAB_FILE": "bert_base_uncased_vocab.txt",
# "VOCAB_FILE_URL": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"
# })
|
"""
A Product specifies a persistent object on disk, such as a file in the local
filesystem or a table in a database. Each Product is uniquely identified:
for example, a file can be specified using an absolute path, and a table can
be fully specified by a database, a schema and a name. Names are lazily
evaluated; they can be built from templates.
"""
import abc
import logging
from ploomber.products.metadata import Metadata
from ploomber.products._resources import process_resources
def _prepare_metadata(metadata):
return metadata
class Product(abc.ABC):
"""
Abstract class for all Products
Attributes
----------
prepare_metadata : callable
        A hook to execute before saving metadata. It should accept a
        "metadata" parameter and may accept "product". "metadata" will be a
        dictionary with the metadata to save; changing the existing keys is
        not recommended, but additional key-value pairs may be
        included
"""
# TODO: previously, File didn't have a client parameter but it does now,
# it's best to include it here to simplify the constructors in the concrete
# classes
def __init__(self, identifier):
self._identifier = self._init_identifier(identifier)
if self._identifier is None:
raise TypeError('_init_identifier must return a value, returned '
'None')
self.task = None
self.logger = logging.getLogger('{}.{}'.format(__name__,
type(self).__name__))
self._outdated_data_dependencies_status = None
self._outdated_code_dependency_status = None
self._is_outdated_status = None
# not all products have clients, but they should still have a client
# property to keep the API consistent
self._client = None
self.metadata = Metadata(self)
self.prepare_metadata = _prepare_metadata
@property
def task(self):
if self._task is None:
raise ValueError('This product has not been assigned to any Task')
return self._task
@property
def client(self):
return self._client
@task.setter
def task(self, value):
self._task = value
def render(self, params, **kwargs):
"""
        Render Product - this will render the contents of Templates used as
        the identifier for this Product; if a regular string was passed, this
        method has no effect
"""
self._identifier.render(params, **kwargs)
def _is_outdated(self, outdated_by_code=True):
"""
Given current conditions, determine if the Task that holds this
Product should be executed
Returns
-------
bool
True if the Task should execute
"""
if self._is_outdated_status is None:
self._is_outdated_status = self._check_is_outdated(
outdated_by_code)
return self._is_outdated_status
def _is_remote_outdated(self, outdated_by_code):
"""
        Check if the remote version of this Product is outdated. Note that
        this is only valid for File products (which have their own
        implementation). For non-File products (e.g., SQL products), there
        isn't a notion of "remote version", since they're already remote;
        hence we simply use the existing implementation
"""
return self._is_outdated(outdated_by_code=outdated_by_code)
def _check_is_outdated(self, outdated_by_code):
# check product...
p_exists = self.exists()
# check dependencies only if the product exists
if p_exists:
            outdated_data = self._outdated_data_dependencies()
            outdated_code = (outdated_by_code
                             and self._outdated_code_dependency())
            run = outdated_data or outdated_code
if run:
self.logger.info(
'Task "%s" is outdated, it will be executed...',
self.task.name)
else:
self.logger.info(
'Task "%s" is up-to-date, it will be skipped...',
self.task.name)
return run
else:
self.logger.info(
'Product of task "%s" does not exist, it will be executed...',
self.task.name)
return True
def _outdated_data_dependencies(self):
"""
Determine if the product is outdated by checking upstream timestamps
"""
if self._outdated_data_dependencies_status is not None:
self.logger.debug(('Returning cached data dependencies status. '
'Outdated? %s'),
self._outdated_data_dependencies_status)
return self._outdated_data_dependencies_status
outdated = any([
self._is_outdated_due_to_upstream(up.product)
for up in self.task.upstream.values()
])
self._outdated_data_dependencies_status = outdated
self.logger.debug(('Finished checking data dependencies status. '
'Outdated? %s'),
self._outdated_data_dependencies_status)
return self._outdated_data_dependencies_status
def _is_outdated_due_to_upstream(self, up_prod):
"""
A task becomes data outdated if an upstream product has a higher
timestamp or if an upstream product is outdated
"""
if (self.metadata.timestamp is None
or up_prod.metadata.timestamp is None):
return True
else:
return (
(up_prod.metadata.timestamp > self.metadata.timestamp)
# this second condition propagates outdated status
# from indirect upstream dependencies. e.g., a -> b -> c
                # the user runs everything in order, then later only runs a.
                # Since a is outdated, c should be considered outdated too
or up_prod._is_outdated())
def _outdated_code_dependency(self):
"""
        Determine if the product is outdated by checking the source code
        that generated it
"""
if self._outdated_code_dependency_status is not None:
self.logger.debug(('Returning cached code dependencies status. '
'Outdated? %s'),
self._outdated_code_dependency_status)
return self._outdated_code_dependency_status
outdated, diff = self.task.dag.differ.is_different(
a=self.metadata.stored_source_code,
b=str(self.task.source),
a_params=self.metadata.params,
# process resource params to compare the file hash instead of
# the path to the file
b_params=process_resources(
self.task.params.to_json_serializable(params_only=True)),
extension=self.task.source.extension)
self._outdated_code_dependency_status = outdated
self.logger.debug(('Finished checking code status for task "%s" '
'Outdated? %s'), self.task.name,
self._outdated_code_dependency_status)
if outdated:
self.logger.info('Task "%s" has outdated code. Diff:\n%s',
self.task.name, diff)
return self._outdated_code_dependency_status
def _reset_cached_outdated_status(self):
self._outdated_data_dependencies_status = None
self._outdated_code_dependency_status = None
self._is_outdated_status = None
def __str__(self):
return str(self._identifier)
def __repr__(self):
# NOTE: this assumes ._identifier has a best_repr property,
# should we refactor it?
return '{}({})'.format(
type(self).__name__, self._identifier.best_repr(shorten=True))
def __getstate__(self):
state = self.__dict__.copy()
        # the logger is not picklable, so we remove it here and rebuild it
        # in __setstate__
del state['logger']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.logger = logging.getLogger('{}.{}'.format(__name__,
type(self).__name__))
def to_json_serializable(self):
"""Returns a JSON serializable version of this product
"""
# NOTE: this is used in tasks where only JSON serializable parameters
# are supported such as NotebookRunner that depends on papermill
return str(self)
def __len__(self):
# MetaProduct return the number of products, this is a single Product
# hence the 1
return 1
# Subclasses must implement the following methods
@abc.abstractmethod
def _init_identifier(self, identifier):
pass # pragma: no cover
@abc.abstractmethod
def fetch_metadata(self):
pass # pragma: no cover
@abc.abstractmethod
def save_metadata(self, metadata):
pass # pragma: no cover
@abc.abstractmethod
def exists(self):
"""
        This method returns True if the product exists. It is not part of
        the metadata, so there is no cached status
"""
pass # pragma: no cover
@abc.abstractmethod
def delete(self, force=False):
"""Deletes the product
"""
pass # pragma: no cover
# NOTE: currently optional but there is a conflict with this. metadata
# defines a delete public method which calls product._delete_metadata
# but not all products implement this
def _delete_metadata(self):
raise NotImplementedError(
'_delete_metadata not implemented in {}'.format(
type(self).__name__))
# download and upload are only relevant for File but we add them to keep
# the API consistent
def download(self):
pass # pragma: no cover
def upload(self):
pass # pragma: no cover
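
# Illustrative sketch (not part of ploomber): a toy subclass showing the
# minimal surface a concrete Product must implement. Everything below is an
# assumption for demonstration only -- it keeps data in in-memory dicts and
# uses a plain string identifier with no templating support.
class _InMemoryProduct(Product):
    _store = {}
    _metadata_store = {}

    def _init_identifier(self, identifier):
        # real products wrap the identifier in a placeholder object that
        # supports .render(); a plain string is enough for this sketch
        return str(identifier)

    def fetch_metadata(self):
        return self._metadata_store.get(str(self._identifier))

    def save_metadata(self, metadata):
        self._metadata_store[str(self._identifier)] = metadata

    def exists(self):
        return str(self._identifier) in self._store

    def delete(self, force=False):
        self._store.pop(str(self._identifier), None)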
|
from leer.core.storage.merkle_storage import MMR
from secp256k1_zkp import PedersenCommitment, PublicKey
from leer.core.storage.default_paths import txo_storage_path
from leer.core.lubbadubdub.ioput import IOput
import os
import hashlib
def _hash(data):
#TODO move to utils
m=hashlib.sha256()
m.update(bytes(data))
return m.digest()
class CommitmentMMR(MMR):
def sum(self, x1,x2):
# each index is 33 bytes for commitments and 32 for hash
comm1, hash1 = x1[:33], x1[33:65]
comm2, hash2 = x2[:33], x2[33:65]
comm1, comm2 = PedersenCommitment(commitment=comm1, raw=True), PedersenCommitment(commitment=comm2, raw=True)
    #XXX we definitely need a sum of Pedersen commitments at the libsecp256k1 level.
pk= PublicKey()
pk.combine([comm1.to_public_key().public_key, comm2.to_public_key().public_key])
sm = pk.to_pedersen_commitment()
first_part = sm.serialize()
second_part = _hash(hash1+hash2)
return first_part+second_part
class TXOMMR(MMR):
def sum(self, x1,x2):
return _hash(x1+x2)
class ConfirmedTXOStorage:
'''
    Storage for all TXOs which are already in blocks.
    It is a double MMR tree: the first for commitments only, the second for full txouts.
    Commitment tree:
      Each commitment leaf obj is `b""` and the commitment leaf index is `bytes(serialized_apc)+bytes(hash(serialized_apc))`.
      The sum of two nodes in the commitment tree is bytes(serialized_apc1+serialized_apc2)+hash(hash1+hash2),
      where the commitments are added as points on the curve and the hash part is the hash of the concatenated child hashes.
    Txout tree:
      Each txout leaf obj is the serialized ioput, and the leaf index is hash(ioput_index).
      The sum of two nodes is the hash of their concatenation.
'''
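    # Concretely (see append() below): for a txout with serialized index `idx`
    # and commitment index `apc + hash(apc)`, the txos tree stores
    # hash(idx) -> serialized ioput and the commitments tree stores
    # `apc + hash(apc)` -> b"", so both MMR roots commit to the same ordered
    # set of outputs.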
def __init__(self, path=txo_storage_path):
self.commitments = CommitmentMMR("commitments", os.path.join(path, "confirmed"), clear_only=False)
self.txos = TXOMMR("txos", os.path.join(path, "confirmed"), discard_only=True)
def __getitem__(self, hash_and_pc):
res = self.txos.get_by_hash(_hash(hash_and_pc))
if not res:
raise KeyError(hash_and_pc)
utxo=IOput()
utxo.deserialize(res)
return utxo
def __setitem__(self, hash_and_pc, utxo):
#TODO __setitem__ interface should be substituted with append-like interface
#XXX problem here: commitments indexes should be apc+hash(apc), not hash_and_pc
#here we should save
self.txos.append(_hash(hash_and_pc),utxo.serialize())
self.commitments.append(utxo.commitment_index,b"")
def append(self, utxo):
self.txos.append(_hash(utxo.serialized_index), utxo.serialize())
self.commitments.append(utxo.commitment_index,b"")
def spend(self, utxo, return_revert_obj=False):
txos = self.txos.discard(_hash(utxo.serialized_index))
commitment = self.commitments.clear(utxo.commitment_index)
if return_revert_obj:
return (txos, commitment)
def find(self, hash_and_pc):
'''
        In contrast with __getitem__, find will also return spent
        outputs, which is useful for other (syncing) nodes.
'''
res = self.txos.find_by_hash(_hash(hash_and_pc))
if not res:
raise KeyError(hash_and_pc)
utxo=IOput()
utxo.deserialize(res)
return utxo
def unspend(self, revert_obj):
(txos, commitment) = revert_obj
self.txos.revert_discarding(txos)
self.commitments.revert_clearing(commitment)
def __contains__(self, serialized_index):
return bool(self.txos.get_by_hash(_hash(serialized_index)))
def remove(self,n):
self.commitments.remove(n)
ser_removed_outputs = self.txos.remove(n)
removed_outputs=[]
for _ser in ser_removed_outputs:
utxo=IOput()
utxo.deserialize(_ser)
removed_outputs.append(utxo)
return removed_outputs
def get_commitment_root(self):
return self.commitments.get_root()
def get_txo_root(self):
return self.txos.get_root()
def get_state(self):
return self.commitments.get_state()
def set_state(self, state):
self.txos.set_state(state)
self.commitments.set_state(state)
def find_wo_deser(self, hash_and_pc):
res = self.txos.find_by_hash(_hash(hash_and_pc))
if not res:
raise KeyError(hash_and_pc)
return res
class TXOsStorage:
class Interface:
def __init__(self):
self.storage = {}
def __getitem__(self, hash_and_pc):
if not hash_and_pc in self.storage:
raise KeyError(hash_and_pc)
return self.storage[hash_and_pc]
def __setitem__(self, hash_and_pc, utxo):
#TODO __setitem__ interface should be substituted with append-like interface
#here we should save
self.storage[hash_and_pc]=utxo
def remove(self, utxo):
self.remove_by_index(utxo.serialized_index)
def remove_by_index(self, _index):
self.storage.pop(_index)
def __contains__(self, utxo):
return utxo in self.storage
def flush(self):
self.storage = {}
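    # Borg-style shared state: TXOsStorage instances constructed with the same
    # path share one attribute dict (see __init__ below), so they behave like
    # a per-path singleton.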
__shared_states = {}
def __init__(self, storage_space, path):
if not path in self.__shared_states:
self.__shared_states[path]={}
self.__dict__ = self.__shared_states[path]
self.path = path
self.confirmed = ConfirmedTXOStorage(self.path)
self.mempool = self.Interface()
self.storage_space = storage_space
self.storage_space.register_txos_storage(self)
def known(self, output_index):
return (output_index in self.confirmed) or (output_index in self.mempool)
def confirm(self, output_index):
utxo = self.mempool.storage.pop(output_index)
self.confirmed.append(utxo)
def apply_tx_get_merkles_and_rollback(self, tx):
rollback_inputs = []
for _i in tx.inputs:
rollback_inputs.append(self.confirmed.spend(_i, return_revert_obj=True))
for _o in tx.outputs:
self.confirmed.append(_o)
roots=[self.confirmed.get_commitment_root(), self.confirmed.get_txo_root()]
for r_i in rollback_inputs:
self.confirmed.unspend(r_i)
self.confirmed.remove(len(tx.outputs))
return roots
#TODO bad naming. It should be apply block, or block_transaction
def apply_tx(self, tx, new_state):
rollback_inputs = []
for _i in tx.inputs:
if self.storage_space.utxo_index:
self.storage_space.utxo_index.remove_utxo(_i)
rollback_inputs.append(self.confirmed.spend(_i, return_revert_obj=True))
for _o in tx.outputs:
if self.storage_space.utxo_index:
self.storage_space.utxo_index.add_utxo(_o)
self.confirmed.append(_o)
self.mempool.remove(_o)
self.confirmed.set_state(new_state)
return (rollback_inputs, len(tx.outputs))
def rollback(self, pruned_inputs, num_of_added_outputs, prev_state):
for r_i in pruned_inputs:
if self.storage_space.utxo_index:
#r_i[0][2] is serialized txo (0 is txo, 2 is serialized object)
utxo=IOput()
utxo.deserialize(r_i[0][2])
self.storage_space.utxo_index.add_utxo(utxo)
self.confirmed.unspend(r_i)
outputs_for_mempool = self.confirmed.remove(num_of_added_outputs)
for _o in outputs_for_mempool:
self.mempool[_o.serialized_index]=_o
self.storage_space.utxo_index.remove_utxo(_o)
self.confirmed.set_state(prev_state)
def find_serialized(self, output_index):
if output_index in self.mempool:
return self.mempool[output_index].serialize()
else:
return self.confirmed.find_wo_deser(output_index)
|
import os
#: Location of the wordle API
WORDLE_API_URL = os.environ.get("INPUT_WORDLEAPIURL", "")
#: Puzzle size to solve
PUZZLE_SIZE = int(os.environ.get("INPUT_PUZZLESIZE", "5"))
|
import bpy
from bpy.types import (Panel,
Menu,
Operator,
PropertyGroup,
)
import blvcw.crystal_well_global_state as GlobalState
from blvcw.crystal_well_components import CrystalWellLoader, CrystalWellSettings
from blvcw.crystal_well_simulation import CrystalWellSimulator
class WM_OT_VIRTUAL_CRYSTAL_WELL(Operator):
"""
VCW operator that is called when pressing the "Render" button in the UI panel.
It makes sure that a crystal_well_loader was created and that the crystal_well_settings dict is filled
before the main simulation object is created.
Will not execute if either an import error exists or no output_directory was chosen for rendering output.
"""
bl_label = "Render"
bl_idname = "wm.vcw_execute"
def execute(self, context):
scene = context.scene
vcw = scene.virtual_crystal_well
crystal_well_loader = GlobalState.crystal_well_loader
if crystal_well_loader is None:
if vcw.crystal_object == "CUSTOM":
# Import error
return {"CANCELLED"}
else:
crystal_well_loader = CrystalWellLoader(crystal_object=vcw.crystal_object)
crystal_well_loader.set_number_crystal_variants_per_render(number_crystal_variants=vcw.number_variants)
if GlobalState.output_directory_error and vcw.number_images != 0:
print("No existing output directory chosen!")
return {"CANCELLED"}
if GlobalState.output_directory_warning:
print("WARNING: Saving to non-empty directory")
scaling_crystals_average = (vcw.scaling_crystals_average[0], # Necessary because bpy object is not serializable
vcw.scaling_crystals_average[1],
vcw.scaling_crystals_average[2])
rotation_crystals_average = (vcw.rotation_crystals_average[0],
vcw.rotation_crystals_average[1],
vcw.rotation_crystals_average[2])
crystal_well_settings = CrystalWellSettings(number_crystals=vcw.number_crystals,
number_crystals_std_dev=vcw.number_crystals_std_dev,
crystal_object=vcw.crystal_object,
distributor=vcw.crystal_distributor,
total_crystal_area_min=vcw.total_crystal_area_min,
total_crystal_area_max=vcw.total_crystal_area_max,
crystal_area_min=vcw.crystal_area_min**2,
crystal_area_max=vcw.crystal_area_max**2,
crystal_edge_min=vcw.crystal_edge_min,
crystal_edge_max=vcw.crystal_edge_max,
crystal_aspect_ratio_max=vcw.crystal_aspect_ratio_max/0.7,
smooth_shading_distributor=vcw.smooth_shading_distributor,
scaling_crystals_average=scaling_crystals_average,
scaling_crystals_std_dev=vcw.scaling_crystals_std_dev,
rotation_crystals_average=rotation_crystals_average,
rotation_crystals_std_dev=vcw.rotation_crystals_std_dev,
crystal_material_name=vcw.crystal_material,
crystal_material_min_ior=vcw.crystal_material_min_ior,
crystal_material_max_ior=vcw.crystal_material_max_ior,
crystal_material_min_brightness=vcw.crystal_material_min_brightness,
crystal_material_max_brightness=vcw.crystal_material_max_brightness,
light_type=vcw.light_type,
light_angle_min=vcw.light_angle_min,
light_angle_max=vcw.light_angle_max,
use_bottom_light=vcw.use_bottom_light,
res_x=vcw.resolution_x,
res_y=vcw.resolution_y,
crystal_import_path=vcw.import_path,
output_path=vcw.output_path,
number_variants=vcw.number_variants,
number_images=vcw.number_images)
if vcw.save_settings and not GlobalState.output_directory_error:
crystal_well_settings.write_json()
crystal_well_settings.print_settings()
GlobalState.has_rendered = True
crystal_well_simulator = CrystalWellSimulator(crystal_well_settings=crystal_well_settings,
crystal_well_loader=crystal_well_loader)
for ui_update in crystal_well_simulator.generate_image():
if vcw.update_ui and ui_update:
# Update VCW in blender after each image is rendered
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
return {"FINISHED"}
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
from parameterized import parameterized
from monai.apps import check_hash
TEST_CASE_1 = ["b94716452086a054208395e8c9d1ae2a", "md5", True]
TEST_CASE_2 = ["abcdefg", "md5", False]
TEST_CASE_3 = [None, "md5", True]
TEST_CASE_4 = [None, "sha1", True]
TEST_CASE_5 = ["b4dc3c246b298eae37cefdfdd2a50b091ffd5e69", "sha1", True]
class TestCheckMD5(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5])
def test_result(self, md5_value, t, expected_result):
test_image = np.ones((5, 5, 3))
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "test_file.png")
test_image.tofile(filename)
result = check_hash(filename, md5_value, hash_type=t)
self.assertTrue(result == expected_result)
def test_hash_type_error(self):
with self.assertRaises(ValueError):
with tempfile.TemporaryDirectory() as tempdir:
check_hash(tempdir, "test_hash", "test_type")
if __name__ == "__main__":
unittest.main()
|
import redis
redisClient = redis.Redis(host='redis')
|
def factorial(n):
answer = 1
for i in range(1, n+1):
answer *= i
return(answer)
def Leibniz(goto):
pi = 0
sign = 1
for i in range(1, 1+goto):
pi += 1/((2*i-1)*sign)
sign = -sign
pi = pi*4
return(pi)
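# Note: Leibniz's series converges slowly (error ~ 1/(2*goto)), so
# Leibniz(1000) is only accurate to roughly three decimal places.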
def AbrahamSharp(goto):
    # Sharp's series: pi = sum_{i>=0} 2*(-1)**i * 3**(0.5 - i) / (2*i + 1)
    pi = 0
    for i in range(goto+1):
        a = 2*((-1)**i)
        b = (1/2)-i
        c = 3**b
        d = (2*i)+1
        n = a*c/d
        pi += n
    return(pi)
def Euler(goto):
    # Euler's series: pi = sum_{i>=0} 2**(i+1) * (i!)**2 / (2*i + 1)!
    pi = 0
    for i in range(goto+1):
        pi += (2**(i+1)) * factorial(i)**2 / factorial(2*i+1)
    return(pi)
def Bellard(goto):
    # Bellard's formula: pi = (1/2**6) * sum_{i>=0} ((-1)**i / 2**(10*i)) *
    #   (-2**5/(4i+1) - 1/(4i+3) + 2**8/(10i+1) - 2**6/(10i+3) - 2**2/(10i+5) - 2**2/(10i+7) + 1/(10i+9))
    pi = 0
    for i in range(goto+1):
        a = ((-1)**i) * 2.0**(-10*i)
        b = (-2**5/(4*i+1) - 1/(4*i+3)
             + 2**8/(10*i+1) - 2**6/(10*i+3)
             - 2**2/(10*i+5) - 2**2/(10*i+7)
             + 1/(10*i+9))
        pi += a*b
    return(pi/(2**6))
print('Happy Pi Day, World!')
print('Pi:')
print("Leibniz's formula:", Leibniz(1000))
print("Abraham Sharp's formula:", AbrahamSharp(1000))
print("Euler's formula:", Euler(1000))
print("Bellard's formula:", Bellard(1000))
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorFuelCellAuxiliaryHeater
log = logging.getLogger(__name__)
class TestGeneratorFuelCellAuxiliaryHeater(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_generatorfuelcellauxiliaryheater(self):
pyidf.validation_level = ValidationLevel.error
obj = GeneratorFuelCellAuxiliaryHeater()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_excess_air_ratio = 2.2
obj.excess_air_ratio = var_excess_air_ratio
# real
var_ancillary_power_constant_term = 3.3
obj.ancillary_power_constant_term = var_ancillary_power_constant_term
# real
var_ancillary_power_linear_term = 4.4
obj.ancillary_power_linear_term = var_ancillary_power_linear_term
# real
var_skin_loss_ufactor_times_area_value = 5.5
obj.skin_loss_ufactor_times_area_value = var_skin_loss_ufactor_times_area_value
# alpha
var_skin_loss_destination = "SurroundingZone"
obj.skin_loss_destination = var_skin_loss_destination
# object-list
var_zone_name_to_receive_skin_losses = "object-list|Zone Name to Receive Skin Losses"
obj.zone_name_to_receive_skin_losses = var_zone_name_to_receive_skin_losses
# alpha
var_heating_capacity_units = "Watts"
obj.heating_capacity_units = var_heating_capacity_units
# real
var_maximum_heating_capacity_in_watts = 9.9
obj.maximum_heating_capacity_in_watts = var_maximum_heating_capacity_in_watts
# real
var_minimum_heating_capacity_in_watts = 10.1
obj.minimum_heating_capacity_in_watts = var_minimum_heating_capacity_in_watts
# real
var_maximum_heating_capacity_in_kmol_per_second = 11.11
obj.maximum_heating_capacity_in_kmol_per_second = var_maximum_heating_capacity_in_kmol_per_second
# real
var_minimum_heating_capacity_in_kmol_per_second = 12.12
obj.minimum_heating_capacity_in_kmol_per_second = var_minimum_heating_capacity_in_kmol_per_second
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].name, var_name)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].excess_air_ratio, var_excess_air_ratio)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].ancillary_power_constant_term, var_ancillary_power_constant_term)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].ancillary_power_linear_term, var_ancillary_power_linear_term)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].skin_loss_ufactor_times_area_value, var_skin_loss_ufactor_times_area_value)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].skin_loss_destination, var_skin_loss_destination)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].zone_name_to_receive_skin_losses, var_zone_name_to_receive_skin_losses)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].heating_capacity_units, var_heating_capacity_units)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].maximum_heating_capacity_in_watts, var_maximum_heating_capacity_in_watts)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].minimum_heating_capacity_in_watts, var_minimum_heating_capacity_in_watts)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].maximum_heating_capacity_in_kmol_per_second, var_maximum_heating_capacity_in_kmol_per_second)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].minimum_heating_capacity_in_kmol_per_second, var_minimum_heating_capacity_in_kmol_per_second)
|
#!/usr/bin/env python3
import requests
import json
import sys
#get the secrets from your Google Cloud project, use the Oauth2 Playground for your refresh token
client_id=sys.argv[1]
client_secret=sys.argv[2]
refresh_token=sys.argv[3]
credentials=sys.argv[4]
def get_list_from_file(filename):
try:
# open and read the file into list
with open(filename) as f:
string_list = f.read().splitlines()
f.close()
print(string_list)
return string_list
except:
        print("\033[1m"+"Issue Occurred with obtaining list from file"+"\033[0m")
sys.exit(1)
def generate_vault_access_token(client_id,client_secret,refresh_token):
try:
url = "https://www.googleapis.com/oauth2/v4/token"
body = json.dumps({
"client_id": client_id,
"client_secret": client_secret,
"refresh_token": refresh_token,
"grant_type": "refresh_token"
})
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
}
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
jsonContent = json.loads(response.text)
vaultAccessToken = jsonContent["access_token"]
return vaultAccessToken
except:
        print("\033[1m"+"Issue Occurred with generating Google Vault Access Token"+"\033[0m")
sys.exit(1)
def generate_matter(leaver_user,vaultAccessToken):
try:
matterList = []
for user in leaver_user:
url = "https://vault.googleapis.com/v1/matters/"
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
"Authorization": "Bearer " + vaultAccessToken
}
body = json.dumps ({
"state": "OPEN",
"description": "Generated by Python",
"name": user + "'s archive"
})
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
jsonContent = json.loads(response.text)
matterID=jsonContent["matterId"]
#print("Matter ID for " + user + " is " + matterID)
print(jsonContent)
matterList.append({
"matterInstance": {
"user": user,
"userInfo": {
"matterID": matterID,
"savedQueryID": "",
"matterExportID": ""
}
}
})
return matterList
except:
        print("\033[1m"+"Issue Occurred with generating Google Vault Matter"+"\033[0m")
sys.exit(1)
def generate_search_query(matterList,vaultAccessToken):
try:
for matter in matterList:
matterList = []
for key, value in matter.items():
user=(matter['matterInstance']['user'])
matterID=(matter['matterInstance']['userInfo']['matterID'])
url = "https://vault.googleapis.com/v1/matters/"+matterID+"/savedQueries"
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
"Authorization": "Bearer " + vaultAccessToken
}
body=json.dumps({
"displayName": user + "'s email search query",
"query": {
"corpus": "MAIL",
"dataScope": "ALL_DATA",
"searchMethod": "ACCOUNT",
"accountInfo": { "emails": [user]},
"mailOptions": {"excludeDrafts" : "false"},
"timeZone": "Atlantic/Canary",
"method": "ACCOUNT"
}}
)
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
jsonContent = json.loads(response.text)
print(jsonContent)
savedQueryID=jsonContent["savedQueryId"]
#print("savedQueryId for " + user + " is " + savedQueryID + " matterID is " + matterID)
matterList.append({
"matterInstance": {
"user": user,
"userInfo": {
"matterID": matterID,
"savedQueryID": savedQueryID,
"matterExportID": ""
}
}
}
)
return matterList
except:
        print("\033[1m"+"Issue Occurred with generating Google Vault Matter Search Query"+"\033[0m")
sys.exit(1)
def generate_export(savedQueryID,matterList,vaultAccessToken):
try:
for matter in matterList:
matterList = []
for key, value in matter.items():
user=(matter['matterInstance']['user'])
matterID=(matter['matterInstance']['userInfo']['matterID'])
savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
print(user,matterID,savedQueryID)
url = "https://vault.googleapis.com/v1/matters/",matterID,"/exports"
url=''.join(url)
print(url)
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
"Authorization": "Bearer " + vaultAccessToken
}
body = json.dumps(
{
"name": user + "'s Export",
"query": {
"corpus": "MAIL",
"dataScope": "ALL_DATA",
"searchMethod": "ACCOUNT",
"accountInfo": { "emails": [user]},
"mailOptions": {"excludeDrafts" : "false"},
"timeZone": "Atlantic/Canary",
"method": "Account",
},
"exportOptions": {
"mailOptions": {
"exportFormat": "MBOX",
"showConfidentialModeContent": "true"
},
"region": "any"
}
}
)
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
jsonContent = json.loads(response.text)
matterExportID=jsonContent["id"]
print(jsonContent)
#print("matterExportID for " + user + " is " + matterExportID + " searchQueryID is " + savedQueryID + " matterID is " + matterID)
matterList.append({
"matterInstance": {
"user": user,
"userInfo": {
"matterID": matterID,
"savedQueryID": savedQueryID,
"matterExportID": matterExportID
}
}
}
)
return matterList
except:
        print("\033[1m"+"Issue Occurred with generating Google Vault Matter Export"+"\033[0m")
sys.exit(1)
def set_matter_permissions(adminAccountIDs,matterList,vaultAccessToken):
try:
for matter in matterList:
matterList = []
for key, value in matter.items():
for each in adminAccountIDs:
print(each)
print(matterList)
matterID=(matter['matterInstance']['userInfo']['matterID'])
url = "https://vault.googleapis.com/v1/matters/",matterID,":addPermissions"
print(url)
url=''.join(url)
print(url)
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
"Authorization": "Bearer " + vaultAccessToken
}
body = json.dumps(
{
"matterPermission":
{
"role": "COLLABORATOR",
"accountId": each
},
"sendEmails": "false",
"ccMe": "false"
}
)
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
jsonContent = (response.text)
print(jsonContent)
return each
except:
        print("\033[1m"+"Issue Occurred with setting Google Vault Matter permissions"+"\033[0m")
sys.exit(1)
def generate_links_notify(matterList,credentials):
try:
for matter in matterList:
matterList = []
user=(matter['matterInstance']['user'])
matterID=(matter['matterInstance']['userInfo']['matterID'])
savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
exportID=(matter['matterInstance']['userInfo']['matterExportID'])
print("************************************************************************************************************************************************************************************************************************************************************")
print("Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports")
print("Matter Link for " + user + " https://vault.google.com/matter/"+ matterID)
print("Search Query Link for " + user + " https://vault.google.com/matter/"+ matterID + "/search")
print("************************************************************************************************************************************************************************************************************************************************************")
url=credentials
body=json.dumps(
{
'text': "Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports"
}
)
headers={
'Content-type': 'application/json'
}
response = requests.request(
"POST",
url,
headers=headers,
data=body
)
except:
print("\033[1m"+"Issue Occured with generating links for notifications"+"\033[0m")
sys.exit(1)
vaultAccessToken=generate_vault_access_token(client_id,client_secret,refresh_token)
input_filename='.txt'
leaver_user=get_list_from_file(input_filename)
adminAccountIDs='.txt'
admin_users=get_list_from_file(adminAccountIDs)
matter=generate_matter(leaver_user,vaultAccessToken)
savedQueryID=generate_search_query(matter,vaultAccessToken)
matterExportID=generate_export(savedQueryID,matter,vaultAccessToken)
last_admin=set_matter_permissions(admin_users,matter,vaultAccessToken)
generate_links_notify(matter,credentials)
|
import os.path as osp
import torch
import torch.utils.data as data
import numpy as np
__all__ = ['CNNDataLayer']
def c3d_loader(path, number):
data = []
for index in range(number - 2, number + 3):
index = min(max(index, 1), 3332)
data.append(np.load(osp.join(path, str(index).zfill(5)+'.npy')))
data = np.array(data, dtype=np.float32)
data = (data - 0.5) / 0.5
data = data[np.newaxis, ...]
return data
# TODO: Add c2d loader
_DATA_LOADERS = {
'C3D': c3d_loader,
}
class CNNDataLayer(data.Dataset):
def __init__(self, args, phase='train'):
self.data_root = args.data_root
self.phase = phase
if self.phase == 'extract':
self.sessions = args.train_session_set + args.test_session_set
else:
self.sessions = getattr(args, phase+'_session_set')
self.loader = _DATA_LOADERS[args.model]
self.inputs = []
for session_name in self.sessions:
session_path = osp.join(self.data_root, 'target', session_name+'.txt')
session_data = open(session_path, 'r').read().splitlines()
self.inputs.extend(session_data)
def __getitem__(self, index):
data_path, number, air_target, bed_target = self.inputs[index].split()
data = self.loader(osp.join(self.data_root, 'slices_npy_64x64', data_path), int(number))
data = torch.from_numpy(data)
air_target = np.array(air_target.split(','), dtype=np.float32)
air_target = torch.from_numpy(air_target)
bed_target = np.array(bed_target.split(','), dtype=np.float32)
bed_target = torch.from_numpy(bed_target)
if self.phase == 'extract':
save_path = osp.join(self.data_root, 'c3d_features', data_path, number.zfill(5)+'.npy')
return data, air_target, bed_target, save_path
else:
return data, air_target, bed_target
def __len__(self):
return len(self.inputs)
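# --- Added usage sketch (not part of the original module) ---
# Shows one way CNNDataLayer might be wired into a torch DataLoader. The data root,
# model name and session names below are hypothetical, and the target .txt files and
# slices_npy_64x64/*.npy files they point at must already exist on disk.
if __name__ == '__main__':
    from argparse import Namespace
    from torch.utils.data import DataLoader

    args = Namespace(
        data_root='data/',                 # hypothetical dataset root
        model='C3D',                       # selects c3d_loader from _DATA_LOADERS
        train_session_set=['session01'],   # hypothetical session names
        test_session_set=['session02'],
    )
    dataset = CNNDataLayer(args, phase='train')
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    for clip, air_target, bed_target in loader:
        # clip: (batch, 1, 5, H, W) float32, scaled to [-1, 1] by c3d_loader
        print(clip.shape, air_target.shape, bed_target.shape)
        break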
|
#!/usr/bin/env python
# coding=utf-8
import pickle
import os
import sys, locale
import time
import get_pickle
import numpy as np
import tensorflow as tf
from configparser import ConfigParser
#cfg = ConfigParser()
#cfg.read(u'conf/ner_conf.ini')
path = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(path + r'/../')
from predict import ModelLoader
from predict import show_result
class Pipeline(object):
def __init__(self):
print("Starting new Tensorflow session...")
self.session = tf.Session()
print("Loading pipeline modules...")
self.cws_model = ModelLoader(r'../ckpt/cws/bi-lstm.ckpt-6', 'cws')
self.pos_model = ModelLoader(r'../ckpt/pos/bi-lstm.ckpt-6', 'pos')
self.ner_model = ModelLoader(r'../ckpt/ner/bi-lstm.ckpt-6', 'ner')
def analyze(self, sentence, word2id, id2tag, zy, word2id_p, id2tag_p, word2id_n, id2tag_n):
'''
Return three outputs: the cws, pos and ner results.
'''
#cws
cws_tag = self.cws_model.predict(sentence, word2id, id2tag)
cws_str = merge_cws(cws_tag)
#pos
pos_tagging = self.pos_model.predict(cws_str, word2id_p, id2tag_p)
#ner
ner_tagging = self.ner_model.predict(cws_str, word2id_n, id2tag_n)
return cws_str, pos_tagging, ner_tagging
def merge_cws(cws_tag):
words = []
tmp = []
rss = ''
for (w, t) in cws_tag:
for i in range(len(t)):
if t[i] in ['s', 'n']:
words.append(w[i])
else:
tmp.extend(w[i])
if t[i] == 'e':
words.append(tmp)
tmp = []
for each in words:
if isinstance(each, list):
each = "".join(each)
rss += each + ' '
return rss
def main():
word2id_c, id2tag_c, word2id_p, id2tag_p, word2id_n, id2tag_n, zy = get_pickle.get_pickle()
pipe = Pipeline()
sentence = u'我爱吃北京烤鸭。我爱中华人民共和国。'
# sentence = raw_input("请您输入一句话:").strip().decode(sys.stdin.encoding or locale.getpreferredencoding(True))
cws, pos, ner = pipe.analyze(sentence, word2id_c, id2tag_c, zy, word2id_p, id2tag_p, word2id_n, id2tag_n)
pos_sen = show_result(pos)
ner_sen = show_result(ner)
print "您输入的句子为:", sentence
print "分词结果为:", cws
print "词性标注结果为:", pos_sen
print "命名实体识别结果为:", ner_sen
if __name__ == "__main__":
main()
|
# Write an algorithm that reads the price of a product and shows its new price with a 5% discount
# my answer
print('\033[1;36m+=\033[m'*15, '\033[1;7;36mPRODUTO COM DESCONTO\033[m', '\033[1;36m+=\033[m'*15)
produto = float(input ('\033[31mQual o preço do produto? R$\033[m'))
calculo = produto * 0.05 #5% of the value
desconto = produto - calculo
print ('\033[33mO produto que custava \033[1;7;33mR${:.2f}\033[m, \033[33mna promoção com desconto de \033[m\033[1;7;33m5%\033[m \033[33mvai custar \033[1;7;33mR${:.2f}\033[m'.format(produto, desconto))
#Gustavo's answer
#preco = float(input('Qual o preço do produto? R$'))
#novo = preco - (preco * 5 / 100)
#print ('O produto que custava R${:.2f}, na promoção com desconto de 5% vai custar R${:.2f}'.format(preco, novo))
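# Worked example (added for illustration): for a product costing R$100.00,
# 5% of the value is R$5.00, so the discounted price printed is R$95.00.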
|
"""Contains the Goon Character class"""
import json
from botc import Character, Outsider
from ._utils import BadMoonRising, BMRRole
with open('botc/gamemodes/badmoonrising/character_text.json') as json_file:
character_text = json.load(json_file)[BMRRole.goon.value.lower()]
class Goon(Outsider, BadMoonRising, Character):
"""Goon: Each night, the 1st player to choose you with their ability is drunk until dusk.
You become their alignment.
"""
def __init__(self):
Character.__init__(self)
BadMoonRising.__init__(self)
Outsider.__init__(self)
self._desc_string = character_text["description"]
self._examp_string = character_text["examples"]
self._instr_string = character_text["instruction"]
self._lore_string = character_text["lore"]
self._brief_string = character_text["brief"]
self._action = character_text["action"]
self._art_link = "https://bloodontheclocktower.com/wiki/images/a/a4/Goon_Token.png"
self._art_link_cropped = "https://imgur.com/NaRvjH3.png"
self._wiki_link = "https://bloodontheclocktower.com/wiki/Goon"
self._role_enum = BMRRole.goon
self._emoji = "<:bmrgoon:781151556330192966>"
|
# -*- coding: utf-8 -*-
import sys
import subprocess
from functools import wraps
import matplotlib as mpl
import seaborn as sns
import pandas as pd
def customize(func):
@wraps(func)
def call_w_context(*args, **kwargs):
if not PlotConfig.FONT_SETTED:
_use_chinese(True)
set_context = kwargs.pop('set_context', True)
if set_context:
with plotting_context(), axes_style():
sns.despine(left=True)
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
def plotting_context(context='notebook', font_scale=1.5, rc=None):
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5}
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
if rc is None:
rc = {}
rc_default = {}
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
def print_table(table, name=None, fmt=None):
from IPython.display import display
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
prev_option = pd.get_option('display.float_format')
if fmt is not None:
pd.set_option('display.float_format', lambda x: fmt.format(x))
display(table)
if fmt is not None:
pd.set_option('display.float_format', prev_option)
class PlotConfig(object):
FONT_SETTED = False
USE_CHINESE_LABEL = False
MPL_FONT_FAMILY = mpl.rcParams["font.family"]
MPL_FONT = mpl.rcParams["font.sans-serif"]
MPL_UNICODE_MINUS = mpl.rcParams["axes.unicode_minus"]
def get_chinese_font():
if sys.platform.startswith('linux'):
cmd = 'fc-list :lang=zh -f "%{family}\n"'
output = subprocess.check_output(cmd, shell=True)
if isinstance(output, bytes):
output = output.decode("utf-8")
zh_fonts = [
f.split(',', 1)[0] for f in output.split('\n') if f.split(',', 1)[0]
]
return zh_fonts
return []
def _use_chinese(use=None):
if use is None:
return PlotConfig.USE_CHINESE_LABEL
elif use:
PlotConfig.USE_CHINESE_LABEL = use
PlotConfig.FONT_SETTED = True
_set_chinese_fonts()
else:
PlotConfig.USE_CHINESE_LABEL = use
PlotConfig.FONT_SETTED = True
_set_default_fonts()
def _set_chinese_fonts():
default_chinese_font = ['SimHei', 'FangSong', 'STXihei', 'Hiragino Sans GB',
'Heiti SC', 'WenQuanYi Micro Hei']
chinese_font = default_chinese_font + get_chinese_font()
# Set Chinese fonts
mpl.rc(
"font", **{
# seaborn requires the sans-serif list to be set
"sans-serif": chinese_font,
"family": ','.join(chinese_font) + ',sans-serif'
}
)
# Prevent the minus sign from being garbled
mpl.rcParams["axes.unicode_minus"] = False
def _set_default_fonts():
mpl.rc(
"font", **{
"sans-serif": PlotConfig.MPL_FONT,
"family": PlotConfig.MPL_FONT_FAMILY
}
)
mpl.rcParams["axes.unicode_minus"] = PlotConfig.MPL_UNICODE_MINUS
class _PlotLabels(object):
def get(self, v):
if _use_chinese():
return getattr(self, v + "_CN")
else:
return getattr(self, v + "_EN")
class ICTS(_PlotLabels):
TITLE_CN = "{} IC"
TITLE_EN = "{} Period Forward Return Information Coefficient (IC)"
LEGEND_CN = ["IC", "1个月移动平均"]
LEGEND_EN = ["IC", "1 month moving avg"]
TEXT_CN = "均值 {:.3f} \n方差 {:.3f}"
TEXT_EN = "Mean {:.3f} \nStd. {:.3f}"
ICTS = ICTS()
class ICHIST(_PlotLabels):
TITLE_CN = "%s IC 分布直方图"
TITLE_EN = "%s Period IC"
LEGEND_CN = "均值 {:.3f} \n方差 {:.3f}"
LEGEND_EN = "Mean {:.3f} \nStd. {:.3f}"
ICHIST = ICHIST()
class ICQQ(_PlotLabels):
NORM_CN = "正态"
NORM_EN = "Normal"
T_CN = "T"
T_EN = "T"
CUSTOM_CN = "自定义"
CUSTOM_EN = "Theoretical"
TITLE_CN = "{} IC {}分布 Q-Q 图"
TITLE_EN = "{} Period IC {} Dist. Q-Q"
XLABEL_CN = "{} 分布分位数"
XLABEL_EN = "{} Distribution Quantile"
YLABEL_CN = "Observed Quantile"
YLABEL_EN = "Observed Quantile"
ICQQ = ICQQ()
class QRETURNBAR(_PlotLabels):
COLUMN_CN = "{} 天"
COLUMN_EN = "{} Day"
TITLE_CN = "各分位数平均收益"
TITLE_EN = "Mean Period Wise Return By Factor Quantile"
YLABEL_CN = "平均收益 (bps)"
YLABEL_EN = "Mean Return (bps)"
QRETURNBAR = QRETURNBAR()
class QRETURNVIOLIN(_PlotLabels):
LEGENDNAME_CN = "滞后天数"
LEGENDNAME_EN = "forward_periods"
TITLE_CN = "各分位数收益分布图"
TITLE_EN = "Period Wise Return By Factor Quantile"
YLABEL_CN = "收益 (bps)"
YLABEL_EN = "Return (bps)"
QRETURNVIOLIN = QRETURNVIOLIN()
class QRETURNTS(_PlotLabels):
TITLE_CN = "最大分位收益减最小分位收益 ({} 天)"
TITLE_EN = "Top Minus Bottom Quantile Mean Return ({} Period Forward Return)"
LEGEND0_CN = "当日收益 (加减 {:.2f} 倍当日标准差)"
LEGEND0_EN = "mean returns spread (+/- {:.2f} std)"
LEGEND1_CN = "1 个月移动平均"
LEGEND1_EN = "1 month moving avg"
YLABEL_CN = "分位数平均收益差 (bps)"
YLABEL_EN = "Difference In Quantile Mean Return (bps)"
QRETURNTS = QRETURNTS()
class ICGROUP(_PlotLabels):
TITLE_CN = "分组 IC"
TITLE_EN = "Information Coefficient By Group"
ICGROUP = ICGROUP()
class AUTOCORR(_PlotLabels):
TITLE_CN = "因子自相关性 (滞后 {} 天)"
TITLE_EN = "{} Period Factor Autocorrelation"
YLABEL_CN = "自相关性"
YLABEL_EN = "Autocorrelation Coefficient"
TEXT_CN = "均值 {:.3f}"
TEXT_EN = "Mean {:.3f}"
AUTOCORR = AUTOCORR()
class TBTURNOVER(_PlotLabels):
TURNOVER_CN = "{:d} 分位换手率"
TURNOVER_EN = "quantile {:d} turnover"
TITLE_CN = "{} 天换手率"
TITLE_EN = "{} Period Top and Bottom Quantile Turnover"
YLABEL_CN = "分位数换手率"
YLABEL_EN = "Proportion Of Names New To Quantile"
TBTURNOVER = TBTURNOVER()
class ICHEATMAP(_PlotLabels):
TITLE_CN = "{} 天 IC 月度均值"
TITLE_EN = "Monthly Mean {} Period IC"
ICHEATMAP = ICHEATMAP()
class CUMRET(_PlotLabels):
YLABEL_CN = "累积收益"
YLABEL_EN = "Cumulative Returns"
TITLE_CN = "因子值加权多空组合累积收益 ({} 天平均)"
TITLE_EN = """Factor Weighted Long/Short Portfolio Cumulative Return
({} Fwd Period)"""
CUMRET = CUMRET()
class TDCUMRET(_PlotLabels):
YLABEL_CN = "累积收益"
YLABEL_EN = "Cumulative Returns"
TITLE_CN = "做多最大分位做空最小分位组合累积收益 ({} 天平均)"
TITLE_EN = """Long Top/Short Bottom Factor Portfolio Cumulative Return
({} Fwd Period)"""
TDCUMRET = TDCUMRET()
class CUMRETQ(_PlotLabels):
YLABEL_CN = "累积收益(对数轴)"
YLABEL_EN = "Log Cumulative Returns"
TITLE_CN = "分位数 {} 天 Forward Return 累积收益 (对数轴)"
TITLE_EN = """Cumulative Return by Quantile
({} Period Forward Return)"""
CUMRETQ = CUMRETQ()
class AVGCUMRET(_PlotLabels):
TITLE_CN = "因子预测能力 (前 {} 天, 后 {} 天)"
TITLE_EN = "Average Cumulative Returns by Quantile ({} days backword, {} days forward)"
COLUMN_CN = "{} 分位"
COLUMN_EN = "Quantile {}"
XLABEL_CN = "天数"
XLABEL_EN = "Periods"
YLABEL_CN = "平均累积收益 (bps)"
YLABEL_EN = "Mean Return (bps)"
AVGCUMRET = AVGCUMRET()
class EVENTSDIST(_PlotLabels):
TITLE_CN = "因子数量随时间分布"
TITLE_EN = "Distribution of events in time"
XLABEL_CN = "日期"
XLABEL_EN = "Date"
YLABEL_CN = "因子数量"
YLABEL_EN = "Number of events"
EVENTSDIST = EVENTSDIST()
class MISSIINGEVENTSDIST(_PlotLabels):
TITLE_CN = "因子数量随时间分布"
TITLE_EN = "Distribution of missing events in time"
XLABEL_CN = "日期"
XLABEL_EN = "Date"
YLABEL_CN = "因子缺失率"
YLABEL_EN = "Rate of missing events"
MISSIINGEVENTSDIST = MISSIINGEVENTSDIST()
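# --- Added usage sketch (not part of the original module) ---
# Illustrates how the `customize` decorator above is meant to wrap a plotting helper
# so it runs inside plotting_context()/axes_style() and picks up the font handling.
# The helper itself (name, arguments) is hypothetical.
@customize
def plot_ic_series(ic: pd.Series, periods: int = 5):
    """Plot an IC time series using the shared seaborn context and label set."""
    ax = ic.plot(title=ICTS.get("TITLE").format(periods))
    ax.set_ylabel("IC")
    return ax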
|
#!/bin/python
# Read a list of score files from the Gobnilp's scorer and learn the BN structure using Gobnilp.
import sys
import os
from subprocess import call
def LearnWithGobnilp(pathToGobnilp, pathToScore):
print "Learning the network structures from file", pathToScore, "using Gobnilp."
baseName = os.path.splitext(pathToScore)[0];
resultFilename = baseName + ".gobnilp.result";
timeFilename = baseName + ".gobnilp.time";
matrixFilename = baseName + ".gobnilp.matrix";
with open("gobnilp.set", "w") as gobnilpSettings:
gobnilpSettings.write("gobnilp/outputfile/solution = \"" + resultFilename + "\"\n");
gobnilpSettings.write("gobnilp/outputfile/scoreandtime = \"" + timeFilename + "\"\n");
gobnilpSettings.write("gobnilp/outputfile/adjacencymatrix = \"" + matrixFilename + "\"\n");
gobnilpCommand = [pathToGobnilp, "-f=jkl", pathToScore];
call(gobnilpCommand, shell = False, stdout=open(os.devnull, "w"));
call(["rm", "-f", "gobnilp.set"], shell=False);
print "Finished learning."
return matrixFilename, resultFilename, timeFilename;
def Error():
print "Usage:", sys.argv[0], "data_filenames";
exit(0);
def LearnGobnilp(fileList):
pathToGobnilp = "/opt/bnet/learning/gobnilp-1.4.1-cplex/bin/gobnilp";
results = [LearnWithGobnilp(pathToGobnilp, datasetFile) for datasetFile in fileList if os.path.isfile(datasetFile)];
matrixFiles, resultFiles, timeFiles = [[row[i] for row in results] for i in range(len(results[0]))];
return matrixFiles, resultFiles, timeFiles;
if __name__ == "__main__":
try:
LearnGobnilp(sys.argv[1:]);
except:
Error();
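# Added usage note (illustrative): the script expects one or more score files in
# Gobnilp's jkl format on the command line (see the -f=jkl flag above), e.g.
#   python learn_gobnilp.py dataset1.jkl dataset2.jkl
# The script and file names here are placeholders.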
|
input_value=int(input())
print("{:,}".format(input_value))
|
from datetime import datetime, timedelta
import jwt
from fastapi import Depends, HTTPException
from jwt import PyJWTError
from passlib.context import CryptContext
from pydantic import EmailStr
from starlette import status
from config import apisecrets, oauth2_scheme
from models import User
from v1.services.user import UserService
user_service = UserService()
PWD_CONTEXT = CryptContext(schemes=["bcrypt"], deprecated="auto")
SECRET_KEY = apisecrets.SECRET_KEY
ACCESS_TOKEN_ALGORITHM = 'HS256'
CREDENTIALS_EXCEPTION = HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid credentials.",
headers={"WWW-Authenticate": "Bearer"})
class AuthService:
def valid_password(self, plain_pass: str, hashed_pass: str) -> bool:
return PWD_CONTEXT.verify(plain_pass, hashed_pass)
def authenticate_user(self, email: EmailStr, password: str) -> User:
user = user_service.get_user_by_email(email)
if not user:
return
if not self.valid_password(password, user.password):
return
return user
def create_access_token(self, data: dict,
expires_delta: timedelta) -> bytes:
to_encode = data.copy()
expire = datetime.utcnow() + expires_delta
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode,
SECRET_KEY,
algorithm=ACCESS_TOKEN_ALGORITHM)
return encoded_jwt
def current_user(self, token: str = Depends(oauth2_scheme)) -> User:
try:
payload = jwt.decode(token,
SECRET_KEY,
algorithms=[ACCESS_TOKEN_ALGORITHM])
id = payload.get("id")
except PyJWTError:
raise CREDENTIALS_EXCEPTION
return user_service.get_user(id)
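# --- Added usage sketch (not part of the original module) ---
# Shows how the pieces above fit together: authenticate, then issue a token whose
# "id" claim is what current_user() reads back. The 30-minute lifetime and the
# assumption that User exposes an `id` attribute are illustrative, not taken from
# the original code.
def login_for_access_token(email: EmailStr, password: str) -> dict:
    auth = AuthService()
    user = auth.authenticate_user(email, password)
    if not user:
        raise CREDENTIALS_EXCEPTION
    token = auth.create_access_token(
        data={"id": user.id},                # claim consumed by current_user()
        expires_delta=timedelta(minutes=30), # illustrative lifetime
    )
    return {"access_token": token, "token_type": "bearer"}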
|
"""Test pipe subpackage"""
import typing
from src.oolongt.pipe import noop, pipe
from tests.params.pipe import param_noop, param_pipe
@param_noop()
def test_noop(expected: typing.Any):
"""Test `noop`
Arguments:
expected {typing.Any} -- expected value (same as input)
"""
received = noop(expected)
assert received == expected
@param_pipe()
def test_pipe(
init: typing.Any,
pipeline: typing.Iterable,
expected: typing.Any):
"""Test `pipe`
Arguments:
init {typing.Any} -- input value
pipeline {typing.Any} -- (iterable of) callables
expected {typing.Any} -- expected result
"""
received = pipe(init, *pipeline)
assert received == expected
|
from dfs.datasheets.parsers.treatments.parser_2014 import TreatmentDatasheetParser2014
import dfs.datasheets.datatabs as datatabs
import dfs.datasheets.datasheet as datasheet
class TreatmentDatasheetParser2017(TreatmentDatasheetParser2014):
def format_output_filename(self, input_filename):
return input_filename.replace('2017', '2017_Converted')
def parse_witness_tree_tab(self, workbook, sheet):
worksheet = workbook[datasheet.TAB_NAME_WITNESS_TREES]
tab = datatabs.witnesstree.WitnessTreeTab()
for rownumber in range(3, 33):
tree = datatabs.witnesstree.WitnessTreeTabTree()
tree.micro_plot_id = self.parse_int(worksheet[f'B{rownumber}'].value)
if tree.micro_plot_id is None:
continue
tree.tree_number = self.parse_int(worksheet[f'A{rownumber}'].value)
tree.species_known = worksheet[f'C{rownumber}'].value
tree.species_guess = worksheet[f'D{rownumber}'].value
tree.dbh = self.parse_float(worksheet[f'E{rownumber}'].value)
live_or_dead = self.parse_int(worksheet[f'F{rownumber}'].value)
if live_or_dead is not None:
tree.live_or_dead = 'L' if 1 == live_or_dead else 'D'
else:
tree.live_or_dead = 'L'
tree.azimuth = self.parse_int(worksheet[f'G{rownumber}'].value)
tree.distance = self.parse_float(worksheet[f'H{rownumber}'].value)
tab.witness_trees.append(tree)
return tab
def parse_tree_table_tab(self, workbook, sheet):
worksheet = workbook[datasheet.TAB_NAME_TREE_TABLE]
tab = datatabs.tree.TreeTableTab()
row_valid = True
i = 3
subplot_tree_numbers = {}
while (row_valid):
if not worksheet[f'A{i}'].value:
row_valid = False
continue
species = datatabs.tree.TreeTableSpecies()
species.micro_plot_id = worksheet[f'A{i}'].value
if species.micro_plot_id not in subplot_tree_numbers:
subplot_tree_numbers[species.micro_plot_id] = 1
else:
subplot_tree_numbers[species.micro_plot_id] += 1
species.tree_number = subplot_tree_numbers[species.micro_plot_id]
species.species_known = worksheet[f'C{i}'].value
species.species_guess = worksheet[f'D{i}'].value
species.diameter_breast_height = self.parse_float(worksheet[f'E{i}'].value)
live_or_dead = self.parse_int(worksheet[f'F{i}'].value)
if live_or_dead is not None:
species.live_or_dead = 'L' if 1 == live_or_dead else 'D'
species.comments = worksheet[f'G{i}'].value
tab.tree_species.append(species)
i += 1
return tab
def parse_sapling_tab(self, workbook, sheet):
worksheet = workbook[datasheet.TAB_NAME_SAPLING]
tab = datatabs.sapling.SaplingTab()
row_valid = True
i = 3
subplot_sapling_numbers = {}
while (row_valid):
if not worksheet[f'A{i}'].value:
row_valid = False
continue
species = datatabs.sapling.SaplingSpecies()
species.micro_plot_id = self.parse_int(worksheet[f'A{i}'].value)
if species.micro_plot_id not in subplot_sapling_numbers:
subplot_sapling_numbers[species.micro_plot_id] = 1
else:
subplot_sapling_numbers[species.micro_plot_id] += 1
species.sapling_number = subplot_sapling_numbers[species.micro_plot_id]
species.quarter = self.parse_int(worksheet[f'B{i}'].value)
species.scale = self.parse_int(worksheet[f'C{i}'].value)
species.species_known = worksheet[f'D{i}'].value
species.species_guess = worksheet[f'E{i}'].value
species.diameter_breast_height = self.parse_float(worksheet[f'F{i}'].value)
tab.sapling_species.append(species)
i += 1
return tab
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pathlib import PurePath
from typing import Dict, Optional, Set
from pants.backend.python.target_types import PythonRequirementsField, PythonSources
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.util_rules.strip_source_roots import (
SourceRootStrippedSources,
StripSourcesFieldRequest,
)
from pants.engine.addresses import Address
from pants.engine.rules import rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.target import Targets
from pants.util.frozendict import FrozenDict
@dataclass(frozen=True)
class PythonModule:
module: str
@classmethod
def create_from_stripped_path(cls, path: PurePath) -> "PythonModule":
module_name_with_slashes = (
path.parent if path.name == "__init__.py" else path.with_suffix("")
)
return cls(module_name_with_slashes.as_posix().replace("/", "."))
@dataclass(frozen=True)
class FirstPartyModuleToAddressMapping:
mapping: FrozenDict[str, Address]
def address_for_module(self, module: str) -> Optional[Address]:
target = self.mapping.get(module)
if target is not None:
return target
# If the module is not found, try the parent, if any. This is to accommodate `from`
# imports, where we don't care about the specific symbol, but only the module. For example,
# with `from typing import List`, we only care about `typing`.
# Unlike with third party modules, we do not look past the direct parent.
if "." not in module:
return None
parent_module = module.rsplit(".", maxsplit=1)[0]
return self.mapping.get(parent_module)
@rule
async def map_first_party_modules_to_addresses() -> FirstPartyModuleToAddressMapping:
all_targets = await Get[Targets](AddressSpecs([DescendantAddresses("")]))
candidate_targets = tuple(tgt for tgt in all_targets if tgt.has_field(PythonSources))
sources_per_target = await MultiGet(
Get[SourceRootStrippedSources](StripSourcesFieldRequest(tgt[PythonSources]))
for tgt in candidate_targets
)
modules_to_addresses: Dict[str, Address] = {}
modules_with_multiple_owners: Set[str] = set()
for tgt, sources in zip(candidate_targets, sources_per_target):
for f in sources.snapshot.files:
module = PythonModule.create_from_stripped_path(PurePath(f)).module
if module in modules_to_addresses:
modules_with_multiple_owners.add(module)
else:
modules_to_addresses[module] = tgt.address
# Remove modules with ambiguous owners.
for module in modules_with_multiple_owners:
modules_to_addresses.pop(module)
return FirstPartyModuleToAddressMapping(FrozenDict(sorted(modules_to_addresses.items())))
@dataclass(frozen=True)
class ThirdPartyModuleToAddressMapping:
mapping: FrozenDict[str, Address]
def address_for_module(self, module: str) -> Optional[Address]:
target = self.mapping.get(module)
if target is not None:
return target
# If the module is not found, try the parent module, if any. For example,
# pants.task.task.Task -> pants.task.task -> pants.task -> pants
if "." not in module:
return None
parent_module = module.rsplit(".", maxsplit=1)[0]
return self.address_for_module(parent_module)
@rule
async def map_third_party_modules_to_addresses() -> ThirdPartyModuleToAddressMapping:
all_targets = await Get[Targets](AddressSpecs([DescendantAddresses("")]))
modules_to_addresses: Dict[str, Address] = {}
modules_with_multiple_owners: Set[str] = set()
for tgt in all_targets:
if not tgt.has_field(PythonRequirementsField):
continue
for python_req in tgt[PythonRequirementsField].value:
for module in python_req.modules:
if module in modules_to_addresses:
modules_with_multiple_owners.add(module)
else:
modules_to_addresses[module] = tgt.address
# Remove modules with ambiguous owners.
for module in modules_with_multiple_owners:
modules_to_addresses.pop(module)
return ThirdPartyModuleToAddressMapping(FrozenDict(sorted(modules_to_addresses.items())))
@dataclass(frozen=True)
class PythonModuleOwner:
"""The target that owns a Python module.
If more than one target owns the same module, the `address` field should be set to `None` to avoid ambiguity.
"""
address: Optional[Address]
@rule
async def map_module_to_address(
module: PythonModule,
first_party_mapping: FirstPartyModuleToAddressMapping,
third_party_mapping: ThirdPartyModuleToAddressMapping,
) -> PythonModuleOwner:
third_party_address = third_party_mapping.address_for_module(module.module)
if third_party_address:
return PythonModuleOwner(third_party_address)
first_party_address = first_party_mapping.address_for_module(module.module)
if first_party_address:
return PythonModuleOwner(first_party_address)
return PythonModuleOwner(address=None)
def rules():
return [
map_first_party_modules_to_addresses,
map_third_party_modules_to_addresses,
map_module_to_address,
]
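# --- Added illustration (not part of the original file) ---
# A standalone sketch of the parent-module fallback both mappings above rely on,
# using plain strings instead of Address objects. First-party lookups fall back one
# level only; third-party lookups walk all the way up the dotted path.
def _fallback_lookup(mapping: Dict[str, str], module: str, recursive: bool) -> Optional[str]:
    if module in mapping:
        return mapping[module]
    if "." not in module:
        return None
    parent = module.rsplit(".", maxsplit=1)[0]
    return _fallback_lookup(mapping, parent, recursive) if recursive else mapping.get(parent)

# e.g. _fallback_lookup({"typing": "src/typing"}, "typing.List", recursive=False) -> "src/typing"
#      _fallback_lookup({"pants": "3rdparty:pants"}, "pants.task.task.Task", recursive=True) -> "3rdparty:pants"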
|
AL = 'AL'
NL = 'NL'
MLB = 'MLB'
LEAGUES = [AL, NL, MLB]
def mlb_teams(year):
""" For given year return teams active in the majors.
Caveat: the list is not complete; it includes only franchises that still have an active team.
"""
year = int(year)
return sorted(al_teams(year) + nl_teams(year))
def al_teams(year):
""" For given year return teams existing in AL.
Caveat: the list is not complete; it includes only franchises that still have an active team.
"""
teams = []
year = int(year)
if year >= 1901:
teams.append('BOS')
teams.append('CLE')
teams.append('CHW')
teams.append('DET')
else:
return []
if year >= 1903:
teams.append('NYY')
if year >= 1969:
teams.append('KCR')
if year >= 1977:
teams.append('SEA')
teams.append('TOR')
league = AL
angels(year, teams)
astros(year, teams, league)
athletics(year, teams)
brewers(year, teams, league)
orioles(year, teams)
rangers(year, teams)
rays(year, teams)
twins(year, teams)
return sorted(teams)
def nl_teams(year):
""" For given year return teams existing in NL.
Caveat: the list is not complete; it includes only franchises that still have an active team.
"""
teams = []
year = int(year)
if year >= 1876:
teams.append('CHC')
else:
return []
if year >= 1883:
teams.append('PHI')
if year >= 1887:
teams.append('PIT')
if year >= 1890:
teams.append('CIN')
if year >= 1892:
teams.append('STL')
if year >= 1962:
teams.append('NYM')
if year >= 1969:
teams.append('SDP')
if year >= 1993:
teams.append('COL')
if year >= 1996:
teams.append('ARI')
league = NL
astros(year, teams, league)
braves(year, teams)
brewers(year, teams, league)
dodgers(year, teams)
giants(year, teams)
marlins(year, teams)
nationals(year, teams)
return sorted(teams)
TEAMS = {AL : al_teams, NL : nl_teams, MLB : mlb_teams}
def angels(year, teams):
""" Append appropriate Angels abbreviation for year if applicable.
"""
if year >= 2005:
teams.append('LAA')
elif year >= 1997:
teams.append('ANA')
elif year >= 1965:
teams.append('CAL')
elif year >= 1961:
teams.append('LAA')
def astros(year, teams, league):
""" Append appropriate Astros abbreviation for year if applicable.
"""
if year >= 2013 and league == AL:
teams.append('HOU')
elif year >= 1962 and year < 2013 and league == NL:
teams.append('HOU')
def athletics(year, teams):
""" Append appropriate Athletics abbreviation for year if applicable.
"""
if year >= 1968:
teams.append('OAK')
elif year >= 1955:
teams.append('KCA')
elif year >= 1901:
teams.append('PHA')
def braves(year, teams):
""" Append appropriate Braves abbreviation for year if applicable.
"""
if year >= 1966:
teams.append('ATL')
elif year >= 1953:
teams.append('MLN')
elif year >= 1876:
teams.append('BSN')
def brewers(year, teams, league):
""" Append appropriate Brewers abbreviation for year if applicable.
"""
if year >= 1970:
if year >= 1993 and league == NL:
teams.append('MIL')
elif year < 1993 and league == AL:
teams.append('MIL')
elif year == 1969 and league == AL:
teams.append('SEP')
def dodgers(year, teams):
""" Append appropriate Dodgers abbreviation for year if applicable.
"""
if year >= 1958:
teams.append('LAD')
elif year >= 1884:
teams.append('BRO')
def giants(year, teams):
""" Append appropriate Giants abbreviation for year if applicable.
"""
if year >= 1958:
teams.append('SFG')
elif year >= 1883:
teams.append('NYG')
def marlins(year, teams):
""" Append appropriate Marlins abbreviation for year if applicable.
"""
if year >= 2012:
teams.append('MIA')
elif year >= 1993:
teams.append('FLA')
def nationals(year, teams):
""" Append appropriate Nationals abbreviation for year if applicable.
"""
if year >= 2005:
teams.append('WSN')
elif year >= 1969:
teams.append('MON')
def orioles(year, teams):
""" Append appropriate Orioles abbreviation for year if applicable.
"""
if year >= 1954:
teams.append('BAL')
elif year >= 1902:
teams.append('SLB')
elif year == 1901:
teams.append('MLA')
def rangers(year, teams):
""" Append appropriate Rangers abbreviation for year if applicable.
"""
if year >= 1972:
teams.append('TEX')
elif year >= 1961:
teams.append('WSA')
def rays(year, teams):
""" Append appropriate Rays abbreviation for year if applicable.
"""
if year >= 2008:
teams.append('TBR')
elif year >= 1998:
teams.append('TBD')
def twins(year, teams):
""" Append appropriate Twins abbreviation for year if applicable.
"""
if year >= 1961:
teams.append('MIN')
elif year >= 1901:
teams.append('WSH')
def valid_teams_subset(year, teams):
""" Ensure teams list is valid.
"""
all_teams = mlb_teams(int(year))
for team in teams:
if team not in all_teams:
return False
return True
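# --- Added usage example (not part of the original module) ---
# Illustrates the year-based lookups above; the expected values can be checked by
# tracing the conditions in al_teams() and its helper functions.
if __name__ == '__main__':
    print(al_teams(1901))                             # ['BOS', 'CHW', 'CLE', 'DET', 'MLA', 'PHA', 'WSH']
    print(valid_teams_subset(1901, ['BOS', 'PHA']))   # True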
|
import socket, sys
from threading import Thread
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 53331))
print("Connected to chat")
def messenger():
while True:
message = input()
if message == "exit":
sock.send(message.encode())
sock.close()
sys.exit()
else:
sock.send(message.encode())
def receiver():
while True:
data = sock.recv(4096)
if data:
print(data.decode())
message_thread = Thread(target=messenger)
get_thread = Thread(target=receiver)
message_thread.start()
get_thread.start()
|
input_num = int(input())
count = 0
# Terminate if anything other than 1s and 0s is mixed in
for i in str(input_num):
# Count up for each digit that is 1
if int(i) == 1:
count += 1
print(count)
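# Example (added for illustration): for input 101 the loop sees '1', '0', '1' and prints 2.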
|
# Raspberry Pi Cat Laser Server
# Flask-based web application that allows control and calibration of the cat
# laser toy. This shouldn't be exposed out to other users! Rather this is meant
# for internal use and calibration (like getting a calibration.json file to use
# with the laser driver script).
# Author: Tony DiCola
from flask import *
import json, sys
import model
# Flask app configuration
DEBUG = True
# Cat laser toy configuration
SERVO_I2C_ADDRESS = 0x40 # I2C address of the PCA9685-based servo controller
SERVO_XAXIS_CHANNEL = 1 # Channel for the x axis rotation which controls laser up/down
SERVO_YAXIS_CHANNEL = 0 # Channel for the y axis rotation which controls laser left/right
SERVO_PWM_FREQ = 50 # PWM frequency for the servos in HZ (should be 50)
SERVO_MIN = 150 # Minimum rotation value for the servo, should be -90 degrees of rotation.
SERVO_MAX = 600 # Maximum rotation value for the servo, should be 90 degrees of rotation.
SERVO_CENTER = 200 # Center value for the servo, should be 0 degrees of rotation.
# Initialize flask app
app = Flask(__name__)
app.config.from_object(__name__)
# Setup the servo and laser model
servos = None
if len(sys.argv) > 1 and sys.argv[1] == "test":
# Setup test servo for running outside a Raspberry Pi
import modeltests
servos = modeltests.TestServos()
else:
# Setup the real servo when running on a Raspberry Pi
import servos
servos = servos.Servos(SERVO_I2C_ADDRESS, SERVO_XAXIS_CHANNEL, SERVO_YAXIS_CHANNEL, SERVO_PWM_FREQ)
model = model.LaserModel(servos, SERVO_MIN, SERVO_MAX, SERVO_CENTER)
# Main view for rendering the web page
@app.route('/')
def main():
return render_template('main.html', model=model)
# Error handler for API call failures
@app.errorhandler(ValueError)
def valueErrorHandler(error):
return jsonify({'result': str(error)}), 500
def successNoResponse():
return jsonify({'result': 'success'}), 204
# API calls used by the web app
@app.route('/set/servo/xaxis/<xaxis>', methods=['PUT'])
def setServoXAxis(xaxis):
model.setXAxis(xaxis)
return successNoResponse()
@app.route('/set/servo/yaxis/<yaxis>', methods=['PUT'])
def setServoYAaxis(yaxis):
model.setYAxis(yaxis)
return successNoResponse()
@app.route('/set/servos/<xaxis>/<yaxis>', methods=['PUT'])
def setServos(xaxis, yaxis):
model.setXAxis(xaxis)
model.setYAxis(yaxis)
return successNoResponse()
@app.route('/get/servos', methods=['GET'])
def getServos():
return jsonify({'xaxis': model.getXAxis(), 'yaxis': model.getYAxis() }), 200
@app.route('/get/calibration', methods=['GET'])
def getCalibration():
return jsonify({'target': model.targetCalibration, 'servo': model.servoCalibration}), 200
@app.route('/set/calibration', methods=['POST'])
def setCalibration():
model.setCalibration(json.loads(request.form['targetCalibration']), json.loads(request.form['servoCalibration']))
return successNoResponse()
@app.route('/target/<int:x>/<int:y>', methods=['PUT'])
def target(x, y):
model.target(x, y)
return successNoResponse()
# Start running the flask app
if __name__ == '__main__':
app.run(host='0.0.0.0')
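# --- Added usage examples (illustrative, not part of the original app) ---
# With the server running, the API above can be exercised from another shell,
# assuming the default Flask port used by app.run() above:
#   curl -X PUT http://localhost:5000/set/servos/350/400   # point both servos
#   curl http://localhost:5000/get/servos                  # read current positions
#   curl -X PUT http://localhost:5000/target/120/80        # aim at a calibrated screen point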
|
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""Charmed Machine Operator for the PostgreSQL database."""
import logging
import secrets
import string
import subprocess
from typing import List
from charms.operator_libs_linux.v0 import apt
from ops.charm import ActionEvent, CharmBase
from ops.main import main
from ops.model import (
ActiveStatus,
BlockedStatus,
MaintenanceStatus,
ModelError,
Relation,
WaitingStatus,
)
from cluster import Patroni
logger = logging.getLogger(__name__)
PEER = "postgresql-replicas"
class PostgresqlOperatorCharm(CharmBase):
"""Charmed Operator for the PostgreSQL database."""
def __init__(self, *args):
super().__init__(*args)
self._postgresql_service = "postgresql"
self.framework.observe(self.on.install, self._on_install)
self.framework.observe(self.on.leader_elected, self._on_leader_elected)
self.framework.observe(self.on.start, self._on_start)
self.framework.observe(self.on.get_initial_password_action, self._on_get_initial_password)
self._cluster = Patroni(self._unit_ip)
@property
def _unit_ip(self) -> str:
"""Current unit ip."""
return self.model.get_binding(PEER).network.bind_address
def _on_install(self, event) -> None:
"""Install prerequisites for the application."""
self.unit.status = MaintenanceStatus("installing PostgreSQL")
# Prevent the default cluster creation.
self._cluster.inhibit_default_cluster_creation()
# Install the PostgreSQL and Patroni requirements packages.
try:
self._install_apt_packages(event, ["postgresql", "python3-pip", "python3-psycopg2"])
except (subprocess.CalledProcessError, apt.PackageNotFoundError):
self.unit.status = BlockedStatus("failed to install apt packages")
return
try:
resource_path = self.model.resources.fetch("patroni")
except ModelError as e:
logger.error(f"missing patroni resource {str(e)}")
self.unit.status = BlockedStatus("Missing 'patroni' resource")
return
# Build Patroni package path with raft dependency and install it.
try:
patroni_package_path = f"{str(resource_path)}[raft]"
self._install_pip_packages([patroni_package_path])
except subprocess.SubprocessError:
self.unit.status = BlockedStatus("failed to install Patroni python package")
return
self.unit.status = WaitingStatus("waiting to start PostgreSQL")
def _on_leader_elected(self, _) -> None:
"""Handle the leader-elected event."""
data = self._peers.data[self.app]
# The leader sets the needed password on peer relation databag if they weren't set before.
data.setdefault("postgres-password", self._new_password())
data.setdefault("replication-password", self._new_password())
def _on_start(self, event) -> None:
"""Handle the start event."""
# Doesn't try to bootstrap the cluster if it's in a blocked state
# caused, for example, because a failed installation of packages.
if self._has_blocked_status:
return
postgres_password = self._get_postgres_password()
replication_password = self._replication_password
# If the leader was not elected (and the needed passwords were not generated yet),
# the cluster cannot be bootstrapped yet.
if not postgres_password or not replication_password:
logger.info("leader not elected and/or passwords not yet generated")
self.unit.status = WaitingStatus("awaiting passwords generation")
event.defer()
return
# Set some information needed by Patroni to bootstrap the cluster.
cluster_name = self.app.name
member_name = self.unit.name.replace("/", "-")
success = self._cluster.bootstrap_cluster(
cluster_name, member_name, postgres_password, replication_password
)
if success:
# The cluster is up and running.
self.unit.status = ActiveStatus()
else:
self.unit.status = BlockedStatus("failed to start Patroni")
def _on_get_initial_password(self, event: ActionEvent) -> None:
"""Returns the password for the postgres user as an action response."""
event.set_results({"postgres-password": self._get_postgres_password()})
@property
def _has_blocked_status(self) -> bool:
"""Returns whether the unit is in a blocked state."""
return isinstance(self.unit.status, BlockedStatus)
def _get_postgres_password(self) -> str:
"""Get postgres user password.
Returns:
The password from the peer relation or None if the
password has not yet been set by the leader.
"""
data = self._peers.data[self.app]
return data.get("postgres-password")
@property
def _replication_password(self) -> str:
"""Get replication user password.
Returns:
The password from the peer relation or None if the
password has not yet been set by the leader.
"""
data = self._peers.data[self.app]
return data.get("replication-password")
def _install_apt_packages(self, _, packages: List[str]) -> None:
"""Simple wrapper around 'apt-get install -y.
Raises:
CalledProcessError if it fails to update the apt cache.
PackageNotFoundError if the package is not in the cache.
PackageError if the packages could not be installed.
"""
try:
logger.debug("updating apt cache")
apt.update()
except subprocess.CalledProcessError as e:
logger.exception("failed to update apt cache, CalledProcessError", exc_info=e)
raise
for package in packages:
try:
apt.add_package(package)
logger.debug(f"installed package: {package}")
except apt.PackageNotFoundError:
logger.error(f"package not found: {package}")
raise
except apt.PackageError:
logger.error(f"package error: {package}")
raise
def _install_pip_packages(self, packages: List[str]) -> None:
"""Simple wrapper around pip install.
Raises:
SubprocessError if the packages could not be installed.
"""
try:
command = ["pip3", "install", *packages]
logger.debug(f"installing python packages: {', '.join(packages)}")
subprocess.check_call(command)
except subprocess.SubprocessError:
logger.error("could not install pip packages")
raise
def _new_password(self) -> str:
"""Generate a random password string.
Returns:
A random password string.
"""
choices = string.ascii_letters + string.digits
password = "".join([secrets.choice(choices) for i in range(16)])
return password
@property
def _peers(self) -> Relation:
"""Fetch the peer relation.
Returns:
A:class:`ops.model.Relation` object representing
the peer relation.
"""
return self.model.get_relation(PEER)
if __name__ == "__main__":
main(PostgresqlOperatorCharm)
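# Added usage note (illustrative): once deployed, the password the leader stores on
# the peer relation can be read back through the action wired up above, e.g.
#   juju run-action postgresql/0 get-initial-password --wait
# (juju 2.x action syntax; the unit name depends on how the charm was deployed).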
|
from PIL import Image as Img
from nbtschematic import SchematicFile
from getbrightnessval import get_bright
def get_image_pixels(path, dark, light):
try:
im = Img.open(path)
except Exception:
print("unable to get image file")
raise  # re-raise: continuing would fail on the undefined `im` below
size = im.size
sizex = size[0]
sizey = size[1]
pix = im.load()
sf = SchematicFile(shape=(1, sizey, sizex))
for i in range(sizex):
for j in range(sizey):
pin = pix[i, j]
if get_bright(pin[0], pin[1], pin[2]) < (256 / 2):
sf.blocks[0, j, i] = dark
print("set block " + str(i) + " 1 " + str(j) + " to dark block")
else:
sf.blocks[0, j, i] = light
print("set block " + str(i) + " 1 " + str(j) + " to light block")
return sf
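# --- Added usage sketch (not part of the original module) ---
# The image path and block IDs below are placeholders (35 and 0 are the classic
# numeric IDs for wool and air). Saving assumes SchematicFile inherits save() from
# nbtlib.File, which is an assumption about the nbtschematic dependency.
if __name__ == '__main__':
    schem = get_image_pixels('input.png', dark=35, light=0)
    schem.save('output.schematic')  # see assumption above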
|