#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import numpy as np
import scipy
import scipy.fftpack
import scipy.optimize
import scipy.signal
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
def load_log(filename):
"""Load data from logs generated from limtoc. Returns
a numpy record array.
"""
datatype = None
values = []
if filename.endswith('.gz'):
openf = lambda x: gzip.open(x, 'rt')
else:
openf = open
with openf(filename) as f:
for line in f:
if not line.strip():
continue
if datatype is None:
if line.strip().endswith('fields are:'):
# ignore lines until we get here
# next line describes data
datatype = []
elif 'k = ' in line:
print(line.strip())
continue
if not datatype:
field_names = line.split(',')
datatype = np.dtype(
[('time', '<f8')] + [(name.strip(), '<f8') for name in field_names])
else:
vals = [float(f) for f in line.split(',')]
assert len(vals) == len(datatype) - 1
vals.insert(0, vals[0]) # copy clock field value to time
values.append(tuple(vals))
data = np.rec.array(values, dtype=datatype)
t = data.time
dt = np.diff(t)
overflow_counts = 2**32 * np.cumsum(dt < 0)
data.time[1:] += overflow_counts
data.time -= data.time[0]
data.time /= 168e6 # real time clock frequency
return data
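# Hedged sketch (not part of the original log format): the overflow correction in
# load_log() assumes the logger's clock is a free-running 32-bit counter at 168 MHz,
# so a negative time difference marks a wraparound. The toy function below
# illustrates that unwrapping step on synthetic counter values; names are illustrative only.
def _example_unwrap_counter():
    import numpy as np
    raw = np.array([2**32 - 3, 2**32 - 1, 1, 3], dtype=float)  # wraps once
    t = raw.copy()
    t[1:] += 2**32 * np.cumsum(np.diff(raw) < 0)  # add 2**32 after each wrap
    t -= t[0]
    return t / 168e6  # seconds, monotonically increasing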
def plot_log(record, show_plot=True, show_roi=True):
colors = sns.color_palette('Paired', 10)
fig, ax = plt.subplots(2, 1, figsize=(11, 6), sharex=True)
if show_roi:
i = np.squeeze(np.argwhere(
np.abs(record.steer_angle) > 0.5*record.steer_angle.std()))
i0, i1 = i[0], i[-1]
i2 = np.squeeze(np.argwhere(
record.time > record.time[i0] + 1))[0]
assert i2 < i1, 'record does not show oscillation for 1 second'
i1 = i2
ax[0].plot([record.time[i0], record.time[i1]],
[record.steer_angle[i0], record.steer_angle[i1]],
color=colors[1], linestyle=' ', marker='X', markersize=10,
label='roi start/end')
else:
i0 = 0
i1 = -1
ax[0].plot(record.time[i0:i1], record.steer_angle[i0:i1], color=colors[1], label='steer angle')
ax[0].plot(record.time[i0:i1], 0*record.time[i0:i1], color='black', linewidth=1, zorder=1)
ax[0].legend()
ax[0].set_xlabel('time [s]')
ax[0].set_ylabel('[rad]')
ax[1].plot(record.time[i0:i1], record.kistler_torque[i0:i1], color=colors[3],
alpha=0.8, label='sensor torque')
ax[1].plot(record.time[i0:i1], record.motor_torque[i0:i1], color=colors[5],
alpha=0.8, label='motor torque')
ax[1].plot(record.time[i0:i1], record.feedback_torque[i0:i1], color=colors[7],
alpha=0.8, label='feedback torque command')
ax[1].legend()
ax[1].set_xlabel('time [s]')
ax[1].set_ylabel('torque [N-m]')
ax[1].plot(record.time[i0:i1], 0*record.time[i0:i1], color='black', linewidth=1, zorder=1)
if show_plot:
plt.show()
if show_roi:
return fig, ax, (i0, i1)
return fig, ax
def display_log_plot(filename):
vals = load_log(filename)
fig, ax = plot_log(vals, show_plot=False)
    try:
        # if mpld3 is active (e.g. imported in a notebook), prefer an axes title;
        # otherwise fall back to a figure suptitle
        title = filename
        mpld3
except NameError:
fig.suptitle(title)
else:
ax[0].set_title(title)
plt.show()
def plot_fft(record):
N = len(record)
dt = np.diff(record.time).mean()
xf = scipy.fftpack.fftfreq(N, dt)
yf = scipy.fftpack.fft(record.steer_angle)
# plot only frequency components less than 10 Hz
fftslice = slice(0, np.where(xf > 10)[0][0])
fig, ax = plt.subplots()
ax.plot(xf[fftslice], 2/N * np.abs(yf[fftslice]))
plt.show()
return fig, ax
# motor torque = -k * x
def calculate_stiffness(record):
    k_commanded = np.linalg.lstsq(np.reshape(record.steer_angle, (-1, 1)),
                                  -np.reshape(record.feedback_torque, (-1, 1)),
                                  rcond=None)[0][0][0]
    k_measured = np.linalg.lstsq(np.reshape(record.steer_angle, (-1, 1)),
                                 -np.reshape(record.motor_torque, (-1, 1)),
                                 rcond=None)[0][0][0]
return k_commanded, k_measured
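# Hedged sketch: calculate_stiffness() fits the virtual-spring model torque = -k * steer_angle
# by least squares. The synthetic example below shows the same one-column lstsq call
# recovering a known k; it is only an illustration, not data from an actual run.
def _example_stiffness_fit(k_true=3.5, noise=0.01):
    import numpy as np
    rng = np.random.default_rng(0)
    angle = rng.uniform(-0.5, 0.5, 200)
    torque = -k_true * angle + noise * rng.standard_normal(200)
    k_est = np.linalg.lstsq(angle.reshape(-1, 1),
                            -torque.reshape(-1, 1), rcond=None)[0][0][0]
    return k_est  # should be close to k_true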
def calculate_fit(record):
# use equation of the form:
# x = (a + bt)*exp(c*t)*sin(2*pi*e*t + d)
fit = []
z = np.where(np.diff(np.sign(record.steer_angle)))[0]
dt = record.time[-1] - record.time[0]
freq = len(z)/dt/2
amp0 = np.max(np.abs(
record.steer_angle[z[0]:z[1]]))
amp1 = np.max(np.abs(
record.steer_angle[z[-2]:z[-1]]))
sign = np.sign(record.steer_angle[0])
T = 2*(record.time[z[1]] - record.time[z[0]])
delay = ((T - (record.time[z[1]] - record.time[0])) /
(2*T)) * 2*np.pi
n = 'exponential'
f = lambda a, b, c, d, e: lambda t: a*np.exp(c*t)*np.sin(2*np.pi*e*t + d)
p0 = (amp0, 0, np.log(amp1/amp0)/dt, delay, freq)
fit.append([n, f, p0])
n = 'linear'
f = lambda a, b, c, d, e: lambda t: (a + b*t)*np.sin(2*np.pi*e*t + d)
p0 = (amp0, (amp1 - amp0)/dt, 0, delay, freq)
fit.append([n, f, p0])
g = lambda record, f: lambda p: f(*p)(record.time) - record.steer_angle
for i in range(len(fit)):
n, f, p0 = fit[i]
p = scipy.optimize.leastsq(g(record, f), p0)[0]
fit[i][2] = p
return fit
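# Hedged sketch: both decay models fitted above share the form
# x(t) = (a + b*t) * exp(c*t) * sin(2*pi*e*t + d); the exponential fit fixes b = 0
# and the linear fit fixes c = 0. The helper below just evaluates that envelope so
# initial guesses can be eyeballed; it is illustrative and not used by calculate_fit().
def _example_decay_model(t, a=1.0, b=0.0, c=-0.5, d=0.0, e=2.0):
    return (a + b * t) * np.exp(c * t) * np.sin(2 * np.pi * e * t + d)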
def plot_fit(record, fit_params, show_plot=True):
colors = sns.color_palette('Paired', 10)
fig, ax = plt.subplots(len(fit_params), 1, figsize=(12, 8), sharex=True)
for i in range(len(fit_params)):
n, f, p = fit_params[i]
ax[i].plot(record.time, record.steer_angle, color=colors[1],
label='steer angle')
ax[i].plot(record.time, f(*p)(record.time), color=colors[5],
linestyle='--',
label='{} decay fit, {}'.format(n, p))
ax[i].plot(record.time, 0*record.time, color='black', linewidth=1, zorder=1)
ax[i].legend()
ax[i].set_ylabel('[rad]')
ax[-1].set_xlabel('time [s]')
    if show_plot:
        plt.show()
return fig, ax
# =========================================================
# For more info, see https://hoseinkh.github.io/projects/
# =========================================================
import pickle
import numpy as np
import pandas as pd
from sortedcontainers import SortedList
from tqdm import tqdm
## ********************************************************
## Parameters
num_neighbors_to_consider = 25 # number of neighbors we'd like to consider
min_num_of_common_movies_to_be_cosidered_similar = 5 # minimum number of movies two users must have rated in common
# we use this minimum to ensure the users have enough overlap for the ...
# ... similarity calculation to be meaningful. This helps the accuracy of the model.
## ***************************
## Load dictionaries:
with open('./Data/user_to_movie.json', 'rb') as f:
user_to_movie = pickle.load(f)
#
with open('./Data/movie_to_user.json', 'rb') as f:
movie_to_user = pickle.load(f)
#
with open('./Data/user_and_movie_to_rating.json', 'rb') as f:
user_and_movie_to_rating = pickle.load(f)
#
with open('./Data/user_and_movie_to_rating___test_data.json', 'rb') as f:
user_and_movie_to_rating___test_data = pickle.load(f)
## ********************************************************
N_max_user_id_in_train = np.max(list(user_to_movie.keys())) + 1
# the test set may contain movies the train set doesn't have data on
m1_max_movie_id_in_train = np.max(list(movie_to_user.keys()))
m2_max_movie_id_in_test = np.max([m for (u, m), r in user_and_movie_to_rating___test_data.items()])
M_max_movie_id_in_train_and_test = max(m1_max_movie_id_in_train, m2_max_movie_id_in_test) + 1
print("N:", N_max_user_id_in_train, "M:", M_max_movie_id_in_train_and_test)
#
if N_max_user_id_in_train > 10000:
print("N_max_user_id_in_train =", N_max_user_id_in_train, "are you sure you want to continue?")
print("Comment out these lines if so...")
exit()
#
#
## to find the user similarities, you have to do O(N^2 * M) calculations!
## in the "real-world" we would want to parallelize this
#
## note: we really only have to do half the calculations ...
# ... since w_ij is symmetric, however then we need to store them, ...
# ... hence here we simply sacrifice computational time for space. This ...
# ... trade-off depends on the implementation and the database.
neighbors = [] # store neighbors in this list. Neighbors of user i are neighbors[i]
averages = [] # each user's average rating for later use
deviations = [] # each user's deviation for later use
for i in tqdm(range(N_max_user_id_in_train)):
## For each user i: find the num_neighbors_to_consider closest users to user i
movies_i = user_to_movie[i]
movies_i_set = set(movies_i)
# userMovie2ratings_dict___user_i
# calculate avg and deviation
userMovie2ratings_dict___user_i = { movie:user_and_movie_to_rating[(i, movie)] for movie in movies_i }
avg_rating_for_user_i = np.mean(list(userMovie2ratings_dict___user_i.values()))
  ## let's calculate the deviations: rating_im - avg_i
dev_of_rating__user_i = { movie:(rating - avg_rating_for_user_i) for movie, rating in userMovie2ratings_dict___user_i.items() }
dev_of_rating__user_i_values = np.array(list(dev_of_rating__user_i.values()))
sigma_i = np.sqrt(dev_of_rating__user_i_values.dot(dev_of_rating__user_i_values))
#
# save these for later use
averages.append(avg_rating_for_user_i)
deviations.append(dev_of_rating__user_i)
#
## In the following we calculate the similarities between ...
# ... other users with user i
sl = SortedList()
for j in range(N_max_user_id_in_train):
# For each user j, we want to calculate the similarity
# don't include user i
if j != i:
movies_j = user_to_movie[j]
movies_j_set = set(movies_j)
common_movies_Ui_Uj = (movies_i_set & movies_j_set) # intersection
if len(common_movies_Ui_Uj) > min_num_of_common_movies_to_be_cosidered_similar:
# this user has the minimum number of required common movies to be considered for the computations
# calculate avg and deviation for this user
userMovie2ratings_dict___user_j = { movie:user_and_movie_to_rating[(j, movie)] for movie in movies_j }
avg_rating_for_user_j = np.mean(list(userMovie2ratings_dict___user_j.values()))
dev_of_rating__user_j = { movie:(rating - avg_rating_for_user_j) for movie, rating in userMovie2ratings_dict___user_j.items() }
dev_of_rating__user_j_values = np.array(list(dev_of_rating__user_j.values()))
sigma_j = np.sqrt(dev_of_rating__user_j_values.dot(dev_of_rating__user_j_values))
#
## calculate the correlation coefficient
numerator = sum(dev_of_rating__user_i[m]*dev_of_rating__user_j[m] for m in common_movies_Ui_Uj)
w_ij = numerator / (sigma_i * sigma_j)
#
# insert into sorted list and truncate
# negate weight, because list is sorted ascending
# maximum value (1) is "closest"
# since we are interested in high values of the weights, we store -w_ij (later we remove the negative)
sl.add((-w_ij, j))
# we only need to consider the top neighbors, so ...
# ... delete the last one if the size exceeds.
if len(sl) > num_neighbors_to_consider:
del sl[-1]
#
# store the top neighbors
neighbors.append(sl)
#
# print out useful things
# if i % 1 == 0:
# print(i)
#
## ********************************************************
## Make a prediction for the rating that user i gives to the movie m
def predict(i, m):
if False: # we deactivate this for getting the training MSE
## Check to see if user i has already rated movie m or not ...
# ... if so, return the actual rating!
try:
prediction = user_and_movie_to_rating[(i,m)]
return prediction
except KeyError:
pass
## User i has not rated movie m. We need to predict it.
# calculate the weighted sum of deviations
numerator = 0
denominator = 0
for neg_w, j in neighbors[i]:
# remember, the weight is stored as its negative
# so the negative of the negative weight is the positive weight
try:
numerator += (-neg_w) * deviations[j][m]
denominator += abs(neg_w)
except KeyError:
# neighbor may not have rated the same movie
pass
#
if denominator == 0:
# we can't do anything, hence use the user i's average rating as prediction
prediction = averages[i]
else:
prediction = numerator / denominator + averages[i]
#
if True:
  ## The predicted rating can be anything, but here for instance we want ...
  # ... to keep the ratings between 0.5 and 5. Hence we clamp it!
# you can avoid this.
prediction = min(5, prediction)
prediction = max(0.5, prediction) # min rating is 0.5
#
return prediction
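## Hedged sketch (illustration only, not used below): the prediction above is a
## weighted average of the neighbors' rating deviations, added back to user i's mean:
##   r_hat(i, m) = avg_i + sum_j( w_ij * dev_j(m) ) / sum_j( |w_ij| )
## With two toy neighbors (weights 0.9 and 0.4, deviations +1.0 and -0.5) and
## avg_i = 3.0, the prediction is 3.0 + (0.9*1.0 + 0.4*(-0.5)) / 1.3 ≈ 3.54.
def example_weighted_prediction(avg_i=3.0, neighbor_weights=(0.9, 0.4), neighbor_devs=(1.0, -0.5)):
    numerator = sum(w * d for w, d in zip(neighbor_weights, neighbor_devs))
    denominator = sum(abs(w) for w in neighbor_weights)
    return avg_i + numerator / denominator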
## ********************************************************
## ***************************
## using neighbors, calculate MSE for the train set
train_predictions = []
train_targets = []
for (i, m), target in user_and_movie_to_rating.items():
# predict the rating that user i gives to movie m
prediction = predict(i, m)
#
# save the prediction and target
train_predictions.append(prediction)
train_targets.append(target)
## ***************************
## using neighbors, calculate MSE for the test set
test_predictions = []
test_targets = []
for (i, m), target in user_and_movie_to_rating___test_data.items():
# predict the rating that user i gives to movie m
prediction = predict(i, m)
#
# save the prediction and target
test_predictions.append(prediction)
test_targets.append(target)
#
## ***************************
# calculate accuracy
def mse(pred, targ):
pred = np.array(pred)
targ = np.array(targ)
return np.mean((pred - targ)**2)
#
#
print('MSE for train set = {}'.format(mse(train_predictions, train_targets)))
print('MSE for test set = {}'.format(mse(test_predictions, test_targets)))
"""
Useful recipes from various internet sources (thanks)
mostly decorator patterns
"""
import os.path as op
import re
import sys
import logging
import functools
from collections import defaultdict
class memoized(object):
"""
Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
Taken from recipe (http://wiki.python.org/moin/PythonDecoratorLibrary)
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
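def _memoized_example():
    """Hedged usage sketch (not part of the original recipe): memoized keys the cache
    on the positional-argument tuple, so the recursive call below runs in linear
    rather than exponential time."""
    @memoized
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    return fib(30)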
def inspect(item, maxchar=80):
"""
Inspect the attributes of an item.
"""
for i in dir(item):
try:
member = str(getattr(item, i))
if maxchar and len(member) > maxchar:
member = member[:maxchar] + "..."
        except Exception:
            member = "[ERROR]"
        print("{}: {}".format(i, member), file=sys.stderr)
def timeit(func):
"""
<http://www.zopyx.com/blog/a-python-decorator-for-measuring-the-execution-time-of-methods>
"""
import time
def timed(*args, **kw):
ts = time.time()
result = func(*args, **kw)
te = time.time()
msg = "{0}{1} {2:.2f}s".format(func.__name__, args, te - ts)
logging.debug(msg)
return result
return timed
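def _timeit_example():
    """Hedged usage sketch: wrapping a function with @timeit only adds a
    logging.debug() line with the elapsed wall-clock time; the return value
    passes through unchanged."""
    @timeit
    def slow_add(a, b):
        import time
        time.sleep(0.1)
        return a + b
    return slow_add(1, 2)  # logs roughly "slow_add(1, 2) 0.10s" and returns 3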
def depends(func):
"""
Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls.
"""
from maize.apps.base import need_update, listify
infile = "infile"
outfile = "outfile"
def wrapper(*args, **kwargs):
assert outfile in kwargs, \
"You need to specify `outfile=` on function call"
if infile in kwargs:
infilename = listify(kwargs[infile])
for x in infilename:
assert op.exists(x), \
"The specified infile `{0}` does not exist".format(x)
outfilename = kwargs[outfile]
if need_update(infilename, outfilename):
return func(*args, **kwargs)
else:
msg = "File `{0}` exists. Computation skipped." \
.format(outfilename)
logging.debug(msg)
outfilename = listify(outfilename)
for x in outfilename:
assert op.exists(x), \
"Something went wrong, `{0}` not found".format(x)
return outfilename
return wrapper
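def _depends_example():
    """Hedged usage sketch, assuming maize.apps.base is importable and that
    need_update() reports stale or missing outputs: a function decorated with
    @depends is skipped when `outfile` is already up to date, otherwise it runs
    and its output file is checked afterwards."""
    @depends
    def gzip_file(infile=None, outfile=None):
        import gzip
        with open(infile, "rb") as fi, gzip.open(outfile, "wb") as fo:
            fo.write(fi.read())
        return outfile
    # gzip_file(infile="reads.fastq", outfile="reads.fastq.gz")  # hypothetical paths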
"""
Functions that make text formatting easier.
"""
class Registry (defaultdict):
def __init__(self, *args, **kwargs):
super(Registry, self).__init__(list, *args, **kwargs)
def iter_tag(self, tag):
for key, ts in self.items():
if tag in ts:
yield key
def get_tag(self, tag):
return list(self.iter_tag(tag))
def count(self, tag):
return sum(1 for x in self.iter_tag(tag))
def update_from(self, filename):
from maize.formats.base import DictFile
d = DictFile(filename)
for k, v in d.items():
self[k].append(v)
class SummaryStats (object):
def __init__(self, a, dtype=None, title=None):
import numpy as np
self.data = a = np.array(a, dtype=dtype)
self.min = a.min()
self.max = a.max()
self.size = a.size
self.mean = np.mean(a)
self.sd = np.std(a)
self.median = np.median(a)
self.sum = a.sum()
self.title = title
a.sort()
        self.firstq = a[self.size // 4]
        self.thirdq = a[self.size * 3 // 4]
self.p1 = a[int(self.size * .025)]
self.p2 = a[int(self.size * .975)]
if dtype == "int":
self.mean = int(self.mean)
self.sd = int(self.sd)
self.median = int(self.median)
def __str__(self):
s = self.title + ": " if self.title else ""
s += "Min={0} Max={1} N={2} Mean={3} SD={4} Median={5} Sum={6}".\
format(self.min, self.max, self.size,
self.mean, self.sd, self.median,
self.sum)
return s
def todict(self, quartile=False):
d = {
"Min": self.min, "Max": self.max,
"Mean": self.mean, "Median": self.median
}
if quartile:
d.update({
"1st Quartile": self.firstq, "3rd Quartile": self.thirdq
})
return d
def tofile(self, filename):
fw = open(filename, "w")
for x in self.data:
            print(x, file=fw)
fw.close()
logging.debug("Array of size {0} written to file `{1}`.".\
format(self.size, filename))
class AutoVivification(dict):
"""
Implementation of perl's autovivification feature.
Thanks to <http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python>
"""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
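def _autovivification_example():
    """Hedged usage sketch: missing keys create nested AutoVivification dicts on
    the fly, so arbitrarily deep assignment works without setdefault()."""
    d = AutoVivification()
    d["chr1"]["gene"]["exon"] = 3   # intermediate levels spring into existence
    return d["chr1"]["gene"]["exon"]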
def enumerate_reversed(sequence):
"""
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
"""
    for index in reversed(range(len(sequence))):
yield index, sequence[index]
def percentage(a, b, precision=1, mode=0):
"""
>>> percentage(100, 200)
'100 of 200 (50.0%)'
"""
_a, _b = a, b
pct = "{0:.{1}f}%".format(a * 100. / b, precision)
a, b = thousands(a), thousands(b)
if mode == 0:
return "{0} of {1} ({2})".format(a, b, pct)
elif mode == 1:
return "{0} ({1})".format(a, pct)
elif mode == 2:
return _a * 100. / _b
return pct
def thousands(x):
"""
>>> thousands(12345)
'12,345'
"""
    s = '%d' % x
    groups = []
    while s and s[-1].isdigit():
        groups.append(s[-3:])
        s = s[:-3]
    return s + ','.join(reversed(groups))
SUFFIXES = {1000: ['', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb'],
1024: ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']}
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
'''Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True, use multiples of 1024;
                                if False (the default), use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
'''
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
return '{0:.{1}f}{2}'.format(size, precision, suffix)
def autoscale(bp, optimal=6):
"""
>>> autoscale(150000000)
20000000
>>> autoscale(97352632)
10000000
"""
slen = str(bp)
tlen = slen[0:2] if len(slen) > 1 else slen[0]
precision = len(slen) - 2 # how many zeros we need to pad?
bp_len_scaled = int(tlen) # scale bp_len to range (0, 100)
tick_diffs = [(x, abs(bp_len_scaled / x - optimal)) for x in [1, 2, 5, 10]]
best_stride, best_tick_diff = min(tick_diffs, key=lambda x: x[1])
while precision > 0:
best_stride *= 10
precision -= 1
return best_stride
def gene_name(st, exclude=("ev",), sep="."):
"""
    Helper function in the BLAST filtering to get rid of alternative splicings.
This is ugly, but different annotation groups are inconsistent with respect
to how the alternative splicings are named. Mostly it can be done by removing
the suffix, except for ones in the exclude list.
"""
if any(st.startswith(x) for x in exclude):
sep = None
st = st.split('|')[0]
    if sep and sep in st:
name, suffix = st.rsplit(sep, 1)
else:
name, suffix = st, ""
# We only want to remove suffix that are isoforms, longer suffix would
# suggest that it is part of the right gene name
if len(suffix) != 1:
name = st
return name
def seqid_parse(seqid, sep=["-"], stdpf=True):
"""
This function tries to parse seqid (1st col in bed files)
return prefix, numeric id, and suffix, for example:
>>> seqid_parse('chr1_random')
('Chr', '1', '_random')
>>> seqid_parse('AmTr_v1.0_scaffold00001', '', stdpf=False)
('AmTr_v1.0_scaffold', '00001', '')
>>> seqid_parse('AmTr_v1.0_scaffold00001')
('Sca', '00001', '')
>>> seqid_parse('PDK_30s1055861')
('C', '1055861', '')
>>> seqid_parse('PDK_30s1055861', stdpf=False)
('PDK', '1055861', '')
>>> seqid_parse("AC235758.1", stdpf=False)
('AC', '235758.1', '')
"""
seqid = seqid.split(';')[0]
if "mito" in seqid or "chloro" in seqid:
return (seqid, "", "")
numbers = re.findall(r'\d+\.*\d*', seqid)
if not numbers:
return (seqid, "", "")
id = numbers[-1]
lastnumi = seqid.rfind(id)
suffixi = lastnumi + len(id)
suffix = seqid[suffixi:]
if sep is None:
sep = [""]
elif type(sep) == str:
sep = [sep]
prefix = seqid[: lastnumi]
if not stdpf:
sep = "|".join(sep)
atoms = re.split(sep, prefix)
if len(atoms) == 1:
prefix = atoms[0]
else:
prefix = atoms[-2]
prefix = prefix.replace("Chromosome", "Chr")
else: # use standard prefix
if re.findall("chr", prefix, re.I):
prefix = "Chr"
if re.findall("lg", prefix, re.I):
prefix = "LG"
elif re.findall("sca", prefix, re.I):
prefix = "Sca"
elif re.findall("supercontig", prefix, re.I):
prefix = "SCg"
elif re.findall("ctg|contig", prefix, re.I):
prefix = "Ctg"
elif re.findall("BAC", prefix, re.I):
prefix = "BAC"
else:
prefix = "C"
return prefix, id, suffix
def fixChromName(name, orgn="medicago"):
"""
Convert quirky chromosome names encountered in different
release files, which are very project specific, into a more
general format.
For example, in Medicago
Convert a seqid like
`Mt3.5.1_Chr1` to `chr1`
`Mt3.5_Chr3` to `chr3`
`chr01_pseudomolecule_IMGAG` to `chr1`
Some examples from Maize
Convert a seqid like
`chromosome:AGPv2:2:1:237068873:1` to `2`
Special cases
`chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
`chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`
"""
import re
mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)")
mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
zmays_pat = re.compile(r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
zmays_sub = { 'mitochondrion' : 'Mt', 'chloroplast' : 'Pt' }
if orgn == "medicago":
for mtr_pat in (mtr_pat1, mtr_pat2):
match = re.search(mtr_pat, name)
if match:
n = match.group(1)
n = n.replace("0", "")
name = re.sub(mtr_pat, n, name)
elif orgn == "maize":
match = re.search(zmays_pat, name)
if match:
n = match.group(1)
name = re.sub(zmays_pat, n, name)
if name in zmays_sub:
name = zmays_sub[name]
return name
def fill(text, delimiter="", width=70):
"""
Wrap text with width per line
"""
texts = []
    for i in range(0, len(text), width):
t = delimiter.join(text[i:i + width])
texts.append(t)
return "\n".join(texts)
def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from maize.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
    items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g)
def uniqify(L):
"""
Uniqify a list, maintains order (the first occurrence will be kept).
"""
seen = set()
nL = []
for a in L:
if a in seen:
continue
nL.append(a)
seen.add(a)
return nL
if __name__ == '__main__':
import doctest
doctest.testmod()
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counter layer for zeroing its input tensor depending on counter state."""
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf
class Counter(tf.keras.layers.Layer):
"""Counter layer.
  In training or non-streaming inference it returns the input as is.
  During streaming inference it counts the number of calls: while that count
  is <= max_counter it returns zeros with the shape of the input tensor, and
  once the count exceeds max_counter it returns the input data.
  This layer is convenient for models in streaming mode that have just
  started execution: the returned data have no value yet because the
  streaming buffers are not fully initialized, so the model returns zeros.
"""
def __init__(self,
max_counter=0,
mode=modes.Modes.TRAINING,
state_name_tag='counter',
**kwargs):
super(Counter, self).__init__(**kwargs)
self.max_counter = max_counter
self.mode = mode
self.state_name_tag = state_name_tag
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
# create state variable for streamable inference mode only
self.state = self.add_weight(
shape=[1, 1, 1],
name='counter',
trainable=False,
initializer=tf.zeros_initializer)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
# in streaming mode with external state,
# state becomes an input output placeholder
self.input_state = tf.keras.layers.Input(
shape=(1, 1),
batch_size=1,
name=self.name + '/' +
self.state_name_tag) # adding names to make it unique
self.output_state = None
def call(self, inputs):
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
return self._streaming_internal_state(inputs)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
      # in streaming mode with external state we return the output state
      # in addition to the output
output, self.output_state = self._streaming_external_state(
inputs, self.input_state)
return output
elif self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE):
return inputs
else:
raise ValueError(f'Encountered unexpected mode `{self.mode}`.')
def get_config(self):
config = {
'max_counter': self.max_counter,
'mode': self.mode,
'state_name_tag': self.state_name_tag,
}
base_config = super(Counter, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_input_state(self):
# input state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.input_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
def get_output_state(self):
# output state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.output_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
def _streaming_internal_state(self, inputs):
new_state = tf.cond(self.state[0][0][0] <= self.max_counter,
lambda: self.state + 1, lambda: self.state)
assign_state = self.state.assign(new_state)
with tf.control_dependencies([assign_state]):
outputs = tf.cond(self.state[0][0][0] > self.max_counter, lambda: inputs,
lambda: tf.zeros_like(inputs))
return outputs
def _streaming_external_state(self, inputs, state):
state_one = state + 1
new_state = tf.cond(state[0][0][0] <= self.max_counter,
lambda: state_one, lambda: state)
with tf.control_dependencies([new_state]):
outputs = tf.cond(new_state[0][0][0] > self.max_counter, lambda: inputs,
lambda: tf.zeros_like(inputs))
return outputs, new_state
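# Hedged sketch (illustration only, not part of the layer): the streaming logic above
# amounts to "emit zeros until the call counter exceeds max_counter, then pass the
# input through". The small loop below mimics the external-state variant with a plain
# tensor acting as the counter state; shapes and names are illustrative assumptions.
def _counter_logic_example(max_counter=2, num_steps=4):
  outputs = []
  state = tf.zeros([1, 1, 1])
  frame = tf.ones([1, 3])  # stand-in for one streaming frame
  for _ in range(num_steps):
    state = tf.cond(state[0][0][0] <= max_counter,
                    lambda: state + 1, lambda: state)
    out = tf.cond(state[0][0][0] > max_counter,
                  lambda: frame, lambda: tf.zeros_like(frame))
    outputs.append(out)
  return outputs  # the first max_counter frames are zeros, later ones pass through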
import threading
import time
import datetime as dt
import socket # Import socket module
import G_Check_Time as chk_time
from H_Derived_Values import tech_heat_load
def record_pulse(lstArgs):
GPIO_read = lstArgs[0]
Pulse_Val = lstArgs[1]
strTech = lstArgs[2]
strPulseMeter = lstArgs[3]
dictInstructions = lstArgs[4]
dtReadTime = lstArgs[5]
BMS_GUI = dictInstructions['General_Inputs']['GUI_BMS']
BMS_thread_lock = dictInstructions['Threads']['BMS_thread_lock']
BMS_thread_lock.acquire(True)
lstLastMinute = dictInstructions[strTech]['GUI_Information'][strPulseMeter]['Pulse_Minute_Readings']
lstTimes = dictInstructions[strTech]['GUI_Information'][strPulseMeter]['Pulse_reading_times']
BMS_thread_lock.release()
    if lstLastMinute[0]: #The first 'bit' of the list is set by D_Database to indicate whether the minute's data has been taken
        lstReadingVal = 0 #Set the last reading value to 0 (we are interested in the time it was taken but don't want to double count it)
lstLastMinute = [False, lstReadingVal, Pulse_Val] #Transfer the last reading to the current minute's list and add the new reading
lstReadingTime = lstTimes[len(lstTimes)-1] #Take the last reading's time from the previous minute
lstTimes = [False, lstReadingTime, dtReadTime] #Transfer the last reading time to the current minute's list and add the new reading's time
else:
lstLastMinute.append(Pulse_Val)
lstTimes.append(dtReadTime)
BMS_thread_lock.acquire(True)
dictInstructions[strTech]['GUI_Information'][strPulseMeter]['Pulse_Minute_Readings'] = lstLastMinute
dictInstructions[strTech]['GUI_Information'][strPulseMeter]['Pulse_reading_times'] = lstTimes
BMS_thread_lock.release()
def pulse_check(dictGlobalInstructions):
time.sleep(10) #Give the sensors a few seconds to have logged some readings
BMS_GUI = dictGlobalInstructions['General_Inputs']['GUI_BMS']
BMS_thread_lock = dictGlobalInstructions['Threads']['BMS_thread_lock']
db_BMS = dictGlobalInstructions['Database']
dtReadTime = dt.datetime.now()
tmNextPulseForecast = dtReadTime
host = socket.gethostname() # Get local machine name
port = 9801 # Reserve a port for OBEMS service.
lstOBEMS = dictGlobalInstructions['User_Inputs']['OBEMS'] #Map for OBEMS channels to HeatSet PCBs for pulse meters
lstPulseCount = []
lstArgs = []
for i in range(0, len(lstOBEMS)):
lstPulseCount.append(0) #Create a zero pulse count against each channel to be read
strType = lstOBEMS[i][0]
for key in dictGlobalInstructions[strType]['GUI_Information']:
if dictGlobalInstructions[strType]['GUI_Information'][key]['ID'] == lstOBEMS[i][1]:
BMS_thread_lock.acquire(True)
dictGlobalInstructions[strType]['GUI_Information'][key]['Pulse_Minute_Readings'] = [False, 0, 0]
dictGlobalInstructions[strType]['GUI_Information'][key]['Pulse_reading_times'] = [False, BMS_GUI.time_created, dtReadTime]
BMS_thread_lock.release()
while BMS_GUI.quit_sys == False:
dtReadTime = dt.datetime.now()
for i in range(0, len(lstOBEMS)):
strType = lstOBEMS[i][0]
ID_num = lstOBEMS[i][1]
strCH = lstOBEMS[i][2] #Determine the channel to read
#print(strType + ";" + str(ID_num) + ";" + strCH)
s = socket.socket() # Create a socket object
s.connect((host, port)) #Connect to the OBEMS server
s.send( strCH.encode() ) #Send the channel read request
strRec = s.recv(1024) #Read the port
            intPulseCountTotal = int(strRec[-9:]) #The last nine characters of the response are the cumulative pulse count - convert to an integer
intTotalPulses = lstPulseCount[i]
lstPulseCount[i] = intPulseCountTotal #Update the locally stored total number of pulses to date
s.close()
#print(lstPulseCount)
for key in dictGlobalInstructions[strType]['GUI_Information']:
#print(strType)
#print(key)
if dictGlobalInstructions[strType]['GUI_Information'][key]['ID'] == ID_num:
#print(True)
#print(intTotalPulses)
#print(key)
boolFlow = dictGlobalInstructions[strType]['GUI_Information'][key]['Pulse_calc_flow']
#print(intTotalPulses)
#print(intPulseCountTotal)
if intPulseCountTotal > intTotalPulses: #If the server cumulative pulses is greater than what was previously read then there have been 1 or more pulses
intPulseCount = intPulseCountTotal - intTotalPulses #Establish the total number of pulses since the last read
Pulse_Val = dictGlobalInstructions[strType]['GUI_Information'][key]['Pulse_Value'] #User defined single pulse value (e.g. 0.25L for hot water flow meter or 1Wh for electricity sub-meter)
Pulse_Val_mult = Pulse_Val * intPulseCount #The value of the pulses read
#print(Pulse_Val_mult)
GPIO_read = dictGlobalInstructions[strType]['GUI_Information'][key]['Pulse_GPIO'] #Which GPIO is used to monitor the pulse?
lstArgs = [GPIO_read, Pulse_Val_mult, strType, key, dictGlobalInstructions, dtReadTime]
#print(strType)
#print(key)
#print(intPulseCount)
record_pulse(lstArgs) #record kWh (electrical) or litres in pulse meter section
time.sleep(10)
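def delta_pulse_value_example(intPrevTotal, intNewTotal, Pulse_Val):
    #Hedged sketch (illustration only, not called by the loop above): the OBEMS server
    #reports a cumulative pulse count, so each pass logs the increase since the last
    #read multiplied by the user-defined per-pulse value (e.g. 0.25L per pulse for a
    #hot water flow meter or 1Wh for an electricity sub-meter)
    if intNewTotal > intPrevTotal:
        return (intNewTotal - intPrevTotal) * Pulse_Val
    return 0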
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.questionnaire import SUBMIT_URL_PATH, THANK_YOU_URL_PATH
class TestQuestionnaireInterstitial(IntegrationTestCase):
BASE_URL = "/questionnaire/"
def test_interstitial_page_button_text_is_continue(self):
self.launchSurvey("test_interstitial_page")
self.post(action="start_questionnaire")
self.post({"favourite-breakfast": "Cereal"})
self.assertInBody("Continue")
def test_interstitial_can_continue_and_submit(self):
self.launchSurvey("test_interstitial_page")
self.post(action="start_questionnaire")
self.post({"favourite-breakfast": "Cereal"})
self.post()
self.assertInUrl("lunch-block")
self.post({"favourite-lunch": "Pizza"})
self.assertInUrl(SUBMIT_URL_PATH)
self.post()
self.assertInUrl(THANK_YOU_URL_PATH)
def test_interstitial_definition(self):
self.launchSurvey("test_interstitial_definition")
self.assertInBody("Successfully")
self.assertInBody("Questionnaire")
self.assertInBody("In a way that accomplishes a desired aim or result")
self.assertInBody(
"A set of printed or written questions with a choice of answers, devised for the purposes of a survey or statistical study"
)
def test_interstitial_content_variant_definition(self):
self.launchSurvey("test_interstitial_definition")
self.post()
self.post({"content-variant-definition-answer": "Answer"})
self.assertInBody("Answer")
self.assertInBody("A spoken or written reply or response to a question")
from io import BytesIO
import pickle
import platform
import numpy as np
import pytest
from matplotlib import cm
from matplotlib.testing.decorators import image_comparison
from matplotlib.dates import rrulewrapper
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.figure as mfigure
def test_simple():
fig = plt.figure()
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.subplot(121)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.axes(projection='polar')
plt.plot(np.arange(10), label='foobar')
plt.legend()
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
# ax = plt.subplot(121, projection='hammer')
# pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
plt.figure()
plt.bar(x=np.arange(10), height=np.arange(10))
pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
fig = plt.figure()
ax = plt.axes()
plt.plot(np.arange(10))
ax.set_yscale('log')
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@image_comparison(['multi_pickle.png'], remove_text=True, style='mpl20',
tol={'aarch64': 0.082}.get(platform.machine(), 0.0))
def test_complete():
fig = plt.figure('Figure with a label?', figsize=(10, 6))
plt.suptitle('Can you fit any more in a figure?')
# make some arbitrary data
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
v = np.sin(v * -0.6)
# Ensure lists also pickle correctly.
plt.subplot(3, 3, 1)
plt.plot(list(range(10)))
plt.subplot(3, 3, 2)
plt.contourf(data, hatches=['//', 'ooo'])
plt.colorbar()
plt.subplot(3, 3, 3)
plt.pcolormesh(data)
plt.subplot(3, 3, 4)
plt.imshow(data)
plt.subplot(3, 3, 5)
plt.pcolor(data)
ax = plt.subplot(3, 3, 6)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.streamplot(x, y, u, v)
ax = plt.subplot(3, 3, 7)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.quiver(x, y, u, v)
plt.subplot(3, 3, 8)
plt.scatter(x, x**2, label='$x^2$')
plt.legend(loc='upper left')
plt.subplot(3, 3, 9)
plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)
#
# plotting is done, now test its pickle-ability
#
result_fh = BytesIO()
pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)
plt.close('all')
# make doubly sure that there are no figures left
assert plt._pylab_helpers.Gcf.figs == {}
# wind back the fh and load in the figure
result_fh.seek(0)
fig = pickle.load(result_fh)
# make sure there is now a figure manager
assert plt._pylab_helpers.Gcf.figs != {}
assert fig.get_label() == 'Figure with a label?'
def test_no_pyplot():
# tests pickle-ability of a figure not created with pyplot
from matplotlib.backends.backend_pdf import FigureCanvasPdf
fig = mfigure.Figure()
_ = FigureCanvasPdf(fig)
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
def test_renderer():
from matplotlib.backends.backend_agg import RendererAgg
renderer = RendererAgg(10, 20, 30)
pickle.dump(renderer, BytesIO())
def test_image():
# Prior to v1.4.0 the Image would cache data which was not picklable
# once it had been drawn.
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(np.arange(12).reshape(3, 4))
manager.canvas.draw()
pickle.dump(fig, BytesIO())
def test_polar():
plt.subplot(111, polar=True)
fig = plt.gcf()
pf = pickle.dumps(fig)
pickle.loads(pf)
plt.draw()
class TransformBlob:
def __init__(self):
self.identity = mtransforms.IdentityTransform()
self.identity2 = mtransforms.IdentityTransform()
# Force use of the more complex composition.
self.composite = mtransforms.CompositeGenericTransform(
self.identity,
self.identity2)
# Check parent -> child links of TransformWrapper.
self.wrapper = mtransforms.TransformWrapper(self.composite)
# Check child -> parent links of TransformWrapper.
self.composite2 = mtransforms.CompositeGenericTransform(
self.wrapper,
self.identity)
def test_transform():
obj = TransformBlob()
pf = pickle.dumps(obj)
del obj
obj = pickle.loads(pf)
# Check parent -> child links of TransformWrapper.
assert obj.wrapper._child == obj.composite
# Check child -> parent links of TransformWrapper.
assert [v() for v in obj.wrapper._parents.values()] == [obj.composite2]
# Check input and output dimensions are set as expected.
assert obj.wrapper.input_dims == obj.composite.input_dims
assert obj.wrapper.output_dims == obj.composite.output_dims
def test_rrulewrapper():
r = rrulewrapper(2)
try:
pickle.loads(pickle.dumps(r))
except RecursionError:
print('rrulewrapper pickling test failed')
raise
def test_shared():
fig, axs = plt.subplots(2, sharex=True)
fig = pickle.loads(pickle.dumps(fig))
fig.axes[0].set_xlim(10, 20)
assert fig.axes[1].get_xlim() == (10, 20)
@pytest.mark.parametrize("cmap", cm._cmap_registry.values())
def test_cmap(cmap):
pickle.dumps(cmap)
def test_unpickle_canvas():
fig = mfigure.Figure()
assert fig.canvas is not None
out = BytesIO()
pickle.dump(fig, out)
out.seek(0)
fig2 = pickle.load(out)
assert fig2.canvas is not None
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date  : 2020/12/31 4:01 PM
# @File : compare_eval_result.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
import collections
import json
import os
import pandas as pd
import requests
def collect_data(devfile="../data_root_dir/newcos/dev.json", eval_results="../output_root_dir/newcos/eval_results-newcos.json"):
"""
生成excel, 对比main.trainer.py生成的结果和devfile
:param devfile: 训练文件,格式是 [(text, keyword, labels),..]
:param eval_results: main.trainer.py生成的文件output文件中的json文件 [(predid, probality)]
:return:
"""
labels = ["消极","中性","积极"]
with open(devfile) as f:
dev_data = json.load(f)
with open(eval_results) as f:
eval_data = json.load(f)
assert len(dev_data) == len(eval_data)
data = []
for d, res in zip(dev_data, eval_data):
one_data = {"text": d[0], "keyword":d[1], "label": d[2], "predict":labels[res[0]], "probability": format(res[1], "0.3f")}
data.append(one_data)
df = pd.DataFrame(data)
excel_file = "result2.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{excel_file}")
return data
def compare_model(hostname='http://127.0.0.1:3314'):
"""
把收集到的数据,放到线上,对比一下准确率,不是和咱们自己的模型对比
:param hostname:
:return:
"""
url = hostname + '/lavector/rest/aspect-sentiment-batch'
headers = {'Content-Type': 'application/json'}
mydata = collect_data()
post_data = []
for d in mydata:
one = (d["text"], [d["keyword"]])
post_data.append(one)
data = {'channel': 'jd', 'data': post_data}
print(f"发送请求到{url}, 数据量{len(post_data)}")
res = requests.post(url, data=json.dumps(data), headers=headers)
result = res.json()
myresults = []
for r in result['result']:
keyword_list = list(r.keys())
pres_list = list(r.values())
assert len(keyword_list) == 1
assert len(pres_list) == 1
keyword = keyword_list[0]
pres = pres_list[0]
for k,v in pres.items():
if v == 1:
if k == "负向":
predict = "消极"
elif k =="正向":
predict = "积极"
else:
predict = "中性"
myresults.append([keyword,predict])
assert len(post_data) == len(myresults)
    # save to file
newdata = []
for d, res in zip(mydata, myresults):
if res[0] != d["keyword"]:
print(f"这条数据预测回来的关键字不一致{res[0]}")
continue
d["online_predict"] = res[1]
newdata.append(d)
df = pd.DataFrame(newdata)
excel_file = "result_online.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{excel_file}")
return newdata
def read_result_online():
"""
读取result_online.xlsx,比较
上文,关键字,下午的字数比
pretext + keyword + posttest
predict 表示的结果是75个字的,25+25+25的结果
online_predict 表示的结果是 15+30+20
:return:
"""
df = pd.read_excel("result_online.xlsx")
total = 0
predict_yes = 0
online_yes = 0
for index, row in df.iterrows():
label = row['label']
predict = row['predict']
online_predict = row['online_predict']
if predict != online_predict:
total += 1
if predict == label:
predict_yes +=1
elif online_predict == label:
online_yes +=1
else:
print("都没预测正确")
print(row)
print()
print(f"共有{total}个不一样, 75个字预测的结果是{predict_yes}, 线上65个字的预测结果是{online_yes}")
def dopredict(test_data, url="http://127.0.0.1:5000/api/predict_macbert", proxy=False, other_parameters = {}):
"""
预测结果
:param test_data:
:return:
"""
data = {'data': test_data}
data.update(other_parameters)
headers = {'content-type': 'application/json'}
if proxy:
r = requests.post(url, data=json.dumps(data), headers=headers, timeout=360,
proxies=dict(http='socks5://127.0.0.1:9080', https='socks5://127.0.0.1:9080'))
else:
r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)
if r.status_code != 200:
print(f"端口异常,请检查")
return r.json()
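def dopredict_example():
    """
    Hedged usage sketch (hypothetical endpoint and sample, not used elsewhere in this file):
    dopredict() simply POSTs {"data": test_data, **other_parameters} as JSON and returns
    the decoded response, so a minimal call looks like this.
    """
    sample = [("这款面霜很好用", "面霜")]  # one (text, keyword) pair, illustrative only
    return dopredict(test_data=sample, url="http://127.0.0.1:5000/api/predict_macbert")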
def download_data_and_compare(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_200_8081_0517.json"], service="absa", usecache=True):
"""
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:param service: "absa", "dem8", 使用哪个接口的服务进行预测
:return:
"""
from absa_api import export_data
    # download the files from label-studio
if usecache:
json_files = [os.path.join(dirpath,j) for j in jsonfile]
else:
json_files = []
for hname, jfile in zip(hostname,jsonfile):
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
json_files.append(json_file)
original_data = []
for json_file in json_files:
        # load the json file obtained from label-studio
with open(json_file, 'r') as f:
data = json.load(f)
print(f"共收集主机{json_file}的数据{len(data)} 条")
original_data.extend(data)
# original_data = original_data[:600]
# data = predict_comare_excel(original_data, isabsa=isabsa)
data = predict_comare_excel_bad_case(original_data, service=service)
return data
def download_data_and_compare_same(hostname=["http://192.168.50.139:8081/api/","http://192.168.50.139:8085/api/"], dirpath="/opt/lavector/absa/", jsonfile=["192.168.50.139_500_8081_0129.json","192.168.50.139_500_8085_0129.json"], isabsa=True):
"""
对比相同的hostname的数据
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from absa_api import export_data
    # download the files from label-studio
if len(hostname) != 2:
raise Exception("必须准2个hostname,里面包含相同的评估数据")
result = []
for hname, jfile in zip(hostname,jsonfile):
original_data = []
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
        # load the json file obtained from label-studio
with open(json_file, 'r') as f:
data = json.load(f)
print(f"共收集主机{hname}的数据{len(data)} 条")
original_data.extend(data)
predict_data, excel_data = predict_comare_excel(original_data, isabsa=isabsa)
result.append([hname, predict_data, excel_data])
    # compare the data annotated by the two annotators
diffrent_data = []
print(f"对比host为 {result[0][0], result[1][0]}")
hname1, data1, pre1 = result[0]
hname2, data2, pre2 = result[1]
if len(data1) != len(data2):
raise Exception("两个人标注的数据总数不一致")
for d1, d2 in zip(data1,data2):
if d1[0] != d2[0]:
print("这条数据不一致")
else:
if d1[4] != d2[4]:
print(f"2个人标注的标签不一致")
print(d1[0])
print(d1[1])
print(d1[4])
print(d2[4])
one_data = {"text": d1[0], "keyword": d1[1], "P1_label": d1[4], "P2_label": d2[4], "location": d1[2:4]}
diffrent_data.append(one_data)
print(f"不一致的数据总量是{len(diffrent_data)}")
df = pd.DataFrame(diffrent_data)
writer = pd.ExcelWriter("diffrent.xlsx", engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到diffrent.xlsx excel成功")
return data
def predict_comare_excel(got_data,result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", isabsa=True):
"""
:param got_data:
:param result_excel:
:param export_wrong_examples_excel:
:param correct_examples_excel:
:param isabsa:
:return: data是预处理后的,excel_data是模型预测的结果
"""
from convert_label_studio_data import format_data, do_truncate_data
# [(text, keyword, start_idx, end_idx, label)]
original_data = format_data(got_data)
    # each retrieved record has 7 fields, we only need the first 5
original_data = [d[:5] for d in original_data ]
# original_data, truncate_data, locations = do_truncate_data(data)
if isabsa:
url = "http://192.168.50.139:3326/api/absa_predict"
else:
url = "http://192.168.50.139:3326/api/dem8_predict"
predict_result = dopredict(test_data=original_data, url=url)
# print(predict_result)
excel_data = []
for ori, d in zip(original_data, predict_result):
one_data = {"text": ori[0], "keyword": ori[1], "label": ori[4], "predict": d[0], "location": d[3],
"probability": format(d[1], "0.3f"), "channel": ori[-2], "wordtype": ori[-1]}
excel_data.append(one_data)
df = pd.DataFrame(excel_data)
writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{result_excel}")
# 预测错误的样本
predict_wrong_examples = []
# 保存预测错误的样本到excel中
correct_examples = []
for ori, d in zip(original_data, predict_result):
one_data = {"text": ori[0], "keyword": ori[1], "label": ori[4], "predict": d[0], "location": d[3],
"probability": format(d[1], "0.3f"), "channel": ori[-2], "wordtype": ori[-1]}
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
print(f"标注的总样本数是{len(original_data)},预测错误的样本总数是{len(predict_wrong_examples)}")
print(f"标注的总样本数是{len(original_data)},预测正确的样本总数是{len(correct_examples)}")
df = pd.DataFrame(predict_wrong_examples)
writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
df = pd.DataFrame(correct_examples)
writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
print(f"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成")
print(f"保存全部为正确的样本到excel: {correct_examples_excel}完成")
print(f"准确率为{(len(correct_examples)) / len(original_data)}")
return original_data, excel_data
def get_json_data_compare(jsonfile="/opt/lavector/192.168.50.119_8086.json"):
"""
获取jsonfile,然后预测
:return:
"""
#加载从label-studio获取的到json文件
data = predict_comare_excel(jsonfile, result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx")
return data
def predict_comare_excel_bad_case(got_data,result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx", service="absa", minitest=False):
"""
:param got_data:
:param result_excel:
:param export_wrong_examples_excel:
:param correct_examples_excel:
:param service: "absa", "dem8"
:return: data是预处理后的,excel_data是模型预测的结果
"""
from convert_label_studio_data import format_data, do_truncate_data, format_purchase_data
# [(text, keyword, start_idx, end_idx, label)]
if service in ["absa", "dem8"]:
original_data = format_data(got_data)
elif service == "purchase":
        # the returned record is text, title, keywords, start, end, label
original_data = format_purchase_data(got_data)
    # each retrieved record has 7 fields, we only need the first 5
    # original_data, truncate_data, locations = do_truncate_data(data)
if minitest:
original_data = original_data[:20]
if service == "absa":
url = "http://192.168.50.189:3326/api/absa_predict"
post_data = [d[:5] for d in original_data]
elif service == "dem8":
url = "http://192.168.50.189:3326/api/dem8_predict"
post_data = [list(d[:2]) + [d[6]] + list(d[2:4]) for d in original_data]
elif service == "purchase":
url = "http://192.168.50.139:3326/api/purchase_predict"
        post_data = [d[:3] for d in original_data]  # the interface expects "content", "title", "keyword"
predict_result = dopredict(test_data=post_data, url=url)
# print(predict_result)
    # all excel rows
    excel_data = []
    # samples predicted incorrectly
    predict_wrong_examples = []
    # samples predicted correctly, also saved to excel
    correct_examples = []
if service in ["absa", "dem8"]:
for ori, d in zip(original_data, predict_result):
one_data = {"text": ori[0], "keyword": ori[1], "label": ori[4], "predict": d[0], "location": d[3],
"probability": format(d[1], "0.3f"), "channel": ori[-2], "wordtype": ori[-1]}
excel_data.append(one_data)
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
elif service == "purchase":
for ori, d in zip(original_data, predict_result):
one_data = {"text": ori[0], "title": ori[1], "keyword": ori[2], "location": d[3],
"probability": format(d[1], "0.3f"), "predict": d[0], "label": ori[-1]}
excel_data.append(one_data)
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
df = pd.DataFrame(excel_data)
writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{result_excel}")
print(f"标注的总样本数是{len(original_data)},预测错误的样本总数是{len(predict_wrong_examples)}")
print(f"标注的总样本数是{len(original_data)},预测正确的样本总数是{len(correct_examples)}")
df = pd.DataFrame(predict_wrong_examples)
writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
df = pd.DataFrame(correct_examples)
writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
print(f"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成")
print(f"保存全部为正确的样本到excel: {correct_examples_excel}完成")
print(f"准确率为{(len(correct_examples)) / len(original_data)}")
return original_data, excel_data
def all_bad_case():
"""
测试所有的badcase
总样本数是13688,预测错误的样本总数是1049
总样本数是13688,预测正确的样本总数是12639
保存全部为错误的样本到excel: wrong.xlsx完成
保存全部为正确的样本到excel: correct.xlsx完成
准确率为0.9233635300993571
:return:
"""
from convert_label_studio_data import collect_json
data = collect_json(dirpath="/opt/lavector/absa")
predict_comare_excel_bad_case(got_data=data)
def download_data_and_compare_sentence(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/sentence/", jsonfile=["192.168.50.139_200_8085_0517.json"], usecache=True):
"""
从label_studio的某个hostname下载数据,然后预测,最后给出结果
:return:
"""
from sentence_api import export_data
    # download the files from label-studio
if usecache:
json_files = [os.path.join(dirpath,j) for j in jsonfile]
else:
json_files = []
for hname, jfile in zip(hostname,jsonfile):
json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)
json_files.append(json_file)
original_data = []
for json_file in json_files:
        # load the json file obtained from label-studio
with open(json_file, 'r') as f:
data = json.load(f)
print(f"共收集主机{json_file}的数据{len(data)} 条")
original_data.extend(data)
data = predict_comare_excel_sentence(original_data)
return data
def predict_comare_excel_sentence(got_data,result_excel="result.xlsx", export_wrong_examples_excel="wrong.xlsx",correct_examples_excel= "correct.xlsx"):
"""
:param got_data:
:param result_excel:
:param export_wrong_examples_excel:
:param correct_examples_excel:
:param isabsa:
:return: data是预处理后的,excel_data是模型预测的结果
"""
from convert_label_studio_data import format_sentence_data
# [(text, label, channel)]
original_data = format_sentence_data(got_data)
post_data = [d[0] for d in original_data]
predict_result = dopredict(test_data=post_data, url="http://192.168.50.139:5016/api/predict_truncate")
# print(predict_result)
excel_data = []
for ori, d in zip(original_data, predict_result):
one_data = {"channel": ori[-1], "text": ori[0], "label": ori[1], "predict": d[0], "score": d[1]}
excel_data.append(one_data)
df = pd.DataFrame(excel_data)
writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"保存到excel成功{result_excel}")
# 预测错误的样本
predict_wrong_examples = []
# 保存预测错误的样本到excel中
correct_examples = []
for ori, d in zip(original_data, predict_result):
one_data = {"channel": ori[-1], "text": ori[0], "label": ori[1], "predict": d[0], "score": d[1]}
if one_data["label"] != one_data["predict"]:
print(f"{one_data['text']}: 模型预测的结果与ground truth不一致")
predict_wrong_examples.append(one_data)
else:
correct_examples.append(one_data)
print(f"总样本数是{len(original_data)},预测错误的样本总数是{len(predict_wrong_examples)}")
print(f"总样本数是{len(original_data)},预测正确的样本总数是{len(correct_examples)}")
df = pd.DataFrame(predict_wrong_examples)
writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
df = pd.DataFrame(correct_examples)
writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')
df.to_excel(writer, sheet_name='table1')
writer.save()
print(f"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成")
print(f"保存全部为正确的样本到excel: {correct_examples_excel}完成")
print(f"准确率为{(len(correct_examples)) / len(original_data)}")
return original_data, excel_data
def predict_direct(result_excel="result.xlsx", isabsa=True, absa_dem8=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[70, 20, 20, 20, 70],leibie_num=[200, 200, 200, 200, 200, 200, 200, 200],require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],
num_by_channel=False,add_search_num=50, other_column=[],sql_extra='',excel_colum=[], sheet_name=[],sheet_operator="",sheet_index_name="",ptime_keyword=">:2021-06-01",no_limit=False,only_predict=None, level_info=False, leavel_sql_cache_flush_bytime=False,full_score=True, softmax=True, both_softmax_logits=False):
"""
从数据库获取数据后直接预测,并保存到excel中, 每个type是一个sheet
:param isabsa: 使用absa的接口还是成分的接口,False为成分的接口
:param absa_dem8: 预测玩属性之后预测情感
:param result_excel:
:param export_wrong_examples_excel:
:param correct_examples_excel:
:param isabsa:
:param sql_extra: 额外的sql查询条件
:param excel_colum: 额外的excel列, list
:param other_column: 其它的sql查询时的column内容
:param sheet_name: 生成哪些sheet, sheet_name=["225","226","227","228","229","314","221"], 即225 in pack_tags的话,这个就算一个sheet中的一个元素,sheet的名字是225
:param sheet_operator: sheet_operator="in", 查询sheet属于哪个sheet_name的条件
:param sheet_index_name: sheet_index_name="pack_tags"
:param level_info: 关键字的层级关系,需要结合线上sql进行查询
:param leavel_sql_cache_flush_bytime: 更新线上数据库的缓存,如果cache时间过久,cache时间大于1天,那么自动更新
:param softmax: 是否做softmax
:return: data是预处理后的,excel_data是模型预测的结果
"""
from read_hive import query_data_from_db
from convert_label_studio_data import format_data, do_truncate_data
got_data = query_data_from_db(channel=channel, channel_num=channel_num,
leibie_num=leibie_num,
require_tags=require_tags, num_by_channel=num_by_channel, unique_type=1, ptime_keyword=ptime_keyword,
table="da_wide_table_before", add_search_num=add_search_num,other_column=other_column,sql_extra=sql_extra,no_limit=no_limit, level_info=level_info, leavel_sql_cache_flush_bytime=leavel_sql_cache_flush_bytime)
    # run the prediction
if absa_dem8:
        # if predicting the aspect first, the aspect parameter must be provided
original_data = [(d['text'], d['keyword'], d['wordtype']) for d in got_data]
else:
original_data = [(d['text'],d['keyword']) for d in got_data]
channel_list = [d['channel'] for d in got_data]
type_list = [d['wordtype'] for d in got_data]
other_parameters = {}
if isabsa:
if absa_dem8:
if full_score:
other_parameters = {"full_score": True}
url = "http://192.168.50.189:3326/api/absa_dem8_predict"
else:
if full_score:
                # return the full score distribution
labels = ["消极", "中性", "积极"]
url = "http://192.168.50.189:3326/api/absa_predict_fullscore"
else:
url = "http://192.168.50.189:3326/api/absa_predict"
else:
url = "http://192.168.50.189:3326/api/dem8_predict"
    assert original_data, "The filtered data is empty, please check"
other_parameters["search_first"] = True
other_parameters["softmax"] = softmax
other_parameters["both_softmax_logits"] = both_softmax_logits
predict_result = dopredict(test_data=original_data, url=url, other_parameters=other_parameters)
# print(predict_result)
    assert len(predict_result) == len(original_data), "The number of model outputs does not match the number of inputs"
num_before = len(predict_result)
    # Initialize an excel writer
    writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')
    # One sheet per category
if num_by_channel:
sheets = set(channel_list)
index_word = 'channel'
else:
sheets = set(type_list)
index_word = 'wordtype'
if sheet_name and sheet_operator and sheet_index_name:
        # A user-supplied sheet_name takes priority
sheets = sheet_name
index_word = sheet_index_name
    # Total number of remaining rows
after_num = 0
    # If aspects are predicted first and then sentiment, unwrap one level: drop entries without a sentiment and, when several words carry sentiment, keep only one
if absa_dem8:
filter_predict = []
for preds in predict_result:
one_pred = None
if isinstance(preds[0],str):
one_pred = preds
filter_predict.append(one_pred)
predict_result = filter_predict
excel_colum_dict = {
        # Each key maps to a column of the source data
"商品名称": "index_keyword",
"商品ID": "commodity_id",
"商品URL": "url",
"评价": "text",
"关键词": "keyword",
"父级标签": "parent",
"子级标签": "children",
"渠道": "channel",
"类型": "wordtype",
}
for sheet_name in sheets:
sheet_data = []
for ori, d in zip(got_data, predict_result):
if d is None:
                # Skip entries that are None
continue
if sheet_operator == 'in':
sheet_flag = sheet_name in ori[index_word]
else:
sheet_flag = sheet_name == ori[index_word]
if sheet_flag:
if only_predict and d[0] != only_predict:
                    # Keep only the requested label (negative, neutral or positive)
continue
after_num +=1
# one_data = {"商品名称":ori["index_keyword"],"商品URL":ori['url'],"评价": ori['text'], "关键词": ori['keyword'],"父级标签":ori['parent'], "子级标签":ori['children'], "情感倾向": d[0], "位置": d[-1],
# "正向": format(d[1][2], "0.3f"),"中性": format(d[1][1], "0.3f"), "负向": format(d[1][0], "0.3f"), "渠道": ori['channel'], "类型": ori['wordtype']}
one_data = {"评价": ori['text'], "关键词": ori['keyword'],"情感倾向": d[0], "位置": d[-1],"渠道": ori['channel'], "类型": ori['wordtype']}
if full_score:
if isinstance(d[1], list):
if both_softmax_logits:
                            assert len(d[1]) == 2, "Expected 2 items: a score and a logits value"
score = d[1][0]
logits = d[1][1]
one_data.update({"正向probability": format(score[2], "0.3f"), "中性probability": format(score[1], "0.3f"), "负向probability": format(score[0], "0.3f")})
one_data.update({"正向logits": format(logits[2], "0.3f"), "中性logits": format(logits[1], "0.3f"), "负向logits": format(logits[0], "0.3f")})
else:
                            # The score is in list format
one_data.update({"正向": format(d[1][2], "0.3f"), "中性": format(d[1][1], "0.3f"), "负向": format(d[1][0], "0.3f")})
else:
                        # The score is a single value, not a list
one_data.update({"正向": format(d[1], "0.3f"), "中性": format(d[1], "0.3f"), "负向": format(d[1], "0.3f")})
if excel_colum:
                    # Extra columns
for ex_col in excel_colum:
                        # Map the column name, e.g. ex_col: 商品名称
key_name = excel_colum_dict[ex_col]
one_data[ex_col] = ori[key_name]
sheet_data.append(one_data)
df = pd.DataFrame(sheet_data)
df.to_excel(writer,sheet_name=sheet_name)
print(f"经过only_predict之后剩余条目{after_num}, 过滤掉了{num_before - after_num}条")
writer.save()
print(f"保存到excel成功{result_excel}")
return original_data
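# A minimal usage sketch for predict_direct (illustrative only: the database behind
# query_data_from_db and the prediction services at 192.168.50.189 are assumed to be
# reachable, and the argument values below are placeholders):
#
#     predict_direct(result_excel="result.xlsx", isabsa=True, absa_dem8=True,
#                    channel=["jd", "tmall"], channel_num=[50, 50],
#                    require_tags=["pack"], add_search_num=100)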
def save_one_by_one(path='/Users/admin/tmp',tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"]):
tags = ["promotion"]
for tag in tags:
predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200],require_tags=[tag],num_by_channel=False,other_column=['commodity_id'],other_column_in_filter=[6940,3754,3702,7385,6964,6833,6831,8479,5908,6662],add_search_num=100000)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200],require_tags=[tag],num_by_channel=False,add_search_num=300)
des = os.path.join(path, f"{tag}.xlsx")
os.system(f"cp -a result.xlsx {des}")
def save_one_by_one2(path='/Users/admin/tmp'):
pack_id = ["225", "226", "227", "228", "314", "221", "229"]
for pid in pack_id:
print(f"开始检索 {pid}")
predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "tmall"], channel_num=[200, 200],
leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], require_tags=["pack"], num_by_channel=True,
other_column=['url', 'pack_tags', 'keyword'],
sql_extra=f'and pack_tags REGEXP "{pid}" ',
add_search_num=1000, excel_colum=['url', "index_keyword", "pack_tags"],
sheet_name=[pid], sheet_operator="in",
sheet_index_name="pack_tags")
des = os.path.join(path, f"{pid}.xlsx")
os.system(f"cp -a result.xlsx {des}")
keywords = ["塑料", "玻璃", "瓶", "胶囊", "滴管", "泡泡", "少女风", "国风", "套装", "限量版", "旅行装"]
# for kword in keywords:
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "tmall"], channel_num=[200, 200],
# leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], require_tags=["pack"], num_by_channel=True,
# other_column=['url', 'pack_tags', 'keyword'],
# sql_extra=f'and content REGEXP "{kword}"',
# add_search_num=1000, excel_colum=['url', "keyword", "pack_tags"],
# sheet_name=[kword],
# sheet_operator="in", sheet_index_name="text")
# des = os.path.join(path, f"{kword}.xlsx")
# os.system(f"cp -a result.xlsx {des}")
def predict_excel_absa(from_excel='/Users/admin/Desktop/query-impala-2131.xlsx',result_excel="result.xlsx"):
"""
从excel读取数据,预测情感,保存到result_excel
:param from_excel:
:type from_excel:
:param result_excel:
:type result_excel:
:return:
:rtype:
"""
    # Basic statistics about the input texts
words_num = collections.Counter()
df = pd.read_excel(from_excel)
content_data = df['content'].to_list()
words_num.update([len(c) for c in content_data])
sorted_num = sorted(words_num.items())
less_5 = [i[1] for i in sorted_num if i[0] < 6]
print(f"小于等于5个字符的句子有{sum(less_5)}个, 占总样本的{sum(less_5)/len(content_data)*100}%百分比")
print(sorted_num)
url = "http://192.168.50.189:3326/api/absa_predict_sentence"
predict_result = dopredict(test_data=content_data, url=url)
labels = [p[0] for p in predict_result]
scores = [p[1] for p in predict_result]
df['predict'] = labels
df['scores'] = scores
    # Label distribution
    label_cnt = collections.Counter(labels)
    print(f"Label distribution: {label_cnt}")
df.to_excel(result_excel)
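# Illustrative call (the paths are placeholders; the source excel is expected to
# have a 'content' column, as read above):
#
#     predict_excel_absa(from_excel='comments.xlsx', result_excel='result.xlsx')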
if __name__ == '__main__':
# predict_excel_absa()
# collect_data()
# compare_model()
# read_result_online()
# download_data_and_compare()
# download_data_and_compare(hostname=["http://192.168.50.139:8086/api/"], dirpath="/opt/lavector/components/", jsonfile= ["192.168.50.139_500_8086_0601.json"],isabsa=False, usecache=False)
# download_data_and_compare(hostname=["http://192.168.50.139:8081/api/"], dirpath="/opt/lavector/absa/", jsonfile= ["192.168.50.139_200_8081_0517.json"],isabsa=True, usecache=False)
# download_data_and_compare(hostname=["http://192.168.50.139:8081/api/","http://192.168.50.139:8085/api/"], dirpath="/opt/lavector/absa/", jsonfile= ["192.168.50.139_500_8081_0226.json","192.168.50.139_500_8085_0226.json"],isabsa=True, usecache=True)
# download_data_and_compare(hostname=["http://192.168.50.139:7081/api/","http://192.168.50.139:7082/api/","http://192.168.50.139:7083/api/","http://192.168.50.139:7084/api/"], dirpath="/opt/lavector/dem8_verify/", jsonfile= ["dem8_0812_7081.json","dem8_0812_7082.json","dem8_0812_7083.json","dem8_0812_7084.json"],isabsa=False, usecache=True)
# download_data_and_compare(hostname=["http://192.168.50.139:7081/api/","http://192.168.50.139:7082/api/","http://192.168.50.139:7083/api/","http://192.168.50.139:7084/api/"], dirpath="/opt/lavector/price_service_skin/", jsonfile= ["dem8_0813_7081_300.json","dem8_0813_7082_300.json","dem8_0813_7083_300.json","dem8_0813_7084_300.json"],isabsa=False, usecache=False)
# get_json_data_compare()
# download_data_and_compare_same()
# all_bad_case()
# predict_direct(result_excel="result.xlsx", isabsa=False, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[80,80,80,80,80],leibie_num=[400,0,0,0,0,0,0,0],require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],num_by_channel=False,softmax=False)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200,200,200,200,200,0,0,0],require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],num_by_channel=False,softmax=False)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200],require_tags=["component"],num_by_channel=False,other_column=['commodity_id'],other_column_in_filter=[6940,3754,3702,7385,6964,6833,6831,8479,5908,6662],add_search_num=10000)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200],require_tags=["effect"],num_by_channel=False,other_column=['commodity_id'],other_column_in_filter=[6940,3754,3702,7385,6964,6833,6831,8479,5908,6662],add_search_num=10000)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[100,100,100,100,100],leibie_num=[0, 100, 0, 0, 0, 0, 0, 0],num_by_channel=True, require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],other_column=['url','keyword'],level_info=True,leavel_sql_cache_flush_bytime=True)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[800,0,0,0,800],leibie_num=[100, 100, 100, 100, 100, 100, 100, 100],num_by_channel=True, require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],other_column=['url','keyword','commodity_id'],level_info=True,leavel_sql_cache_flush_bytime=True, sql_extra='and commodity_id IN (10932,375,11340)', excel_colum=['商品名称','商品ID','商品URL','父级标签','子级标签'],ptime_keyword=">:2021-03-01",add_search_num=2000)
    # effect, skin feel, fragrance
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[300,0,0,0,300],leibie_num=[0, 100, 100, 0, 100, 0, 0, 0],num_by_channel=True, require_tags=["component", "effect", "fragrance", "pack", "skin", "promotion", "service","price"],other_column=['url','keyword','commodity_id'],level_info=True,leavel_sql_cache_flush_bytime=True, sql_extra='and commodity_id IN (10932,375,11340)', excel_colum=['商品名称','商品ID','商品URL','父级标签','子级标签'],ptime_keyword=">:2021-07-01",add_search_num=50,softmax=True,both_softmax_logits=True)
# save_one_by_one()
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd","tmall"],channel_num=[100,100],leibie_num=[200,200,200,200],require_tags=["pack", "promotion", "service","price"],num_by_channel=False,other_column=['commodity_id'],other_column_in_filter=[6940,3754,3702,7385,6964,6833,6831,8479,5908,6662],add_search_num=20000)
# predict_direct(result_excel="result.xlsx", channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[100, 100, 100, 100, 100],leibie_num=[100],require_tags=["component"],num_by_channel=True)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[100, 100, 100, 100, 100],leibie_num=[20,20,10,10,10,10,10,10],num_by_channel=True,other_column=['content'],keywords_in_contents=['but', '却', '虽然', '但是', '然而', '而', '偏偏', '只是', '不过', '至于', '不料', '岂知', '然鹅', '没想到'],add_search_num=1000)
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "tmall"],channel_num=[200, 200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0],require_tags=["pack"], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and pack_tags REGEXP "225|226|227|228|229|314|221" ',
# add_search_num=10000,excel_colum=['url',"index_keyword","pack_tags"],sheet_name=["225","226","227","228","229","314","221"],sheet_operator="in", sheet_index_name="pack_tags")
# predict_direct(result_excel="result11528.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[900000, 900000, 900000, 900000, 900000],leibie_num=[0, 0, 0, 0, 900000, 0, 0, 0],require_tags=["skin"], num_by_channel=True,other_column=['url','pack_tags','keyword','commodity_id'],sql_extra='and commodity_id IN (11528) and skin_feel_tags REGEXP "137|138|139|140|141|142|143|144|145|146|147|165|166|167|168|169"',
# add_search_num=100,excel_colum=['url',"keyword",'commodity_id'],no_limit=True,only_predict='消极')
# predict_direct(result_excel="result11340.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[900000, 900000, 900000, 900000, 900000],leibie_num=[0, 0, 0, 0, 900000, 0, 0, 0],require_tags=["skin"], num_by_channel=True,other_column=['url','pack_tags','keyword','commodity_id'],sql_extra='and commodity_id IN (11340) and skin_feel_tags REGEXP "137|138|139|140|141|142|143|144|145|146|147|165|166|167|168|169"',
# add_search_num=100,excel_colum=['url',"keyword",'commodity_id'],no_limit=True,only_predict='消极')
# predict_direct(result_excel="result.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0],require_tags=["pack"], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and content REGEXP "塑料|玻璃|瓶|胶囊|滴管|泡泡|少女风|国风|套装|限量版|旅行装"',add_search_num=10000,excel_colum=['url',"keyword","pack_tags"],sheet_name=["塑料","玻璃","瓶","胶囊","滴管","泡泡","少女风","国风","套装","限量版","旅行装"],sheet_operator="in", sheet_index_name="text")
# predict_direct(result_excel="密封.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200,200, 200,200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and pack_tags REGEXP "人为拆开|密封性不好|没有锡纸密封|漏液|密封|被人打开过|没有塑封|有塑料封口|有塑封|封口差|没有封口|封口开了|包装被拆开了|漏了|漏出|洒出来了|没有封膜|被打开过|瓶子外面+油|盖子没盖好|盖子没有盖紧|有溢出"',add_search_num=10000,ptime_keyword=">:2021-03-01")
# predict_direct(result_excel="损坏.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200,200,200,200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and pack_tags REGEXP "包装+坏|破损|完好无损|一层层包装很仔细|包装+结实|包装完好|保护+很好|瘪了|摔破了|包装+烂了|盒子+烂了|挤+坏|压+坏"',add_search_num=10000,ptime_keyword=">:2021-03-01")
# predict_direct(result_excel="可回收.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200,200, 200,200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and pack_tags REGEXP "可回收|可循环利用|减少包装|减少浪费"',add_search_num=10000,ptime_keyword=">:2021-03-01")
# predict_direct(result_excel="可降解.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200,200, 200,200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and pack_tags REGEXP "可降解"',add_search_num=10000,ptime_keyword=">:2021-01-01")
# predict_direct(result_excel="可降解.xlsx", isabsa=True, channel=["jd", "weibo", "redbook", "tiktok", "tmall"],channel_num=[200, 200,200, 200,200],leibie_num=[0, 0, 0, 200, 0, 0, 0, 0], num_by_channel=True,other_column=['url','pack_tags','keyword'],sql_extra='and content NOT REGEXP "//@"',add_search_num=10000,ptime_keyword=">:2021-01-01")
# download_data_and_compare_sentence(hostname=["http://192.168.50.139:8085/api/"], dirpath="/opt/lavector/sentence/",
# jsonfile=["192.168.50.139_200_8085_0528.json"], usecache=False)
# save_one_by_one2()
# all_bad_case()
download_data_and_compare(hostname=["http://192.168.50.189:8086/api/","http://192.168.50.189:8087/api/","http://192.168.50.189:8088/api/"], dirpath="/opt/lavector/purchase/", jsonfile= ["p8086_2500_0913.json","p8087_2500_0913.json","p8088_2500_0913.json"],service="purchase", usecache=True)
|
from ._fft import ( # irfft,; irfft2,; irfftn,
fft,
fft2,
fftn,
ifft,
ifft2,
ifftn,
rfft,
rfft2,
rfftn,
)
# from ._fftconvolve import fftconvolve
# from ._fftshift import fftshift, ifftshift
# Only export names that are actually imported above.
__all__ = [
    "fft",
    "fft2",
    # "fftconvolve",
    "fftn",
    # "fftshift",
    "ifft",
    "ifft2",
    "ifftn",
    # "ifftshift",
    "rfft",
    "rfft2",
    "rfftn",
]
|
import unittest
from alg_euclid import euclid, euclidMutualSubst
class EuclidTest(unittest.TestCase):
def testEuclid_8_6(self):
assert euclid(8, 6) == 2
def testEuclid_12_8(self):
assert euclid(12, 8) == 4
def testEuclid_180_168(self):
assert euclid(180, 168) == 12
def testEuclidMutualSubst_8_6(self):
assert euclidMutualSubst(8, 6) == 2
def testEuclidMutualSubst_12_8(self):
assert euclidMutualSubst(12, 8) == 4
def testEuclidMutualSubst_180_168(self):
assert euclidMutualSubst(180, 168) == 12
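# The alg_euclid module is not included here; the sketches below are assumed
# reference implementations shown for context only and are not used by the tests.
def _reference_euclid(a, b):
    """Remainder-based Euclidean algorithm (gcd)."""
    while b:
        a, b = b, a % b
    return a
def _reference_euclid_mutual_subst(a, b):
    """Mutual-subtraction variant of the Euclidean algorithm (gcd)."""
    while a != b:
        if a > b:
            a -= b
        else:
            b -= a
    return a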
if __name__ == "__main__":
unittest.main() # run all tests
|
"""
`NASA EARTHDATA ORNL DAAC Daymet`_ web services
.. _NASA EARTHDATA ORNL DAAC Daymet: https://daymet.ornl.gov/dataaccess.html
"""
from __future__ import absolute_import
from . import core
from .core import (get_daymet_singlepixel)
from .core import (get_variables)
from .core import (get_daymet_gridded)
from ulmo import util
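# Illustrative usage (the parameter names are an assumption based on the
# functions re-exported above; check the ulmo documentation for the exact
# signatures before relying on this):
#
#     from ulmo.nasa import daymet
#     variables = daymet.get_variables()
#     data = daymet.get_daymet_singlepixel(latitude=36.0133, longitude=-84.2625,
#                                          variables=['tmax', 'tmin', 'prcp'])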
|
from termplanner.planner_cli import PlannerRunner
def cli():
"""Creates and calls Planner."""
planner_runner = PlannerRunner()
planner_runner.cli()
if __name__ == "__main__":
cli()
|
from _pytest.config.argparsing import Parser
def pytest_addoption(parser: Parser, pluginmanager):
"""
Adds command line options used by the Seekret plugin.
"""
_ = pluginmanager # Unused.
group = parser.getgroup('seekret')
group.addoption('--run-profile',
dest='run_profile',
type=str,
default=None,
help='Run profile YAML file to use for the test session')
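# Illustrative example: one way a test could read the option registered above.
# The fixture name below is an assumption for demonstration, not an existing
# Seekret API.
import pytest
@pytest.fixture
def run_profile_path(request):
    """Return the value passed via --run-profile, or None if it was not given."""
    return request.config.getoption('run_profile')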
|
# Copyright(c) 2018 WindRiver Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
# Note: this string is being used for regex parsing later with re module.
#
# Use Python's raw string notation for regular expressions and
# uses the backslash character ('\') to indicate special
# forms or to allow special characters to be used without invoking
# their special meaning.
SOCK = r'/var/lib/libvirt/qemu/org\.qemu\.guest_agent\..*\.instance-.*\.sock'
monitor_opts = [
cfg.IntOpt('guest_monitoring_interval',
default=10,
help='''
Guest monitoring interval of VM status (in seconds).
* The value should not be too low, to avoid false negatives
  when reporting QEMU_GUEST_AGENT failures.
* The VM needs time to power off, so guest_monitoring_interval should be
  greater than the time it takes to SHUT DOWN the VM gracefully.
* e.g. | 565da9ba-3c0c-4087-83ca | iim1 | ACTIVE | powering-off | Running
'''),
cfg.IntOpt('guest_monitoring_timeout',
default=2,
help='Guest monitoring timeout (in seconds).'),
cfg.IntOpt('guest_monitoring_failure_threshold',
default=3,
help='Failure threshold before sending notification.'),
cfg.StrOpt('qemu_guest_agent_sock_path',
default=SOCK,
help=r'''
* The file path of qemu guest agent sock.
* Please use Python raw string notation as regular expressions.
e.g. r'/var/lib/libvirt/qemu/org\.qemu\.guest_agent\..*\.instance-.*\.sock'
'''),
]
def register_opts(conf):
conf.register_opts(monitor_opts, group='introspectiveinstancemonitor')
def list_opts():
return {
'introspectiveinstancemonitor': monitor_opts
}
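# Illustrative sketch of how a consumer would typically use these options
# (the import path of this module is a placeholder):
#
#     from oslo_config import cfg
#     import config as iim_config  # placeholder import path
#     CONF = cfg.CONF
#     iim_config.register_opts(CONF)
#     interval = CONF.introspectiveinstancemonitor.guest_monitoring_interval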
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import CONF_HUMIDITY, CONF_ID, CONF_TEMPERATURE, \
ICON_THERMOMETER, ICON_WATER_PERCENT, UNIT_CELSIUS, UNIT_PERCENT
DEPENDENCIES = ['i2c']
hdc1080_ns = cg.esphome_ns.namespace('hdc1080')
HDC1080Component = hdc1080_ns.class_('HDC1080Component', cg.PollingComponent, i2c.I2CDevice)
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(HDC1080Component),
cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(UNIT_CELSIUS, ICON_THERMOMETER, 1),
cv.Optional(CONF_HUMIDITY): sensor.sensor_schema(UNIT_PERCENT, ICON_WATER_PERCENT, 0),
}).extend(cv.polling_component_schema('60s')).extend(i2c.i2c_device_schema(0x40))
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield i2c.register_i2c_device(var, config)
if CONF_TEMPERATURE in config:
sens = yield sensor.new_sensor(config[CONF_TEMPERATURE])
cg.add(var.set_temperature(sens))
if CONF_HUMIDITY in config:
sens = yield sensor.new_sensor(config[CONF_HUMIDITY])
cg.add(var.set_humidity(sens))
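# Example ESPHome YAML accepted by the schema above (illustrative; an i2c bus
# must be configured elsewhere in the user's configuration):
#
#   sensor:
#     - platform: hdc1080
#       temperature:
#         name: "Living Room Temperature"
#       humidity:
#         name: "Living Room Humidity"
#       update_interval: 60s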
|
# coding: utf-8
# Demonstrates how to call the deepfake-detection API in batches
# In[1]:
import requests
import cv2
import os
import shutil
import glob
import argparse
import time
# Parse command-line arguments
parser = argparse.ArgumentParser(description = 'Benchmarking deapfake imgs')
parser.add_argument('--in_dir', default = 'IN',help="Raw imgs dir")
parser.add_argument('--out_dir', default = 'OUT',help="Where to save deepfake imgs")
# Images scoring below the threshold are treated as face-swapped (deepfake) images
parser.add_argument('--threshold', type = float, default = 0.2)
parser.add_argument('--draw', action = 'store_true',help="Draw fake face on img")
parser.add_argument('--debug', action = 'store_true',help="Debug model")
parser.add_argument('--nosleep', action = 'store_true',help="No sleep")
args = parser.parse_args()
print(args)
# Draw the locations of the returned fake faces on the image
# In[2]:
# Track the total number of queries and the number of failures
g_query_sum=0
g_query_fail=0
g_query_deepfakes=0
# Record the start time
g_starttime = time.time()
def draw_face(path,face_list=[],p=0.2):
deepfakes_num=0
#img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
img = cv2.imread(path)
for i in range(len(face_list)):
score=face_list[i]["score"]
#print(score)
if float(score) <= p:
left=int(face_list[i]["location"]["left"])
top=int(face_list[i]["location"]["top"])
width=int(face_list[i]["location"]["width"])
height=int(face_list[i]["location"]["height"])
cv2.rectangle(img, (left,top), (left+width,top+height), (0,255,0), 4)
cv2.putText(img,"score={}".format(score),
( int(left),int(top-height/5)),cv2.FONT_HERSHEY_PLAIN,2.0,(255,255,255),2,1)
deepfakes_num+=1
cv2.imwrite(path, img)
# In[3]:
def deepfakes_detect_by_img(path):
global g_query_fail
url="http://gwgp-h9xcyrmorux.n.bdcloudapi.com/rest/deepfakes/v1/detect"
files={"file":( path, open(path,"rb") ,"image/jpeg",{})}
res=requests.request("POST",url, data={"type":1}, files=files)
if args.debug:
print(res)
print(res.text)
print(res.headers)
face_num=0
face_list=[]
try:
face_num=res.json()["face_num"]
if face_num > 0:
face_list=res.json()["face_list"]
#draw_face(path,face_list,p=0.2)
    except Exception:
        print("Failed to detect {}!".format(path))
face_num=0
face_list=[]
g_query_fail+=1
return face_num,face_list
# # Batch-detect all images under the given directory
# In[4]:
# Directory of the raw, crawled images suspected to contain deepfakes
deepfakes_raw_dir=args.in_dir
if not os.path.exists(deepfakes_raw_dir):
    print("Input directory {} does not exist!".format(deepfakes_raw_dir))
# Directory where confirmed deepfake images are saved
deepfakes_dir=args.out_dir
if not os.path.exists(deepfakes_dir):
os.mkdir(deepfakes_dir)
for maindir, subdir, file_name_list in os.walk(deepfakes_raw_dir):
for filename in file_name_list:
g_query_sum+=1
        filename = os.path.join(maindir, filename)  # join into a full path
if not args.nosleep:
time.sleep(1)
starttime = time.time()
face_num,face_list=deepfakes_detect_by_img(filename)
endtime = time.time()
dtime = endtime - starttime
if args.debug:
print("{}/(size={}K) cost {}s".format(os.path.basename(filename),os.path.getsize(filename)/1000,dtime))
deepfakes=0
for i in range(face_num):
score=face_list[i]["score"]
            # Faces with a score at or below the threshold are treated as fake
if float(score) <= args.threshold:
deepfakes+=1
if deepfakes_dir is not None:
copy_filename="{}/{}".format(deepfakes_dir,os.path.basename(filename))
shutil.copyfile(filename,copy_filename)
                # Draw the fake-face bounding boxes
if args.draw:
draw_face(copy_filename,face_list,p=args.threshold)
if deepfakes > 0:
print("检测图片{},其中检测到人脸{}个,疑似假脸{}个".format(filename,face_num,deepfakes))
g_query_deepfakes+=1
# Summary
g_endtime = time.time()
g_costtime=g_endtime-g_starttime
if args.debug and g_query_sum > 0:  # guard against division by zero when no images were found
    print("Total queries {}, failure rate {}%, images with suspected fake faces {}, saved to {}, total time {}s, average {}s per image".
          format(g_query_sum,100.0*g_query_fail/g_query_sum,g_query_deepfakes,deepfakes_dir,g_costtime,g_costtime/g_query_sum))
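# Example invocation (the script name and directory names are placeholders):
#   python benchmark_deepfakes.py --in_dir IN --out_dir OUT --threshold 0.2 --draw --debug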
|
n=int(input("Enter the size of the board : "))
counter=0
def reset():
global board
board=[[0]*n for i in range(n)]
    if(marker(board, 0)==False and counter==0):  # no complete placement was ever displayed
print("No feasible solution exists for the given dimensions")
def display(board):
global counter
counter+=1
for i in range(0, n):
for j in range(0, n):
print(board[i][j], end=" ")
print()
print()
def check(board, row, column):
for i in range(0, column):
if(board[row][i]==1):
return False
for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
if(board[i][j]==1):
return False
for i, j in zip(range(row, n), range(column, -1, -1)):
if(board[i][j]==1):
return False
return True
def marker(board, column):
possibility=False
if column==n:
display(board)
return True
for i in range(0, n):
if(check(board, i, column)==True):
board[i][column]=1
possibility=marker(board, column+1)
board[i][column]=0
return possibility
reset()
print("There are a total of "+str(counter)+" possibilities for the given dimensions !")
|
# coding: utf-8
from dataset_translation.dataset import Dataset
class TestDataset:
@staticmethod
def test_fill_intents():
intents = [{
'slug': 'greetings',
'created_at': '2021-02-10T12:56:05.547Z',
'updated_at': '2021-07-01T13:09:12.057Z',
'description': 'Says hello',
'name': 'greetings',
'rating': 0,
'dataset_id': 0,
'is_activated': True,
'position': 2,
'is_forking': False,
'strictness': None
}]
cai_intents = [{
'name': 'greetings',
'description': 'Says hello',
'strictness': None,
'expressions': []
}]
transformed_intents = Dataset().fill_intents(intents)
assert transformed_intents == cai_intents
assert len(transformed_intents) == len(cai_intents)
@staticmethod
def test_fill_gazettes():
entities = [{
'dataset_id': 0,
'entity_id': None,
'is_open': True,
'is_activated': True,
'slug': 'music-genre',
'created_at': '2021-02-15T10:13:05.659Z',
'updated_at': '2021-02-15T10:13:05.659Z',
'strictness': None,
'enrichment_strictness': 95,
'is_custom': True,
'color': '#b39ddb',
'name': 'MUSIC-GENRE',
'is_forking': False,
'webhook_id': None,
'gold_enrichment_from': None,
'enrichment_type': 'map',
'enrichment_webhook_id': None,
'locked': True,
'type': 2,
'regex_pattern': None,
'regex_flags': None
}]
synonyms = [{
'entity_id': 0,
'value': 'rock',
'slug': None,
'created_at': '2021-07-09T10:01:24.804Z',
'updated_at': '2021-07-09T10:01:24.804Z',
'language': 'en'
}]
cai_synonyms = [{
'name': 'MUSIC-GENRE',
'slug': 'music-genre',
'locked': True,
'type': 'restricted',
'is_open': True,
'strictness': None,
'enrichment_strictness': 95,
'synonyms': ["rock"],
'regex_pattern': None,
'regex_flags': None
}]
assert Dataset().fill_gazettes(entities, synonyms, 'en') == cai_synonyms
        cai_synonyms[0]['synonyms'] = []  # the MUSIC-GENRE gazette has no 'fr' synonyms
assert Dataset().fill_gazettes(entities, synonyms, 'fr') == cai_synonyms
@staticmethod
def test_fill_expressions():
expressions = [{
'intent_id': 0,
'source': 'I want to listen to rock',
'created_at': '2021-07-28T14:42:13.067Z',
'updated_at': '2021-07-28T14:42:14.095Z',
'tokens': "[{\"pos\":\"PRON\",\"word\":\"I\",\"space\":true,\"entity_id\":1},{\"pos\":\"VERB\",\"word\":\"want\",\"space\":true},{\"pos\":\"PART\",\"word\":\"to\",\"space\":true},{\"pos\":\"VERB\",\"word\":\"listen\",\"space\":true},{\"pos\":\"ADP\",\"word\":\"to\",\"space\":true},{\"pos\":\"NOUN\",\"word\":\"rock\",\"space\":false,\"entity_id\":0}]",
'language': 'en'
}]
cai_synonyms = [
{
'name': 'MUSIC-GENRE',
'slug': 'music-genre',
'locked': True,
'type': 'restricted',
'is_open': True,
'strictness': None,
'enrichment_strictness': 95,
'synonyms': ["rock"],
'regex_pattern': None,
'regex_flags': None
},
{
'name': 'PRONOUN',
'slug': 'pronoun',
'locked': True,
'type': 'gold',
'is_open': True,
'strictness': None,
'enrichment_strictness': 95,
'regex_pattern': None,
'regex_flags': None
}
]
cai_expressions = [{
'source': 'I want to listen to rock',
'tokens': [
{
'pos': 'PRON',
'word': 'I',
'space': True,
'entity': {
'name': 'PRONOUN',
'type': 'gold',
'is_custom': False
}
},
{
'pos': 'VERB',
'word': 'want',
'space': True,
'entity': None
},
{
'pos': 'PART',
'word': 'to',
'space': True,
'entity': None
},
{
'pos': 'VERB',
'word': 'listen',
'space': True,
'entity': None
},
{
'pos': 'ADP',
'word': 'to',
'space': True,
'entity': None
},
{
'pos': 'NOUN',
'word': 'rock',
'space': False,
'entity': {
'name': 'MUSIC-GENRE',
'type': 'restricted',
'is_custom': True
}
}
]
}]
intents = [{
'name': 'ask-music',
'description': None,
'strictness': 60,
'expressions': []
}]
        cai_intents_en = intents  # NOTE: alias of intents, not a copy
        cai_intents_fr = intents  # NOTE: alias of intents, not a copy
        cai_intents_en[0]['expressions'] = cai_expressions
Dataset().fill_expressions(intents, cai_synonyms, expressions, 'en')
assert intents == cai_intents_en
Dataset().fill_expressions(intents, cai_synonyms, expressions, 'fr')
assert intents == cai_intents_fr
@staticmethod
def test_to_cai_format():
original_dataset = {
'version': 5,
'datasets': [
{
'slug': None,
'created_at': '2021-02-15T10:13:05.609Z',
'updated_at': '2021-07-28T14:43:12.028Z',
'strictness': 50,
'type': 0,
'classifier': 4,
'recognizer': 0,
'manual_training': True,
'big_bot': False,
'bot_id': 'a6d7cb05-468b-4fce-ad31-5a10539ba0ce',
'version_id': '9deee18f-8aa5-4a38-b797-f6a16ab56784',
'is_forking': False,
'resolve_pronouns': False,
'resolve_descriptions': False,
'last_data_update_at': "{\"en\": \"2021-07-28T14:43:12.027Z\", \"fr\": \"2021-07-28T14:41:37.198Z\"}",
'language': 'fr',
'restricted_entity_strictness': 90,
'free_entity_strictness': 0,
'is_generating': False,
'sharenet': 0
}
],
'intents': [
{
'slug': 'ask-music',
'created_at': '2021-02-15T10:13:07.284Z',
'updated_at': '2021-02-15T10:13:07.284Z',
'description': None,
'name': 'ask-music',
'rating': 0,
'dataset_id': 0,
'is_activated': True,
'position': 2,
'is_forking': False,
'strictness': 60
}
],
'entities': [
{
'dataset_id': 0,
'entity_id': None,
'is_open': True,
'is_activated': True,
'slug': 'music-genre',
'created_at': '2021-02-15T10:13:05.659Z',
'updated_at': '2021-02-15T10:13:05.659Z',
'strictness': None,
'enrichment_strictness': 95,
'is_custom': True,
'color': '#b39ddb',
'name': 'MUSIC-GENRE',
'is_forking': False,
'webhook_id': None,
'gold_enrichment_from': None,
'enrichment_type': 'map',
'enrichment_webhook_id': None,
'locked': True,
'type': 2,
'regex_pattern': None,
'regex_flags': None
},
{
'dataset_id': 0,
'entity_id': None,
'is_open': True,
'is_activated': True,
'slug': 'pronoun',
'created_at': '2021-02-15T10:13:05.614Z',
'updated_at': '2021-02-15T10:13:05.614Z',
'strictness': None,
'enrichment_strictness': 95,
'is_custom': False,
'color': '#cffff4',
'name': 'PRONOUN',
'is_forking': False,
'webhook_id': None,
'gold_enrichment_from': None,
'enrichment_type': None,
'enrichment_webhook_id': None,
'locked': True,
'type': 0,
'regex_pattern': None,
'regex_flags': None
}
],
'synonyms': [{
'entity_id': 0,
'value': 'rock',
'slug': None,
'created_at': '2021-07-09T10:01:24.804Z',
'updated_at': '2021-07-09T10:01:24.804Z',
'language': 'en'
}],
'expressions': [{
'intent_id': 0,
'source': 'I want to listen to rock',
'created_at': '2021-07-28T14:42:13.067Z',
'updated_at': '2021-07-28T14:42:14.095Z',
'tokens': "[{\"pos\":\"PRON\",\"word\":\"I\",\"space\":true,\"entity_id\":1},{\"pos\":\"VERB\",\"word\":\"want\",\"space\":true},{\"pos\":\"PART\",\"word\":\"to\",\"space\":true},{\"pos\":\"VERB\",\"word\":\"listen\",\"space\":true},{\"pos\":\"ADP\",\"word\":\"to\",\"space\":true},{\"pos\":\"NOUN\",\"word\":\"rock\",\"space\":false,\"entity_id\":0}]",
'language': 'en'
}]
}
cai_dataset = {
'language': 'en',
'intents': [
{
'name': 'ask-music',
'description': '',
'strictness': 60,
'expressions': [{
'source': 'I want to listen to rock',
'tokens': [
{
'pos': 'PRON',
'word': 'I',
'space': True,
'entity': {
'name': 'PRONOUN',
'type': 'gold',
'is_custom': False
}
},
{
'pos': 'VERB',
'word': 'want',
'space': True,
'entity': None
},
{
'pos': 'PART',
'word': 'to',
'space': True,
'entity': None
},
{
'pos': 'VERB',
'word': 'listen',
'space': True,
'entity': None
},
{
'pos': 'ADP',
'word': 'to',
'space': True,
'entity': None
},
{
'pos': 'NOUN',
'word': 'rock',
'space': False,
'entity': {
'name': 'MUSIC-GENRE',
'type': 'restricted',
'is_custom': True
}
}
]}]
}
],
'gazettes': [
{
'name': 'MUSIC-GENRE',
'slug': 'music-genre',
'locked': True,
'type': 'restricted',
'is_open': True,
'strictness': None,
'enrichment_strictness': 95,
'synonyms': ['rock'],
'regex_pattern': None,
'regex_flags': None
},
{
'name': 'PRONOUN',
'slug': 'pronoun',
'locked': True,
'type': 'gold',
'is_open': True,
'strictness': None,
'enrichment_strictness': 95,
'synonyms': [],
'regex_pattern': None,
'regex_flags': None
}
]
}
assert Dataset().to_cai_format(original_dataset, 'en') == cai_dataset
cai_dataset['language'] = 'fr'
        cai_dataset['intents'][0]['expressions'] = []  # the 'ask-music' intent
        cai_dataset['gazettes'][0]['synonyms'] = []  # the 'MUSIC-GENRE' gazette
assert Dataset().to_cai_format(original_dataset, 'fr') == cai_dataset
|
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Alexandros Papangelis and Yi-Chia Wang"
import csv
import json
import math
import os
import pickle
import re
from copy import deepcopy
from plato.utilities.parser.data_parser import DataParser
from plato.dialogue.action import DialogueAct, DialogueActItem, Operator
from plato.dialogue.state import SlotFillingDialogueState
from plato.agent.component.dialogue_policy.\
reinforcement_learning.reward_function import SlotFillingReward
from plato.agent.component.dialogue_state_tracker.\
slot_filling_dst import SlotFillingDST
from applications.cambridge_restaurants.camrest_nlu import CamRestNLU
from plato.agent.component.nlu.slot_filling_nlu\
import SlotFillingNLU
from plato.agent.component.user_simulator.goal import Goal
from plato.utilities.dialogue_episode_recorder import DialogueEpisodeRecorder
from plato.domain.ontology import Ontology
from plato.domain.database import SQLDataBase
"""
Parse_DSTC2 is a specific data parser for the DSTC 2 dataset. It parses the
system and user logs and produces several kinds of output. It produces csv data
files for nlu and nlg for both user and system, it produces simple reactive
policies for both user and system, and it produces Plato dialogue Experience
Records that can be used ton train Plato dialogue policies.
"""
class Parser(DataParser):
def __init__(self, args):
"""
Initialize the internal structures of the DSTC2 parser.
"""
super(Parser, self).__init__()
self.nlu = None
self.path = None
self.ontology_path = None
self.database_path = None
self.ontology = None
self.database = None
self.DSTrackerSys = None
self.DSTrackerUsr = None
self.NLUSys = None
self.NLUUsr = None
self.DStateSys = None
self.prev_sys_act = None
self.reward = SlotFillingReward()
self.informable_slots = []
self.requestable_slots = []
self.system_requestable_slots = []
self.NActions = 0
self.agent_role = 'system'
self.dstc2_acts = ['repeat', 'canthelp', 'affirm', 'negate', 'deny',
'ack', 'thankyou', 'bye',
'reqmore', 'hello', 'welcomemsg', 'expl-conf',
'select', 'offer', 'reqalts',
'confirm-domain', 'confirm']
self.dontcare_patterns = ['anything', 'any', 'i do not care',
'i dont care', 'dont care',
'it does not matter', 'it doesnt matter',
'does not matter', 'doesnt matter']
self.bye_patterns = ['bye']
# Flag that controls whether we will use an nlu to parse the utterance
# in the logs or the provided
# annotations (for both sides, system and user).
self.TRAIN_LANG_2_LANG = False
# Counts of user responses to system actions -
# {system_act: {total_turns: int, user_act: frequency}}
self.user_policy_reactive = {}
# Frequency-based user dialogue_policy
self.user_policy = {}
# Counts of system responses to system actions given the state
# {encoding(DStateSys, user_act):
# {total_turns: int, sys_act: frequency}}
self.system_policy_reactive = {}
# Frequency-based system dialogue_policy
self.system_policy = {}
# Keep self.goals
self.goals = []
self.recorder_sys = None
self.recorder_usr = None
def initialize(self, **kwargs):
"""
Initialize some structures given the ontology and database, and load
pre-trained models for processing the utterances and tracking the
dialogue state.
:param kwargs:
:return: nothing
"""
path = ''
if 'data_path' in kwargs:
path = kwargs['data_path']
if 'ontology' in kwargs and 'database' in kwargs:
self.ontology_path = kwargs['ontology']
self.database_path = kwargs['database']
self.ontology = Ontology(self.ontology_path)
self.database = SQLDataBase(self.database_path)
if self.TRAIN_LANG_2_LANG:
# nlu for sys in order to collect data to train
# Language 2 Language Agent
# (must be trained in a previous pass)
self.NLUSys = CamRestNLU('models/camrest_nlu/sys/model')
self.NLUSys.initialize(self.ontology)
# nlu for usr in order to collect data to train
# Language 2 Language Agent
# (must be trained in a previous pass)
self.NLUUsr = CamRestNLU('models/camrest_nlu/usr/model')
self.NLUUsr.initialize(self.ontology)
# Track the dialogue state from the system's perspective
dst_args = \
dict(
zip(['ontology', 'database', 'domain'],
[self.ontology, self.database, 'CamRest']))
self.DSTrackerSys = SlotFillingDST(dst_args)
# Track the dialogue state from the user's perspective
self.DSTrackerUsr = SlotFillingDST(dst_args)
self.informable_slots = \
deepcopy(list(self.ontology.ontology['informable'].keys()))
self.requestable_slots = \
deepcopy(self.ontology.ontology['requestable'] + ['this'])
self.system_requestable_slots = \
deepcopy(self.ontology.ontology['system_requestable'])
self.NActions = \
4 + len(self.system_requestable_slots) + \
len(self.requestable_slots)
self.agent_role = 'system'
if isinstance(path, str):
if os.path.exists(os.path.dirname(path)):
self.path = path
else:
raise FileNotFoundError('Invalid data path %s ' % path)
else:
raise ValueError('Unacceptable value for data path: %s ' % path)
nlu_args = \
dict(
zip(['ontology', 'database', 'domain'],
[self.ontology, self.database, 'CamRest']))
self.nlu = SlotFillingNLU(nlu_args)
self.nlu.initialize(self.ontology)
self.recorder_sys = DialogueEpisodeRecorder()
self.recorder_usr = DialogueEpisodeRecorder()
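    # Illustrative initialization (the paths are placeholders; see parse_data's
    # docstring for the expected DSTC2 directory layout):
    #
    #     parser = Parser(args=None)
    #     parser.initialize(data_path='<PATH_TO_DSTC2_DATA>/dstc2_traindev/data/',
    #                       ontology='<PATH_TO_ONTOLOGY>.json',
    #                       database='<PATH_TO_DATABASE>.db')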
def delexicalize(self, utterance, usr_acts, sys_acts):
"""
De-lexicalize an utterance.
:param utterance: the utterance to delexicalize
:param usr_acts: user's dialogue acts
:param sys_acts: system's dialogue acts
:return: the de-lexicalized utterance
"""
# Do not track 'bye' utterances as those were generated by taking
# into account the goal the user had at time
# of data collection - which is not reflected in randomly sampled
# goals.
for pattern in self.bye_patterns:
if pattern in utterance:
return 'UNK'
delex_utterance = deepcopy(utterance)
# Replace synonyms (we are only generating nlg templates here)
delex_utterance = delex_utterance.replace('center', 'centre')
delex_utterance = delex_utterance.replace('cheaply', 'cheap')
delex_utterance = delex_utterance.replace('moderately', 'moderate')
delex_utterance = delex_utterance.replace('expensively', 'expensive')
for dc in self.dontcare_patterns:
delex_utterance = delex_utterance.replace(dc, 'dontcare')
# Look for a request (there may also be implicit or explicit confirms)
sys_req_slot = ''
for sa in sys_acts:
if sa['act'] == 'request':
sys_req_slot = sa['slots'][0][1]
break
# Replace each slot value with token
for usr_act in usr_acts:
for slot in usr_act['slots']:
if slot[0] == 'this':
if sys_req_slot:
slot[0] = sys_req_slot
else:
continue
if len(slot) > 1:
if usr_act['act'] == 'request':
if slot[1] == 'addr':
delex_utterance = \
delex_utterance.replace(
'address', '<' + slot[1].upper() + '>')
elif slot[1] == 'phone':
delex_utterance = \
delex_utterance.replace(
'phone number',
'<' + slot[1].upper() +
'>')
delex_utterance = \
delex_utterance.replace(
'phone', '<' + slot[1].upper() + '>')
elif slot[1] == 'postcode':
delex_utterance = \
delex_utterance.replace(
'postcode', '<' + slot[1].upper() + '>')
delex_utterance = \
delex_utterance.replace(
'post code', '<' + slot[1].upper() + '>')
delex_utterance = \
delex_utterance.replace(
'postal code', '<' + slot[1].upper() + '>')
delex_utterance = \
delex_utterance.replace(
'zip code', '<' + slot[1].upper() + '>')
else:
delex_utterance = \
delex_utterance.replace(
slot[1], '<' + slot[1].upper() + '>')
else:
delex_utterance = \
delex_utterance.replace(
slot[1], '<' + slot[0].upper() + '>')
return delex_utterance if '<' in delex_utterance else 'UNK'
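    # Illustrative behaviour of delexicalize (made-up example): for the user
    # utterance "im looking for cheap chinese food" with user act
    # inform(food=chinese, pricerange=cheap), each slot value is replaced by its
    # slot token, yielding "im looking for <PRICERANGE> <FOOD> food".
    # Utterances that end up without any token are returned as 'UNK'.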
def delexicalizeNLG(self, utterance, dacts):
"""
De-lexicalise utterance, specifically for nlg.
:param utterance: the utterance to de-lexicalize
:param dacts: dialogue acts
:return: the de-lexicalized utterance
"""
delex_transcript = deepcopy(utterance).lower()
delex_dacts = ''
for dact in dacts:
act = dact['act']
slots = dact['slots']
delex_dacts += 'act_' + act + ' '
if slots:
for slot in slots:
s = slot[0]
v = ''
if len(slot) > 1:
v = slot[1]
if s == 'slot':
delex_dacts += s
if v:
delex_dacts += '_' + v + ' '
else:
# Deal with some special cases
if v == 'dontcare':
delex_transcript = \
delex_transcript.replace(
'it doesnt matter', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'doesnt matter', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'do not care', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'dont care', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'any kind', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'any thing', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'any type', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'any', '<' + s + '>')
elif v == 'seafood':
delex_transcript = \
delex_transcript.replace(
'sea food', '<' + s + '>')
elif v == 'moderate':
delex_transcript = \
delex_transcript.replace(
'moderately', '<' + s + '>')
elif v == 'centre':
delex_transcript = \
delex_transcript.replace(
'center', '<' + s + '>')
elif v == 'asian oriental':
delex_transcript = \
delex_transcript.replace(
'asian oriental', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'oriental', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'asian', '<' + s + '>')
elif v == 'north american':
delex_transcript = \
delex_transcript.replace(
'north american', '<' + s + '>')
delex_transcript = \
delex_transcript.replace(
'american', '<' + s + '>')
if s == 'postcode':
idx = delex_transcript.find(v)
if idx >= 0:
delex_transcript = \
delex_transcript.replace(
delex_transcript[idx:], '<' + s + '>')
delex_transcript = \
delex_transcript.replace(str(v), '<' + s + '>')
delex_dacts += '<' + s + '> '
return [delex_transcript.strip(), delex_dacts.strip()]
def delexicalizeNLU(self, utterance, sys_act):
"""
De-lexicalise utterance, specifically for nlu.
:param utterance: the utterance to de-lexicalize
:param sys_act: system dialogue acts
:return: the de-lexicalized utterance
"""
delex_utterance = deepcopy(utterance)
# Replace synonyms (we are only generating nlg templates here)
delex_utterance = delex_utterance.replace('center', 'centre')
delex_utterance = delex_utterance.replace('cheaply', 'cheap')
delex_utterance = delex_utterance.replace('moderately', 'moderate')
delex_utterance = delex_utterance.replace('expensively', 'expensive')
sys_dact = None
if sys_act:
if self.prev_sys_act:
# If there is a slot value
if self.prev_sys_act['slots'] and \
self.prev_sys_act['slots'][0]:
if self.prev_sys_act['slots'][0][0] != 'slot':
sys_dact = \
DialogueAct(
self.prev_sys_act['act'],
[DialogueActItem(
self.prev_sys_act['slots'][0][0],
Operator.EQ,
self.prev_sys_act['slots'][0][1])])
else:
sys_dact = \
DialogueAct(
self.prev_sys_act['act'],
[DialogueActItem(
self.prev_sys_act['slots'][0][1],
Operator.EQ,
'')])
dacts = self.nlu.process_input(utterance, sys_dact)
# If the utterance cannot be parsed, skip it
if dacts[0].intent == 'UNK':
return 'UNK'
# TODO: This is a quick and dirty way to do this. Revisit soon!
# Search and replace each value
for dact in dacts:
# For inform dacts swap slot values
if dact.intent == 'inform':
for item in dact.params:
if item.value:
delex_utterance = \
delex_utterance.replace(
item.value, '<' + item.slot.upper() + '>')
else:
if not sys_dact or \
sys_dact.intent not in \
['request', 'impl-conf', 'expl-conf'] or \
not sys_dact.params:
return 'UNK'
for dc in self.dontcare_patterns:
delex_utterance = \
delex_utterance.replace(
dc,
'<' +
sys_dact.params[0].slot.upper() + '>')
# For request dacts swap slot names
elif dact.intent == 'request':
for item in dact.params:
if item.slot == 'addr':
delex_utterance = \
delex_utterance.replace(
'address', '<' + item.slot.upper() + '>')
elif item.slot == 'phone':
delex_utterance = delex_utterance.replace(
'phone number', '<' + item.slot.upper() + '>')
delex_utterance = delex_utterance.replace(
'phone', '<' + item.slot.upper() + '>')
elif item.slot == 'postcode':
delex_utterance = \
delex_utterance.replace(
'postcode', '<' + item.slot.upper() + '>')
delex_utterance = \
delex_utterance.replace(
'post code', '<' + item.slot.upper() + '>')
delex_utterance = \
delex_utterance.replace(
'postal code', '<' + item.slot.upper() + '>')
delex_utterance = \
delex_utterance.replace(
'zip code', '<' + item.slot.upper() + '>')
else:
delex_utterance = \
delex_utterance.replace(
item.slot,
'<' + item.slot.upper() + '>')
# Reject any utterances that make it here undelexicalized
return delex_utterance if '<' in delex_utterance else 'UNK'
def BIO_tag(self, dialogue_acts, utterance, mode):
"""
Compute Begin In Out tags for the given utterance
:param dialogue_acts: the dialogue acts
:param utterance: the utterance to compute tags for
:param mode: system or user
:return: intents and BIO tags
"""
utterance = self.correct_synonyms(utterance)
if mode == 'sys' or mode == 'system':
acts_with_slots = {'inform', 'deny', 'confirm'}
elif mode == 'usr' or mode == 'user':
acts_with_slots = {'canthelp', 'select', 'canthelp.exception',
'impl-conf',
'offer', 'inform', 'expl-conf'}
else:
acts_with_slots = {'canthelp', 'select', 'canthelp.exception',
'impl-conf',
'offer', 'inform', 'expl-conf'}
curr_intents = set()
for act in dialogue_acts:
curr_act = act['act']
if curr_act == 'request':
curr_act = '{}_{}'.format(curr_act, act['slots'][0][1])
# Alex: Correction for 'dontcare'
if curr_act == 'inform' and act['slots'][0][1] == 'dontcare':
curr_act = 'dontcare_{}'.format(act['slots'][0][0])
curr_intents.add(curr_act)
firstword2split = {}
split2tag = {}
for act in dialogue_acts:
curr_act = act['act']
if curr_act in acts_with_slots and len(act['slots']) > 0:
for curr_slot in act['slots']:
slot_name = curr_slot[0]
if isinstance(curr_slot[1], int):
curr_slot[1] = str(curr_slot[1])
# Special case for 'asian' and 'asian oriental' (there is
# no ambiguity in this case).
if curr_slot[
1] in ['asian oriental', 'australian asian'] and \
'asian' in utterance and \
'asian oriental' not in utterance and \
'australian asian' not in utterance:
curr_slot[1] = 'asian'
slot_value_split = curr_slot[1].split()
splits = \
firstword2split.get(slot_value_split[0].lower(), [])
splits.append(slot_value_split)
firstword2split[slot_value_split[0]] = splits
split2tag[tuple(slot_value_split)] = \
'{}-{}'.format(curr_act, slot_name)
transcript_split = utterance.split()
iob_tags = []
len_transcript = len(transcript_split)
i = 0
while i < len_transcript:
word = transcript_split[i].lower()
if word in firstword2split:
splits = firstword2split[word]
for split in splits:
full_split_matches = True
for j in range(len(split)):
if i + j < len(transcript_split):
if split[j].lower() != \
transcript_split[i + j].lower():
full_split_matches = False
break
else:
break
if full_split_matches:
tag = split2tag[tuple(split)]
for k in range(len(split)):
if k == 0:
iob_tags.append('{}-{}'.format('B', tag))
else:
iob_tags.append('{}-{}'.format('I', tag))
i += len(split)
break
else:
i += 1
else:
iob_tags.append('O')
i += 1
return curr_intents, iob_tags
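    # Illustrative behaviour of BIO_tag (made-up example): for the utterance
    # "i want chinese food" with a single act
    # {'act': 'inform', 'slots': [['food', 'chinese']]}, it returns
    # intents == {'inform'} and tags == ['O', 'O', 'B-inform-food', 'O'].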
def correct_synonyms(self, utterance):
# Replace synonyms
utterance = utterance.replace('center', 'centre')
utterance = utterance.replace('cheaply', 'cheap')
utterance = utterance.replace('moderately', 'moderate')
utterance = utterance.replace('expensively', 'expensive')
return utterance
def parse_data(
self,
data_filepath='data/',
ontology=None,
database=None
):
"""
Parse the DSTC2 data. This function will parse the user and system logs
and will shadow each agent (user or system) by using a dialogue state
tracker to track and update the current state, actions taken,
utterances produced, and ground truth.
As user and system in this specific data collection interact via
speech, as you can see from the ASR logs, there is no guarantee that:
1) what the user says is what the system hears
2) what the system understands (nlu & DST) reflects the truth
However, the system at the time of data collection was acting upon the
information coming through its ASR, nlu, and DST components, so to
accurately extract the system's policy we keep track of the dialogue
State, given the system's ASR & nlu output.
This way we can attempt to replicate the user's and the system's
behaviour and generate policies for both.
Depending on the configuration, this function can use pre-trained nlu
and nlg models to parse the utterances.
As we do not have access to the exact ontology and database that were
used at the time of collecting the data, some warnings will be produced
by Plato for missing database items.
It generates the following files:
Plato experience logs that can be loaded into Plato to train any
component (see runDSTC2DataParser.py for an example).
Plato experience log from the system's side:
path + DSTC2_system
Plato experience log from the user's side:
path + /DSTC2_user
User-side dialogue_policy that is a dictionary of:
SystemAct --> <List of User Responses with Probabilities>
models/user_simulator/user_policy_reactive.pkl
System-side dialogue_policy that is a dictionary of:
UserAct --> <List of System Responses with Probabilities>
models/camrest_policy/system_policy_reactive.pkl
User-side dialogue_policy that is a dictionary of:
UserDialogueState --> <List of User Responses with Probabilities>
models/user_simulator/user_policy.pkl
System-side dialogue_policy that is a dictionary of:
SystemDialogueState --> <List of System Responses with Probabilities>
models/camrest_policy/sys_policy.pkl
File listing the user's goals found while parsing the data.
This file can be loaded into Plato's Simulated User to sample goals
        from there instead of generating random goals.
Data/goals.pkl
:param data_filepath: Path to the data:
<PATH_TO_DSTC2_DATA>/dstc2_traindev/data/
:param ontology: the domain ontology
:param database: the domain database
:return:
"""
# Get state encoding length
temp_dstate = \
SlotFillingDialogueState(
{'slots': self.ontology.ontology['system_requestable']})
temp_dstate.initialize()
for (dirpath, dirnames, filenames) in os.walk(self.path):
if not filenames or filenames[0] == '.DS_Store':
continue
print('Parsing files at %s' % dirpath)
# Open files
with open(dirpath + '/label.json') as label_file, \
open(dirpath + '/log.json') as log_file:
label = json.load(label_file)
log = json.load(log_file)
prev_usr_act_slot = ''
# Initialize the dialogue states
self.DSTrackerSys.initialize()
DStateSys = deepcopy(self.DSTrackerSys.get_state())
DStateSys_prev = deepcopy(DStateSys)
self.DSTrackerUsr.initialize()
sys_dacts = []
usr_dacts = []
prev_usr_act_slots = ''
# Update user dialogue state with goal information
goal = Goal()
constr = {}
req = {}
for c in label['task-information']['goal']['constraints']:
constr[c[0]] = DialogueActItem(c[0], Operator.EQ, c[1])
goal.constraints = constr
for r in label['task-information']['goal']['request-slots']:
req[r] = DialogueActItem(r, Operator.EQ, [])
goal.requests = req
# Save goal
self.goals.append(goal)
self.DSTrackerUsr.update_goal(goal)
DStateUsr = deepcopy(self.DSTrackerUsr.get_state())
DStateUsr_prev = deepcopy(DStateUsr)
sys_turn = {}
user_turn = {}
prev_sys_input = ''
# Parse each dialogue turn
for t in range(len(label['turns'])):
# The system has the initiative and always starts first
sys_turn = log['turns'][t]
user_turn = label['turns'][t]
sys_acts = []
sys_slots = {'area': False,
'food': False,
'pricerange': False,
'addr': False,
'name': False,
'phone': False,
'postcode': False}
delex_utterance = \
self.delexicalize(
user_turn['transcription'],
user_turn['semantics']['json'],
sys_turn['output']['dialog-acts'])
usr_dacts = []
# Get all semantic acts
for udact in user_turn['semantics']['json']:
# TODO: THIS OVERRIDES PREVIOUS ACTS
user_act = udact['act']
user_dact = DialogueAct(user_act, [])
if user_act == 'bye':
user_terminating = True
udact['slots'] = [['slot']]
elif user_act == 'request':
requested = udact['slots'][0][1]
# For each slot-value pair
for slot in udact['slots']:
for sdact in sys_turn['output']['dialog-acts']:
sys_act = sdact['act']
dact_items = []
if sys_act not in sys_acts:
sys_acts.append(sys_act)
sys_act_slot = sys_act
if sdact['slots']:
if sdact['act'] == 'request':
sys_act_slot += \
'_' + sdact['slots'][0][1]
ss = sdact['slots'][0][1]
dact_items.append(
DialogueActItem(
ss,
Operator.EQ,
''))
else:
sys_act_slot += \
'_' + sdact['slots'][0][0]
ss = sdact['slots'][0][0]
dact_items.append(
DialogueActItem(
ss,
Operator.EQ,
sdact['slots'][0][1]))
if ss:
sys_slots[ss] = True
# Retrieve user act slot
if user_act == 'request':
usr_act_slot = user_act + '_' + slot[1]
user_dact.add_item(
DialogueActItem(
slot[1],
Operator.EQ,
''))
elif user_act == 'bye':
# Add underscore for consistent parsing
# later
usr_act_slot = user_act + '_'
else:
usr_act_slot = user_act + '_' + slot[0]
user_dact.add_item(
DialogueActItem(
slot[0],
Operator.EQ,
slot[1]))
# Reactive version of user policy - just
# reacts to system actions
if sys_act_slot not in \
self.user_policy_reactive:
self.user_policy_reactive[sys_act_slot] = \
{}
self.user_policy_reactive[
sys_act_slot]['total_turns'] = 0
self.user_policy_reactive[
sys_act_slot]['dacts'] = {}
self.user_policy_reactive[
sys_act_slot]['responses'] = {}
if usr_act_slot not in \
self.user_policy_reactive[
sys_act_slot]['dacts']:
self.user_policy_reactive[
sys_act_slot][
'dacts'][usr_act_slot] = 1
if delex_utterance != 'UNK':
self.user_policy_reactive[
sys_act_slot][
'responses'][delex_utterance] = 1
else:
self.user_policy_reactive[
sys_act_slot][
'dacts'][usr_act_slot] += 1
if delex_utterance != 'UNK':
if delex_utterance not in\
self.user_policy_reactive[
sys_act_slot]['responses']:
self.user_policy_reactive[
sys_act_slot][
'responses'][delex_utterance] = 1
else:
self.user_policy_reactive[
sys_act_slot][
'responses'][delex_utterance] += 1
self.user_policy_reactive[
sys_act_slot]['total_turns'] += 1
if user_dact not in usr_dacts:
usr_dacts.append(user_dact)
# Update system's dialogue_policy. Here we use the previous
# dialogue state, which is where the sys
# act was taken from.
state_enc_sys = \
self.encode_state(DStateSys, agent_role='system')
# Collapse state encoding to a number
dstate_idx = ''.join([str(bit) for bit in state_enc_sys])
sys_dacts = []
if 'output' in sys_turn and \
'dialog-acts' in sys_turn['output'] and \
sys_turn['output']['dialog-acts']:
sys_act_slots = ''
for sda in sys_turn['output']['dialog-acts']:
sys_act_slot = sda['act']
sys_dact = DialogueAct(sys_act_slot, [])
if sda['slots']:
if sys_act_slot == 'request':
sys_act_slot += '_' + sda['slots'][0][1]
sys_dact.add_item(
DialogueActItem(
sda['slots'][0][1],
Operator.EQ,
''))
else:
sys_act_slot += '_' + sda['slots'][0][0]
sys_dact.add_item(
DialogueActItem(
sda['slots'][0][0],
Operator.EQ,
sda['slots'][0][1]))
sys_dacts.append(sys_dact)
sys_act_slots += sys_act_slot + ';'
# Trim last ;
if sys_act_slots:
sys_act_slots = sys_act_slots[:-1]
if dstate_idx not in self.system_policy:
self.system_policy[dstate_idx] = {}
self.system_policy[dstate_idx]['total_turns'] = 0
self.system_policy[dstate_idx]['dacts'] = {}
self.system_policy[dstate_idx]['responses'] = {}
if sys_act_slots not in\
self.system_policy[dstate_idx]['dacts']:
self.system_policy[
dstate_idx]['dacts'][sys_act_slots] = 1
else:
self.system_policy[
dstate_idx]['dacts'][sys_act_slots] += 1
self.system_policy[dstate_idx]['total_turns'] += 1
for prev_usr_act_slot in prev_usr_act_slots.split(';'):
if prev_usr_act_slot not in \
self.system_policy_reactive:
self.system_policy_reactive[
prev_usr_act_slot] = {}
self.system_policy_reactive[
prev_usr_act_slot]['total_turns'] = 0
self.system_policy_reactive[
prev_usr_act_slot]['dacts'] = {}
self.system_policy_reactive[
prev_usr_act_slot]['responses'] = {}
if sys_act_slots not in \
self.system_policy_reactive[
prev_usr_act_slot]['dacts']:
self.system_policy_reactive[
prev_usr_act_slot][
'dacts'][sys_act_slots] = 1
else:
self.system_policy_reactive[
prev_usr_act_slot][
'dacts'][sys_act_slots] += 1
self.system_policy_reactive[
prev_usr_act_slot]['total_turns'] += 1
if self.TRAIN_LANG_2_LANG:
usr_dacts = \
self.NLUSys.process_input(
user_turn['transcription'], DStateSys)
sys_dacts = \
self.NLUUsr.process_input(
sys_turn['output']['transcript'], DStateUsr)
# Track the system's dialogue state. This will be relevant
# in the next turn.
                # Update the system's dialogue state via its tracker
DStateSys_prev = deepcopy(DStateSys)
self.DSTrackerSys.update_state(usr_dacts)
DStateSys = \
deepcopy(
self.DSTrackerSys.update_state_db(
self.db_lookup()))
# Track the user's dialogue state. This is relevant in
# the present turn.
DStateUsr_prev = deepcopy(DStateUsr)
# For Supervised agent this seems to help as it keeps
# track of the slots filled
# self.DSTrackerUsr.update_state(usr_dacts)
self.DSTrackerUsr.update_state(sys_dacts)
self.DSTrackerUsr.update_state_db(sys_acts=sys_dacts)
# self.DSTrackerUsr.update_state_sysact(usr_dacts)
DStateUsr = deepcopy(self.DSTrackerUsr.get_state())
                # Encode DStateUsr
state_enc_usr = \
self.encode_state(DStateUsr, agent_role='user')
                # Collapse the state encoding into a string key
dstate_idx = ''.join([str(bit) for bit in state_enc_usr])
# Agent to agent version of user policy - based on state
# Note: It may be duplicate effort to re-iterate over
# the user acts. For now I let it be for clarity.
# Disregard empty user actions
if usr_dacts:
usr_act_slots = ''
for ud in usr_dacts:
usr_act_slot = ud.intent
if ud.params:
usr_act_slot += '_' + ud.params[0].slot
usr_act_slots += usr_act_slot + ';'
# Trim last ;
usr_act_slots = usr_act_slots[:-1]
if dstate_idx not in self.user_policy:
self.user_policy[dstate_idx] = {}
self.user_policy[dstate_idx]['total_turns'] = 0
self.user_policy[dstate_idx]['dacts'] = {}
self.user_policy[dstate_idx]['responses'] = {}
if usr_act_slots not in \
self.user_policy[dstate_idx]['dacts']:
self.user_policy[
dstate_idx]['dacts'][usr_act_slots] = 1
else:
self.user_policy[
dstate_idx]['dacts'][usr_act_slots] += 1
self.user_policy[dstate_idx]['total_turns'] += 1
prev_usr_act_slots = usr_act_slots
if not sys_acts:
continue
# Record experience
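                # Only successful dialogues are recorded. Each intermediate turn is
                # stored with a reward of -1 (turn penalty); the final record at the
                # end of the dialogue gets +/-20 depending on the success flag.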
if bool(label['task-information']['feedback']['success']):
# Hack for Supervised policies that cannot handle
# multiple actions
if prev_usr_act_slot and \
'request' in prev_usr_act_slot and\
len(sys_dacts) > 1 and\
sys_dacts[0].intent == 'offer':
ssdacts = deepcopy(sys_dacts[1:])
else:
ssdacts = deepcopy(sys_dacts)
t_intent, t_bio_tags = self.BIO_tag(
user_turn['semantics']['json'],
user_turn['transcription'],
mode='sys')
self.recorder_sys.record(
DStateSys_prev,
DStateSys,
ssdacts,
input_utterance=prev_sys_input,
output_utterance=sys_turn['output']['transcript'],
reward=-1,
success=False,
role='system',
custom={'dact_str':
str(sys_turn['output']['dialog-acts']),
'utterance':
user_turn['transcription'],
'intent': t_intent,
'bio_tags': t_bio_tags
}
)
self.recorder_usr.record(
DStateUsr_prev,
DStateUsr,
deepcopy(usr_dacts),
input_utterance=sys_turn['output']['transcript'],
output_utterance=user_turn['transcription'],
reward=-1,
success=False,
role='user',
custom=str(user_turn['semantics']['json'])
)
prev_sys_input =\
sys_turn['input']['live']['asr-hyps'][0]['asr-hyp']
# Record final experience (end of dialogue) - prev & current
# states will be the same here.
if bool(label['task-information']['feedback']['success']):
# Hack for Supervised policies that cannot handle
# multiple actions
if prev_usr_act_slot and \
'request' in prev_usr_act_slot and \
len(sys_dacts) > 1 and \
sys_dacts[0].intent == 'offer':
ssdacts = deepcopy(sys_dacts[1:])
else:
ssdacts = deepcopy(sys_dacts)
self.recorder_sys.record(
DStateSys_prev,
DStateSys,
ssdacts,
20 if label['task-information']['feedback']['success']
else -20,
bool(label['task-information']['feedback']['success']),
input_utterance=prev_sys_input,
output_utterance='',
force_terminate=True)
self.recorder_usr.record(
DStateUsr_prev,
DStateUsr,
deepcopy(usr_dacts),
20 if label['task-information']['feedback']['success']
else -20,
bool(label['task-information']['feedback']['success']),
input_utterance=sys_turn['output']['transcript'],
output_utterance=user_turn['transcription'],
force_terminate=True)
# Save data for LU, ST, and LG
print('\n\nProcessing nlu, dst, and nlg files...')
# sys's DST, nlg and usr's nlu
with open(data_filepath + 'DSTC2_NLG_sys.csv', 'a') as sys_nlg_file, \
open(data_filepath + 'DSTC2_NLU_usr.csv', 'a') \
as usr_nlu_file, \
open(data_filepath + 'DST_sys.csv', 'a') as sys_dst_file:
sys_nlg_writer = csv.writer(sys_nlg_file, delimiter=',')
usr_nlu_writer = csv.writer(usr_nlu_file, delimiter=',')
sys_dst_writer = csv.writer(sys_dst_file, delimiter=',')
# Write headers
sys_nlg_writer.writerow(
['dialog-acts_str', 'transcript', 'nlg_input', 'nlg_output'])
usr_nlu_writer.writerow(['transcript', 'intent', 'iob'])
sys_dst_writer.writerow(
# Previous Dialogue State
['dst_prev_food', 'dst_prev_area',
'dst_prev_pricerange',
# Input from NLU
'nlu_intent',
'req_slot',
'inf_area_value',
'inf_food_value',
'inf_pricerange_value',
# New Dialogue State
'dst_food', 'dst_area', 'dst_pricerange',
'dst_req_slot'
]
)
for dialogue in self.recorder_sys.dialogues:
for sys_turn in dialogue:
utterance = sys_turn['output_utterance']
dact_str = sys_turn['custom']['dact_str'] \
if 'dact_str' in sys_turn['custom'] \
else sys_turn['custom']
if not utterance or not dact_str:
continue
try:
dialogue_acts = json.loads(dact_str.replace("'", '"'))
                    except ValueError:  # json.JSONDecodeError is a ValueError
tmp = dact_str.replace("\'", '"')
tmp = re.sub(r'([a-z])"([a-z])', r"\1'\2", tmp)
dialogue_acts = json.loads(tmp)
intent, iob_tags = \
self.BIO_tag(dialogue_acts, utterance, 'usr')
[delex_transcript, delex_dacts] = \
self.delexicalizeNLG(utterance, dialogue_acts)
sys_nlg_writer.writerow(
[dact_str, utterance, delex_dacts, delex_transcript])
usr_nlu_writer.writerow(
[utterance, ' '.join(intent), ' '.join(iob_tags)])
# Special cases for sys nlg
# If there is act_offer <name> in delex_dacts add another
# pair with act_offer <name> removed
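                    # (e.g. an input like 'act_offer <name> act_inform <food>' would,
                    # roughly, also yield a pair without the offer part)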
if "act_offer <name> " in delex_dacts:
delex_dacts = \
delex_dacts.replace('act_offer <name> ', '')
delex_transcript = \
delex_transcript.replace('of <name> ', '')
delex_transcript = \
delex_transcript.replace('<name> ', 'it ')
sys_nlg_writer.writerow(
[dact_str, utterance,
delex_dacts, delex_transcript])
dst_prev_food = 'none'
if sys_turn['state'].slots_filled['food']:
dst_prev_food = sys_turn['state'].slots_filled['food']
dst_prev_pricerange = 'none'
if sys_turn['state'].slots_filled['pricerange']:
dst_prev_pricerange = \
sys_turn['state'].slots_filled['pricerange']
dst_prev_area = 'none'
if sys_turn['state'].slots_filled['area']:
dst_prev_area = sys_turn['state'].slots_filled['area']
dst_req_slot = 'none'
nlu_intent = set()
inf_area_value = 'none'
inf_food_value = 'none'
inf_pricerange_value = 'none'
if sys_turn['new_state'].user_acts:
for uact in sys_turn['new_state'].user_acts:
nlu_intent.add(uact.intent)
for p in uact.params:
if p.slot == 'area':
inf_area_value = p.value
elif p.slot == 'food':
inf_food_value = p.value
elif p.slot == 'pricerange':
inf_pricerange_value = p.value
# Special case for requests
if uact.intent == 'request' and uact.params:
dst_req_slot = uact.params[0].slot
dst_food = 'none'
if sys_turn['new_state'].slots_filled['food']:
dst_food = \
sys_turn['new_state'].slots_filled['food']
dst_pricerange = 'none'
if sys_turn['new_state'].slots_filled['pricerange']:
dst_pricerange = \
sys_turn['new_state'].slots_filled[
'pricerange']
dst_area = 'none'
if sys_turn['new_state'].slots_filled['area']:
dst_area = \
sys_turn['new_state'].slots_filled['area']
sys_dst_writer.writerow([dst_prev_food,
dst_prev_area,
dst_prev_pricerange,
' '.join(nlu_intent),
dst_req_slot,
inf_area_value,
inf_food_value,
inf_pricerange_value,
dst_food,
dst_area,
dst_pricerange,
dst_req_slot])
# Another special case for sys nlg: sys side has no bye()
sys_nlg_writer.writerow(["[{'slots': [], 'act': 'bye'}]",
'good bye', 'act_bye', 'good bye'])
# sys's nlu and usr's DST, nlg
with open(data_filepath + 'DSTC2_NLU_sys.csv', 'a') as sys_nlu_file, \
open(data_filepath + 'DSTC2_NLG_usr.csv', 'a') as \
usr_nlg_file, \
open(data_filepath + 'DST_usr.csv', 'a') as usr_dst_file:
sys_nlu_writer = csv.writer(sys_nlu_file, delimiter=',')
usr_nlg_writer = csv.writer(usr_nlg_file, delimiter=',')
usr_dst_writer = csv.writer(usr_dst_file, delimiter=',')
# Write headers
sys_nlu_writer.writerow(['transcript', 'intent', 'iob'])
usr_nlg_writer.writerow(['dialog-acts_str', 'transcript',
'nlg_input', 'nlg_output'])
usr_dst_writer.writerow(['dst_prev_food', 'dst_prev_area',
'dst_prev_pricerange', 'dst_intent',
'dst_slot', 'dst_value', 'dst_food',
'dst_area', 'dst_pricerange',
'dst_req_slot'
])
for dialogue in self.recorder_usr.dialogues:
for usr_turn in dialogue:
utterance = usr_turn['output_utterance']
dact_str = usr_turn['custom']
if not utterance or not dact_str:
continue
try:
dialogue_acts = json.loads(dact_str.replace("'", '"'))
                    except ValueError:  # json.JSONDecodeError is a ValueError
tmp = dact_str.replace("\'", '"')
tmp = re.sub(r'([a-z])"([a-z])', r"\1'\2", tmp)
dialogue_acts = json.loads(tmp)
intent, iob_tags = \
self.BIO_tag(dialogue_acts, utterance, 'sys')
[delex_transcript, delex_dacts] = \
self.delexicalizeNLG(utterance, dialogue_acts)
usr_nlg_writer.writerow(
[dact_str, utterance, delex_dacts, delex_transcript])
sys_nlu_writer.writerow(
[utterance, ' '.join(intent), ' '.join(iob_tags)])
dst_prev_food = 'none'
if usr_turn['state'].slots_filled['food']:
dst_prev_food = usr_turn['state'].slots_filled['food']
dst_prev_pricerange = 'none'
if usr_turn['state'].slots_filled['pricerange']:
dst_prev_pricerange = usr_turn['state'].slots_filled[
'pricerange']
dst_prev_area = 'none'
if usr_turn['state'].slots_filled['area']:
dst_prev_area = usr_turn['state'].slots_filled['area']
dst_req_slot = 'none'
nlu_intent = set()
inf_area_value = 'none'
inf_food_value = 'none'
inf_pricerange_value = 'none'
if usr_turn['new_state'].user_acts:
for sact in usr_turn['new_state'].user_acts:
nlu_intent.add(sact.intent)
for p in sact.params:
if p.slot == 'area':
inf_area_value = p.value
elif p.slot == 'food':
inf_food_value = p.value
elif p.slot == 'pricerange':
inf_pricerange_value = p.value
# Special case for requests
if sact.intent == 'request' and sact.params:
dst_req_slot = sact.params[0].slot
dst_food = 'none'
if usr_turn['new_state'].slots_filled['food']:
dst_food = \
usr_turn['new_state'].slots_filled['food']
dst_pricerange = 'none'
if usr_turn['new_state'].slots_filled['pricerange']:
dst_pricerange = \
usr_turn['new_state'].slots_filled[
'pricerange']
dst_area = 'none'
if usr_turn['new_state'].slots_filled['area']:
dst_area = \
usr_turn['new_state'].slots_filled['area']
usr_dst_writer.writerow([dst_prev_food,
dst_prev_area,
dst_prev_pricerange,
' '.join(nlu_intent),
dst_req_slot,
inf_area_value,
inf_food_value,
inf_pricerange_value,
dst_food,
dst_area,
dst_pricerange,
dst_req_slot])
print('Done!')
# Normalize frequencies for user policy
for sa in self.user_policy_reactive:
for ua in self.user_policy_reactive[sa]['dacts']:
self.user_policy_reactive[sa]['dacts'][ua] /= \
self.user_policy_reactive[sa]['total_turns']
for ur in self.user_policy_reactive[sa]['responses']:
self.user_policy_reactive[sa]['responses'][ur] /= \
self.user_policy_reactive[sa]['total_turns']
# Normalize frequencies for system policy
for ua in self.system_policy_reactive:
for sa in self.system_policy_reactive[ua]['dacts']:
self.system_policy_reactive[ua]['dacts'][sa] /= \
self.system_policy_reactive[ua]['total_turns']
for sr in self.system_policy_reactive[ua]['responses']:
self.system_policy_reactive[ua]['responses'][sr] /=\
self.system_policy_reactive[ua]['total_turns']
# Normalize frequencies for user calculated policy
for state in self.user_policy:
for ua in self.user_policy[state]['dacts']:
self.user_policy[state]['dacts'][ua] /= \
self.user_policy[state]['total_turns']
# Normalize frequencies for system calculated policy
for state in self.system_policy:
for sa in self.system_policy[state]['dacts']:
self.system_policy[state]['dacts'][sa] /= \
self.system_policy[state]['total_turns']
def db_lookup(self):
"""
Perform an SQL database query
:return: a dictionary containing the results of the query
"""
# TODO: Add check to assert if each slot in DStateSys.slots_filled
# actually exists in the schema.
DStateSys = self.DSTrackerSys.get_state()
# Query the database
cursor = self.database.SQL_connection.cursor()
sql_command = " SELECT * FROM CamRestaurants "
args = ''
prev_arg = False
for slot in DStateSys.slots_filled:
if DStateSys.slots_filled[slot] and\
DStateSys.slots_filled[slot] != 'dontcare':
if prev_arg:
args += " AND "
args += slot + " = \"" + DStateSys.slots_filled[slot] + "\""
prev_arg = True
if args:
sql_command += " WHERE " + args + ";"
cursor.execute(sql_command)
db_result = cursor.fetchall()
if db_result:
# Get the slot names
slot_names = [i[0] for i in cursor.description]
result = []
for db_item in db_result:
result.append(dict(zip(slot_names, db_item)))
# Calculate entropy of requestable slot values in results
entropies = \
dict.fromkeys(self.ontology.ontology['system_requestable'])
value_probabilities = {}
# Count the values
for req_slot in self.ontology.ontology['system_requestable']:
value_probabilities[req_slot] = {}
for db_item in result:
if db_item[req_slot] not in value_probabilities[req_slot]:
value_probabilities[req_slot][db_item[req_slot]] = 1
else:
value_probabilities[req_slot][db_item[req_slot]] += 1
# Calculate probabilities
for slot in value_probabilities:
for value in value_probabilities[slot]:
value_probabilities[slot][value] /= len(result)
# Calculate entropies
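            # Shannon entropy: H(slot) = -sum_v p(v) * log(p(v)) over the value
            # distribution of this slot within the current result set; a higher
            # value means the slot's values are more spread out across results.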
for slot in entropies:
entropies[slot] = 0
if slot in value_probabilities:
for value in value_probabilities[slot]:
entropies[slot] +=\
value_probabilities[slot][value] * \
math.log(value_probabilities[slot][value])
entropies[slot] = -entropies[slot]
return result, entropies
# Failed to retrieve anything
print('Warning! Database call retrieved zero results.')
return ['empty'], {}
def save(self, path):
"""
Save all the models.
:param path: Path to save the experience logs
:return:
"""
# Save data
self.recorder_sys.save(path+'DSTC2_system')
self.recorder_usr.save(path+'DSTC2_user')
# Pickle the self.user_policy_reactive and the responses
obj = {'dialogue_policy': self.user_policy_reactive}
with open('models/user_simulator/user_policy_reactive.pkl',
'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
# Pickle the self.system_policy_reactive and the responses
obj = {'dialogue_policy': self.system_policy_reactive}
with open('models/camrest_policy/sys/system_policy_reactive.pkl',
'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
# Pickle the self.user_policy
obj = {'dialogue_policy': self.user_policy}
with open('models/user_simulator/user_policy.pkl', 'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
# Pickle the self.system_policy and the responses
obj = {'dialogue_policy': self.system_policy}
with open('models/camrest_policy/sys/sys_policy.pkl', 'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
# Pickle self.goals
obj = {'goals': self.goals}
with open('data/goals.pkl', 'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
    # These encodings are the same as the ones the SupervisedPolicy uses,
    # so that performance can be compared.
def encode_state(self, state, agent_role='system'):
"""
Encodes the dialogue state into an index.
:param state: the state to encode
:param agent_role: role of the agent
:return: int - a unique state encoding
"""
temp = [int(state.is_terminal_state)]
        temp.append(1 if state.system_made_offer else 0)
# If the agent plays the role of the user it needs access to its own
# goal
if agent_role == 'user':
# The user agent needs to know which constraints and requests
# need to be communicated and which of them
# actually have.
if state.user_goal:
for c in self.informable_slots:
if c != 'name':
if c in state.user_goal.constraints and\
state.user_goal.constraints[c].value:
temp.append(1)
else:
temp.append(0)
# Put these features separately from the above
for c in self.informable_slots:
if c != 'name':
if c in state.user_goal.actual_constraints and \
state.user_goal.actual_constraints[c].value:
temp.append(1)
else:
temp.append(0)
for r in self.requestable_slots:
if r in state.user_goal.requests:
temp.append(1)
else:
temp.append(0)
# Put these features separately from the above
for r in self.requestable_slots:
if r in state.user_goal.actual_requests and\
state.user_goal.actual_requests[r].value:
temp.append(1)
else:
temp.append(0)
else:
temp += \
[0] * 2 * (len(self.informable_slots) - 1 +
len(self.requestable_slots))
        if agent_role == 'system':
            for value in state.slots_filled.values():
                # This contains the requested slot
                temp.append(1 if value else 0)
            for r in self.requestable_slots:
                temp.append(1 if r == state.requested_slot else 0)
return temp
def encode_action(self, actions, system=True):
"""
Encode the dialogue actions
:param actions: The list of actions to encode
:param system: If the actions were taken by a 'system' or a 'user'
agent
:return:
"""
# TODO: Action encoding in a principled way
if not actions:
print('WARNING: Parse DSTC2 action encoding called with empty'
' actions list (returning 0).')
return 0
# TODO: Handle multiple actions
action = actions[0]
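        # The action index space is a flat layout:
        #   [dstc2_acts | request(slot) actions | inform(slot) actions]
        # so request/inform actions are offset by the sizes of the preceding blocks.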
if self.dstc2_acts and action.intent in self.dstc2_acts:
return self.dstc2_acts.index(action.intent)
if action.intent == 'request':
if system:
return len(self.dstc2_acts) + \
self.system_requestable_slots.index(
action.params[0].slot)
else:
return len(self.dstc2_acts) + \
self.requestable_slots.index(action.params[0].slot)
if action.intent == 'inform':
if system:
return len(self.dstc2_acts) + \
len(self.system_requestable_slots) + \
self.requestable_slots.index(action.params[0].slot)
else:
return len(self.dstc2_acts) + \
len(self.requestable_slots) + \
self.requestable_slots.index(action.params[0].slot)
# Default fall-back action
print('Parse DSTC2 ({0}) action encoder warning: Selecting default '
'action (unable to encode: {1})!'
.format(self.agent_role, action))
return 0
def encode_action_dstc(self, actions, system=True):
"""
Endoce the dialogue actions - specific for DSTC2
:param actions:
:param system:
:return:
"""
if not actions:
print('WARNING: Parse DSTC2 action encoding called with empty '
'actions list (returning 0).')
return 0
# TODO: Handle multiple actions
action = actions[0]
if self.dstc2_acts and action.intent in self.dstc2_acts:
return self.dstc2_acts.index(action.intent)
if action.intent == 'request':
if system and \
action.params[0].slot in self.system_requestable_slots:
return len(self.dstc2_acts) + \
self.system_requestable_slots.index(
action.params[0].slot)
elif action.params[0].slot in self.requestable_slots:
return len(self.dstc2_acts) + \
self.requestable_slots.index(action.params[0].slot)
if action.intent == 'inform' and \
action.params[0].slot in self.requestable_slots:
if system:
return len(self.dstc2_acts) + \
len(self.system_requestable_slots) + \
self.requestable_slots.index(action.params[0].slot)
else:
return len(self.dstc2_acts) + \
len(self.requestable_slots) + \
self.requestable_slots.index(action.params[0].slot)
# Default fall-back action
print('Parse DSTC2 action encoder warning: Selecting default action '
'(unable to encode: {0})!'.format(action))
return 0
|
from flask import Blueprint
# Create the blueprint object
profile_blue = Blueprint('profile', __name__, url_prefix='/user')
# Import the view functions
from . import views
|
import pendulum
from flask import jsonify, Blueprint, current_app, abort
from ..core.models import FastDictTimeseries
bp = Blueprint('base', __name__)
@bp.route('/')
def base_root():
db = current_app.cdb
return jsonify(db.info())
@bp.route('/metrics')
def metrics():
db = current_app.cdb
connection = db.get_connection()
out = []
for m in connection.metric_definitions:
out.append(m.to_dict())
return jsonify(out)
@bp.route('/events')
def events():
db = current_app.cdb
connection = db.get_connection()
out = []
for m in connection.event_definitions:
out.append(m.to_dict())
return jsonify(out)
@bp.route('/database')
def database():
db = current_app.cdb
s = db.get_database_structure()
return jsonify(s)
@bp.route('/timeseries/<key>/<metric>/last_value')
def last_value(key, metric):
db = current_app.cdb
s = db.get_last_value(key, metric)
return jsonify([x for x in s.get_serializable_iterator("iso")])
@bp.route('/timeseries/<key>/<metric>/<int:days>days')
def metric_days(key, metric, days):
db = current_app.cdb
t = pendulum.now("utc").add(hours=1)
f = pendulum.now("utc").subtract(days=days)
s = db.get_timeseries(key, [metric], f, t)[0]
return jsonify([x for x in s.get_serializable_iterator("iso")])
@bp.route('/timeseries/<key>/<int:days>days')
def days(key, days):
db = current_app.cdb
t = pendulum.now("utc").add(hours=1)
f = pendulum.now("utc").subtract(days=days)
res = db.get_all_metrics(key, f, t)
if res:
return jsonify([x for x in res.get_serializable_iterator("iso")])
return jsonify(None), 200
@bp.route('/timeseries/<key>/full')
def full_download(key):
db = current_app.cdb
res = db.get_full_timeseries(key)
if res:
return jsonify([x for x in res.get_serializable_iterator("iso")])
return jsonify(None), 200
|
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import Config
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI, convert_unicode=True)
# print(Config.SQLALCHEMY_DATABASE_URI)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
# from app.models import User
Base.metadata.create_all(bind=engine)
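# Typical Flask wiring (a sketch, not part of this module): remove the scoped
# session when the app context tears down, e.g.
#     @app.teardown_appcontext
#     def shutdown_session(exception=None):
#         db_session.remove()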
|
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
import os
if os.name == 'posix':
hiddenimports = ['libvtkCommonPython','libvtkFilteringPython','libvtkIOPython','libvtkImagingPython','libvtkGraphicsPython','libvtkRenderingPython','libvtkHybridPython','libvtkParallelPython','libvtkPatentedPython']
else:
hiddenimports = ['vtkCommonPython','vtkFilteringPython','vtkIOPython','vtkImagingPython','vtkGraphicsPython','vtkRenderingPython','vtkHybridPython','vtkParallelPython','vtkPatentedPython']
|
"""
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
Your runtime beats 47.05 % of python submissions.
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
"""
Algorithm:
* Count the number of occurrences of each
character in both s and t and compare if they
are equal
Your runtime beats 35.24 % of python submissions.
"""
return sorted(s) == sorted(t)
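    # A hypothetical alternative (not part of the original submission) that follows
    # the counting idea described in the docstring; Counter also copes with
    # arbitrary unicode characters, addressing the follow-up question.
    def isAnagramByCounting(self, s, t):
        from collections import Counter
        return Counter(s) == Counter(t)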
|
from keep_current_storage.use_cases import request_objects as ro
def test_build_document_list_request_object_without_parameters():
req = ro.DocumentListRequestObject()
assert req.filters is None
assert bool(req) is True
def test_build_document_list_request_object_from_empty_dict():
req = ro.DocumentListRequestObject.from_dict({})
assert req.filters is None
assert bool(req) is True
def test_build_document_list_request_object_with_empty_filters():
req = ro.DocumentListRequestObject(filters={})
assert req.filters == {}
assert bool(req) is True
def test_build_document_list_request_object_from_dict_with_empty_filters():
req = ro.DocumentListRequestObject.from_dict({'filters': {}})
assert req.filters == {}
assert bool(req) is True
def test_build_document_list_request_object_with_filters():
req = ro.DocumentListRequestObject(filters={'a': 1, 'b': 2})
assert req.filters == {'a': 1, 'b': 2}
assert bool(req) is True
def test_build_document_list_request_object_from_dict_with_filters():
req = ro.DocumentListRequestObject.from_dict({'filters': {'a': 1, 'b': 2}})
assert req.filters == {'a': 1, 'b': 2}
assert bool(req) is True
def test_build_document_list_request_object_from_dict_with_invalid_filters():
req = ro.DocumentListRequestObject.from_dict({'filters': 5})
assert req.has_errors()
assert req.errors[0]['parameter'] == 'filters'
assert bool(req) is False
|
import os
import math
from typing import List
import numpy as np
import cv2
from detection.acuro_markers.obstacle_detection import ObstacleDetection
from detection.acuro_markers.robot_detection import RobotDetection
from detection.acuro_markers.marker_position import MarkerPosition
from detection.position_calculator import PositionCalculator
class ObstacleRobotFinder:
def __init__(self):
self.obstacle_detection = ObstacleDetection()
self.robot_detection = RobotDetection()
self.position_calculator = PositionCalculator()
#mm
self.image_width = 1600
self.image_height = 904
self.obstacle_height = 412
self.robot_height = 254
self.aruco_marker_width = 80
self.aruco_robot_marker_width = 143
#pixel
self.obstacle_radius = 42
self.distance_between_center_and_prehenseur = 130
self.distortion_coefficients = np.array(
[
[
0.055764032942161694,
-0.1700050453380352,
-0.0028056916670508593,
0.0006434607299710345,
0.0331770702717552
]
])
self.camera_matrix = np.array([
[
1321.5030177675765,
0.0,
763.385168511886
],
[
0.0,
1327.9592573621323,
494.93250836436187
],
[
0.0,
0.0,
1.0
]
])
def read_image(self, image):
        script_dir = os.path.dirname(__file__)
        rel_path = image
        abs_file_path = os.path.join(script_dir, rel_path)
        image = cv2.imread(abs_file_path)
return image
def detect_obstacle_position(self, image, DEBUG=False):
        obstacles_position = self.obstacle_detection.detect_aruco_marker_on_obstacle(image)
obstacles_3d_positions = self.obstacle_detection\
.calculate_obstacle_position(obstacles_position=obstacles_position,
aruco_marker_width=self.aruco_marker_width,
camera_matrix=self.camera_matrix,
distortion_coefficient=self.distortion_coefficients)
for marker_position in obstacles_3d_positions:
marker_position.set_markers_points(np.array([[0.0, 0.0, self.obstacle_height]]))
marker_position.set_rotation_vector(np.array([[0.0, 0.0, 0.0]]))
image_copy, obstacles_bottom_position = self.detect_bottom_of_obstacle(
image=image,
markers_position=obstacles_3d_positions)
if DEBUG:
cv2.putText(image_copy, "1", obstacles_bottom_position[0]["center_of_obstacle"],
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 2)
cv2.putText(image_copy, "2", (obstacles_bottom_position[1]["center_of_obstacle"]),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow("Detect obstacle", image_copy)
cv2.waitKey(0)
return obstacles_bottom_position
def detect_bottom_of_obstacle(self, markers_position: List[MarkerPosition], image):
if image is None:
return []
image_copy = image.copy()
obstacles_bottom_position = []
for marker_position in markers_position:
center_of_bottom_obstacle, _ = cv2.projectPoints(
marker_position.get_markers_points(),
marker_position.get_rotation_vector(),
marker_position.get_translation_vector(),
self.camera_matrix,
self.distortion_coefficients
)
center_of_bottom_obstacle = tuple(center_of_bottom_obstacle.reshape(2, ).astype(np.int32))
obstacles_bottom_position.append({
"center_of_obstacle": center_of_bottom_obstacle,
"radius": self.obstacle_radius
})
image_copy = cv2.circle(image_copy, center_of_bottom_obstacle, self.obstacle_radius, (0, 255, 255), 2)
return image_copy, obstacles_bottom_position
def detect_robot(self, image, DEBUG=False):
robot_position, aruco_marker_position = self.robot_detection.detect_aruco_marker_on_robot(
image
)
bottom_left = robot_position["bottom_left"]
bottom_right = robot_position["bottom_right"]
angle_robot = self.position_calculator.calculate_angle_between_two_position(bottom_right, bottom_left)
robot_3d_position = self.robot_detection \
.calculate_3d_robot_position(robot_position=aruco_marker_position,
aruco_marker_width=self.aruco_robot_marker_width,
camera_matrix=self.camera_matrix,
distortion_coefficient=self.distortion_coefficients )
robot_3d_position.set_markers_points(np.array([[0.0, 0.0, self.robot_height]]))
robot_3d_position.set_rotation_vector(np.array([[0.0, 0.0, 0.0]]))
image_copy, center_of_bottom_robot, prehenseur_position = self.detect_bottom_of_robot(
image=image,
marker_position=robot_3d_position,
angle=angle_robot, DEBUG=DEBUG
)
if DEBUG:
cv2.putText(image, "1", bottom_left,
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 2)
cv2.putText(image, "2", bottom_right, cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 2)
cv2.imshow("Detection robot and prehenseur", image_copy)
cv2.waitKey(0)
return center_of_bottom_robot, prehenseur_position, angle_robot
def detect_bottom_of_robot(self, marker_position: MarkerPosition, image, angle, DEBUG):
if image is None:
return None
image_copy = image.copy()
center_of_bottom_of_robot, _ = cv2.projectPoints(
marker_position.get_markers_points(),
marker_position.get_rotation_vector(),
marker_position.get_translation_vector(),
self.camera_matrix,
self.distortion_coefficients
)
center_of_bottom_of_robot = tuple(center_of_bottom_of_robot.reshape(2,).astype(np.int32))
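        # Offset the gripper ("prehenseur") a fixed pixel distance from the robot's
        # centre along the detected angle; the y component is subtracted because
        # image coordinates grow downwards.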
prehenseur_position = (int(center_of_bottom_of_robot[0] +
(self.distance_between_center_and_prehenseur * math.cos(angle))),
int(center_of_bottom_of_robot[1] -
(self.distance_between_center_and_prehenseur * math.sin(angle))))
if DEBUG:
image_copy = cv2.circle(image_copy, center_of_bottom_of_robot, 1, color=(0, 255, 255), thickness=5)
image_copy = cv2.circle(image_copy, prehenseur_position, 1, color=(255, 255, 255), thickness=5)
cv2.putText(image_copy, "Point central base", (center_of_bottom_of_robot[0] - 90,
center_of_bottom_of_robot[1] - 20),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 255), 2)
cv2.putText(image_copy, "Prehenseur", (prehenseur_position[0],
prehenseur_position[1] - 20),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 2)
return image_copy, center_of_bottom_of_robot, prehenseur_position
#AN_IMAGE = "testing7.jpg"
#image = cv2.imread(AN_IMAGE)
#obstacle_robot_finder = ObstacleRobotFinder()
#obstacle_position = obstacle_robot_finder.detect_obstacle_position(image=image, DEBUG=False)
#x, y, angle = obstacle_robot_finder.detect_robot(image, False)
|
import pytest # type: ignore
from conflow.node import NodeList, T, Node # type: ignore
NESTED_VALUES = [
['a', ['b', 'c', ['d', 'e']]]
]
@pytest.mark.parametrize('value,representation', [
([1, 2, 3], "NodeList('test', [1, 2, 3])"),
([42, 3.3, 'abc'], "NodeList('test', [42, 3.3, 'abc'])"),
])
def test_node_list(value: T, representation: str) -> None:
node = NodeList('test', value)
assert repr(node) == representation
assert node == value
assert node != ['missing value']
for el in value:
assert el in node
assert len(node) == len(value)
assert 'missing value' not in node
@pytest.mark.parametrize('value', NESTED_VALUES)
def test_node_list_nested(value: T) -> None:
representation = "NodeList('test', ['a', ['b', 'c', ['d', 'e']]])"
node = NodeList('test', value)
assert repr(node) == representation
assert node == value
assert node[0] == value[0]
assert node[1] == value[1]
assert node[1][2] == value[1][2]
def test_list_iterator():
values = [1, 2.0, 'str', None]
node_list = NodeList('test', values)
for i, node in enumerate(node_list):
assert isinstance(node, Node)
assert node == values[i]
def test_list_item():
values = [1, 2.0, 'str', None, 'last_value']
node_list = NodeList('test', values)
for i in range(len(node_list)):
node = node_list[i]
assert isinstance(node, Node)
assert node == values[i]
with pytest.raises(IndexError):
node_list[42]
assert node_list[-1] == values[-1]
def test_list_value():
values = [1, 2.0, 'str', None, 'last_value']
node_list = NodeList('test', values)
assert node_list() == values
def test_list_keys():
values = [
{1: 1, 2: 2},
{1: 1, 2: 2},
{1: 1, 2: 2},
]
node_list = NodeList('test', values)
assert node_list() == values
|
from unittest import TestCase
import os
import gcnvkernel.io.io_vcf_parsing as io
class test_io_vcf_parsing(TestCase):
def test_read_sample_segments_and_calls(self):
current_dir = os.getcwd()
#for GATK PythonUnitTestRunner/Java tests
test_sub_dir = current_dir + "/src/test/resources/org/broadinstitute/hellbender/tools/copynumber/gcnv-postprocess/"
# for running in IntelliJ/Python tests
# test_sub_dir = current_dir + "/../../../../../../../../src/test/resources/org/broadinstitute/hellbender/tools/copynumber/gcnv-postprocess/"
clustered_vcf = test_sub_dir + 'clustered.1000G.vcf.gz'
pesky_intervals_vcf = test_sub_dir + 'genotyped-intervals-HG00099.mapped.ILLUMINA.bwa.GBR.exome.20130415.bam.cram.vcf.gz'
pesky_sample_name = 'HG00099'
contig = '14'
debug_path = io.read_sample_segments_and_calls(pesky_intervals_vcf, clustered_vcf, pesky_sample_name, contig)
self.assertTrue(len(debug_path) == 12) #should match number of chr 14 lines in clustered VCF (12)
clustered_vcf = current_dir + '/src/test/resources/org/broadinstitute/hellbender/tools/copynumber/clustering/threeSamples.vcf.gz'
intervals_vcf = test_sub_dir + 'intervals_output_SAMPLE_000.vcf.gz'
sample_name = 'SAMPLE_000'
contig = "1"
path = io.read_sample_segments_and_calls(intervals_vcf, clustered_vcf, sample_name, contig)
# no segments on contig 1
self.assertTrue(len(path) == 0)
contig = "2"
path = io.read_sample_segments_and_calls(intervals_vcf, clustered_vcf, sample_name, contig)
self.assertTrue(len(path) == 2)
sample1_intervals_vcf = test_sub_dir + 'intervals_output_SAMPLE_001.vcf.gz'
sample1_name = 'SAMPLE_001'
contig = "2"
path1 = io.read_sample_segments_and_calls(sample1_intervals_vcf, clustered_vcf, sample1_name, contig)
# all samples should have the same number of intervals
self.assertTrue(len(path) == len(path1))
|
from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
import pyotp
import pprint
import tweepy
import secrets
# Uses Tweepy to access the Twitter API
print("Logging in Twitter's API...")
auth = tweepy.OAuthHandler(secrets.API_Key, secrets.API_Secret_Key)
auth.set_access_token(secrets.Access_Token, secrets.Token_Secret)
api = tweepy.API(auth)
print("Success!")
print("Waiting for a match...")
#Filters out mentions and RTs
def from_creator(status):
if hasattr(status, 'retweeted_status'):
return False
    elif status.in_reply_to_status_id is not None:
        return False
    elif status.in_reply_to_screen_name is not None:
        return False
    elif status.in_reply_to_user_id is not None:
        return False
else:
return True
# Listens for Musk tweeting about Doge and opens a DOGEUSDT position on Binance Futures for a default of 1000 DOGE
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
if from_creator(status):
tweet = status.text.lower()
if "doge" in tweet:
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.post_order(symbol="DOGEUSDT", side=OrderSide.BUY, ordertype=OrderType.MARKET, quantity=1000)
print ("Elon Musk just tweeted about DOGE!!")
print("Rushing to buy a bag od doggies")
PrintBasic.print_obj(result)
return True
return True
def on_error(self, status_code):
if status_code == 420:
print("Error 420")
#returning False in on_error disconnects the stream
return False
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
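# 44196397 is Elon Musk's Twitter user id, so the stream only follows his tweets.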
myStream.filter(follow=['44196397'])
|
from collections import deque
class ConvertItem(set):
def __str__(self):
return "NFAStates: {0}".format(" ".join(str(state.index) for state in self))
class Converter:
def __init__(self):
self.dfa_class = None
self.nfa = None
self.eq_symbol_set = None
self.dfa_start_state = None
self.dfa_accepting_states = None
self.created = None
self.pending = None
@staticmethod
def sorted_state_indexes(state_set):
return tuple(state.index for state in sorted(state_set, key=lambda state: state.index))
def pre_configure(self, nfa, eq_symbols, dfa_class):
self.nfa = nfa
self.dfa_class = dfa_class
self.eq_symbol_set = eq_symbols
self.dfa_accepting_states = set()
self.created = {}
self.pending = deque()
init_nfa_state_set = self.nfa.start_state.epsilon_closure()
state_indexes = self.sorted_state_indexes(init_nfa_state_set)
dfa_start_state = self.new_dfa_state(init_nfa_state_set)
self.created[state_indexes] = dfa_start_state
self.dfa_start_state = dfa_start_state
self.pending.append(dfa_start_state)
def new_dfa_state(self, nfa_state_set):
dfa_state = self.dfa_class.StateClass()
dfa_state.items.nfa_states = ConvertItem(nfa_state_set)
return dfa_state
def _convert(self):
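        # Classic subset construction: each DFA state stands for a set of NFA
        # states; for every symbol we take the union of NFA states reachable from
        # the current set and create/link the corresponding DFA state on demand.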
while self.pending:
src_dfa_state = self.pending.popleft()
for symbol in self.eq_symbol_set.sigma:
des_nfa_states = set()
for src_nfa_state in src_dfa_state.items.nfa_states:
des_nfa_states.update(src_nfa_state.reach(symbol))
if des_nfa_states:
link_data = self.eq_symbol_set.index(symbol)
indexes = self.sorted_state_indexes(des_nfa_states)
if indexes in self.created:
src_dfa_state.link(link_data, self.created[indexes])
else:
new_dfa_state = self.new_dfa_state(des_nfa_states)
src_dfa_state.link(link_data, new_dfa_state)
self.created[indexes] = new_dfa_state
self.pending.append(new_dfa_state)
for state in des_nfa_states:
if state in self.nfa.accepting_states:
self.dfa_accepting_states.add(new_dfa_state)
def reset(self):
self.dfa_class = None
self.nfa = None
self.eq_symbol_set = None
self.dfa_start_state = None
self.dfa_accepting_states = None
self.created = None
self.pending = None
def convert(self, nfa, eq_symbols, dfa_class):
self.pre_configure(nfa, eq_symbols, dfa_class)
self._convert()
dfa_construction_data = {
"start_state": self.dfa_start_state,
"accepting_states": self.dfa_accepting_states,
"states": set(self.created.values()),
}
dfa = self.dfa_class(**dfa_construction_data)
self.reset()
return dfa
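# Typical usage (a sketch; assumes an NFA, an equivalence-symbol set and a DFA
# class exposing the interfaces used above):
#     dfa = Converter().convert(nfa, eq_symbols, dfa_class)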
|
import torch.nn as nn
from pytorch_tools.modules import bn_from_name
from pytorch_tools.modules.residual import conv1x1
from pytorch_tools.modules.residual import conv3x3
from pytorch_tools.modules.decoder import UnetDecoderBlock
from pytorch_tools.utils.misc import initialize
from .base import EncoderDecoder
from .encoders import get_encoder
class UnetCenterBlock(UnetDecoderBlock):
def forward(self, x):
        return self.block(x)
class UnetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
decoder_channels=(256, 128, 64, 32, 16),
final_channels=1,
center=False,
drop_rate=0,
output_stride=32,
attn_type=None,
**bn_params, # norm layer, norm_act
):
super().__init__()
if center:
channels = encoder_channels[0]
self.center = UnetCenterBlock(channels, channels)
else:
self.center = None
in_chs = self.compute_channels(encoder_channels, decoder_channels)
kwargs = {**bn_params, "attn_type": attn_type}
self.layer1 = UnetDecoderBlock(in_chs[0], decoder_channels[0], upsample=output_stride == 32, **kwargs)
self.layer2 = UnetDecoderBlock(in_chs[1], decoder_channels[1], upsample=output_stride != 8, **kwargs)
self.layer3 = UnetDecoderBlock(in_chs[2], decoder_channels[2], **kwargs)
self.layer4 = UnetDecoderBlock(in_chs[3], decoder_channels[3], **kwargs)
self.layer5 = UnetDecoderBlock(in_chs[4], decoder_channels[4], **kwargs)
self.dropout = nn.Dropout2d(drop_rate, inplace=False) # inplace=True raises a backprop error
self.final_conv = conv1x1(decoder_channels[4], final_channels, bias=True)
def compute_channels(self, encoder_channels, decoder_channels):
channels = [
encoder_channels[0] + encoder_channels[1],
encoder_channels[2] + decoder_channels[0],
encoder_channels[3] + decoder_channels[1],
encoder_channels[4] + decoder_channels[2],
0 + decoder_channels[3],
]
return channels
def forward(self, x):
encoder_head = x[0]
skips = x[1:]
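        # x holds the encoder feature maps ordered from the deepest (head) to the
        # shallowest; each decoder layer fuses its upsampled input with the
        # corresponding skip connection.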
if self.center:
encoder_head = self.center(encoder_head)
x = self.layer1([encoder_head, skips[0]])
x = self.layer2([x, skips[1]])
x = self.layer3([x, skips[2]])
x = self.layer4([x, skips[3]])
x = self.layer5([x, None])
x = self.dropout(x)
x = self.final_conv(x)
return x
class Unet(EncoderDecoder):
"""Unet_ is a fully convolution neural network for image semantic segmentation
Args:
encoder_name (str): name of classification model (without last dense layers) used as feature
extractor to build segmentation model.
encoder_weights (str): one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
decoder_channels (List[int]): list of numbers of ``Conv2D`` layer filters in decoder blocks
num_classes (int): a number of classes for output (output shape - ``(batch, classes, h, w)``).
center (bool): if ``True`` add ``Conv2dReLU`` block on encoder head (useful for VGG models)
drop_rate (float): Probability of spatial dropout on last feature map
decoder_attention_type (Union[str, None]): Attention to use in decoder layers. Options are:
`se`, `sse`, `eca`, `scse`. Check code for reference papers and details about each type of attention.
encoder_norm_layer (str): Normalization layer to use. One of 'abn', 'inplaceabn'. The inplace version lowers
memory footprint. But increases backward time. Defaults to 'abn'.
encoder_norm_act (str): Activation for normalizion layer. 'inplaceabn' doesn't support `ReLU` activation.
decoder_norm_layer (str): same as encoder_norm_layer but for decoder
decoder_norm_act (str): same as encoder_norm_act but for decoder
Returns:
``torch.nn.Module``: **Unet**
.. _Unet:
https://arxiv.org/pdf/1505.04597
"""
def __init__(
self,
encoder_name="resnet34",
encoder_weights="imagenet",
decoder_channels=(256, 128, 64, 32, 16),
num_classes=1,
center=False, # usefull for VGG models
output_stride=32,
drop_rate=0,
decoder_attention_type=None,
encoder_norm_layer="abn",
encoder_norm_act="relu",
decoder_norm_layer="abn",
decoder_norm_act="relu",
**encoder_params,
):
if output_stride != 32:
encoder_params["output_stride"] = output_stride
encoder = get_encoder(
encoder_name,
norm_layer=encoder_norm_layer,
norm_act=encoder_norm_act,
encoder_weights=encoder_weights,
**encoder_params,
)
decoder = UnetDecoder(
encoder_channels=encoder.out_shapes,
decoder_channels=decoder_channels,
final_channels=num_classes,
center=center,
drop_rate=drop_rate,
output_stride=output_stride,
attn_type=decoder_attention_type,
norm_layer=bn_from_name(decoder_norm_layer),
norm_act=decoder_norm_act,
)
super().__init__(encoder, decoder)
self.name = f"u-{encoder_name}"
# set last layer bias for better convergence with sigmoid loss
# -4.59 = -np.log((1 - 0.01) / 0.01)
nn.init.constant_(self.decoder.final_conv.bias, -4.59)
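
if __name__ == "__main__":
    # Minimal usage sketch (an illustration, not part of the library): build a
    # randomly initialised Unet and run a dummy forward pass. Assumes torch and
    # the pytorch_tools encoders imported above are available.
    import torch

    model = Unet(encoder_name="resnet34", encoder_weights=None, num_classes=2)
    dummy = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        mask = model(dummy)
    # With the default output_stride of 32 the decoder upsamples back to the
    # input resolution, so mask is expected to have shape (1, 2, 256, 256).
    print(mask.shape)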
|
from .preview import Preview
|
import os, sys, subprocess
from messagebox import Message
class Agent(object):
    def __init__(self, specfile, null):
        spec = specfile
        cmd = 'pyinstaller --onefile %s' % spec
compiles = os.system(cmd)
esc = os.getcwd() + '\\dist\\'
esc2 = os.listdir(esc)
if os.path.isfile(esc + null + '.exe'):
Message("Success","Agent Create Successful !","Info")
else:
Message("Error", "Agent Create Unsuccessful..", "Info")
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Specification of the request object."""
__all__ = ['SetSlice', 'GetSlice', 'DelSlice']
from .base import Expression
from .func import Function
from ..utils import updateitems
from ..crud.update import Update, Slice
class GetSlice(Function):
pass
Expression.__getslice__ = lambda self, i, j: GetSlice(params=[self, Slice(i, j)])
class SetSlice(Function):
def _run(self):
expr, i, j, seq = self.params
updateitems(
self.ctx, expr, lambda item: item[expr.prop].__setslice__(i, j, seq)
)
Expression.__setslice__ = lambda self, i, j, seq: Update(
pset={self.prop: Slice(i, j, seq)}
)
class DelSlice(Function):
def _run(self):
expr, i, j = self.params
updateitems(
self.ctx, expr, lambda item: item[expr.prop].__delslice__(i, j)
)
Expression.__delslice__ = lambda self, i, j: DelSlice(params=[self, i, j])
|
import bs4 as bs
import pickle
import requests
import datetime as dt
import os
import pandas as pd
import pandas_datareader.data as web
def save_sp500_tickers():
    resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'html.parser')  # explicit parser avoids the bs4 warning
    table = soup.find('table', {'class': 'wikitable sortable'})
tickers=[]
for row in table.findAll('tr')[1:]:
ticker=row.findAll('td')[0].text[:-1]
tickers.append(ticker)
with open("sp500tickers.pickle",'wb') as f:
pickle.dump(tickers, f)
return tickers
#save_sp500_tickers()
def get_data_from_yahoo(reload_sp500=False):
if reload_sp500:
tickers=save_sp500_tickers()
else:
with open("sp500tickers.pickle",'rb') as f:
tickers=pickle.load(f)
if not os.path.exists('stock_dfs'):
os.makedirs('stock_dfs')
    # Make a local directory for the per-ticker CSVs;
    # Yahoo pulls take time, so cache the data on disk.
start= dt.datetime(2000,1,1)
end=dt.datetime(2016,12,31)
for ticker in tickers:
print(ticker)
if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
try:
df=web.DataReader(ticker, 'yahoo', start, end)
df.to_csv('stock_dfs/{}.csv'.format(ticker))
            except Exception as e:
                print("Error downloading {}: {}".format(ticker, e))
continue
else:
print('already have {}'.format(ticker))
            # We only download tickers whose CSV is missing. In practice these
            # values are updated regularly; we keep one CSV per S&P 500 ticker
            # in the folder and skip the ones that are already there.
get_data_from_yahoo()
#Getting the data
|
from binance.websocket.spot.websocket_client import SpotWebsocketClient
from binance.lib.utils import config_logging
import time
import logging
config_logging(logging, logging.DEBUG)
def message_handler(message):
print(message)
ws_client = SpotWebsocketClient()
ws_client.start()
ws_client.mini_ticker(
symbol='bnbusdt',
id=1,
callback=message_handler,
)
# Combine selected streams
ws_client.instant_subscribe(
stream=['bnbusdt@bookTicker', 'ethusdt@bookTicker'],
callback=message_handler,
)
time.sleep(10)
ws_client.stop()
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
from .ttypes import *
from thrift.Thrift import TProcessor
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
try:
from thrift.protocol import fastproto
except:
fastproto = None
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
from thrift.util.Decorators import *
class Iface:
def init(self, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None, int9=None, int10=None, int11=None, int12=None, int13=None, int14=None, int15=None, int16=None):
"""
Parameters:
- int1
- int2
- int3
- int4
- int5
- int6
- int7
- int8
- int9
- int10
- int11
- int12
- int13
- int14
- int15
- int16
"""
pass
class ContextIface:
def init(self, handler_ctx, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None, int9=None, int10=None, int11=None, int12=None, int13=None, int14=None, int15=None, int16=None):
"""
Parameters:
- int1
- int2
- int3
- int4
- int5
- int6
- int7
- int8
- int9
- int10
- int11
- int12
- int13
- int14
- int15
- int16
"""
pass
# HELPER FUNCTIONS AND STRUCTURES
class init_args:
"""
Attributes:
- int1
- int2
- int3
- int4
- int5
- int6
- int7
- int8
- int9
- int10
- int11
- int12
- int13
- int14
- int15
- int16
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.int1 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.int2 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.int3 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.int4 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.int5 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.int6 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.int7 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.int8 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I64:
self.int9 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I64:
self.int10 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.I64:
self.int11 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.I64:
self.int12 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.I64:
self.int13 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.I64:
self.int14 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.I64:
self.int15 = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 16:
if ftype == TType.I64:
self.int16 = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('init_args')
if self.int1 != None:
oprot.writeFieldBegin('int1', TType.I64, 1)
oprot.writeI64(self.int1)
oprot.writeFieldEnd()
if self.int2 != None:
oprot.writeFieldBegin('int2', TType.I64, 2)
oprot.writeI64(self.int2)
oprot.writeFieldEnd()
if self.int3 != None:
oprot.writeFieldBegin('int3', TType.I64, 3)
oprot.writeI64(self.int3)
oprot.writeFieldEnd()
if self.int4 != None:
oprot.writeFieldBegin('int4', TType.I64, 4)
oprot.writeI64(self.int4)
oprot.writeFieldEnd()
if self.int5 != None:
oprot.writeFieldBegin('int5', TType.I64, 5)
oprot.writeI64(self.int5)
oprot.writeFieldEnd()
if self.int6 != None:
oprot.writeFieldBegin('int6', TType.I64, 6)
oprot.writeI64(self.int6)
oprot.writeFieldEnd()
if self.int7 != None:
oprot.writeFieldBegin('int7', TType.I64, 7)
oprot.writeI64(self.int7)
oprot.writeFieldEnd()
if self.int8 != None:
oprot.writeFieldBegin('int8', TType.I64, 8)
oprot.writeI64(self.int8)
oprot.writeFieldEnd()
if self.int9 != None:
oprot.writeFieldBegin('int9', TType.I64, 9)
oprot.writeI64(self.int9)
oprot.writeFieldEnd()
if self.int10 != None:
oprot.writeFieldBegin('int10', TType.I64, 10)
oprot.writeI64(self.int10)
oprot.writeFieldEnd()
if self.int11 != None:
oprot.writeFieldBegin('int11', TType.I64, 11)
oprot.writeI64(self.int11)
oprot.writeFieldEnd()
if self.int12 != None:
oprot.writeFieldBegin('int12', TType.I64, 12)
oprot.writeI64(self.int12)
oprot.writeFieldEnd()
if self.int13 != None:
oprot.writeFieldBegin('int13', TType.I64, 13)
oprot.writeI64(self.int13)
oprot.writeFieldEnd()
if self.int14 != None:
oprot.writeFieldBegin('int14', TType.I64, 14)
oprot.writeI64(self.int14)
oprot.writeFieldEnd()
if self.int15 != None:
oprot.writeFieldBegin('int15', TType.I64, 15)
oprot.writeI64(self.int15)
oprot.writeFieldEnd()
if self.int16 != None:
oprot.writeFieldBegin('int16', TType.I64, 16)
oprot.writeI64(self.int16)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
value = pprint.pformat(self.int1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int1=%s' % (value))
value = pprint.pformat(self.int2, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int2=%s' % (value))
value = pprint.pformat(self.int3, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int3=%s' % (value))
value = pprint.pformat(self.int4, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int4=%s' % (value))
value = pprint.pformat(self.int5, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int5=%s' % (value))
value = pprint.pformat(self.int6, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int6=%s' % (value))
value = pprint.pformat(self.int7, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int7=%s' % (value))
value = pprint.pformat(self.int8, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int8=%s' % (value))
value = pprint.pformat(self.int9, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int9=%s' % (value))
value = pprint.pformat(self.int10, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int10=%s' % (value))
value = pprint.pformat(self.int11, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int11=%s' % (value))
value = pprint.pformat(self.int12, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int12=%s' % (value))
value = pprint.pformat(self.int13, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int13=%s' % (value))
value = pprint.pformat(self.int14, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int14=%s' % (value))
value = pprint.pformat(self.int15, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int15=%s' % (value))
value = pprint.pformat(self.int16, indent=0)
value = padding.join(value.splitlines(True))
L.append(' int16=%s' % (value))
return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
all_structs.append(init_args)
init_args.thrift_spec = (
None, # 0
(1, TType.I64, 'int1', None, None, 2, ), # 1
(2, TType.I64, 'int2', None, None, 2, ), # 2
(3, TType.I64, 'int3', None, None, 2, ), # 3
(4, TType.I64, 'int4', None, None, 2, ), # 4
(5, TType.I64, 'int5', None, None, 2, ), # 5
(6, TType.I64, 'int6', None, None, 2, ), # 6
(7, TType.I64, 'int7', None, None, 2, ), # 7
(8, TType.I64, 'int8', None, None, 2, ), # 8
(9, TType.I64, 'int9', None, None, 2, ), # 9
(10, TType.I64, 'int10', None, None, 2, ), # 10
(11, TType.I64, 'int11', None, None, 2, ), # 11
(12, TType.I64, 'int12', None, None, 2, ), # 12
(13, TType.I64, 'int13', None, None, 2, ), # 13
(14, TType.I64, 'int14', None, None, 2, ), # 14
(15, TType.I64, 'int15', None, None, 2, ), # 15
(16, TType.I64, 'int16', None, None, 2, ), # 16
)
init_args.thrift_struct_annotations = {
}
init_args.thrift_field_annotations = {
}
def init_args__init__(self, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None, int9=None, int10=None, int11=None, int12=None, int13=None, int14=None, int15=None, int16=None,):
self.int1 = int1
self.int2 = int2
self.int3 = int3
self.int4 = int4
self.int5 = int5
self.int6 = int6
self.int7 = int7
self.int8 = int8
self.int9 = int9
self.int10 = int10
self.int11 = int11
self.int12 = int12
self.int13 = int13
self.int14 = int14
self.int15 = int15
self.int16 = int16
init_args.__init__ = init_args__init__
def init_args__setstate__(self, state):
state.setdefault('int1', None)
state.setdefault('int2', None)
state.setdefault('int3', None)
state.setdefault('int4', None)
state.setdefault('int5', None)
state.setdefault('int6', None)
state.setdefault('int7', None)
state.setdefault('int8', None)
state.setdefault('int9', None)
state.setdefault('int10', None)
state.setdefault('int11', None)
state.setdefault('int12', None)
state.setdefault('int13', None)
state.setdefault('int14', None)
state.setdefault('int15', None)
state.setdefault('int16', None)
self.__dict__ = state
init_args.__getstate__ = lambda self: self.__dict__.copy()
init_args.__setstate__ = init_args__setstate__
class init_result:
"""
Attributes:
- success
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('init_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
value = pprint.pformat(self.success, indent=0)
value = padding.join(value.splitlines(True))
L.append(' success=%s' % (value))
return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
all_structs.append(init_result)
init_result.thrift_spec = (
(0, TType.I64, 'success', None, None, 2, ), # 0
)
init_result.thrift_struct_annotations = {
}
init_result.thrift_field_annotations = {
}
def init_result__init__(self, success=None,):
self.success = success
init_result.__init__ = init_result__init__
def init_result__setstate__(self, state):
state.setdefault('success', None)
self.__dict__ = state
init_result.__getstate__ = lambda self: self.__dict__.copy()
init_result.__setstate__ = init_result__setstate__
class Client(Iface):
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self._iprot.trans.close()
if self._iprot is not self._oprot:
self._oprot.trans.close()
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def init(self, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None, int9=None, int10=None, int11=None, int12=None, int13=None, int14=None, int15=None, int16=None):
"""
Parameters:
- int1
- int2
- int3
- int4
- int5
- int6
- int7
- int8
- int9
- int10
- int11
- int12
- int13
- int14
- int15
- int16
"""
self.send_init(int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, int14, int15, int16)
return self.recv_init()
def send_init(self, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None, int9=None, int10=None, int11=None, int12=None, int13=None, int14=None, int15=None, int16=None):
self._oprot.writeMessageBegin('init', TMessageType.CALL, self._seqid)
args = init_args()
args.int1 = int1
args.int2 = int2
args.int3 = int3
args.int4 = int4
args.int5 = int5
args.int6 = int6
args.int7 = int7
args.int8 = int8
args.int9 = int9
args.int10 = int10
args.int11 = int11
args.int12 = int12
args.int13 = int13
args.int14 = int14
args.int15 = int15
args.int16 = int16
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_init(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = init_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "init failed: unknown result")
class Processor(Iface, TProcessor):
_onewayMethods = ()
def __init__(self, handler):
TProcessor.__init__(self)
self._handler = handler
self._processMap = {}
self._priorityMap = {}
self._processMap["init"] = Processor.process_init
self._priorityMap["init"] = TPriority.NORMAL
def onewayMethods(self):
l = []
l.extend(Processor._onewayMethods)
return tuple(l)
@process_main()
def process(self,): pass
@process_method(init_args, oneway=False)
def process_init(self, args, handler_ctx):
result = init_result()
try:
result.success = self._handler.init(args.int1, args.int2, args.int3, args.int4, args.int5, args.int6, args.int7, args.int8, args.int9, args.int10, args.int11, args.int12, args.int13, args.int14, args.int15, args.int16)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'init', ex)
result = Thrift.TApplicationException(message=str(ex))
return result
Iface._processor_type = Processor
class ContextProcessor(ContextIface, TProcessor):
_onewayMethods = ()
def __init__(self, handler):
TProcessor.__init__(self)
self._handler = handler
self._processMap = {}
self._priorityMap = {}
self._processMap["init"] = ContextProcessor.process_init
self._priorityMap["init"] = TPriority.NORMAL
def onewayMethods(self):
l = []
l.extend(ContextProcessor._onewayMethods)
return tuple(l)
@process_main()
def process(self,): pass
@process_method(init_args, oneway=False)
def process_init(self, args, handler_ctx):
result = init_result()
try:
result.success = self._handler.init(handler_ctx, args.int1, args.int2, args.int3, args.int4, args.int5, args.int6, args.int7, args.int8, args.int9, args.int10, args.int11, args.int12, args.int13, args.int14, args.int15, args.int16)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'init', ex)
result = Thrift.TApplicationException(message=str(ex))
return result
ContextIface._processor_type = ContextProcessor
fix_spec(all_structs)
del all_structs
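# Illustrative usage sketch (not part of the generated code): driving the generated Client
# with the standard Apache Thrift Python runtime. The host, port and running service are
# assumptions made purely for this example.
#
#     from thrift.transport import TSocket, TTransport
#     from thrift.protocol import TBinaryProtocol
#
#     socket = TSocket.TSocket("localhost", 9090)
#     transport = TTransport.TBufferedTransport(socket)
#     protocol = TBinaryProtocol.TBinaryProtocol(transport)
#     transport.open()
#     with Client(protocol) as client:
#         result = client.init(int1=1, int2=2)  # the remaining intN arguments default to None
#     # Client.__exit__ closes the underlying transport.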
|
import smtplib
import mimetypes
from email import encoders
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from programy.utils.logging.ylogger import YLogger
from programy.utils.email.config import EmailConfiguration
class EmailSender:
def __init__(self, config: EmailConfiguration):
self._config = config
self._attachments = []
    def _add_mime_attachments(self, msg, attachments):
        for attachment in attachments:
            if isinstance(attachment, tuple):
                self._add_attachment(msg, attachment[0], attachment[1], attachment[2])
            else:
                self._add_attachment(msg, attachment)
def _guess_mime_type(self, path):
return mimetypes.guess_type(path)
def _split_ctype(self, ctype):
return ctype.split('/', 1)
def _get_ctype_and_attachment(self, path, encoding):
ctype, attachment_encoding = self._guess_mime_type(path)
if ctype is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
if attachment_encoding is None:
if encoding is not None:
attachment_encoding = encoding
else:
# No guess could be made, so we default to utf-8
attachment_encoding = "utf-8"
return ctype, attachment_encoding
def _attach_text(self, msg, path, encoding, subtype):
with open(path, encoding=encoding) as fp:
# Note: we should handle calculating the charset
attach = MIMEText(fp.read(), _subtype=subtype)
msg.attach(attach)
def _attach_image(self, msg, path, subtype):
with open(path, 'rb') as fp:
attach = MIMEImage(fp.read(), _subtype=subtype)
msg.attach(attach)
def _attach_audio(self, msg, path, subtype):
with open(path, 'rb') as fp:
attach = MIMEAudio(fp.read(), _subtype=subtype)
msg.attach(attach)
def _attach_binary(self, msg, path, mimetype, subtype):
# No specific mime type, so we shoot for a binary file
with open(path, 'rb') as fp:
attach = MIMEBase(mimetype, subtype)
attach.set_payload(fp.read())
encoders.encode_base64(attach)
msg.attach(attach)
    def _add_attachment(self, msg, path, ctype=None, encoding=None):
if ctype is None:
ctype, encoding = self._get_ctype_and_attachment(path, encoding)
if encoding is None:
encoding = "utf-8"
mimetype, subtype = self._split_ctype(ctype)
if mimetype == 'text':
self._attach_text(msg, path, encoding, subtype)
elif mimetype == 'image':
self._attach_image(msg, path, subtype)
elif mimetype == 'audio':
self._attach_audio(msg, path, subtype)
else:
self._attach_binary(msg, path, mimetype, subtype)
def _smtp_server(self, host, port):
return smtplib.SMTP(host, port) # pragma: no cover
def _send_message(self, host, port, username, password, msg):
YLogger.info(self, "Email sender starting")
server = self._smtp_server(host, port)
server.ehlo()
server.starttls()
YLogger.info(self, "Email sender logging in")
server.login(username, password)
YLogger.info(self, "Email sender sending")
result = server.send_message(msg)
YLogger.info(self, "Email sender quiting")
server.quit()
return result
def send(self, to, subject, message, attachments=None):
try:
if attachments:
YLogger.info(self, "Email sender adding mime attachment")
msg = MIMEMultipart()
msg.attach(MIMEText(message))
                self._add_mime_attachments(msg, attachments)
else:
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = self._config.from_addr
msg['To'] = to
result = self._send_message(self._config.host,
self._config.port,
self._config.username,
self._config.password,
msg)
if result:
for email, error in result.items():
YLogger.error(None, "Email send failed: [%d] - [%s]", email, error)
except Exception as e:
YLogger.exception(self, "Email sender failed", e)
|
from starepandas.io.granules.granule import Granule
import starepandas.io.s3
import datetime
import numpy
def get_hdfeos_metadata(file_path):
    hdf = starepandas.io.s3.sd_wrapper(file_path)
metadata = {}
metadata['ArchiveMetadata'] = get_metadata_group(hdf, 'ArchiveMetadata')
metadata['StructMetadata'] = get_metadata_group(hdf, 'StructMetadata')
metadata['CoreMetadata'] = get_metadata_group(hdf, 'CoreMetadata')
return metadata
def get_metadata_group(hdf, group_name):
metadata_group = {}
keys = [s for s in hdf.attributes().keys() if group_name in s]
for key in keys:
string = hdf.attributes()[key]
m = parse_hdfeos_metadata(string)
metadata_group = {**metadata_group, **m}
return metadata_group
def parse_hdfeos_metadata(string):
out = {}
lines0 = [i.replace('\t','') for i in string.split('\n')]
lines = []
for line in lines0:
if "=" in line:
key = line.split('=')[0]
value = '='.join(line.split('=')[1:])
lines.append(key.strip()+'='+value.strip())
else:
lines.append(line)
i = -1
while i < (len(lines))-1:
i += 1
line = lines[i]
if "=" in line:
key = line.split('=')[0]
value = '='.join(line.split('=')[1:])
if key in ['GROUP', 'OBJECT']:
endIdx = lines[i+1:].index('END_{}={}'.format(key, value))
endIdx += i+1
out[value] = parse_hdfeos_metadata("\n".join(lines[i+1:endIdx]))
i = endIdx
elif ('END_GROUP' not in key) and ('END_OBJECT' not in key):
out[key] = str(value)
return out
class Modis(Granule):
def __init__(self, file_path, sidecar_path=None):
super(Modis, self).__init__(file_path, sidecar_path)
self.hdf = starepandas.io.s3.sd_wrapper(file_path)
def read_latlon(self, track_first=False):
self.lon = self.hdf.select('Longitude').get().astype(numpy.double)
self.lat = self.hdf.select('Latitude').get().astype(numpy.double)
if track_first:
self.lon = numpy.ascontiguousarray(self.lon.transpose())
self.lat = numpy.ascontiguousarray(self.lat.transpose())
def read_timestamps(self):
meta = get_hdfeos_metadata(self.file_path)
meta_group = meta['CoreMetadata']['INVENTORYMETADATA']['RANGEDATETIME']
        beginning_date = meta_group['RANGEBEGINNINGDATE']['VALUE']
        beginning_time = meta_group['RANGEBEGINNINGTIME']['VALUE']
        end_date = meta_group['RANGEENDINGDATE']['VALUE']
        end_time = meta_group['RANGEENDINGTIME']['VALUE']
        self.ts_start = datetime.datetime.strptime(beginning_date + beginning_time, '"%Y-%m-%d""%H:%M:%S.%f"')
self.ts_end = datetime.datetime.strptime(end_date+end_time, '"%Y-%m-%d""%H:%M:%S.%f"')
class Mod09(Modis):
def __init__(self, file_path, sidecar_path=None):
super(Mod09, self).__init__(file_path)
self.nom_res = '1km'
def read_data(self):
        for dataset_name in (name for name in self.hdf.datasets() if '1km' in name):
            self.data[dataset_name] = self.hdf.select(dataset_name).get()
class Mod05(Modis):
def __init__(self, file_path, sidecar_path=None):
super(Mod05, self).__init__(file_path, sidecar_path)
self.nom_res = '5km'
def read_data(self):
dataset_names = ['Scan_Start_Time', 'Solar_Zenith', 'Solar_Azimuth',
'Sensor_Zenith', 'Sensor_Azimuth', 'Water_Vapor_Infrared']
dataset_names2 = ['Cloud_Mask_QA', 'Water_Vapor_Near_Infrared',
'Water_Vaport_Corretion_Factors', 'Quality_Assurance_Near_Infrared', 'Quality_Assurance_Infrared']
for dataset_name in dataset_names:
self.data[dataset_name] = self.hdf.select(dataset_name).get()
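# Illustrative usage sketch (not part of starepandas): reading geolocation, timestamps and
# the 5 km science datasets from a MOD05 granule. The file name is a placeholder and a
# locally available, pyhdf-readable HDF4 file is assumed.
def _example_read_mod05(file_path='MOD05_L2.A2019336.0000.061.hdf'):
    granule = Mod05(file_path)
    granule.read_latlon(track_first=True)   # populates granule.lat / granule.lon
    granule.read_timestamps()               # populates granule.ts_start / granule.ts_end
    granule.read_data()                     # populates granule.data
    return granule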
|
#!/usr/bin/env python3
#
# utils.py
"""
Utility functions.
"""
#
# Copyright © 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
import sys
import textwrap
from typing import Any, Dict, NamedTuple, Optional, Type, Union
# 3rd party
import attr
import prettyprinter # type: ignore
from domdf_python_tools.doctools import prettify_docstrings
from domdf_python_tools.stringlist import StringList
from enum_tools import StrEnum
__all__ = [
"from_iso_zulu",
"RateType",
"Region",
"MeterPointDetails",
"add_repr",
]
# stdlib
from datetime import datetime, timedelta, timezone
if sys.version_info[:2] < (3, 7):
# 3rd party
from backports.datetime_fromisoformat import MonkeyPatch # nodep
MonkeyPatch.patch_fromisoformat()
# def format_datetime(dt: datetime) -> str:
# """
# Format a :class:`datetime.datetime` object to a string in
# :wikipedia:`ISO 8601` format.
#
# :param dt:
# """
#
# return dt.strftime("%Y-%m-%dT:")
def from_iso_zulu(the_datetime: Union[str, datetime, None]) -> Optional[datetime]:
"""
Constructs a :class:`datetime.datetime` object from an
`ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ format string.
This function understands the character ``Z`` as meaning Zulu time (GMT/UTC).
:param the_datetime:
""" # noqa: D400
if the_datetime is None:
return the_datetime
elif isinstance(the_datetime, datetime):
return the_datetime
else:
return datetime.fromisoformat( # type: ignore
the_datetime.replace('Z', "+00:00"),
)
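# Illustrative examples (values invented for the example):
#   from_iso_zulu("2021-06-01T00:30:00Z")       -> 2021-06-01 00:30:00+00:00
#   from_iso_zulu("2021-06-01T00:30:00+01:00")  -> 2021-06-01 00:30:00+01:00
#   from_iso_zulu(None)                         -> None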
class RateType(StrEnum):
"""
Enumeration of different rate types.
"""
StandingCharge = "standing-charges"
StandardUnitRate = "standard-unit-rates"
DayUnitRate = "day-unit-rates"
NightUnitRate = "night-unit-rates"
class Region(StrEnum):
"""
Enumeration of different electricity supply regions.
The different regions can be seen on the following map:
.. image:: pes_boundaries.png
:width: 300
:alt: Electricity Regions
"""
Eastern = "_A" # Eastern Electricity
EastMidlands = "_B" # East Midlands Electricity
London = "_C" # London Electricity
Merseyside = "_D" # Merseyside and North Wales Electricity Board
NorthWales = "_D" # Merseyside and North Wales Electricity Board
Midlands = "_E" # Midlands Electricity
NorthEastern = "_F" # North Eastern Electricity Board
NorthWestern = "_G" # North Western Electricity Board
Southern = "_H" # Southern Electric
SouthEastern = "_J" # South Eastern Electricity Board
SouthWales = "_K" # South Wales Electricity
SouthWestern = "_L" # South Western Electricity
Yorkshire = "_M" # Yorkshire Electricity
SouthScotland = "_N" # South of Scotland Electricity Board
NorthScotland = "_P" # North of Scotland Hydro Board
@prettify_docstrings
class MeterPointDetails(NamedTuple):
"""
Information about a meter point.
:param mpan: The meter point access number.
:param gsp: The grid supply point/region that the meter point is located in.
:param profile_class: The profile class of the meter point.
* **Profile Class 1** -- Domestic Unrestricted Customers
* **Profile Class 2** -- Domestic Economy 7 Customers
* **Profile Class 3** -- Non-Domestic Unrestricted Customers
* **Profile Class 4** -- Non-Domestic Economy 7 Customers
* **Profile Class 5** -- Non-Domestic Maximum Demand (MD) Customers with a Peak Load Factor (LF) of less than 20%
* **Profile Class 6** -- Non-Domestic Maximum Demand Customers with a Peak Load Factor between 20% and 30%
* **Profile Class 7** -- Non-Domestic Maximum Demand Customers with a Peak Load Factor between 30% and 40%
* **Profile Class 8** -- Non-Domestic Maximum Demand Customers with a Peak Load Factor over 40%
Information from https://www.elexon.co.uk/knowledgebase/profile-classes/
.. seealso:: `Load Profiles and their use in Electricity Settlement <https://www.elexon.co.uk/documents/training-guidance/bsc-guidance-notes/load-profiles/>`_ by Elexon
"""
mpan: str
gsp: Region
profile_class: int
@classmethod
def _from_dict(cls, octopus_dict: Dict[str, Any]) -> "MeterPointDetails":
return MeterPointDetails(
mpan=str(octopus_dict["mpan"]),
gsp=Region(octopus_dict["gsp"]),
profile_class=int(octopus_dict["profile_class"]),
)
#: The British Summer Time timezone (UTC+1).
bst = timezone(timedelta(seconds=3600))
#: The Greenwich Mean Time timezone (aka UTC).
gmt = timezone.utc
utc = gmt
def add_repr(cls: Type) -> Type:
"""
Add a pretty-printed ``__repr__`` function to the decorated attrs class.
:param cls:
.. seealso:: :func:`attr_utils.pprinter.pretty_repr`.
"""
if attr.has(cls):
def __repr__(self) -> str:
buf = StringList()
buf.indent_type = " "
buf.append(f"{self.__class__.__module__}.{self.__class__.__qualname__}(")
with buf.with_indent_size(1):
for attrib in attr.fields(self.__class__):
value = getattr(self, attrib.name)
if isinstance(value, datetime):
buf.append(f"{attrib.name}={value.isoformat()!r},")
elif isinstance(value, str):
lines = textwrap.wrap(value, width=80 - len(attrib.name) - 1)
buf.append(f"{attrib.name}={lines.pop(0)!r}")
for line in lines:
buf.append(' ' * len(attrib.name) + ' ' + repr(line))
buf[-1] = f"{buf[-1][len(buf.indent_type) * buf.indent_size:]},"
elif value is None:
buf.append(f"{attrib.name}=None,")
else:
buf.append(f"{attrib.name}={prettyprinter.pformat(value)},")
buf.append(')')
return str(buf)
__repr__.__doc__ = f"Return a string representation of the :class:`~.{cls.__name__}`."
cls.__repr__ = __repr__ # type: ignore
cls.__repr__.__qualname__ = f"{cls.__name__}.__repr__"
cls.__repr__.__module__ = cls.__module__
return cls
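# Usage sketch (illustration only): add_repr on a small attrs class. The class and the
# field values below are invented for the example.
def _example_add_repr():
    @add_repr
    @attr.s(auto_attribs=True)
    class _Tariff:
        name: str
        valid_from: Optional[datetime] = None

    tariff = _Tariff(name="Agile Octopus", valid_from=from_iso_zulu("2021-06-01T00:00:00Z"))
    # datetime fields are rendered via isoformat(); long strings are wrapped.
    print(repr(tariff))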
|
"""Classes for writing to and reading from TFRecord datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import multiprocessing
import os
import numpy as np
import tensorflow as tf
def tfrecord_path_to_metadata_path(tfrecord_path):
"""Generate the path where the metadata JSON file will be saved.
Args:
tfrecord_path: String. The path where the TFRecord file will be saved.
Returns:
String. The path where the metadata JSON file will be saved.
Raises:
ValueError: If the specified `tfrecord_path` ends with '.json'.
"""
root, ext = os.path.splitext(tfrecord_path)
if ext == '.json':
    raise ValueError('The TFRecord path must not end with ".json". The '
                     'TFRecord path you specified was: %s' % tfrecord_path)
return root + '.json'
class DatasetWriter(object):
"""A class for writing Examples or SequenceExamples to a TFRecord file.
When you save an Example to the TFRecord file, the type and shape of the
  Example's Features are written to a separate metadata file. That type and
shape data is used later by the SmartTFRecordDataset class when reading
from the TFRecord file.
"""
def __init__(self, tfrecord_path):
"""Initialize the writer.
If a file already exists at the specified `tfrecord_path` or the
generated `metadata_path`, they will be deleted.
Args:
tfrecord_path: String. The path where the TFRecord file will be saved.
"""
self.metadata_path = tfrecord_path_to_metadata_path(tfrecord_path)
if os.path.exists(tfrecord_path):
os.remove(tfrecord_path)
if os.path.exists(self.metadata_path):
os.remove(self.metadata_path)
    # Create the TFRecord file's parent directory if it does not exist.
    tfrecord_dir = os.path.dirname(tfrecord_path)
    if tfrecord_dir and not os.path.exists(tfrecord_dir):
      os.makedirs(tfrecord_dir)
self.writer = tf.python_io.TFRecordWriter(tfrecord_path)
self.metadata = None
@staticmethod
def _get_type(val):
"""Get the type of a value."""
dtype = np.array(val).dtype
if dtype == np.uint8:
return 'bytes'
if np.issubdtype(dtype, np.floating):
return 'float'
if np.issubdtype(dtype, np.integer):
return 'int64'
raise ValueError('Invalid feature type: %s' % dtype.name)
@staticmethod
def _get_shape(val, is_sequence_feature=False):
"""Get the shape of a value."""
shape = list(np.array(val).shape)
if is_sequence_feature:
if len(shape) < 1:
raise(ValueError('SequenceExample feature values must have a rank of '
'at least 1. The provided value had a rank of %d.'
% len(shape)))
shape[0] = -1
elif len(shape) == 1 and shape[0] == 1:
shape = []
return shape
def _build_and_write_metadata(self, features, context_features,
sequence_features):
"""Build the self.metadata dict, and write it to a JSON file."""
if features:
self.metadata = {
'type': 'Example',
'features': {
key: {
'type': self._get_type(val),
'shape': self._get_shape(val)
}
for key, val in features.items()
}
}
else:
self.metadata = {
'type': 'SequenceExample'
}
if context_features:
self.metadata['context_features'] = {
key: {
'type': self._get_type(val),
'shape': self._get_shape(val)
}
for key, val in context_features.items()
}
if sequence_features:
self.metadata['sequence_features'] = {
key: {
'type': self._get_type(val),
'shape': self._get_shape(val, is_sequence_feature=True)
}
for key, val in sequence_features.items()
}
# Write the metadata to a JSON file in an easily readable format.
s = '{\n "type": "' + self.metadata['type'] + '",'
for features_type in ['features', 'context_features', 'sequence_features']:
if features_type in self.metadata:
s += '\n "' + features_type + '": {'
for key, val in sorted(self.metadata[features_type].items()):
s += '\n "' + key + '": ' + json.dumps(val) + ','
s = s[:-1] # Remove the comma after the last item.
s += '\n },'
s = s[:-1] # Remove the comma after the last item.
s += '\n}'
with open(self.metadata_path, 'w') as f:
f.write(s)
@staticmethod
def _feature_to_example_feature(val, type_):
"""Return an Example Feature given the value and type."""
val = np.array(val).flatten()
if type_ == 'bytes':
      val = val.tobytes()
      # BytesList expects an iterable of bytes objects, so wrap the serialized array in a list.
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[val]))
if type_ == 'float':
return tf.train.Feature(float_list=tf.train.FloatList(value=val))
if type_ == 'int64':
return tf.train.Feature(int64_list=tf.train.Int64List(value=val))
raise ValueError('Invalid type: %s' % type_)
@staticmethod
def _feature_to_example_feature_list(val, type_):
"""Return an Example FeatureList given the value and type."""
val = np.array(val)
val = val.reshape(val.shape[0], -1)
    if type_ == 'bytes':
      # Serialize each time step to its own byte string; BytesList needs bytes values, not ints.
      feature = [tf.train.Feature(bytes_list=tf.train.BytesList(value=[row.tobytes()]))
                 for row in val]
elif type_ == 'float':
feature = [tf.train.Feature(float_list=tf.train.FloatList(value=row))
for row in val]
elif type_ == 'int64':
feature = [tf.train.Feature(int64_list=tf.train.Int64List(value=row))
for row in val]
else:
raise ValueError('Invalid type: %s' % type_)
return tf.train.FeatureList(feature=feature)
def write(self, features=None, context_features=None, sequence_features=None):
"""Write an Example or SequenceExample to the TFRecord file.
If `features` are passed in, an Example will be created. Otherwise, if
`context_features`, or `sequence_features`, or both are passed in,
a SequenceExample will be created. At least one of `features`,
`context_features`, or `sequence_features` must be passed in.
Args:
features: A dict of key value pairs where the key is the feature's
name and the value is the feature's value (int, float, list,
np.array). The shape of the value can be multidimensional,
but must be the same between Examples.
context_features: A dict of key value pairs where the key is the context
feature's name and the value is the context feature's value (int,
float, list, np.array). The shape of the value can be
multidimensional, but must be the same between SequenceExamples.
sequence_features: A dict of key value pairs where the key is the
sequence feature's name and the value is the sequence feature's
value (int, float, list, np.array). The shape of the value can be
multidimensional, and must have a rank of at least 1. The length of
the first dimension can be variable, but the rest of the shape must
be the same between SequenceExamples.
Raises:
ValueError: If `features`,`context_features`, and `sequence_features` are
all None.
"""
if not self.metadata:
self._build_and_write_metadata(features, context_features,
sequence_features)
if not features and not context_features and not sequence_features:
raise ValueError('`features`, `context_features`, and '
'`sequence_features` cannot all be None.')
if features:
# Create an Example.
feature_map = {
key: self._feature_to_example_feature(
val, self.metadata['features'][key]['type'])
for key, val in features.items()
}
example = tf.train.Example(
features=tf.train.Features(feature=feature_map))
else:
# Create a SequenceExample.
if context_features:
feature_map = {
key: self._feature_to_example_feature(
val, self.metadata['context_features'][key]['type'])
for key, val in context_features.items()
}
context = tf.train.Features(feature=feature_map)
else:
context = None
if sequence_features:
feature_list_map = {
key: self._feature_to_example_feature_list(
val, self.metadata['sequence_features'][key]['type'])
for key, val in sequence_features.items()
}
feature_lists = tf.train.FeatureLists(feature_list=feature_list_map)
else:
feature_lists = None
example = tf.train.SequenceExample(
context=context,
feature_lists=feature_lists)
self.writer.write(example.SerializeToString())
def close(self):
"""Close the writer."""
self.writer.close()
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
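# Illustrative sketch (not part of the original module, assumes a TF 1.x environment):
# writing a couple of fixed-shape Examples with DatasetWriter. The output path is a
# placeholder; the matching metadata JSON file is written alongside the .tfrecord file.
def _example_write(tfrecord_path='/tmp/demo.tfrecord'):
  with DatasetWriter(tfrecord_path) as writer:
    # The types/shapes recorded for the first Example must hold for all later ones.
    writer.write(features={'image': np.zeros((4, 4), dtype=np.uint8), 'label': 3})
    writer.write(features={'image': np.ones((4, 4), dtype=np.uint8), 'label': 7})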
class DatasetReader(object):
"""A class for piping Examples from a TFRecord file into an Estimator.
When calling an Estimators train, eval, or predict methods, pass in this
class's input_fn method as the input_fn parameter and it will read Examples
from the TFRecord file using the parameters specified when initializing an
instance of this class.
"""
def __init__(self, tfrecord_path, batch_size=1, num_parallel_batches=None,
shuffle_buffer_size=None, repeat=None, prefetch_buffer_size=1):
"""Initialize the database object.
Store the initialization parameters and read in the metadata from the
metadata file."""
if not tfrecord_path.endswith('.tfrecord'):
raise ValueError('The TFRecord path must end with ".tfrecord", however '
'the path you specified was: %s' % tfrecord_path)
self.tfrecord_path = tfrecord_path
self.batch_size = batch_size
self.num_parallel_batches = (num_parallel_batches if num_parallel_batches
else multiprocessing.cpu_count())
self.shuffle_buffer_size = shuffle_buffer_size
self.repeat = repeat
self.prefetch_buffer_size = prefetch_buffer_size
self.metadata_path = tfrecord_path_to_metadata_path(tfrecord_path)
with open(self.metadata_path) as f:
self.metadata = json.load(f)
if self.metadata['type'] == 'Example':
self.features_parser_config = {
key: self._get_feature_parser_config(val)
for key, val in self.metadata['features'].items()
}
if self.metadata['type'] == 'SequenceExample':
if 'context_features' in self.metadata:
self.context_features_parser_config = {
key: self._get_feature_parser_config(val)
for key, val in self.metadata['context_features'].items()
}
else:
self.context_features_parser_config = None
if 'sequence_features' in self.metadata:
self.sequence_features_parser_config = {
key: self._get_feature_parser_config(val, is_sequence_feature=True)
for key, val in self.metadata['sequence_features'].items()
}
else:
self.sequence_features_parser_config = None
@staticmethod
def _get_feature_parser_config(feature_metadata, is_sequence_feature=False):
"""Get the parsing configuration for the feature, given its metadata.
Returns: A FixedLenFeature if the shape is constant, or a
FixedLenSequenceFeature if the first dimension of the shape is of
variable length.
"""
if feature_metadata['type'] == 'bytes':
tf_type = tf.string
elif feature_metadata['type'] == 'float':
tf_type = tf.float32
elif feature_metadata['type'] == 'int64':
tf_type = tf.int64
else:
raise ValueError('Invalid metadata type: %s' % feature_metadata['type'])
if is_sequence_feature:
return tf.FixedLenSequenceFeature(feature_metadata['shape'][1:], tf_type)
else:
return tf.FixedLenFeature(feature_metadata['shape'], tf_type)
# def _decode_if_needed(self, key, val):
# """Reshape the tensor to the metadata's shape if it's multidimensional."""
# shape = self.metadata[key]['shape']
# if len(shape) > 1:
# val = tf.reshape(val, shape)
# return val
@staticmethod
def _decode_and_reshape_if_needed(val, shape):
"""Decode the value to unit8 if it is a byte string, and reshape the
tensor if it's shape multidimensional."""
if val.dtype == tf.string:
val = tf.decode_raw(val, tf.uint8)
if len(shape) > 1:
val = tf.reshape(val, shape)
return val
def _parser(self, serialized_example):
"""Deserialize the Example and return a features dict and a label."""
if self.metadata['type'] == 'Example':
# Parse an Example.
features = tf.parse_single_example(serialized_example,
self.features_parser_config)
for key in features:
features[key] = self._decode_and_reshape_if_needed(
features[key], self.metadata['features'][key]['shape'])
else:
# Parse a SequenceExample.
features, sequence_features = tf.parse_single_sequence_example(
serialized_example, self.context_features_parser_config,
self.sequence_features_parser_config)
for key in features:
features[key] = self._decode_and_reshape_if_needed(
features[key], self.metadata['context_features'][key]['shape'])
for key in sequence_features:
features[key] = self._decode_and_reshape_if_needed(
sequence_features[key],
self.metadata['sequence_features'][key]['shape'])
features.update(sequence_features)
      features['lengths'] = tf.shape(
          sequence_features[next(iter(sequence_features))])[0]
label = features.pop('label', None)
if label is None:
label = features.pop('labels', None)
return features, label
def _get_padded_shapes(self):
features_padded_shapes = {}
if 'context_features' in self.metadata:
features_padded_shapes.update({
key: val['shape']
for key, val in self.metadata['context_features'].items()
})
if 'sequence_features' in self.metadata:
features_padded_shapes.update({
key: val['shape']
for key, val in self.metadata['sequence_features'].items()
})
features_padded_shapes['lengths'] = []
label_padded_shape = (features_padded_shapes.pop('label', None) or
features_padded_shapes.pop('labels', None))
return features_padded_shapes, label_padded_shape
def input_fn(self):
"""The input_fn to be passed to an Estimator.
This method will add nodes and ops to the graph that will parse Examples
from the TFRecord file, batch them together, shuffle them if specified,
and repeat the dataset multiple epochs if specified.
Returns:
features: A dict of key value pairs where the key is the feature's name
and the value is a Tensor of a batch of that feature.
labels: A Tensor of a batch of labels.
"""
dataset = tf.data.TFRecordDataset(self.tfrecord_path)
if self.metadata['type'] == 'Example':
# Get a batch of Examples.
dataset = dataset.apply(tf.contrib.data.map_and_batch(
self._parser, self.batch_size, self.num_parallel_batches))
else:
# Get a dynamically padded batch of SequenceExamples
dataset = dataset.map(self._parser)
dataset = dataset.padded_batch(self.batch_size,
padded_shapes=self._get_padded_shapes())
if self.shuffle_buffer_size and self.repeat != 1:
      dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(
          self.shuffle_buffer_size, self.repeat))
else:
if self.shuffle_buffer_size:
dataset = dataset.shuffle(self.shuffle_buffer_size)
if self.repeat != 1:
dataset = dataset.repeat(self.repeat)
dataset = dataset.prefetch(self.prefetch_buffer_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
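# Illustrative sketch (not part of the original module, assumes a TF 1.x Estimator):
# feeding the TFRecord file written above into an Estimator via DatasetReader.input_fn.
# The estimator object and the path are placeholders for the example.
def _example_train(estimator, tfrecord_path='/tmp/demo.tfrecord'):
  reader = DatasetReader(tfrecord_path,
                         batch_size=32,
                         shuffle_buffer_size=1000,
                         repeat=None)  # repeat=None repeats the dataset indefinitely
  estimator.train(input_fn=reader.input_fn, steps=1000)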
|
#
# Copyright (c) 2008-2015 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_utils.interfaces.size module
This module provides a simple interface for objects handling a length attribute.
"""
from zope.interface import Interface
from zope.schema import Int
__docformat__ = 'restructuredtext'
class ILength(Interface):
"""Length interface"""
length = Int(title="Object length")
|
import numpy as np
from .hmanifold import HomogenousManifold
from ..liealgebra import soLieAlgebra
from ..liegroup import SOLieGroup
class HomogenousSphere(HomogenousManifold):
"""The S2 sphere. Corresponding Lie group SO(n)."""
def __init__(self, y=np.array([0, 0, 1])):
if not isinstance(y, np.ndarray):
try:
y = np.array(y)
except Exception:
raise TypeError("y must be array_like")
self.n = y.size
self.y = y
self.lie_group = SOLieGroup()
self.lie_algebra = soLieAlgebra(self.lie_group)
super().__init__()
@property
def y(self):
return self._y
@y.setter
def y(self, value):
if value.size != self.n:
raise ValueError("y does not have the correct dimension")
elif not np.isclose(np.inner(value, value), 1.0):
raise ValueError(
f"y does not lie on the N-sphere. y^T . y should be one, was {np.inner(value, value)}" # noqa
)
self._y = value
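# Illustrative sketch (not part of the module): constructing a point on the sphere and the
# validation behaviour of the y setter.
def _example_sphere():
    sphere = HomogenousSphere(np.array([0.0, 0.0, 1.0]))  # the default "north pole" point
    try:
        sphere.y = np.array([0.0, 0.0, 2.0])  # not unit length, so the setter raises
    except ValueError:
        pass
    return sphere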
|
import sys
import numpy as np
import os
from copy import deepcopy
import scipy.io as sio
import imageio
import time
import gc
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.losses as keras_losses
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard
import dannce.engine.serve_data_DANNCE as serve_data_DANNCE
from dannce.engine.generator import DataGenerator_3Dconv
from dannce.engine.generator import DataGenerator_3Dconv_frommem
from dannce.engine.generator import DataGenerator_3Dconv_torch
from dannce.engine.generator import DataGenerator_3Dconv_tf
from dannce.engine.generator_aux import DataGenerator_downsample
from dannce.engine.generator_aux import DataGenerator_downsample_frommem
import dannce.engine.processing as processing
from dannce.engine.processing import savedata_tomat, savedata_expval
from dannce.engine import nets, losses, ops, io
from dannce import _param_defaults_dannce, _param_defaults_shared, _param_defaults_com
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
_DEFAULT_VIDDIR = "videos"
_DEFAULT_COMSTRING = "COM"
_DEFAULT_COMFILENAME = "com3d.mat"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def check_unrecognized_params(params):
"""Check for invalid keys in the params dict against param defaults."""
# Check if key in any of the defaults
invalid_keys = []
for key in params:
in_com = key in _param_defaults_com
in_dannce = key in _param_defaults_dannce
in_shared = key in _param_defaults_shared
if not (in_com or in_dannce or in_shared):
invalid_keys.append(key)
# If there are any keys that are invalid, throw an error and print them out
if len(invalid_keys) > 0:
invalid_key_msg = [" %s," % key for key in invalid_keys]
msg = "Unrecognized keys in the configs: %s" % "".join(invalid_key_msg)
raise ValueError(msg)
def build_params(base_config, dannce_net):
base_params = processing.read_config(base_config)
base_params = processing.make_paths_safe(base_params)
params = processing.read_config(base_params["io_config"])
params = processing.make_paths_safe(params)
params = processing.inherit_config(params, base_params, list(base_params.keys()))
check_unrecognized_params(params)
return params
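# Illustrative sketch (not part of dannce): how the entry points below are typically driven.
# The config path is a placeholder and com_train is defined later in this module; in the
# packaged command-line tools the _param_defaults_* dictionaries are assumed to be folded
# into params before these calls (assumption for this sketch).
def _example_com_training(base_config="configs/com_config.yaml"):
    params = build_params(base_config, dannce_net=False)
    com_train(params)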
def make_folder(key, params):
# Make the prediction directory if it does not exist.
if params[key] is not None:
if not os.path.exists(params[key]):
os.makedirs(params[key])
else:
raise ValueError(key + " must be defined.")
def com_predict(params):
# Make the prediction directory if it does not exist.
make_folder("com_predict_dir", params)
# Load the appropriate loss function and network
try:
params["loss"] = getattr(losses, params["loss"])
except AttributeError:
params["loss"] = getattr(keras_losses, params["loss"])
params["net"] = getattr(nets, params["net"])
os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"]
# If params['n_channels_out'] is greater than one, we enter a mode in
# which we predict all available labels + the COM
MULTI_MODE = params["n_channels_out"] > 1
params["n_channels_out"] = params["n_channels_out"] + int(MULTI_MODE)
# channels out is equal to the number of views when using a single video stream with mirrors
eff_n_channels_out = int(params["n_views"]) if params["mirror"] else params["n_channels_out"]
# Grab the input file for prediction
params["label3d_file"] = processing.grab_predict_label3d_file()
print("Using camnames: {}".format(params["camnames"]))
# Also add parent params under the 'experiment' key for compatibility
# with DANNCE's video loading function
params["experiment"] = {}
params["experiment"][0] = params
# For real mono training
params["chan_num"] = 1 if params["mono"] else params["n_channels_in"]
# Build net
print("Initializing Network...")
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"],
eff_n_channels_out,
["mse"],
multigpu=False,
)
if params["com_predict_weights"] is None:
wdir = params["com_train_dir"]
weights = os.listdir(wdir)
weights = [f for f in weights if ".hdf5" in f]
weights = sorted(weights, key=lambda x: int(x.split(".")[1].split("-")[0]))
weights = weights[-1]
params["com_predict_weights"] = os.path.join(wdir, weights)
print("Loading weights from " + params["com_predict_weights"])
model.load_weights(params["com_predict_weights"])
print("COMPLETE\n")
def evaluate_ondemand(start_ind, end_ind, valid_gen):
"""Perform COM detection over a set of frames.
:param start_ind: Starting frame index
:param end_ind: Ending frame index
:param steps: Subsample every steps frames
"""
end_time = time.time()
sample_clock = 100
sample_save = 100000
if params["mirror"]:
ncams = 1
else:
ncams = len(params["camnames"])
for i in range(start_ind, end_ind):
print("Predicting on sample {}".format(i), flush=True)
if (i - start_ind) % sample_clock == 0 and i != start_ind:
print(i)
print(
"{} samples took {} seconds".format(
sample_clock, time.time() - end_time
)
)
end_time = time.time()
if (i - start_ind) % sample_save == 0 and i != start_ind:
print("Saving checkpoint at {}th sample".format(i))
processing.save_COM_checkpoint(
save_data, com_predict_dir, datadict_, cameras, params
)
pred_ = model.predict(valid_gen.__getitem__(i)[0])
pred_ = np.reshape(
pred_,
[
-1,
ncams,
pred_.shape[1],
pred_.shape[2],
pred_.shape[3],
],
)
for m in range(pred_.shape[0]):
# By selecting -1 for the last axis, we get the COM index for a
# normal COM network, and also the COM index for a multi_mode COM network,
# as in multimode the COM label is put at the end
if params["mirror"]:
# for mirror we need to reshape pred so that the cams are in front,
# so it works with the downstream code
pred = pred_[m, 0]
pred = np.transpose(pred, (2, 0, 1))
else:
pred = pred_[m, :, :, :, -1]
sampleID_ = partition["valid_sampleIDs"][i * pred_.shape[0] + m]
save_data[sampleID_] = {}
save_data[sampleID_]["triangulation"] = {}
for j in range(pred.shape[0]): # this loops over all cameras
# get coords for each map. This assumes that image are coming
# out in pred in the same order as CONFIG_PARAMS['camnames']
pred_max = np.max(np.squeeze(pred[j]))
ind = (
np.array(processing.get_peak_inds(np.squeeze(pred[j])))
* params["downfac"]
)
ind[0] += params["crop_height"][0]
ind[1] += params["crop_width"][0]
ind = ind[::-1]
# now, the center of mass is (x,y) instead of (i,j)
# now, we need to use camera calibration to triangulate
# from 2D to 3D
# now we need to mirror flip each coord if indicated
if params["mirror"] and cameras[params["camnames"][j]]["m"] == 1:
ind[1] = params["raw_im_h"] - ind[1] - 1
if params["com_debug"] is not None and j == cnum:
# Write preds
plt.figure(0)
plt.cla()
plt.imshow(np.squeeze(pred[j]))
plt.savefig(
os.path.join(
cmapdir, params["com_debug"] + str(i + m) + ".png"
)
)
plt.figure(1)
plt.cla()
im = valid_gen.__getitem__(i * pred_.shape[0] + m)
plt.imshow(processing.norm_im(im[0][j]))
plt.plot(
(ind[0] - params["crop_width"][0]) / params["downfac"],
(ind[1] - params["crop_height"][0]) / params["downfac"],
"or",
)
plt.savefig(
os.path.join(
overlaydir, params["com_debug"] + str(i + m) + ".png"
)
)
save_data[sampleID_][params["camnames"][j]] = {
"pred_max": pred_max,
"COM": ind,
}
# Undistort this COM here.
pts1 = save_data[sampleID_][params["camnames"][j]]["COM"]
pts1 = pts1[np.newaxis, :]
pts1 = ops.unDistortPoints(
pts1,
cameras[params["camnames"][j]]["K"],
cameras[params["camnames"][j]]["RDistort"],
cameras[params["camnames"][j]]["TDistort"],
cameras[params["camnames"][j]]["R"],
cameras[params["camnames"][j]]["t"],
)
save_data[sampleID_][params["camnames"][j]]["COM"] = np.squeeze(
pts1
)
# Triangulate for all unique pairs
for j in range(pred.shape[0]):
for k in range(j + 1, pred.shape[0]):
pts1 = save_data[sampleID_][params["camnames"][j]]["COM"]
pts2 = save_data[sampleID_][params["camnames"][k]]["COM"]
pts1 = pts1[np.newaxis, :]
pts2 = pts2[np.newaxis, :]
test3d = ops.triangulate(
pts1,
pts2,
camera_mats[params["camnames"][j]],
camera_mats[params["camnames"][k]],
).squeeze()
save_data[sampleID_]["triangulation"][
"{}_{}".format(params["camnames"][j], params["camnames"][k])
] = test3d
com_predict_dir = os.path.join(params["com_predict_dir"])
print(com_predict_dir)
if params["com_debug"] is not None:
cmapdir = os.path.join(com_predict_dir, "cmap")
overlaydir = os.path.join(com_predict_dir, "overlay")
if not os.path.exists(cmapdir):
os.makedirs(cmapdir)
if not os.path.exists(overlaydir):
os.makedirs(overlaydir)
cnum = params["camnames"].index(params["com_debug"])
print("Writing " + params["com_debug"] + " confidence maps to " + cmapdir)
print("Writing " + params["com_debug"] + "COM-image overlays to " + overlaydir)
(
samples,
datadict,
datadict_3d,
cameras,
camera_mats,
) = serve_data_DANNCE.prepare_data(
params,
multimode=MULTI_MODE,
prediction=True,
return_cammat=True,
nanflag=False,
)
# Zero any negative frames
for key in datadict.keys():
for key_ in datadict[key]["frames"].keys():
if datadict[key]["frames"][key_] < 0:
datadict[key]["frames"][key_] = 0
# The generator expects an experimentID in front of each sample key
samples = ["0_" + str(f) for f in samples]
datadict_ = {}
for key in datadict.keys():
datadict_["0_" + str(key)] = datadict[key]
datadict = datadict_
# Initialize video dictionary. paths to videos only.
vids = {}
vids = processing.initialize_vids(params, datadict, 0, vids, pathonly=True)
# Parameters
valid_params = {
"dim_in": (
params["crop_height"][1] - params["crop_height"][0],
params["crop_width"][1] - params["crop_width"][0],
),
"n_channels_in": params["n_channels_in"],
"batch_size": 1,
"n_channels_out": params["n_channels_out"],
"out_scale": params["sigma"],
"camnames": {0: params["camnames"]},
"crop_width": params["crop_width"],
"crop_height": params["crop_height"],
"downsample": params["downfac"],
"labelmode": "coord",
"chunks": params["chunks"],
"shuffle": False,
"dsmode": params["dsmode"],
"preload": False,
"mono": params["mono"],
"mirror": params["mirror"],
}
partition = {}
partition["valid_sampleIDs"] = samples
labels = datadict
save_data = {}
valid_generator = DataGenerator_downsample(
partition["valid_sampleIDs"], labels, vids, **valid_params
)
# If we just want to analyze a chunk of video...
st_ind = params["start_batch"]
if params["max_num_samples"] == "max":
evaluate_ondemand(st_ind, len(valid_generator), valid_generator)
else:
endIdx = np.min([st_ind + params["max_num_samples"], len(valid_generator)])
evaluate_ondemand(st_ind, endIdx, valid_generator)
processing.save_COM_checkpoint(
save_data, com_predict_dir, datadict_, cameras, params
)
print("done!")
def com_train(params):
# Make the train directory if it does not exist.
make_folder("com_train_dir", params)
params["loss"] = getattr(losses, params["loss"])
params["net"] = getattr(nets, params["net"])
os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"]
# MULTI_MODE is where the full set of markers is trained on, rather than
# the COM only. In some cases, this can help improve COMfinder performance.
MULTI_MODE = params["n_channels_out"] > 1
params["n_channels_out"] = params["n_channels_out"] + int(MULTI_MODE)
samples = []
datadict = {}
datadict_3d = {}
cameras = {}
camnames = {}
# Use the same label files and experiment settings as DANNCE unless
# indicated otherwise by using a 'com_exp' block in io.yaml.
#
# This can be useful for introducing additional COM-only label files.
if params["com_exp"] is not None:
exps = params["com_exp"]
else:
exps = params["exp"]
num_experiments = len(exps)
params["experiment"] = {}
for e, expdict in enumerate(exps):
exp = processing.load_expdict(params, e, expdict, _DEFAULT_VIDDIR)
params["experiment"][e] = exp
(samples_, datadict_, datadict_3d_, cameras_,) = serve_data_DANNCE.prepare_data(
params["experiment"][e],
nanflag=False,
com_flag=not MULTI_MODE,
multimode=MULTI_MODE,
)
# No need to prepare any COM file (they don't exist yet).
# We call this because we want to support multiple experiments,
# which requires appending the experiment ID to each data object and key
samples, datadict, datadict_3d, ddd = serve_data_DANNCE.add_experiment(
e,
samples,
datadict,
datadict_3d,
{},
samples_,
datadict_,
datadict_3d_,
{},
)
cameras[e] = cameras_
camnames[e] = params["experiment"][e]["camnames"]
com_train_dir = params["com_train_dir"]
# Dump the params into file for reproducibility
processing.save_params(com_train_dir, params)
# Additionally, to keep videos unique across experiments, need to add
# experiment labels in other places. E.g. experiment 0 CameraE's "camname"
# Becomes 0_CameraE.
cameras, datadict, params = serve_data_DANNCE.prepend_experiment(
params, datadict, num_experiments, camnames, cameras
)
samples = np.array(samples)
e = 0
# Initialize video objects
vids = {}
for e in range(num_experiments):
vids = processing.initialize_vids(params, datadict, e, vids, pathonly=True)
print("Using {} downsampling".format(params["dsmode"]))
train_params = {
"dim_in": (
params["crop_height"][1] - params["crop_height"][0],
params["crop_width"][1] - params["crop_width"][0],
),
"n_channels_in": params["n_channels_in"],
"batch_size": 1,
"n_channels_out": params["n_channels_out"],
"out_scale": params["sigma"],
"camnames": camnames,
"crop_width": params["crop_width"],
"crop_height": params["crop_height"],
"downsample": params["downfac"],
"shuffle": False,
"chunks": params["chunks"],
"dsmode": params["dsmode"],
"preload": False,
"mono": params["mono"],
"mirror": params["mirror"],
}
valid_params = deepcopy(train_params)
valid_params["shuffle"] = False
partition = processing.make_data_splits(
samples, params, com_train_dir, num_experiments
)
labels = datadict
# For real mono training
params["chan_num"] = 1 if params["mono"] else params["n_channels_in"]
# effective n_channels, which is different if using a mirror arena configuration
eff_n_channels_out = len(camnames[0]) if params["mirror"] else params["n_channels_out"]
# Build net
print("Initializing Network...")
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"],
eff_n_channels_out,
["mse"],
multigpu=False,
)
print("COMPLETE\n")
if params["com_finetune_weights"] is not None:
weights = os.listdir(params["com_finetune_weights"])
weights = [f for f in weights if ".hdf5" in f]
weights = weights[0]
try:
model.load_weights(os.path.join(params["com_finetune_weights"], weights))
except:
            print(
                "Note: model weights could not be loaded due to a mismatch in dimensions. "
                "Assuming that this is a fine-tune with a different number of outputs and "
                "removing the top of the net accordingly"
            )
model.layers[-1].name = "top_conv"
model.load_weights(
os.path.join(params["com_finetune_weights"], weights), by_name=True
)
if params["lockfirst"]:
for layer in model.layers[:2]:
layer.trainable = False
model.compile(
optimizer=Adam(lr=float(params["lr"])), loss=params["loss"],
)
# Create checkpoint and logging callbacks
kkey = "weights.hdf5"
mon = "val_loss" if params["num_validation_per_exp"] > 0 else "loss"
model_checkpoint = ModelCheckpoint(
os.path.join(com_train_dir, kkey),
monitor=mon,
save_best_only=True,
save_weights_only=True,
)
csvlog = CSVLogger(os.path.join(com_train_dir, "training.csv"))
tboard = TensorBoard(
log_dir=os.path.join(com_train_dir, "logs"), write_graph=False, update_freq=100
)
# Initialize data structures
if params["mirror"]:
ncams = 1 # Effectively, for the purpose of batch indexing
else:
ncams = len(camnames[0])
dh = (params["crop_height"][1] - params["crop_height"][0]) // params["downfac"]
dw = (params["crop_width"][1] - params["crop_width"][0]) // params["downfac"]
ims_train = np.zeros(
(ncams * len(partition["train_sampleIDs"]), dh, dw, params["chan_num"]),
dtype="float32"
)
y_train = np.zeros(
(ncams * len(partition["train_sampleIDs"]), dh, dw, eff_n_channels_out),
dtype="float32",
)
ims_valid = np.zeros(
(ncams * len(partition["valid_sampleIDs"]), dh, dw, params["chan_num"]),
dtype="float32"
)
y_valid = np.zeros(
(ncams * len(partition["valid_sampleIDs"]), dh, dw, eff_n_channels_out),
dtype="float32",
)
# Set up generators
train_generator = DataGenerator_downsample(
partition["train_sampleIDs"], labels, vids, **train_params
)
valid_generator = DataGenerator_downsample(
partition["valid_sampleIDs"], labels, vids, **valid_params
)
print("Loading data")
for i in range(len(partition["train_sampleIDs"])):
print(i, end="\r")
ims = train_generator.__getitem__(i)
ims_train[i * ncams : (i + 1) * ncams] = ims[0]
y_train[i * ncams : (i + 1) * ncams] = ims[1]
for i in range(len(partition["valid_sampleIDs"])):
print(i, end="\r")
ims = valid_generator.__getitem__(i)
ims_valid[i * ncams : (i + 1) * ncams] = ims[0]
y_valid[i * ncams : (i + 1) * ncams] = ims[1]
train_generator = DataGenerator_downsample_frommem(
np.arange(ims_train.shape[0]),
ims_train,
y_train,
batch_size=params["batch_size"]*ncams,
augment_hue=params["augment_hue"],
augment_brightness=params["augment_brightness"],
augment_rotation=params["augment_rotation"],
augment_shear=params["augment_hue"],
augment_shift=params["augment_brightness"],
augment_zoom=params["augment_rotation"],
bright_val=params["augment_bright_val"],
hue_val=params["augment_hue_val"],
shift_val=params["augment_shift_val"],
rotation_val=params["augment_rotation_val"],
shear_val=params["augment_shear_val"],
zoom_val=params["augment_zoom_val"],
chan_num=params["chan_num"]
)
valid_generator = DataGenerator_downsample_frommem(
np.arange(ims_valid.shape[0]),
ims_valid,
y_valid,
batch_size=ncams,
shuffle=False,
chan_num=params["chan_num"]
)
def write_debug(trainData=True):
"""
Factoring re-used debug output code.
Writes training or validation images to an output directory, together
with the ground truth COM labels and predicted COM labels, respectively.
"""
def plot_out(imo, lo, imn):
processing.plot_markers_2d(
processing.norm_im(imo), lo, newfig=False
)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
imname = imn
plt.savefig(
os.path.join(debugdir, imname), bbox_inches="tight", pad_inches=0
)
if params["debug"] and not MULTI_MODE:
if trainData:
outdir = "debug_im_out"
ims_out = ims_train
label_out = y_train
else:
outdir = "debug_im_out_valid"
ims_out = ims_valid
label_out = model.predict(ims_valid, batch_size=1)
# Plot all training images and save
# create new directory for images if necessary
debugdir = os.path.join(params["com_train_dir"], outdir)
print("Saving debug images to: " + debugdir)
if not os.path.exists(debugdir):
os.makedirs(debugdir)
plt.figure()
for i in range(ims_out.shape[0]):
plt.cla()
if params["mirror"]:
for j in range(label_out.shape[-1]):
plt.cla()
plot_out(ims_out[i], label_out[i, :, :, j:j+1],
str(i) + "_cam_" + str(j) + ".png")
else:
plot_out(ims_out[i], label_out[i], str(i) + ".png")
elif params["debug"] and MULTI_MODE:
print("Note: Cannot output debug information in COM multi-mode")
write_debug(trainData=True)
model.fit(
x=train_generator,
steps_per_epoch=len(train_generator),
validation_data=valid_generator,
validation_steps=len(valid_generator),
verbose=params["verbose"],
epochs=params["epochs"],
workers=6,
callbacks=[csvlog, model_checkpoint, tboard],
)
write_debug(trainData=False)
print("Renaming weights file with best epoch description")
processing.rename_weights(com_train_dir, kkey, mon)
print("Saving full model at end of training")
sdir = os.path.join(params["com_train_dir"], "fullmodel_weights")
if not os.path.exists(sdir):
os.makedirs(sdir)
model.save(os.path.join(sdir, "fullmodel_end.hdf5"))
def dannce_train(params):
"""Entrypoint for dannce training."""
# Depth disabled until next release.
params["depth"] = False
# Make the training directory if it does not exist.
make_folder("dannce_train_dir", params)
params["loss"] = getattr(losses, params["loss"])
params["net"] = getattr(nets, params["net"])
# Default to 6 views but a smaller number of views can be specified in the DANNCE config.
    # If the length of the camera files list is smaller than n_views, relevant lists will be
# duplicated in order to match n_views, if possible.
n_views = int(params["n_views"])
# Convert all metric strings to objects
metrics = []
for m in params["metric"]:
try:
m_obj = getattr(losses, m)
except AttributeError:
m_obj = getattr(keras.losses, m)
metrics.append(m_obj)
# set GPU ID
os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"]
# find the weights given config path
if params["dannce_finetune_weights"] is not None:
params["dannce_finetune_weights"] = processing.get_ft_wt(params)
print("Fine-tuning from {}".format(params["dannce_finetune_weights"]))
samples = []
datadict = {}
datadict_3d = {}
com3d_dict = {}
cameras = {}
camnames = {}
exps = params["exp"]
num_experiments = len(exps)
params["experiment"] = {}
for e, expdict in enumerate(exps):
exp = processing.load_expdict(params, e, expdict, _DEFAULT_VIDDIR)
(exp, samples_, datadict_, datadict_3d_, cameras_, com3d_dict_,) = do_COM_load(
exp, expdict, n_views, e, params
)
print("Using {} samples total.".format(len(samples_)))
samples, datadict, datadict_3d, com3d_dict = serve_data_DANNCE.add_experiment(
e,
samples,
datadict,
datadict_3d,
com3d_dict,
samples_,
datadict_,
datadict_3d_,
com3d_dict_,
)
cameras[e] = cameras_
camnames[e] = exp["camnames"]
print("Using the following cameras: {}".format(camnames[e]))
params["experiment"][e] = exp
dannce_train_dir = params["dannce_train_dir"]
# Dump the params into file for reproducibility
processing.save_params(dannce_train_dir, params)
    # Additionally, to keep videos unique across experiments, we need to add
    # experiment labels in other places, e.g. experiment 0 CameraE's "camname"
    # becomes 0_CameraE. *NOTE* This function modifies camnames in place
    # to add the appropriate experiment ID.
cameras, datadict, params = serve_data_DANNCE.prepend_experiment(
params, datadict, num_experiments, camnames, cameras
)
samples = np.array(samples)
# Initialize video objects
vids = {}
for e in range(num_experiments):
if params["immode"] == "vid":
vids = processing.initialize_vids(params, datadict, e, vids, pathonly=True)
# Parameters
if params["expval"]:
outmode = "coordinates"
else:
outmode = "3dprob"
gridsize = tuple([params["nvox"]] * 3)
# When this true, the data generator will shuffle the cameras and then select the first 3,
# to feed to a native 3 camera model
cam3_train = params["cam3_train"]
# Used to initialize arrays for mono, and also in *frommem (the final generator)
params["chan_num"] = 1 if params["mono"] else params["n_channels_in"]
valid_params = {
"dim_in": (
params["crop_height"][1] - params["crop_height"][0],
params["crop_width"][1] - params["crop_width"][0],
),
"n_channels_in": params["n_channels_in"],
"batch_size": 1,
"n_channels_out": params["new_n_channels_out"],
"out_scale": params["sigma"],
"crop_width": params["crop_width"],
"crop_height": params["crop_height"],
"vmin": params["vmin"],
"vmax": params["vmax"],
"nvox": params["nvox"],
"interp": params["interp"],
"depth": params["depth"],
"channel_combo": params["channel_combo"],
"mode": outmode,
"camnames": camnames,
"immode": params["immode"],
"shuffle": False, # We will shuffle later
"rotation": False, # We will rotate later if desired
"vidreaders": vids,
"distort": True,
"expval": params["expval"],
"crop_im": False,
"chunks": params["chunks"],
"preload": False,
"mono": params["mono"],
"mirror": params["mirror"],
}
# Setup a generator that will read videos and labels
tifdirs = [] # Training from single images not yet supported in this demo
partition = processing.make_data_splits(
samples, params, dannce_train_dir, num_experiments
)
train_generator = DataGenerator_3Dconv(
partition["train_sampleIDs"],
datadict,
datadict_3d,
cameras,
partition["train_sampleIDs"],
com3d_dict,
tifdirs,
**valid_params
)
valid_generator = DataGenerator_3Dconv(
partition["valid_sampleIDs"],
datadict,
datadict_3d,
cameras,
partition["valid_sampleIDs"],
com3d_dict,
tifdirs,
**valid_params
)
# We should be able to load everything into memory...
gridsize = tuple([params["nvox"]] * 3)
X_train = np.zeros(
(
len(partition["train_sampleIDs"]),
*gridsize,
params["chan_num"] * len(camnames[0]),
),
dtype="float32",
)
X_valid = np.zeros(
(
len(partition["valid_sampleIDs"]),
*gridsize,
params["chan_num"] * len(camnames[0]),
),
dtype="float32",
)
X_train_grid = None
X_valid_grid = None
if params["expval"]:
y_train = np.zeros(
(len(partition["train_sampleIDs"]), 3, params["new_n_channels_out"],),
dtype="float32",
)
X_train_grid = np.zeros(
(len(partition["train_sampleIDs"]), params["nvox"] ** 3, 3),
dtype="float32",
)
y_valid = np.zeros(
(len(partition["valid_sampleIDs"]), 3, params["new_n_channels_out"],),
dtype="float32",
)
X_valid_grid = np.zeros(
(len(partition["valid_sampleIDs"]), params["nvox"] ** 3, 3),
dtype="float32",
)
else:
y_train = np.zeros(
(
len(partition["train_sampleIDs"]),
*gridsize,
params["new_n_channels_out"],
),
dtype="float32",
)
y_valid = np.zeros(
(
len(partition["valid_sampleIDs"]),
*gridsize,
params["new_n_channels_out"],
),
dtype="float32",
)
print(
"Loading training data into memory. This can take a while to seek through for",
"long recordings. This will be especially slow if your frame indices are not",
"sorted in ascending order in your label data file.",
)
for i in range(len(partition["train_sampleIDs"])):
print(i, end="\r")
rr = train_generator.__getitem__(i)
if params["expval"]:
X_train[i] = rr[0][0]
X_train_grid[i] = rr[0][1]
else:
X_train[i] = rr[0]
y_train[i] = rr[1]
if params["debug_volume_tifdir"] is not None:
# When this option is toggled in the config, rather than
# training, the image volumes are dumped to tif stacks.
# This can be used for debugging problems with calibration or
# COM estimation
tifdir = params["debug_volume_tifdir"]
print("Dump training volumes to {}".format(tifdir))
for i in range(X_train.shape[0]):
for j in range(len(camnames[0])):
im = X_train[i, :, :, :, j * params["chan_num"] : (j + 1) * params["chan_num"]]
im = processing.norm_im(im) * 255
im = im.astype("uint8")
of = os.path.join(
tifdir, partition["train_sampleIDs"][i] + "_cam" + str(j) + ".tif"
)
imageio.mimwrite(of, np.transpose(im, [2, 0, 1, 3]))
print("Done! Exiting.")
sys.exit()
print("Loading validation data into memory")
for i in range(len(partition["valid_sampleIDs"])):
print(i, end="\r")
rr = valid_generator.__getitem__(i)
if params["expval"]:
X_valid[i] = rr[0][0]
X_valid_grid[i] = rr[0][1]
else:
X_valid[i] = rr[0]
y_valid[i] = rr[1]
# Now we can generate from memory with shuffling, rotation, etc.
randflag = params["channel_combo"] == "random"
train_generator = DataGenerator_3Dconv_frommem(
np.arange(len(partition["train_sampleIDs"])),
X_train,
y_train,
batch_size=params["batch_size"],
random=randflag,
rotation=params["rotate"],
augment_hue=params["augment_hue"],
augment_brightness=params["augment_brightness"],
augment_continuous_rotation=params["augment_continuous_rotation"],
bright_val=params["augment_bright_val"],
hue_val=params["augment_hue_val"],
rotation_val=params["augment_rotation_val"],
expval=params["expval"],
xgrid=X_train_grid,
nvox=params["nvox"],
cam3_train=cam3_train,
chan_num=params["chan_num"]
)
valid_generator = DataGenerator_3Dconv_frommem(
np.arange(len(partition["valid_sampleIDs"])),
X_valid,
y_valid,
batch_size=1,
random=randflag,
rotation=False,
augment_hue=False,
augment_brightness=False,
augment_continuous_rotation=False,
expval=params["expval"],
xgrid=X_valid_grid,
nvox=params["nvox"],
shuffle=False,
cam3_train=cam3_train,
chan_num=params["chan_num"]
)
# Build net
print("Initializing Network...")
    # Currently, we expect four modes of use:
    # 1) Training a new network from scratch
    # 2) Fine-tuning a network trained on a diff. dataset (transfer learning)
    # 3) Continuing to train 1) or 2) from a full model checkpoint (including optimizer state)
    # 4) Continuing to train 1) from saved weights only (no optimizer state)
print("NUM CAMERAS: {}".format(len(camnames[0])))
if params["train_mode"] == "new":
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"] + params["depth"],
params["n_channels_out"],
len(camnames[0]),
batch_norm=False,
instance_norm=True,
include_top=True,
gridsize=gridsize,
)
elif params["train_mode"] == "finetune":
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"] + params["depth"],
params["n_channels_out"],
len(camnames[0]),
params["new_last_kernel_size"],
params["new_n_channels_out"],
params["dannce_finetune_weights"],
params["n_layers_locked"],
batch_norm=False,
instance_norm=True,
gridsize=gridsize,
)
elif params["train_mode"] == "continued":
model = load_model(
params["dannce_finetune_weights"],
custom_objects={
"ops": ops,
"slice_input": nets.slice_input,
"mask_nan_keep_loss": losses.mask_nan_keep_loss,
"euclidean_distance_3D": losses.euclidean_distance_3D,
"centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D,
},
)
elif params["train_mode"] == "continued_weights_only":
# This does not work with models created in 'finetune' mode, but will work with models
# started from scratch ('new' train_mode)
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"] + params["depth"],
params["n_channels_out"],
3 if cam3_train else len(camnames[0]),
batch_norm=False,
instance_norm=True,
include_top=True,
gridsize=gridsize,
)
model.load_weights(params["dannce_finetune_weights"])
else:
raise Exception("Invalid training mode")
model.compile(
optimizer=Adam(lr=float(params["lr"])), loss=params["loss"], metrics=metrics,
)
print("COMPLETE\n")
# Create checkpoint and logging callbacks
kkey = "weights.hdf5"
mon = "val_loss" if params["num_validation_per_exp"] > 0 else "loss"
model_checkpoint = ModelCheckpoint(
os.path.join(dannce_train_dir, kkey),
monitor=mon,
save_best_only=True,
save_weights_only=False,
)
csvlog = CSVLogger(os.path.join(dannce_train_dir, "training.csv"))
tboard = TensorBoard(
log_dir=os.path.join(dannce_train_dir,"logs"), write_graph=False, update_freq=100
)
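    # Two custom callbacks are defined below: savePredTargets writes train/validation
    # predictions (and their targets) to a .mat file whenever the monitored loss
    # improves after epoch 25, or at the final epoch; saveCheckPoint saves a full
    # model checkpoint every 250 epochs and garbage-collects to limit memory growth.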
class savePredTargets(keras.callbacks.Callback):
def __init__(self, total_epochs, td, tgrid, vd, vgrid, tID, vID, odir, tlabel, vlabel):
self.td = td
self.vd = vd
self.tID = tID
self.vID = vID
self.total_epochs = total_epochs
self.val_loss = 1e10
self.odir = odir
self.tgrid = tgrid
self.vgrid = vgrid
self.tlabel = tlabel
self.vlabel = vlabel
def on_epoch_end(self, epoch, logs=None):
lkey = 'val_loss' if 'val_loss' in logs else 'loss'
            if epoch == self.total_epochs - 1 or (logs[lkey] < self.val_loss and epoch > 25):
print("Saving predictions on train and validation data, after epoch {}".format(epoch))
self.val_loss = logs[lkey]
pred_t = model.predict([self.td, self.tgrid], batch_size=1)
pred_v = model.predict([self.vd, self.vgrid], batch_size=1)
ofile = os.path.join(self.odir,'checkpoint_predictions_e{}.mat'.format(epoch))
sio.savemat(ofile, {'pred_train': pred_t,
'pred_valid': pred_v,
'target_train': self.tlabel,
'target_valid': self.vlabel,
'train_sampleIDs': self.tID,
'valid_sampleIDs': self.vID})
class saveCheckPoint(keras.callbacks.Callback):
def __init__(self, odir, total_epochs):
self.odir = odir
self.saveE = np.arange(0, total_epochs, 250)
def on_epoch_end(self, epoch, logs=None):
lkey = 'val_loss' if 'val_loss' in logs else 'loss'
val_loss = logs[lkey]
if epoch in self.saveE:
# Do a garbage collect to combat keras memory leak
gc.collect()
print("Saving checkpoint weights at epoch {}".format(epoch))
savename = 'weights.checkpoint.epoch{}.{}{:.5f}.hdf5'.format(epoch,
lkey,
val_loss)
self.model.save(os.path.join(self.odir, savename))
callbacks = [csvlog, model_checkpoint, tboard, saveCheckPoint(params['dannce_train_dir'], params["epochs"])]
if params['expval']:
save_callback = savePredTargets(params['epochs'],X_train,
X_train_grid,
X_valid,
X_valid_grid,
partition['train_sampleIDs'],
partition['valid_sampleIDs'],
params['dannce_train_dir'],
y_train,
y_valid)
callbacks = callbacks + [save_callback]
model.fit(
x=train_generator,
steps_per_epoch=len(train_generator),
validation_data=valid_generator,
validation_steps=len(valid_generator),
verbose=params["verbose"],
epochs=params["epochs"],
callbacks=callbacks,
workers=6,
)
print("Renaming weights file with best epoch description")
processing.rename_weights(dannce_train_dir, kkey, mon)
print("Saving full model at end of training")
sdir = os.path.join(params["dannce_train_dir"], "fullmodel_weights")
if not os.path.exists(sdir):
os.makedirs(sdir)
model.save(os.path.join(sdir, "fullmodel_end.hdf5"))
print("done!")
def dannce_predict(params):
# Depth disabled until next release.
params["depth"] = False
# Make the prediction directory if it does not exist.
make_folder("dannce_predict_dir", params)
# Load the appropriate loss function and network
try:
params["loss"] = getattr(losses, params["loss"])
except AttributeError:
params["loss"] = getattr(keras_losses, params["loss"])
netname = params["net"]
params["net"] = getattr(nets, params["net"])
# Default to 6 views but a smaller number of views can be specified in the DANNCE config.
    # If the length of the camera files list is smaller than n_views, relevant lists will be
# duplicated in order to match n_views, if possible.
n_views = int(params["n_views"])
os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"]
gpu_id = params["gpu_id"]
# While we can use experiment files for DANNCE training,
# for prediction we use the base data files present in the main config
# Grab the input file for prediction
params["label3d_file"] = processing.grab_predict_label3d_file()
params["base_exp_folder"] = os.path.dirname(params["label3d_file"])
dannce_predict_dir = params["dannce_predict_dir"]
    # Default to the slower numpy backend if no predict_mode is set in the config file (legacy support).
predict_mode = (
params["predict_mode"] if params["predict_mode"] is not None else "numpy"
)
print("Using {} predict mode".format(predict_mode))
print("Using camnames: {}".format(params["camnames"]))
# Also add parent params under the 'experiment' key for compatibility
# with DANNCE's video loading function
params["experiment"] = {}
params["experiment"][0] = params
(
params["experiment"][0],
samples_,
datadict_,
datadict_3d_,
cameras_,
com3d_dict_,
) = do_COM_load(
params["experiment"][0],
params["experiment"][0],
n_views,
0,
params,
training=False,
)
# Write 3D COM to file. This might be different from the input com3d file
# if arena thresholding was applied.
cfilename = os.path.join(dannce_predict_dir, "com3d_used.mat")
print("Saving 3D COM to {}".format(cfilename))
c3d = np.zeros((len(samples_), 3))
for i in range(len(samples_)):
c3d[i] = com3d_dict_[samples_[i]]
sio.savemat(cfilename, {"sampleID": samples_, "com": c3d})
# The library is configured to be able to train over multiple animals ("experiments")
# at once. Because supporting code expects to see an experiment ID# prepended to
# each of these data keys, we need to add a token experiment ID here.
samples = []
datadict = {}
datadict_3d = {}
com3d_dict = {}
samples, datadict, datadict_3d, com3d_dict = serve_data_DANNCE.add_experiment(
0,
samples,
datadict,
datadict_3d,
com3d_dict,
samples_,
datadict_,
datadict_3d_,
com3d_dict_,
)
cameras = {}
cameras[0] = cameras_
camnames = {}
camnames[0] = params["experiment"][0]["camnames"]
# Need a '0' experiment ID to work with processing functions.
# *NOTE* This function modified camnames in place
# to add the appropriate experiment ID
cameras, datadict, params = serve_data_DANNCE.prepend_experiment(
params, datadict, 1, camnames, cameras
)
samples = np.array(samples)
# For real mono prediction
params["chan_num"] = 1 if params["mono"] else params["n_channels_in"]
# Initialize video dictionary. paths to videos only.
# TODO: Remove this immode option if we decide not
# to support tifs
if params["immode"] == "vid":
vids = {}
vids = processing.initialize_vids(params, datadict, 0, vids, pathonly=True)
# Parameters
valid_params = {
"dim_in": (
params["crop_height"][1] - params["crop_height"][0],
params["crop_width"][1] - params["crop_width"][0],
),
"n_channels_in": params["n_channels_in"],
"batch_size": params["batch_size"],
"n_channels_out": params["n_channels_out"],
"out_scale": params["sigma"],
"crop_width": params["crop_width"],
"crop_height": params["crop_height"],
"vmin": params["vmin"],
"vmax": params["vmax"],
"nvox": params["nvox"],
"interp": params["interp"],
"depth": params["depth"],
"channel_combo": params["channel_combo"],
"mode": "coordinates",
"camnames": camnames,
"immode": params["immode"],
"shuffle": False,
"rotation": False,
"vidreaders": vids,
"distort": True,
"expval": params["expval"],
"crop_im": False,
"chunks": params["chunks"],
"preload": False,
"mono": params["mono"],
"mirror": params["mirror"]
}
# Datasets
partition = {}
valid_inds = np.arange(len(samples))
partition["valid_sampleIDs"] = samples[valid_inds]
# TODO: Remove tifdirs arguments, which are deprecated
tifdirs = []
# Generators
if predict_mode == "torch":
import torch
        # Because CUDA_VISIBLE_DEVICES is already set to a single GPU, the gpu_id here should be "0"
device = "cuda:0"
genfunc = DataGenerator_3Dconv_torch
elif predict_mode == "tf":
device = "/GPU:0"
genfunc = DataGenerator_3Dconv_tf
else:
genfunc = DataGenerator_3Dconv
valid_generator = genfunc(
partition["valid_sampleIDs"],
datadict,
datadict_3d,
cameras,
partition["valid_sampleIDs"],
com3d_dict,
tifdirs,
**valid_params
)
# Build net
print("Initializing Network...")
# This requires that the network be saved as a full model, not just weights.
# As a precaution, we import all possible custom objects that could be used
# by a model and thus need declarations
if params["dannce_predict_model"] is not None:
mdl_file = params["dannce_predict_model"]
else:
wdir = params["dannce_train_dir"]
weights = os.listdir(wdir)
weights = [f for f in weights if ".hdf5" in f]
weights = sorted(weights, key=lambda x: int(x.split(".")[1].split("-")[0]))
weights = weights[-1]
mdl_file = os.path.join(wdir, weights)
# if not using dannce_predict model (thus taking the final weights in train_results),
# set this file to dannce_predict_model so that it will still get saved with metadata
params["dannce_predict_model"] = mdl_file
print("Loading model from " + mdl_file)
if (
netname == "unet3d_big_tiedfirstlayer_expectedvalue"
or params["from_weights"] is not None
):
gridsize = tuple([params["nvox"]] * 3)
params["dannce_finetune_weights"] = processing.get_ft_wt(params)
if params["train_mode"] == "finetune":
print("Initializing a finetune network from {}, into which weights from {} will be loaded.".format(
params["dannce_finetune_weights"], mdl_file))
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"] + params["depth"],
params["n_channels_out"],
len(camnames[0]),
params["new_last_kernel_size"],
params["new_n_channels_out"],
params["dannce_finetune_weights"],
params["n_layers_locked"],
batch_norm=False,
instance_norm=True,
gridsize=gridsize,
)
else:
# This network is too "custom" to be loaded in as a full model, until I
# figure out how to unroll the first tied weights layer
model = params["net"](
params["loss"],
float(params["lr"]),
params["chan_num"] + params["depth"],
params["n_channels_out"],
len(camnames[0]),
batch_norm=False,
instance_norm=True,
include_top=True,
gridsize=gridsize,
)
model.load_weights(mdl_file)
else:
model = load_model(
mdl_file,
custom_objects={
"ops": ops,
"slice_input": nets.slice_input,
"mask_nan_keep_loss": losses.mask_nan_keep_loss,
"euclidean_distance_3D": losses.euclidean_distance_3D,
"centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D,
},
)
    # To speed up expval prediction, rather than doing two forward passes (one for the 3D
    # coordinates and one for the probability map), we splice a new output onto the model
    # after the softmax of the last convolutional layer.
if params["expval"]:
from tensorflow.keras.layers import GlobalMaxPooling3D
o2 = GlobalMaxPooling3D()(model.layers[-3].output)
model = Model(
inputs=[model.layers[0].input, model.layers[-2].input],
outputs=[model.layers[-1].output, o2],
)
save_data = {}
def evaluate_ondemand(start_ind, end_ind, valid_gen):
"""Evaluate experiment.
        :param start_ind: Starting batch index
        :param end_ind: Ending batch index (exclusive)
        :param valid_gen: Validation data generator
"""
end_time = time.time()
for idx, i in enumerate(range(start_ind, end_ind)):
print("Predicting on batch {}".format(i), flush=True)
if (i - start_ind) % 10 == 0 and i != start_ind:
print(i)
print("10 batches took {} seconds".format(time.time() - end_time))
end_time = time.time()
if (i - start_ind) % 1000 == 0 and i != start_ind:
print("Saving checkpoint at {}th batch".format(i))
if params["expval"]:
p_n = savedata_expval(
os.path.join(dannce_predict_dir, "save_data_AVG.mat"),
params,
write=True,
data=save_data,
tcoord=False,
num_markers=nchn,
pmax=True,
)
else:
p_n = savedata_tomat(
os.path.join(dannce_predict_dir, "save_data_MAX.mat"),
params,
params["vmin"],
params["vmax"],
params["nvox"],
write=True,
data=save_data,
num_markers=nchn,
tcoord=False,
)
ims = valid_gen.__getitem__(i)
pred = model.predict(ims[0])
if params["dannce_predict_vol_tifdir"] is not None:
# When this option is toggled in the config, rather than
# training, the image volumes are dumped to tif stacks.
# This can be used for debugging problems with calibration or
# COM estimation
tifdir = params["dannce_predict_vol_tifdir"]
if not os.path.exists(tifdir):
os.makedirs(tifdir)
print("Dumping prediction volumes to {}".format(tifdir))
for ii in range(ims[0][0].shape[0]):
for jj in range(len(camnames[0])):
im = ims[0][0][ii, :, :, :, jj * params["chan_num"] : (jj + 1) * params["chan_num"]]
im = processing.norm_im(im) * 255
im = im.astype("uint8")
snum = partition["valid_sampleIDs"][i * pred[0].shape[0] + ii]
of = os.path.join(
tifdir, snum + "_cam" + str(jj) + ".tif"
)
imageio.mimwrite(of, np.transpose(im, [2, 0, 1, 3]))
if params["expval"]:
probmap = pred[1]
pred = pred[0]
for j in range(pred.shape[0]):
pred_max = probmap[j]
sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j]
save_data[idx * pred.shape[0] + j] = {
"pred_max": pred_max,
"pred_coord": pred[j],
"sampleID": sampleID,
}
else:
if predict_mode == "torch":
for j in range(pred.shape[0]):
preds = torch.as_tensor(
pred[j], dtype=torch.float32, device=device
)
pred_max = preds.max(0).values.max(0).values.max(0).values
pred_total = preds.sum((0, 1, 2))
xcoord, ycoord, zcoord = processing.plot_markers_3d_torch(preds)
coord = torch.stack([xcoord, ycoord, zcoord])
pred_log = pred_max.log() - pred_total.log()
sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j]
save_data[idx * pred.shape[0] + j] = {
"pred_max": pred_max.cpu().numpy(),
"pred_coord": coord.cpu().numpy(),
"true_coord_nogrid": ims[1][j],
"logmax": pred_log.cpu().numpy(),
"sampleID": sampleID,
}
elif predict_mode == "tf":
# get coords for each map
with tf.device(device):
for j in range(pred.shape[0]):
preds = tf.constant(pred[j], dtype="float32")
                            pred_max = tf.math.reduce_max(preds, axis=(0, 1, 2))
                            pred_total = tf.math.reduce_sum(preds, axis=(0, 1, 2))
xcoord, ycoord, zcoord = processing.plot_markers_3d_tf(
preds
)
coord = tf.stack([xcoord, ycoord, zcoord], axis=0)
pred_log = tf.math.log(pred_max) - tf.math.log(pred_total)
sampleID = partition["valid_sampleIDs"][
i * pred.shape[0] + j
]
save_data[idx * pred.shape[0] + j] = {
"pred_max": pred_max.numpy(),
"pred_coord": coord.numpy(),
"true_coord_nogrid": ims[1][j],
"logmax": pred_log.numpy(),
"sampleID": sampleID,
}
else:
# get coords for each map
for j in range(pred.shape[0]):
pred_max = np.max(pred[j], axis=(0, 1, 2))
pred_total = np.sum(pred[j], axis=(0, 1, 2))
xcoord, ycoord, zcoord = processing.plot_markers_3d(
pred[j, :, :, :, :]
)
coord = np.stack([xcoord, ycoord, zcoord])
pred_log = np.log(pred_max) - np.log(pred_total)
sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j]
save_data[idx * pred.shape[0] + j] = {
"pred_max": pred_max,
"pred_coord": coord,
"true_coord_nogrid": ims[1][j],
"logmax": pred_log,
"sampleID": sampleID,
}
    max_eval_batch = params["maxbatch"]
    if max_eval_batch == "max":
        max_eval_batch = len(valid_generator)
    print("Max eval batch: {}".format(max_eval_batch))
if params["start_batch"] is not None:
start_batch = params["start_batch"]
else:
start_batch = 0
if params["new_n_channels_out"] is not None:
nchn = params["new_n_channels_out"]
else:
nchn = params["n_channels_out"]
evaluate_ondemand(start_batch, max_eval_batch, valid_generator)
if params["expval"]:
if params["start_batch"] is not None:
path = os.path.join(
dannce_predict_dir, "save_data_AVG%d.mat" % (start_batch)
)
else:
path = os.path.join(dannce_predict_dir, "save_data_AVG.mat")
p_n = savedata_expval(
path,
params,
write=True,
data=save_data,
tcoord=False,
num_markers=nchn,
pmax=True,
)
else:
if params["start_batch"] is not None:
path = os.path.join(
dannce_predict_dir, "save_data_MAX%d.mat" % (start_batch)
)
else:
path = os.path.join(dannce_predict_dir, "save_data_MAX.mat")
p_n = savedata_tomat(
path,
params,
params["vmin"],
params["vmax"],
params["nvox"],
write=True,
data=save_data,
num_markers=nchn,
tcoord=False,
)
def do_COM_load(exp, expdict, n_views, e, params, training=True):
"""
Factors COM loading and processing code, which is shared by
dannce_train() and dannce_predict()
"""
(samples_, datadict_, datadict_3d_, cameras_,) = serve_data_DANNCE.prepare_data(
exp, prediction=False if training else True, nanflag=False
)
# If len(exp['camnames']) divides evenly into n_views, duplicate here
# This must come after loading in this experiment's data because there
# is an assertion that len(exp['camnames']) == the number of cameras
# in the label files (which will not be duplicated)
exp = processing.dupe_params(exp, ["camnames"], n_views)
# If there is "clean" data (full marker set), can take the
# 3D COM from the labels
if exp["com_fromlabels"] and training:
print("For experiment {}, calculating 3D COM from labels".format(e))
com3d_dict_ = deepcopy(datadict_3d_)
for key in com3d_dict_.keys():
com3d_dict_[key] = np.nanmean(datadict_3d_[key], axis=1, keepdims=True)
elif "com_file" in expdict and expdict["com_file"] is not None:
exp["com_file"] = expdict["com_file"]
if ".mat" in exp["com_file"]:
c3dfile = sio.loadmat(exp["com_file"])
com3d_dict_ = check_COM_load(
c3dfile, "com", datadict_, params["medfilt_window"]
)
elif ".pickle" in exp["com_file"]:
datadict_, com3d_dict_ = serve_data_DANNCE.prepare_COM(
exp["com_file"],
datadict_,
comthresh=params["comthresh"],
weighted=params["weighted"],
camera_mats=cameras_,
method=params["com_method"],
)
if params["medfilt_window"] is not None:
raise Exception("Sorry, median filtering a com pickle is not yet supported. Please use a com3d.mat or *dannce.mat file instead")
else:
raise Exception("Not a valid com file format")
else:
# Then load COM from the label3d file
exp["com_file"] = expdict["label3d_file"]
c3dfile = io.load_com(exp["com_file"])
com3d_dict_ = check_COM_load(
c3dfile, "com3d", datadict_, params["medfilt_window"]
)
print("Experiment {} using com3d: {}".format(e, exp["com_file"]))
if params["medfilt_window"] is not None:
print(
"Median filtering COM trace with window size {}".format(
params["medfilt_window"]
)
)
    # Remove any 3D COMs that are beyond the confines of the 3D arena
do_cthresh = True if exp["cthresh"] is not None else False
pre = len(samples_)
samples_ = serve_data_DANNCE.remove_samples_com(
samples_, com3d_dict_, rmc=do_cthresh, cthresh=exp["cthresh"],
)
msg = "Removed {} samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file"
print(msg.format(pre - len(samples_)))
return exp, samples_, datadict_, datadict_3d_, cameras_, com3d_dict_
def check_COM_load(c3dfile, kkey, datadict_, wsize):
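    """
    Loads the 3D COM for each sampleID from c3dfile[kkey], optionally median-filters
    the traces with window size wsize, and returns a dict mapping sampleID to COM
    """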
c3d = c3dfile[kkey]
# do a median filter on the COM traces if indicated
if wsize is not None:
if wsize % 2 == 0:
wsize += 1
print("medfilt_window was not odd, changing to: {}".format(wsize))
from scipy.signal import medfilt
c3d = medfilt(c3d, (wsize, 1))
c3dsi = np.squeeze(c3dfile["sampleID"])
com3d_dict_ = {}
for (i, s) in enumerate(c3dsi):
com3d_dict_[s] = c3d[i]
return com3d_dict_
|
# Sentinel workflow event to help determine sample cherrypicked status
EVENT_CHERRYPICK_LAYOUT_SET = "cherrypick_layout_set"
###
# Cherrypicking source and destination plate events detailed here:
# Beckman: https://ssg-confluence.internal.sanger.ac.uk/display/PSDPUB/%5BBeckman%5D+Cherrypicking+Events
# Biosero: see lighthouse/class/biosero.py
###
# Source plate has had all pickable wells cherrypicked into destination plate(s), and the source plate is put into the
# output stack.
PE_BECKMAN_SOURCE_COMPLETED = "lh_beckman_cp_source_completed"
# Source plate barcode cannot be read (damaged or missing), and the plate is put into the output stack
PE_BECKMAN_SOURCE_NOT_RECOGNISED = "lh_beckman_cp_source_plate_unrecognised"
# Source plate has no related plate map data, cannot be cherrypicked (yet), and the plate is returned to the input
# stacks
PE_BECKMAN_SOURCE_NO_MAP_DATA = "lh_beckman_cp_source_no_plate_map_data"
# Source plate only contains negatives, nothing to cherrypick, and the plate is put into the output stacks
PE_BECKMAN_SOURCE_ALL_NEGATIVES = "lh_beckman_cp_source_all_negatives"
# Destination plate has been created successfully
PE_BECKMAN_DESTINATION_CREATED = "lh_beckman_cp_destination_created"
# Destination plate failed to be created successfully
PE_BECKMAN_DESTINATION_FAILED = "lh_beckman_cp_destination_failed"
|
import cv2
import numpy as np
import random
def grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
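# Note: the augmentation helpers below operate in place on float image arrays
# (via +=, -=, *=), so callers should pass arrays they are happy to have modified.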
def normalize_(image, mean, std):
image -= mean
image /= std
def lighting_(data_rng, image, alphastd, eigval, eigvec):
alpha = data_rng.normal(scale=alphastd, size=(3, ))
image += np.dot(eigvec, eigval * alpha)
def blend_(alpha, image1, image2):
image1 *= alpha
image2 *= (1 - alpha)
image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
image *= alpha
def contrast_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs_mean)
def color_jittering_(data_rng, image):
functions = [brightness_, contrast_, saturation_]
random.shuffle(functions)
gs = grayscale(image)
gs_mean = gs.mean()
for f in functions:
f(data_rng, image, gs, gs_mean, 0.4)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = center
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
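# draw_gaussian_line paints a gaussian along the horizontal and vertical lines
# through `center`: from the origin up to (and including) center when tl is True,
# otherwise from center out to the output boundary.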
def draw_gaussian_line(heatmap, center, radius, tl, outputSize, k=1):
if tl:
start = [0, 0]
end = [v + 1 for v in center]
else:
start = center
end = outputSize
for c in range(start[0], end[0]):
draw_gaussian(heatmap, [c, center[1]], radius, k)
for r in range(start[1], end[1]):
draw_gaussian(heatmap, [center[0], r], radius, k)
def draw_gaussian_fullline(heatmap, center, radius, tb, outputSize, k=1):
start = [0, 0]
end = outputSize
if tb:
for c in range(start[0], end[0]):
draw_gaussian(heatmap, [c, center[1]], radius, k)
else:
for r in range(start[1], end[1]):
draw_gaussian(heatmap, [center[0], r], radius, k)
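# gaussian_radius follows the CornerNet-style derivation: each of the three cases
# solves a quadratic in r so that a box whose corners are shifted by up to r still
# overlaps a height x width box with IoU >= min_overlap; the smallest of the three
# radii is returned as the safe choice.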
def gaussian_radius(det_size, min_overlap):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
def _get_border(border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def random_crop(image, detections, random_scales, view_size, border=64):
view_height, view_width = view_size
image_height, image_width = image.shape[0:2]
scale = np.random.choice(random_scales)
height = int(view_height * scale)
width = int(view_width * scale)
cropped_image = np.zeros((height, width, 3), dtype=image.dtype)
w_border = _get_border(border, image_width)
h_border = _get_border(border, image_height)
ctx = np.random.randint(low=w_border, high=image_width - w_border)
cty = np.random.randint(low=h_border, high=image_height - h_border)
x0, x1 = max(ctx - width // 2, 0), min(ctx + width // 2, image_width)
y0, y1 = max(cty - height // 2, 0), min(cty + height // 2, image_height)
left_w, right_w = ctx - x0, x1 - ctx
top_h, bottom_h = cty - y0, y1 - cty
# crop image
cropped_ctx, cropped_cty = width // 2, height // 2
x_slice = slice(cropped_ctx - left_w, cropped_ctx + right_w)
y_slice = slice(cropped_cty - top_h, cropped_cty + bottom_h)
cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
# crop detections
cropped_detections = detections.copy()
cropped_detections[:, 0:4:2] -= x0
cropped_detections[:, 1:4:2] -= y0
cropped_detections[:, 0:4:2] += cropped_ctx - left_w
cropped_detections[:, 1:4:2] += cropped_cty - top_h
return cropped_image, cropped_detections
def crop_image(image, center, size, output_size=None):
    if output_size is None:
output_size = size
cty, ctx = center
height, width = size
o_height, o_width = output_size
im_height, im_width = image.shape[0:2]
cropped_image = np.zeros((o_height, o_width, 3), dtype=image.dtype)
x0, x1 = max(0, ctx - width // 2), min(ctx + width // 2, im_width)
y0, y1 = max(0, cty - height // 2), min(cty + height // 2, im_height)
left, right = ctx - x0, x1 - ctx
top, bottom = cty - y0, y1 - cty
cropped_cty, cropped_ctx = o_height // 2, o_width // 2
y_slice = slice(cropped_cty - top, cropped_cty + bottom)
x_slice = slice(cropped_ctx - left, cropped_ctx + right)
cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
border = np.array([
cropped_cty - top,
cropped_cty + bottom,
cropped_ctx - left,
cropped_ctx + right
], dtype=np.float32)
offset = np.array([
cty - o_height // 2,
ctx - o_width // 2
])
return cropped_image, border, offset
|
from inspect import signature, Parameter
from functools import wraps
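# A small currying helper: decorating a function with @partial lets callers mark
# arguments to be supplied later with the sentinel `_` (or pass `...` to defer all
# positional arguments). Such a call returns a new partial function whose reported
# signature contains only the still-unsupplied parameters.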
class Unsupplied:
pass
_ = Unsupplied()
def _reinsert_kwargs(fn, args, kwargs):
params = signature(fn).parameters
param_position_mapping = {name: i for i, (name, param) in enumerate(params.items())
if param.default == Parameter.empty}
    # Collect the kwargs that map to positional parameters, and build a
    # new_kwargs dict that does not include them.
    new_kwargs = {}
    position_to_kwarg = {}
for kw, arg in kwargs.items():
if kw in param_position_mapping:
position = param_position_mapping[kw]
position_to_kwarg[position] = arg
else:
new_kwargs[kw] = arg
# make a new list of arguments by inserting positional kwargs
new_args = []
arg_pos = 0
for i in range(len(args) + len(position_to_kwarg)):
if i in position_to_kwarg:
new_args.append(position_to_kwarg[i])
else:
new_args.append(args[arg_pos])
arg_pos += 1
return new_args, new_kwargs
def _get_partial_signature(fn, partial_args, partial_kwargs):
sig = signature(fn)
params = list(sig.parameters.items())
# remove args that are specified by partial_kwargs
params = [(name, param) for name, param in params if partial_kwargs.get(name, _) is _]
if partial_args[0] is Ellipsis:
# if ... then we're done.
pass
else:
assert len(partial_args) == len(params)
params = [(name, param) for partial_arg, (name, param) in zip(partial_args, params)
if partial_arg is _]
return sig.replace(parameters=[param for name, param in params])
def partial(fn):
@wraps(fn)
def g(*partial_args, **partial_kwargs):
all_unsupplied = False
if any(arg is Ellipsis for arg in partial_args):
assert len(partial_args) == 1
all_unsupplied = True
unsupplied_arg_positions = [i for i, arg in enumerate(partial_args) if arg is _]
unsupplied_kwargs = {key: value for key, value in partial_kwargs.items() if value is _}
if len(unsupplied_arg_positions) == 0 and len(unsupplied_kwargs) == 0 and (not all_unsupplied):
return fn(*partial_args, **partial_kwargs)
new_sig = _get_partial_signature(fn, partial_args, partial_kwargs)
@partial
def h(*args, **kwargs):
nonlocal partial_args, partial_kwargs, all_unsupplied, unsupplied_arg_positions
_partial_args = list(partial_args).copy()
_partial_kwargs = partial_kwargs.copy()
_unsupplied_arg_positions = unsupplied_arg_positions.copy()
_partial_args = list(args) if all_unsupplied else list(_partial_args)
for i, arg in zip(_unsupplied_arg_positions, args):
_partial_args[i] = arg
for key, value in kwargs.items():
_partial_kwargs[key] = value
_partial_args, _partial_kwargs = _reinsert_kwargs(fn, _partial_args, _partial_kwargs)
_partial_args = tuple(_partial_args)
return fn(*_partial_args, **_partial_kwargs)
h.__signature__ = new_sig
h.__wrapped__.__signature__ = new_sig
return h
    g.__signature__ = signature(fn)
    g.__wrapped__.__signature__ = signature(fn)
return g
if __name__ == '__main__':
@partial
def f(x, y, z):
return x, y, z
print(signature(f))
g = f(..., y=2)
print(signature(g))
# print(g(1, 3))
print(signature(g))
h = g(..., z=3)
print(signature(h))
print(h(1))
|
from Tokenizer import *
from enum import Enum
class I(Enum):
# Do nothing code
NO_OP = 1
"""
STACK OPERATIONS
[2-7] Range; [2-6] Full, [7] Empty
"""
# Removes the top-of-stack (TOS) item.
POP_TOP = 2
# Swaps the two top-most stack items
ROT_TWO = 3
# Lifts second and third stack item one position up,
# moves top down to position three.
ROT_THREE = 4
# Duplicates the reference on top of the stack
DUP_TOP = 5
# Duplicates the two references on top of the stack,
# leaving them in the same order.
DUP_TOP_TWO = 6
"""
UNARY OPERATIONS
[8-15] Range
"""
# Implements TOS = +TOS
UNARY_POSITIVE = 8
# Implements TOS = -TOS
UNARY_NEGATIVE = 9
# Implements TOS = not TOS
UNARY_NOT = 10
# Implements TOS = ~TOS
UNARY_BIT_NOT = 11
"""
BINARY OPERATIONS
[16-31] Range
"""
BINARY_PLUS = 16
BINARY_MINUS = 17
BINARY_TIMES = 18
BINARY_DIVIDE = 19
BINARY_FLOOR_DIVIDE = 20
BINARY_SUBSCRIPT = 21
BINARY_SHIFT_LEFT = 22
BINARY_SHIFT_RIGHT = 23
BINARY_AND = 24
BINARY_OR = 25
BINARY_XOR = 26
BINARY_MODULO = 27
BINARY_POWER = 28
"""
IN-PLACE AND FAST BINARY OPERATIONS
[32-63] Range; Reserved
"""
"""
MISCELLANEOUS
[64-95] Range
"""
PRINT_EXPR = 64
BREAK_LOOP = 65
CONTINUE_LOOP = 66
RETURN_VALUE = 67
IMPORT_STAR = 68
POP_BLOCK = 69
POP_EXCEPT = 70
END_FINALLY = 71
    # Not in Python -- suspends a coroutine
SUSPEND_COROUTINE = 74
# Not in Python -- resumes a coroutine
RESUME_COROUTINE = 75
RAISE_VARARGS = 76
CALL_FUNCTION = 77
CALL_FUNCTION_KW = 78
CALL_FUNCTION_EX = 79
LOAD_METHOD = 80
CALL_METHOD = 81
FORMAT_VALUE = 82
"""
VARIABLES AND NAMES
[96-127] Range
"""
STORE_NAME = 96
DELETE_NAME = 97
UNPACK_SEQUENCE = 98
IMPORT_NAME = 99
LOAD_FAST = 100
LOAD_CONST = 101
STORE_FAST = 102
DELETE_FAST = 103
"""
JUMP INSTRUCTIONS
[128-144] Range
"""
JUMP_FORWARD = 128
POP_JUMP_IF_TRUE = 129
POP_JUMP_IF_FALSE = 130
JUMP_IF_TRUE_OR_POP = 131
JUMP_IF_FALSE_OR_POP = 132
JUMP_ABSOLUTE = 133
test_opo = "1 + 1"
if __name__ == '__main__':
print(format_printing(tokenize(test_opo)))
|
# -*- coding: utf-8 -*-
"""
This module defines the constants of the code, including the information of the
sets and parameter files.
"""
# Sorted connection parameter sheets
list_connection_operation = ["V_OM","F_OM","Line_efficiency",
"AnnualProd_perunit_capacity","Residual_capacity","Capacity_factor_line"]
list_connection_planning = ["V_OM","F_OM","INV","Decom_cost",
"Line_Economic_life","Interest_rate","Line_lifetime","Line_efficiency",
"AnnualProd_perunit_capacity","Residual_capacity","Capacity_factor_line",
"Min_newcap","Max_newcap","Min_totalcap","Max_totalcap"]
# Sorted regional parameter sheets
storage_sheets = ["Storage_initial_SOC","Storage_min_SOC",
"Storage_charge_efficiency","Storage_discharge_efficiency",
"Storage_charge_time","Storage_discharge_time"]
conversion_plus_sheets = ["Carrier_ratio_in","Carrier_ratio_out"]
def take_regional_sheets(mode,technologies,regions):
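    """
    Creates a dictionary mapping each region to the ordered list of its regional
    parameter sheet names, based on the given mode and on the technology
    categories (e.g. Storage, Conversion_plus) present in that region
    """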
regional_sheets = {}
for reg in regions:
if mode == "Operation":
regional_sheets[reg] = ["V_OM","F_OM","Tech_efficiency",
"AnnualProd_perunit_capacity","Residual_capacity",
"Capacity_factor_tech","capacity_factor_resource",
"Specific_emission","Fix_taxsub","Carbon_tax",
"Min_production","Max_production","Min_production_h",
"Max_production_h","Emission_cap_annual","Demand"]
position1_operation = regional_sheets[reg].index("Tech_efficiency")
if "Conversion_plus" in technologies[reg].keys():
for sheet, item in enumerate(conversion_plus_sheets):
regional_sheets[reg].insert(sheet + position1_operation+1, item)
position2_operation = regional_sheets[reg].index("capacity_factor_resource")
if "Storage" in technologies[reg].keys():
for sheet,item in enumerate(storage_sheets):
regional_sheets[reg].insert(sheet + position2_operation+1, item)
if mode == "Planning":
regional_sheets[reg] = ["V_OM","F_OM","INV","Decom_cost",
"Economic_lifetime","Interest_rate","Discount_rate","Tech_lifetime",
"Tech_efficiency","AnnualProd_perunit_capacity","Residual_capacity",
"Capacity_factor_tech","capacity_factor_resource","Specific_emission",
"Investment_taxsub","Fix_taxsub", "Carbon_tax","Min_newcap",
"Max_newcap","Min_totalcap","Max_totalcap","Min_production",
"Max_production","Min_production_h","Max_production_h",
"Emission_cap_annual","Demand"]
position1_planning = regional_sheets[reg].index("Tech_efficiency")
if "Conversion_plus" in technologies[reg].keys():
for sheet, item in enumerate(conversion_plus_sheets):
regional_sheets[reg].insert(sheet + position1_planning+1, item)
            position2_planning = regional_sheets[reg].index("capacity_factor_resource")
if "Storage" in technologies[reg].keys():
for sheet,item in enumerate(storage_sheets):
regional_sheets[reg].insert(sheet + position2_planning+1, item)
return regional_sheets
# Constants of connections data
def take_trade_ids(mode):
"""
Creates a dictionary for storing the information of the parameter sheets of
inter-regional link data based on the given mode
"""
trade_data_ids = {
"line_fixed_cost": {"sheet_name": "F_OM", "index_col": 0, "header": [0, 1]},
"line_var_cost": {"sheet_name": "V_OM", "index_col": 0, "header": [0, 1]},
"line_residual_cap": {
"sheet_name": "Residual_capacity",
"index_col": 0,
"header": [0, 1],
},
"line_eff": {"sheet_name": "Line_efficiency", "index_col": 0, "header": [0, 1]},
"line_capacity_factor": {
"sheet_name": "Capacity_factor_line",
"index_col": 0,
"header": [0, 1],
},
"annualprod_per_unitcapacity": {
"sheet_name": "AnnualProd_perunit_capacity",
"index_col": 0,
"header": [0, 1],
},
}
if mode == "Planning":
trade_data_ids.update(
{
"line_inv": {"sheet_name": "INV", "index_col": 0, "header": [0, 1]},
"line_decom_cost": {
"sheet_name": "Decom_cost",
"index_col": 0,
"header": [0, 1],
},
"line_mintotcap": {
"sheet_name": "Min_totalcap",
"index_col": 0,
"header": [0, 1],
},
"line_maxtotcap": {
"sheet_name": "Max_totalcap",
"index_col": 0,
"header": [0, 1],
},
"line_min_newcap": {
"sheet_name": "Min_newcap",
"index_col": 0,
"header": [0, 1],
},
"line_max_newcap": {
"sheet_name": "Max_newcap",
"index_col": 0,
"header": [0, 1],
},
"line_lifetime": {
"sheet_name": "Line_lifetime",
"index_col": 0,
"header": [0, 1],
},
"line_economic_lifetime": {
"sheet_name": "Line_Economic_life",
"index_col": 0,
"header": [0, 1],
},
"interest_rate": {
"sheet_name": "Interest_rate",
"index_col": 0,
"header": [0, 1],
},
}
)
return trade_data_ids
# Constants of parameters_glob data
def take_global_ids(mode):
"""
Creates a dictionary for storing the information of the parameter sheets of
global data based on the given mode
"""
global_data_ids = {
"global_min_production": {
"sheet_name": "Min_production_global",
"index_col": 0,
"header": 0,
},
"global_max_production": {
"sheet_name": "Max_production_global",
"index_col": 0,
"header": 0,
},
"global_emission_cap_annual": {
"sheet_name": "Glob_emission_cap_annual",
"index_col": 0,
"header": 0,
},
}
if mode == "Planning":
global_data_ids.update(
{
"global_mintotcap": {
"sheet_name": "Min_totalcap_global",
"index_col": 0,
"header": 0,
},
"global_maxtotcap": {
"sheet_name": "Max_totalcap_global",
"index_col": 0,
"header": 0,
},
"global_min_newcap": {
"sheet_name": "Min_newcap_global",
"index_col": 0,
"header": 0,
},
"global_max_newcap": {
"sheet_name": "Max_newcap_global",
"index_col": 0,
"header": 0,
},
"global_discount_rate": {
"sheet_name": "Discount_rate",
"index_col": 0,
"header": 0,
},
}
)
return global_data_ids
# Constants of parameters_regions data
def take_ids(regions, technologies, mode):
"""
Creates a dictionary for storing the information of the parameter sheets of
regional data based on the given mode and the list of regions and technologies
within each region
"""
regional_data_ids = {}
for reg in regions:
regional_data_ids[reg] = {
"tech_fixed_cost": {"sheet_name": "F_OM", "index_col": 0, "header": [0, 1]},
"tech_var_cost": {"sheet_name": "V_OM", "index_col": 0, "header": [0, 1]},
"tech_residual_cap": {
"sheet_name": "Residual_capacity",
"index_col": 0,
"header": [0, 1],
},
"tech_max_production": {
"sheet_name": "Max_production",
"index_col": 0,
"header": [0, 1],
},
"tech_min_production": {
"sheet_name": "Min_production",
"index_col": 0,
"header": [0, 1],
},
"tech_max_production_h": {
"sheet_name": "Max_production_h",
"index_col": [0, 1],
"header": [0, 1],
},
"tech_min_production_h": {
"sheet_name": "Min_production_h",
"index_col": [0, 1],
"header": [0, 1],
},
"annualprod_per_unitcapacity": {
"sheet_name": "AnnualProd_perunit_capacity",
"index_col": 0,
"header": [0, 1],
},
"tech_efficiency": {
"sheet_name": "Tech_efficiency",
"index_col": 0,
"header": [0, 1],
},
"tech_capacity_factor": {
"sheet_name": "Capacity_factor_tech",
"index_col": 0,
"header": [0, 1],
},
"specific_emission": {
"sheet_name": "Specific_emission",
"index_col": 0,
"header": [0, 1],
},
"carbon_tax": {
"sheet_name": "Carbon_tax",
"index_col": 0,
"header": [0, 1],
},
"fix_taxsub": {
"sheet_name": "Fix_taxsub",
"index_col": 0,
"header": [0, 1, 2],
},
"demand": {"sheet_name": "Demand", "index_col": [0, 1], "header": 0},
"res_capacity_factor": {
"sheet_name": "capacity_factor_resource",
"index_col": [0, 1],
"header": [0, 1],
},
"emission_cap_annual": {
"sheet_name": "Emission_cap_annual",
"index_col": 0,
"header": 0,
},
}
if mode == "Planning":
regional_data_ids[reg].update(
{
"tech_inv": {"sheet_name": "INV", "index_col": 0, "header": [0, 1]},
"tech_decom_cost": {
"sheet_name": "Decom_cost",
"index_col": 0,
"header": [0, 1],
},
"tech_mintotcap": {
"sheet_name": "Min_totalcap",
"index_col": 0,
"header": [0, 1],
},
"tech_maxtotcap": {
"sheet_name": "Max_totalcap",
"index_col": 0,
"header": [0, 1],
},
"tech_min_newcap": {
"sheet_name": "Min_newcap",
"index_col": 0,
"header": [0, 1],
},
"tech_max_newcap": {
"sheet_name": "Max_newcap",
"index_col": 0,
"header": [0, 1],
},
"tech_lifetime": {
"sheet_name": "Tech_lifetime",
"index_col": 0,
"header": [0, 1],
},
"economic_lifetime": {
"sheet_name": "Economic_lifetime",
"index_col": 0,
"header": [0, 1],
},
"interest_rate": {
"sheet_name": "Interest_rate",
"index_col": 0,
"header": [0, 1],
},
"inv_taxsub": {
"sheet_name": "Investment_taxsub",
"index_col": 0,
"header": [0, 1, 2],
},
"discount_rate": {
"sheet_name": "Discount_rate",
"index_col": 0,
"header": 0,
},
}
)
if "Conversion_plus" in technologies[reg].keys():
regional_data_ids[reg].update(
{
"carrier_ratio_in": {
"sheet_name": "Carrier_ratio_in",
"index_col": [0, 1],
"header": [0, 1],
},
"carrier_ratio_out": {
"sheet_name": "Carrier_ratio_out",
"index_col": [0, 1],
"header": [0, 1],
},
}
)
if "Storage" in technologies[reg].keys():
regional_data_ids[reg].update(
{
"storage_charge_efficiency": {
"sheet_name": "Storage_charge_efficiency",
"index_col": 0,
"header": 0,
},
"storage_discharge_efficiency": {
"sheet_name": "Storage_discharge_efficiency",
"index_col": 0,
"header": 0,
},
"storage_min_SOC": {
"sheet_name": "Storage_min_SOC",
"index_col": 0,
"header": 0,
},
"storage_initial_SOC": {
"sheet_name": "Storage_initial_SOC",
"index_col": 0,
"header": 0,
},
"storage_charge_time": {
"sheet_name": "Storage_charge_time",
"index_col": 0,
"header": 0,
},
"storage_discharge_time": {
"sheet_name": "Storage_discharge_time",
"index_col": 0,
"header": 0,
},
}
)
return regional_data_ids
# Constants of input set tables
global_set_ids = {
"Regions": ["Region", "Region_name"],
"Years": ["Year", "Year_name"],
"Timesteps": ["Timeslice", "Timeslice_name", "Timeslice_fraction"],
"Time_horizon": ["Start", "End"],
"Carriers_glob": ["Carrier", "Carr_name", "Carr_type", "Carr_unit"],
"Technologies_glob": ["Technology", "Tech_name", "Tech_category",
"Tech_cap_unit", "Tech_act_unit"],
"Emissions": ["Emission", "Emission_name", "Emission_unit"]
}
regional_set_ids = {
"Technologies": ["Technology", "Tech_name", "Tech_category"],
"Carriers": ["Carrier", "Carr_name", "Carr_type"],
"Carrier_input": ["Technology", "Carrier_in"],
"Carrier_output": ["Technology", "Carrier_out"],
}
technology_categories = [
"Supply",
"Conversion",
"Conversion_plus",
"Transmission",
"Demand",
"Storage",
]
carrier_types = ["Resource", "Intermediate", "Demand"]
|
from zeit.cms.i18n import MessageFactory as _
import grokcore.component as grok
import pkg_resources
import zeit.cms.content.dav
import zeit.cms.content.metadata
import zeit.cms.content.reference
import zeit.cms.interfaces
import zeit.cms.relation.interfaces
import zeit.cms.type
import zeit.content.video.interfaces
import zope.interface
@zope.interface.implementer(
zeit.content.video.interfaces.IPlaylist,
zeit.cms.interfaces.IAsset)
class Playlist(zeit.cms.content.metadata.CommonMetadata):
default_template = pkg_resources.resource_string(
__name__, 'playlist-template.xml').decode('utf-8')
videos = zeit.cms.content.reference.MultiResource(
'.body.videos.video', 'related')
id_prefix = 'pls'
class PlaylistType(zeit.cms.type.XMLContentTypeDeclaration):
title = _('Playlist')
interface = zeit.content.video.interfaces.IPlaylist
addform = zeit.cms.type.SKIP_ADD
factory = Playlist
type = 'playlist'
@grok.adapter(zeit.content.video.interfaces.IPlaylist, name='playlist')
@grok.implementer(zeit.cms.relation.interfaces.IReferenceProvider)
def references(context):
if context.videos:
return context.videos
return []
|
#!/usr/bin/env python
"""
dbase.py: instance methods for extending database classes
"""
# ----------------------ABSTRACT-BASE-CLASS-DATABASE----------------------#
class DBase:
"""
import sqlite3 module in extended module
Vars curs and conn should be declared in extended class
Values for both should be set at extended class __init__
"""
def execute(self, sql, *args):
self.curs.execute(sql, args)
self.conn.commit()
def insert(self, sql, *args):
self.curs.execute(sql, args)
self.conn.commit()
return self.curs.lastrowid
def fetch_rec(self, sql, *args):
self.curs.execute(sql, args)
return self.curs.fetchone()
def fetch_recs(self, sql, *args):
self.curs.execute(sql, args)
return self.curs.fetchall()
    def table_exists(self, table_name):
        sql = "SELECT name FROM sqlite_master WHERE type = 'table' AND name = ?"
        res = self.fetch_rec(sql, table_name)
        return res is not None
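# A minimal usage sketch (illustrative assumption, not part of the original API):
# an extending class supplies its own sqlite3 connection and cursor, after which
# the DBase helpers handle statement execution, commits, and row fetching.
class ExampleSqliteDB(DBase):
    """Hypothetical subclass showing how DBase expects to be extended."""
    def __init__(self, path=":memory:"):
        import sqlite3  # per the DBase docstring, the extending module imports sqlite3
        self.conn = sqlite3.connect(path)
        self.curs = self.conn.cursor()
# Assumed usage:
#     db = ExampleSqliteDB()
#     db.execute("CREATE TABLE notes (id INTEGER PRIMARY KEY, body TEXT)")
#     note_id = db.insert("INSERT INTO notes (body) VALUES (?)", "hello")
#     assert db.table_exists("notes")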
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBase']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
from sqlalchemy.sql import and_
from ensconce import model, exc
from ensconce.model import meta
from ensconce.autolog import log
from ensconce.util import pwhash
def get(password_id, assert_exists=True):
"""
Returns the operator object for specified ID.
    :param password_id: The ID of the operator to look up.
    :param assert_exists: Whether to raise :class:`exc.NoSuchEntity` if the entity does not exist (avoids a NoneType error later).
:rtype: :class:`model.Operator`
"""
session = meta.Session()
try:
user = session.query(model.Operator).get(password_id)
except:
log.exception("Unable to retrieve user: {0}".format(password_id))
raise
if assert_exists and not user:
raise exc.NoSuchEntity(model.Operator, password_id)
return user
def get_by_username(username, assert_exists=True):
"""
This function will attempt to match an operator by username.
    :param assert_exists: Whether to raise :class:`exc.NoSuchEntity` if the entity does not exist (avoids a NoneType error later).
"""
session = meta.Session()
try:
operator = session.query(model.Operator).filter_by(username=username).first()
except:
        # The query itself failed; a missing user is handled below.
log.exception("Unable to retrieve user for username: {0}".format(username))
raise
if assert_exists and not operator:
raise exc.NoSuchEntity(model.Operator, username)
return operator
def list(): # @ReservedAssignment
"""
This function will return all of the operators in the system.
"""
session = meta.Session()
try:
operators = session.query(model.Operator).order_by(model.Operator.username).all() # @UndefinedVariable
except:
log.exception("Error loading operator list.")
raise
else:
return operators
def create(username, password=None, access_id=None, externally_managed=False):
"""
This function will create an operator record in the database.
:rtype: :class:`ensconce.model.Operator`
"""
check = get_by_username(username, assert_exists=False)
# First, check to see if the given username exists.
if check:
raise ValueError("User already exists: {0}".format(username))
# Force the password to be null if it is empty (prevent logins w/ empty password)
if password == "":
password = None
session = meta.Session()
try:
operator = model.Operator()
operator.username = username
if password is not None:
operator.password = pwhash.obscure(password)
operator.access_id = access_id
operator.externally_managed = externally_managed
session.add(operator)
session.flush()
except:
log.exception("Error saving new operator_id.")
raise
return operator
def modify(operator_id, **kwargs):
"""
This function will attempt to modify the operator with the passed
in values.
:keyword username: The username for this operator.
:keyword password: The password for this operator.
:keyword access_id: The associated access level id for this operator.
"""
session = meta.Session()
update_attributes = kwargs # Just to make it clearer
log.debug("Update attribs = %r" % update_attributes)
# Force the password to be null if it is empty (prevent logins w/ empty password)
if update_attributes.get('password') == "":
update_attributes['password'] = None
try:
operator = get(operator_id)
modified = model.set_entity_attributes(operator, update_attributes, hashed_attributes=['password'])
session.flush()
except:
log.exception("Error modifying operator: {0}".format(operator_id))
raise
return (operator, modified)
def delete(password_id):
"""
This function will attempt to delete a operatorid from the database.
"""
session = meta.Session()
try:
operator = get(password_id)
session.delete(operator)
except:
log.exception("Unable to delete operator: {0}".format(password_id))
raise
return operator
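# --- Hedged usage sketch (not part of the original module) ---
# Assumes the application has already configured the database and bound
# meta.Session; the username and password below are placeholders.
if __name__ == "__main__":
    operator = create("alice", password="s3cret")
    assert get_by_username("alice").id == operator.id
    modify(operator.id, password="new-s3cret")
    delete(operator.id)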
|
import torch
import torch.nn as nn
import torch.nn.functional as F
#from .aspp import build_aspp
#from .decoder import build_decoder
from .resnet import ResNet101
class ASPP(nn.Module):
def __init__(self, num_classes):
super(ASPP, self).__init__()
dilations = [6, 12, 18, 24]
inplanes = 2048
self.aspp0 = nn.Conv2d(inplanes, num_classes, 3, 1, padding=dilations[0], dilation=dilations[0])
self.aspp1 = nn.Conv2d(inplanes, num_classes, 3, 1, padding=dilations[1], dilation=dilations[1])
self.aspp2 = nn.Conv2d(inplanes, num_classes, 3, 1, padding=dilations[2], dilation=dilations[2])
self.aspp3 = nn.Conv2d(inplanes, num_classes, 3, 1, padding=dilations[3], dilation=dilations[3])
self._init_weight()
def forward(self, x):
x0 = self.aspp0(x)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
out = x0 + x1 + x2 + x3
return out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DeepLab_V2(nn.Module):
def __init__(self, backbone='resnet', output_stride=8, num_classes=21,
sync_bn=False, freeze_bn=False, embedding='', seed=None):
assert seed is not None, 'seed is required'
torch.manual_seed(seed)
assert backbone == 'resnet', 'Other backbones have not been implemented: {}'.format(backbone)
super(DeepLab_V2, self).__init__()
if backbone == 'drn':
output_stride = 8
        # sync_bn can also be handled with torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) before wrapping in DDP.
        if sync_bn:
            BatchNorm = nn.SyncBatchNorm
        else:
            BatchNorm = nn.BatchNorm2d
self.backbone = ResNet101(output_stride, BatchNorm)
self.aspp = ASPP(num_classes)
self.freeze_bn = freeze_bn
# additional embedding for metric learning
if embedding:
self._embedding_type = embedding
self._in_embed_channels = 2048
if embedding == 'linear':
self.embedding = nn.Conv2d(self._in_embed_channels, 256, 1, bias=False)
elif embedding == 'mlp':
self.embedding = nn.Sequential(
nn.Conv2d(self._in_embed_channels, 512, 1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, 1, bias=False)
)
elif embedding == 'mlp3':
self.embedding = nn.Sequential(
nn.Conv2d(self._in_embed_channels, 512, 1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, 1, bias=False)
)
else:
raise RuntimeError(embedding)
self._init_embedding()
else:
self.embedding = None
def _init_embedding(self):
for m in self.embedding.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.01)
def set_mode(self, mode):
self._forward_mode = mode
def forward(self, input):
return getattr(self, 'forward_' + self._forward_mode)(input)
def forward_seg(self, input):
x, low_level_feat = self.backbone(input)
x = self.aspp(x)
#x = self.decoder(x, low_level_feat)
#x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def forward_seg_embed(self, input):
assert self.embedding is not None
x, low_level_feat = self.backbone(input)
embed = self.embedding(x)
x = self.aspp(x)
#x = self.decoder(x, low_level_feat)
#x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x, embed
    def freeze_bn_layers(self):
        # Renamed from freeze_bn: the boolean attribute self.freeze_bn assigned in
        # __init__ shadows a method of the same name, making it uncallable on instances.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
def get_1x_lr_params(self):
modules = [self.backbone, self.embedding]
for i in range(len(modules)):
for m in modules[i].named_modules():
if self.freeze_bn:
if isinstance(m[1], nn.Conv2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
else:
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_10x_lr_params(self):
modules = [self.aspp]
for i in range(len(modules)):
for m in modules[i].named_modules():
if self.freeze_bn:
if isinstance(m[1], nn.Conv2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
else:
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_param_groups(self):
lr_1x = self.get_1x_lr_params()
lr_10x = self.get_10x_lr_params()
return {1: lr_1x, 10: lr_10x}
if __name__ == "__main__":
#model = DeepLab_V2(backbone='mobilenet', output_stride=16)
#model.eval()
#input = torch.rand(1, 3, 513, 513)
#output = model(input)
#print(output.size())
pass
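    # Hedged usage sketch, kept commented like the lines above because the
    # relative import of ResNet101 prevents running this file as a script:
    # model = DeepLab_V2(backbone='resnet', output_stride=8, num_classes=21, seed=0)
    # model.set_mode('seg')
    # model.eval()
    # with torch.no_grad():
    #     output = model(torch.rand(1, 3, 321, 321))
    # print(output.size())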
|
#Sean Billings, 2015
import random
import numpy
import subprocess
from backend import constraints
from backend.experiment import W1Experiment
from backend.objectiveFunctions import WeightedSumObjectiveFunction, IdealDifferentialObjectiveFunction
import math
from backend.spea_optimizer import SpeaOptimizer
from backend.photonicCrystalDesign import PhCWDesign
from backend.paretoFunctions import ParetoMaxFunction
# absolute path to the mpb executable
mpb = "/Users/sean/documents/mpb-1.5/mpb/mpb"
# absolute path to the input ctl
inputFile = "/Users/sean/UniversityOfOttawa/Photonics/PCWO/W1_2D_v04.ctl.txt"
# absolute path to the output ctl
outputFile = "/Users/sean/UniversityOfOttawa/Photonics/PCWO/optimizerTestFile.txt"
# we define a general experiment object
# that we reuse whenever we need to make a command-line mpb call
# see experiment.py for functionality
experiment = W1Experiment(mpb, inputFile, outputFile)
# ex.setParams(paramVector)
experiment.setCalculationType('4') # accepts an int from 0 to 5
experiment.setBand(23)
paramMap = {}
paramMap["s1"] = 0 # First row vertical shift
paramMap["s2"] = 0 # Second row vertical shift
paramMap["s3"] = 0 # Third row vertical shift
#paramMap["p1"] = 0 # First row horizontal shift
#paramMap["p2"] = 0 # Second row horizontal shift
#paramMap["p3"] = 0 # Third row horizontal shift
paramMap["r0"] = 0.3 # Default air-hole radius
paramMap["r1"] = 0.3 # Default first row radius
paramMap["r2"] = 0.3 # Default second row radius
paramMap["r3"] = 0.3 # Default third row radius
# see constraints.py
constraintFunctions = [constraints.latticeConstraintsLD]
pcw = PhCWDesign(paramMap, 0, constraintFunctions)
#Initialize pareto function
key_map = {}
key_map["ng0"] = "max"
key_map["loss_at_ng0"] = "min"
pareto_function = ParetoMaxFunction(experiment, key_map)
#Optimizer parameters
max_generation = 10 # number of iterations of the SPEA algorithm
population_size = 10 # number of solutions to consider
pareto_archive_size = 8 # number of solutions to store after each generation
tournament_selection_rate = 5 # number of solutions to consider in crossover/mutation
# Run the optimizer
print("Starting SPEA")
population = SpeaOptimizer.createPopulation(population_size, pcw)
optimizer = SpeaOptimizer(pareto_function)
optimizer.optimize(population,max_generation,tournament_selection_rate, pareto_archive_size)
print("\nSPEA solutions generated")
|
import numpy as np
from kalman_estimation import Kalman4FROLS, Selector, get_mat_data
from tqdm import trange, tqdm, tqdm_notebook
import matplotlib.pyplot as plt
def corr_term(y_coef, terms_set, Kalman_S_No, var_name: str = 'x', step_name: str = 't'):
n_dim, n_term = y_coef.shape
func_repr = []
for var in range(n_dim):
y = {}
for term in range(n_term):
y[terms_set[Kalman_S_No[var, term]]] = y_coef[var, term]
func_repr.append(y)
return func_repr
def frokf(noise_var, ndim, dtype, terms, length, root='../data/', trials=100, uc=0.01, ntest=50):
assert dtype in ['linear', 'nonlinear'], 'type not support!'
ax = []
for trial in range(1, trials + 1):
# for trial in [trials]:
terms_path = root + f'{dtype}_terms{ndim}D_{noise_var:2.2f}trial{trial}.mat'
term = Selector(terms_path)
_ = term.make_terms()
normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term.make_selection()
# Kalman_S_No = np.sort(Kalman_S_No)
y_coef = 0
        # Average the FROKF coefficient estimates over ntest runs
for _ in trange(ntest):
kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=uc)
y_coef += kf.estimate_coef()
y_coef /= ntest
terms_set = corr_term(y_coef, candidate_terms, Kalman_S_No)
flatten_coef, t = [], 0
for i in range(ndim):
tmp = []
for k in terms[t:t+length[i]]:
tmp.append(terms_set[i][k] if k in terms_set[i] else np.nan)
flatten_coef.extend(tmp)
t += length[i]
ax.append(flatten_coef)
return np.stack(ax)
def frols(noise_var, ndim, dtype, terms, length, root='../data/', trial=1, trials=100):
assert dtype in ['linear', 'nonlinear'], 'type not support!'
terms_path = root + f'FROLS_{ndim}{dtype}_est100_{noise_var:2.2f}.mat'
terms_pathx = root + f'{dtype}_terms{ndim}D_0.50trial{trial}.mat'
term = Selector(terms_pathx)
candidate_terms = term.make_terms()
y_coef = get_mat_data(terms_path, 'coef_est100')
Kalman_S_No = get_mat_data(terms_path, 'terms_chosen100')
flatten_coef = []
for trial in trange(trials):
ax, t, S_No = [], 0, Kalman_S_No[trial] - 1
for i in range(ndim):
terms_set = corr_term(y_coef[trial], candidate_terms, S_No)
tmp = []
for k in terms[t:t+length[i]]:
tmp.append(terms_set[i][k] if k in terms_set[i] else np.nan)
ax.extend(tmp)
t += length[i]
flatten_coef.append(ax)
return np.stack(flatten_coef)
def frokf_sta(dtype, ndim, noise_var, root='../data/'):
name = f"{root}FROKF_{dtype}{ndim}D_{noise_var:2.2f}"
coef = get_mat_data(name, 'frokf_coef')
return coef.mean(0), coef.var(0)
def frols_sta(dtype, ndim, noise_var, root='../data/'):
name = f"{root}FROLS_{dtype}{ndim}D_{noise_var:2.2f}"
coef = get_mat_data(name, 'frols_coef')
return coef.mean(0), coef.var(0)
def get_term_dict(dtype, dim, root='../data/'):
terms_pathx = root + f'{dtype}_terms{dim}D_0.50trial1.mat'
term = Selector(terms_pathx)
candidate_terms = term.make_terms()
dict1 = {t:i for i, t in enumerate(candidate_terms)}
dict2 = {i:t for i, t in enumerate(candidate_terms)}
return dict1, dict2
|
"""
This class can be instantiated directly, or subclassed to change signature / add methods
"""
import logging
from abstract_http_client.http_clients.http_client_base import HttpClientBase
from abstract_http_client.http_services.requests_service import RequestsService
class RequestsClient(HttpClientBase):
""" Instantiate or Inherit from this class to start writing api client """
def __init__(
self,
host,
user="",
password="",
token="",
logger: logging.Logger = None,
port: int = None,
use_https=True,
ssl_verify=True,
proxies: dict = None,
show_insecure_warning=True,
):
super().__init__(user, password, token, logger)
self._rest_service = RequestsService(host, port, logger, use_https, ssl_verify, proxies, show_insecure_warning)
@property
def rest_service(self) -> RequestsService:
return self._rest_service
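# Hedged usage sketch (not part of the original module): host and token are
# placeholders; subclasses would add endpoint-specific methods that build on
# self.rest_service.
# class ExampleApiClient(RequestsClient):
#     pass
#
# client = ExampleApiClient(host="api.example.com", token="API_TOKEN")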
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This script requires the nipy-data package to run. It is an example of
simultaneous motion correction and slice timing correction in
multi-session fMRI data from the FIAC 2005 dataset. Specifically, it
uses the first two sessions of subject 'fiac0'.
Usage:
python space_time_realign.py
Two images will be created in the working directory for the realigned series::
rarun1.nii
rarun2.nii
Author: Alexis Roche, 2009.
"""
from __future__ import print_function # Python 2/3 compatibility
import os
from os.path import split as psplit, abspath
import numpy as np
from nipy.algorithms.registration import SpaceTimeRealign
from nipy import load_image, save_image
from nipy.utils import example_data
# Input images are provided with the nipy-data package
runnames = [example_data.get_filename('fiac', 'fiac0', run + '.nii.gz')
for run in ('run1', 'run2')]
runs = [load_image(run) for run in runnames]
# Spatio-temporal realigner assuming interleaved ascending slice order
R = SpaceTimeRealign(runs, tr=2.5, slice_times='asc_alt_2', slice_info=2)
# If you are not sure what the above is doing, you can alternatively
# declare slice times explicitly using the following equivalent code
"""
tr = 2.5
nslices = runs[0].shape[2]
slice_times = (tr / float(nslices)) *\
np.argsort(range(0, nslices, 2) + range(1, nslices, 2))
print('Slice times: %s' % slice_times)
R = SpaceTimeRealign(runs, tr=tr, slice_times=slice_times, slice_info=2)
"""
# Estimate motion within- and between-sessions
R.estimate(refscan=None)
# Resample data on a regular space+time lattice using 4d interpolation
# Save images
cwd = abspath(os.getcwd())
print('Saving results in: %s' % cwd)
for i in range(len(runs)):
corr_run = R.resample(i)
fname = 'ra' + psplit(runnames[i])[1]
save_image(corr_run, fname)
|
from invoke import task
from tasks.util.env import (
AZURE_RESOURCE_GROUP,
AZURE_SGX_VM_SIZE,
AZURE_SGX_LOCATION,
AZURE_SGX_VM_IMAGE,
AZURE_SGX_VM_NAME,
AZURE_SGX_VM_ADMIN_USERNAME,
AZURE_SGX_VM_SSH_KEY_FILE,
)
from subprocess import check_output, run
def _run_vm_cmd(name, az_args=None, capture_stdout=False):
cmd = [
"az",
"vm {}".format(name),
"--resource-group {}".format(AZURE_RESOURCE_GROUP),
]
if az_args:
cmd.extend(az_args)
cmd = " ".join(cmd)
print(cmd)
if capture_stdout:
return check_output(cmd, shell=True)
else:
run(cmd, shell=True, check=True)
def _run_del_network_cmd(name, suffix):
cmd = [
"az",
"network {}".format(name),
"delete",
"--resource-group {}".format(AZURE_RESOURCE_GROUP),
"--name {}{}".format(AZURE_SGX_VM_NAME, suffix),
]
cmd = " ".join(cmd)
print(cmd)
run(cmd, shell=True, check=True)
@task
def provision(ctx):
"""
Provision SGX-enabled VM
"""
_run_vm_cmd(
"create",
[
"--name {}".format(AZURE_SGX_VM_NAME),
"--location {}".format(AZURE_SGX_LOCATION),
"--image {}".format(AZURE_SGX_VM_IMAGE),
"--size {}".format(AZURE_SGX_VM_SIZE),
"--authentication-type ssh",
"--public-ip-sku Standard",
"--admin-username {}".format(AZURE_SGX_VM_ADMIN_USERNAME),
"--generate-ssh-keys",
"--ssh-key-values {}".format(AZURE_SGX_VM_SSH_KEY_FILE),
],
)
def get_os_disk():
out = _run_vm_cmd(
"show",
[
"--name {}".format(AZURE_SGX_VM_NAME),
"--query storageProfile.osDisk.managedDisk.id",
],
capture_stdout=True,
).decode("utf-8")
return out.split("/")[-1][:-2]
def get_ip():
out = check_output(
"az network public-ip show --resource-group {} --name {}PublicIp --query ipAddress".format(
AZURE_RESOURCE_GROUP, AZURE_SGX_VM_NAME
),
shell=True,
)
return out.decode("utf-8").strip().strip('"')
@task
def get_ssh(ctx):
"""
Get the SSH config to log into the SGX VM
"""
ssh_config = [
"Host {}".format(AZURE_SGX_VM_NAME),
"\n HostName {}".format(get_ip()),
"\n User {}".format(AZURE_SGX_VM_ADMIN_USERNAME),
"\n ForwardAgent yes",
]
print(" ".join(ssh_config))
@task
def delete(ctx):
"""
Delete SGX VM
"""
os_disk = get_os_disk()
# Delete VM
_run_vm_cmd(
"delete",
[
"--name {}".format(AZURE_SGX_VM_NAME),
"--yes",
],
)
# Delete OS disk
run(
"az disk delete --resource-group {} --name {} --yes".format(
AZURE_RESOURCE_GROUP, os_disk
),
shell=True,
)
# Network components to be deleted, order matters
net_components = [
("nic", "VMNic"),
("nsg", "NSG"),
("vnet", "VNET"),
("public-ip", "PublicIp"),
]
for name, suffix in net_components:
_run_del_network_cmd(name, suffix)
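# Hedged usage note (not part of the original file): with these tasks collected
# by invoke, the intended workflow is roughly:
#   inv provision   # create the SGX-enabled VM
#   inv get-ssh     # print an SSH config block for the VM (or get_ssh, depending on invoke's auto-dash setting)
#   inv delete      # delete the VM, then its OS disk and network components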
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
if __name__ == '__main__':
tfds.testing.test_main()
|
import os
import random
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def load_image(path):
return Image.open(path).convert('RGB')
def make_sync_dataset(root, label, ds_name='synROD'):
images = []
labeltxt = open(label)
for line in labeltxt:
data = line.strip().split(' ')
if not is_image_file(data[0]):
continue
path = os.path.join(root, data[0])
if ds_name == 'synROD' or ds_name in ['synHB', 'valHB']:
path_rgb = path.replace('***', 'rgb')
path_depth = path.replace('***', 'depth')
elif ds_name == 'ROD':
path_rgb = path.replace('***', 'crop')
path_rgb = path_rgb.replace('???', 'rgb')
path_depth = path.replace('***', 'depthcrop')
path_depth = path_depth.replace('???', 'surfnorm')
else:
raise ValueError('Unknown dataset {}. Known datasets are synROD, synHB, ROD, valHB'.format(ds_name))
gt = int(data[1])
item = (path_rgb, path_depth, gt)
images.append(item)
return images
def get_relative_rotation(rgb_rot, depth_rot):
rel_rot = rgb_rot - depth_rot
if rel_rot < 0:
rel_rot += 4
assert rel_rot in range(4)
return rel_rot
class MyTransformer(object):
def __init__(self, crop, flip):
super(MyTransformer, self).__init__()
self.crop = crop
self.flip = flip
self.angles = [0, 90, 180, 270]
def __call__(self, img, rot=None):
img = TF.resize(img, (256, 256))
img = TF.crop(img, self.crop[0], self.crop[1], 224, 224)
if self.flip:
img = TF.hflip(img)
if rot is not None:
img = TF.rotate(img, self.angles[rot])
img = TF.to_tensor(img)
img = TF.normalize(img, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
return img
class DatasetGeneratorMultimodal(Dataset):
def __init__(self, root, label, ds_name='synROD', do_rot=False, transform=None):
imgs = make_sync_dataset(root, label, ds_name=ds_name)
self.root = root
self.label = label
self.imgs = imgs
self.transform = transform
self.do_rot = do_rot
def __getitem__(self, index):
path_rgb, path_depth, target = self.imgs[index]
img_rgb = load_image(path_rgb)
img_depth = load_image(path_depth)
rot_rgb = None
rot_depth = None
# If a custom transform is specified apply that transform
if self.transform is not None:
img_rgb = self.transform(img_rgb)
img_depth = self.transform(img_depth)
else: # Otherwise define a random one (random cropping, random horizontal flip)
top = random.randint(0, 256 - 224)
left = random.randint(0, 256 - 224)
flip = random.choice([True, False])
if self.do_rot:
rot_rgb = random.choice([0, 1, 2, 3])
rot_depth = random.choice([0, 1, 2, 3])
transform = MyTransformer([top, left], flip)
# Apply the same transform to both modalities, rotating them if required
img_rgb = transform(img_rgb, rot_rgb)
img_depth = transform(img_depth, rot_depth)
if self.do_rot and (self.transform is None):
return img_rgb, img_depth, target, get_relative_rotation(rot_rgb, rot_depth)
return img_rgb, img_depth, target
def __len__(self):
return len(self.imgs)
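# Hedged usage sketch (not part of the original file): the paths and the label
# file format ("<path with *** placeholder> <class id>" per line) are assumed
# from make_sync_dataset above.
# if __name__ == '__main__':
#     dataset = DatasetGeneratorMultimodal('/data/synROD', '/data/synROD/labels.txt',
#                                          ds_name='synROD', do_rot=True)
#     rgb, depth, target, rel_rot = dataset[0]
#     print(rgb.shape, depth.shape, target, rel_rot)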
|
# -*- coding: utf-8 -*-
"""
RIMM, ROMM and ERIMM Encodings
==============================
Defines the *RIMM, ROMM and ERIMM* encodings:
- :attr:`colour.models.RGB_COLOURSPACE_ROMM_RGB`.
- :attr:`colour.models.RGB_COLOURSPACE_RIMM_RGB`.
- :attr:`colour.models.RGB_COLOURSPACE_ERIMM_RGB`.
- :attr:`colour.models.RGB_COLOURSPACE_PROPHOTO_RGB`.
References
----------
- :cite:`ANSI2003a` : ANSI. (2003). Specification of ROMM RGB (pp. 1-2).
http://www.color.org/ROMMRGB.pdf
- :cite:`Spaulding2000b` : Spaulding, K. E., Woolfe, G. J., & Giorgianni, E.
J. (2000). Reference Input/Output Medium Metric RGB Color Encodings
(RIMM/ROMM RGB) (pp. 1-8). http://www.photo-lovers.org/pdf/color/romm.pdf
"""
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import (
RGB_Colourspace, cctf_encoding_ROMMRGB, cctf_decoding_ROMMRGB,
cctf_encoding_RIMMRGB, cctf_decoding_RIMMRGB, log_encoding_ERIMMRGB,
log_decoding_ERIMMRGB, cctf_encoding_ProPhotoRGB,
cctf_decoding_ProPhotoRGB)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'PRIMARIES_ROMM_RGB', 'WHITEPOINT_NAME_ROMM_RGB',
'CCS_WHITEPOINT_ROMM_RGB', 'MATRIX_ROMM_RGB_TO_XYZ',
'MATRIX_XYZ_TO_ROMM_RGB', 'RGB_COLOURSPACE_ROMM_RGB', 'PRIMARIES_RIMM_RGB',
'WHITEPOINT_NAME_RIMM_RGB', 'CCS_WHITEPOINT_RIMM_RGB',
'MATRIX_RIMM_RGB_TO_XYZ', 'MATRIX_XYZ_TO_RIMM_RGB',
'RGB_COLOURSPACE_RIMM_RGB', 'PRIMARIES_ERIMM_RGB',
'WHITEPOINT_NAME_ERIMM_RGB', 'CCS_WHITEPOINT_ERIMM_RGB',
'MATRIX_ERIMM_RGB_TO_XYZ', 'MATRIX_XYZ_TO_ERIMM_RGB',
'RGB_COLOURSPACE_ERIMM_RGB', 'PRIMARIES_PROPHOTO_RGB',
'WHITEPOINT_NAME_PROPHOTO_RGB', 'CCS_WHITEPOINT_PROPHOTO_RGB',
'MATRIX_PROPHOTO_RGB_TO_XYZ', 'MATRIX_XYZ_TO_PROPHOTO_RGB',
'RGB_COLOURSPACE_PROPHOTO_RGB'
]
PRIMARIES_ROMM_RGB = np.array([
[0.7347, 0.2653],
[0.1596, 0.8404],
[0.0366, 0.0001],
])
"""
*ROMM RGB* colourspace primaries.
PRIMARIES_ROMM_RGB : ndarray, (3, 2)
"""
WHITEPOINT_NAME_ROMM_RGB = 'D50'
"""
*ROMM RGB* colourspace whitepoint name.
WHITEPOINT_NAME_ROMM_RGB : unicode
"""
CCS_WHITEPOINT_ROMM_RGB = (CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][WHITEPOINT_NAME_ROMM_RGB])
"""
*ROMM RGB* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_ROMM_RGB : ndarray
"""
MATRIX_ROMM_RGB_TO_XYZ = np.array([
[0.7977, 0.1352, 0.0313],
[0.2880, 0.7119, 0.0001],
[0.0000, 0.0000, 0.8249],
])
"""
*ROMM RGB* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_ROMM_RGB_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_ROMM_RGB = np.array([
[1.3460, -0.2556, -0.0511],
[-0.5446, 1.5082, 0.0205],
[0.0000, 0.0000, 1.2123],
])
"""
*CIE XYZ* tristimulus values to *ROMM RGB* colourspace matrix.
MATRIX_XYZ_TO_ROMM_RGB : array_like, (3, 3)
"""
RGB_COLOURSPACE_ROMM_RGB = RGB_Colourspace(
'ROMM RGB',
PRIMARIES_ROMM_RGB,
CCS_WHITEPOINT_ROMM_RGB,
WHITEPOINT_NAME_ROMM_RGB,
MATRIX_ROMM_RGB_TO_XYZ,
MATRIX_XYZ_TO_ROMM_RGB,
cctf_encoding_ROMMRGB,
cctf_decoding_ROMMRGB,
)
RGB_COLOURSPACE_ROMM_RGB.__doc__ = """
*ROMM RGB* colourspace.
References
----------
:cite:`ANSI2003a`, :cite:`Spaulding2000b`
RGB_COLOURSPACE_ROMM_RGB : RGB_Colourspace
"""
PRIMARIES_RIMM_RGB = PRIMARIES_ROMM_RGB
"""
*RIMM RGB* colourspace primaries.
PRIMARIES_RIMM_RGB : ndarray, (3, 2)
"""
WHITEPOINT_NAME_RIMM_RGB = WHITEPOINT_NAME_ROMM_RGB
"""
*RIMM RGB* colourspace whitepoint name.
WHITEPOINT_NAME_RIMM_RGB : unicode
"""
CCS_WHITEPOINT_RIMM_RGB = CCS_WHITEPOINT_ROMM_RGB
"""
*RIMM RGB* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_RIMM_RGB : ndarray
"""
MATRIX_RIMM_RGB_TO_XYZ = MATRIX_ROMM_RGB_TO_XYZ
"""
*RIMM RGB* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_RIMM_RGB_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_RIMM_RGB = MATRIX_XYZ_TO_ROMM_RGB
"""
*CIE XYZ* tristimulus values to *RIMM RGB* colourspace matrix.
MATRIX_XYZ_TO_RIMM_RGB : array_like, (3, 3)
"""
RGB_COLOURSPACE_RIMM_RGB = RGB_Colourspace(
'RIMM RGB',
PRIMARIES_RIMM_RGB,
CCS_WHITEPOINT_RIMM_RGB,
WHITEPOINT_NAME_RIMM_RGB,
MATRIX_RIMM_RGB_TO_XYZ,
MATRIX_XYZ_TO_RIMM_RGB,
cctf_encoding_RIMMRGB,
cctf_decoding_RIMMRGB,
)
RGB_COLOURSPACE_RIMM_RGB.__doc__ = """
*RIMM RGB* colourspace. In cases in which it is necessary to identify a
specific precision level, the notation *RIMM8 RGB*, *RIMM12 RGB* and
*RIMM16 RGB* is used.
References
----------
:cite:`Spaulding2000b`
RGB_COLOURSPACE_RIMM_RGB : RGB_Colourspace
"""
PRIMARIES_ERIMM_RGB = PRIMARIES_ROMM_RGB
"""
*ERIMM RGB* colourspace primaries.
PRIMARIES_ERIMM_RGB : ndarray, (3, 2)
"""
WHITEPOINT_NAME_ERIMM_RGB = WHITEPOINT_NAME_ROMM_RGB
"""
*ERIMM RGB* colourspace whitepoint name.
WHITEPOINT_NAME_ERIMM_RGB : unicode
"""
CCS_WHITEPOINT_ERIMM_RGB = CCS_WHITEPOINT_ROMM_RGB
"""
*ERIMM RGB* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_ERIMM_RGB : ndarray
"""
MATRIX_ERIMM_RGB_TO_XYZ = MATRIX_ROMM_RGB_TO_XYZ
"""
*ERIMM RGB* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_ERIMM_RGB_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_ERIMM_RGB = MATRIX_XYZ_TO_ROMM_RGB
"""
*CIE XYZ* tristimulus values to *ERIMM RGB* colourspace matrix.
MATRIX_XYZ_TO_ERIMM_RGB : array_like, (3, 3)
"""
RGB_COLOURSPACE_ERIMM_RGB = RGB_Colourspace(
'ERIMM RGB',
PRIMARIES_ERIMM_RGB,
CCS_WHITEPOINT_ERIMM_RGB,
WHITEPOINT_NAME_ERIMM_RGB,
MATRIX_ERIMM_RGB_TO_XYZ,
MATRIX_XYZ_TO_ERIMM_RGB,
log_encoding_ERIMMRGB,
log_decoding_ERIMMRGB,
)
RGB_COLOURSPACE_ERIMM_RGB.__doc__ = """
*ERIMM RGB* colourspace.
References
----------
:cite:`Spaulding2000b`
RGB_COLOURSPACE_ERIMM_RGB : RGB_Colourspace
"""
PRIMARIES_PROPHOTO_RGB = PRIMARIES_ROMM_RGB
"""
*ProPhoto RGB* colourspace primaries.
PRIMARIES_PROPHOTO_RGB : ndarray, (3, 2)
"""
WHITEPOINT_NAME_PROPHOTO_RGB = WHITEPOINT_NAME_ROMM_RGB
"""
*ProPhoto RGB* colourspace whitepoint name.
WHITEPOINT_NAME_PROPHOTO_RGB : unicode
"""
CCS_WHITEPOINT_PROPHOTO_RGB = CCS_WHITEPOINT_ROMM_RGB
"""
*ProPhoto RGB* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_PROPHOTO_RGB : ndarray
"""
MATRIX_PROPHOTO_RGB_TO_XYZ = MATRIX_ROMM_RGB_TO_XYZ
"""
*ProPhoto RGB* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_PROPHOTO_RGB_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_PROPHOTO_RGB = MATRIX_XYZ_TO_ROMM_RGB
"""
*CIE XYZ* tristimulus values to *ProPhoto RGB* colourspace matrix.
MATRIX_XYZ_TO_PROPHOTO_RGB : array_like, (3, 3)
"""
RGB_COLOURSPACE_PROPHOTO_RGB = RGB_Colourspace(
'ProPhoto RGB',
PRIMARIES_PROPHOTO_RGB,
CCS_WHITEPOINT_PROPHOTO_RGB,
WHITEPOINT_NAME_PROPHOTO_RGB,
MATRIX_PROPHOTO_RGB_TO_XYZ,
MATRIX_XYZ_TO_PROPHOTO_RGB,
cctf_encoding_ProPhotoRGB,
cctf_decoding_ProPhotoRGB,
)
RGB_COLOURSPACE_PROPHOTO_RGB.__doc__ = """
*ProPhoto RGB* colourspace, an alias colourspace for *ROMM RGB*.
References
----------
:cite:`ANSI2003a`, :cite:`Spaulding2000b`
RGB_COLOURSPACE_PROPHOTO_RGB : RGB_Colourspace
"""
|
from kubernetes import config
# from __future__ import print_function
import time
import kubernetes.client
from kubernetes.client.rest import ApiException
from pprint import pprint
# Configure API key authorization: BearerToken
configuration = kubernetes.client.Configuration()
configuration.api_key['authorization'] = 'eyJhbGciOiJSUzI1NiIsImtpZCI6IkN0VDcyLXJhSXd1cXBCdXFmQUp2dDBYd01DcU1kS29aRERBai1hRDVzTkkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tcjc0aHgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjRhMDdjMDViLWY0OWItNDBlMi1iZmRkLTJiOTY3MTY0YTU4MSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.GcUiuy8Q6j-f_6PiBXT2rvOY4Q83hxFy96FBRjUL2IoRvkDqaieyQVAKbu3_lte6Pg7LEzW797C_2VwP9xo1us5FDXN6KnmDMdXOU9EjFFrjt1F7IGR_OfnCOmZEG5Diydu2IvzJTq6N_lRMjAZOOq7Rbr8h7AimI2G_W6LJvCxG6-8Ppo-3qjv7vMeFvmG9VUsKcLp-IZyXdTI4OGuuvcOaHiwCzwbI9Uojwi279STsXpnoOj089yeE9sA9QDXpWZqVBonNZpF2Xb9ZPAOYWgrCkx0mT28wLaxWaRWP39ugUmDpct0wxo0eLSwpjOYqqa7HoYpjP20iIyUP5d0OkQ'
# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed
# configuration.api_key_prefix['authorization'] = 'Bearer'
# create an instance of the API class
api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))
namespace = 'namespace_example' # str | object name and auth scope, such as for teams and projects
body = kubernetes.client.V1Pod() # V1Pod |
pretty = 'pretty_example' # str | If 'true', then the output is pretty printed. (optional)
dry_run = 'dry_run_example' # str | When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed (optional)
field_manager = 'field_manager_example' # str | fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. (optional)
try:
api_response = api_instance.create_namespaced_pod(namespace, body, pretty=pretty, dry_run=dry_run, field_manager=field_manager)
pprint(api_response)
except ApiException as e:
print("Exception when calling CoreV1Api->create_namespaced_pod: %s\n" % e)
|
"""
Module containing code to work with Move observational data
"""
from netCDF4 import Dataset, num2date, date2num
import datetime
import numpy as np
from metric import utils
class MoveObs(object):
""" Template class to interface with observed ocean transports """
def __init__(self, f, time_avg=None, mindt=None, maxdt=None):
""" Create instance holding ocean transport data """
self.f = f
self.time_avg = time_avg
self.mindt = mindt
self.maxdt = maxdt
self._read_data()
def _read_data(self):
""" Abstract method to read data and apply time averaging """
pass
def _read_dates(self):
""" Abstract method to initialized dates """
pass
def _ym_dates(self):
""" Return yearly mean date time objects """
ym_dates = []
for yr in range(self.yy.min(), self.yy.max()+1):
ind = (self.yy == yr)
if ind.any():
ym_dates.append(datetime.datetime(yr, 7, 1))
return np.array(ym_dates)
def _mm_dates(self):
""" Return monthly mean date time objects """
mm_dates = []
for yr in range(self.yy.min(), self.yy.max()+1):
for mon in range(1,12+1):
ind = (self.yy == yr) & (self.mm == mon)
if ind.any():
mm_dates.append(datetime.datetime(yr, mon, 15))
return np.array(mm_dates)
def _calc_ym(self, data, profile=False):
""" Return yearly mean values """
ym_data = []
for yr in range(self.yy.min(), self.yy.max()+1):
ind = (self.yy == yr)
if ind.any():
if profile:
ym_data.append(np.mean(data[ind,:],axis=0))
else:
ym_data.append(np.mean(data[ind]))
return np.array(ym_data)
def _calc_mm(self, data, profile=False, profile_bdry=False):
""" Return monthly mean values """
mm_data = []
for yr in range(self.yy.min(), self.yy.max()+1):
for mon in range(1,12+1):
ind = (self.yy == yr) & (self.mm == mon)
if ind.any():
if profile:
mm_data.append(np.mean(data[ind,:],axis=0))
elif profile_bdry:
mm_data.append(np.mean(data[:,ind,:],axis=1))
else:
mm_data.append(np.mean(data[ind]))
return np.array(mm_data)
def _readnc(self, ncvar):
""" Read variable from netcdf file """
nc = Dataset(self.f)
data = nc.variables[ncvar][:]
nc.close()
return data
class TransportObs(MoveObs):
"""
Sub-class to hold volume transport observations
from the MOVE array at 16N.
Data source:
http://mooring.ucsd.edu/dev/move/
Data reference:
http://dx.doi.org/10.1016/j.dsr.2005.12.007
http://dx.doi.org/10.1029/2011GL049801
"""
def _read_data(self):
""" Read data and apply time averaging """
self._read_dates()
if self.time_avg is None:
self.dates = self.original_dates
self.trans_total = self._readnc('TRANSPORT_TOTAL')
self.trans_int = self._readnc('transport_component_internal')
self.trans_int_offset = self._readnc('transport_component_internal_offset')
self.trans_bdry = self._readnc('transport_component_boundary')
elif self.time_avg == 'monthly':
self.dates = self._mm_dates()
self.trans_total = self._calc_mm(self._readnc('TRANSPORT_TOTAL'))
self.trans_int = self._calc_mm(self._readnc('transport_component_internal'))
self.trans_int_offset = self._calc_mm(self._readnc('transport_component_internal_offset'))
self.trans_bdry = self._calc_mm(self._readnc('transport_component_boundary'))
elif self.time_avg == 'yearly':
self.dates = self._ym_dates()
self.trans_total = self._calc_ym(self._readnc('TRANSPORT_TOTAL'))
self.trans_int = self._calc_ym(self._readnc('transport_component_internal'))
self.trans_int_offset = self._calc_ym(self._readnc('transport_component_internal_offset'))
self.trans_bdry = self._calc_ym(self._readnc('transport_component_boundary'))
else:
print(self.time_avg)
raise ValueError('time_avg must be "monthly" or "yearly"')
if (self.mindt is not None) and (self.maxdt is not None):
tind = utils.get_dateind(self.dates, self.mindt, self.maxdt)
self.trans_total = self.trans_total[tind]
self.trans_int = self.trans_int[tind]
self.trans_int_offset = self.trans_int_offset[tind]
self.trans_bdry = self.trans_bdry[tind]
self.dates = self.dates[tind]
def _read_dates(self):
""" Read date information from file """
nc = Dataset(self.f)
t = nc.variables['TIME']
self.original_dates = num2date(t[:],units=t.units)
        self.hh = np.array([dt.hour for dt in self.original_dates], dtype=int)
        self.dd = np.array([dt.day for dt in self.original_dates], dtype=int)
        self.mm = np.array([dt.month for dt in self.original_dates], dtype=int)
        self.yy = np.array([dt.year for dt in self.original_dates], dtype=int)
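# Hedged usage sketch (not part of the original module): the file name is a
# placeholder for the MOVE transport NetCDF file distributed by the project.
if __name__ == '__main__':
    obs = TransportObs('OS_MOVE_TRANSPORTS.nc', time_avg='monthly')
    print(obs.dates[:3])
    print(obs.trans_total[:3])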
|
"""
NSLS2 V2
---------
"""
# :author: Lingyun Yang <lyyang@bnl.gov>
import logging
#APHLA_LOG = os.path.join(tempfile.gettempdir(), "aphla.log")
#APHLA_LOG = 'aphla.nsls2v2.log'
#logging.basicConfig(filename=APHLA_LOG,
# format='%(asctime)s - %(name)s [%(levelname)s]: %(message)s',
# level=logging.DEBUG)
#_lgfmt = logging.Formatter("%(asctime)s - %(name)s [%(levelname)s]: %(message)s")
# set null handler when logging for a library.
#_lghdl = logging.FileHandler(filename=APHLA_LOG)
#_lghdl.setLevel(logging.DEBUG)
#_lghdl.setFormatter(_lgfmt)
#logger = logging.getLogger(__name__)
#logger.addHandler(_lghdl)
#logger.info("Testing")
from machinit import *
|
from nltk.util import ngrams
from collections import Counter
from xdnlp.utils import read_lines
import math
import joblib
import tqdm
class Gibberish(object):
def __init__(self):
self.counts = Counter()
self.total = 0
self.threshold = 0
def adapt(self, filename: str, total=None):
for line in tqdm.tqdm(read_lines(filename), total=total):
for a, b in ngrams(line, 2):
self.counts[a + b] += 1
self.total += 1
def adapt_threshold(self, goodfile: str, badfile: str):
good = [self.average_prob(line) for line in read_lines(goodfile)]
bad = [self.average_prob(line) for line in read_lines(badfile)]
assert min(good) > max(bad)
self.threshold = (min(good) + max(bad)) / 2
def average_prob(self, query):
assert self.total > 0, "Gibberish: should adapt first"
pt = 0
t = 0
for a, b in ngrams(query, 2):
p = self.counts.get(a + b, 1) / self.total
pt += math.log(p)
t += 1
return pt / t
def load(self, filename: str):
t = joblib.load(filename)
self.counts = t.get("counts", self.counts)
self.total = t.get("total", self.total)
self.threshold = t.get("threshold", self.threshold)
def predict(self, query):
assert self.total > 0, "Gibberish: should adapt first"
assert self.threshold != 0, "Gibberish: should set threshold first"
return self.average_prob(query) > self.threshold
def save(self, filename):
assert self.total > 0, "Gibberish: should adapt first"
assert self.threshold != 0, "Gibberish: should set threshold first"
joblib.dump({"counts": self.counts, "total": self.total, "threshold": self.threshold}, filename)
if __name__ == '__main__':
gib = Gibberish()
# gib.adapt("/home/geb/PycharmProjects/xdnlp/local/gibberish_data/data.txt")
# joblib.dump({"counts": gib.counts, "total": gib.total}, "gib.model")
gib.load("gib.model")
e = gib.average_prob("ahsfuia")
print(e)
e = gib.average_prob("good")
print(e)
e = gib.average_prob("我是你爹")
print(e)
e = gib.average_prob("竺諸聞瀧韋")
print(e)
e = gib.average_prob("一起喵喵猫")
print(e)
e = gib.average_prob("熙熙菋菂夏天")
print(e)
e = gib.average_prob("碎花今如梦")
print(e)
e = gib.average_prob("若懷ほ稽")
print(e)
|
'''
To get all attributes and methods of an object, use the dir() function.
It returns a list of strings; for example, it can list every attribute and method of a str object.
hasattr() tests whether an object has a given attribute.
setattr() sets an attribute.
getattr() reads an attribute; getattr(obj, 'y') is equivalent to obj.y.
'''
a = 'ABC'
print(dir('ABC'), '\n',
len('ABC'), 'ABC'.__len__())
print('ABC'.__eq__(a), 'ABC'.__format__('123')) # output: True
class MyDog(object):
def __init__(self, len=10, eat='meat'):
self.__len = len
self.__eat = eat
def __len__(self):
return self.__len
def eat(self):
return self.__eat
dog = MyDog(15)
dog1 = MyDog(10,'rice')
print(len(dog), len(dog1), '\n',
dog.eat(), dog1.eat())
class Animal(object):
def run(self):
print('Animal is running...')
class Dog(Animal):
def run(self):
print('Dog is running...')
def eat(self):
        print('Eating meat...')
class Cat(Animal):
    def run(self):
print('Cat is running...')
a = list()
b = Animal()
c = Dog()
print(type(123), type('str'))
print(type(abs))
print(type(b), type(a), type(c))
print(type(123) == int,
type('b') == type(c),
type('abc') == type('123'))
# Check whether an object is a function
import types
def fn():
pass
print(type(fn) == types.FunctionType,
type(lambda x: x) == types.LambdaType,
type(abs) == types.BuiltinFunctionType,
type((x for x in range(10))) == types.GeneratorType)
class Husky(Dog):
pass
a = Animal()
d = Dog()
h = Husky()
print('\n',
isinstance(h, Husky),
isinstance(h, Dog), isinstance(h, Animal), '\n',
isinstance(b'a', bytes), isinstance([1, 2, 3], (list, tuple)),
isinstance((1, 2, 3), (list, tuple)))
print('\n',
'\n',
'\n')
class MyObject(object):
def __init__(self):
self.x = 9
def power(self):
return self.x * self.x
obj = MyObject()
print(hasattr(obj, 'x'), hasattr(obj, 'y'), setattr(obj, 'y', 19), hasattr(obj, 'y'))
print(obj.y, getattr(obj, 'y'))
print(getattr(obj, 'z', 404))
print(hasattr(obj, 'power'),
getattr(obj, 'power'))
fn = getattr(obj, 'power')
print(fn(), obj.power())
# Only reach for introspection like this when the object's attributes are not known in advance
# example
def readImage(fp):
if(hasattr(fp, 'read')):
return readData(fp)
return None
print(readImage(obj))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from svgplotlib.SVG.Parsers import Lexer, EOF
class ParsePathError(Exception):
pass
class Path(Lexer):
"""
Break SVG path data into tokens.
The SVG spec requires that tokens are greedy.
This lexer relies on Python's regexes defaulting to greediness.
"""
numfloat = object()
numint = object()
numexp = object()
string = object()
    skip = object()
lexicon = ( \
(numfloat , Lexer.Float),
(numint , Lexer.Int),
(numexp , r'(?:[Ee][-\+]?\d+)'),
(string , r'[AaCcHhLlMmQqSsTtVvZz]'),
(skip , r'[, \n]'),
)
ignore = frozenset((skip,))
callbacks = {
numfloat : lambda self,value: float(value),
numint : lambda self,value: float(value),
numexp : lambda self,value: float('1.'+value)
}
numbers = frozenset((numfloat, numint, numexp))
def __init__(self):
Lexer.__init__(self)
def assertion(self, condition, msg = ''):
if not condition:
raise ParsePathError(msg)
def iterparse(self, text):
"""
Parse a string of SVG <path> data.
"""
assertion = self.assertion
numbers = self.numbers
string = self.string
        next = self.lex(text).__next__
token, value = next()
while token != EOF:
assertion(token is string, 'Expected string in path data')
cmd = value
CMD = value.upper()
# closePath
if CMD in 'Z':
token, value = next()
yield (cmd, (None,))
# moveTo, lineTo, curve, smoothQuadraticBezier, quadraticBezier, smoothCurve
elif CMD in 'CMLTQS':
coords = []
token, value = next()
while token in numbers:
last = value
coords.append(last)
token, value = next()
assertion(token in numbers, 'Expected number in path data')
coords.append(value)
token, value = next()
if CMD == 'C':
assertion(len(coords) % 3 == 0, 'Expected coordinate triplets in path data')
yield (cmd, tuple(coords))
# horizontalLine or verticalLine
elif CMD in 'HV':
coords = []
token, value = next()
assertion(token in numbers, 'Expected number')
while token in numbers:
coords.append(value)
token, value = next()
yield (cmd, tuple(coords))
# ellipticalArc
elif CMD == 'A':
coords = []
token, value = next()
assertion(token in numbers and value > 0, 'expected positive number in path data')
while token in numbers:
rx = value
coords.append(rx)
token, ry = next()
assertion(token in numbers and ry > 0, 'expected positive number in path data')
coords.append(ry)
token, rotation = next()
assertion(token in numbers, 'expected number in path data')
coords.append(rotation)
token, largearc = next()
assertion(token in numbers, 'expected 0 or 1 in path data')
coords.append(largearc)
token, sweeparc = next()
assertion(token in numbers, 'expected 0 or 1 in path data')
coords.append(sweeparc)
token, x = next()
assertion(token in numbers, 'expected number in path data')
coords.append(x)
token, y = next()
assertion(token in numbers, 'expected number in path data')
coords.append(y)
token, value = next()
yield (cmd, coords)
else:
raise ParsePathError("cmd '%s' in path data not supported" % cmd)
parsePath = Path()
if __name__ == '__main__':
    print(tuple(parsePath.iterparse("M250,150 L150,350 L350,350 Z")))
|
import sys
from PyQt5 import QtWidgets,QtCore
from PyQt5.QtWidgets import QApplication, QWidget
app = QApplication(sys.argv)
fen = QWidget()
fen.setWindowTitle("M2I&MQT PyQt Test")
fen.resize(300,200)
fen.move(300,50)
lineEdit = QtWidgets.QLineEdit(fen)
lineEdit.setGeometry(QtCore.QRect(100, 60, 111, 21))
lineEdit.setObjectName("lineEdit")
label = QtWidgets.QLabel(fen)
label.setGeometry(QtCore.QRect(20, 60, 91, 21))
label.setText("PyQt5, OK?")
pushButton = QtWidgets.QPushButton(fen)
pushButton.setGeometry(QtCore.QRect(110, 130, 75, 23))
pushButton.setText("Valider")
fen.show()
app.exec_()
|
"""
Created on Sat Jan 7 14:53:57 2017
@author: kamila
load_data: load all .npy activation files for a given layer from data_folder.
    Input: data_folder
    Output: data
choose_patch: pick a few random kernel-sized x depth regions per sample and reshape them.
    Input: data - batch x depth x width x height
    Output: reshaped array of the selected patches - n x m
"""
import numpy as np
import os
from random import randint
import torch as t
def choose_patch(data):
kernel = 1
dimention = np.shape(data)
n = dimention[0]
xdim = dimention[2]
ydim = dimention[3]
if xdim < kernel:
kernel = 2
selected_data = []
num_regions = 4
for i in range(0,n):
for j in range(0,num_regions):
x = randint(0,xdim-kernel)
y = randint(0,ydim-kernel)
selected_data.append(data[i,:,x:(x+kernel),y:(y+kernel)])
dimention = np.shape(selected_data)
n = dimention[0]
m = dimention[1]*dimention[2]*dimention[3]
selected_data = np.reshape(selected_data, (n,m))
return selected_data
def choose_single_patch(data):
dimention = np.shape(data)
n = dimention[0]
xdim = dimention[2]
ydim = dimention[3]
selected_data = []
num_regions = 1
for i in range(0,n):
for j in range(0,num_regions):
x = 0
y = 0
selected_data.append(data[i,:,x:(x+1),y:(y+1)])
dimention = np.shape(selected_data)
n = dimention[0]
m = dimention[1]*dimention[2]*dimention[3]
selected_data = np.reshape(selected_data, (n,m))
return selected_data
def read_data(data_folder):
selected_data = []
for file in os.listdir(data_folder):
if file.endswith("0.npy") or file.endswith("2.npy")or file.endswith("3.npy")or file.endswith("4.npy") or file.endswith("5.npy"):
data_var = t.load(os.path.join(data_folder, file))
selected_data.append(data_var)
return selected_data
def load_data(data_folder):
selected_data = read_data(data_folder)
dimention = np.shape(selected_data)
print('Dimention of data ' + str(dimention))
if len(dimention)==5:
n = dimention[0]*dimention[1]
selected_data = np.reshape(selected_data, (n,dimention[2],dimention[3],dimention[4]))
selected_data = choose_patch(selected_data)
elif len(dimention)==3:
n = dimention[0]*dimention[1]
selected_data = np.reshape(selected_data, (n,dimention[2], 1, 1))
selected_data = choose_single_patch(selected_data)
else:
n = dimention[0]
selected_data = np.reshape(selected_data, (n,dimention[1],dimention[2],dimention[3]))
selected_data = choose_patch(selected_data)
dimention = np.shape(selected_data)
return selected_data
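# Hedged usage sketch (not part of the original file): the folder is a
# placeholder and should contain the torch-saved activation files matched by
# read_data above.
# if __name__ == '__main__':
#     patches = load_data('/path/to/layer_activations')
#     print(np.shape(patches))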
|
import re
from typing import List, Type
from os.path import exists, isfile, join
from ..util.Locale import Locale, ROOT_LOCALE, from_iso
from ..exceptions import NotInResourceBundleError, MissingResourceBundleError
_STANDARD_FILE_EXTENSION = "properties"
class RawResourceBundle:
_cached_bundles = {}
def __init__(self, path: str = None, root: str = "."):
"""
Class that handles access to a resource across different locales.
:param path: The path to the resource file
:type path: str
:param root: The resources root directory path
:type root: str
"""
self._root = "." # Initialize root
self._parent = None
self._lookup = {}
self._reader = None
self.set_resources_root(root) # Set correct root
self._name = "INVALID" if path is None else path
def _load(self, path: str) -> None:
"""
Loads keys and values into this BasicResourceBundle instance.
:param path: The path to the resource file
:type path: str
:return: Nothing
:rtype: None
"""
if self._root not in path:
self._reader.load(join(self._root, path))
else:
self._reader.load(path)
self._lookup = self._reader.get()
def _needs_formatting(self, value: str) -> bool:
        return re.search(r'{[^}]*}', value) is not None
def _format(self, value, *args, **kwargs):
if self._needs_formatting(value):
try:
return self._format(value.format(*args, **kwargs, **self._lookup))
except KeyError:
return self._parent._format(value, *args, **kwargs)
else:
return value
def _handle_get_object(self, key, *args, **kwargs) -> object:
"""
        Searches for the given key in this ResourceBundle and returns its value if found, else None.
        :param key: The key to look up
        :type key: str
        :return: The value for the key, or None if it is not present
        :rtype: object
"""
try:
return self._format(self._lookup[key], *args, **kwargs) \
if self._needs_formatting(self._lookup[key]) \
else self._lookup[key]
except KeyError:
return None
def _set_parent(self, parent) -> None:
"""
Sets the parent for this bundle.
:param parent: The new parent
:type parent: BasicResourceBundle
:return: Nothing
:rtype: None
"""
self._parent = parent
def set_resources_root(self, path: str) -> None:
"""
Sets the resources root.
:param path: The new path
:type path: str
:return: Nothing
:rtype: None
"""
path = path.replace("\\", "/")
if path.endswith("/"):
path = path[:-1]
if not exists(path):
raise FileNotFoundError("'" + path + "' could not be found")
if isfile(path):
raise NotADirectoryError("'" + path + "' is not a directory")
self._root = path
if self._parent is not None:
self._parent.set_resources_root(path)
def generate_parent_chain(self, base_name: str, locale_: Locale, root: str = None) -> None:
"""
Generates the parent chain for this BasicResourceBundle.
:param base_name: The base name of this bundle
:type base_name: str
:param locale_: The Locale of this ResourceBundle
:type locale_: Locale
:param root: The resources root directory path
:type root: str
:return: Nothing
:rtype: None
"""
top_locale = locale_.get_top_locale()
self._cached_bundles[_to_bundle_name(base_name, locale_)] = self
if top_locale is None:
return
else:
try:
bundle = self._cached_bundles[_to_bundle_name(base_name, top_locale)]
bundle.set_resources_root(root)
except KeyError:
bundle = _new_bundle(base_name, top_locale, self._name.split(".")[-1], root=root, bundle_type=type(self))
self._set_parent(bundle)
def get(self, key: str, *args, **kwargs) -> str:
"""
Gets an object from the BasicResourceBundle.
:param key: The key of the desired object
:type key: str
:return: The object
:rtype: str
"""
obj = self._handle_get_object(key, *args, **kwargs)
if obj is None:
if self._parent is not None:
obj = self._parent.get(key, *args, **kwargs)
if obj is None:
raise NotInResourceBundleError(self._name, key)
return obj
def get_name(self) -> str:
"""
Getter for the name of this BasicResourceBundle.
:return: The name
:rtype: str
"""
return self._name
def get_keys(self) -> List[str]:
"""
Gets the currently loaded keys.
:return: The keys
:rtype: List[str]
"""
return list(self._lookup.keys())
def get_values(self) -> List[str]:
"""
Gets the currently loaded values.
:return: The values
:rtype: List[str]
"""
return list(self._lookup.values())
def get_all_keys(self) -> List[str]:
"""
Gets all keys from this BasicResourceBundle and its parents.
Due to casting to set the order of the keys can vary.
:return: The keys
:rtype: List[str]
"""
if self._parent is not None:
return list(set(self.get_keys() + self._parent.get_all_keys()))
else:
return self.get_keys()
def get_all_values(self) -> List[str]:
"""
Gets all values from this BasicResourceBundle and its parents.
Due to casting to set the order of the values can vary.
Usage of this method is not encouraged.
:return: The keys
:rtype: List[str]
"""
if self._parent is not None:
return list(set(self.get_values() + self._parent.get_all_values()))
else:
return self.get_values()
def __str__(self):
return "<{} - '{}'>".format(self.__class__.__name__, self._name)
def __repr__(self):
return str(self)
def get_bundle(base_name: str, locale_: Locale = None, root: str = ".") -> RawResourceBundle:
"""
Gets a specific ResourceBundle.
:param base_name: The name of the ResourceBundle
:type base_name: str
:param locale_: The locale
:type locale_: ..util.Locale
:param root: The resources root directory path
:type root: str
:return: The ResourceBundle
:rtype: BasicResourceBundle
"""
return _new_bundle(base_name, locale_, _STANDARD_FILE_EXTENSION, root=root)
def _to_resource_name(bundle_name: str, format_: str) -> str:
"""
Converts the BasicResourceBundle name into the corresponding resource path.
:param bundle_name: The specific name of the BasicResourceBundle
:type bundle_name: str
:param format_: The format of this BasicResourceBundle (file extension)
:type format_: str
:return: The resource name
:rtype: str
"""
return bundle_name + "." + format_
def _to_bundle_name(base_name: str, locale_: Locale) -> str:
"""
Generates the bundle name for a BasicResourceBundle.
:param base_name: The base name of the BasicResourceBundle
:type base_name: str
:param locale_: The locale to use for generating the name
:type locale_: ..util.Locale
:return: The name of the BasicResourceBundle
:rtype: str
"""
return base_name + locale_.get_delim() + locale_.to_string() if locale_ != ROOT_LOCALE else base_name
def _new_bundle(base_name: str, locale_: Locale, format_: str, root: str = ".",
bundle_type: Type[RawResourceBundle] = RawResourceBundle
) -> RawResourceBundle:
"""
Creates a new ResourceBundle.
:param base_name: The base name of this ResourceBundle
:type base_name: str
:param locale_: The locale for this ResourceBundle
:type locale_: ..util.Locale
:param format_: The format (file extension)
:type format_: str
:param root: The resources root directory path
:type root: str
:param bundle_type: The type of the ResourceBundle
:type bundle_type: RawResourceBundle
:return: The new ResourceBundle
:rtype: BasicResourceBundle
"""
if locale_ is None:
return _new_bundle(base_name=base_name, locale_=ROOT_LOCALE, format_=format_,
root=root, bundle_type=bundle_type)
if type(locale_) is str:
locale_ = from_iso(str(locale_))
try:
bundle = bundle_type(_to_resource_name(_to_bundle_name(base_name, locale_), format_), root=root)
bundle.generate_parent_chain(base_name, locale_, root=root)
return bundle
except FileNotFoundError:
if locale_ != ROOT_LOCALE:
return _new_bundle(base_name, locale_.get_top_locale(), format_, root=root, bundle_type=bundle_type)
else:
raise MissingResourceBundleError(_to_bundle_name(base_name, locale_))
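# Hedged usage sketch, kept as a comment because the relative imports above make
# this module importable only as part of its package. It assumes a resources
# directory containing e.g. "messages.properties" and "messages_en_US.properties",
# and a value such as "greeting=Hello, {name}!".
# bundle = get_bundle("messages", "en_US", root="./resources")
# print(bundle.get("greeting", name="world"))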
|
"""
MIT License
Copyright (c) 2021 AlexFlipnote
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class User:
def __init__(self, data):
self.name = data["username"]
self.id = int(data["id"])
self.discriminator = data["discriminator"]
self.avatar = data["avatar"]
def __str__(self):
""" Builds full username to Discord user """
return f"{self.name}#{self.discriminator}"
    def is_avatar_animated(self):
        """ Returns True/False depending on whether the avatar is animated """
return bool(self.avatar and self.avatar.startswith("a_"))
@property
def mention(self):
""" Returns a Discord ping to targeted user """
return f"<@{self.id}>"
def avatar_url(self, img_format: str = "webp", size: int = None):
""" Builds the AvatarURL for a Discord user """
if self.avatar:
if self.is_avatar_animated():
img_format = "gif"
if size:
valid_size = [1024, 512, 256, 128, 64, 32]
if size not in valid_size:
raise ValueError(f"Size can only be the following: {valid_size}")
size_str = f"?size={size}"
else:
size_str = ""
return f"https://cdn.discordapp.com/avatars/{self.id}/{self.avatar}.{img_format}{size_str}"
return f"https://cdn.discordapp.com/embed/avatars/{int(self.discriminator) % 5}.png"
|
"""Create list of dicts with time transfer between all hubs of each European city (city with more than 100.000 inhabitants or with an airport)."""
transfers_raw_one_dir_DE = [
{'city': 'DE-AAH', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-AAH', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 14, 'freq': 15},
{'city': 'DE-AAH', 'orig_hub': '01-1', 'dest_hub': '03-2', 'trip_duration': 16, 'freq': 15},
{'city': 'DE-AAH', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-AAH', 'orig_hub': '02-2', 'dest_hub': '03-2', 'trip_duration': 21, 'freq': 20},
{'city': 'DE-AAH', 'orig_hub': '03-2', 'dest_hub': '03-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-AGB', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-AGB', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 20, 'freq': 30},
{'city': 'DE-AGB', 'orig_hub': '00-0', 'dest_hub': '02-2', 'trip_duration': 40, 'freq': 90},
{'city': 'DE-AGB', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-AGB', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 20, 'freq': 60},
{'city': 'DE-AGB', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 31, 'freq': 60},
{'city': 'DE-BAB', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 26, 'freq': 60},
{'city': 'DE-BAB', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '01-1', 'dest_hub': '00-2', 'trip_duration': 31, 'freq': 60},
{'city': 'DE-BAB', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '00-2', 'dest_hub': '00-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BAB', 'orig_hub': '00-2', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 15, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 30, 'freq': 7},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 25, 'freq': 7},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '02-0', 'trip_duration': 60, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '02-2', 'trip_duration': 50, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '03-1', 'trip_duration': 35, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '03-2', 'trip_duration': 35, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '04-2', 'trip_duration': 31, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '05-2', 'trip_duration': 16, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '06-2', 'trip_duration': 26, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '07-2', 'trip_duration': 18, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-0', 'dest_hub': '08-2', 'trip_duration': 16, 'freq': 25},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '00-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '01-1', 'trip_duration': 30, 'freq': 7},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '01-2', 'trip_duration': 25, 'freq': 7},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '02-0', 'trip_duration': 60, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '02-2', 'trip_duration': 50, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '03-1', 'trip_duration': 35, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '03-2', 'trip_duration': 35, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '04-2', 'trip_duration': 31, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '05-2', 'trip_duration': 16, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '06-2', 'trip_duration': 26, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '07-2', 'trip_duration': 18, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '00-2', 'dest_hub': '08-2', 'trip_duration': 16, 'freq': 25},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '02-0', 'trip_duration': 30, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 30, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '03-1', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '03-2', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '04-2', 'trip_duration': 15, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '05-2', 'trip_duration': 25, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '06-2', 'trip_duration': 6, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '07-2', 'trip_duration': 7, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-1', 'dest_hub': '08-2', 'trip_duration': 22, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '02-0', 'trip_duration': 30, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '02-2', 'trip_duration': 30, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '03-1', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '03-2', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '04-2', 'trip_duration': 15, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '05-2', 'trip_duration': 25, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '06-2', 'trip_duration': 6, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '07-2', 'trip_duration': 7, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '01-2', 'dest_hub': '08-2', 'trip_duration': 22, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '02-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '02-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '03-1', 'trip_duration': 40, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '03-2', 'trip_duration': 40, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '04-2', 'trip_duration': 15, 'freq': 60},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '05-2', 'trip_duration': 30, 'freq': 60},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '06-2', 'trip_duration': 31, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '07-2', 'trip_duration': 36, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-0', 'dest_hub': '08-2', 'trip_duration': 46, 'freq': 40},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '03-1', 'trip_duration': 40, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '03-2', 'trip_duration': 40, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '04-2', 'trip_duration': 15, 'freq': 60},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '05-2', 'trip_duration': 30, 'freq': 60},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '06-2', 'trip_duration': 31, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '07-2', 'trip_duration': 36, 'freq': 30},
{'city': 'DE-BER', 'orig_hub': '02-2', 'dest_hub': '08-2', 'trip_duration': 46, 'freq': 40},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '03-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '03-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '04-2', 'trip_duration': 16, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '05-2', 'trip_duration': 25, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '06-2', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '07-2', 'trip_duration': 17, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '03-1', 'dest_hub': '08-2', 'trip_duration': 27, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '03-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '04-2', 'trip_duration': 16, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '05-2', 'trip_duration': 25, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '06-2', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '07-2', 'trip_duration': 17, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '03-2', 'dest_hub': '08-2', 'trip_duration': 27, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '04-2', 'dest_hub': '04-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '04-2', 'dest_hub': '05-2', 'trip_duration': 18, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '04-2', 'dest_hub': '06-2', 'trip_duration': 13, 'freq': 15},
{'city': 'DE-BER', 'orig_hub': '04-2', 'dest_hub': '07-2', 'trip_duration': 22, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '04-2', 'dest_hub': '08-2', 'trip_duration': 43, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '05-2', 'dest_hub': '05-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '05-2', 'dest_hub': '06-2', 'trip_duration': 31, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '05-2', 'dest_hub': '07-2', 'trip_duration': 14, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '05-2', 'dest_hub': '08-2', 'trip_duration': 25, 'freq': 20},
{'city': 'DE-BER', 'orig_hub': '06-2', 'dest_hub': '06-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '06-2', 'dest_hub': '07-2', 'trip_duration': 13, 'freq': 5},
{'city': 'DE-BER', 'orig_hub': '06-2', 'dest_hub': '08-2', 'trip_duration': 24, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '07-2', 'dest_hub': '07-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BER', 'orig_hub': '07-2', 'dest_hub': '08-2', 'trip_duration': 21, 'freq': 10},
{'city': 'DE-BER', 'orig_hub': '08-2', 'dest_hub': '08-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BFE', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BFE', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 14, 'freq': 15},
{'city': 'DE-BFE', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BOM', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BOM', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BOM', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BON', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BON', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 13, 'freq': 5},
{'city': 'DE-BON', 'orig_hub': '01-1', 'dest_hub': '03-2', 'trip_duration': 12, 'freq': 0},
{'city': 'DE-BON', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BON', 'orig_hub': '02-2', 'dest_hub': '03-2', 'trip_duration': 22, 'freq': 10},
{'city': 'DE-BON', 'orig_hub': '03-2', 'dest_hub': '03-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BWE', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BWE', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BWE', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BRE', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-BRE', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 23, 'freq': 10},
{'city': 'DE-BRE', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 23, 'freq': 10},
{'city': 'DE-BRE', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BRE', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BRE', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BRV', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-BRV', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-BRV', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '00-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '02-1', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '02-2', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-0', 'dest_hub': '03-2', 'trip_duration': 42, 'freq': 30},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '00-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '01-1', 'trip_duration': 15, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '02-1', 'trip_duration': 15, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '02-2', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-1', 'dest_hub': '03-2', 'trip_duration': 42, 'freq': 30},
{'city': 'DE-CGN', 'orig_hub': '00-2', 'dest_hub': '00-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '00-2', 'dest_hub': '01-1', 'trip_duration': 15, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-2', 'dest_hub': '02-1', 'trip_duration': 15, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-2', 'dest_hub': '02-2', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '00-2', 'dest_hub': '03-2', 'trip_duration': 42, 'freq': 30},
{'city': 'DE-CGN', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '01-1', 'dest_hub': '02-1', 'trip_duration': 5, 'freq': 5},
{'city': 'DE-CGN', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 5, 'freq': 5},
{'city': 'DE-CGN', 'orig_hub': '01-1', 'dest_hub': '00-2', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '01-1', 'dest_hub': '03-2', 'trip_duration': 27, 'freq': 45},
{'city': 'DE-CGN', 'orig_hub': '02-1', 'dest_hub': '02-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '02-1', 'dest_hub': '02-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '02-1', 'dest_hub': '00-2', 'trip_duration': 20, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '02-1', 'dest_hub': '03-2', 'trip_duration': 27, 'freq': 45},
{'city': 'DE-CGN', 'orig_hub': '02-2', 'dest_hub': '02-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-CGN', 'orig_hub': '02-2', 'dest_hub': '03-2', 'trip_duration': 33, 'freq': 15},
{'city': 'DE-CGN', 'orig_hub': '03-2', 'dest_hub': '03-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DAR', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DAR', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DAR', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DTM', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-DTM', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 27, 'freq': 60},
{'city': 'DE-DTM', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 27, 'freq': 60},
{'city': 'DE-DTM', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DTM', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-DTM', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DRS', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-DRS', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 21, 'freq': 30},
{'city': 'DE-DRS', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 21, 'freq': 30},
{'city': 'DE-DRS', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DRS', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-DRS', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DUI', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DUI', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-DUI', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DUS', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DUS', 'orig_hub': '00-0', 'dest_hub': '00-1', 'trip_duration': 5, 'freq': 0},
# {'city': 'DE-DUS', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DUS', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 20, 'freq': 20},
{'city': 'DE-DUS', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 20, 'freq': 20},
{'city': 'DE-DUS', 'orig_hub': '00-1', 'dest_hub': '00-1', 'trip_duration': 0, 'freq': 0},
# {'city': 'DE-DUS', 'orig_hub': '00-1', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-DUS', 'orig_hub': '00-1', 'dest_hub': '01-1', 'trip_duration': 15, 'freq': 20},
{'city': 'DE-DUS', 'orig_hub': '00-1', 'dest_hub': '01-2', 'trip_duration': 20, 'freq': 20},
{'city': 'DE-DUS', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-DUS', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 5, 'freq': 0},
# {'city': 'DE-DUS', 'orig_hub': '01-1', 'dest_hub': '00-2', 'trip_duration': 20, 'freq': 20},
# {'city': 'DE-DUS', 'orig_hub': '00-2', 'dest_hub': '00-2', 'trip_duration': 5, 'freq': 0},
# {'city': 'DE-DUS', 'orig_hub': '00-2', 'dest_hub': '01-2', 'trip_duration': 20, 'freq': 0},
{'city': 'DE-ERF', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-ERF', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 23, 'freq': 20},
{'city': 'DE-ERF', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 23, 'freq': 20},
{'city': 'DE-ERF', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-ERF', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-ERF', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-ERL', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-ERL', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-ERL', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-ESS', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-ESS', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-ESS', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
    # taking worst case for Fraport (change of terminal)
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 30, 'freq': 5}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '00-1', 'trip_duration': 30, 'freq': 5}, #max. is between T2 and T1
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 30, 'freq': 5}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-0', 'dest_hub': '02-1', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-1', 'dest_hub': '00-2', 'trip_duration': 30, 'freq': 5}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-1', 'dest_hub': '01-1', 'trip_duration': 30, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-1', 'dest_hub': '01-2', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-1', 'dest_hub': '02-1', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-2', 'dest_hub': '01-1', 'trip_duration': 45, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-2', 'dest_hub': '01-2', 'trip_duration': 50, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '00-2', 'dest_hub': '02-1', 'trip_duration': 60, 'freq': 15}, #max. is between T1 and T2
{'city': 'DE-FRA', 'orig_hub': '01-1', 'dest_hub': '02-1', 'trip_duration': 10, 'freq': 5},
{'city': 'DE-FRA', 'orig_hub': '01-2', 'dest_hub': '02-1', 'trip_duration': 20, 'freq': 5},
{'city': 'DE-HNH', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-HNH', 'orig_hub': '00-0', 'dest_hub': '00-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-HNH', 'orig_hub': '00-2', 'dest_hub': '00-2', 'trip_duration': 0, 'freq': 0},
{'city': 'DE-FBG', 'orig_hub': '01-1', 'dest_hub': '01-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-FBG', 'orig_hub': '01-1', 'dest_hub': '01-2', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-FBG', 'orig_hub': '01-2', 'dest_hub': '01-2', 'trip_duration': 0, 'freq': 0},
    # AUTOMATE TIMES BETWEEN 01-1 AND 01-2
{'city': 'DE-PAD', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 20, 'freq': 60},
{'city': 'DE-PAD', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 20, 'freq': 60},
{'city': 'DE-RSK', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 35, 'freq': 60*8}, #only 3 times per day: http://www.rebus.de/index.php?p=flughafen.htm
{'city': 'DE-RSK', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 35, 'freq': 60*8},
{'city': 'DE-STR', 'orig_hub': '00-0', 'dest_hub': '00-0', 'trip_duration': 10, 'freq': 0},
{'city': 'DE-WZE', 'orig_hub': '00-0', 'dest_hub': '00-1', 'trip_duration': 5, 'freq': 0},
{'city': 'DE-WZE', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 11, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 11, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '00-0', 'dest_hub': '02-2', 'trip_duration': 25, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '00-1', 'dest_hub': '01-1', 'trip_duration': 11, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '00-1', 'dest_hub': '01-2', 'trip_duration': 11, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '00-1', 'dest_hub': '02-2', 'trip_duration': 25, 'freq': 60},
{'city': 'DE-WZE', 'orig_hub': '01-1', 'dest_hub': '02-2', 'trip_duration': 6, 'freq': 30},
{'city': 'DE-WZE', 'orig_hub': '01-2', 'dest_hub': '02-2', 'trip_duration': 6, 'freq': 30},
]
transfers_raw_one_dir_ES = [
# {'city': 'ES-PMI', 'orig_hub': '01-3', 'dest_hub': '02-3', 'trip_duration': 100, 'freq': 90},
# {'city': 'ES-MJV', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 70, 'freq': 60*3},
# {'city': 'ES-MJV', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 70, 'freq': 60*3},
{'city': 'ES-TCI', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 55, 'freq': 60*2},
{'city': 'ES-MJV', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 75, 'freq': 90},
{'city': 'ES-MJV', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 75, 'freq': 90},
{'city': 'ES-CAS', 'orig_hub': '00-0', 'dest_hub': '01-1', 'trip_duration': 40, 'freq': 60*5},
{'city': 'ES-CAS', 'orig_hub': '00-0', 'dest_hub': '01-2', 'trip_duration': 40, 'freq': 60*5},
{'city': 'ES-CAS', 'orig_hub': '00-0', 'dest_hub': '02-2', 'trip_duration': 60, 'freq': 60*5},
]
transfers_raw_one_dir_GB = [
{'city': 'GB-LON', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '00-0', 'dest_hub': '02-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '00-0', 'dest_hub': '03-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '00-0', 'dest_hub': '04-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '00-0', 'dest_hub': '05-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '01-0', 'dest_hub': '02-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '01-0', 'dest_hub': '03-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '01-0', 'dest_hub': '04-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '01-0', 'dest_hub': '05-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '02-0', 'dest_hub': '03-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '02-0', 'dest_hub': '04-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '02-0', 'dest_hub': '05-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '03-0', 'dest_hub': '04-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '03-0', 'dest_hub': '05-0', 'trip_duration': 60*5, 'freq': 60*3},
{'city': 'GB-LON', 'orig_hub': '04-0', 'dest_hub': '05-0', 'trip_duration': 60*5, 'freq': 60*3},
]
transfers_raw_one_dir_FR = [
{'city': 'FR-PAR', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 75*2, 'freq': 30},
]
transfers_raw_one_dir_IT = [
{'city': 'IT-MIL', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 75, 'freq': 30+20},
{'city': 'IT-ROM', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 130, 'freq': 60+30},
]
transfers_raw_one_dir_RU = [
{'city': 'RU-MOW', 'orig_hub': '00-0', 'dest_hub': '01-0', 'trip_duration': 46+13+30, 'freq': 30+5+10},
{'city': 'RU-MOW', 'orig_hub': '00-0', 'dest_hub': '02-0', 'trip_duration': 46+7+38, 'freq': 30+60+5},
{'city': 'RU-MOW', 'orig_hub': '00-0', 'dest_hub': '03-0', 'trip_duration': 46+8+54+20, 'freq': 30+30+30+5},
{'city': 'RU-MOW', 'orig_hub': '01-0', 'dest_hub': '02-0', 'trip_duration': 30+5+38, 'freq': 10+5+60},
{'city': 'RU-MOW', 'orig_hub': '01-0', 'dest_hub': '03-0', 'trip_duration': 30+7+37+20, 'freq': 10+5+60+30},
{'city': 'RU-MOW', 'orig_hub': '02-0', 'dest_hub': '03-0', 'trip_duration': 38+13+37+20, 'freq': 60+5+60+30},
]
transfers_raw_one_dir = (transfers_raw_one_dir_DE + transfers_raw_one_dir_ES + transfers_raw_one_dir_GB +
transfers_raw_one_dir_FR + transfers_raw_one_dir_IT + transfers_raw_one_dir_RU)
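# Illustrative sketch (hypothetical helper): the lists above store each hub pair
# in one direction only. Assuming transfer times are symmetric, the reverse
# records could be generated like this:
def _mirror_transfers(one_dir_records):
    """Return the one-directional records together with their reversed copies."""
    both_dir = list(one_dir_records)
    for rec in one_dir_records:
        if rec['orig_hub'] != rec['dest_hub']:
            both_dir.append({'city': rec['city'],
                             'orig_hub': rec['dest_hub'],
                             'dest_hub': rec['orig_hub'],
                             'trip_duration': rec['trip_duration'],
                             'freq': rec['freq']})
    return both_dir
# transfers_raw_both_dir = _mirror_transfers(transfers_raw_one_dir)  # hypothetical use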
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 19:15:26 2020
@author: Diego
"""
import pandas as pd
import sqlite3
import wget
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import urllib.request
import datetime
import zipfile
import io
import requests
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.exists(os.path.join('data', 'temp')):
os.makedirs(os.path.join('data', 'temp'))
conn = sqlite3.connect(os.path.join('data', 'fundos.db'))
db = conn.cursor()
# %% functions
def create_tables():
"""
Creates all tables in the database.
Returns
-------
None.
"""
db.execute("""CREATE TABLE IF NOT EXISTS files
(file_name TEXT,
last_modified DATE)""")
db.execute("""CREATE TABLE IF NOT EXISTS quotas
(cnpj TEXT,
date DATE,
quota REAL)""")
db.execute("CREATE INDEX idx_quotas_cnpj ON quotas(cnpj);")
db.execute("""CREATE TABLE IF NOT EXISTS inf_cadastral
(cnpj TEXT,
denom_social TEXT,
classe text,
rentab_fundo TEXT,
taxa_perfm INTEGER,
taxa_adm REAL)""")
db.execute("""CREATE TABLE IF NOT EXISTS cdi
(date DATE,
cdi REAL,
d_factor REAL)""")
def update_register():
"""
Updates the mutual funds register.
Returns
-------
None.
"""
url = 'http://dados.cvm.gov.br/dados/FI/CAD/DADOS/'
files = {}
i = 0
html = urlopen(url)
soup = BeautifulSoup(html, 'lxml')
table = soup.find('table')
tr = table.find_all('tr')
for t in tr:
if t.text[0:17] == 'inf_cadastral_fi_':
file_name = t.text[0:29]
last_modified = pd.to_datetime(t.text[29:45])
files[i] = {'file_name': file_name, 'url_date': last_modified}
i += 1
available_files = pd.DataFrame.from_dict(files, orient='index')
available_files['url_date'] = pd.to_datetime(available_files['url_date'])
last_file = available_files['file_name'][available_files['url_date'] == max(available_files['url_date'])].values[0]
file_url = f"http://dados.cvm.gov.br/dados/FI/CAD/DADOS/{last_file}"
response = requests.get(file_url)
df = pd.read_csv(io.BytesIO(response.content), sep=';', header=0, encoding='latin-1')
df.columns = df.columns.str.lower()
df = df.rename(columns={'cnpj_fundo': 'cnpj'})
# drop inactive
df = df[df['sit'] == 'EM FUNCIONAMENTO NORMAL']
# drop closed
df = df[df['condom'] == 'Aberto']
# drop no equity
df = df[df['vl_patrim_liq'] != 0]
df = df.drop_duplicates(subset=['cnpj'], keep='last')
df = df[['cnpj', 'denom_social', 'classe', 'rentab_fundo', 'taxa_perfm', 'taxa_adm']]
df[['taxa_perfm', 'taxa_adm']] = df[['taxa_perfm', 'taxa_adm']].fillna(value=0)
db.execute("DELETE FROM inf_cadastral")
df.to_sql('inf_cadastral', conn, if_exists='append', index=False)
conn.commit()
return
def update_quotes():
"""
Updates the mutual funds quotes.
Returns
-------
None.
"""
db_files = pd.read_sql("SELECT * FROM files", conn)
urls = ['http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/',
'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/']
files = {}
i = 0
for url in urls:
html = urlopen(url)
soup = BeautifulSoup(html, 'lxml')
table = soup.find('table')
tr = table.find_all('tr')
for t in tr:
if t.text[0:14] == 'inf_diario_fi_':
if url == 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/':
file_name = t.text[0:24]
last_modified = pd.to_datetime(t.text[24:40]).date()
else:
file_name = t.text[0:22]
last_modified = pd.to_datetime(t.text[22:38]).date()
files[i] = {'file_name': file_name, 'url_date': last_modified}
i += 1
available_files = pd.DataFrame.from_dict(files, orient='index')
new_files = available_files.merge(db_files, how='left', right_on='file_name', left_on='file_name')
new_files = new_files.fillna(pd.to_datetime('1900-01-01'))
new_files = new_files[new_files['url_date'] > pd.to_datetime(new_files['last_modified'])]
for idx, file in new_files.iterrows():
if len(file['file_name']) == 22:
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/'
zip_or_csv = 'zip'
else:
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/'
zip_or_csv = 'csv'
file_url = url + file['file_name']
file_data = requests.get(file_url).content
db.execute(f"""DELETE FROM files
WHERE file_name = '{file['file_name']}'""")
load_file(file_data, zip_or_csv=zip_or_csv)
db.execute(f"""INSERT INTO files
VALUES ('{file['file_name']}', '{file['url_date']}')""")
print(f"{file['file_name']} downloaded successfully.")
conn.commit()
return
def load_file(file_data, zip_or_csv):
"""
Loads the file with the new quotes.
Parameters
----------
    file_data : bytes
        Raw content of the downloaded file.
    zip_or_csv : str
        Either 'zip' or 'csv', selecting how the content is parsed.
Returns
-------
None.
"""
active = pd.read_sql("SELECT cnpj FROM inf_cadastral", conn)['cnpj']
if zip_or_csv == 'zip':
zip_file = zipfile.ZipFile(io.BytesIO(file_data))
# dict with all csv files
files_dict = {}
for i in range(len(zip_file.namelist())):
files_dict[zip_file.namelist()[i]] = zip_file.read(zip_file.namelist()[i])
else:
files_dict = {'any_name': file_data }
for key in files_dict.keys():
df = pd.read_csv(io.BytesIO(files_dict[key]), sep=';', header=0, encoding='latin-1')
df.columns = df.columns.str.lower()
df = df.rename(columns={'cnpj_fundo': 'cnpj', 'dt_comptc': 'date', 'vl_quota': 'quota'})
df = df[df['cnpj'].isin(list(active))]
df = df[['cnpj', 'date', 'quota']]
year = df['date'].str[:4].unique()[0]
month = df['date'].str[5:7].unique()[0]
db.execute(f"""DELETE FROM quotas
WHERE SUBSTR(date, 1, 4) = '{year}' AND
SUBSTR(date, 6, 2) = '{month}'""")
df.to_sql('quotas', conn, if_exists='append', index=False)
conn.commit()
return
def update_cdi():
"""
Updates the CDI (Brazilian reference rate).
Returns
-------
None.
"""
# Files in the ftp:
url = 'ftp://ftp.cetip.com.br/MediaCDI/'
req = urllib.request.Request(url)
r = urllib.request.urlopen(req)
text = str(r.read())
text = text.replace('\\n', ' ')
text = text.replace('\\r', '')
text = text.replace("b'", "")
text = text.replace("'", "")
text = text.split()
available_files = []
for file_name in text:
if file_name[-4:] == '.txt':
available_files.append(file_name)
# Files in the database:
db_files = pd.read_sql("SELECT * FROM files", conn)
db_files = db_files['file_name'].to_list()
# check if the file is new, process and update files table
for file in available_files:
if file not in db_files:
for fl in os.listdir(os.path.join('data', 'temp')):
os.remove(os.path.join('data', 'temp', fl))
file_url = f"ftp://ftp.cetip.com.br/MediaCDI/{file}"
wget.download(file_url, os.path.join('data', 'temp'))
with open(os.path.join('data', 'temp', file), 'r') as content:
cdi = int(content.readline()) / 100
d_factor = ((cdi / 100) + 1) ** (1 / 252)
date = datetime.datetime.strptime(file[:8], '%Y%m%d')
db.execute(f"""INSERT INTO cdi
VALUES ('{date}', {cdi}, {d_factor})""")
# These files are not updated by the provider (cetip.com.br).
            # Because of that, last_modified is not important and is set to 1900-01-01
db.execute(f"""INSERT INTO files
VALUES ('{file}', '1900-01-01')""")
conn.commit()
print("CDI file " + file + " downloaded successfully.")
return
def update_pipeline():
# create database tables
query = "SELECT name FROM sqlite_master WHERE type='table' AND name='quotas';"
    if db.execute(query).fetchone() is None:
create_tables()
update_register()
update_quotes()
update_cdi()
return
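# Illustrative usage sketch: running the full update and reading back a few
# rows. The CNPJ below is a placeholder, not a real fund identifier.
if __name__ == "__main__":
    update_pipeline()
    sample = pd.read_sql(
        "SELECT date, quota FROM quotas WHERE cnpj = ? ORDER BY date DESC LIMIT 5",
        conn, params=("00.000.000/0001-00",))
    print(sample)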
|
#!Measurement
'''
baseline:
after: true
before: false
counts: 30
detector: H1
mass: 39.59
default_fits: nominal
multicollect:
counts: 60
detector: H1
isotope: Ar40
peakcenter:
after: false
before: false
detector: H1
isotope: Ar40
equilibration:
inlet: R
outlet: O
inlet_delay: 3
eqtime: 20
use_extraction_eqtime: True
whiff:
split_A_valve: L
counts: 4
abbreviated_count_ratio: 1.0
conditionals:
- action: run_total
attr: Ar40
teststr: Ar40.cur<50
- action: run
attr: Ar40
teststr: Ar40.cur<100
- action: run_split
teststr: Ar40.cur>100
attr: Ar40
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')
#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')
def main():
"""
This script does a fast whiff measurement
0. Split analytical section
1. close ion pump
2. wait 2 seconds
3. open inlet
4. do whiff for 4 seconds
5. make decision
run: Finish equilibration and run gas
run_total: run split_A and split_B together
run_split: pump out mass spec and split gas.
isolate co2 chamber, pump out microbone, expand co2 chamber
"""
info('Fast Whiff script')
# peak center before
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
# setup
    # open a plot panel for these detectors
activate_detectors(*ACTIVE_DETECTORS)
# baseline before
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)
# position mass spectrometer for normal analysis
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
# isolate split A
close(mx.whiff.split_A_valve)
# equilibrate
set_time_zero()
close(mx.equilibration.outlet)
sleep(2)
open(mx.equilibration.inlet)
# do fast whiff
result = whiff(ncounts=mx.whiff.counts, conditionals=mx.whiff.conditionals)
info('Whiff result={}'.format(result))
wab=1.0
if result=='run':
        info('Continuing whiff measurement')
# finish equilibration
sleep(10)
close(mx.equilibration.inlet)
post_equilibration()
wab = mx.whiff.abbreviated_count_ratio
elif result=='run_total':
#reset_measurement(ACTIVE_DETECTORS)
info('Run total')
open(mx.whiff.split_A_valve)
sleep(10)
close(mx.equilibration.inlet)
set_fits()
set_baseline_fits()
post_equilibration(block=False)
wab = mx.whiff.abbreviated_count_ratio
elif result=='run_split':
info('Measuring remainder instead')
reset_measurement(ACTIVE_DETECTORS)
close(mx.equilibration.inlet)
close(mx.whiff.split_A_valve)
# pump out spectrometer
open(mx.equilibration.outlet)
# pump out microbone
open(description='Microbone to Turbo')
sleep(15)
close(description='Microbone to Turbo')
#equilibrate split
open(mx.whiff.split_A_valve)
sleep(5)
#equilibrate with entire section
equil(eqtime)
multicollect(ncounts=mx.multicollect.counts*wab, integration_time=1)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts*wab, mass=mx.baseline.mass, detector=mx.baseline.detector)
if mx.peakcenter.after:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
info('finished measure script')
def equil(eqt, do_post=True, set_tz=True):
    # post equilibration script is triggered after eqtime has elapsed
    # equilibrate is non-blocking,
    # so use either a sniff or a sleep as a placeholder until equilibration finishes
equilibrate(eqtime=eqt, do_post_equilibration=do_post,
inlet=mx.equilibration.inlet,
outlet=mx.equilibration.outlet)
#equilibrate returns immediately after the inlet opens
if set_tz:
set_time_zero(0)
sniff(eqt)
#set default regression
set_fits()
set_baseline_fits()
#========================EOF==============================================================
|
"""Custom exceptions."""
class EmptyGroupResult(Exception):
"""Exception raised when group AnalysisModule is run with <= 1 samples."""
pass
class UnsupportedAnalysisMode(NotImplementedError):
"""
Error raised when AnalysisModule is called in the wrong context.
Example: an AnalysisModule that processes all sample data for a SampleGroup
gets called for processing a single Sample.
"""
pass
|
import pendulum
def ts_to_utc(ts):
return ts
def rfc_3339_to_local_string(string):
dt = pendulum.parse(string)
local = dt.in_timezone("local")
return local.strftime("%c")
|
import pandas as pd
import numpy as np
import matplotlib
#from google.colab import drive
#drive.mount('/content/drive', force_remount=True)
import pickle
from os import listdir
PATH = '/home/graspinglab/NCS_data/updated_data/'
all_files = listdir(PATH)
files = []
for f in all_files:
if "pkl" in str(f):
files.append(f)
X_train = []
X_test = []
y_train = []
y_test = []
for filename in files:
file = open(PATH + filename, "rb")
data = pickle.load(file)
file.close()
#80% of each file into training
#20% of each file into testing
X_train.extend(data["states"][:round(len(data["states"])*.8)])
X_test.extend(data["states"][round(len(data["states"])*.8): len(data["states"])])
y_train.extend(data["grasp_success"][:round(len(data["grasp_success"])*.8)])
y_test.extend(data["grasp_success"][round(len(data["grasp_success"])*.8): len(data["grasp_success"])])
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
X_test, y_test = shuffle(X_test, y_test)
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
#For saving and loading
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn versions
#Create the pipeline
pipeline = Pipeline([
('clf', LogisticRegression())
])
# Different models to test
models = [#LogisticRegression(),
DecisionTreeClassifier(),
#KNeighborsClassifier(),
#GaussianNB(),
#SVC(),
RandomForestClassifier(n_estimators=20)]
#Cycle through the different models
for mod in models:
pipeline.set_params(clf = mod)
#Train and predict
pipeline.fit(X_train, y_train)
pred = pipeline.predict(X_test)
print("_" * 3)
print(str(mod))
#Metrics
    print(accuracy_score(y_test, pred))
    print(confusion_matrix(y_test, pred))
    print(classification_report(y_test, pred))
#Save
joblib.dump(pipeline, PATH + type(mod).__name__ + '.joblib')
#Load
#clf = load('model.joblib')
#clf.predict(X_test)
print("_" * 3)
|
# coding=utf-8
__author__ = 'jiataogu'
from emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file
import numpy.random as n_rng
class BSTnode(object):
"""
Representation of a node in a binary search tree.
Has a left child, right child, and key value, and stores its subtree size.
"""
def __init__(self, parent, t):
"""Create a new leaf with key t."""
self.key = t
self.parent = parent
self.left = None
self.right = None
self.size = 1
def update_stats(self):
"""Updates this node's size based on its children's sizes."""
        self.size = 1 + (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size)
def insert(self, t, NodeType):
"""Insert key t into the subtree rooted at this node (updating subtree size)."""
self.size += 1
if t < self.key:
if self.left is None:
self.left = NodeType(self, t)
return self.left
else:
return self.left.insert(t, NodeType)
elif t > self.key:
if self.right is None:
self.right = NodeType(self, t)
return self.right
else:
return self.right.insert(t, NodeType)
else:
return self
def find(self, t):
"""Return the node for key t if it is in this tree, or None otherwise."""
if t == self.key:
return self
elif t < self.key:
if self.left is None:
return None
else:
return self.left.find(t)
else:
if self.right is None:
return None
else:
return self.right.find(t)
def rank(self, t):
"""Return the number of keys <= t in the subtree rooted at this node."""
left_size = 0 if self.left is None else self.left.size
if t == self.key:
return left_size + 1
elif t < self.key:
if self.left is None:
return 0
else:
return self.left.rank(t)
else:
if self.right is None:
return left_size + 1
else:
return self.right.rank(t) + left_size + 1
def minimum(self):
"""Returns the node with the smallest key in the subtree rooted by this node."""
current = self
while current.left is not None:
current = current.left
return current
def successor(self):
"""Returns the node with the smallest key larger than this node's key, or None if this has the largest key in the tree."""
if self.right is not None:
return self.right.minimum()
current = self
while current.parent is not None and current.parent.right is current:
current = current.parent
return current.parent
def delete(self):
""""Delete this node from the tree."""
if self.left is None or self.right is None:
if self is self.parent.left:
self.parent.left = self.left or self.right
if self.parent.left is not None:
self.parent.left.parent = self.parent
else:
self.parent.right = self.left or self.right
if self.parent.right is not None:
self.parent.right.parent = self.parent
current = self.parent
while current.key is not None:
current.update_stats()
current = current.parent
return self
else:
s = self.successor()
self.key, s.key = s.key, self.key
return s.delete()
def check(self, lokey, hikey):
"""Checks that the subtree rooted at t is a valid BST and all keys are between (lokey, hikey)."""
        if lokey is not None and self.key <= lokey:
            raise RuntimeError("BST RI violation")
        if hikey is not None and self.key >= hikey:
            raise RuntimeError("BST RI violation")
        if self.left is not None:
            if self.left.parent is not self:
                raise RuntimeError("BST RI violation")
            self.left.check(lokey, self.key)
        if self.right is not None:
            if self.right.parent is not self:
                raise RuntimeError("BST RI violation")
            self.right.check(self.key, hikey)
        if self.size != 1 + (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size):
            raise RuntimeError("BST RI violation")
def __repr__(self):
return "<BST Node, key:" + str(self.key) + ">"
class BST(object):
"""
Simple binary search tree implementation, augmented with subtree sizes.
This BST supports insert, find, and delete-min operations.
Each tree contains some (possibly 0) BSTnode objects, representing nodes,
and a pointer to the root.
"""
def __init__(self, NodeType=BSTnode):
self.root = None
self.NodeType = NodeType
self.psroot = self.NodeType(None, None)
def reroot(self):
self.root = self.psroot.left
def insert(self, t):
"""Insert key t into this BST, modifying it in-place."""
if self.root is None:
self.psroot.left = self.NodeType(self.psroot, t)
self.reroot()
return self.root
else:
return self.root.insert(t, self.NodeType)
def find(self, t):
"""Return the node for key t if is in the tree, or None otherwise."""
if self.root is None:
return None
else:
return self.root.find(t)
def rank(self, t):
"""The number of keys <= t in the tree."""
if self.root is None:
return 0
else:
return self.root.rank(t)
def delete(self, t):
"""Delete the node for key t if it is in the tree."""
        node = self.find(t)
        if node is None:
            return None
        deleted = node.delete()
        self.reroot()
        return deleted
def check(self):
if self.root is not None:
self.root.check(None, None)
def __str__(self):
if self.root is None:
return '<empty tree>'
def nested(node):
if node is None:
return '0'
head = str(node.key)
left = nested(node.left)
right = nested(node.right)
if left == '0' and right == '0':
return head
else:
return ' '.join(['(', head, left, right, ')'])
return nested(self.root)
# def recurse(node):
# if node is None:
# return [], 0, 0
# label = str(node.key)
# left_lines, left_pos, left_width = recurse(node.left)
# right_lines, right_pos, right_width = recurse(node.right)
# middle = max(right_pos + left_width - left_pos + 1, len(label), 2)
# pos = left_pos + middle // 2
# width = left_pos + middle + right_width - right_pos
# while len(left_lines) < len(right_lines):
# left_lines.append(' ' * left_width)
# while len(right_lines) < len(left_lines):
# right_lines.append(' ' * right_width)
# if (middle - len(label)) % 2 == 1 and node.parent is not None and \
# node is node.parent.left and len(label) < middle:
# label += '.'
# label = label.center(middle, '.')
# if label[0] == '.': label = ' ' + label[1:]
# if label[-1] == '.': label = label[:-1] + ' '
# lines = [' ' * left_pos + label + ' ' * (right_width - right_pos),
# ' ' * left_pos + '/' + ' ' * (middle-2) +
# '\\' + ' ' * (right_width - right_pos)] + \
# [left_line + ' ' * (width - left_width - right_width) +
# right_line
# for left_line, right_line in zip(left_lines, right_lines)]
# return lines, pos, width
# return '\n'.join(recurse(self.root) [0])
test1 = range(0, 100, 10)
test2 = [31, 41, 59, 26, 53, 58, 97, 93, 23]
test3 = "algorithms"
def printsizes(node):
if node is None:
print "node is nil"
else:
print "node", node.key, "has a subtree of size", node.size
def test(args=None, BSTtype=BST):
import random, sys
random.seed(19920206)
if not args:
args = sys.argv[1:]
if not args:
print 'usage: %s <number-of-random-items | item item item ...>' % \
sys.argv[0]
sys.exit()
elif len(args) == 1:
items = (random.randrange(100) for i in xrange(int(args[0])))
else:
items = [int(i) for i in args]
tree = BSTtype()
source = []
for item in items:
tree.insert(item)
source += [str(item)]
print ' '.join(source)
print tree
def generate():
import random, sys
random.seed(19920206)
Lmin = 2 ** 2 - 1
Lmax = 2 ** 4 - 1
Xnum = 1000000
voc = 26
wfile = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'w')
for id in xrange(Xnum):
tree = BST()
items = (random.randrange(voc) for i in
xrange(random.randint(Lmin, Lmax)))
source = []
for item in items:
item = chr(item + 65)
tree.insert(item)
source += [str(item)]
source = ' '.join(source)
target = str(tree)
line = '{0} -> {1}'.format(source, target)
wfile.write(line + '\n')
if id % 10000 == 0:
print id
def obtain_dataset():
rfile = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'r')
line = rfile.readline()
word2idx = dict()
word2idx['<eol>'] = 0
word2idx['<unk>'] = 1
pairs = []
at = 2
lines = 0
while line:
lines += 1
line = line.strip()
source, target = line.split('->')
source = source.split()
target = target.split()
for w in source:
if w not in word2idx:
word2idx[w] = at
at += 1
for w in target:
if w not in word2idx:
word2idx[w] = at
at += 1
pairs.append((source, target))
if lines % 20000 == 0:
print lines
line = rfile.readline()
idx2word = dict()
for v, k in word2idx.items():
idx2word[k] = v
Lmax = len(idx2word)
print 'read dataset ok.'
print Lmax
for i in xrange(Lmax):
print idx2word[i]
def build_data(data):
instance = dict(text=[], summary=[], source=[], target=[], target_c=[])
for pair in data:
source, target = pair
A = [word2idx[w] for w in source]
B = [word2idx[w] for w in target]
# C = np.asarray([[w == l for w in source] for l in target], dtype='float32')
C = [0 if w not in source else source.index(w) + Lmax for w in target]
instance['text'] += [source]
instance['summary'] += [target]
instance['source'] += [A]
instance['target'] += [B]
# instance['cc_matrix'] += [C]
instance['target_c'] += [C]
print instance['target'][5000]
print instance['target_c'][5000]
return instance
train_set = build_data(pairs[100000:])
test_set = build_data(pairs[:100000])
serialize_to_file([train_set, test_set, idx2word, word2idx],
'/home/thoma/Work/Dial-DRL/dataset/BST_1M.data.pkl')
if __name__ == '__main__':
generate()
obtain_dataset()
|
from setuptools import setup
setup(
name='savant_get',
version='0.0.1',
description='Go get all that data from Baseball Savant',
author='Jared Martin',
author_email='jared.martin@mlb.com',
py_modules=['savant_get'],
entry_points={'console_scripts': ['savant-get=savant_get:main']},
python_requires='>=3.4',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
],
)
|
#!/usr/bin/python
import re
import subprocess
script = '''
if [ "$GIT_AUTHOR_EMAIL" = "" ]; then
GIT_COMMITTER_NAME="Ghost";
GIT_AUTHOR_NAME="Ghost";
GIT_COMMITTER_EMAIL="ghost@xinhua.dev";
GIT_AUTHOR_EMAIL="ghost@xinhua.dev";
git commit-tree "$@";
'''
with open('./authors.txt', mode='r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
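        # each authors.txt line is expected to match the pattern below, e.g.:
        #   old@example.com = New Name <new@example.com>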
match = re.match(r'^(\S+)\s*=\s*(.+)\s<(\S+)>$', line)
if match is None:
continue
(oldemail, name, email) = match.groups()
temp = '''
elif [ "$GIT_AUTHOR_EMAIL" = "%s" ]; then
GIT_COMMITTER_NAME="%s";
GIT_AUTHOR_NAME="%s";
GIT_COMMITTER_EMAIL="%s";
GIT_AUTHOR_EMAIL="%s";
git commit-tree "$@";
''' % (oldemail, name, name, email, email)
script = script + temp
script = script + '''
else
git commit-tree "$@";
fi
'''
cmd = '''
git filter-branch -f --commit-filter '%s' --tag-name-filter cat -- --all
''' % script
print(cmd)
yes = input('Are you sure you want to execute the above command? (y/n) ')
if yes == 'y':
subprocess.run(cmd, shell=True)
else:
print('exit')
|
from django.db import models
from django_fsm import FSMField, transition
from enum import Enum
from jsonfield import JSONField
from .managers import MessageManager
_received, _started, _failed, _submitted, _delivered = (
"received",
"started",
"failed",
"submitted",
"delivered",
)
class ChoicesEnum(Enum):
@classmethod
def yield_choices(cls):
return tuple((x.value, x.name) for x in cls)
class MessageTypes(ChoicesEnum):
DUMMY = 'dummy'
PUSH = 'push'
SMS = 'sms'
class MessageChannels(ChoicesEnum):
AT = 'africas-talking'
FIREBASE = 'firebase'
HTTPBIN = 'httpbin'
SMPP = 'smpp'
class FSMStates(ChoicesEnum):
"""
Class that exposes possible FSM states with methods to yield a tuple
of those states as choices.
Also, to access the string values of the states, use FSMStates.<STATE>.value
"""
RECEIVED = _received
STARTED = _started
FAILED = _failed
SUBMITTED = _submitted
DELIVERED = _delivered
class BaseModel(models.Model):
"""
Base data model for all objects
Defines `__repr__` & `json` methods or any common method that you need
for all your models
"""
class Meta:
abstract = True
state = FSMField(
default=_received,
choices=FSMStates.yield_choices(),
protected=True,
db_index=True
)
@transition(
field=state,
source=[_received, _failed],
target=_started
)
def started(self):
"""
Change message request to `started` state.
"""
return
@transition(
field=state,
source="*",
target=_failed
)
def failed(self):
"""
For requests in `started` that cannot be submitted to Network
hence in the `failed` state.
"""
return
@transition(
field=state,
source=_started,
target=_submitted
)
def submitted(self):
"""
Change message request to `submitted` state from `started` state.
"""
return
@transition(
field=state,
source=[_submitted, _failed, _delivered],
target=_delivered
)
def delivered(self):
"""
Request was successfully `submitted` to message center/server and a
response returned.
Can also transition from source=`delivered` to accommodate for delivery
notifications from message center/server even after the task has
been `delivered`
"""
return
class MessageRequest(BaseModel):
class Meta:
db_table = "message"
message_id = models.CharField(
max_length=255,
db_index=True
)
data = JSONField(
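        # NOTE: Django recommends a callable default for JSON-like fields so that
        # instances do not share one mutable dict; the literal default is kept here.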
default=dict(status=dict(attempts=0))
)
created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(auto_now=True)
objects = MessageManager()
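# Illustrative sketch (comments only, assumes a configured Django project):
# the expected state flow for a MessageRequest driven by the transitions above.
#
#     msg = MessageRequest.objects.create(message_id="example-id")  # state: received
#     msg.started()    # received  -> started
#     msg.submitted()  # started   -> submitted
#     msg.delivered()  # submitted -> delivered
#     msg.save()       # `state` is protected, so it only changes via transitions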
|
from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER, TA_LEFT
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.lib.units import cm
styles = getSampleStyleSheet()
styles.add(
ParagraphStyle(
name="ai_other", parent=styles["Normal"], fontSize=11
)
)
styles.add(
ParagraphStyle(
name="ai_list",
parent=styles["Normal"],
fontSize=11,
bulletText="•",
bulletFontSize=11,
)
)
styles.add(
ParagraphStyle(
name="main_title",
parent=styles["Normal"],
leading=30,
alignment=TA_CENTER,
fontSize=40,
fontName="Helvetica-Bold",
)
)
styles.add(
ParagraphStyle(
name="ai_h1",
parent=styles["Normal"],
leading=30,
alignment=TA_CENTER,
fontSize=30,
fontName="Helvetica-Bold",
)
)
styles.add(
ParagraphStyle(
name="ai_h2",
parent=styles["Normal"],
leading=20,
alignment=TA_LEFT,
fontSize=20,
fontName="Helvetica-Bold",
)
)
styles.add(
ParagraphStyle(
name="ai_h3",
parent=styles["Normal"],
leading=20,
alignment=TA_LEFT,
fontSize=14,
fontName="Helvetica-Bold",
)
)
styles.add(
ParagraphStyle(
name="ai_code",
parent=styles["Normal"],
fontName="Courier",
backColor="black",
textColor="white",
borderPadding=0.1 * cm,
)
)
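# Illustrative usage sketch: rendering the styles above into a small PDF.
# The output filename is arbitrary.
if __name__ == "__main__":
    from reportlab.lib.pagesizes import A4
    from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer

    doc = SimpleDocTemplate("styles_demo.pdf", pagesize=A4)
    story = [
        Paragraph("Report title", styles["main_title"]),
        Spacer(1, 1 * cm),
        Paragraph("Section heading", styles["ai_h2"]),
        Paragraph("Body text rendered with the ai_other style.", styles["ai_other"]),
        Paragraph("print('hello')", styles["ai_code"]),
    ]
    doc.build(story)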
|
import sqlite3
from aiogram import Dispatcher, types
from aiogram.dispatcher.filters.state import State, StatesGroup
from app.config_reader import load_config
from app.localization import lang
config = load_config("config/bot.ini")
conn = sqlite3.connect(config.bot.way)
cur = conn.cursor()
class Lang(StatesGroup):
start = State()
waiting_for_lang_choose = State()
async def cmd_lang(message: types.Message):
    cur.execute("SELECT * FROM users WHERE user_chat_id = ?", (message.chat.id,))
user_results = cur.fetchone()
keyboard = types.InlineKeyboardMarkup()
lang_keys = list(lang.keys())
#
for x in range(0, len(lang)):
bth = types.InlineKeyboardButton(text=lang[lang_keys[x]]['name'], callback_data=lang_keys[x])
keyboard.add(bth)
keyboard.add(types.InlineKeyboardButton(text='◀️' + lang[user_results[1]]['cancel'], callback_data='cancel'))
#
await message.delete()
await message.answer(lang[user_results[1]]['lang_choose'], reply_markup=keyboard)
await Lang.waiting_for_lang_choose.set()
async def lang_choose(call: types.CallbackQuery):
    cur.execute("SELECT * FROM users WHERE user_chat_id = ?", (call.message.chat.id,))
user_results = cur.fetchone()
#
    if call.data in ('cancel', '/cancel'):
await call.message.edit_text(text=(lang[user_results[1]]['successfully']))
else:
await call.message.edit_text(text=(lang[user_results[1]]['lang_chosen'] + ' <code>' +
lang[call.data]['name'] + '</code>'),
parse_mode=types.ParseMode.HTML)
#
cur.execute('UPDATE users SET lang = ? WHERE user_chat_id = ?', (call.data, call.message.chat.id))
conn.commit()
await call.answer()
def register_handlers_lang(dp: Dispatcher):
dp.register_message_handler(cmd_lang, commands="lang", state="*")
dp.register_callback_query_handler(lang_choose, state=Lang.waiting_for_lang_choose)
|
import threading
import logging
import io
class CapturingHandler(logging.StreamHandler):
@property
def content(self) -> str:
return self.stream.getvalue()
def __init__(self):
super().__init__(stream=io.StringIO())
def clear(self):
self.stream.truncate(0)
self.stream.seek(0)
_test_index = 0
_index_lock = threading.Lock()
class ExtraLogger(logging.Logger):
def makeRecord(
self,
name,
level,
fn,
lno,
msg,
args,
exc_info,
func=None,
extra=None,
sinfo=None,
):
"""Replacement for Logger.makeRecord to overwrite fields via ``extra``"""
        # pull the optional 'created' override out of extra, if one was supplied
        created = extra.pop("created", None) if extra else None
rv = super().makeRecord(
name, level, fn, lno, msg, args, exc_info, func, None, sinfo
)
if extra is not None:
for key in extra:
rv.__dict__[key] = extra[key]
if created:
rv.created = created
rv.msecs = (created - int(created)) * 1000
rv.relativeCreated = (created - logging._startTime) * 1000
return rv
def make_test_logger(base_name: str = "test_logger"):
with _index_lock:
global _test_index
log_name = base_name + ".test%d" % _test_index
_test_index += 1
logger = logging.getLogger(log_name)
logger.propagate = False
logger.__class__ = ExtraLogger
handler = CapturingHandler()
logger.handlers = [handler]
return logger, handler
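# Hypothetical usage sketch: the CapturingHandler collects everything the logger
# emits, and clear() resets the buffer between assertions.
if __name__ == "__main__":
    logger, handler = make_test_logger()
    logger.setLevel(logging.INFO)
    logger.info("hello %s", "world")
    assert "hello world" in handler.content
    handler.clear()
    assert handler.content == ""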
|
# (C) Copyright 2007-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Tests for applications and plugins. """
# Standard library imports.
import os
import shutil
import unittest
# Enthought library imports.
from traits.etsconfig.api import ETSConfig
from envisage.api import Application, ExtensionPoint
from envisage.api import Plugin, PluginManager
from traits.api import Bool, Int, List
# Local imports.
from envisage.tests.event_tracker import EventTracker
from envisage.tests.ets_config_patcher import ETSConfigPatcher
def listener(obj, trait_name, old, new):
""" A useful trait change handler for testing! """
listener.obj = obj
listener.trait_name = trait_name
listener.old = old
listener.new = new
def vetoer(event):
""" A function that will veto an event. """
event.veto = True
class TestApplication(Application):
""" The type of application used in the tests. """
id = "test"
class SimplePlugin(Plugin):
""" A simple plugin. """
#### 'SimplePlugin' interface #############################################
started = Bool(False)
stopped = Bool(False)
###########################################################################
# 'IPlugin' interface.
###########################################################################
def start(self):
""" Start the plugin. """
self.started = True
self.stopped = False
def stop(self):
""" Stop the plugin. """
self.started = False
self.stopped = True
class BadPlugin(Plugin):
""" A plugin that just causes trouble ;^). """
###########################################################################
# 'IPlugin' interface.
###########################################################################
def start(self):
""" Start the plugin. """
raise 1 / 0
def stop(self):
""" Stop the plugin. """
raise 1 / 0
class PluginA(Plugin):
""" A plugin that offers an extension point. """
id = "A"
x = ExtensionPoint(List, id="a.x")
class PluginB(Plugin):
""" A plugin that contributes to an extension point. """
id = "B"
x = List(Int, [1, 2, 3], contributes_to="a.x")
class PluginC(Plugin):
""" Another plugin that contributes to an extension point! """
id = "C"
x = List(Int, [98, 99, 100], contributes_to="a.x")
# PluginD and PluginE each contribute to the other's extension points, but both
# expect to be started before contributions are made.
# xref: enthought/envisage#417
class PluginD(Plugin):
""" Plugin that expects to be started before contributing to
extension points. """
id = "D"
x = ExtensionPoint(List, id="d.x")
y = List(Int, contributes_to="e.x")
started = Bool(False)
def start(self):
self.started = True
def _y_default(self):
if self.started:
return [4, 5, 6]
else:
return []
class PluginE(Plugin):
""" Another plugin that expects to be started before contributing to
extension points. """
id = "E"
x = ExtensionPoint(List, id="e.x")
y = List(Int, contributes_to="d.x")
started = Bool(False)
def start(self):
self.started = True
def _y_default(self):
if self.started:
return [1, 2, 3]
else:
return []
class ApplicationTestCase(unittest.TestCase):
""" Tests for applications and plugins. """
def setUp(self):
""" Prepares the test fixture before each test method is called. """
        # Make sure that the listener contents get cleaned up before each test.
listener.obj = None
listener.trait_name = None
listener.old = None
listener.new = None
ets_config_patcher = ETSConfigPatcher()
ets_config_patcher.start()
self.addCleanup(ets_config_patcher.stop)
def test_home(self):
""" home """
application = TestApplication()
# Make sure we get the right default value.
self.assertEqual(ETSConfig.application_home, application.home)
# Delete the directory.
shutil.rmtree(application.home)
# Create a new application.
application = TestApplication()
# Make sure the directory got created.
self.assertTrue(os.path.exists(application.home))
# Delete the directory.
shutil.rmtree(application.home)
def test_no_plugins(self):
""" no plugins """
application = TestApplication()
tracker = EventTracker(
subscriptions=[
(application, "starting"),
(application, "started"),
(application, "stopping"),
(application, "stopped"),
]
)
# Start the application.
started = application.start()
self.assertEqual(True, started)
self.assertEqual(["starting", "started"], tracker.event_names)
# Stop the application.
stopped = application.stop()
self.assertEqual(True, stopped)
self.assertEqual(
["starting", "started", "stopping", "stopped"], tracker.event_names
)
def test_veto_starting(self):
""" veto starting """
application = TestApplication()
# This listener will veto the 'starting' event.
application.on_trait_change(vetoer, "starting")
tracker = EventTracker(
subscriptions=[
(application, "starting"),
(application, "started"),
(application, "stopping"),
(application, "stopped"),
]
)
# Start the application.
started = application.start()
self.assertEqual(False, started)
self.assertTrue("started" not in tracker.event_names)
def test_veto_stopping(self):
""" veto stopping """
application = TestApplication()
# This listener will veto the 'stopping' event.
application.on_trait_change(vetoer, "stopping")
tracker = EventTracker(
subscriptions=[
(application, "starting"),
(application, "started"),
(application, "stopping"),
(application, "stopped"),
]
)
# Start the application.
started = application.start()
self.assertEqual(["starting", "started"], tracker.event_names)
self.assertEqual(True, started)
# Stop the application.
stopped = application.stop()
self.assertEqual(False, stopped)
self.assertTrue("stopped" not in tracker.event_names)
def test_start_and_stop_errors(self):
""" start and stop errors """
simple_plugin = SimplePlugin()
bad_plugin = BadPlugin()
application = TestApplication(plugins=[simple_plugin, bad_plugin])
# Try to start the application - the bad plugin should barf.
with self.assertRaises(ZeroDivisionError):
application.start()
# Try to stop the application - the bad plugin should barf.
with self.assertRaises(ZeroDivisionError):
application.stop()
# Try to start a non-existent plugin.
with self.assertRaises(SystemError):
application.start_plugin(plugin_id="bogus")
# Try to stop a non-existent plugin.
with self.assertRaises(SystemError):
application.stop_plugin(plugin_id="bogus")
def test_extension_point(self):
""" extension point """
a = PluginA()
b = PluginB()
c = PluginC()
application = TestApplication(plugins=[a, b, c])
application.start()
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
# Make sure we can get the contributions via the plugin.
extensions = a.x
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
def test_extension_point_resolution_occurs_after_plugin_start(self):
# Regression test for enthought/envisage#417
# Given
d = PluginD()
e = PluginE()
application = TestApplication(plugins=[d, e])
# When
application.start()
# Then
self.assertEqual(
application.get_extensions("d.x"),
[1, 2, 3],
)
self.assertEqual(
application.get_extensions("e.x"),
[4, 5, 6],
)
def test_add_extension_point_listener(self):
""" add extension point listener """
a = PluginA()
b = PluginB()
c = PluginC()
# Start off with just two of the plugins.
application = TestApplication(plugins=[a, b])
application.start()
def listener(extension_registry, event):
""" An extension point listener. """
listener.extension_point_id = event.extension_point_id
listener.added = event.added
listener.removed = event.removed
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(3, len(extensions))
self.assertEqual([1, 2, 3], extensions)
# Add the listener.
application.add_extension_point_listener(listener, "a.x")
# Now add the other plugin.
application.add_plugin(c)
# Make sure the listener was called.
self.assertEqual("a.x", listener.extension_point_id)
self.assertEqual([], listener.removed)
self.assertEqual([98, 99, 100], listener.added)
def test_remove_extension_point_listener(self):
""" remove extension point listener """
a = PluginA()
b = PluginB()
c = PluginC()
# Start off with just one of the plugins.
application = TestApplication(plugins=[a])
application.start()
def listener(extension_registry, event):
""" An extension point listener. """
listener.extension_point_id = event.extension_point_id
listener.added = event.added
listener.removed = event.removed
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(0, len(extensions))
# Add the listener.
application.add_extension_point_listener(listener, "a.x")
# Now add one of the other plugins.
application.add_plugin(b)
# Make sure the listener was called.
self.assertEqual("a.x", listener.extension_point_id)
self.assertEqual([], listener.removed)
self.assertEqual([1, 2, 3], listener.added)
# Now remove the listener.
listener.extension_point_id = None
application.remove_extension_point_listener(listener, "a.x")
# Now add the final plugin.
application.add_plugin(c)
# Make sure the listener was *not* called.
self.assertEqual(None, listener.extension_point_id)
def test_add_plugin(self):
""" add plugin """
a = PluginA()
b = PluginB()
c = PluginC()
# Start off with just two of the plugins.
application = TestApplication(plugins=[a, b])
application.start()
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(3, len(extensions))
self.assertEqual([1, 2, 3], extensions)
# Make sure we can get the contributions via the plugin.
extensions = a.x
self.assertEqual(3, len(extensions))
self.assertEqual([1, 2, 3], extensions)
# Now add the other plugin.
application.add_plugin(c)
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
# Make sure we can get the contributions via the plugin.
extensions = a.x
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
def test_get_plugin(self):
""" get plugin """
a = PluginA()
b = PluginB()
c = PluginC()
        # Start off with all three of the plugins.
application = TestApplication(plugins=[a, b, c])
application.start()
# Make sure we can get the plugins.
self.assertEqual(a, application.get_plugin("A"))
self.assertEqual(b, application.get_plugin("B"))
self.assertEqual(c, application.get_plugin("C"))
# Make sure we can't get one that isn't there ;^)
self.assertEqual(None, application.get_plugin("BOGUS"))
def test_remove_plugin(self):
""" remove plugin """
a = PluginA()
b = PluginB()
c = PluginC()
application = TestApplication(plugins=[a, b, c])
application.start()
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
# Make sure we can get the contributions via the plugin.
extensions = a.x
self.assertEqual(6, len(extensions))
self.assertEqual([1, 2, 3, 98, 99, 100], extensions)
# Now remove one plugin.
application.remove_plugin(b)
# Make sure we can get the contributions via the application.
extensions = application.get_extensions("a.x")
self.assertEqual(3, len(extensions))
self.assertEqual([98, 99, 100], extensions)
# Make sure we can get the contributions via the plugin.
extensions = a.x
self.assertEqual(3, len(extensions))
self.assertEqual([98, 99, 100], extensions)
    def test_set_plugin_manager_at_construction_time(self):
""" set plugin manager at construction time"""
a = PluginA()
b = PluginB()
c = PluginC()
        # Start off with all three of the plugins.
application = TestApplication(
plugin_manager=PluginManager(plugins=[a, b, c])
)
application.start()
# Make sure we can get the plugins.
self.assertEqual(a, application.get_plugin("A"))
self.assertEqual(b, application.get_plugin("B"))
self.assertEqual(c, application.get_plugin("C"))
# Make sure we can't get one that isn't there ;^)
self.assertEqual(None, application.get_plugin("BOGUS"))
|
print('='*5, 'Exercise 006', '='*5)
n = int(input(' Enter a number: '))
print(' The number entered was: {}. \n its double is: {}. \n its triple is: {}.'.format(n, (n*2), (n*3)))
print(' its square root is: {:.3f}.'.format((n**(1/2))))
print('='*10, 'End', '='*10)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-12-03 08:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('proposals', '0022_auto_20191107_1438'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='applicants',
field=models.ManyToManyField(help_text='Klik in het vlak hiernaast en type een aantal letters van de voornaam, achternaam, of Solis ID van de persoon die u toe wilt voegen. Klik vervolgens om de persoon toe te voegen. Merk op dat het laden even kan duren.', related_name='applicants', to=settings.AUTH_USER_MODEL, verbose_name='Uitvoerende(n) (inclusief uzelf)'),
),
migrations.AlterField(
model_name='proposal',
name='supervisor',
field=models.ForeignKey(blank=True, help_text='Aan het einde van de procedure kunt u deze studie ter verificatie naar uw eindverantwoordelijke\n sturen. De eindverantwoordelijke zal de studie vervolgens kunnen aanpassen en indienen bij Deelkracht.\n <br><br><strong>Tip</strong>: Type een aantal letters van de voornaam, achternaam, of Solis ID van\n de persoon die u toe wilt voegen in de zoekbalk hiernaast. Merk op dat het laden even kan duren.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Eindverantwoordelijke onderzoeker'),
),
]
|
import os
import csv
def get_all_data():
all_data = dict()
all_data["users"] = get_users()
all_data["movies"] = get_movies()
all_data["ratings"] = get_ratings()
return all_data
def get_users():
return get_file_data(
get_cwd() + "/movies_example/users.csv",
["UserId", "Name"]
)
def get_movies():
return get_file_data(
get_cwd() + "/movies_example/movies.csv",
["MovieId", "Title", "Year"]
)
def get_ratings():
ratings = get_file_data(
get_cwd() + "/movies_example/ratings.csv",
["UserId", "MovieId", "Rating"]
)
for r in ratings:
r["Rating"] = float(r["Rating"])
return ratings
def get_file_data(filename, columns):
with open(filename, mode="r") as csv_file:
csv_rows = csv.DictReader(csv_file, delimiter=';')
data_set = []
for row in csv_rows:
data = dict()
for col in columns:
data[col] = row[col]
data_set.append(data)
return data_set
def get_cwd():
return os.getcwd()
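# Hypothetical usage sketch: assumes the movies_example/*.csv files exist under
# the current working directory; prints how many rows each file contributed.
if __name__ == "__main__":
    for name, rows in get_all_data().items():
        print(name, len(rows))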
|
"""supervisr mod provider PowerDNS Record Translator"""
from logging import getLogger
from typing import Generator
from supervisr.core.providers.exceptions import ProviderObjectNotFoundException
from supervisr.core.providers.objects import (ProviderObject,
ProviderObjectTranslator,
ProviderResult)
from supervisr.dns.providers.compat import CompatDNSRecord
from supervisr.provider.nix_dns.models import Domain as PDNSDomain
from supervisr.provider.nix_dns.models import Record as PDNSRecord
LOGGER = getLogger(__name__)
class PowerDNSRecordObject(ProviderObject):
"""PowerDNS intermediate Record object"""
internal = None
def __init__(self, translator, internal, *args, **kwargs):
self.internal = internal
super().__init__(translator, *args, **kwargs)
domains = PDNSDomain.objects.filter(name=internal.domain)
if not domains.exists():
raise ProviderObjectNotFoundException()
self.domain = domains.first()
def save(self, **kwargs) -> ProviderResult:
"""Save this instance"""
LOGGER.debug("About to create %s (type=%s, content=%s)",
self.internal.name, self.internal.type, self.internal.content)
        _obj, created = PDNSRecord.objects.update_or_create(
            name=self.internal.name,
            domain=self.domain,
            type=self.internal.type,
            content=self.internal.content,
            disabled=not self.internal.enabled,
            ttl=self.internal.ttl,
            prio=self.internal.priority,
            auth=1)
        # update_or_create() returns (object, created); created is True for new rows
        if created:
            return ProviderResult.SUCCESS_CREATED
        return ProviderResult.SUCCESS_UPDATED
def delete(self, **kwargs) -> ProviderResult:
"""Delete this instance"""
LOGGER.debug("About to delete %s (type=%s, content=%s)",
self.internal.name, self.internal.type, self.internal.content)
delete_count, _obj = PDNSRecord.objects.filter(
name=self.internal.name,
domain=self.domain,
type=self.internal.type,
content=self.internal.content,
disabled=not self.internal.enabled,
ttl=self.internal.ttl,
prio=self.internal.priority,
auth=1).delete()
if delete_count == 1:
return ProviderResult.SUCCESS
return ProviderResult.OTHER_ERROR
class PowerDNSRecordTranslator(ProviderObjectTranslator[CompatDNSRecord]):
"""PowerDNS Zone Translator"""
def to_external(self, internal: CompatDNSRecord) -> Generator[PowerDNSRecordObject, None, None]:
"""Convert Record to PDNS Record"""
yield PowerDNSRecordObject(
translator=self,
internal=internal
)
|
#!/usr/bin/env python
# coding: utf-8
# created by hevlhayt@foxmail.com
# Date: 2016/12/7
# Time: 16:58
from django.conf.urls import url
from welcome import views
urlpatterns = [
url(r'^welcome/$', views.welcome, name='welcome'),
]
|
from ._WriteMsg import *
|
import logging
import os
import dashpy.util.commons as commons
import dashpy.util.user_interaction_commons as ui_commons
import requests
import json
def clear_console():
os.system('cls' if os.name=='nt' else 'clear')
def assert_bytes(data):
assert isinstance(data, bytes)
def to_bytes(data, encoding='utf-8'):
if isinstance(data, bytes):
return data
if isinstance(data, str):
return data.encode(encoding)
else:
        raise TypeError('Neither str nor bytes object')
def bytes_to_utf8(bytes_data):
return bytes_data.decode()
def write_encrypted_data_to_file(data, path):
    assert_bytes(data)
    try:
        # a context manager guarantees the file is closed and avoids referencing
        # an unbound variable when open() itself fails
        with open(path, mode='wb') as file:
            file.write(data)
    except IOError:
        logging.error(f'Could not write to {path}. Please make sure you have sufficient permission for writing to the file.')
def is_wallet_existing():
    return os.path.exists(commons.WALLET_PATH)
def get_exchange_rate(symbol):
if symbol not in ui_commons.currency_symbols:
raise ValueError("Not a currency symbol")
url = f"https://api.coingecko.com/api/v3/simple/price?ids=DASH&vs_currencies={symbol}"
http_headers = {'content-type': 'application/json'}
response_data_json = requests.request("GET", url, headers=http_headers)
response = json.loads(response_data_json.text)
return response["dash"][symbol.lower()]
def is_dash_addr(addr):
if not isinstance(addr, str):
return False
if not addr[0] == 'y':
return False
if not 25 <= len(addr) <= 34:
return False
return True
def duff_to_dash(duff):
return duff / 100000000
def dash_to_duff(dash):
return int(dash * 100000000)
def mdash_to_duff(mdash):
return int(mdash * 100000)
def mdash_to_dash(mdash):
return mdash / 1000
def duff_to_mdash(duff):
return duff / 100000
def dash_to_mdash(dash):
return int(dash * 1000)
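# Hypothetical sanity checks for the unit helpers above
# (1 DASH = 1000 mDASH = 100,000,000 duff).
if __name__ == "__main__":
    assert dash_to_duff(1) == 100000000
    assert duff_to_dash(100000000) == 1.0
    assert mdash_to_duff(1) == 100000
    assert dash_to_mdash(2.5) == 2500
    print("unit conversion checks passed")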
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0003_auto_20150328_1600'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='instant_payment',
),
migrations.RemoveField(
model_name='payment',
name='multiple_description',
),
migrations.AddField(
model_name='payment',
name='delay',
field=models.SmallIntegerField(null=True, verbose_name='utsettelse', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='payment',
name='payment_type',
field=models.SmallIntegerField(default=1, verbose_name='type', choices=[(1, 'Umiddelbar'), (2, 'Frist'), (3, 'Utsettelse')]),
preserve_default=False,
),
]
|
# --------------------------------------------------------
# THOR
# Licensed under The MIT License
# Written by Axel Sauer (axel.sauer@tum.de)
# --------------------------------------------------------
import abc
from types import SimpleNamespace
import numpy as np
import torch
import torch.nn.functional as F
import cv2
from scipy.signal import tukey
from .utils import *
from .modules import ST_Module, LT_Module, Dummy_Module
MEDIATE_SIZE = 255
class THOR_Wrapper:
def __init__(self, cfg, net):
use_cuda = torch.cuda.is_available()
self.device = torch.device("cuda" if use_cuda else "cpu")
self._cfg = SimpleNamespace(**cfg)
self._mem_len_total = self._cfg.K_st + self._cfg.K_lt
assert self._cfg.K_lt > 0
self.do_full_init = True
self._net = net
self._curr_type = "lt"
self.score_viz = None
self.template_keys = ["im", "raw", "kernel", "compare"]
def setup(self, im, pos, sz):
"""
initialize the short-term and long-term module
"""
self.avg_chans = np.mean(im, axis=(0, 1))
self._frame_no = 0
# make the template
crop = self._get_crop(im, pos, sz)
temp = self._make_template(crop)
# initialize the short term module
if self._cfg.K_st:
self.st_module = ST_Module(
K=self._cfg.K_st,
template_keys=self.template_keys,
calc_div=(self._cfg.lb_type == "dynamic"),
verbose=self._cfg.verbose,
viz=self._cfg.viz,
)
else:
self.st_module = Dummy_Module(self.template_keys)
self.st_module.fill(temp)
# initialize the long term module
if self.do_full_init or self._cfg.vanilla:
self.lt_module = LT_Module(
K=self._cfg.K_lt,
template_keys=self.template_keys,
lb=self._cfg.lb,
lb_type=self._cfg.lb_type,
verbose=self._cfg.verbose,
viz=self._cfg.viz,
)
self.lt_module.fill(temp)
self.do_full_init = False
else:
# reinitialize long term only at the beginning of the episode
self.lt_module.update(temp, div_scale=0)
def update(self, im, curr_crop, pos, sz, i):
"""
update the short-term and long-term module and
update the shown templates and activations (score_viz)
"""
self._frame_no += 1
updates = {}
# only update according to dilation steps
if not self._frame_no % self._cfg.dilation:
updates["st"] = "append"
crop = self._get_crop(im, pos, sz)
            temp, throwaway_idx = self.crop_to_mem(crop)
            updates["lt"] = throwaway_idx
# reset st if it drifted
if self._cfg.K_st and self._curr_type == "lt":
updates["st"] = "fill"
self.st_module.fill(temp)
if self._cfg.viz:
self._show_modulate(torch_to_img(curr_crop), self.score_viz, i)
self._show_templates("st", i)
self._show_templates("lt", i)
return updates
def crop_to_mem(self, crop):
"""
make the template and insert into modules
"""
temp = self._make_template(crop)
# temp to st and lt module
div_scale = self.st_module.update(temp)
throwaway_idx = -2
if self._cfg.K_lt > 1:
            throwaway_idx = self.lt_module.update(temp, div_scale=div_scale)
return temp, throwaway_idx
def _get_best_temp(self, pos, sz, score, lt=False):
"""
determine the best template and return the prediction and the
score of the best long-term template
"""
# get the best score in st and lt memory
score_st, score_lt = np.split(score, [self._cfg.K_st])
best_st = [] if not len(score_st) else np.argmax(score_st)
best_lt = np.argmax(score_lt) + self._cfg.K_st
# calculate iou and switch to lt if iou too low
iou = self.get_IoU(pos.T[best_st], sz.T[best_st], pos.T[best_lt], sz.T[best_lt])
self._curr_type = "lt" if iou < self._cfg.iou_tresh else "st"
# choose lt if wanted
if lt:
self._curr_type = "lt"
return (best_lt if self._curr_type == "lt" else best_st), score[best_lt], best_lt
def _show_templates(self, mode="lt", i=0):
if mode == "st" and not self._cfg.K_st:
return
mem = self.st_module if mode == "st" else self.lt_module
y_plot = 50 if mode == "st" else 300
temp_canvas = mem.canvas.copy()
window_name = f"Templates {mode} " + str(i)
cv2.imshow(window_name, temp_canvas)
@staticmethod
def get_IoU(pos_1, sz_1, pos_2, sz_2):
if not len(pos_1):
return 0.0 # st memory is empty
if not len(pos_2):
            return 1.0  # lt memory is empty
return IOU_numpy(
xywh_to_xyxy(np.append(pos_1, sz_1)), xywh_to_xyxy(np.append(pos_2, sz_2))
)
@staticmethod
def modulate(score, mem_len, out_sz):
"""
modulate the prediction of each template with a mean activation map of all templates
"""
score_per_temp = int(np.prod(score.shape) / (mem_len * np.prod(out_sz)))
score_im = score.reshape(mem_len, score_per_temp, *out_sz)
score_mean = np.mean(score_im, axis=1)
# modulation according to score:
weights = np.max(score_mean, axis=(1, 2))
weights = weights.reshape(len(weights), 1, 1)
score_mean *= weights
# modulate the mean with the weights
score_mean_all = np.mean(score_mean, axis=0).reshape(1, *out_sz)
score_mean_norm = score_mean_all / np.max(score_mean_all)
# modulate: multiply with the mean
mean_tiled = np.tile(score_mean_norm.reshape(1, -1), score_per_temp)
score = score * mean_tiled
return score, score_mean_norm
@staticmethod
def _show_modulate(im, score_viz, i):
"""
show the current activations on top of the current crop
"""
if score_viz is None:
return # modulation is not active
im = cv2.resize(im, (MEDIATE_SIZE, MEDIATE_SIZE)).astype(np.uint8)
canvas = np.zeros([im.shape[0], im.shape[1], 3], dtype=np.uint8)
# calculate the color map
score_im_base = cv2.resize(score_viz[0], im.shape[:2])
score_im_base = (255 * score_im_base).astype(np.uint8)
im_color = cv2.applyColorMap(score_im_base, cv2.COLORMAP_JET)
# show the image
overlayed_im = cv2.addWeighted(im, 0.8, im_color, 0.7, 0)
canvas[:, : im.shape[1], :] = overlayed_im
window_name = "modulated " + str(i)
cv2.imshow(window_name, canvas)
@abc.abstractmethod
def custom_forward(self, x):
"""
implements the forward pass through the network of the tracker
with an added batch dimension [tracker specific]
"""
raise NotImplementedError("Must be implemented in subclass.")
@abc.abstractmethod
def _get_crop(self, im, pos, sz):
"""
get the crop from the search window [tracker specific]
"""
raise NotImplementedError("Must be implemented in subclass.")
@abc.abstractmethod
def _make_template(self, crop):
"""
given a crop, make a template [tracker specific]
"""
raise NotImplementedError("Must be implemented in subclass.")
@abc.abstractmethod
def batch_evaluate(self, crop):
"""
take evalue method from original tracker and add batch processing for all
templates in memory and add modulating [tracker specific]
"""
raise NotImplementedError("Must be implemented in subclass.")
class THOR_SiamFC(THOR_Wrapper):
def __init__(self, cfg, net):
super(THOR_SiamFC, self).__init__(cfg, net)
self.template_sz = 127
self.kernel_sz = 6
self.max_response = 0
def _get_crop(self, im, pos, sz):
context_sz = self._cfg.context_temp * np.sum(sz)
crop = get_subwindow_tracking_SiamFC(
im=im,
pos=pos,
model_sz=self.template_sz,
context_sz=context_sz,
avg_chans=self.avg_chans,
target_sz=sz,
)
return crop.unsqueeze(0)
def _make_template(self, crop):
temp = {}
temp["raw"] = crop.to(self.device)
temp["im"] = torch_to_img(crop)
temp["kernel"] = self._net.feature(temp["raw"])
# add the tukey window to the temp for comparison
alpha = self._cfg.tukey_alpha
win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))
temp["compare"] = temp["kernel"] * torch.Tensor(win).to(self.device)
return temp
def custom_forward(self, x):
x_f = self._net.feature(x) # 3 x 256 x 22 x 22
kernel_cat = torch.cat(
list(self.st_module.templates["kernel"])
+ list(self.lt_module.templates["kernel"])
) # mem_len x 256 x 22 x 22
# convolve
out = F.conv2d(x_f, kernel_cat).permute(1, 0, 2, 3) # mem_len x 3 x 17 x 17
# adjust the scale of the responses
return out * 0.001
def batch_evaluate(self, crop, old_pos, old_sz, p):
# get responses
responses = self.custom_forward(crop)
responses = responses.data.cpu().numpy()
batch_sz, scales = responses.shape[:2]
# upscale
upscale = lambda im: cv2.resize(
im, (p.upscale_sz, p.upscale_sz), interpolation=cv2.INTER_CUBIC
)
responses = np.array(
[[upscale(responses[t, s]) for s in range(scales)] for t in range(batch_sz)]
)
responses[:, : p.scale_num // 2] *= p.penalty_k
responses[:, p.scale_num // 2 + 1 :] *= p.penalty_k
# get peak scale for every template
scale_ids = np.argmax(np.amax(responses, axis=(2, 3)), axis=1)
# apply penalty
responses = responses[np.arange(batch_sz), scale_ids, :, :]
responses -= np.min(responses, axis=(1, 2)).reshape(-1, 1, 1)
responses /= np.sum(responses, axis=(1, 2)).reshape(-1, 1, 1) + 1e-16
responses = (
1 - p.window_influence
) * responses + p.window_influence * p.hann_window
# mediating
if self._cfg.modulate:
old_shape = responses.shape
responses = responses.reshape(batch_sz, -1)
responses, self.score_viz = self.modulate(
responses, self._mem_len_total, old_shape[-2:]
)
responses = responses.reshape(*old_shape)
# get the peak idcs
get_peak_idx = lambda x: np.unravel_index(x.argmax(), x.shape)
locs = [get_peak_idx(t) for t in responses]
# locate target center
disp_in_response = np.array(locs) - p.upscale_sz // 2
disp_in_instance = disp_in_response * p.total_stride / p.response_up
disp_in_image = (
disp_in_instance
* p.x_sz
* p.scale_factors[scale_ids].reshape(-1, 1)
/ p.instance_sz
)
target_pos = old_pos + disp_in_image
# update target size
scale = (1 - p.lr) * 1.0 + p.lr * p.scale_factors[scale_ids].reshape(-1, 1)
target_sz = old_sz * scale
# normalize the scores to the score of the initial frame
best_scores = np.max(responses, axis=(1, 2))
if not self.max_response:
self.max_response = best_scores[0]
best_scores = np.ones_like(best_scores)
else:
best_scores /= self.max_response
best_scores = np.clip(best_scores, 0, 1)
# determine the currently best template
        best_temp, lt_score, _best_lt = self._get_best_temp(
            target_pos.T, target_sz.T, best_scores
        )
return target_pos[best_temp], target_sz[best_temp], lt_score, scale[best_temp]
class THOR_SiamRPN(THOR_Wrapper):
def __init__(self, cfg, net):
super(THOR_SiamRPN, self).__init__(cfg, net)
self.template_sz = 127
self.kernel_sz = 6
self.template_keys += ["reg", "cls", "reg_anc", "cls_anc"]
self.curr_temp = None
def _get_crop(self, im, pos, sz):
wc_z = sz[0] + self._cfg.context_temp * sum(sz)
hc_z = sz[1] + self._cfg.context_temp * sum(sz)
context_size = round(np.sqrt(wc_z * hc_z))
crop = get_subwindow_tracking_SiamRPN(
im=im,
pos=pos,
model_sz=self.template_sz,
original_sz=context_size,
avg_chans=self.avg_chans,
)
return crop.unsqueeze(0)
def _make_template(self, crop):
temp = {}
temp["raw"] = crop.to(self.device)
temp["im"] = torch_to_img(crop)
temp["kernel"] = self._net.featureExtract(temp["raw"])
temp["reg"] = self._net.conv_r1(temp["kernel"])
temp["cls"] = self._net.conv_cls1(temp["kernel"])
t_s = temp["reg"].data.size()[-1]
temp["reg_anc"] = temp["reg"].view(
self._net.anchor * 4, self._net.feature_out, t_s, t_s
)
temp["cls_anc"] = temp["cls"].view(
self._net.anchor * 2, self._net.feature_out, t_s, t_s
)
# add the tukey window to the temp for comparison
alpha = self._cfg.tukey_alpha
win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))
temp["compare"] = temp["kernel"] * torch.Tensor(win).to(self.device)
return temp
def custom_forward(self, x):
x_f = self._net.featureExtract(x)
def reg_branch(x, reg_cat, l):
out = F.conv2d(x, reg_cat)
out = out.view(l, out.shape[1] // l, out.shape[2], out.shape[3])
return out
def cls_branch(x, cls_cat, l):
out = F.conv2d(x, cls_cat)
return out.view(l, out.shape[1] // l, out.shape[2], out.shape[3])
# regression
x_reg = self._net.conv_r2(x_f)
reg_cat = torch.cat(
list(self.st_module.templates["reg_anc"])
+ list(self.lt_module.templates["reg_anc"])
)
reg_res = reg_branch(x_reg, reg_cat, self._mem_len_total)
reg_res = self._net.regress_adjust(reg_res)
# classification
x_cls = self._net.conv_cls2(x_f)
cls_cat = torch.cat(
list(self.st_module.templates["cls_anc"])
+ list(self.lt_module.templates["cls_anc"])
)
cls_res = cls_branch(x_cls, cls_cat, self._mem_len_total)
return reg_res, cls_res, x_f
def batch_evaluate(self, crop, pos, size, window, scale_z, p):
"""
adapted from SiamRPNs tracker_evaluate
"""
delta, score, x_f = self.custom_forward(crop)
out_sz = score.shape[-2:]
batch_sz = self._mem_len_total
delta = delta.contiguous().view(batch_sz, 4, -1).data.cpu().numpy()
score = (
F.softmax(score.contiguous().view(batch_sz, 2, -1), dim=1)
.data[:, 1, :]
.cpu()
.numpy()
)
# delta regression
anc = np.tile(p.anchor, (batch_sz, 1, 1))
delta[:, 0, :] = delta[:, 0, :] * anc[:, :, 2] + anc[:, :, 0]
delta[:, 1, :] = delta[:, 1, :] * anc[:, :, 3] + anc[:, :, 1]
delta[:, 2, :] = np.exp(delta[:, 2, :]) * anc[:, :, 2]
delta[:, 3, :] = np.exp(delta[:, 3, :]) * anc[:, :, 3]
# penalizing
def change(r):
return np.maximum(r, 1.0 / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# scale penalty
s_c = change(sz(delta[:, 2, :], delta[:, 3, :]) / (sz_wh(size)))
# ratio penalty
r_c = change((size[0] / size[1]) / (delta[:, 2, :] / delta[:, 3, :]))
penalty = np.exp(-(r_c * s_c - 1.0) * p.penalty_k)
pscore = penalty * score
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
# mediating
if self._cfg.modulate:
pscore, self.score_viz = self.modulate(pscore, self._mem_len_total, out_sz)
# target regression
best_pscore_id = np.argmax(pscore, axis=1)
# arange is needed for correct indexing
target = delta[np.arange(batch_sz), :, best_pscore_id] / scale_z
target_sz = size / scale_z
lr = (
penalty[np.arange(batch_sz), best_pscore_id]
* score[np.arange(batch_sz), best_pscore_id]
* p.lr
)
res_x = target[:, 0] + pos[0]
res_y = target[:, 1] + pos[1]
res_w = target_sz[0] * (1 - lr) + target[:, 2] * lr
res_h = target_sz[1] * (1 - lr) + target[:, 3] * lr
target_pos = np.array([res_x, res_y])
target_sz = np.array([res_w, res_h])
best_scores = pscore[np.arange(batch_sz), best_pscore_id]
# determine the currently best template
        best_temp, lt_score, _best_lt = self._get_best_temp(target_pos, target_sz, best_scores)
return (
np.squeeze(target_pos[:, best_temp]),
np.squeeze(target_sz[:, best_temp]),
lt_score,
)
class THOR_SiamMask(THOR_Wrapper):
def __init__(self, cfg, net):
super(THOR_SiamMask, self).__init__(cfg, net)
self.template_sz = 127
self.kernel_sz = 7
def _get_crop(self, im, pos, sz):
wc_z = sz[0] + self._cfg.context_temp * sum(sz)
hc_z = sz[1] + self._cfg.context_temp * sum(sz)
context_size = round(np.sqrt(wc_z * hc_z))
crop = get_subwindow_tracking_SiamRPN(
im=im,
pos=pos,
model_sz=self.template_sz,
original_sz=context_size,
avg_chans=self.avg_chans,
)
return crop.unsqueeze(0)
def _make_template(self, crop):
temp = {}
temp["raw"] = crop.to(self.device)
temp["im"] = torch_to_img(crop)
temp["kernel"] = self._net.template(temp["raw"])
# add the tukey window to the temp for comparison
alpha = self._cfg.tukey_alpha
win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))
temp["compare"] = temp["kernel"] * torch.Tensor(win).to(self.device)
return temp
def custom_forward(self, x):
self._net.zf = torch.cat(
list(self.st_module.templates["kernel"])
+ list(self.lt_module.templates["kernel"])
)
pred_cls, pred_loc, _ = self._net.track_mask(x)
return pred_loc, pred_cls
def batch_evaluate(self, crop, pos, size, window, scale_x, p, lt=False):
"""
adapted from SiamRPNs tracker_evaluate
"""
delta, score = self.custom_forward(crop)
out_sz = score.shape[-2:]
batch_sz = self._mem_len_total
delta = delta.contiguous().view(batch_sz, 4, -1).data.cpu().numpy()
score = (
F.softmax(score.contiguous().view(batch_sz, 2, -1), dim=1)
.data[:, 1, :]
.cpu()
.numpy()
)
# delta regression
anc = np.tile(p.anchor, (batch_sz, 1, 1))
delta[:, 0, :] = delta[:, 0, :] * anc[:, :, 2] + anc[:, :, 0]
delta[:, 1, :] = delta[:, 1, :] * anc[:, :, 3] + anc[:, :, 1]
delta[:, 2, :] = np.exp(delta[:, 2, :]) * anc[:, :, 2]
delta[:, 3, :] = np.exp(delta[:, 3, :]) * anc[:, :, 3]
# penalizing
def change(r):
return np.maximum(r, 1.0 / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# scale penalty
target_sz_in_crop = size * scale_x
s_c = change(sz(delta[:, 2, :], delta[:, 3, :]) / (sz_wh(target_sz_in_crop)))
# ratio penalty
r_c = change((size[0] / size[1]) / (delta[:, 2, :] / delta[:, 3, :]))
penalty = np.exp(-(r_c * s_c - 1.0) * p.penalty_k)
pscore = penalty * score
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
# mediating
mem_len = self._mem_len_total
if self._cfg.modulate:
pscore, self.score_viz = self.modulate(pscore, mem_len, out_sz)
# target regression
best_pscore_id = np.argmax(pscore, axis=1)
# arange is needed for correct indexing
target = delta[np.arange(batch_sz), :, best_pscore_id] / scale_x
lr = (
penalty[np.arange(batch_sz), best_pscore_id]
* score[np.arange(batch_sz), best_pscore_id]
* p.lr
)
target, lr = target.astype(np.float64), lr.astype(np.float64)
res_x = target[:, 0] + pos[0]
res_y = target[:, 1] + pos[1]
res_w = size[0] * (1 - lr) + target[:, 2] * lr
res_h = size[1] * (1 - lr) + target[:, 3] * lr
target_pos = np.array([res_x, res_y])
target_sz = np.array([res_w, res_h])
best_scores = pscore[np.arange(batch_sz), best_pscore_id]
# determine the currently best template
best_temp, lt_score, best_lt = self._get_best_temp(
target_pos, target_sz, best_scores, lt=lt
)
self._net.best_temp = best_temp
return (
np.squeeze(target_pos[:, best_temp]),
np.squeeze(target_sz[:, best_temp]),
lt_score,
best_pscore_id[best_temp], best_temp, best_lt
)
|
from typing import List
from collections import defaultdict
class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
ans, codex = [], defaultdict()
def translate(c: str) -> str:
if c not in codex:
codex[c] = chr(97 + len(codex))
return codex[c]
def compare(word: str) -> None:
codex.clear()
for i in range(len(word)):
if translate(word[i]) != cipher[i]:
return
ans.append(word)
cipher = [translate(c) for c in pattern]
for word in words:
compare(word)
return ans
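# Hypothetical usage sketch for the pattern-matching solution above.
if __name__ == "__main__":
    print(Solution().findAndReplacePattern(
        ["abc", "deq", "mee", "aqq", "dkd", "ccc"], "abb"))  # -> ['mee', 'aqq']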
|
srtFormat = {
    'Elaps' : 'Elaps time:{0}', # This is a special header created by OpenTX2SRT
    'Date' : '%Y/%m/%d', # datetime.strftime
    'Time' : '%H:%M:%S', # datetime.strftime
'1RSS(dB)' : '{0}dB',
'2RSS(dB)' : '{0}dB',
'RQly(%)' : 'RQly:{0}%',
'RSNR(dB)' : '{0}dB',
'ANT' : 'ANT:{0}',
'RFMD' : 'RFMD:{0}',
'TPWR(mW)' : '{0}mW',
'TRSS(dB)' : '{0}dB',
'TQly(%)' : '{0}%',
'TSNR(dB)' : '{0}dB',
'Ptch(rad)' : 'Pitch:{0}',
'Roll(rad)' : 'Roll:{0}',
'Yaw(rad)' : 'Yaw:{0}',
'RxBt(V)' : 'VBAT:{0}V',
'Curr(A)' : '{0}A',
'Capa(mAh)' : '{0}mAh',
'Bat_(%)' : '{0}%',
'FM' : 'FM:{0}',
'GPS' : '',
'GSpd(kmh)' : 'Speed:{0}Km/h',
'Hdg(@)' : '',
'Alt(m)' : 'Alt:{0}m',
'Sats' : 'Sats:{0}',
'Rud' : '',
'Ele' : '',
'Thr' : '',
'Ail' : '',
'SA' : 'SA:{0}',
'SB' : 'SB:{0}',
'SC' : 'SC:{0}',
'SD' : 'SD:{0}',
'SE' : 'SE:{0}',
'SF' : 'SF:{0}',
'LSW' : '',
'TxBat(V)' : '{0}V'
}
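# Hypothetical usage sketch: format one telemetry row with the map above; keys
# mapped to an empty string are columns that should be omitted from the subtitle.
if __name__ == "__main__":
    row = {'RxBt(V)': 11.9, 'Alt(m)': 42, 'GPS': '52.09,5.12'}
    parts = [srtFormat[key].format(value) for key, value in row.items() if srtFormat.get(key)]
    print(' '.join(parts))  # -> VBAT:11.9V Alt:42m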
|
import requests
import csv
import os
def get_api_key():
    """
    Function reading the API key
    from key.txt
    """
    with open('key.txt', 'r') as file:
        api_key = file.readline().strip()  # drop the trailing newline
    return api_key
def parse_value(value):
"""
function parsing given
str to int
"""
value = value.strip()
if value == 'N/A':
return 0
value = value.replace('$', '')
value = value.replace(',', '')
return int(value)
def get_movies_from_api(movies):
"""
request every movie
in the given array
"""
url_base = 'http://www.omdbapi.com/'
arr = []
api_key = get_api_key()
for movie in movies:
params = "+".join(movie.split())
full_url = (
url_base +
'?apikey=' +
api_key +
'&t=' +
params
)
response = requests.get(full_url)
if response.status_code == 200 and response.json()['Response'] == 'True':
arr.append(response.json())
return arr
def add_movies_to_csv_file(movies):
"""
function adding given
movies to the CSV file
"""
try:
f = open('movies.csv')
f.close()
with open('movies.csv', 'a') as file:
for movie in movies:
if not 'imdbRating' in movie:
movie['imdbRating'] = 'N/A'
if not 'BoxOffice' in movie:
movie['BoxOffice'] = 'N/A'
file.write(f"\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}")
except OSError:
with open('movies.csv', 'w') as file:
file.write('Title;imdbRating;BoxOffice')
for movie in movies:
if not 'imdbRating' in movie:
movie['imdbRating'] = 'N/A'
if not 'BoxOffice' in movie:
movie['BoxOffice'] = 'N/A'
file.write(f"\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}")
def sort_movies_by_imdb():
"""
function sorting all
movies by IMDB rating
"""
try:
with open('movies.csv', 'r') as file:
movies = [x.split(';') for x in list(file)]
del movies[0]
            # compare ratings numerically ('N/A' sorts last), then break ties by title
            sorter = lambda x: (float(x[1]) if x[1] != 'N/A' else 0.0, x[0])
movies = sorted(movies, key=sorter, reverse=True)
return movies
except OSError:
return False
def titles():
"""
function returning
array of all titles
"""
try:
with open('movies.csv', 'r') as file:
movies = [x.split(';') for x in list(file)]
del movies[0]
arr_of_titles = []
for movie in movies:
arr_of_titles.append(movie[0])
return arr_of_titles
except OSError:
return False
def most_profitable():
"""
function returning most
profitable movie
"""
try:
with open('movies.csv', 'r') as file:
movies = [x.split(';') for x in list(file)]
del movies[0]
max_value = 0
movie_with_max = None
for movie in movies:
value = parse_value(movie[2])
if value > max_value:
max_value = value
movie_with_max = movie
return movie_with_max
except OSError:
return False
def avg_rating():
"""
function returning
average IMDB rating
"""
try:
with open('movies.csv', 'r') as file:
movies = [x.split(';') for x in list(file)]
del movies[0]
            # skip entries without a numeric rating to avoid a ValueError on 'N/A'
            rated = [float(movie[1]) for movie in movies if movie[1] != 'N/A']
            if not rated:
                return 0
            return round(sum(rated) / len(rated), 2)
except OSError:
return False
def clear_library():
"""
Function removes files
"""
try:
os.remove('key.txt')
except OSError:
return False
try:
os.remove('movies.csv')
except OSError:
return False
return True
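# Hypothetical usage sketch: requires a valid OMDb key in key.txt and network
# access; the titles used here are illustrative.
if __name__ == "__main__":
    add_movies_to_csv_file(get_movies_from_api(["The Matrix", "Blade Runner"]))
    print("titles:", titles())
    print("average IMDb rating:", avg_rating())
    print("most profitable:", most_profitable())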
|
import argparse
import sched, time
import syslog
from runner import Runner
class PeriodicScheduler(sched.scheduler):
def enter_periodic(self, interval, func, initial_delay=None, prio=1):
args = {'func':func, 'interval':interval, 'prio':prio}
if initial_delay:
self.enter(initial_delay, prio, self.enter_periodic, kwargs=args)
else:
func()
self.enter(interval, prio, self.enter_periodic, kwargs=args)
parser = argparse.ArgumentParser()
parser.add_argument("alerts_cfg",type=str, help="Json configuration file")
parser.add_argument("huebridge", help="ip of the Philips Hue Bridge")
parser.add_argument("jenkins", nargs='+', help="url of a jenkins server")
parser.add_argument("--poll_rate", default=10, type=int, help="seconds delay between each update")
parser.add_argument("--cfg_poll_rate", default=3600, type=int, help="seconds delay between each refresh off the config file")
parser.add_argument("--create_missing_lights", action='store_true', help="create virtual lights for all configurated light that don't exist")
args = parser.parse_args()
syslog.syslog('team-alert initializing...')
runner = Runner(args.alerts_cfg, args.huebridge, args.jenkins, args.create_missing_lights)
print("Updating status every {} sek".format(args.poll_rate))
print("Reloading config every {} sek".format(args.cfg_poll_rate))
scheduler = PeriodicScheduler(time.time, time.sleep)
scheduler.enter_periodic(args.cfg_poll_rate, runner.restart, args.cfg_poll_rate)
scheduler.enter_periodic(args.poll_rate, runner.update_alerts)
scheduler.run()
|
# A function to read in an ASCII XYC file and return a numpy (pronounced as written) array of pixels
import numpy as np
def read(filename):
    frame = np.zeros((256, 256))
    # a context manager closes the file once all lines are read
    with open(filename) as f:
        for line in f:
            vals = line.split("\t")
            x = int(float(vals[0].strip()))
            y = int(float(vals[1].strip()))
            c = int(float(vals[2].strip()))
            frame[x][y] = c
    return frame
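# Hypothetical usage sketch: read a tab-separated X/Y/C file and report on it.
# Assumes "frame.xyc" exists and all coordinates fall inside the 256x256 frame.
if __name__ == "__main__":
    frame = read("frame.xyc")
    print("total counts:", frame.sum(), "hottest pixel:", frame.max())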
|
################################################################################
# Project : AuShadha
# Description : Models for AuShadha OPD Visits.
# Author : Dr. Easwar TR
# Date : 17-09-2013
# LICENSE : GNU-GPL Version 3, Please see AuShadha/LICENSE.txt
################################################################################
# General Imports
from datetime import datetime, date, time
import importlib
# Django Specific Imports
from django.db import models
from django.forms import ModelForm, ModelChoiceField, Textarea, TextInput
# Application model imports
from AuShadha.apps.ui.ui import ui as UI
from AuShadha.apps.aushadha_base_models.models import AuShadhaBaseModel,AuShadhaBaseModelForm
from AuShadha.apps.clinic.models import Clinic, Staff
from registry.inv_and_imaging.models import ImagingInvestigationRegistry, LabInvestigationRegistry
#from patient.models import PatientDetail
#from admission.models import AdmissionDetail
PatientDetail = UI.get_module("PatientRegistration")
AdmissionDetail = UI.get_module("Admission")
VisitDetail = UI.get_module("OPD_Visit")
from dijit_fields_constants import VISIT_HPI_FORM_CONSTANTS
DEFAULT_VISIT_HPI_FORM_EXCLUDES = ('patient_detail',)
class VisitHPI(AuShadhaBaseModel):
def __init__(self, *args, **kwargs):
super(VisitHPI,self).__init__(*args, **kwargs)
self.__model_label__ = "hpi"
self._parent_model = 'visit_detail'
    hpi = models.TextField('History of Presenting Illness', max_length=1000, help_text='limit to 1000 characters')
visit_detail = models.ForeignKey('visit.VisitDetail')
created_at = models.DateTimeField(auto_now_add=True, editable=False)
def __unicode__(self):
return '%s' % (self.hpi)
def get_all_patient_hpi_url(self):
return '/AuShadha/visit_hpi/hpi/get/%s/' %(self.visit_detail.id)
def import_active_visit_hpi_url(self):
return '/AuShadha/visit_hpi/hpi/import_active_visit_hpi/%s/' %(self.visit_detail.id)
class Meta:
unique_together = ('hpi', 'visit_detail')
verbose_name = "History of Presenting Illness"
verbose_name_plural = "History of Presenting Illness"
ordering = ('visit_detail', 'created_at', 'hpi')
class VisitHPIForm(AuShadhaBaseModelForm):
__form_name__ = "Visit HPI Form"
dijit_fields = VISIT_HPI_FORM_CONSTANTS
class Meta:
model = VisitHPI
exclude = ('visit_detail','patient_detail')
|