# Generated by Django 3.0.8 on 2020-09-10 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tensor_site', '0006_applicationmodel_date'),
]
operations = [
migrations.AddField(
model_name='applicationmodel',
name='Status',
field=models.CharField(default='Pending', max_length=100),
),
]
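# A possible way to preview and apply this migration from the project root
# (a sketch assuming the standard Django manage.py workflow; the migration
# name on disk may differ):
#
#   python manage.py sqlmigrate tensor_site <this_migration>   # preview the SQL
#   python manage.py migrate tensor_site                       # apply it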
|
def get_map(rei):
def get_map_for_user_struct(user_struct):
return {
'user_name': str(user_struct.userName),
'irods_zone': str(user_struct.rodsZone),
'user_type': str(user_struct.userType),
'system_uid': user_struct.sysUid,
'authentication_info': {
'authentication_scheme': str(user_struct.authInfo.authScheme),
'privilege_level': user_struct.authInfo.authFlag,
'flag': user_struct.authInfo.flag,
'ppid': user_struct.authInfo.ppid,
'host': str(user_struct.authInfo.host),
'authentication_string': str(user_struct.authInfo.authStr)
},
'info': str(user_struct.userOtherInfo.userInfo),
'comments': str(user_struct.userOtherInfo.userComments),
'create_time': str(user_struct.userOtherInfo.userCreate),
'modify_time': str(user_struct.userOtherInfo.userModify)
}
return {
'plugin_instance_name': str(rei.pluginInstanceName),
'status': rei.status,
'operation_type': rei.doinp.oprType if rei.doinp is not None else None,
'connection': {
'client_address': str(rei.rsComm.clientAddr),
'connection_count': rei.rsComm.connectCnt,
'socket': rei.rsComm.sock,
'option': str(rei.rsComm.option),
'status': rei.rsComm.status,
'api_number': rei.rsComm.apiInx
} if rei.rsComm is not None else None,
'data_object': {
'object_path': str(rei.doi.objPath) if rei.doi is not None else str(rei.doinp.objPath),
'size': rei.doi.dataSize if rei.doi is not None else rei.doinp.dataSize,
'type': str(rei.doi.dataType) if rei.doi is not None else None,
'checksum': str(rei.doi.chksum) if rei.doi is not None else None,
'file_path': str(rei.doi.filePath) if rei.doi is not None else None,
'replica_number': rei.doi.replNum if rei.doi is not None else None,
'replication_status': rei.doi.replStatus if rei.doi is not None else None,
'write_flag': rei.doi.writeFlag if rei.doi is not None else None,
'owner': str(rei.doi.dataOwnerName) if rei.doi is not None else None,
'owner_zone': str(rei.doi.dataOwnerZone) if rei.doi is not None else None,
'expiry': str(rei.doi.dataExpiry) if rei.doi is not None else None,
'comments': str(rei.doi.dataComments) if rei.doi is not None else None,
'create_time': str(rei.doi.dataCreate) if rei.doi is not None else None,
'modify_time': str(rei.doi.dataModify) if rei.doi is not None else None,
'access_time': str(rei.doi.dataAccess) if rei.doi is not None else None,
'id': rei.doi.dataId if rei.doi is not None else None,
'collection_id': rei.doi.collId if rei.doi is not None else None,
'status_string': str(rei.doi.statusString) if rei.doi is not None else None,
'destination_resource_name': str(rei.doi.destRescName) if rei.doi is not None else None,
'backup_resource_name': str(rei.doi.backupRescName) if rei.doi is not None else None,
'resource_name': str(rei.doi.rescName) if rei.doi is not None else None,
} if rei.doi is not None or rei.doinp is not None else None,
'collection': {
'id': rei.coi.collId,
'name': str(rei.coi.collName),
'parent_collection_name': str(rei.coi.collParentName),
'owner': str(rei.coi.collOwnerName),
'expiry': str(rei.coi.collExpiry),
'comments': str(rei.coi.collComments),
'create_time': str(rei.coi.collCreate),
'modify_time': str(rei.coi.collModify),
'access_time': str(rei.coi.collAccess),
'inheritance': str(rei.coi.collInheritance)
} if rei.coi is not None else None,
'client_user': get_map_for_user_struct(rei.uoic) if rei.uoic is not None else get_map_for_user_struct(rei.rsComm.clientUser) if rei.rsComm is not None and rei.rsComm.clientUser is not None else None,
'proxy_user': get_map_for_user_struct(rei.uoip) if rei.uoip is not None else get_map_for_user_struct(rei.rsComm.proxyUser) if rei.rsComm is not None and rei.rsComm.proxyUser is not None else None,
'other_user': get_map_for_user_struct(rei.uoio) if rei.uoio is not None else None,
'key_value_pairs': dict((rei.condInputData.key[i], rei.condInputData.value[i]) for i in range(0, rei.condInputData.len)) if rei.condInputData is not None else None
}
|
import logging
import warnings
import multiprocessing
import time
from collections import defaultdict
import itertools
import numpy as np
import scipy as sp
import statsmodels
import statsmodels.stats
import statsmodels.stats.proportion
import pandas as pd
import plotly.express as px
from tqdm import tqdm
from joblib import Parallel, delayed
from xps import get_methods_from_results
from xps.monotonefeature import Feature, get_feature_objects
from xps.attribution.utils import xps_to_arrays
from .actionability import compute_max_ks, compute_confidence_intervals, join_results
# ---------------------------------------------------------------------------
#
class NormalizedFeature:
def __init__(
self,
feature,
normalizer,
method,
):
self.feature = feature
# Initialize the cache of normalized values stacked with the original values
self.norm = {
which: np.hstack([
normalizer.feature_transform(
self.feature.values[which],
f=self.feature.idx,
method=method,
).reshape(-1, 1),
self.feature.values[which].reshape(-1, 1),
])
for which in [-1, +1]
}
def get_norm_and_values_towards_change(self, x_i, target_class=0):
if self.feature.is_useless():
return np.array([]).reshape(-1, 2)
assert target_class in [0, 1]
# Which values (left, right)
which = -1 * (target_class - .5) * self.feature.trend
vid = int(-1 + np.heaviside(which, .5) * 2)
# Get values
norm_values = self.norm[vid].copy()
feature_values = norm_values[:, 1]
if which < 0:
iv = np.searchsorted(feature_values, x_i, side='right')
norm_values = norm_values[iv:]
elif which > 0:
iv = np.searchsorted(feature_values, x_i, side='left')
norm_values = norm_values[:iv]
else:
raise ValueError("Invalid value for which")
return norm_values
def normalize_feature_objects(features_objs, normalizer, method):
return [NormalizedFeature(
feature=feat,
normalizer=normalizer,
method=method,
) for feat in features_objs]
def __get_cost_value_feature_tuples(fso, x, x_norm, pred, phi, also_negative_phi=False):
"""Returns the Cost & Values & Features that must be changed to change the prediction
Args:
fso (list): Subset of features objects
x (np.ndarray): The current sample
x_norm (np.ndarray): The current normalized sample
pred (int): Current prediction
Returns:
np.ndarray: An array with size nb_values x 3
Col 0 : Cost
Col 1 : Value
Col 2 : Feature (idx)
"""
# Generate the Cost-Value-Feature Array
cvfs = []
for feat in fso:
# The features are not ordered, we need to get the index again
f = feat.feature.idx
# We ignore features contributing 0
if phi[f] == 0:
continue
# We only consider features that are contributing positively to the prediction
if phi[f] < 0 and not also_negative_phi:
continue
# Get the Norm-Value array
# nvs.shape[0] = nb_values
# Col 0 : Normalized values
# Col 1 : Values
nvs = feat.get_norm_and_values_towards_change(
x_i=x[f],
target_class=1 - pred,
)
# Convert norm to cost-norm
nvs[:, 0] = np.abs(nvs[:, 0] - x_norm[f])
# Make it proportional
nvs[:, 0] = nvs[:, 0] / np.abs(phi[f])
# Append feature
# Col 2 : Feature idx
cvfs.append(np.hstack([
nvs,
np.ones(nvs.shape[0]).reshape(-1, 1) * f,
]))
# Stack all together
if len(cvfs) > 0:
cvfs = np.vstack(cvfs)
else:
cvfs = np.array([]).reshape(0, 3)
return cvfs
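# Illustrative sketch (toy numbers, not part of the pipeline) of the
# Cost-Value-Feature array returned above and the cost-sort applied downstream:
#
#   import numpy as np
#   cvfs = np.array([
#       [0.30, 5.0, 2.0],   # cost, value to change to, feature index
#       [0.10, 1.5, 0.0],
#       [0.25, 7.0, 2.0],
#   ])
#   cvfs = cvfs[cvfs[:, 0].argsort()]   # cheapest change first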
def __inf_costs_and_counter(x):
return dict(L0=np.inf, L1=np.inf, L2=np.inf, Linf=np.inf), np.full(x.shape[0], np.nan),
def __find_costs_and_counterfactual(model, x, pred, cvfs):
# cvf(s) = cost, value, feature (id)
# Apply the changes one at a time until the prediction flips, then report the costs
X_prime = np.tile(x.copy(), (len(cvfs), 1))
for i, (_, newval, f) in enumerate(cvfs):
X_prime[i:, int(f)] = newval
first_change = np.searchsorted((model.predict(X_prime) != pred) * 1, 1, side='left')
# Get max cost per feature
fcosts = np.zeros(len(x))
for c, _, f in cvfs[:(first_change + 1)]:
fcosts[int(f)] = max(fcosts[int(f)], c)
# Return the maximum percentile shift and the total percentile shift
if first_change < len(X_prime):
# Column 0 contains costs
# Return max cost (per feature), and total cost (sum of features), L2 norm (on the cost of the features)
return dict(
Linf=cvfs[first_change, 0],
L1=fcosts.sum(),
L2=np.linalg.norm(fcosts),
L0=(fcosts > 0).sum(),
), X_prime[first_change]
else:
return __inf_costs_and_counter(x)
def __find_maxcost(model, x, pred, cvfs):
# cvf(s) = cost, value, feature (id)
# Apply the changes one at a time until the prediction flips, then report the maximum cost
X_prime = np.tile(x.copy(), (len(cvfs), 1))
for i, (_, newval, f) in enumerate(cvfs):
X_prime[i:, int(f)] = newval
first_change = np.searchsorted((model.predict(X_prime) != pred) * 1, 1, side='left')
if first_change < len(X_prime):
return cvfs[first_change, 0]
else:
return np.inf
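# Minimal sketch of the np.searchsorted trick used above to locate the first
# prediction flip (it assumes the 0/1 "flipped" array is non-decreasing, which
# is what the incremental changes intend):
#
#   import numpy as np
#   flipped = np.array([0, 0, 0, 1, 1])
#   np.searchsorted(flipped, 1, side='left')       # -> 3, first index that flipped
#   np.searchsorted(np.zeros(5, dtype=int), 1)     # -> 5 (== len), i.e. no flip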
def __find_counterfactual(x,
maxcost,
phi,
features_mask,
pred,
feature_trends,
normalizer,
normalization_method,
also_negative_phi=False):
__trends = np.array(feature_trends)
__features_mask = np.zeros(len(x))
__features_mask[features_mask] = 1 # Only top-k
if not also_negative_phi:
__features_mask = __features_mask * (phi > 0) # Only if positive
c = -1 * (pred * 2 - 1) * maxcost * __trends * __features_mask * np.abs(phi)
return normalizer.move_transform(np.array([x]), costs=c, method=normalization_method)[0]
def __inverse_actionability_method(
xps_results,
method,
normalizer,
splits,
feature_trends,
model,
normalization_method,
ret_counter=False,
mode='topk',
# n_jobs=1, #multiprocessing.cpu_count() - 1,
):
""""
mode :
'topk' - Change top-k (from 1 to n) features with positive attribution of the same (normalized) amount and do this for all k
'topn' - Change all features with positive attribution of a (normalized) amount proportional to their attribution
'...-vector' - Will change the features of a (normalized) amount proportional to their attribution
'...-both' - Will change also the features that are not positive
"""
# Argument preprocessing
if 'topk' in mode:
Ks = range(1, len(splits) + 1)
elif 'topn' in mode:
Ks = [len(splits)]
elif mode.startswith('top'):
Ks = [int(mode.split('-')[0][3:])]
assert Ks[0] <= len(splits), 'Top-K greater than the number of features'
else:
raise ValueError('Invalid mode.')
if 'vector' in mode:
phi_proportional = True
else:
phi_proportional = False
if 'both' in mode:
also_negative_phi = True
else:
also_negative_phi = False
# Create features objects
features_objs = np.array(
normalize_feature_objects(
features_objs=get_feature_objects(splits, None, feature_trends=feature_trends),
normalizer=normalizer,
method=normalization_method,
))
# Compute stuff from the results
xps_arrays = xps_to_arrays(xps_results, only_methods=False, only_methods_and_X=False)
X_ = xps_arrays['x']
X_norm_ = normalizer.transform(X_, method=normalization_method)
Phi = xps_arrays[method]
Pred = xps_arrays['pred']
if also_negative_phi:
Order = np.argsort(-1 * np.abs(Phi), axis=1)
else:
Order = np.argsort(-1 * Phi, axis=1)
if not np.all(Pred == 1):
warnings.warn('Class is not 1!')
# Iterate
results = [] # Samples x Dict(Cost : Top-K)
results_counter = [] # Samples x Top-K x nb_features
def do_fa_recourse(x, x_norm, phi, pred, feat_order):
topk_costs = []
topk_counter = []
# Iterate over the Ks
for k in Ks:
if np.any(np.isnan(phi)):
warnings.warn(f'NaN feature attribution (method = {method})')
# If the feature attribution is NaN we return infinite and nan
costs, x_counter = __inf_costs_and_counter(x)
else:
phix = phi if phi_proportional else 1 * (phi > 0) + (phi < 0) * -1
# Take the k most important features
fso = features_objs[feat_order[:k]]
# Compute all the subsequent changes that can be done to change the prediction
cvfs = __get_cost_value_feature_tuples(
fso=fso,
x=x,
x_norm=x_norm,
pred=pred,
phi=phix,
also_negative_phi=also_negative_phi,
)
if len(cvfs) == 0 and (phix > 0).sum() > 0:
warnings.warn('len(cvfs) is 0 but phi is positive!')
# Sort the iterator by cost
# cvfs n x 3
# Column 0 : Cost
# Column 1 : Value to which to change to pay such cost
# Column 2 : Feature to change
cvfs = cvfs[cvfs[:, 0].argsort()]
# Compute cost
costs, x_counter = __find_costs_and_counterfactual(
model=model,
x=x,
pred=pred,
cvfs=cvfs,
)
# maxcost = __find_maxcost(
# model=model,
# x=x,
# pred=pred,
# cvfs=cvfs,
# )
maxcost = costs['Linf']
if not np.isinf(maxcost):
x_counter = __find_counterfactual(
x=x,
phi=phix,
pred=pred,
features_mask=feat_order[:k],
maxcost=costs['Linf'],
normalizer=normalizer,
normalization_method=normalization_method,
feature_trends=feature_trends,
also_negative_phi=also_negative_phi,
)
# Append results
topk_costs.append(costs)
topk_counter.append(x_counter)
return topk_costs, topk_counter
# Compute stuff in parallel (list of tuples (topk_costs, topk_counter))
# if n_jobs > 1:
# par_results = Parallel(n_jobs=n_jobs)(
# delayed(do_fa_recourse)(x, x_norm, phi, pred, feat_order)
# for x, x_norm, phi, pred, feat_order in tqdm(zip(X_, X_norm_, Phi, Pred, Order), desc=method)
# )
# else:
par_results = [
do_fa_recourse(x, x_norm, phi, pred, feat_order)
for x, x_norm, phi, pred, feat_order in tqdm(zip(X_, X_norm_, Phi, Pred, Order), desc=method)
]
# Extract results
results = [pd.DataFrame(x[0]).to_dict('list') for x in par_results]
results_counter = np.array([x[1] for x in par_results])
if ret_counter:
return results, np.array(results_counter)
else:
return results
def all_inverse_actionability(xps_results, *args, methods=None, loaded=None, ret_counter=False, **kwargs):
if methods is not None:
methods = set(get_methods_from_results(xps_results)) & set(methods)
else:
methods = get_methods_from_results(xps_results)
if loaded is None:
rets, counters = {}, {}
else:
if isinstance(loaded, tuple):
rets, counters = loaded
else:
rets, counters = loaded, {}
for method in methods:
if method not in rets or (ret_counter and method not in counters):
ret = __inverse_actionability_method(
xps_results,
*args,
method=method,
ret_counter=ret_counter,
**kwargs,
)
if ret_counter:
rets[method], counters[method] = ret
else:
rets[method] = ret
if ret_counter:
return rets, counters
else:
return rets
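# Illustrative call (a sketch: model, normalizer, splits, feature_trends and
# xps_results are built elsewhere in the project, and 'percentile' is a
# hypothetical normalization method name):
#
#   rets, counters = all_inverse_actionability(
#       xps_results,
#       normalizer=normalizer,
#       splits=splits,
#       feature_trends=feature_trends,
#       model=model,
#       normalization_method='percentile',
#       mode='topk',
#       ret_counter=True,
#   )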
# ---------------------------------------------------------------------------
# %%
def swapaxes_and_select(reaction_results, what, methods=None):
selected_results = defaultdict(list)
for effects_results in reaction_results:
for method, results in effects_results.items():
if (methods is not None) and (method not in methods):
continue
selected_results[method].append(np.array([r[what] for r in results]))
for method, results in selected_results.items():
selected_results[method] = np.array(results)
return selected_results
def join_and_select_farclose(reac_close, reac_far, what):
ret = {}
for method in reac_close.keys():
length = len(reac_close[method][0]['Linf'])
# Some trailing np.inf values are missing in some experiments; pad the arrays back to the number of features
def pad_with_inf(a):
return np.pad(
np.array(a),
mode='constant',
pad_width=(length - len(a), 0),
constant_values=np.inf,
)
ret.update({
method + '_c': np.array([pad_with_inf(r[what]) for r in reac_close[method]]),
method + '_f': np.array([pad_with_inf(r[what]) for r in reac_far[method]]),
})
return ret
# ---------------------------------------------------------------------------
# %%
def compute_confidence_intervals_proportion(aggr_res, apply=lambda x: x):
lower = defaultdict(list)
upper = defaultdict(list)
for method, values in aggr_res.items():
counts = apply(values)
for v in counts:
l, u = statsmodels.stats.proportion.proportion_confint(
count=max(0, min(len(values), v * len(values))),
nobs=len(values),
alpha=.05,
method='beta',
)
lower[method].append(v - l)
upper[method].append(u - v)
lower = {method: np.array(a) for method, a in lower.items()}
upper = {method: np.array(a) for method, a in upper.items()}
return lower, upper
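# Minimal sketch of the Clopper-Pearson ('beta') interval used above,
# e.g. for 3 failures out of 100 samples:
#
#   import statsmodels.stats.proportion as smp
#   low, upp = smp.proportion_confint(count=3, nobs=100, alpha=.05, method='beta')
#   # low ~ 0.006, upp ~ 0.085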
# ---------------------------------------------------------------------------
# %%
def plotly_infinite(
df,
max_ks=None,
lower=None,
upper=None,
value_name="Rate",
):
"""
df:
# Columns = Methods
# Rows = Top-features changed
# Values = Rate of infinite
"""
var_name = 'XP'
top_col_name = 'top'
# Add KS to the method name
if max_ks is not None:
df.rename(
columns={method: method + f" (KS={str(np.round(max_ks[method], 4))[:6]})"
for method in df.columns.values},
inplace=True)
# Filter high top-k for which there is no infinite rate (=0.0)
max_f = np.searchsorted(1 * ((df.values != 0).sum(axis=1) == 0), 1, side='left')
df = df.iloc[:max_f]
df[top_col_name] = np.arange(len(df)) + 1
# Melt
df = df.melt(
id_vars=[top_col_name],
var_name=var_name,
value_name=value_name,
)
# Add confidence intervals
if lower is not None:
df['lower'] = pd.DataFrame(lower).iloc[:max_f].melt()['value'].values
if upper is not None:
df['upper'] = pd.DataFrame(upper).iloc[:max_f].melt()['value'].values
# Plot
fig = px.line(
df,
x=top_col_name,
y=value_name,
color=var_name,
title="Failure rate in changing the prediction by using the top-k features",
error_y='upper' if upper is not None else None,
error_y_minus='lower' if lower is not None else None,
)
fig.update_traces(
hoverinfo='text+name',
mode='lines+markers',
)
fig.update_layout(
yaxis_tickformat='.2%',
xaxis_title='Number of top features changed (k)',
)
fig.show()
from emutils.geometry.normalizer import get_normalization_name
def get_value_name(distance, what):
if distance is not None:
norm_name = get_normalization_name(distance)
else:
norm_name = ''
WHAT_DICT = {
'Linf': 'Maximum',
'L1': 'Total',
'L2': 'L2 Norm of',
'L0': 'Number of different features',
}
if what is not None:
what_name = WHAT_DICT[what]
else:
what_name = ''
return f"{what_name} {norm_name}".strip()
def get_how_name(mode):
if 'vector' in mode:
return 'changing top-k features proportionally'
else:
return 'changing top-k features together'
def plotly_rev_actionability(
means,
max_ks=None,
upper=None,
lower=None,
top=None,
fig=None,
value_name="Maximum Percentile Shift",
how_name="",
):
var_name = "XP"
# Transform to dataframe
df = pd.DataFrame(means)
# Add KS to the method name
if max_ks is not None:
df.rename(columns={
method: method + f" (KS={str(np.round(max_ks[method], 4))[:6]})"
for method in df.columns.values if max_ks is not None
},
inplace=True)
# Filter only top-k
df['top'] = df.index.values + 1
if top is not None:
df = df.iloc[:top]
# Melt
df = df.melt(id_vars=['top'], var_name=var_name, value_name=value_name)
if upper is not None and lower is not None:
df['lower'] = pd.DataFrame(lower).melt()['value'].values
df['upper'] = pd.DataFrame(upper).melt()['value'].values
# Plot
fig = px.line(
df,
y=value_name,
color=var_name,
x='top',
error_y='upper' if upper is not None else None,
error_y_minus='lower' if lower is not None else None,
)
fig.update_traces(
hoverinfo='text+name',
mode='lines+markers',
)
fig.update_layout(
title=f"""Cost (in terms of {value_name.lower()}) to change<br>the prediction {how_name}
""",
xaxis_title='Number of top features changed (k)',
)
fig.update_layout(yaxis_tickformat='.2%')
fig.show()
# ---------------------------------------------------------------------------
#
def plot_infinite_rate(reaction_results, methods=None, distance=None):
# Results non-aggregated
# dict of list of Numpy (one list element per sample of ~1000)
nonaggr_res = swapaxes_and_select(
reaction_results,
what='Linf',
methods=methods,
)
# Results aggregated
# dict of Numpy
aggr_res = join_results(nonaggr_res)
# Function to compute the rate of np.inf
def __count_infinite(values):
return np.isinf(values).sum(axis=0) / len(values)
# Compute rates (aggregated for all samples)
infinite_rate = {method: __count_infinite(values) for method, values in aggr_res.items()}
# Compute Max KS Divergence between samples
max_ks = compute_max_ks(nonaggr_res, apply=__count_infinite)
# Compute confidence intervals
lower, upper = compute_confidence_intervals_proportion(aggr_res, apply=__count_infinite)
# Cols: methods / Rows: nb_top_features / Values: rate
infinite_rate = pd.DataFrame(infinite_rate)
# Plot
plotly_infinite(
infinite_rate,
max_ks=max_ks,
lower=lower,
upper=upper,
# value_name=get_value_name(distance, None),
)
def plot_rev_actionability(reaction_results, what, methods=None, safe=True, distance=None, mode=None):
"""
what : result to plot
i.e. 'Linf' or 'L1'
"""
# Select results
# dict (of methods) of list (of samples) of NumPy (nb_samples x nb_features)
nonaggr_res = swapaxes_and_select(
reaction_results,
what=what,
methods=methods,
)
# Aggregate results
# dict (of methods) of NumPy (tot_nb_samples x nb_features)
aggr_res = join_results(nonaggr_res)
# Function to be applied
def __safe_mean(x):
# Transform np.inf into 1.0 (100%)
if safe:
return np.mean(np.nan_to_num(x, posinf=1), axis=0)
else:
return np.mean(x, axis=0)
# Compute means
means = {method: __safe_mean(results) for method, results in aggr_res.items()}
# Compute KS divergence
max_ks = compute_max_ks(nonaggr_res, apply=__safe_mean)
# Compute confidence intervals
lower, upper = compute_confidence_intervals(
aggr_res,
apply=lambda x: np.nan_to_num(x, posinf=1),
)
# Plot it
plotly_rev_actionability(
means=means,
max_ks=max_ks,
lower=lower,
upper=upper,
value_name=get_value_name(distance, what),
how_name=get_how_name(mode),
)
def plot_aar_close_far_infinite(reac_close, reac_far, distance, what='Linf'):
# Results
# dict of ([method_close, method_far, method2_close, ...]) NumPy (tot_nb_samples x nb_features)
aggr_res = join_and_select_farclose(reac_close=reac_close, reac_far=reac_far, what=what)
print(f"Using {len(aggr_res[list(aggr_res.keys())[0]])} samples.")
# Function to compute the rate of np.inf
def __count_infinite(values):
return np.isinf(values).sum(axis=0) / len(values)
# Compute rates (aggregated for all samples)
infinite_rate = {method: __count_infinite(values) for method, values in aggr_res.items()}
# Compute confidence intervals
lower, upper = compute_confidence_intervals_proportion(aggr_res, apply=__count_infinite)
# Cols: methods / Rows: nb_top_features / Values: rate
infinite_rate = pd.DataFrame(infinite_rate)
# Plot
plotly_infinite(
infinite_rate,
max_ks=None,
lower=lower,
upper=upper,
value_name=get_value_name(distance, None),
)
def plot_aar_close_far(reac_close, reac_far, what, distance, safe=True):
"""
what : result to plot
i.e. 'Linf' or 'L1'
"""
# Results
# dict of ([method_close, method_far, method2_close, ...]) NumPy (tot_nb_samples x nb_features)
aggr_res = join_and_select_farclose(reac_close=reac_close, reac_far=reac_far, what=what)
print(f"Using {len(aggr_res[list(aggr_res.keys())[0]])} samples.")
# Function to be applied
def __safe_mean(x):
# Transform np.inf into 1.0 (100%)
if safe:
return np.mean(np.nan_to_num(x, posinf=1), axis=0)
else:
return np.mean(x, axis=0)
# Compute means
means = {method: __safe_mean(results) for method, results in aggr_res.items()}
# Compute confidence intervals
lower, upper = compute_confidence_intervals(
aggr_res,
apply=lambda x: np.nan_to_num(x, posinf=1),
)
# Plot it
plotly_rev_actionability(
means=means,
max_ks=None,
lower=lower,
upper=upper,
value_name=get_value_name(distance, what),
)
|
def is_prime(x):
if x < 2:
return False
# Trial division up to sqrt(x) is enough to find a factor
for n in range(2, int(x ** 0.5) + 1):
if x % n == 0:
return False
return True
counter = 0
value = 1
result = 0
while(counter < 10001):
print('testing: ', counter)
if is_prime(value):
result = value
counter += 1
value += 1
print('result =', result)
|
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas
import pytest
from isolateparser.resultparser.parsers import parse_vcf
data_folder = Path(__file__).parent / 'data' / 'Clonal_Output' / 'breseq_output'
vcf_filename_real = data_folder / "data" / "output.vcf"
@pytest.fixture
def vcf_filename(tmp_path) -> Path:
contents = """
##fileformat=VCFv4.1
##fileDate
##source=breseq_GD2VCF_converterter
##contig=<ID=REL606,length=4629812>
##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">
##INFO=<ID=AD,Number=1,Type=Float,Description="Allele Depth (avg read count)">
##INFO=<ID=DP,Number=1,Type=Float,Description="Total Depth (avg read count)">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
#CHROM POS ID REF ALT QUAL FILTER INFO
REL606 16971 . GG GGGGTGGTTGTACTCA . PASS AF=1.0000;AD=59.5;DP=59.5
REL606 161041 . T G 190.1 PASS AF=1.0000;AD=61;DP=61
REL606 380188 . A C 98.1 PASS AF=1.0000;AD=31;DP=32
REL606 430835 . C T 135.0 PASS AF=1.0000;AD=46;DP=48
REL606 475292 . G GG 94.7 PASS AF=1.0000;AD=30;DP=32
"""
# TODO: add the scenario where a deletion is followed by a SNP. VCF annotates these as identical positions.
f = tmp_path / "vcf_file.vcf"
f.write_text("\n".join(i.strip() for i in contents.split('\n')))
return f
@dataclass
class Record:
""" Easier to emulate a VCF Record rather than instantiate vcf._Record"""
ALT: List[str]
REF: str
QUAL: Optional[float]
INFO: Dict[str, Any]
CHROM: str
POS: int
var_type: str
def test_convert_record_to_dictionary():
record = Record(
CHROM = 'REL606',
POS = 16971,
REF = "GG",
ALT = [
"GGGGTGGTTGTACTGACCCCAAAAAGTTG"
],
var_type = 'indel',
INFO = {'AF': [1.0], 'AD': 59.5, 'DP': 59.5},
QUAL = None
)
expected = {
parse_vcf.VCFColumns.sequence_id: 'REL606',
parse_vcf.VCFColumns.position: 16971,
parse_vcf.VCFColumns.alternate: record.ALT[0],
parse_vcf.VCFColumns.reference: "GG",
parse_vcf.VCFColumns.quality: None,
parse_vcf.VCFColumns.depth: 59.5,
parse_vcf.VCFColumns.variant_type: 'indel'
}
output = parse_vcf._convert_record_to_dictionary(record)
assert output == expected
def test_parse_vcf_file(vcf_filename):
# `seq id` and `position` are in the index.
expected_columns = ['alt', 'ref', 'quality', 'readDepth', 'variantType']
vcf_table = parse_vcf.parse_vcf_file(vcf_filename)
expected_index = [('REL606', 16971), ("REL606", 161041), ("REL606", 380188), ("REL606", 430835), ("REL606", 475292)]
assert list(vcf_table.columns) == expected_columns
assert list(vcf_table.index) == list(expected_index)
snp_table = vcf_table[vcf_table['variantType'] == 'snp']
expected_snp_index = [("REL606", 161041), ("REL606", 380188), ("REL606", 430835)]
assert list(snp_table.index) == expected_snp_index
def test_convert_vcf_to_table(vcf_filename):
record = Record(
CHROM = 'REL606',
POS = 16971,
REF = "GG",
ALT = ["GGGGTGGTTGTACTCA"],
var_type = 'indel',
INFO = {'AF': [1.0], 'AD': 59.5, 'DP': 59.5},
QUAL = None
)
expected = {
parse_vcf.VCFColumns.sequence_id: 'REL606',
parse_vcf.VCFColumns.position: 16971,
parse_vcf.VCFColumns.alternate: record.ALT[0],
parse_vcf.VCFColumns.reference: "GG",
parse_vcf.VCFColumns.quality: None,
parse_vcf.VCFColumns.depth: 59.5,
parse_vcf.VCFColumns.variant_type: 'indel'
}
table = parse_vcf._convert_vcf_to_table(vcf_filename)
assert table[0] == expected
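# These tests are collected by pytest; a possible invocation (the file name is
# an assumption) is:  pytest -q test_parse_vcf.py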
|
#! /usr/bin/python
#***********************************************************
#* Software License Agreement (BSD License)
#*
#* Copyright (c) 2009, Willow Garage, Inc.
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions
#* are met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#* * Redistributions in binary form must reproduce the above
#* copyright notice, this list of conditions and the following
#* disclaimer in the documentation and/or other materials provided
#* with the distribution.
#* * Neither the name of Willow Garage, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived
#* from this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Author: Eitan Marder-Eppstein
#***********************************************************
"""
usage: %prog /action_name action_type
"""
PKG='actionlib'
import roslib.message
from optparse import OptionParser
import wx
import sys
import rospy
import actionlib
import time
import threading
import socket
import rostopic
from cStringIO import StringIO
from library import *
from dynamic_action import DynamicAction
from actionlib_msgs.msg import GoalStatus
class AXClientApp(wx.App):
def __init__(self, action_type, action_name):
self.action_type = action_type
wx.App.__init__(self)
self.client = actionlib.SimpleActionClient(action_name, self.action_type.action)
self.condition = threading.Condition()
self.goal_msg = None
self.execute_type = None
def set_status(self, label, color):
self.status_bg.SetBackgroundColour(color)
self.status.SetLabel(label)
def set_cancel_button(self, enabled):
if enabled:
self.cancel_goal.Enable()
else:
self.cancel_goal.Disable()
def set_server_status(self, label, color, enabled):
self.server_status_bg.SetBackgroundColour(color)
self.server_status.SetLabel(label)
if enabled:
self.send_goal.Enable()
else:
self.send_goal.Disable()
def server_check(self, event):
TIMEOUT = 0.01
if self.client.wait_for_server(rospy.Duration.from_sec(TIMEOUT)):
wx.CallAfter(self.set_server_status, "Connected to server",
wx.Colour(192, 252, 253), True)
else:
wx.CallAfter(self.set_server_status, "Disconnected from server",
wx.Colour(200, 0, 0), False)
def on_cancel(self, event):
#we'll cancel the current goal
self.client.cancel_goal()
self.set_status("Canceling goal", wx.Colour(211, 34, 243))
def on_goal(self, event):
try:
self.goal_msg = yaml_msg_str(self.action_type.goal,
self.goal.GetValue())
buff = StringIO()
self.goal_msg.serialize(buff)
#send the goal to the action server and register the relevant
#callbacks
self.client.send_goal(self.goal_msg, self.done_cb, self.active_cb,
self.feedback_cb)
self.set_status("Goal is pending", wx.Colour(255, 174, 59))
self.set_cancel_button(True)
except roslib.message.SerializationError, e:
self.goal_msg = None
wx.MessageBox(str(e), "Error serializing goal", wx.OK)
def set_result(self, result):
try:
self.result.SetValue(to_yaml(result))
except UnicodeDecodeError:
self.result.SetValue("Cannot display result due to unprintable characters")
def status_gui(self, status):
return {GoalStatus.PENDING: ['PENDING', wx.Colour(255, 174, 59)],
GoalStatus.ACTIVE: ['ACTIVE', wx.Colour(0, 255, 0)],
GoalStatus.PREEMPTED: ['PREEMPTED', wx.Colour(255,252,16)],
GoalStatus.SUCCEEDED: ['SUCCEEDED',wx.Colour(38,250,253)],
GoalStatus.ABORTED: ['ABORTED',wx.Colour(200,0,0)],
GoalStatus.REJECTED: ['REJECTED',wx.Colour(253,38,159)],
GoalStatus.PREEMPTING: ['PREEMPTING',wx.Colour(253,38,159)],
GoalStatus.RECALLING: ['RECALLING',wx.Colour(230,38,253)],
GoalStatus.RECALLED: ['RECALLED',wx.Colour(230,38,253)],
GoalStatus.LOST: ['LOST',wx.Colour(255,0,0)]}[status]
def done_cb(self, state, result):
status_string, status_color = self.status_gui(state)
wx.CallAfter(self.set_status, ''.join(["Goal finished with status: ",
status_string]), status_color)
wx.CallAfter(self.set_result, result)
wx.CallAfter(self.set_cancel_button, False)
def active_cb(self):
wx.CallAfter(self.set_status, "Goal is active", wx.Colour(0,200,0))
def set_feedback(self, feedback):
try:
self.feedback.SetValue(to_yaml(feedback))
except UnicodeDecodeError:
self.feedback.SetValue("Cannot display feedback due to unprintable characters")
def feedback_cb(self, feedback):
wx.CallAfter(self.set_feedback, feedback)
def OnQuit(self):
self.server_check_timer.Stop()
def OnInit(self):
self.frame = wx.Frame(None, -1, self.action_type.name + ' GUI Client')
self.sz = wx.BoxSizer(wx.VERTICAL)
tmp_goal = self.action_type.goal()
self.goal = wx.TextCtrl(self.frame, -1, style=wx.TE_MULTILINE)
self.goal.SetValue(to_yaml(tmp_goal))
self.goal_st_bx = wx.StaticBox(self.frame, -1, "Goal")
self.goal_st = wx.StaticBoxSizer(self.goal_st_bx, wx.VERTICAL)
self.goal_st.Add(self.goal, 1, wx.EXPAND)
self.feedback = wx.TextCtrl(self.frame, -1, style=(wx.TE_MULTILINE |
wx.TE_READONLY))
self.feedback_st_bx = wx.StaticBox(self.frame, -1, "Feedback")
self.feedback_st = wx.StaticBoxSizer(self.feedback_st_bx, wx.VERTICAL)
self.feedback_st.Add(self.feedback, 1, wx.EXPAND)
self.result = wx.TextCtrl(self.frame, -1, style=(wx.TE_MULTILINE |
wx.TE_READONLY))
self.result_st_bx = wx.StaticBox(self.frame, -1, "Result")
self.result_st = wx.StaticBoxSizer(self.result_st_bx, wx.VERTICAL)
self.result_st.Add(self.result, 1, wx.EXPAND)
self.send_goal = wx.Button(self.frame, -1, label="SEND GOAL")
self.send_goal.Bind(wx.EVT_BUTTON, self.on_goal)
self.send_goal.Disable()
self.cancel_goal = wx.Button(self.frame, -1, label="CANCEL GOAL")
self.cancel_goal.Bind(wx.EVT_BUTTON, self.on_cancel)
self.cancel_goal.Disable()
self.status_bg = wx.Panel(self.frame, -1)
self.status_bg.SetBackgroundColour(wx.Colour(200,0,0))
self.status = wx.StaticText(self.status_bg, -1, label="No Goal")
self.server_status_bg = wx.Panel(self.frame, -1)
self.server_status_bg.SetBackgroundColour(wx.Colour(200,0,0))
self.server_status = wx.StaticText(self.server_status_bg, -1, label="Disconnected from server.")
self.sz.Add(self.goal_st, 1, wx.EXPAND)
self.sz.Add(self.feedback_st, 1, wx.EXPAND)
self.sz.Add(self.result_st, 1, wx.EXPAND)
self.sz.Add(self.send_goal, 0, wx.EXPAND)
self.sz.Add(self.cancel_goal, 0, wx.EXPAND)
self.sz.Add(self.status_bg, 0, wx.EXPAND)
self.sz.Add(self.server_status_bg, 0, wx.EXPAND)
self.frame.SetSizer(self.sz)
self.server_check_timer = wx.Timer(self.frame)
self.frame.Bind(wx.EVT_TIMER, self.server_check,
self.server_check_timer)
self.server_check_timer.Start(1000)
self.sz.Layout()
self.frame.Show()
return True
def main():
rospy.init_node('axclient', anonymous=True)
parser = OptionParser(__doc__.strip())
# parser.add_option("-t","--test",action="store_true", dest="test",default=False,
# help="A testing flag")
# parser.add_option("-v","--var",action="store",type="string", dest="var",default="blah")
(options, args) = parser.parse_args(rospy.myargv())
if (len(args) == 2):
# get action type via rostopic
topic_type = rostopic._get_topic_type("%s/goal"%args[1])[0]
# remove "Goal" string from action type
assert("Goal" in topic_type)
topic_type = topic_type[0:len(topic_type)-4]
elif (len(args) == 3):
topic_type = args[2]
print(topic_type)
assert("Action" in topic_type)
else:
parser.error("You must specify the action topic name (and optionally type) Eg: ./axclient.py action_topic actionlib/TwoIntsAction ")
action = DynamicAction(topic_type)
app = AXClientApp(action, args[1])
app.MainLoop()
app.OnQuit()
rospy.signal_shutdown('GUI shutdown')
if __name__ == '__main__':
main()
|
import unittest
from gui import GUI
from tkinter import *
class TestGui(unittest.TestCase):
def test_config(self):
gui = GUI(Tk(), False)
config = gui.read_config()
self.assertTrue(len(config['commands']) > 0)
self.assertEqual({1: True, 2: True, 3: True, 4: True}, config['commands'][0]['module'])
self.assertEqual(None, config['commands'][1]['module'])
self.assertEqual({1: False, 2: False, 3: False, 4: False}, config['commands'][3]['module'])
self.assertEqual(0.01, config['pool_interval'])
if __name__ == '__main__':
unittest.main()
|
import microbit
import random
import math
_GOLDEN_RATIO = (1 + 5 ** 0.5) / 2
class BreakOutOfALoop(Exception): pass
class ContinueLoop(Exception): pass
timer1 = microbit.running_time()
item = 0
item2 = True
def run():
global timer1, item, item2
item = min(max(item, 1), 100)
item2 = (item % 2) == 0
item2 = (item % 2) == 1
# following function is not implemented yet
item2 = False # not implemented yet
item2 = (item % 1) == 0
item2 = item > 0
item2 = item < 0
item2 = item % item
item = random.randint(1, 100)
item = random.random()
def main():
try:
run()
except Exception as e:
raise
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
myads_service.models
~~~~~~~~~~~~~~~~~~~~~
Models for the users (users) of AdsWS
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import Column, String, Text
from adsmutils import UTCDateTime
import json
import logging
Base = declarative_base()
class MutableDict(Mutable, dict):
"""
By default, SQLAlchemy only tracks changes of the value itself, which works
"as expected" for simple values, such as ints and strings, but not dicts.
http://stackoverflow.com/questions/25300447/
using-list-on-postgresql-json-type-with-sqlalchemy
"""
@classmethod
def coerce(cls, key, value):
"""
Convert plain dictionaries to MutableDict.
"""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"""
Detect dictionary set events and emit change events.
"""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""
Detect dictionary del events and emit change events.
"""
dict.__delitem__(self, key)
self.changed()
def setdefault(self, key, value):
"""
Detect dictionary setdefault events and emit change events
"""
dict.setdefault(self, key, value)
self.changed()
def update(self, subdict):
"""
Detect dictionary update events and emit change events
"""
dict.update(self, subdict)
self.changed()
def pop(self, key, default):
"""
Detect dictionary pop events and emit change events
:param key: key to pop
:param default: default if key does not exist
:return: the item under the given key
"""
value = dict.pop(self, key, default)
self.changed()
return value
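# Minimal usage sketch (an assumption, not part of this module): attach
# MutableDict to a JSON column so that in-place edits mark the attribute dirty.
#
#   from sqlalchemy import Column, String
#   from sqlalchemy.dialects.postgresql import JSON
#
#   class Example(Base):
#       __tablename__ = 'example'
#       id = Column(String(32), primary_key=True)
#       payload = Column(MutableDict.as_mutable(JSON), default={})
#
#   # example.payload['key'] = 'value' now triggers changed(), so the ORM
#   # knows the column is dirty and emits an UPDATE on commit.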
class User(Base):
__tablename__ = 'users'
orcid_id = Column(String(255), primary_key=True)
access_token = Column(String(255))
created = Column(UTCDateTime)
updated = Column(UTCDateTime)
profile = Column(Text)
info = Column(Text)
def toJSON(self):
"""Returns value formatted as python dict."""
return {
'orcid_id': self.orcid_id,
'access_token': self.access_token,
'created': self.created and self.created.isoformat() or None,
'updated': self.updated and self.updated.isoformat() or None,
'profile': self.profile and json.loads(self.profile) or None,
'info': self.info and json.loads(self.info) or None
}
class Profile(Base):
__tablename__ = 'profile'
orcid_id = Column(String(255), primary_key=True)
created = Column(UTCDateTime)
updated = Column(UTCDateTime)
bibcode = Column(MutableDict.as_mutable(JSON), default={})
bib_status = ['verified', 'pending', 'rejected']
nonbib_status = ['not in ADS']
keys = ['status', 'title', 'pubyear', 'pubmonth']
def get_bibcodes(self):
"""
Returns the bibcodes of the ORCID profile
"""
bibcodes, statuses = self.find_nested(self.bibcode, 'status', self.bib_status)
return bibcodes, statuses
def get_non_bibcodes(self):
"""
Returns the non-ADS records of the ORCID profile
"""
non_bibcodes, status = self.find_nested(self.bibcode, 'status', self.nonbib_status)
return non_bibcodes
def get_records(self):
"""
Returns all records from an ORCID profile
"""
return self.bibcode
def add_records(self, records):
"""
Adds a record to the bibcode field, first making sure it has the appropriate nested dict
:param records: dict of dicts of bibcodes and non-bibcodes
"""
if not self.bibcode:
self.bibcode = {}
for r in records:
for k in self.keys:
tmp = records[r].setdefault(k, None)
self.bibcode.update(records)
def remove_bibcodes(self, bibcodes):
"""
Removes a bibcode(s) from the bibcode field.
Given the way in which bibcodes are stored may change, it seems simpler
to keep the method of adding/removing in a small wrapper so that only
one location needs to be modified (or YAGNI?).
:param bibcodes: list of bibcodes
"""
[self.bibcode.pop(key, None) for key in bibcodes]
def get_nested(self, dictionary, nested_key):
"""Get all values from the nested dictionary for a given nested key"""
keys = dictionary.keys()
vals = []
for key in keys:
vals.append(dictionary[key].setdefault(nested_key, None))
return vals
def find_nested(self, dictionary, nested_key, nested_value):
"""Get all top-level keys from a nested dictionary for a given list of nested values
belonging to a given nested key
:param dictionary - nested dictionary to search; searches one level deep
:param nested_key - key within nested dictionary to search for
:param nested_value - list (or string or number) of acceptable values to search for within the
given nested_key
:return good_keys - list of top-level keys with a matching nested value to the given nested key
:return good_values - list of the value (from nested_value) retrieved
"""
if type(nested_value) is not list:
nested_value = [nested_value]
keys = dictionary.keys()
good_keys = []
good_values = []
for key in keys:
if dictionary[key].get(nested_key,'') in nested_value:
good_keys.append(key)
good_values.append(dictionary[key].get(nested_key))
return good_keys, good_values
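# Worked example of find_nested (toy data, hypothetical bibcodes; the order of
# the returned lists assumes insertion-ordered dicts):
#
#   profile.bibcode = {
#       '2001ApJ...001A': {'status': 'verified'},
#       '2002ApJ...002B': {'status': 'pending'},
#       '2003ApJ...003C': {'status': 'not in ADS'},
#   }
#   profile.find_nested(profile.bibcode, 'status', ['verified', 'pending'])
#   # -> (['2001ApJ...001A', '2002ApJ...002B'], ['verified', 'pending'])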
def update_status(self, keys, status):
"""
Update the status for a given key or keys
:param keys: str or list
:param status: str
:return: None
"""
if type(keys) is not list:
keys = [keys]
if not isinstance(status, str):
logging.warning('Status to update for record {0}, ORCID {1} must be passed as a string'.format(keys, self.orcid_id))
for key in keys:
if key in self.bibcode:
self.bibcode[key]['status'] = status
self.bibcode.changed()
else:
logging.warning('Record {0} not in profile for {1}'.format(key, self.orcid_id))
def get_status(self, keys):
"""
For a given set of records, return the statuses
:param keys: str or list
:return: good_keys - list of keys that exist in the set
:return: statuses - list of statuses of good_keys
"""
if type(keys) is not list:
keys = [keys]
good_keys = []
statuses = []
for key in keys:
if key in self.bibcode:
good_keys.append(key)
statuses.append(self.bibcode[key]['status'])
return good_keys, statuses
|
from .time import human_time
from .paginator import Paginator, EmbedPaginator
from .group import group, CaseInsensitiveDict
from .subprocess import run_subprocess
|
import logging
from dsgrid.dimension.dimension_records import DimensionRecords
from dsgrid.exceptions import (
DSGInvalidDimension,
# DSGInvalidDimensionMapping,
)
from dsgrid.utils.timing import timed_debug
logger = logging.getLogger(__name__)
class DimensionStore:
"""Provides mapping functionality for project or dataset dimensions."""
REQUIRED_FIELDS = ("id", "name")
def __init__(self, record_store):
self._record_store = record_store
self._store = {} # {class type: DimensionRecordBaseModel}
self._dimension_direct_mappings = {}
@classmethod
@timed_debug
def load(cls, dimensions):
"""Load dimension records.
Parameters
----------
dimensions : sequence
list or iterable of DimensionConfig
spark : SparkSession
Returns
-------
DimensionStore
"""
records = DimensionRecords()
store = cls(records)
for dimension in dimensions:
store.add_dimension(dimension.model)
return store
def add_dimension(self, dimension):
"""Add a dimension to the store.
Parameters
----------
dimension : DimensionRecordBaseModel
"""
assert dimension.cls not in self._store
self._store[dimension.cls] = dimension
if getattr(dimension, "mappings", None):
for mapping in dimension.mappings:
self.add_dimension_mapping(dimension, mapping)
if getattr(dimension, "records", None):
self._record_store.add_dataframe(dimension)
def get_dimension(self, dimension_class):
"""Return a dimension from the store.
Parameters
----------
dimension_class : type
subclass of DimensionRecordBaseModel
Returns
-------
DimensionRecordBaseModel
instance of DimensionRecordBaseModel
"""
self._raise_if_dimension_not_stored(dimension_class)
return self._store[dimension_class]
def iter_dimension_classes(self, base_class=None):
"""Return an iterator over the stored dimension classes.
Parameters
----------
base_class : type | None
If set, return subclasses of this abstract dimension class
Returns
-------
iterator
model classes representing each dimension
"""
if base_class is None:
return self._store.keys()
return (x for x in self._store if issubclass(x, base_class))
def list_dimension_classes(self, base_class=None):
"""Return the stored dimension classes.
Parameters
----------
base_class : type | None
If set, return subclasses of this abstract dimension class
Returns
-------
list
list of classes representing each dimension type
"""
return sorted(
list(self.iter_dimension_classes(base_class=base_class)),
key=lambda x: x.__name__,
)
@property
def record_store(self):
"""Return the DimensionRecords."""
return self._record_store
@property
def spark(self):
"""Return the SparkSession instance."""
return self._record_store.spark
def _raise_if_dimension_not_stored(self, dimension_class):
if dimension_class not in self._store:
raise DSGInvalidDimension(f"{dimension_class} is not stored")
# def add_dimension_mapping(self, dimension, mapping):
# """Add a dimension mapping to the store.
# Parameters
# ----------
# dimension : DimensionRecordBaseModel
# mapping : DimensionDirectMapping
# """
# # TODO: other mapping types
# assert isinstance(mapping, DimensionDirectMapping), mapping.__name__
# key = (dimension.cls, mapping.to_dimension)
# assert key not in self._dimension_direct_mappings
# self._dimension_direct_mappings[key] = mapping
# logger.debug("Added dimension mapping %s-%s", dimension.cls, mapping)
# def get_dimension_direct_mapping(self, from_dimension, to_dimension):
# """Return the mapping to perform a join.
# Parameters
# ----------
# from_dimension : type
# to_dimension : type
# Returns
# -------
# str
# Raises
# ------
# DSGInvalidDimensionMapping
# Raised if the mapping is not stored.
# """
# key = (from_dimension, to_dimension)
# self._raise_if_mapping_not_stored(key)
# return self._dimension_direct_mappings[key]
# def _raise_if_mapping_not_stored(self, key):
# if key not in self._dimension_direct_mappings:
# raise DSGInvalidDimensionMapping(f"{key} is not stored")
|
import os
import platform
import shutil
import subprocess
# If on Linux and this script gives a "busy file" error, please run
# bash/cleanup_tbb.sh
def maybe_build_tbb():
"""Build tbb. This function is taken from
https://github.com/stan-dev/pystan/blob/develop/setup.py"""
stan_math_lib = os.path.abspath(os.path.join(os.path.dirname(
__file__), 'pybmix', 'core', 'pybmixcpp', 'bayesmix', 'lib', 'math', 'lib'))
tbb_dir = os.path.join(stan_math_lib, 'tbb')
tbb_dir = os.path.abspath(tbb_dir)
if os.path.exists(tbb_dir):
return
make = 'make' if platform.system() != 'Windows' else 'mingw32-make'
cmd = [make]
tbb_root = os.path.join(stan_math_lib, 'tbb_2019_U8').replace("\\", "/")
cmd.extend(['-C', tbb_root])
cmd.append('tbb_build_dir={}'.format(stan_math_lib))
cmd.append('tbb_build_prefix=tbb')
cmd.append('tbb_root={}'.format(tbb_root))
cmd.append('stdver=c++14')
cmd.append('compiler=gcc')
cwd = os.path.abspath(os.path.dirname(__file__))
subprocess.check_call(cmd, cwd=cwd)
tbb_debug = os.path.join(stan_math_lib, "tbb_debug")
tbb_release = os.path.join(stan_math_lib, "tbb_release")
tbb_dir = os.path.join(stan_math_lib, "tbb")
if not os.path.exists(tbb_dir):
os.makedirs(tbb_dir)
if os.path.exists(tbb_debug):
shutil.rmtree(tbb_debug)
shutil.move(os.path.join(tbb_root, 'include'), tbb_dir)
shutil.rmtree(tbb_root)
for name in os.listdir(tbb_release):
srcname = os.path.join(tbb_release, name)
dstname = os.path.join(tbb_dir, name)
shutil.move(srcname, dstname)
if os.path.exists(tbb_release):
shutil.rmtree(tbb_release)
if __name__ == "__main__":
maybe_build_tbb()
|
"""
@Project : Imylu
@Module : decision_regression.py
@Author : Deco [deco@cubee.com]
@Created : 8/22/18 4:29 PM
@Desc :
"""
import copy
# from collections import Iterable
from typing import List, TypeVar, Tuple, Union, Iterable
import numpy as np
from work5.load_bike_data import load_bike_sharing_data
from work5.logger_setup import define_logger
from work5.utils import (load_boston_house_prices, train_test_split,
get_r2, run_time)
logger = define_logger('work5.regression_tree')
Num = TypeVar('Num', int, float)
num_if = Union[int, float]
class Node:
def __init__(self, score: Num = None):
"""Node class to build tree leaves.
Parameters:
score -- int or float, prediction of y for a rule chain (default: {None})
"""
self.score = score
self.feature = None
self.split = None
self.left = None
self.right = None
class RegressionTree:
def __init__(self):
"""RegressionTree class.
Decision tree is for discrete variables
Regression tree is for continuous variables
Attributes:
root: the root node of RegressionTree
height: the height of RegressionTree
feature_types: the feature types of X
"""
self.root = Node()
self.height = 0
self.feature_types = None
def _get_split_mse(self, X: List[List[num_if]],
y: Iterable[Num],
idx: Iterable[int],
feature: int, split: Num) -> Tuple[float, Num, List[float]]:
"""Calculate the mse of each set when x is splitted into two pieces.
MSE as Loss fuction:
y_hat = Sum(y_i) / n, i <- [1, n], the average value in the interval
Loss(y_hat, y) = Sum((y_hat - y_i) ^ 2), i <- [1, n]
Loss = LossLeftNode + LossRightNode
--------------------------------------------------------------------
Parameters:
X {list} -- 2d list object with int or float
y {iterable} -- 1d list object with int or float
idx {iterable} -- indexes, 1d list object with int
feature {int} -- Feature number, that is, column number of the dataframe
split -- int or float, Split point of x
Returns:
tuple -- MSE, split point and average of y on each side of the split
"""
# X: Iterable[Iterable[num_if]]
# X = [list(item) for item in X]
# When the matrix is very large, the step above badly hurts performance (from 6s to 26s)
y = list(y)
idx = list(idx)
split_sum = [0, 0]
split_cnt = [0, 0]
split_sqr_sum = [0, 0]
# Iterate each row and compare with the split point
for i in idx:
# idx are the selected rows of the dataframe
xi, yi = X[i][feature], y[i]
if xi < split:
split_cnt[0] += 1
split_sum[0] += yi
split_sqr_sum[0] += yi ** 2
else:
split_cnt[1] += 1
split_sum[1] += yi
split_sqr_sum[1] += yi ** 2
# Calculate the mse of y, D(X) = E{[X-E(X)]^2} = E(X^2)-[E(X)]^2
# Estimated by the mean of y, and then subtract the value of y
# num*E{[X-E(X)]^2} = num*E(X^2)-num*[E(X)]^2
split_avg = [split_sum[0] / split_cnt[0], split_sum[1] / split_cnt[1]]
split_mse = [split_sqr_sum[0] - split_sum[0] * split_avg[0],
split_sqr_sum[1] - split_sum[1] * split_avg[1]]
return sum(split_mse), split, split_avg
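# Quick numeric check (toy data) of the identity used above,
# Sum((y_i - mean)^2) = Sum(y_i^2) - Sum(y_i) * mean, for one side of a split:
#
#   import numpy as np
#   y_left = np.array([1.0, 2.0, 3.0])
#   lhs = ((y_left - y_left.mean()) ** 2).sum()                 # 2.0
#   rhs = (y_left ** 2).sum() - y_left.sum() * y_left.mean()    # 14 - 6 * 2 = 2.0
#   assert np.isclose(lhs, rhs)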
def _get_category_mse(self, X: List[List[num_if]], y: List[Num], idx: List[int],
feature: int, category: str) -> Tuple[float, str, List[float]]:
"""Calculate the mse of each set when x is splitted into two parts.
MSE as Loss fuction.
--------------------------------------------------------------------
Arguments:
X {list} -- 2d list object with int or float
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number, that is, column number of the dataframe
category {str} -- Category point of x
Returns:
tuple -- MSE, category point and average of y in each part
"""
split_sum = [0, 0]
split_cnt = [0, 0]
split_sqr_sum = [0, 0]
# Iterate each row and compare with the split point
for i in idx:
# idx are the selected rows of the dataframe
xi, yi = X[i][feature], y[i]
if xi == category:
split_cnt[0] += 1
split_sum[0] += yi
split_sqr_sum[0] += yi ** 2
else:
split_cnt[1] += 1
split_sum[1] += yi
split_sqr_sum[1] += yi ** 2
# Calculate the mse of y, D(X) = E{[X-E(X)]^2} = E(X^2)-[E(X)]^2
# Estimated by the mean of y, and then subtract the value of y
# num*E{[X-E(X)]^2} = num*E(X^2)-num*[E(X)]^2
split_avg = [split_sum[0] / split_cnt[0], split_sum[1] / split_cnt[1]]
split_mse = [split_sqr_sum[0] - split_sum[0] * split_avg[0],
split_sqr_sum[1] - split_sum[1] * split_avg[1]]
return sum(split_mse), category, split_avg
def _info(self, y: np.ndarray) -> np.ndarray:
"""Use the standard deviation to measure the magnitude of information
็จๆ ๅๅทฎ็ๅคงๅฐๆฅ่กจๅพ่ฟ็ปญๅ้็ไฟกๆฏ้็ๅคงๅฐ
Parameters:
y -- 1d numpy.ndarray object with int or float
Returns:
np.float64
"""
return np.std(y)
def _condition_info_continuous(self, x: np.ndarray,
y: np.ndarray, split: Num) -> Num:
"""
the weighted continuous information, X is continuous
:param x: 1d numpy.array with int or float
:param y: 1d numpy.array int or float
:param split: float
:return: float
"""
low_rate = (x < split).sum() / x.size
high_rate = 1 - low_rate
low_info = self._info(y[np.where(x < split)])
# np.where will give the index of True elements
high_info = self._info(y[np.where(x >= split)])
res = low_rate * low_info + high_rate * high_info
return res
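# Toy sketch of the weighted standard deviation computed above:
#
#   import numpy as np
#   x = np.array([1.0, 2.0, 5.0, 6.0])
#   y = np.array([1.0, 1.0, 10.0, 12.0])
#   split = 4.0
#   low_rate = (x < split).sum() / x.size                                   # 0.5
#   info = low_rate * np.std(y[x < split]) + (1 - low_rate) * np.std(y[x >= split])
#   # -> 0.5 * 0.0 + 0.5 * 1.0 = 0.5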
def _condition_info_categorical(self, x: np.ndarray, y: np.ndarray,
category: str) -> Num:
"""
the weighted categorical information, X is categorical
:param x: 1d numpy.array with str
:param y: 1d numpy.array with int or float
:param category: str
:return: float
"""
low_rate = (x == category).sum() / x.size
high_rate = 1 - low_rate
low_info = self._info(y[np.where(x == category)])
high_info = self._info(y[np.where(x != category)])
res = low_rate * low_info + high_rate * high_info
return res
def _get_split_info(self, X: List[List[num_if]], y: List[Num], idx: List[int],
feature: int, split: Num) -> Tuple[float, Num, List[float]]:
"""Calculate the reduction of standard deviation of each set when x is
splitted into two pieces.
Reduction of standard deviation as Loss fuction, the maximal reduction is best
Or weighted standard deviation as loss function, the minimal value is best
std of the column - the weighted sum of std of the two groups
--------------------------------------------------------------------
Parameters:
X {list} -- 2d list object with int or float
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number, that is, column number of the dataframe
split {float} -- Split point of x
Returns:
tuple -- weighted std (info), split point and average of y on each side of the split
"""
select_x = [item[feature] for item in X]
select_x = np.array(select_x)
select_x = select_x[idx]
y = np.array(y)
select_y = y[idx]
low_y = select_y[select_x < split].mean()
high_y = select_y[select_x >= split].mean()
split_info = self._condition_info_continuous(select_x, select_y, split)
return split_info, split, [low_y, high_y]
def _get_category_info(self, X: List[List[num_if]], y: List[Num], idx: List[int],
feature: int, category: str) -> Tuple[float, str, List[float]]:
"""Calculate the standard deviation of each set when x is
splitted into two discrete parts.
The weighted standard deviation as loss function, the minimal value is best
std of the column - the weighted sum of std of the two parts
--------------------------------------------------------------------
Parameters:
X {list} -- 2d list object with int or float
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number, that is, column number of the dataframe
category {str} -- Chosen category of x to conduct binary classification
Returns:
tuple -- weighted std (info), category point and average of y in each part
"""
X = np.array(X)
select_x = X[idx, feature]
y = np.array(y)
select_y = y[idx]
low_y = select_y[select_x == category].mean()
high_y = select_y[select_x != category].mean()
split_info = self._condition_info_categorical(
select_x, select_y, category)
return split_info, category, [low_y, high_y]
def _choose_split_point(self, X: List[List[num_if]], y: List[Num], idx: List[int],
feature: int):
"""Iterate each xi and split x, y into two pieces,
and the best split point is the xi when we get minimum mse.
Parameters:
X {list} -- 2d list object with int or float
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number
Returns:
tuple -- The best choice of mse, feature, split point and average
could be None
"""
        # Feature cannot be split if there's only one unique element.
unique = set([X[i][feature] for i in idx])
if len(unique) == 1:
return None
# In case of empty split
unique.remove(min(unique))
# Get split point which has min mse
mse, split, split_avg = min(
(self._get_split_mse(X, y, idx, feature, split)
# Here we can choose different algorithms
# _get_split_mse _get_split_info
for split in unique), key=lambda x: x[0])
return mse, feature, split, split_avg
def _choose_category_point(self, X: List[List[str]], y: List[Num],
idx: List[int], feature: int):
"""Iterate each xi and classify x, y into two parts,
and the best category point is the xi when we get minimum info or mse.
Parameters:
X {list} -- 2d list with str
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number
Returns:
tuple -- The best choice of mse, feature, category point and average
could be None
"""
        # Feature cannot be split if there's only one unique element.
unique = set([X[i][feature] for i in idx])
if len(unique) == 1:
return None
# If there is only one category left, None should be returned
# In case of empty split
# unique.remove(min(unique))
# We don't need this for categorical situation
# Get split point which has min mse
mse, category_idx, split_avg = min(
(self._get_category_mse(X, y, idx, feature, category)
# Here we can choose different algorithms
# _get_category_mse _get_category_info
for category in unique), key=lambda x: x[0])
# logger.debug(split_avg)
return mse, feature, category_idx, split_avg
def _detect_feature_type(self, x: list) -> int:
"""
To determine the type of the feature
:param x: 1d list with int, float or str
:return: 0 or 1, 0 represents continuous, 1 represents discrete
"""
for item in x:
if item is not None:
return 1 if type(item) == str else 0
def _get_column(self, x: list, i: int) -> list:
return [item[i] for item in x]
def _choose_feature(self, X: list, y: List[Num], idx: List[int]):
"""Choose the feature which has minimum mse or minimal info.
Parameters:
X {list} -- 2d list with int, float or str
y {list} -- 1d list with int or float
idx {list} -- indexes, 1d list object with int
Returns:
tuple -- (feature number, classify point or split point,
average, idx_classify)
could be None
"""
m = len(X[0])
# x[0] selects the first row
# Compare the mse of each feature and choose best one.
split_rets = []
for i in range(m):
if self.feature_types[i]:
item = self._choose_category_point(X, y, idx, i)
else:
item = self._choose_split_point(X, y, idx, i)
if item is not None:
split_rets.append(item)
# If it is None, it will not be considered as the chosen feature
        # Terminate if no feature can be split
if not split_rets: # split_rets == []
return None
_, feature, split, split_avg = min(split_rets, key=lambda x: x[0])
# Get split idx into two pieces and empty the idx.
idx_split = [[], []]
# it contains different groups, and produces idx for next step
while idx:
i = idx.pop()
# logger.debug(i)
xi = X[i][feature]
if self.feature_types[feature]:
if xi == split:
idx_split[0].append(i)
else:
idx_split[1].append(i)
else:
if xi < split:
idx_split[0].append(i)
else:
idx_split[1].append(i)
return feature, split, split_avg, idx_split
def _expr2literal(self, expr: list) -> str:
"""Auxiliary function of print_rules.
Parameters:
expr {list} -- 1D list like [Feature, op, split]
            Op: In the continuous case, -1 means less than, 1 means equal or more than
In discrete situation, -1 means equal, 1 means not equal
Returns:
str
"""
feature, op, split = expr
if type(split) == float or type(split) == int:
op = ">=" if op == 1 else "<"
return "Feature%d %s %.4f" % (feature, op, split)
if type(split) == str:
op = "!=" if op == 1 else "=="
return "Feature%d %s %s" % (feature, op, split)
def _get_rules(self):
"""Get the rules of all the decision tree leaf nodes.
        Print the rules with a breadth-first search of the tree:
        leaves at shallower positions are printed first, then from left to right.
Expr: 1D list like [Feature, op, split]
Rule: 2D list like [[Feature, op, split], score]
"""
que = [[self.root, []]]
self.rules = []
# Breadth-First Search
while que:
nd, exprs = que.pop(0)
# Generate a rule when the current node is leaf node
if not(nd.left or nd.right):
# Convert expression to text
literals = list(map(self._expr2literal, exprs))
self.rules.append([literals, nd.score])
# Expand when the current node has left child
if nd.left:
rule_left = copy.copy(exprs)
rule_left.append([nd.feature, -1, nd.split])
que.append([nd.left, rule_left])
# Expand when the current node has right child
if nd.right:
rule_right = copy.copy(exprs)
rule_right.append([nd.feature, 1, nd.split])
que.append([nd.right, rule_right])
# logger.debug(self.rules)
    def fit(self, X: list, y: list, max_depth: int = 5, min_samples_split: int = 2):
"""Build a regression decision tree.
Note:
At least there's one column in X has more than 2 unique elements
y cannot be all the same value
Parameters:
X {list} -- 2d list object with int, float or str
y {list} -- 1d list object with int or float
            max_depth {int} -- The maximum depth of the tree. (default: {5})
min_samples_split {int} -- The minimum number of samples required
to split an internal node (default: {2})
"""
        # max_depth reflects the height of the tree: at most how many decision
        # steps are taken before reaching a leaf.
        # min_samples_split determines how many times a feature can be split;
        # the smaller min_samples_split is, the more splits are allowed.
# Initialize with depth, node, indexes
self.root = Node()
que = [[0, self.root, list(range(len(y)))]]
# logger.debug(que)
# Breadth-First Search
        # The decision tree is built level by level, so breadth-first search is used.
depth = 0
m = len(X[0])
self.feature_types = [self._detect_feature_type(self._get_column(X, i))
for i in range(m)]
logger.debug(self.feature_types)
while que:
depth, nd, idx = que.pop(0)
# Terminate loop if tree depth is more than max_depth
            # At first, que holds only one element; if no new elements are
            # appended to que, the loop runs only once and que becomes empty.
if depth == max_depth:
break
# Stop split when number of node samples is less than
# min_samples_split or Node is 100% pure.
            if len(idx) < min_samples_split or len(set(
                    map(lambda i: y[i], idx))) == 1:
continue
# Stop split if no feature has more than 2 unique elements
feature_rets = self._choose_feature(X, y, idx)
if feature_rets is None:
continue
# if feature_rets is None, it means that for X's with these idx,
# the split should be stopped
# Split
nd.feature, nd.split, split_avg, idx_split = feature_rets
nd.left = Node(split_avg[0])
nd.right = Node(split_avg[1])
que.append([depth+1, nd.left, idx_split[0]])
que.append([depth+1, nd.right, idx_split[1]])
# Update tree depth and rules
self.height = depth
self._get_rules()
def print_rules(self):
"""Print the rules of all the regression decision tree leaf nodes.
"""
for i, rule in enumerate(self.rules):
literals, score = rule
print("Rule %d: " % i, ' | '.join(
literals) + ' => split_hat %.4f' % score)
def _predict(self, row: list) -> float:
"""Auxiliary function of predict.
Arguments:
row {list} -- 1D list with int, float or str
Returns:
int or float -- prediction of yi
"""
nd = self.root
while nd.left and nd.right:
if self.feature_types[nd.feature]:
# categorical split
if row[nd.feature] == nd.split:
nd = nd.left
else:
nd = nd.right
else:
# continuous split
if row[nd.feature] < nd.split:
nd = nd.left
else:
nd = nd.right
return nd.score
# nd.score must be float?
def predict(self, X: list) -> List[float]:
"""Get the prediction of y.
        Prediction in batch.
Arguments:
X {list} -- 2d list object with int, float or str
Returns:
list -- 1d list object with int or float
"""
return [self._predict(Xi) for Xi in X]
@run_time
def test_continuous_continuous():
"""test: x is continous, and y is continuous"""
print("Tesing the accuracy of RegressionTree...")
# Load data
X, y = load_boston_house_prices()
# Split data randomly, train set rate 70%
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=10)
# Train model
reg = RegressionTree()
reg.fit(X=X_train, y=y_train, max_depth=4)
# Show rules
reg.print_rules()
# Model accuracy
get_r2(reg, X_test, y_test)
@run_time
def test_arbitrary_continuous():
"""test: x is continuous or categorical, and y is continuous"""
print("Tesing the accuracy of RegressionTree...")
# Load data
X, y = load_bike_sharing_data()
logger.debug(X[0])
logger.debug([max(y), sum(y)/len(y), min(y)])
# Split data randomly, train set rate 70%
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=10)
# Train model
reg = RegressionTree()
reg.fit(X=X_train, y=y_train, max_depth=5)
# Show rules
reg.print_rules()
# Model accuracy
get_r2(reg, X_test, y_test)
print('A prediction:', reg.predict(
['1 0 1 0 0 6 1'.split() + [0.24, 0.81, 0.1]]), sep=' ')
def run():
# test_continuous_continuous()
test_arbitrary_continuous()
if __name__ == "__main__":
run()
|
import os
import requests
import socket
from time import sleep
def main():
os.system("clear")
print("""\033[1;95m
โโโโโโโโโโ โโโโโโโโโโโโโโโ โโโโโโโโโโ โโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโ โโโโโโโโโ โโโโโโ โโโโโโโโโ โโโโโโโโ
โโโโโโโโโโ โโโโโโ โโโโโโโโโโโโโโโโ โโโโโโโโโ โโโโโโโโ
โโโโโโ โโโ โโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโ โโโ
โโโโโโ โโโ โโโโโโ โโโโโโโโโโโโ โโโโโโโโโโโ โโโ
\033[1;102m\033[1;30m_Cyber security_\033[0;0m
\033[1;96m[\033[1;92m1\033[1;96m] \033[1;95m<-|-> \033[1;92mConsultar IP
\033[1;96m[\033[1;92m2\033[1;96m] \033[1;95m<-|-> \033[1;92mConsultar meu IP
\033[1;96m[\033[1;92m3\033[1;96m] \033[1;95m<-|-> \033[1;92mChecar porta
\033[1;96m[\033[1;92m4\033[1;96m] \033[1;95m<-|-> \033[1;92mSair
""")
es = input('\033[1;95m-->\033[1;92m ')
if es == '1' or es == '01':
os.system('clear')
ip = input('\033[1;95mDigite o IP \033[1;92m--> \033[1;93m')
request = requests.get(f'http://ip-api.com/json/{ip}')
json_api = request.json()
if 'message' not in json_api:
os.system('clear')
print('\033[1;94mResultados abaixo :)\n')
print('\033[1;93mIp: {}'.format(json_api['query']))
print('Pais: {}'.format(json_api['country']))
print('Estado: {}'.format(json_api['regionName']))
print('Cidade: {}'.format(json_api['city']))
print('Zip: {}'.format(json_api['zip']))
print('Longitude: {}'.format(json_api['lon']))
print('Latitude: {}'.format(json_api['lat']))
print('Timezone: {}'.format(json_api['timezone']))
print('Provedor: {}'.format(json_api['isp']))
print('Organizaรงรฃo: {}\n'.format(json_api['org']))
else:
print('\n\033[1;91mIP invalido\n')
print('\033[1;102m\033[1;30m1 = Sair | 2 = Voltar ao menu\n\033[0;0m')
sair = input('\033[1;95m-->\033[1;92m ')
if sair == '01' or sair == '1':
os.system('clear')
print('\033[1;102m\033[1;91m_Cyber Security_\033[0;0m\n')
exit()
else:
main()
elif es == '2' or es == '02':
os.system('clear')
request = requests.get('http://ip-api.com/json/?')
json_api = request.json()
print('\033[1;94mResultados abaixo :) \n')
print('\033[1;93mIp: {}'.format(json_api['query']))
print('Pais: {}'.format(json_api['country']))
print('Estado: {}'.format(json_api['regionName']))
print('Cidade: {}'.format(json_api['city']))
print('Zip: {}'.format(json_api['zip']))
print('Longitude: {}'.format(json_api['lon']))
print('Latitude: {}'.format(json_api['lat']))
print('Timezone: {}'.format(json_api['timezone']))
print('Provedor: {}'.format(json_api['isp']))
print('Organizaรงรฃo: {}\n'.format(json_api['org']))
print('\033[1;102m\033[1;30m1 = Sair | 2 = Voltar ao menu\033[0;0m\n')
sair = input('\033[1;95m-->\033[1;92m ')
if sair == '1' or sair == '01':
os.system('clear')
print('\033[1;102m\033[1;91m_Cyber Security_\033[0;0m\n')
exit()
else:
main()
elif es == '3' or es == '03':
cnx = socket.socket()
os.system('clear')
ip = input('\033[1;95mDigite um IP \033[1;92m-->\033[1;93m ')
porta = int(input('\033[1;95mDigite uma porta \033[1;92m-->\033[1;93m '))
time = 1
cnx.settimeout(time)
n = cnx.connect_ex((ip,porta))
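        # connect_ex() returns 0 when the TCP connection succeeds (port open)
        # and a non-zero errno value otherwise, instead of raising an exception.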
if n == 0:
print('\n\033[1;92mPorta aberta\n')
else:
            print('\n\033[1;91mPorta fechada\n')
print('\033[1;102m\033[1;30m1 = Sair | 2 = Voltar ao menu\033[0;0m\n')
sair = input('\033[1;95m-->\033[1;92m ')
if sair == '1' or sair == '01':
os.system('clear')
            print('\033[1;102m\033[1;30m_Cyber Security_\033[0;0m\n')
exit()
else:
main()
elif es == '4' or es == '04':
os.system('clear')
print('\033[1;102m\033[1;30m_Cyber Security_\033[0;0m\n')
exit()
else:
os.system('clear')
print('\033[1;91mOpรงรฃo invalida')
sleep(2)
main()
main()
|
"""Steps for MANAGE ACCOUTN top bar features of Onezone page.
"""
from pytest_bdd import parsers, when, then
from tests.gui.conftest import WAIT_BACKEND, WAIT_FRONTEND
from tests.gui.utils.generic import repeat_failed
__author__ = "Bartosz Walkowicz"
__copyright__ = "Copyright (C) 2017 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
@when(parsers.parse('user of {browser_id} expands account settings '
'dropdown in "ACCOUNT MANAGE" Onezone top bar'))
@then(parsers.parse('user of {browser_id} expands account settings '
'dropdown in "ACCOUNT MANAGE" Onezone top bar'))
@repeat_failed(timeout=WAIT_FRONTEND)
def expand_account_settings_in_oz(selenium, browser_id, oz_page):
driver = selenium[browser_id]
oz_page(driver)['manage account'].expand()
@when(parsers.re(r'user of (?P<browser_id>.+?) clicks on (?P<option>LOGOUT) '
r'item in expanded settings dropdown in "ACCOUNT MANAGE" '
r'Onezone top bar'))
@then(parsers.re(r'user of (?P<browser_id>.+?) clicks on (?P<option>LOGOUT) '
r'item in expanded settings dropdown in "ACCOUNT MANAGE" '
r'Onezone top bar'))
def click_on_option_in_account_settings_in_oz(selenium, browser_id,
option, oz_page):
driver = selenium[browser_id]
action = getattr(oz_page(driver)['manage account'], option.lower())
action()
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.views.decorators.csrf import csrf_exempt
from django.views import defaults as default_views
from graphene_django.views import GraphQLView
from django.urls import include, path
from django.contrib import admin
#from .views import debug_api
#urlpatterns = [
# path('admin/', admin.site.urls),
# path('api/debug', debug_api),
#]
urlpatterns = [
path(settings.ADMIN_URL, admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# API URLS
urlpatterns += [
path("graphql/", csrf_exempt(GraphQLView.as_view(graphiql=True))),
path("api/", include("config.api_router")),
]
if settings.DEBUG:
urlpatterns += [
path("400/",default_views.bad_request, kwargs={"exception": Exception("Bad Request!")},),
path("403/",default_views.permission_denied, kwargs={"exception": Exception("Permission Denied")},),
path("404/",default_views.page_not_found, kwargs={"exception": Exception("Page not Found")},),
path("500/", default_views.server_error),
]
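# With DEBUG enabled, the routes above let you preview Django's default error
# pages locally, e.g. by visiting /400/, /403/, /404/ or /500/ in a browser.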
|
import logging
import os
import sys
import time
from typing import Optional
import colorlog
import redis
from flask import Flask, request, render_template
from rq import Queue
from rq.exceptions import NoSuchJobError
from rq.job import Job
from dynoscale.config import get_redis_urls_from_environ
from dynoscale.wsgi import DynoscaleWsgiApp
from worker import count_cycles_and_wait_a_bit
# Configure logging
handler = colorlog.StreamHandler(stream=sys.stdout)
handler.setFormatter(
colorlog.ColoredFormatter(
fmt="%(asctime)s.%(msecs)03d %(log_color)s%(levelname)-8s%(reset)s %(processName)s %(threadName)10s"
" %(name)s: %(message)s",
datefmt="%H:%M:%S",
)
)
logging.getLogger("").handlers = [handler]
logging.getLogger("dynoscale").setLevel(logging.DEBUG)
app = Flask(__name__)
app.logger.setLevel(logging.DEBUG)
def get_hit_count():
retries = 5
while True:
try:
return conn.incr('hits')
except redis.exceptions.ConnectionError as exc:
if retries == 0:
raise exc
retries -= 1
time.sleep(0.5)
@app.route('/')
def index():
queue_name = request.args.get('queue_name')
job = None
if queue_name == 'urgent':
job = q_urgent.enqueue_call(
func=count_cycles_and_wait_a_bit,
kwargs={'duration': 1.0},
)
elif queue_name == 'priority':
job = q_priority.enqueue_call(
func=count_cycles_and_wait_a_bit,
kwargs={'duration': 3.0},
)
elif queue_name == 'default':
job = q_default.enqueue_call(
func=count_cycles_and_wait_a_bit,
kwargs={'duration': 5.0},
)
return render_template(
'index.html',
hit_count=get_hit_count(),
job=job,
q_urgent=q_urgent,
q_default=q_default,
q_priority=q_priority
)
@app.route("/jobs/<job_id>", methods=['GET'])
def job_detail(job_id):
job = None
try:
job = Job.fetch(job_id, connection=conn)
except NoSuchJobError:
logging.getLogger().warning(f"Job with id {job_id} does not exist!")
    return render_template('job_detail.html', job=job)
def init_redis_conn_and_queues(redis_url: Optional[str] = None):
if redis_url is None:
urls_from_env = list(get_redis_urls_from_environ().values())
        redis_url = urls_from_env[0] if urls_from_env else 'redis://127.0.0.1:6379'
global conn
global q_urgent
global q_priority
global q_default
conn = redis.from_url(redis_url)
q_urgent = Queue('urgent', connection=conn)
q_priority = Queue('priority', connection=conn)
q_default = Queue(connection=conn)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--redis_url",
type=str,
help="Redis Url for RQ, default: %(default)s",
default='redis://127.0.0.1:6379'
)
parser.add_argument(
"--redis_url_in",
type=str,
help="Env var with Redis Url for RQ, default: %(default)s",
default='REDIS_URL'
)
args = parser.parse_args()
redis_url = os.getenv(args.redis_url_in, args.redis_url)
init_redis_conn_and_queues(redis_url)
print(f"Starting Flask server, sending RQ jobs to redis @ {redis_url}")
app.wsgi_app = DynoscaleWsgiApp(app.wsgi_app)
    port = int(os.getenv('PORT', 3000))
app.run(host='0.0.0.0', port=port, debug=True)
else:
init_redis_conn_and_queues()
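# Hedged sketch (not part of this app): a separate worker process would consume
# these queues, for example with the rq CLI
#   rq worker urgent priority default --url redis://127.0.0.1:6379
# or, in Python, rq.Worker(['urgent', 'priority', 'default'], connection=conn).work().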
|
__author__ = 'mnowotka'
from tastypie import fields
from tastypie.resources import ALL
from chembl_webservices.core.utils import NUMBER_FILTERS, CHAR_FILTERS, FLAG_FILTERS
from chembl_webservices.core.resource import ChemblModelResource
from chembl_webservices.core.serialization import ChEMBLApiSerializer
from chembl_webservices.core.meta import ChemblResourceMeta
from django.db.models import Prefetch
try:
from chembl_compatibility.models import ActionType
except ImportError:
from chembl_core_model.models import ActionType
try:
from chembl_compatibility.models import BindingSites
except ImportError:
from chembl_core_model.models import BindingSites
try:
from chembl_compatibility.models import ChemblIdLookup
except ImportError:
from chembl_core_model.models import ChemblIdLookup
try:
from chembl_compatibility.models import CompoundRecords
except ImportError:
from chembl_core_model.models import CompoundRecords
try:
from chembl_compatibility.models import DrugMechanism
except ImportError:
from chembl_core_model.models import DrugMechanism
try:
from chembl_compatibility.models import MechanismRefs
except ImportError:
from chembl_core_model.models import MechanismRefs
try:
from chembl_compatibility.models import MoleculeDictionary
except ImportError:
from chembl_core_model.models import MoleculeDictionary
try:
from chembl_compatibility.models import TargetDictionary
except ImportError:
from chembl_core_model.models import TargetDictionary
from chembl_webservices.core.fields import monkeypatch_tastypie_field
monkeypatch_tastypie_field()
# ----------------------------------------------------------------------------------------------------------------------
class MechanismRefsResource(ChemblModelResource):
class Meta(ChemblResourceMeta):
queryset = MechanismRefs.objects.all()
excludes = []
resource_name = 'mechanism_ref'
collection_name = 'mechanism_refs'
detail_uri_name = 'mecref_id'
serializer = ChEMBLApiSerializer(resource_name, {collection_name: resource_name})
prefetch_related = []
fields = (
'ref_type',
'ref_id',
'ref_url',
)
filtering = {
'ref_type': CHAR_FILTERS,
'ref_id': CHAR_FILTERS,
'ref_url': CHAR_FILTERS,
}
ordering = [field for field in filtering.keys() if not ('comment' in field or 'description' in field)]
# ----------------------------------------------------------------------------------------------------------------------
class MechanismResource(ChemblModelResource):
record_id = fields.IntegerField('record_id', null=True, blank=True)
molecule_chembl_id = fields.CharField('molecule__chembl_id', null=True, blank=True)
max_phase = fields.IntegerField('molecule__max_phase', null=True, blank=True)
target_chembl_id = fields.CharField('target__chembl_id', null=True, blank=True)
site_id = fields.IntegerField('site_id', null=True, blank=True)
action_type = fields.CharField('action_type_id', null=True, blank=True)
mechanism_refs = fields.ToManyField('chembl_webservices.resources.mechanism.MechanismRefsResource',
'mechanismrefs_set', full=True, null=True, blank=True)
parent_molecule_chembl_id = fields.CharField('molecule__moleculehierarchy__parent_molecule__chembl_id', null=True, blank=True)
class Meta(ChemblResourceMeta):
queryset = DrugMechanism.objects.all()
resource_name = 'mechanism'
collection_name = 'mechanisms'
serializer = ChEMBLApiSerializer(resource_name, {collection_name: resource_name})
prefetch_related = [
Prefetch('molecule', queryset=MoleculeDictionary.objects.only('chembl', 'max_phase')),
Prefetch('target', queryset=TargetDictionary.objects.only('chembl')),
Prefetch('mechanismrefs_set'),
Prefetch('molecule__moleculehierarchy'),
Prefetch('molecule__moleculehierarchy__parent_molecule',
queryset=MoleculeDictionary.objects.only('chembl')),
]
fields = (
'action_type',
'binding_site_comment',
'direct_interaction',
'disease_efficacy',
'max_phase',
'mec_id',
'mechanism_comment',
'mechanism_of_action',
'molecular_mechanism',
'molecule_chembl_id',
'record_id',
'selectivity_comment',
'site_id',
'target_chembl_id',
'parent_molecule_chembl_id'
)
filtering = {
'action_type': CHAR_FILTERS,
# 'binding_site_comment': ALL,
'direct_interaction': FLAG_FILTERS,
'disease_efficacy': FLAG_FILTERS,
'max_phase': NUMBER_FILTERS,
'mec_id': NUMBER_FILTERS,
# 'mechanism_comment': ALL,
'mechanism_of_action': CHAR_FILTERS,
'molecular_mechanism': FLAG_FILTERS,
'molecule_chembl_id': ALL,
'parent_molecule_chembl_id': ALL,
'record_id' : NUMBER_FILTERS,
# 'selectivity_comment': ALL,
'site_id': NUMBER_FILTERS,
'target_chembl_id': ALL,
}
ordering = [field for field in filtering.keys() if not ('comment' in field or 'description' in field)]
# ----------------------------------------------------------------------------------------------------------------------
|
"""Avatar hashes added
Revision ID: 3e056e90c67
Revises: cd0c89e863
Create Date: 2015-04-11 11:36:09.772333
"""
# revision identifiers, used by Alembic.
revision = '3e056e90c67'
down_revision = 'cd0c89e863'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('avatar_hash', sa.String(length=32), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'avatar_hash')
### end Alembic commands ###
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
#import cgi
#import cgitb
#import web
#cgitb.enable(display=0, logdir="/opt/scripts/crycsv/cgi.log")
# import libraries in lib directory
#import matplotlib
#import numpy
base_path = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(base_path, 'lib'))
#sys.path.insert(0, '/usr/lib/python3.5/site-packages')
#print(str(sys.path))
from flask import Flask
import flask
import collections
import CryGold4Web
#import html5lib
import urllib
app = Flask(__name__)
from os import listdir
from os.path import isfile, join
import json
import traceback
#data = 'A'
#get_response = requests.get(url='http://google.com')
#post_data = {'username':'joeb', 'password':'foobar'}
# POST some form-encoded data:
#post_response = requests.post(url='http://httpbin.org/post', data=post_data)
#form = web.input()
#form = cgi.FieldStorage()
def is_safe_path(basedir, path, follow_symlinks=True):
# resolves symbolic links
if follow_symlinks:
return os.path.realpath(path).startswith(basedir)
return os.path.abspath(path).startswith(basedir)
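    # Note: this is a plain prefix check on the resolved absolute path; callers
    # are expected to pass a basedir such as '/opt/scripts/crycsv/pricecoins'
    # so that requested files stay inside that directory.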
def toCSV(cg4w):
return cg4w.toReturnCSV()
def dirs(mypath):
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
return onlyfiles
@app.route('/old')
def beginold():
website = "<html><head></head><body><form action=\"/runold\"> \
<input id=\"0\" name=\"0\" value=\"pricecoins/XMG-BTC.csv\"> File <!-- , but that first file not the amountcoin Files --> <br> \
<input id=\"1\" name=\"1\" value=\"300\"> must be 300 for pricecoin/XXX-XXX.csv , 900 for pricecoin/bitfinex-XXXXXX.csv and 3600 for amountcoins/*.csv <br> \
<input id=\"2\" name=\"2\" value=\"mul\"> mul or add or div or log or diffuse <br> \
<input id=\"3\" name=\"3\" value=\"pricecoins/btc-eur.csv\"> File when before mul add div, number if log, \"1d\" if diffuse<br> \
<input id=\"4\" name=\"4\" value=\"300\"> same with 300 and 3600 as above <br>\
<input id=\"5\" name=\"5\" value=\"\"> mul or add or div or log or diffuse <br> \
<input id=\"6\" name=\"6\" value=\"\"> Files when before mul add div, number if log, \"1d\" if diffuse<br> \
<input id=\"7\" name=\"7\" value=\"\"> same with 300 and 3600 as above <br>\
<input id=\"8\" name=\"8\" value=\"2018-01-01_00:00\"> DateTimeFormat or with the letter \"d\" before and next line blank a timespan same format<br> \
<input id=\"9\" name=\"9\" value=\"2018-04-01_00:00\"> DateTimeFormat or Nothing when letter \"d\" is added in front of befors line<br> \
<button type=\"submit\"> \
give-Table</button></form><br> \
<a href=\"/pricecoins.json\">files in pricecoins</a><br><a href=\"/amountcoins.json\">files in amountcoins</a><br> \
<a href=\"/operations.json\">operations</a><br> \
</body></html>"
return website
@app.route('/runold')
def homeold():
#if "name" not in form or "addr" not in form:
#a="<H1>Error</H1>"
#b="Please fill in the name and addr fields."
#c=str(form)
#d=form.username
# return a+b+d
#a="<p>name:"+ form["name"].value
#b="<p>addr:"+ form["addr"].value
b = []
for c,a in flask.request.args.items():
b.append(str(c))
#b.insert(1,c)
u = collections.OrderedDict(sorted(flask.request.args.items()))
d = b[::-1]
h = ''
#out_ = CryGold4Web.outdata
#toCSV(out_)
#return str(out_.toPyStdStructure().getCmdParaManageObj('/opt/scripts/crycsv/CryGold4Web '+h))
nono0 = False
nono = False
nono2 = False
for i,(w,o) in enumerate(u.items()):
if i > 1 and i < 5:
if str(o) == "":
nono0 = True
if i > 4 and i < 8:
if str(o) == "":
nono = True
if i == 9:
if str(o) == "":
nono2 = True
for i,(w,o) in enumerate(u.items()):
if nono0 and i > 1 and i < 8:
continue
if nono and i > 4 and i < 8:
continue
if nono2 and i == 9:
continue
if i == 0 or i == 3 or i == 6:
if not is_safe_path('/opt/scripts/crycsv/pricecoins', str(o)) and not is_safe_path('/opt/scripts/crycsv/amountcoins',str(o)):
return 'File wrong'
if i != 7 and i != 4 and i != 1:
h += str(o)+' '
outObj = CryGold4Web.getCmdParaManageObj('/opt/scripts/crycsv/CryGold4Web '+h)
if issubclass(type(outObj.outdata),CryGold4Web.eva.csvMatch.OperandsContentHandling):
return toCSV(outObj.outdata)
if type(outObj.outdata) is str:
return outObj.outdata
if type(outObj.outdata) is list:
return str(outObj.outdata)
return 'None'
@app.route('/')
def begin():
argument = flask.request.args.get('arguments')
    if argument is None:
prefilled = "pricecoins/XMG-BTC.csv mul pricecoins/btc-eur.csv 2018-01-01_00:00 2018-04-01_00:00"
else:
prefilled = str(argument)
website = "<html><head></head><body><form action=\"/run\"> \
<input id=\"0\" name=\"arguments\" size=\"35\" value=\""+prefilled+"\"><br> \
<button type=\"submit\"> \
give-Table</button></form><br> \
<a href=\"/pricecoins.json\">files in pricecoins</a><br><a href=\"/amountcoins.json\">files in amountcoins</a><br> \
<a href=\"/operations.json\">operations</a><br> \
</body></html>"
return website
@app.route('/run')
def home():
#if "name" not in form or "addr" not in form:
#a="<H1>Error</H1>"
#b="Please fill in the name and addr fields."
#c=str(form)
#d=form.username
# return a+b+d
#a="<p>name:"+ form["name"].value
#b="<p>addr:"+ form["addr"].value
argument = flask.request.args.get('arguments')
wordsList = str(argument).split(' ')
for word in wordsList[:1]:
if os.path.isfile(word):
if not is_safe_path('/opt/scripts/crycsv/pricecoins', word) and not is_safe_path('/opt/scripts/crycsv/amountcoins',word):
return 'One file exists, but is in the wrong path!'
try:
arg = '/blub/CryGold4Web.piii ' + argument
outObj = CryGold4Web.getCmdParaManageObj(arg)
except Exception as inst:
exc_type, exc_obj, exc_tb = sys.exc_info()
# del(exc_type, exc_value, exc_traceback)
traceback_details = {
'filename': exc_tb.tb_frame.f_code.co_filename,
'lineno' : exc_tb.tb_lineno,
'name' : exc_tb.tb_frame.f_code.co_name,
'type' : exc_type.__name__,
#'message' : exc_value.message, # or see traceback._some_str()
}
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
return "Command Parameter Error , Exception Type: "+str(type(inst))+" Args: "+str(inst.args) + \
str(exc_type)+"<br>\n"+str(fname)+"<br>\n"+str(exc_tb.tb_lineno) + \
'<br>\n'+str(argument)+'<br>\n'+str(traceback.format_exc())+'\n<br>'
try:
if issubclass(type(outObj.outdata),CryGold4Web.eva.csvMatch.OperandsContentHandling):
return toCSV(outObj.outdata)
if type(outObj.outdata) is str:
return outObj.outdata
if type(outObj.outdata) is list:
return str(outObj.outdata)
return 'None'
except Exception as inst:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
return "Output Error , Exception Type: "+str(type(inst))+" Args: "+str(inst.args) + \
str(exc_type)+"\n"+str(fname)+"\n"+str(exc_tb.tb_lineno)
@app.route('/amountcoins.json')
def amount():
return json.dumps(dirs('/opt/scripts/crycsv/amountcoins'))
@app.route('/pricecoins.json')
def price():
return json.dumps(dirs('/opt/scripts/crycsv/pricecoins'))
@app.route('/operations.json')
def operations():
return json.dumps(CryGold4Web.eva.operations.OpTypesHandling.getOperationList())
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['HttpsHealthCheckArgs', 'HttpsHealthCheck']
@pulumi.input_type
class HttpsHealthCheckArgs:
def __init__(__self__, *,
check_interval_sec: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
host: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
request_path: Optional[pulumi.Input[str]] = None,
timeout_sec: Optional[pulumi.Input[int]] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a HttpsHealthCheck resource.
:param pulumi.Input[int] check_interval_sec: How often (in seconds) to send a health check. The default value is 5 seconds.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[int] healthy_threshold: A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.
:param pulumi.Input[str] host: The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.
:param pulumi.Input[str] kind: Type of the resource.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[int] port: The TCP port number for the HTTPS health check request. The default value is 443.
:param pulumi.Input[str] request_path: The request path of the HTTPS health check request. The default value is "/".
:param pulumi.Input[int] timeout_sec: How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.
:param pulumi.Input[int] unhealthy_threshold: A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.
"""
if check_interval_sec is not None:
pulumi.set(__self__, "check_interval_sec", check_interval_sec)
if description is not None:
pulumi.set(__self__, "description", description)
if healthy_threshold is not None:
pulumi.set(__self__, "healthy_threshold", healthy_threshold)
if host is not None:
pulumi.set(__self__, "host", host)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if port is not None:
pulumi.set(__self__, "port", port)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if request_path is not None:
pulumi.set(__self__, "request_path", request_path)
if timeout_sec is not None:
pulumi.set(__self__, "timeout_sec", timeout_sec)
if unhealthy_threshold is not None:
pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)
@property
@pulumi.getter(name="checkIntervalSec")
def check_interval_sec(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to send a health check. The default value is 5 seconds.
"""
return pulumi.get(self, "check_interval_sec")
@check_interval_sec.setter
def check_interval_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "check_interval_sec", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="healthyThreshold")
def healthy_threshold(self) -> Optional[pulumi.Input[int]]:
"""
A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.
"""
return pulumi.get(self, "healthy_threshold")
@healthy_threshold.setter
def healthy_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "healthy_threshold", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Type of the resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The TCP port number for the HTTPS health check request. The default value is 443.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="requestPath")
def request_path(self) -> Optional[pulumi.Input[str]]:
"""
The request path of the HTTPS health check request. The default value is "/".
"""
return pulumi.get(self, "request_path")
@request_path.setter
def request_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_path", value)
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> Optional[pulumi.Input[int]]:
"""
How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.
"""
return pulumi.get(self, "timeout_sec")
@timeout_sec.setter
def timeout_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_sec", value)
@property
@pulumi.getter(name="unhealthyThreshold")
def unhealthy_threshold(self) -> Optional[pulumi.Input[int]]:
"""
A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.
"""
return pulumi.get(self, "unhealthy_threshold")
@unhealthy_threshold.setter
def unhealthy_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "unhealthy_threshold", value)
class HttpsHealthCheck(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
check_interval_sec: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
host: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
request_path: Optional[pulumi.Input[str]] = None,
timeout_sec: Optional[pulumi.Input[int]] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Creates a HttpsHealthCheck resource in the specified project using the data included in the request.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] check_interval_sec: How often (in seconds) to send a health check. The default value is 5 seconds.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[int] healthy_threshold: A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.
:param pulumi.Input[str] host: The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.
:param pulumi.Input[str] kind: Type of the resource.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[int] port: The TCP port number for the HTTPS health check request. The default value is 443.
:param pulumi.Input[str] request_path: The request path of the HTTPS health check request. The default value is "/".
:param pulumi.Input[int] timeout_sec: How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.
:param pulumi.Input[int] unhealthy_threshold: A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[HttpsHealthCheckArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a HttpsHealthCheck resource in the specified project using the data included in the request.
:param str resource_name: The name of the resource.
:param HttpsHealthCheckArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HttpsHealthCheckArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
check_interval_sec: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
host: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
request_path: Optional[pulumi.Input[str]] = None,
timeout_sec: Optional[pulumi.Input[int]] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HttpsHealthCheckArgs.__new__(HttpsHealthCheckArgs)
__props__.__dict__["check_interval_sec"] = check_interval_sec
__props__.__dict__["description"] = description
__props__.__dict__["healthy_threshold"] = healthy_threshold
__props__.__dict__["host"] = host
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
__props__.__dict__["port"] = port
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
__props__.__dict__["request_path"] = request_path
__props__.__dict__["timeout_sec"] = timeout_sec
__props__.__dict__["unhealthy_threshold"] = unhealthy_threshold
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["self_link_with_id"] = None
super(HttpsHealthCheck, __self__).__init__(
'google-native:compute/alpha:HttpsHealthCheck',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HttpsHealthCheck':
"""
Get an existing HttpsHealthCheck resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = HttpsHealthCheckArgs.__new__(HttpsHealthCheckArgs)
__props__.__dict__["check_interval_sec"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["description"] = None
__props__.__dict__["healthy_threshold"] = None
__props__.__dict__["host"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["request_path"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["self_link_with_id"] = None
__props__.__dict__["timeout_sec"] = None
__props__.__dict__["unhealthy_threshold"] = None
return HttpsHealthCheck(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="checkIntervalSec")
def check_interval_sec(self) -> pulumi.Output[int]:
"""
How often (in seconds) to send a health check. The default value is 5 seconds.
"""
return pulumi.get(self, "check_interval_sec")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="healthyThreshold")
def healthy_threshold(self) -> pulumi.Output[int]:
"""
A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.
"""
return pulumi.get(self, "healthy_threshold")
@property
@pulumi.getter
def host(self) -> pulumi.Output[str]:
"""
The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
The TCP port number for the HTTPS health check request. The default value is 443.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="requestPath")
def request_path(self) -> pulumi.Output[str]:
"""
The request path of the HTTPS health check request. The default value is "/".
"""
return pulumi.get(self, "request_path")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="selfLinkWithId")
def self_link_with_id(self) -> pulumi.Output[str]:
"""
Server-defined URL for this resource with the resource id.
"""
return pulumi.get(self, "self_link_with_id")
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> pulumi.Output[int]:
"""
How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.
"""
return pulumi.get(self, "timeout_sec")
@property
@pulumi.getter(name="unhealthyThreshold")
def unhealthy_threshold(self) -> pulumi.Output[int]:
"""
A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.
"""
return pulumi.get(self, "unhealthy_threshold")
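# Hedged usage sketch (illustrative, not generated output): inside a Pulumi
# program this resource can be declared directly from this module, e.g.
#   check = HttpsHealthCheck("demo-https-check", port=443, request_path="/")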
|
import time
import functools
import math
import pyglet
class AnimatedValue:
def __new__(cls, start, end, duration, *w, **ka):
if duration <= 0:
return ConstantValue(end)
return super().__new__(cls)
def __init__(self, start, end, duration, easing=lambda x:x):
self.start = start
self.end = float(end)
self.clock = clock = time.monotonic
self.begin = clock()
self.duration = duration
self.easing = easing
def __float__(self):
t = (self.clock() - self.begin) / self.duration
if t > 1:
# Evil metamorphosis
self.val = self.end
self.__class__ = ConstantValue
del self.start
return self.end
start = float(self.start)
t = self.easing(t)
return (1-t) * start + t * self.end
def __repr__(self):
        return f'<{self.start}→{self.end}:{float(self)}>'
def __await__(self):
        time_to_wait = self.begin + self.duration - self.clock()
        if time_to_wait > 0:
            yield time_to_wait
return self
class ConstantValue:
def __init__(self, end):
self.end = float(end)
def __float__(self):
return self.end
def __repr__(self):
return f'<{self.end}>'
def __await__(self):
return self
yield
def drive(it, dt=0):
go = functools.partial(drive, it)
try:
time_or_blocker = next(it)
except StopIteration:
return
if isinstance(time_or_blocker, Blocker):
time_or_blocker.waiters.append(go)
else:
time_to_wait = float(time_or_blocker)
pyglet.clock.schedule_once(go, time_to_wait)
def autoschedule(coro):
@functools.wraps(coro)
def func(*args, **kwargs):
blocker = Blocker()
async def wrap():
await coro(*args, **kwargs)
blocker.unblock()
it = wrap().__await__()
drive(it, 0)
return blocker
return func
def fork(coro):
it = coro().__await__()
drive(it, 0)
class Wait:
def __init__(self, time_to_wait):
self.time_to_wait = time_to_wait
def __await__(self):
yield self.time_to_wait
class Blocker:
"""A Future"""
def __init__(self):
self.done = False
self.waiters = []
self.value = None
def unblock(self, value=None):
self.done = True
self.value = value
for waiter in self.waiters:
waiter()
self.waiters.clear()
return value
def __await__(self):
if not self.done:
yield self
return self.value
def cubic_inout(t):
if t < 0.5:
return 4 * t ** 3
p = 2 * t - 2
return 0.5 * p ** 3 + 1
def sine_inout(t):
return 0.5 * (1 - math.cos(t * math.pi))
def sine_in(t):
return math.sin((t - 1) * math.pi / 2) + 1
def sine_out(t):
return math.sin(t * math.pi / 2)
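# Hedged usage sketch (illustrative): animate a value from 0 to 100 over 0.5 s
# with sine easing and sample it each frame via float():
#   x = AnimatedValue(0, 100, duration=0.5, easing=sine_inout)
#   draw_sprite_at(float(x))   # draw_sprite_at is a hypothetical caller
# Once the duration elapses the object mutates into a ConstantValue holding 100.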
|
import os
import json
import argparse
from autogluon.task import TextPrediction as task
TASKS = \
{'cola': ('sentence', 'label', 'mcc', ['mcc']),
'sst': ('sentence', 'label', 'acc', ['acc']),
'mrpc': (['sentence1', 'sentence2'], 'label', 'f1', ['acc', 'f1']),
'sts': (['sentence1', 'sentence2'], 'score', 'spearmanr', ['pearsonr', 'spearmanr']),
'qqp': (['sentence1', 'sentence2'], 'label', 'f1', ['acc', 'f1']),
'mnli': (['sentence1', 'sentence2'], 'label', 'acc', ['acc']),
'qnli': (['question', 'sentence'], 'label', 'acc', ['acc']),
'rte': (['sentence1', 'sentence2'], 'label', 'acc', ['acc']),
'wnli': (['sentence1', 'sentence2'], 'label', 'acc', ['acc']),
'snli': (['sentence1', 'sentence2'], 'label', 'acc', ['acc'])}
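# Each TASKS entry above maps a task name to
# (feature column(s), label column, stopping metric, evaluation metrics).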
def get_parser():
parser = argparse.ArgumentParser(description='The Basic Example of AutoML for Text Prediction.')
parser.add_argument('--train_file', type=str,
help='The training pandas dataframe.',
default=None)
parser.add_argument('--dev_file', type=str,
help='The validation pandas dataframe',
default=None)
parser.add_argument('--test_file', type=str,
help='The test pandas dataframe',
default=None)
parser.add_argument('--seed', type=int,
help='The seed',
default=None)
parser.add_argument('--feature_columns', help='Feature columns', default=None)
parser.add_argument('--label_columns', help='Label columns', default=None)
parser.add_argument('--eval_metrics', type=str,
help='The metrics for evaluating the models.',
default=None)
parser.add_argument('--stop_metric', type=str,
help='The metrics for early stopping',
default=None)
parser.add_argument('--task', type=str, default=None)
parser.add_argument('--do_train', action='store_true',
help='Whether to train the model')
parser.add_argument('--do_eval', action='store_true',
help='Whether to evaluate the model')
parser.add_argument('--exp_dir', type=str, default=None,
help='The experiment directory where the model params will be written.')
    parser.add_argument('--config_file', type=str,
                        help='The configuration of the TextPrediction module',
                        default=None)
    parser.add_argument('--model_dir', type=str, default=None,
                        help='Directory of a saved model to load for prediction.')
    return parser
def train(args):
if args.task is not None:
feature_columns, label_columns, stop_metric, eval_metrics = TASKS[args.task]
else:
raise NotImplementedError
if args.exp_dir is None:
args.exp_dir = 'autogluon_{}'.format(args.task)
model = task.fit(train_data=args.train_file,
label=label_columns,
feature_columns=feature_columns,
output_directory=args.exp_dir,
stopping_metric=stop_metric,
ngpus_per_trial=1,
eval_metric=eval_metrics)
dev_metrics_scores = model.evaluate(args.dev_file, metrics=eval_metrics)
with open(os.path.join(args.exp_dir, 'final_model_dev_score.json'), 'w') as of:
json.dump(dev_metrics_scores, of)
dev_prediction = model.predict(args.dev_file)
with open(os.path.join(args.exp_dir, 'dev_predictions.txt'), 'w') as of:
for ele in dev_prediction:
of.write(str(ele) + '\n')
model.save(os.path.join(args.exp_dir, 'saved_model'))
model = task.load(os.path.join(args.exp_dir, 'saved_model'))
test_prediction = model.predict(args.test_file)
with open(os.path.join(args.exp_dir, 'test_predictions.txt'), 'w') as of:
for ele in test_prediction:
of.write(str(ele) + '\n')
def predict(args):
model = task.load(args.model_dir)
test_prediction = model.predict(args.test_file)
if args.exp_dir is None:
args.exp_dir = '.'
with open(os.path.join(args.exp_dir, 'test_predictions.txt'), 'w') as of:
for ele in test_prediction:
of.write(str(ele) + '\n')
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
if args.do_train:
train(args)
if args.do_eval:
predict(args)
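    # Hedged example invocation (script and file names are placeholders):
    #   python this_script.py --do_train --task sst \
    #       --train_file train.pd.pkl --dev_file dev.pd.pkl --test_file test.pd.pkl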
|
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB) - Top Tagger Benchmark Demo Template.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""This is a dummy example for the Top Tagger tagging step. You can adjust
this example to your own requirements.
In this example, the evaluation function expects three arguments:
--datafile Pickle file with test data sample or the data file that was
           created by the pre-processing step.
--weights  Dummy text file for additional data that the evaluator may need to
           access. This gives an example of how additional data can be
           accessed by your code. It is recommended to store all additional
           files that are needed by your implementation in the data/ folder
           of the template repository.
--outfile  Output file containing the evaluation results.
"""
import argparse
import os
from mytagger.tagger.main import run_tagger
if __name__ == '__main__':
"""Main routine for the tagger. Reads the command line arguments. Then
calls the main tagger function in the respective tagger module.
    This example implementation of the tagger simply reads the weights
    file to ensure that it is accessible and then generates a random result
    file.
"""
    # Get command line parameters. The tagger expects three file parameters
    # plus an optional number of runs.
parser = argparse.ArgumentParser(
description='Tagger for the ML4Jets Top Tagger evaluation.'
)
parser.add_argument(
'-d', '--datafile',
required=True,
help='Test data sample file or pre-processing results file.'
)
parser.add_argument(
'-w', '--weights',
required=True,
        help='Dummy weight file.'
)
parser.add_argument(
'-r', '--runs',
type=int,
default=5,
help='Number of runs in the output.'
)
parser.add_argument(
'-o', '--outfile',
required=True,
help='Tagger result file.'
)
args = parser.parse_args()
    # Call the main tagger function with the parsed arguments.
run_tagger(
datafile=args.datafile,
weightsfile=args.weights,
resultfile=args.outfile,
runs=args.runs
)
# Ensure that the output file exists.
assert os.path.isfile(args.outfile)
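# A hypothetical invocation sketch (file names are illustrative only):
#   python tagger_run.py -d data/test.pkl -w data/weights.txt -r 5 -o results/tagger_output.json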
|
# coding: utf8
import os
import pytest
import requests
from mtgjson import CardDb, ALL_SETS_URL, ALL_SETS_X_URL
def download_set_file(url, fn):
tests_path = os.path.dirname(__file__)
fn = os.path.join(tests_path, fn)
if not os.path.exists(fn):
resp = requests.get(url)
resp.raise_for_status()
with open(fn, 'wb') as out:
out.write(resp.content)
return fn
@pytest.fixture(scope='module',
params=['url', 'file', 'file-x'])
def db(request):
if request.param == 'url':
return CardDb.from_url()
elif request.param == 'file':
return CardDb.from_file(
download_set_file(ALL_SETS_URL, 'AllSets.json')
)
elif request.param == 'file-x':
return CardDb.from_file(
download_set_file(ALL_SETS_X_URL, 'AllSets-x.json')
)
def test_db_instantiation(db):
pass
def test_get_card_by_name(db):
card = db.cards_by_name['Sen Triplets']
assert card.multiverseid == 180607
def test_get_card_by_id(db):
card = db.cards_by_id[180607]
assert card.name == 'Sen Triplets'
def test_get_sen_triplets(db):
card = db.cards_by_id[180607]
assert card.name == 'Sen Triplets'
assert card.manaCost == '{2}{W}{U}{B}'
assert card.cmc == 5
assert card.colors == ['White', 'Blue', 'Black']
    assert card.type == u'Legendary Artifact Creature — Human Wizard'
assert card.supertypes == ['Legendary']
assert card.types == ['Artifact', 'Creature']
assert card.subtypes == ['Human', 'Wizard']
assert card.rarity == 'Mythic Rare'
assert card.text == ('At the beginning of your upkeep, choose target '
'opponent. This turn, that player can\'t cast spells '
'or activate abilities and plays with his or her hand'
' revealed. You may play cards from that player\'s '
'hand this turn.')
assert card.flavor == 'They are the masters of your mind.'
assert card.artist == 'Greg Staples'
assert card.number == '109'
assert card.power == '3'
assert card.toughness == '3'
assert card.layout == 'normal'
assert card.multiverseid == 180607
assert card.imageName == 'sen triplets'
def test_set_list(db):
# should start with alpha
assert list(db.sets.values())[0].name == 'Limited Edition Alpha'
assert len(db.sets) > 20
def test_cards_from_set(db):
assert list(db.sets.values())[0].cards[0].name == 'Animate Wall'
def test_card_ascii_name(db):
card = db.cards_by_id[23194]
assert card.ascii_name == 'aether rift'
def test_cards_by_ascii_name(db):
assert db.cards_by_ascii_name['aether rift'].name == u'Aether Rift'
def test_get_specific_card(db):
assert db.sets['4ED'].cards_by_name['Lightning Bolt'].set.code == '4ED'
def test_different_sets_compare_nonequal(db):
c1 = db.sets['4ED'].cards[-1]
c2 = db.sets['ISD'].cards[0]
assert c1 < c2
|
# from elegantrl.tutorial.run import Arguments, train_and_evaluate
import torch
import torch.nn as nn
import gym
import numpy as np
import numpy.random as rd
import time
class QNetTwin(nn.Module): # Double DQN
def __init__(self, mid_dim, state_dim, action_dim):
super().__init__()
self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU())
self.net_q1 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim)) # Q1 value
self.net_q2 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim)) # Q2 value
def forward(self, state):
tmp = self.net_state(state)
return self.net_q1(tmp) # one Q value
def get_q1_q2(self, state):
tmp = self.net_state(state)
q1 = self.net_q1(tmp)
q2 = self.net_q2(tmp)
return q1, q2 # two Q values
class AgentDoubleDQN:
def __init__(self):
self.learning_rate = 1e-4
self.soft_update_tau = 2 ** -8 # 5e-3 ~= 2 ** -8
self.criterion = torch.nn.SmoothL1Loss()
self.state = None
self.device = None
self.act = self.act_target = None
self.cri = self.cri_target = None
self.act_optimizer = None
self.cri_optimizer = None
        self.explore_rate = 0.25  # the probability of choosing an action randomly in epsilon-greedy
        self.action_dim = None  # used to choose a discrete action at random in epsilon-greedy
self.softmax = torch.nn.Softmax(dim=1)
@staticmethod
def soft_update(target_net, current_net, tau):
for tar, cur in zip(target_net.parameters(), current_net.parameters()):
tar.data.copy_(cur.data * tau + tar.data * (1 - tau))
def explore_env(self, env, buffer, target_step, reward_scale, gamma) -> int:
for _ in range(target_step):
action = self.select_action(self.state)
next_s, reward, done, _ = env.step(action)
other = (reward * reward_scale, 0.0 if done else gamma, action) # action is an int
buffer.append_buffer(self.state, other)
self.state = env.reset() if done else next_s
return target_step
def update_net(self, buffer, target_step, batch_size, repeat_times):
buffer.update_now_len_before_sample()
q_value = obj_critic = None
for _ in range(int(target_step * repeat_times)):
obj_critic, q_value = self.get_obj_critic(buffer, batch_size)
self.cri_optimizer.zero_grad()
obj_critic.backward()
self.cri_optimizer.step()
self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
return q_value.mean().item(), obj_critic.item()
def init(self, net_dim, state_dim, action_dim):
self.action_dim = action_dim
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.cri = QNetTwin(net_dim, state_dim, action_dim).to(self.device)
self.cri_target = QNetTwin(net_dim, state_dim, action_dim).to(self.device)
self.act = self.cri
self.cri_optimizer = torch.optim.Adam(self.act.parameters(), lr=self.learning_rate)
def select_action(self, state) -> np.ndarray: # for discrete action space
states = torch.as_tensor((state,), dtype=torch.float32, device=self.device).detach_()
actions = self.act(states)
if rd.rand() < self.explore_rate: # epsilon-greedy
action = self.softmax(actions)[0]
a_prob = action.detach().cpu().numpy() # choose action according to Q value
a_int = rd.choice(self.action_dim, p=a_prob)
else:
action = actions[0]
a_int = action.argmax(dim=0).cpu().numpy()
return a_int
def get_obj_critic(self, buffer, batch_size):
with torch.no_grad():
reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
next_q = torch.min(*self.cri_target.get_q1_q2(next_s))
next_q = next_q.max(dim=1, keepdim=True)[0]
q_label = reward + mask * next_q
act_int = action.type(torch.long)
q1, q2 = [qs.gather(1, act_int) for qs in self.act.get_q1_q2(state)]
obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)
return obj_critic, q1
class ReplayBuffer:
def __init__(self, max_len, state_dim, action_dim, if_on_policy, if_gpu):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.max_len = max_len
self.now_len = 0
self.next_idx = 0
self.if_full = False
self.action_dim = action_dim # for self.sample_all(
self.if_on_policy = if_on_policy
self.if_gpu = False if if_on_policy else if_gpu
other_dim = 1 + 1 + action_dim * 2 if if_on_policy else 1 + 1 + action_dim
if self.if_gpu:
self.buf_other = torch.empty((max_len, other_dim), dtype=torch.float32, device=self.device)
self.buf_state = torch.empty((max_len, state_dim), dtype=torch.float32, device=self.device)
else:
self.buf_other = np.empty((max_len, other_dim), dtype=np.float32)
self.buf_state = np.empty((max_len, state_dim), dtype=np.float32)
def append_buffer(self, state, other): # CPU array to CPU array
if self.if_gpu:
state = torch.as_tensor(state, device=self.device)
other = torch.as_tensor(other, device=self.device)
self.buf_state[self.next_idx] = state
self.buf_other[self.next_idx] = other
self.next_idx += 1
if self.next_idx >= self.max_len:
self.if_full = True
self.next_idx = 0
def sample_batch(self, batch_size) -> tuple:
indices = torch.randint(self.now_len - 1, size=(batch_size,), device=self.device) if self.if_gpu \
else rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 1:2], # mask = 0.0 if done else gamma
r_m_a[:, 2:], # action
self.buf_state[indices], # state
self.buf_state[indices + 1]) # next_state
def sample_all(self) -> tuple:
all_other = torch.as_tensor(self.buf_other[:self.now_len], device=self.device)
return (all_other[:, 0], # reward
all_other[:, 1], # mask = 0.0 if done else gamma
all_other[:, 2:2 + self.action_dim], # action
all_other[:, 2 + self.action_dim:], # noise
torch.as_tensor(self.buf_state[:self.now_len], device=self.device)) # state
def update_now_len_before_sample(self):
self.now_len = self.max_len if self.if_full else self.next_idx
def empty_buffer_before_explore(self):
self.next_idx = 0
self.now_len = 0
self.if_full = False
def get_episode_return(env, act, device) -> float:
max_step = 200
if_discrete = True
episode_return = 0.0 # sum of rewards in an episode
state = env.reset()
for _ in range(max_step):
s_tensor = torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor)
if if_discrete:
a_tensor = a_tensor.argmax(dim=1)
        action = a_tensor.cpu().numpy()[0]  # no need for .detach() because torch.no_grad() is used outside
state, reward, done, _ = env.step(action)
episode_return += reward
if done:
break
return env.episode_return if hasattr(env, 'episode_return') else episode_return
class Evaluator:
def __init__(self, cwd, agent_id, eval_times, show_gap, env, device):
self.recorder = [(0., -np.inf, 0., 0., 0.), ] # total_step, r_avg, r_std, obj_a, obj_c
self.r_max = -np.inf
self.total_step = 0
self.cwd = cwd
self.env = env
self.device = device
self.agent_id = agent_id
self.show_gap = show_gap
self.eva_times = eval_times
self.target_reward = 200
self.used_time = None
self.start_time = time.time()
self.print_time = time.time()
print(f"{'ID':>2} {'Step':>8} {'MaxR':>8} |{'avgR':>8} {'stdR':>8} {'objA':>8} {'objC':>8}")
def evaluate_save(self, act, steps, obj_a, obj_c) -> bool:
reward_list = [get_episode_return(self.env, act, self.device) for _ in range(self.eva_times)]
r_avg = np.average(reward_list) # episode return average
r_std = float(np.std(reward_list)) # episode return std
if r_avg > self.r_max: # save checkpoint with highest episode return
self.r_max = r_avg # update max reward (episode return)
print(f"{self.agent_id:<2} {self.total_step:8.2e} {self.r_max:8.2f} |")
self.total_step += steps # update total training steps
self.recorder.append((self.total_step, r_avg, r_std, obj_a, obj_c)) # update recorder
print(f"{'ID':>2} {'Step':>8} {'TargetR':>8} |"
f"{'avgR':>8} {'stdR':>8} {'UsedTime':>8} ########\n"
f"{self.agent_id:<2} {self.total_step:8.2e} {self.target_reward:8.2f} |"
f"{r_avg:8.2f} {r_std:8.2f} {self.used_time:>8} ########")
'''basic arguments'''
cwd = None
env = gym.make('CartPole-v0')
agent = AgentDoubleDQN()
gpu_id = None
'''training arguments'''
net_dim = 2 ** 7
max_memo = 2 ** 17
batch_size = 2 ** 7
target_step = 2 ** 10
repeat_times = 2 ** 0
gamma = 0.99
reward_scale = 2 ** 0
'''evaluating arguments'''
show_gap = 2 ** 0
eval_times = 2 ** 0
env_eval = env = gym.make('CartPole-v0')
'''init: environment'''
max_step = 200
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
'''init: Agent, ReplayBuffer, Evaluator'''
agent.init(net_dim, state_dim, action_dim)
buffer = ReplayBuffer(max_len=max_memo + max_step, if_on_policy=False, if_gpu=True,
state_dim=state_dim, action_dim=1)
evaluator = Evaluator(cwd=cwd, agent_id=gpu_id, device=agent.device, env=env_eval,
eval_times=eval_times, show_gap=show_gap) # build Evaluator
'''prepare for training'''
agent.state = env.reset()
with torch.no_grad(): # update replay buffer
if_discrete = True
action_dim = env.action_space.n
state = env.reset()
steps = 0
while steps < target_step:
action = rd.randint(action_dim) if if_discrete else rd.uniform(-1, 1, size=action_dim)
next_state, reward, done, _ = env.step(action)
steps += 1
scaled_reward = reward * reward_scale
mask = 0.0 if done else gamma
other = (scaled_reward, mask, action) if if_discrete else (scaled_reward, mask, *action)
buffer.append_buffer(state, other)
state = env.reset() if done else next_state
agent.update_net(buffer, target_step, batch_size, repeat_times) # pre-training and hard update
agent.act_target.load_state_dict(agent.act.state_dict()) if getattr(agent, 'act_target', None) else None
agent.cri_target.load_state_dict(agent.cri.state_dict()) if getattr(agent, 'cri_target', None) else None
total_step = steps
print(total_step)
'''start training'''
while not (total_step >= 5000):
with torch.no_grad(): # speed up running
steps = agent.explore_env(env, buffer, target_step, reward_scale, gamma)
total_step += steps
print(total_step)
obj_a, obj_c = agent.update_net(buffer, target_step, batch_size, repeat_times)
# with torch.no_grad(): # speed up running
# evaluator.evaluate_save(agent.act, steps, obj_a, obj_c)
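# Added note: the loop above alternates exploration (explore_env fills the replay buffer)
# with learning (update_net). In get_obj_critic the TD target is
#   q_label = reward + mask * min(Q1', Q2').max(dim=1, keepdim=True)[0],
# i.e. the clipped double-Q estimate, and the target network tracks the online network by
# Polyak averaging with tau = 2 ** -8 in soft_update.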
|
import numpy as np
from core.encoders import default_boe_encoder as boe_encoder
from core.encoders import default_bov_encoder as bov_encoder
from scipy.spatial import distance
class Combiner():
def __init__(self, query, docs):
self._query = query
self._docs = docs
self._features = self._extract_features(self._query)
self._ndocs = len(self._docs)
self._nfeats = len(self._features)
self._matrix = None
def get_combinations(self, n=1):
"""Return best combinations as index pairs
Args:
n (int, optional): Number of combinations needed
Returns:
list: List of integer tuples representing indexes of documents
in the `docs` list
"""
candidates = self._possible_combinations()
distances = [self._distance(i, j) for i, j in candidates]
ranked_candidates = [candidates[i] for i in np.argsort(distances)]
exclusive = self._exclusive_combinations(ranked_candidates)
top_n = exclusive[:n]
return top_n if n > 1 else top_n[0]
def _possible_combinations(self):
pairs = []
for i in range(self._ndocs):
for j in range(i+1, self._ndocs):
pair = set([i, j])
pairs.append(pair)
return pairs
def _distance(self, i, j):
if self._matrix is None:
self._initialize_disclosure_matrix()
matches_i = self._matrix[i]
matches_j = self._matrix[j]
rows = np.array([matches_i, matches_j])
f1 = self._improvement_distance
f2 = self._feature_wise_best_distance
f3 = self._weakest_feature_distance
return f3(rows)
def _weakest_feature_distance(self, rows):
"""
Disclosure of the least supported features governs the overall distance.
"""
feature_wise_minimum = rows.min(axis=0)
distance = feature_wise_minimum.max()
return distance
def _feature_wise_best_distance(self, rows):
"""
Best feature-wise disclosures govern the overall distance.
"""
feature_wise_minimum = rows.min(axis=0)
distance = feature_wise_minimum.mean()
return distance
def _improvement_distance(self, rows):
"""
The improvement in the score by combining the results governs overall distance
"""
individual_distances = [row.mean() for row in rows]
individual_best = np.min(individual_distances)
combined_best = self._feature_wise_best_distance(rows)
distance = combined_best - individual_best # more negative, better
return distance
def _initialize_disclosure_matrix(self):
self._matrix = np.zeros((self._ndocs, self._nfeats))
for i, doc in enumerate(self._docs):
for j, feature in enumerate(self._features):
self._matrix[i][j] = self._match(feature, doc)
return self._matrix
def _extract_features(self, text):
entities = boe_encoder.encode(text)
features = bov_encoder.encode(entities)
return features
def _match(self, feature, doc):
doc_features = self._extract_features(doc)
min_dist = np.min([distance.cosine(df, feature) for df in doc_features])
return min_dist
def _exclusive_combinations(self, combinations):
seen = set([])
exclusive = []
for combination in combinations:
if all([e not in seen for e in combination]):
exclusive.append(combination)
seen = seen.union(combination)
return exclusive
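# A hypothetical usage sketch (assumes the core.encoders models are available; the query
# and document strings are illustrative only):
#   docs = ["first candidate text", "second candidate text", "third candidate text"]
#   combiner = Combiner("query describing the desired subject matter", docs)
#   best_pair = combiner.get_combinations()        # e.g. {0, 2}
#   top_two_pairs = combiner.get_combinations(n=2)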
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""A simple tool to decode a CFSR register from the command line
Example usage:
$ python -m pw_cpu_exception_cortex_m.cfsr_decoder 0x00010100
20210412 15:09:01 INF Exception caused by a usage fault, bus fault.
Active Crash Fault Status Register (CFSR) fields:
IBUSERR Bus fault on instruction fetch.
UNDEFINSTR Encountered invalid instruction.
All registers:
cfsr 0x00010100
"""
import argparse
import logging
import sys
import pw_cli.log
from pw_cpu_exception_cortex_m_protos import cpu_state_pb2
from pw_cpu_exception_cortex_m import exception_analyzer
_LOG = logging.getLogger('decode_cfsr')
def _parse_args() -> argparse.Namespace:
"""Parses arguments for this script, splitting out the command to run."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('cfsr',
type=lambda val: int(val, 0),
help='The Cortex-M CFSR to decode')
return parser.parse_args()
def dump_cfsr(cfsr: int) -> int:
cpu_state_proto = cpu_state_pb2.ArmV7mCpuState()
cpu_state_proto.cfsr = cfsr
cpu_state_info = exception_analyzer.CortexMExceptionAnalyzer(
cpu_state_proto)
_LOG.info(cpu_state_info)
return 0
if __name__ == '__main__':
pw_cli.log.install(level=logging.INFO)
sys.exit(dump_cfsr(**vars(_parse_args())))
|
import urllib
from urllib.request import urlopen
from bs4 import BeautifulSoup
from selenium import webdriver
import webbrowser, time, sys, requests, os, bs4
import pprint
site = 'https://www.opensea.io/assets/camelsnft?search[resultModel]=ASSETS&search[sortAscending]=false'
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
browser = webdriver.Chrome('/usr/local/bin/chromedriver')
browser.get(site)
browser.implicitly_wait(10)
req = urllib.request.Request(site, headers=hdr)
html = urlopen(req)
bs = BeautifulSoup(html, 'html5lib')
images = bs.findAll('img', {'class': 'Image--image'})
for img in images:
# if img.has_attr('src'):
print(img)
|
from Graph import Graph
from ContextFreeGrammar import ChomskyNormalForm as CNF
from pyformlang.cfg import *
def test_from_file():
gr = CNF.from_file("cfg_input.txt")
word_accepted = list(map(Terminal, 'aaba'))
word_declined = list(map(Terminal, 'aabb'))
assert gr.contains(word_accepted)
assert not gr.contains([])
assert not gr.contains(word_declined)
def test_from_file_with_eps():
gr = CNF.from_file("cfg_eps_input.txt")
word_accepted = list(map(Terminal, 'aaba'))
word_declined = list(map(Terminal, 'aabb'))
assert gr.contains(word_accepted)
assert gr.contains([])
assert not gr.contains(word_declined)
def test_CYK():
gr = CNF.from_file("cfg_input.txt")
assert gr.CYK('ab')
assert gr.CYK('aaba')
assert not gr.CYK('')
assert not gr.CYK('abc')
def test_CYK_with_eps():
gr = CNF.from_file("cfg_eps_input.txt")
assert gr.CYK('ab')
assert gr.CYK('aaba')
assert gr.CYK('')
assert not gr.CYK('abc')
def test_Hellings():
gr = CNF.from_file("cfg_input.txt")
g = Graph()
g.from_file("input4.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == {(0, 2), (2, 0), (0, 0), (2, 1), (0, 1)}
def test_Hellings_empty_graph():
gr = CNF.from_file("cfg_eps_input.txt")
g = Graph()
g.from_file("empty_input.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == frozenset()
def test_Hellings_empty_grammar():
gr = CNF.from_file("empty_input.txt")
g = Graph()
g.from_file("input.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == frozenset()
def test_Hellings_eps():
gr = CNF.from_file("cfg_eps_input.txt")
g = Graph()
g.from_file("input4.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == {(0, 0), (0, 1), (0, 2), (1, 1), (2, 0), (2, 1), (2, 2)}
|
"""
computes various cache things on top of db.py so that the server
(running from serve.py) can start up and serve faster when restarted.
this script should be run whenever db.p is updated, and
creates db2.p, which can be read by the server.
"""
import time
import pickle
from sqlite3 import dbapi2 as sqlite3
from utils import safe_pickle_dump, Config, to_datetime, PAPER_INIT_YEAR, to_struct_time
def load_dbs():
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
print('loading tfidf_meta', Config.meta_path)
meta = pickle.load(open(Config.meta_path, "rb"))
return sqldb, db, meta
def loop_db_for_infos(db, vocab, idf):
print('looping db for information...')
paper_min_published_time = time.mktime(to_struct_time(PAPER_INIT_YEAR))
paper_max_published_time = time.time()
# preparing date-pid-tuple for descend datas
date_pid_tuple = []
# just for faster search
search_dict = {}
for pid, p in db.items():
# add time score weight to db to make a better searching result
tt = time.mktime(p['updated_parsed'])
p['tscore'] = (tt - paper_min_published_time) / (paper_max_published_time - paper_min_published_time)
date_pid_tuple.append((to_datetime(p['updated_parsed']), pid))
dict_title = makedict(p['title'], vocab, idf, forceidf=5, scale=3)
dict_authors = makedict(' '.join(x['name'] for x in p['authors']), vocab, idf, forceidf=5)
dict_categories = {x['term'].lower(): 5 for x in p['tags']}
if 'and' in dict_authors:
# special case for "and" handling in authors list
del dict_authors['and']
dict_summary = makedict(p['summary'], vocab, idf)
search_dict[pid] = merge_dicts([dict_title, dict_authors, dict_categories, dict_summary])
date_pid_tuple.sort(reverse=True, key=lambda x: x[0])
date_sorted_pids = [sp[1] for sp in date_pid_tuple]
return db, date_sorted_pids, search_dict
def get_top_papers(sqldb):
# compute top papers in peoples' libraries
print('computing top papers...')
libs = sqldb.execute('''select * from library''').fetchall()
counts = {}
for lib in libs:
pid = lib['paper_id']
counts[pid] = counts.get(pid, 0) + 1
top_paper_counts = sorted([(v, k) for k, v in counts.items() if v > 0], reverse=True)
return [q[1] for q in top_paper_counts]
def makedict(s, vocab, idf, forceidf=None, scale=1.0):
# some utilities for creating a search index for faster search
punc = "'!\"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'" # removed hyphen from string.punctuation
trans_table = {ord(c): None for c in punc}
words = set(s.lower().translate(trans_table).strip().split())
idfd = {}
for w in words: # todo: if we're using bigrams in vocab then this won't search over them
if forceidf is None:
if w in vocab:
# we have idf for this
idfval = idf[vocab[w]] * scale
else:
idfval = 1.0 * scale # assume idf 1.0 (low)
else:
idfval = forceidf
idfd[w] = idfval
return idfd
def merge_dicts(dlist):
m = {}
for d in dlist:
for k, v in d.items():
m[k] = m.get(k, 0) + v
return m
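# For example, merge_dicts([{'nlp': 5, 'deep': 1}, {'deep': 2}]) -> {'nlp': 5, 'deep': 3};
# overlapping keys have their idf weights summed across the title/author/category/summary dicts.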
def save_cache(cache, updated_db):
# save the cache
print('writing', Config.serve_cache_path)
safe_pickle_dump(cache, Config.serve_cache_path)
print('writing', Config.db_serve_path)
safe_pickle_dump(updated_db, Config.db_serve_path)
def run():
sqldb, db, meta = load_dbs()
vocab, idf = meta['vocab'], meta['idf']
top_sorted_pids = get_top_papers(sqldb)
updated_db, date_sorted_pids, search_dict = loop_db_for_infos(db, vocab, idf)
cache = {"top_sorted_pids": top_sorted_pids, "date_sorted_pids": date_sorted_pids, "search_dict": search_dict}
save_cache(cache, updated_db)
if __name__ == "__main__":
run()
|
import os
import sys
import logging
import time
import datetime
import csv
import requests
import sqlite3
def configureLogger():
log_format = "%(levelname)s [%(name)s] %(asctime)s - %(message)s"
log_dir = os.path.join(os.path.normpath(os.getcwd() + os.sep), 'logs')
log_fname = os.path.join(log_dir, 'mercado_fundo.log')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging.basicConfig (level=logging.INFO,
format=log_format,
handlers=[
logging.FileHandler(log_fname),
logging.StreamHandler(sys.stdout)])
def configureDownload():
download_dir = os.path.join(os.path.normpath(os.getcwd() + os.sep), 'download')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
def configureDatabase():
print("sqlite3.version:{0}".format(sqlite3.version))
database_dir = os.path.join(os.path.normpath(os.getcwd() + os.sep), 'database')
database_fname = os.path.join(database_dir, 'dados.db')
if not os.path.exists(database_dir):
os.makedirs(database_dir)
conn = sqlite3.connect(database_fname)
cursor = conn.cursor()
create_file = os.path.join(os.path.normpath(os.getcwd() + os.sep + 'scripts'), '05_cvm_mercado_fundo_create_table.sql')
with open(create_file, 'r') as content_file:
content = content_file.read()
cursor.execute(content)
conn.close()
def generateYearMonth(quantity):
yearMonth = []
today = datetime.datetime.today()
currentMonth = today.month
currentYear = today.year
for _ in range(0, quantity):
yearMonth.append(tuple((currentYear, currentMonth)))
if (currentMonth == 1):
currentMonth = 12
currentYear = currentYear - 1
else:
currentMonth = currentMonth -1
return yearMonth
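# For example, if today falls in March 2021, generateYearMonth(3) returns
# [(2021, 3), (2021, 2), (2021, 1)]: the most recent months first, stepping back across
# year boundaries when needed.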
def saveFundosDatabase(conteudo):
logger = logging.getLogger(name="database")
sql = ''
database_dir = os.path.join(os.path.normpath(os.getcwd() + os.sep), 'database')
database_fname = os.path.join(database_dir, 'dados.db')
insert_file = os.path.join(os.path.normpath(os.getcwd() + os.sep + 'scripts'), '06_cvm_mercado_fundo_insert.sql')
with open(insert_file, 'r') as content_file:
sql = content_file.read()
conn = sqlite3.connect(database_fname)
cursor = conn.cursor()
tamanho_conteudo = len(conteudo)
logger.info ("started saving {0} items to database".format(tamanho_conteudo))
start = datetime.datetime.now()
    # csv.DictReader has already consumed the header row, so insert every record
    for i in range(tamanho_conteudo):
cursor.execute(sql, conteudo[i])
# save data
conn.commit()
conn.close()
finish = datetime.datetime.now()
logger.info ("finished saving {0} items to database in {1}".format(tamanho_conteudo,(finish-start)))
def downloadFundosFile(year,month):
logger = logging.getLogger(name="download")
year_month = "{0}/{1:02d}".format(year,month)
download_url = "http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/inf_diario_fi_{0}{1:02d}.csv".format(year,month)
#download_dir = os.path.join(os.path.normpath(os.getcwd() + os.sep), 'download')
logger.info("start download "+year_month)
with requests.Session() as s:
download_content = s.get(download_url)
decoded_content = download_content.content.decode('latin-1')
#parsed_content = csv.reader(decoded_content.splitlines(), delimiter=';')
parsed_content = csv.DictReader(decoded_content.splitlines(), delimiter=';')
content_list = list(parsed_content)
#content_list = parsed_content
logger.info("finished download "+year_month)
return content_list
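# For example, downloadFundosFile(2020, 9) fetches
# http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/inf_diario_fi_202009.csv and
# returns its rows as a list of dicts keyed by the CSV header (';'-delimited).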
def downloadAndSave(year, month):
content_list = downloadFundosFile(year,month)
saveFundosDatabase(content_list)
def main():
#today = datetime.datetime.today()
configureLogger()
configureDownload()
configureDatabase()
list_year_month = generateYearMonth(50)
for year_month in list_year_month:
        downloadAndSave(year_month[0],year_month[1])
# years = [2019,2018,2017,2016,2015]
# month = [1,2,3,4,5,6,7,8,9,10,11,12]
# year_month =list(itertools.product(years,month))
# for i in len(year_month):
# if (i>numberOfMonth):
# break
# content_list = downloadFundosFile(year_month[i][1],year_month[i][2])
if __name__ == "__main__":
main()
|
from typing import Callable, Optional
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QHBoxLayout, QToolButton, QWidget
from electrumsv.i18n import _
from .util import KeyEventLineEdit, read_QIcon
class ButtonLayout(QHBoxLayout):
def __init__(self, parent: Optional[QWidget]=None) -> None:
# NOTE(typing) The checker does not have signatures for the parent being an explicit None.
super().__init__(parent) # type: ignore
# The offset to insert the next button at.
self._button_index = 0
self.setSpacing(2)
self.setContentsMargins(0, 2, 0, 2)
def _create_button(self, icon_name: str, on_click: Callable[[], None], tooltip: str) \
-> QToolButton:
button = QToolButton()
button.setIcon(read_QIcon(icon_name))
button.setToolTip(tooltip)
button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
button.clicked.connect(on_click)
return button
def add_button(self, icon_name: str, on_click: Callable[[], None], tooltip: str,
position: Optional[int]=None) -> QToolButton:
button = self._create_button(icon_name, on_click, tooltip)
if position is None:
position = self._button_index
self._button_index += 1
self.insertWidget(position, button)
return button
class TableTopButtonLayout(ButtonLayout):
add_signal = pyqtSignal()
refresh_signal = pyqtSignal()
filter_signal = pyqtSignal(str)
def __init__(self, parent: Optional[QWidget]=None, filter_placeholder_text: str="",
enable_filter: bool=True) -> None:
super().__init__(parent)
# The offset to insert the next button at.
self._button_index = 0
self._filter_button: Optional[QToolButton] = None
self._filter_box = KeyEventLineEdit(override_events={Qt.Key.Key_Escape})
# When the focus is in the search box, if the user presses Escape the filtering exits.
self._filter_box.key_event_signal.connect(self._on_search_override_key_press_event)
# As text in the search box changes, the filter updates in real time.
self._filter_box.textChanged.connect(self._on_search_text_changed)
if not filter_placeholder_text:
filter_placeholder_text = _("Your filter text..")
self._filter_box.setPlaceholderText(filter_placeholder_text)
self._filter_box.hide()
self.setSpacing(2)
self.setContentsMargins(0, 2, 0, 2)
self.add_refresh_button()
if enable_filter:
self._filter_button = self.add_filter_button()
self.addWidget(self._filter_box, 1)
self.addStretch(1)
# Find the stretch QSpacerItem and hold a reference so we can add and remove it.
# The reason we do this is that otherwise the stretch item prevents the search box from
# expanding.
self._stretch_item = self.takeAt(self.count()-1)
self.addItem(self._stretch_item)
def add_create_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Add a new entry.")
return self.add_button("icons8-add-new-96-windows.png", self.add_signal.emit, tooltip)
def add_refresh_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Refresh the list.")
return self.add_button("refresh_win10_16.png", self.refresh_signal.emit, tooltip)
def add_filter_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Toggle list searching/filtering (Control+F).")
return self.add_button("icons8-filter-edit-32-windows.png", self.on_toggle_filter,
tooltip)
def _on_search_text_changed(self, text: str) -> None:
if self._filter_box.isHidden():
return
self.filter_signal.emit(text)
def _on_search_override_key_press_event(self, event_key: int) -> None:
if event_key == Qt.Key.Key_Escape:
self.on_toggle_filter()
# Call externally to toggle the filter.
def on_toggle_filter(self) -> None:
assert self._filter_button is not None
if self._filter_box.isHidden():
# Activate filtering and show the text field.
self._filter_button.setIcon(read_QIcon("icons8-clear-filters-32-windows.png"))
self._filter_box.show()
self.removeItem(self._stretch_item)
self._filter_box.setFocus()
else:
self.addItem(self._stretch_item)
# Deactivate filtering and hide the text field.
self._filter_button.setIcon(read_QIcon("icons8-filter-edit-32-windows.png"))
self._filter_box.setText('')
self._filter_box.hide()
self.filter_signal.emit('')
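# A hypothetical usage sketch (widget and slot names are illustrative; not part of the
# original module):
#   buttons = TableTopButtonLayout(filter_placeholder_text=_("Filter entries.."))
#   buttons.refresh_signal.connect(table_widget.refresh_rows)
#   buttons.filter_signal.connect(table_widget.apply_filter_text)
#   vbox.addLayout(buttons)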
|
from polyglotdb import CorpusContext
from polyglotdb.config import CorpusConfig
import polyglotdb.io as pgio
import sys
import os
graph_db = {'host':'localhost', 'port': 7474}
path_to_SB = os.path.join("/Volumes","data","corpora","SantaBarbara_aligned", "Part2_aligned")
if __name__ == '__main__':
config = CorpusConfig("santabarbara_part2", **graph_db)
print("loading corpus...")
with CorpusContext(config) as g:
g.reset()
parser = pgio.inspect_fave(path_to_SB)
g.load(parser, path_to_SB)
q = g.query_graph(g.word).filter(g.word.label=="think")
results = q.all()
assert(len(results) > 0)
q = g.query_graph(g.phone).filter(g.phone.label=="ow")
results_phone = q.all()
assert(len(results_phone) > 0 )
|
import subprocess
import os
def sh(cmd, input=""):
rst = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=input.encode("utf-8"))
assert rst.returncode == 0, rst.stderr.decode("utf-8")
return rst.stdout.decode("utf-8")
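# For example, sh('echo hello') returns 'hello\n'; a non-zero exit status raises an
# AssertionError carrying the command's stderr output.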
s1 = sh('cat /etc/issue')
p1 = sh('python3 --version')
p2 = sh('jupyter notebook --version')
p3 = sh('jupyter lab --version')
p4 = sh('pip3 --version')
p5 = sh('pip3 freeze | grep pandas')
p6 = sh('pip3 freeze | grep matplotlib')
p7 = sh('pip3 freeze | grep numpy')
p8 = sh('pip3 freeze | grep scipy')
r1 = sh('R --version')
r2 = sh('pip3 freeze | grep rpy2')
print('')
print('########## System ###########')
print(s1)
print('########## Python ###########')
print(p1, 'jupyter notebook =', p2, 'jupyter lab =', p3, p4, p5, p6, p7, p8)
print('')
print('########## R ################')
print(r1, r2)
# remove single files
if os.path.exists('results_versions.txt'):
os.remove('results_versions.txt')
# create a file
if not os.path.exists('results_versions.txt'):
os.mknod('results_versions.txt')
# append data and some new lines
file = open('results_versions.txt','a')
file.write('\n')
file.write('######### System ########')
file.write('\n')
file.write(s1)
file.write('######### Python ########')
file.write('\n')
file.write(p1)
file.write('jupyter notebook = ' + p2)
file.write('jupyter lab = ' + p3)
file.write(p4)
file.write(p5)
file.write(p6)
file.write(p7)
file.write(p8)
file.write('\n')
file.write(r1)
file.write(r2)
file.write('\n')
file.close()
|
from da4py.main.utils.formulas import Or, And
def petri_net_to_SAT(net, m0, mf, variablesGenerator, size_of_run, reach_final, label_m="m_ip", label_t="tau_it",
silent_transition=None,transitions=None):
    '''
    This function returns the SAT formulas of a Petri net, given the variable labels and the size of the run.
    :param net: Petri net from the pm4py library
    :param m0: initial marking
    :param mf: final marking
    :param variablesGenerator: @see darksider4py.variablesGenerator
    :param label_m (string) : name of the marking boolean variables per instant i and place p
    :param label_t (string) : name of the transition boolean variables per instant i and transition t
    :param size_of_run (int) : max instant i
    :param reach_final (bool) : True to require reaching the final marking
    :param silent_transition : label used for silent transitions, if any
    :param transitions : optional ordered list of transitions
    :return: a boolean formula
    '''
# we need a ordered list to get int per place/transition (for the variablesgenerator)
if transitions is None :
transitions = [t for t in net.transitions]
silent_transitions=[t for t in net.transitions if t.label==silent_transition]
places = [p for p in net.places]
# we create the number of variables needed for the markings
variablesGenerator.add(label_m, [(0, size_of_run + 1), (0, len(places))])
# we create the number of variables needed for the transitions
variablesGenerator.add(label_t, [(1, size_of_run + 1), (0, len(transitions))])
return (is_run(size_of_run, places, transitions, m0, mf, variablesGenerator.getFunction(label_m),
variablesGenerator.getFunction(label_t), reach_final), places, transitions, silent_transitions)
|
import json
import pytuya
# Specify the smoker and get its status
d = pytuya.OutletDevice('<gwID>', '<IP>', '<productKey>')
data = d.status()
# Enable debug to see the raw JSON
Debug = False
#Debug = True
if Debug:
raw = json.dumps(data, indent=4)
print(raw)
# Simple if statement to check if the smoker is on
rt_state = data['dps']['1']
if rt_state:
print('RecTec is on')
else:
print('RecTec is off')
# The following values are based on observation
# dps = '102' & '103' might be swapped
print('Target Temperature: %r' % data['dps']['102'])
print('Current Temperature: %r' % data['dps']['103'])
# When smoker is off (data['dps']['1'] = False)
# values of probes might be based on last "on"
print('Probe A Temperature: %r' % data['dps']['105'])
print('Probe B Temperature: %r' % data['dps']['106'])
|
from dataclasses import dataclass
@dataclass
class FuzzyVariable():
name: str
fuzzy_sets: list
def membership(self, variable):
return {fs.name: fs.membership(variable) for fs in self.fuzzy_sets}
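# A minimal usage sketch (the TriangularSet helper below is hypothetical and only
# illustrates the fuzzy-set interface that FuzzyVariable expects: a `name` attribute
# and a `membership(value)` method).
if __name__ == "__main__":
    @dataclass
    class TriangularSet:
        name: str
        a: float
        b: float
        c: float
        def membership(self, x):
            # triangular membership: 0 outside [a, c], rising to 1 at x == b
            if x <= self.a or x >= self.c:
                return 0.0
            if x <= self.b:
                return (x - self.a) / (self.b - self.a)
            return (self.c - x) / (self.c - self.b)
    temperature = FuzzyVariable(
        name="temperature",
        fuzzy_sets=[TriangularSet("cold", 0, 10, 20), TriangularSet("warm", 15, 25, 35)],
    )
    print(temperature.membership(18))  # {'cold': 0.2, 'warm': 0.3}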
|
""" Implementation for vocabulary with precomputed structures for faster searches and checks. """
from . import molecule_edit as me
from . import data_utils
from .chemutils import get_mol
from typing import NamedTuple
class AtomTuple(NamedTuple):
symbol: str
formal_charge: int
implicit_valence: int
explicit_valence: int
my_implicit_valence: int
my_explicit_valence: int
@staticmethod
def from_atom(atom):
return AtomTuple(
atom.GetSymbol(),
atom.GetFormalCharge(),
atom.GetImplicitValence(),
atom.GetExplicitValence(),
me.my_implicit_valence(atom),
me.my_explicit_valence(atom))
class BondTuple(NamedTuple):
bond_type: int
overlap: float
atom1: AtomTuple
atom2: AtomTuple
@staticmethod
def from_bond(bond):
a1 = AtomTuple.from_atom(bond.GetBeginAtom())
a2 = AtomTuple.from_atom(bond.GetEndAtom())
return BondTuple(
bond.GetBondType(),
bond.GetBondTypeAsDouble(),
min(a1, a2), max(a1, a2))
class Vocabulary:
""" Vocabulary class with cached legality checks.
This class stores the vocabulary, and additionally caches legality checks
performed to enable large speed-ups when editing many molecules.
"""
_average_eps = 3e-3
def __init__(self, vocab=None):
if vocab is None:
vocab = data_utils.get_vocab()
elif isinstance(vocab, str):
vocab = data_utils.get_vocab(vocab_name=vocab)
self.vocab = vocab
self._cache_legal_atom = {}
self._cache_legal_bond = {}
self.cache_atom_hit_rate = 0.
self.cache_bond_hit_rate = 0.
def __iter__(self):
return iter(self.vocab)
def __getitem__(self, key):
return self.vocab[key]
def __len__(self):
return len(self.vocab)
def legal_at_atom(self, atom):
self.cache_atom_hit_rate *= (1 - Vocabulary._average_eps)
free_slots = atom.GetImplicitValence()
if atom.GetExplicitValence() != me.my_explicit_valence(atom):
free_slots = me.my_implicit_valence(atom)
if free_slots == 0 and atom.GetSymbol() not in me.SPECIAL_ATOMS:
self.cache_atom_hit_rate += Vocabulary._average_eps
return []
atom_tuple = AtomTuple.from_atom(atom)
result = self._cache_legal_atom.get(atom_tuple, None)
if result is not None:
# return cached result if it exists
self.cache_atom_hit_rate += Vocabulary._average_eps
return result
result = []
for s, c in self.vocab:
match_atoms = [a.GetIdx() for a in c.GetAtoms()
if me.atom_match(atom, a)]
if len(match_atoms) > 0:
result.append((c, match_atoms))
self._cache_legal_atom[atom_tuple] = result
return result
def legal_at_bond(self, bond):
self.cache_bond_hit_rate *= (1 - Vocabulary._average_eps)
if not bond.IsInRing():
raise ValueError("bond was not in ring.")
bond_tuple = BondTuple.from_bond(bond)
result = self._cache_legal_bond.get(bond_tuple, None)
if result is not None:
# return cached result if it exists
self.cache_bond_hit_rate += Vocabulary._average_eps
return result
result = []
for s, c in self.vocab:
if c.GetNumAtoms() <= 2:
# only rings can attach by bond
continue
match_bonds = [b.GetIdx() for b in c.GetBonds() if me.bond_match(bond, b)]
if match_bonds:
result.append((s, match_bonds))
self._cache_legal_bond[bond_tuple] = result
return result
def get_vocab_index(self, mol):
pass
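# A hypothetical usage sketch (assumes RDKit and the packaged vocabulary data are
# available; the SMILES string is illustrative only):
#   vocab = Vocabulary()
#   mol = get_mol('c1ccccc1O')
#   attachable = vocab.legal_at_atom(mol.GetAtomWithIdx(0))  # [(fragment, match_atom_ids), ...]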
|
from channels.generic.websocket import AsyncWebsocketConsumer
import json
from .serializers import SiteSerializer
from .models import Site
class UpdateConsumer(AsyncWebsocketConsumer):
async def connect(self):
await self.channel_layer.group_add(
'All',
self.channel_name
)
await self.accept()
sites = Site.objects.all()
for site in sites:
serializer = SiteSerializer(site)
await self.channel_layer.group_send(
'All',
{
'type': 'chat_message',
'message': serializer.data
}
)
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
'All',
self.channel_name
)
async def chat_message(self, event):
message = event['message']
await self.send(text_data=json.dumps({
'message': message
}))
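# Added note (not in the original module): Site.objects.all() in connect() is a synchronous
# ORM call made from an async consumer; on Django 3.x and later this can raise
# SynchronousOnlyOperation. Wrapping the query with channels.db.database_sync_to_async is
# the usual way to make it safe.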
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('Book/add/',views.addBook),
path('allBooks/',views.allBooks),
path('changeGenre/',views.changeGenre)
]
|
from __future__ import annotations
from typing import Optional, Sequence, cast
import sqlalchemy as sa
import sqlalchemy.orm as so
import sqlalchemy_utils as su
from quiz_bot.db.base import Base, PrimaryKeyMixin
class UserQuery(so.Query):
def get_by_external_id(self, value: int) -> Optional[User]:
return cast(Optional[User], self.session.query(User).filter(User.external_id == value).one_or_none())
def get_all_user_ids(self) -> Sequence[int]:
return cast(Sequence[int], self.session.query(User).with_entities(User.id).all())
def get_by_nick_name(self, value: str) -> Optional[User]:
return cast(
Optional[User], self.session.query(User).filter(User.nick_name == value).order_by(User.id.asc()).first()
)
@su.generic_repr('id', 'first_name', 'last_name', 'external_id', 'nick_name')
class User(PrimaryKeyMixin, Base):
__tablename__ = 'users' # type: ignore
__query_cls__ = UserQuery
external_id = sa.Column(sa.Integer, nullable=False)
remote_chat_id = sa.Column(sa.Integer, nullable=False)
chitchat_id = sa.Column(sa.String, nullable=False)
first_name = sa.Column(sa.String)
last_name = sa.Column(sa.String)
nick_name = sa.Column(sa.String)
def __init__(
self,
external_id: int,
remote_chat_id: int,
chitchat_id: str,
first_name: Optional[str],
last_name: Optional[str],
nick_name: Optional[str],
) -> None:
self.external_id = external_id
self.remote_chat_id = remote_chat_id
self.chitchat_id = chitchat_id
self.first_name = first_name
self.last_name = last_name
self.nick_name = nick_name
|
import numpy as np
from tma.objects import Observer, Target
from tma.model import Model
from tma.algorithms import Algorithms, Swarm
from tma.helper_functions import get_df, convert_to_xy
observer_x, observer_y, observer_course, observer_velocity = 0.0, 0.0, 0.0, 5.0
observer = Observer(
observer_x,
observer_y,
observer_course,
observer_velocity,
verbose=True,
)
target_bearing, target_distance, target_course, target_velocity = (
5.0,
20.0,
45.0,
10.0,
)
target = Target(
observer,
target_bearing,
target_distance,
target_course,
target_velocity,
verbose=True,
)
observer.forward_movement(3 * 60)
observer.change_course(270, "left", omega=0.5)
observer.forward_movement(5 * 60)
observer.change_course(90, "right", omega=0.5)
observer.forward_movement(3 * 60)
target.forward_movement(len(observer.coords[0]) - 1)
model = Model(observer, target=target, verbose=True, seed=1)
alg = Algorithms(model)
p0 = convert_to_xy([0.0, 25.0, 90.0, 7.0])
res = alg.mle_v2(p0)
alg.print_result(res)
swarm = Swarm(model, True)
swarm.set_algorithm("ะะะ")
swarm.set_target(target)
swarm.set_initial()
swarm.set_noise_func()
r = swarm.run()
print(observer)
print(target)
|
import cv2
import numpy as np
from imutils.contours import sort_contours
import imutils
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
def get_lines(image, output_images, output_image_labels, show=False):
"""
:return: list of roi parameters for lines
"""
# grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
output_images.append(gray.copy())
output_image_labels.append("Gray")
if show:
cv2.imshow('gray', gray)
cv2.waitKey(0)
# binary
ret, thresh = cv2.threshold(gray, 105, 255, cv2.THRESH_BINARY_INV) # 127 changed to 0
output_images.append(thresh.copy())
output_image_labels.append("Thresh")
if show:
cv2.imshow('second', thresh)
cv2.waitKey(0)
# dilation
kernel = np.ones((5, 125), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
output_images.append(img_dilation.copy())
output_image_labels.append("Dilation")
if show:
cv2.imshow('dilated', img_dilation)
cv2.waitKey(0)
# find contours
ctrs = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
ctrs = imutils.grab_contours(ctrs)
# sort contours
sorted_ctrs = sort_contours(ctrs, method="top-to-bottom")[0]
l_xs = []
l_ys = []
l_ws = []
l_hs = []
for i, ctr in enumerate(sorted_ctrs):
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting ROI
roi = image[y:y + h, x:x + w]
l_xs.append(x)
l_ys.append(y)
l_ws.append(w)
l_hs.append(h)
lineSegment = cv2.rectangle(image, (x, y), (x + w, y + h), (90, 0, 255), 2)
output_images.append(lineSegment.copy())
output_image_labels.append("Line Segment ")
output_images.append(image.copy())
output_image_labels.append("Processed Image")
if show:
cv2.imshow('marked areas', image)
cv2.waitKey(0)
return l_xs, l_ys, l_ws, l_hs
|
from functools import lru_cache
from typing import List
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
@lru_cache(None)
def dp(i: int) -> int:
# base case
if i == 0:
return 1
# recurrence relation
res = 1
for j in range(i):
if nums[i] > nums[j]:
res = max(res, 1+dp(j))
return res
return max(dp(i) for i in range(len(nums)))
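# A minimal usage check (illustrative input, not part of the original file):
if __name__ == "__main__":
    # The longest strictly increasing subsequence of the list below is [2, 3, 7, 101].
    assert Solution().lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18]) == 4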
|
import time
import numpy as np
import serial
from file_utils import create_folder_if_absent, save_npy
from visualizer import init_heatmap, update_heatmap
"""
Initialization
Serial Parameters - Port, Baud Rate, Start Byte
Program Mode - Plot (Debug) / Write Mode
"""
# SERIAL_PORT = 'COM5' # for windows
SERIAL_PORT = "/dev/ttyUSB0" # for linux
BAUD_RATE = 115200
ARRAY_SHAPE = (24,32)
DEBUG_MODE = 0
WRITE_MODE = 1
PUBLISH_MODE = 2
DATA_PATH = "data/dataset_for_xavier_day1" # change as it fits
DATA_DIR_SORT = "day"
def interpolate_values(df):
"""
    :param df: 24x32 frame that may contain NaN values
    :return: 24x32 array with NaN values replaced by the mean of their valid neighbours
"""
nan_value_indices = np.argwhere(np.isnan(df))
x_max = df.shape[0] - 1
y_max = df.shape[1] - 1
for indx in nan_value_indices:
x = indx[0]
y = indx[1]
if x==0 and y==0 :
df[x][y] = (df[x+1][y]+df[x][y+1])/2
elif (x==x_max and y==y_max):
df[x][y] = (df[x-1][y]+df[x][y-1])/2
elif (x==0 and y==y_max):
df[x][y] = (df[x+1][y]+df[x][y-1])/2
elif (x==x_max and y==0):
df[x][y] = (df[x-1][y]+df[x][y+1])/2
elif (x==0):
df[x][y] = (df[x+1][y]+df[x][y-1]+df[x][y+1])/3
elif (x==x_max):
df[x][y] = (df[x-1][y]+df[x][y-1]+df[x][y+1])/3
elif (y==0):
df[x][y] = (df[x+1][y]+df[x-1][y]+df[x][y+1])/3
elif (y==y_max):
df[x][y] = (df[x-1][y]+df[x+1][y]+df[x][y-1])/3
else :
df[x][y] = (df[x][y+1] + df[x+1][y] + df[x-1][y] + df[x][y-1]) / 4
return df
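# Added note: interior NaN cells are replaced by the mean of their four neighbours, edge
# cells by three, and corner cells by two, so a frame with isolated dropped pixels comes
# back fully populated.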
def save_serial_output(forever, num_samples=3000, mode=DEBUG_MODE):
"""
Save serial output from arduino
"""
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
ser.reset_output_buffer()
counter = 0
to_read = forever or counter < num_samples
plot = None
if mode == DEBUG_MODE:
min_temp = 28
max_temp = 40
plot = init_heatmap("MLX90640 Heatmap", ARRAY_SHAPE, min_temp, max_temp)
elif mode == WRITE_MODE:
create_folder_if_absent(DATA_PATH)
while to_read:
try:
ser_bytes = ser.readline()
decoded_string = ser_bytes.decode("utf-8", errors='ignore').strip("\r\n")
values = decoded_string.split(",")[:-1]
array = np.array(values)
if array.shape[0] == ARRAY_SHAPE[0] * ARRAY_SHAPE[1]:
df = np.reshape(array.astype(float), ARRAY_SHAPE)
df = interpolate_values(df)
max_temp = np.amax(df)
min_temp = np.amin(df)
if mode == DEBUG_MODE:
print("Updating Heatmap...", "[{}]".format(counter))
update_heatmap(df, plot)
elif mode == WRITE_MODE:
print("Saving npy object...", "[{}]".format(counter))
save_npy(df, DATA_PATH, directory_sort=DATA_DIR_SORT)
elif mode == PUBLISH_MODE:
pass
counter += 1
except KeyboardInterrupt:
raise
except Exception as e:
print(e)
break
if __name__ == "__main__":
save_serial_output(forever=True, mode=WRITE_MODE)
|
'''
Timer stimulus generation
- makes videos of shrinking circles of different colours
'''
import socket #to get host machine identity
import os # for joining paths and filenames sensibly
import scipy.misc #for image function
import numpy as np #number functions
#test which machine we are on and set working directory
if 'tom' in socket.gethostname():
os.chdir('/home/tom/Dropbox/university/students/choice_risk/images')
else:
print("I don't know where I am! ")
#cribbing from
#https://stackoverflow.com/questions/12062920/how-do-i-create-an-image-in-pil-using-a-list-of-rgb-tuples
def distance(x,y,centre):
'''calculate straight line distance of two x,y points'''
return np.sqrt((centre[0]-x)**2 + (centre[1]-y)**2)
def makeimg(width,height,colour,radius,filename):
'''make an image containing a coloured circle'''
channels = 3
centre=[width/2, height/2]
# Create an empty image
if colour==[0,0,0]: #white background
img = 255*np.ones((height, width, channels), dtype=np.uint8)
else:
img = np.zeros((height, width, channels), dtype=np.uint8)
# Draw something (http://stackoverflow.com/a/10032271/562769)
xx, yy = np.mgrid[:height, :width]
# Set the RGB values
for y in range(img.shape[0]):
for x in range(img.shape[1]):
r, g, b = colour
if distance(x,y,centre)<radius:
img[y][x][0] = r
img[y][x][1] = g
img[y][x][2] = b
return img
#colours of our stimuli
colours=[[0,0,0],[0,0,255],[0,255,0],[0,255,255],
[255,0,0],[255,0,255],[255,255,0],[255,255,255]]
# Image size
width = 640
height = 480
#loop over colours
for c,colour in enumerate(colours):
colourname='c'+str(c)
#make frames
for i,radius in enumerate(np.linspace(min([width,height])/2,0,6)):
filename='img'+str(i)+'.png'
# Make image
img=makeimg(width,height,colour,radius,filename)
# Save the image
scipy.misc.imsave(filename, img)
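        # Note (added comment): scipy.misc.imsave was removed in SciPy 1.2; on newer
        # environments imageio.imwrite(filename, img) is the usual replacement.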
#join frames into mp4 - you need ffmpeg installed
os.system("ffmpeg -r 1 -i img%01d.png -vcodec mpeg4 -y " + colourname + ".mp4")
|
#!/usr/bin/env python
"""
Rail-RNA-cojunction_enum
Follows Rail-RNA-junction_index
Precedes Rail-RNA-cojunction_fasta
Alignment script for MapReduce pipelines that wraps Bowtie 2. Finds junctions
that cooccur on reads by local alignments to transcriptome elements.
Input (read from stdin)
----------------------------
Single input tuple column:
1. SEQ or its reversed complement -- must be unique
(but not necessarily in alphabetical order)
Hadoop output (written to stdout)
----------------------------
Tab-delimited tuple columns:
1. Reference name (RNAME in SAM format) +
'+' or '-' indicating which strand is the sense strand
2. Comma-separated list of intron start positions in configuration
3. Comma-separated list of intron end positions in configuration
4. left_extend_size: by how many bases on the left side of an intron the
reference should extend
5. right_extend_size: by how many bases on the right side of an intron the
reference should extend
6. Read sequence or reversed complement, whatever's first in alphabetical order
ALL OUTPUT COORDINATES ARE 1-INDEXED.
"""
import sys
import os
import site
import subprocess
base_path = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))
)
)
utils_path = os.path.join(base_path, 'rna', 'utils')
site.addsitedir(utils_path)
site.addsitedir(base_path)
import bowtie
from dooplicity.tools import xstream, register_cleanup, xopen, \
make_temp_dir
from dooplicity.counters import Counter
from dooplicity.ansibles import Url
import tempdel
import filemover
# Initialize global variable for tracking number of input lines
_input_line_count = 0
counter = Counter('cojunction_enum')
register_cleanup(counter.flush)
def go(input_stream=sys.stdin, output_stream=sys.stdout, bowtie2_exe='bowtie2',
bowtie2_index_base='genome', bowtie2_args='', verbose=False,
report_multiplier=1.2, stranded=False, fudge=5, max_refs=300, score_min=60,
gzip_level=3, mover=filemover.FileMover(), intermediate_dir='.',
scratch=None):
""" Runs Rail-RNA-cojunction_enum
Alignment script for MapReduce pipelines that wraps Bowtie 2. Finds
junctions that cooccur on reads by local alignments to transcriptome
elements from Bowtie 2.
Input (read from stdin)
----------------------------
Tab-delimited output tuple columns (readletize)
1. SEQ or its reversed complement, whichever is first in alphabetical
order
2. Comma-separated list of sample labels if field 1 is the read
sequence; '\x1c' if empty
3. Comma-separated list of sample labels if field 1 is the reversed
complement of the read sequence; '\x1c' if empty
Hadoop output (written to stdout)
----------------------------
Tab-delimited tuple columns:
1. Reference name (RNAME in SAM format) +
'+' or '-' indicating which strand is the sense strand
2. Comma-separated list of intron start positions in configuration
3. Comma-separated list of intron end positions in configuration
4. left_extend_size: by how many bases on the left side of an intron
the reference should extend
5. right_extend_size: by how many bases on the right side of an intron
the reference should extend
6. Read sequence
input_stream: where to find input reads.
output_stream: where to emit exonic chunks and junctions.
bowtie2_exe: filename of Bowtie 2 executable; include path if not in
$PATH.
bowtie2_index_base: the basename of the Bowtie index files associated
with the reference.
bowtie2_args: string containing precisely extra command-line arguments
to pass to Bowtie 2, e.g., "--tryhard --best"; or None.
verbose: True iff more informative messages should be written to
stderr.
report_multiplier: if verbose is True, the line number of an alignment
written to stderr increases exponentially with base
report_multiplier.
stranded: True iff input reads are strand-specific; this affects
whether an output partition has a terminal '+' or '-' indicating
the sense strand. Further, if stranded is True, an alignment is
returned only if its strand agrees with the junction's strand.
fudge: by how many bases to extend left and right extend sizes
to accommodate potential indels
max_refs: hard limit on number of reference seqs to enumerate per
read per strand
score_min: Bowtie2 CONSTANT minimum alignment score
gzip_level: compression level to use for temporary files
mover: FileMover object, for use in case Bowtie2 idx needs to be
pulled from S3
intermediate_dir: where intermediates are stored; for temporarily
storing transcript index if it needs to be pulled from S3
scratch: scratch directory for storing temporary files or None if
securely created temporary directory
No return value.
"""
bowtie2_index_base_url = Url(bowtie2_index_base)
if bowtie2_index_base_url.is_s3:
index_basename = os.path.basename(bowtie2_index_base)
index_directory = os.path.join(intermediate_dir, 'transcript_index')
if not os.path.exists(os.path.join(index_directory, '_STARTED')):
# Download index
counter.add('index_download')
with open(os.path.join(index_directory, '_STARTED'), 'w') \
as started_stream:
print >>started_stream, 'STARTED'
for extension in ['.1.bt2', '.2.bt2', '.3.bt2', '.4.bt2',
'.rev.1.bt2', '.rev.2.bt2']:
mover.get(bowtie2_index_base_url + extension, index_directory)
with open(os.path.join(index_directory, '_SUCCESS'), 'w') \
as success_stream:
print >>success_stream, 'SUCCESS'
while not os.path.exists(os.path.join(index_directory, '_SUCCESS')):
time.sleep(0.5)
bowtie2_index_base = os.path.join(index_directory, index_basename)
global _input_line_count
temp_dir_path = make_temp_dir(scratch)
register_cleanup(tempdel.remove_temporary_directories, [temp_dir_path])
reads_file = os.path.join(temp_dir_path, 'reads.temp.gz')
with xopen(True, reads_file, 'w', gzip_level) as reads_stream:
for _input_line_count, line in enumerate(input_stream):
seq = line.strip()
counter.add('reads_to_temp')
print >>reads_stream, '\t'.join([seq, seq, 'I'*len(seq)])
input_command = 'gzip -cd %s' % reads_file
bowtie_command = ' '.join([bowtie2_exe,
bowtie2_args if bowtie2_args is not None else '',
' --local -t --no-hd --mm -x', bowtie2_index_base, '--12 -',
'--score-min L,%d,0' % score_min,
'-D 24 -R 3 -N 1 -L 20 -i L,4,0'])
delegate_command = ''.join(
[sys.executable, ' ', os.path.realpath(__file__)[:-3],
('_delegate.py --report-multiplier %08f --fudge %d '
'--max-refs %d %s %s') % (report_multiplier, fudge, max_refs,
'--stranded' if stranded else '',
'--verbose' if verbose else '')]
)
full_command = ' | '.join([input_command,
bowtie_command, delegate_command])
print >>sys.stderr, 'Starting Bowtie2 with command: ' + full_command
bowtie_process = subprocess.Popen(' '.join(
['set -exo pipefail;', full_command]
), bufsize=-1, stdout=sys.stdout, stderr=sys.stderr,
shell=True, executable='/bin/bash')
_input_line_count += 1
return_code = bowtie_process.wait()
counter.add('bowtie2_subprocess_done')
if return_code:
raise RuntimeError('Error occurred while reading Bowtie 2 output; '
'exitlevel was %d.' % return_code)
if __name__ == '__main__':
import argparse
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--report_multiplier', type=float, required=False,
default=1.2,
        help='When --verbose is also invoked, the only lines of lengthy '
             'intermediate output written to stderr have line numbers that '
             'increase exponentially with this base')
parser.add_argument('--verbose', action='store_const', const=True,
default=False,
help='Print out extra debugging statements')
parser.add_argument('--test', action='store_const', const=True,
default=False,
help='Run unit tests; DOES NOT NEED INPUT FROM STDIN')
parser.add_argument('--keep-alive', action='store_const', const=True,
default=False,
help='Periodically print Hadoop status messages to stderr to keep ' \
'job alive')
parser.add_argument('--fudge', type=int, required=False,
default=5,
help='Permits a sum of exonic bases for a junction combo to be '
'within the specified number of bases of a read sequence\'s '
'size; this allows for indels with respect to the reference')
parser.add_argument(
'--stranded', action='store_const', const=True, default=False,
help='Assume input reads come from the sense strand; then partitions '
'in output have terminal + and - indicating sense strand')
parser.add_argument('--score-min', type=int, required=False,
default=48,
help='Bowtie2 minimum CONSTANT score to use')
parser.add_argument('--max-refs', type=int, required=False,
default=300,
help='Hard limit on the number of reference sequences to emit '
'per read per strand. Prioritizes reference sequences that '
'overlap the fewest junctions')
parser.add_argument('--gzip-level', type=int, required=False,
default=3,
help='Gzip compression level to use for temporary Bowtie input file')
parser.add_argument('--intermediate-dir', type=str, required=False,
default='./',
help='Where to put transcript index if it needs to be downloaded')
# Add command-line arguments for dependencies
bowtie.add_args(parser)
filemover.add_args(parser)
tempdel.add_args(parser)
# Collect Bowtie arguments, supplied in command line after the -- token
argv = sys.argv
bowtie2_args = ''
in_args = False
for i, argument in enumerate(sys.argv[1:]):
if in_args:
bowtie2_args += argument + ' '
if argument == '--':
argv = sys.argv[:i + 1]
in_args = True
'''Now collect other arguments. While the variable args declared below is
global, properties of args are also arguments of the go() function so
different command-line arguments can be passed to it for unit tests.'''
args = parser.parse_args(argv[1:])
mover = filemover.FileMover(args=args)
# Start keep_alive thread immediately
if args.keep_alive:
from dooplicity.tools import KeepAlive
keep_alive_thread = KeepAlive(sys.stderr)
keep_alive_thread.start()
if __name__ == '__main__' and not args.test:
import time
start_time = time.time()
go(bowtie2_exe=os.path.expandvars(args.bowtie2_exe),
bowtie2_index_base=os.path.expandvars(args.bowtie2_idx),
bowtie2_args=bowtie2_args,
verbose=args.verbose,
report_multiplier=args.report_multiplier,
stranded=args.stranded,
fudge=args.fudge,
max_refs=args.max_refs,
score_min=args.score_min,
mover=mover,
intermediate_dir=args.intermediate_dir,
scratch=tempdel.silentexpandvars(args.scratch))
print >>sys.stderr, ('DONE with cojunction_enum.py; in=%d; '
'time=%0.3f s') % (_input_line_count,
time.time() - start_time)
elif __name__ == '__main__':
# Test units
del sys.argv[1:] # Don't choke on extra command-line parameters
import unittest
# Add unit tests here
unittest.main()
|
#newcate
#args: name
#create a new category with name name
from . import classes
from . import globalvar
import sys
def newcate(args):
if len(args.name) < 2:
print('All names for category must be at least 2 characters long.')
sys.exit(1)
try:
globalvar.masterCate[args.name]
print('Category with duplicate name already exists.')
except KeyError:
newCate = classes.category(args.name)
globalvar.masterCate[args.name] = newCate
globalvar.listStrCate += '\'{0}\' '.format(args.name)
print('New category {0} created.'.format(args.name))
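# --- Hypothetical usage sketch (not part of the original command) ---
# newcate() expects an argparse-style namespace with a 'name' attribute; the
# SimpleNamespace and the example name below are illustrative stand-ins for
# whatever the surrounding CLI actually parses.
def _example_newcate():  # pragma: no cover
    from types import SimpleNamespace
    newcate(SimpleNamespace(name='groceries'))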
|
import logging
import json
import pkg_resources
import eth_utils
from web3 import Web3
from .address import Address
class Contract:
logger = logging.getLogger()
@staticmethod
def _get_contract(web3: Web3, abi: list, address: Address):
assert(isinstance(web3, Web3))
assert(isinstance(abi, list))
assert(isinstance(address, Address))
# comment for arb test net
# code = web3.eth.getCode(address.address)
# if (code == "0x") or (code == "0x0") or (code == b"\x00") or (code is None):
# raise Exception(f"No contract found at {address}")
return web3.eth.contract(address=address.address, abi=abi)
@staticmethod
def _load_abi(package, resource) -> list:
return json.loads(pkg_resources.resource_string(package, resource))
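# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the two helpers above might be combined to obtain a contract
# handle. The RPC endpoint, ABI resource path and zero address below are
# assumptions made purely for illustration.
def _example_load_contract():  # pragma: no cover
    web3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # assumed local node
    abi = Contract._load_abi(__name__, "abi/ERC20.abi")  # assumed packaged ABI file
    return Contract._get_contract(
        web3, abi, Address("0x0000000000000000000000000000000000000000"))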
|
# coding: utf-8
from django.conf.urls import patterns, url
urlpatterns = patterns('pydesk.apps.configuration.user.views',
url(r'^/list[/]?$', 'user_list', name='user_list'),
url(r'^/edit[/]?$', 'user_edit', name='user_edit'),
url(r'^/add[/]?$', 'user_add', name='user_add'),
url(r'^/ajax/list[/]?$', 'user_ajax_list', name='user_ajax_list'),
url(r'^/ajax/add/save[/]?$', 'user_ajax_add_save', name='user_ajax_add_save'),
url(r'^/ajax/edit/save[/]?$', 'user_ajax_edit_save', name='user_ajax_edit_save'),
url(r'^/enterprise/edit[/]?$', 'user_enterprise_edit', name='user_enterprise_edit'),
url(r'^/enterprise/ajax/edit/save[/]?$', 'user_enterprise_ajax_edit_save', name='user_enterprise_ajax_edit_save'),
url(r'^/equip/edit[/]?$', 'user_equip_edit', name='user_equip_edit'),
url(r'^/equip/ajax/edit/save[/]?$', 'user_equip_ajax_edit_save', name='user_equip_ajax_edit_save'),
url(r'^/project/edit[/]?$', 'user_project_edit', name='user_project_edit'),
url(r'^/project/ajax/edit/save[/]?$', 'user_project_ajax_edit_save', name='user_project_ajax_edit_save'),
)
|
from __future__ import print_function
# A script to help you with manipulating CSV-files. This is especially necessary when dealing with
# CSVs that have more than 65536 lines because those can not (yet) be opened in Excel or Numbers.
# This script works with the example wintergames_winners.csv, which is an excerpt from
# https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results/#athlete_events.csv
# This script FILTERS rows from the CSV according to the condition you give below.
#
# Usage:
# - Adjust filenames and delimiters.
# - Write your condition. Could be anything that python is able to check
# Examples:
#   if float(row['Age']) <= 20:      Will keep rows where 'Age' is less than or equal to 20.
#                                    Note that you have to convert row['Age'] to a number
#                                    with float()
#   if not 'Ski' in row['Sport']:    Will remove all rows where 'Sport' contains the
#                                    string 'Ski'
# ---------------------------------------------
# Change the parameters according to your task:
# Give the name of the CSV file you want to filter
readFileName = 'wintergames_winners.csv' # <--- Adjust here
# The result will be a new CSV file:
writeFileName = 'wintergames_winners_filtered.csv' # <--- Adjust here (has to be different than readFileName)
# What delimiter is used in your CSV? Usually ',' or ';'
readDelimiter = ';' # <--- Adjust here (have a look in your source CSV)
# You can give a different delimiter for the result.
writeDelimiter = ';' # <--- Adjust here (';' is usually good)
import csv
from collections import OrderedDict
readFile = open(readFileName)
reader = csv.DictReader(readFile, delimiter=readDelimiter)
writeFile = open(writeFileName, 'w')
writer = csv.writer(writeFile, delimiter=writeDelimiter)
# This writes the field names to the result.csv
writer.writerow(reader.fieldnames)
rows = list(reader)
numRows = 0
perc = 0
for i, row in enumerate(rows):
if float(i) / len(rows) > perc:
print('#', end='')
perc = perc + 0.01
# Give your conditions in the next line
if row['NOC'] == 'GER' or row['NOC'] == 'GDR': # <--- Adjust your conditions here
sorted_row = OrderedDict(sorted(row.items(), key=lambda item: reader.fieldnames.index(item[0])))
writer.writerow(sorted_row.values())
numRows = numRows + 1
print('\nKept %d from %d rows' % (numRows, len(rows)))
|
import os
from dotenv import load_dotenv
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'sms.db')
JWT_SECRET_KEY = os.environ.get('JWT_SECRET_KEY') or 'super-secret'
JWT_BLACKLIST_ENABLED = True
JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=365)
JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=700)
WECHAT_APPID = os.environ.get('WECHAT_APPID')
WECHAT_APP_SECRET = os.environ.get('WECHAT_APP_SECRET')
REDIS_HOST = os.environ.get('REDIS_HOST') or 'localhost'
REDIS_PORT = os.environ.get('REDIS_PORT') or 6379
SUPER_ID = os.environ.get('SUPER_ID')
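# --- Hypothetical usage sketch (not part of the original module) ---
# A Flask application typically consumes this class via from_object(); the
# app construction below is an illustrative assumption, not project code.
def _example_create_app():  # pragma: no cover
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(Config)
    return app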
|
from copy import deepcopy
# https://stackoverflow.com/a/7205107
def merge(a, b, path=None):
"""merges b into a"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
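# --- Hypothetical usage sketch (not part of the original helpers) ---
# Quick illustration of merge(): nested dicts are merged recursively and
# non-dict values from `b` win. The literal keys below are arbitrary examples.
def _example_merge():
    base = {"service": "vertica", "settings": {"timeout": 5, "retries": 1}}
    merge(base, {"settings": {"timeout": 10}})
    assert base == {"service": "vertica", "settings": {"timeout": 10, "retries": 1}}
    return base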
def override_config(custom_conf):
"""Overrides the vertica configuration and reinstalls the previous
afterwards."""
from ddtrace import config
def provide_config(func):
def wrapper(*args, **kwargs):
orig = deepcopy(config.vertica)
merge(config.vertica, custom_conf)
r = func(*args, **kwargs)
config._add("vertica", orig)
return r
return wrapper
return provide_config
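# --- Hypothetical usage sketch (not part of the original helpers) ---
# Shows the intended shape of a test wrapped by override_config(): the custom
# values are merged into ddtrace's config.vertica before the body runs and the
# previous configuration is restored afterwards. The "service_name" key is an
# assumption used purely for illustration.
@override_config({"service_name": "vertica-test"})
def _example_test_service_name():  # pragma: no cover
    from ddtrace import config
    assert config.vertica["service_name"] == "vertica-test"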
|
from concurrent.futures import ThreadPoolExecutor
from pydash import py_
from cv2 import cv2
import numba
import time
import re
import pathlib
import numpy
import os
import shutil
import typing
import dataclasses
import typing
import argparse
import sys
# ffmpeg frame numbering starts at 1
# frame numbers of the subtitle images produced for render_subtitle also start at 1
# frame numbers in the subtitle file start at 1
subtitle_images = pathlib.Path("subtitle-images")
FILENAME_EXPR = re.compile(
    r"(?P<type>(major)|(minor))-subtitle-(?P<id>[0-9]+)-(?P<begin>[0-9]+)-(?P<end>[0-9]+)\.png")
# These are the default settings
BOTTOM_OFFSET = 40  # distance from the bottom edge of the main subtitle to the bottom of the video
TOP_OFFSET = 40  # distance from the top edge of the secondary subtitle to the top of the video
INPUT_VIDEO_FILENAME = "lec.mp4"  # input video file name
OUTPUT_VIDEO_FILENAME = "output1.mp4"
CHUNK_SIZE = 1000
@dataclasses.dataclass
class RenderData:
flap: int
subtitle_img: numpy.ndarray = None
subtitle_id: int = -1
minor_subtitle_img: numpy.ndarray = None
minor_subtitle_id: int = -1
@numba.njit(parallel=True, nogil=True, inline="always", boundscheck=False)
def render_subtitle(src_img: numpy.ndarray, subtitle_img: numpy.ndarray, major: bool):
rowc = len(subtitle_img)
colc = len(subtitle_img[0])
img_rowc = len(src_img)
img_colc = len(src_img[0])
if major:
lurow = img_rowc-BOTTOM_OFFSET-rowc
else:
lurow = TOP_OFFSET
lucol = (img_colc-colc)//2
    # (lurow, lucol): top-left corner of the subtitle area
    bg_area = src_img[lurow:lurow+rowc, lucol:lucol+colc]  # crop the background region
for r in range(rowc):
for c in range(colc):
            # background color (black): blend semi-transparently
if subtitle_img[r, c][0] == 0 and subtitle_img[r, c][1] == 0 and subtitle_img[r, c][2] == 0:
bg_area[r, c] = (bg_area[r, c]+subtitle_img[r, c])//2
else:
                # non-background color: copy opaquely
bg_area[r, c] = subtitle_img[r, c]
src_img[lurow:lurow+rowc, lucol:lucol+colc] = bg_area
@numba.njit(nogil=True)
def render_subtitle_wrapper(arg: typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]):
src_img, major_subtitle_img, minor_subtitle_img = arg
if len(major_subtitle_img) != 0:
render_subtitle(src_img, major_subtitle_img, True)
if len(minor_subtitle_img) != 0:
render_subtitle(src_img, minor_subtitle_img, False)
return src_img
def main():
    arg_parser = argparse.ArgumentParser(description="Embed subtitles into a video")
    arg_parser.add_argument(
        "--chunk-size", default=CHUNK_SIZE, help=f"Number of frames to render per chunk (default {CHUNK_SIZE})", type=int, required=False)
    arg_parser.add_argument(
        "--input", "-i", default=INPUT_VIDEO_FILENAME, help=f"Input video file name (mp4 format, default '{INPUT_VIDEO_FILENAME}')", type=str, required=False)
    arg_parser.add_argument(
        "--output", "-o", default=OUTPUT_VIDEO_FILENAME, help=f"Output video file name (mp4 format, default '{OUTPUT_VIDEO_FILENAME}')", type=str, required=False)
parse_result = arg_parser.parse_args()
chunk_size = parse_result.chunk_size
input_file_name = parse_result.input
output_file_name = parse_result.output
begin_time = time.time()
video_reader = cv2.VideoCapture(input_file_name)
video_fps = int(video_reader.get(cv2.CAP_PROP_FPS))
video_shape = (int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
total_flaps = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
print(
f"Video shape(width,height) = {video_shape}, FPS = {video_fps}, has {total_flaps} frames in total.")
print(f"Input file: {input_file_name}")
print(f"Output file: {output_file_name}")
print(f"Chunk size: {chunk_size}")
video_writer = cv2.VideoWriter(output_file_name, cv2.VideoWriter_fourcc(
*"mp4v"), video_fps, video_shape, True)
renderdata = [RenderData(flap=i, subtitle_img=None, subtitle_id=-1, minor_subtitle_id=-1, minor_subtitle_img=None,
) for i in range(0, total_flaps)]
for item in os.listdir(subtitle_images):
match_result = FILENAME_EXPR.match(item)
groupdict = match_result.groupdict()
        # frame numbers in the subtitle file start at 1
begin = int(groupdict["begin"])-1
end = int(groupdict["end"])-1
subtitle_type = groupdict["type"]
subtitle_id = int(groupdict["id"])
# print(subtitle_images/item)
image_data = cv2.imread(str(subtitle_images/item))
if subtitle_type == "major":
for j in range(begin, end+1):
if j >= len(renderdata):
break
renderdata[j] = RenderData(
flap=j,
subtitle_img=image_data,
subtitle_id=subtitle_id,
minor_subtitle_id=renderdata[j].minor_subtitle_id,
minor_subtitle_img=renderdata[j].minor_subtitle_img)
else:
for j in range(begin, end+1):
if j >= len(renderdata):
break
renderdata[j] = RenderData(
flap=j,
subtitle_img=renderdata[j].subtitle_img,
subtitle_id=renderdata[j].subtitle_id,
minor_subtitle_img=image_data,
minor_subtitle_id=subtitle_id,
)
print(f"{len(renderdata)} flaps loaded")
pool = ThreadPoolExecutor()
empty_frame = numpy.ndarray([0, 0, 0])
for seq in py_.chunk(renderdata, chunk_size):
chunk_begin = time.time()
print(
f"Rendering for {len(seq)} flaps, range from {seq[0].flap} to {seq[-1].flap}")
count = len(seq)
frames = []
print("Decoding frames..")
for _ in range(count):
ok, frame = video_reader.read()
            assert ok, "OpenCV should never fail to read a frame here"
frames.append(frame)
print(f"{len(seq)=} {len(frames)=}")
assert len(seq) == len(frames)
print("Frames loaded.")
args = [(frame, (empty_frame if render_data.subtitle_img is None else render_data.subtitle_img),
(empty_frame if render_data.minor_subtitle_img is None else render_data.minor_subtitle_img)) for frame, render_data in zip(frames, seq)]
output: typing.List[numpy.ndarray] = list(pool.map(
render_subtitle_wrapper, args))
print("Render done.")
for frame in output:
video_writer.write(frame)
chunk_end = time.time()
print(f"Output ok with {chunk_end-chunk_begin}s .")
pool.shutdown()
video_reader.release()
video_writer.release()
end_time = time.time()
print(f"Task done, {end_time-begin_time}s")
if __name__ == "__main__":
main()
|
# Generated by Django 1.10.1 on 2017-09-11 05:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('show', '0001_initial'),
('venue', '0002_auto_20170710_1818'),
('ticket', '0009_account_phone'),
]
operations = [
migrations.AlterUniqueTogether(
name='ticket',
unique_together={('show', 'seat')},
),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from mechanize import Browser
from lxml.html import fromstring
from time import *
from thread import start_new_thread
import requests
import sys
class DevNull:
def write(self, msg):
pass
class Main:
email = ''
password = ''
html = ''
authenticity_token = ''
accidents = {}
status = ''
cars = {}
fireman_at_accident = 0
session = None
headers = {
"Content - Type": "application / x - www - form - urlencoded",
"User-Agent":
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36",
}
missingcases = {
'Loeschfahrzeug (LF)': 'LF 20/16',
'Loeschfahrzeuge (LF)': 'LF 20/16',
'Feuerwehrleute': 'LF 20/16',
'FuStW': 'FuStW',
'ELW 1': 'ELW 1',
'ELW 2': 'ELW 2',
'Drehleitern (DLK 23)': 'DLK 23',
'GW-Messtechnik': 'GW-Messtechnik',
'GW-A oder AB-Atemschutz': 'GW-A',
'Ruestwagen oder HLF': 'RW',
'GW-Oel': u'GW-รl',
'GW-Gefahrgut': 'GW-Gefahrgut',
'GW-Hoehenrettung': u'GW-Hรถhenrettung',
'Schlauchwagen (GW-L2 Wasser': 'SW Kats',
'': ''
}
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stderr = DevNull()
self.email = raw_input('Email: ')
self.password = raw_input('Passwort: ')
self.login()
while True:
start_new_thread(self.thread, ())
            if int(strftime("%M")) % 30 == 0 and int(strftime("%S")) < 10:
self.login()
sleep(10)
def thread(self):
self.get_all_accidents()
for key, accident in self.accidents.iteritems():
if accident['status'] == 'rot' or accident['status'] == 'elb':
if accident['name'] != '"Feuerprobealarm an Schule"':
self.get_accident(key, accident)
def login(self):
url = "https://www.leitstellenspiel.de/users/sign_in"
br = Browser()
response = br.open(url)
self.parse_token(response.read())
data = {
'authenticity_token': self.authenticity_token,
'user[email]': self.email,
'user[password]': self.password,
'user[remember_me]': 1,
'commit': 'Einloggen'
}
self.session = requests.session()
self.session.headers.update(self.headers)
request = self.session.post(url, data=data)
self.parse_token(request.text)
print strftime("%H:%M:%S") + ': Erfolgreich Eingeloggt!'
def parse_token(self, html):
tree = fromstring(html)
self.authenticity_token = tree.xpath('//meta[@name="csrf-token"]/@content')[0]
def get_all_accidents(self):
mission = self.session.get('https://www.leitstellenspiel.de/')
startpoint = mission.text.find('missionMarkerAdd')
endpoint = mission.text.find('missionMarkerBulkAdd', startpoint)
ids = mission.text[startpoint:endpoint]
ids = ids.split('\n')
i = 0
self.accidents = {}
while i < len(ids) - 1:
idpoint = ids[i].find(',"id":')
statusstartpoint = ids[i].find(',"icon":')
statusendpoint = ids[i].find(',"caption":', statusstartpoint)
missingstartpoint = ids[i].find(',"missing_text":')
missingendpoint = ids[i].find(',"id":', missingstartpoint)
namestartpoint = ids[i].find(',"caption":')
nameendpoint = ids[i].find(',"captionOld":', namestartpoint)
t = 0
missingarray = {}
if 'Feuerwehrleute' in ids[i][missingstartpoint + 16: missingendpoint]:
missing = ids[i][missingstartpoint + 16: missingendpoint][1:].split(',')
while t < len(missing):
if missing[t][2:][-1:] == '"':
missingarray[int(missing[t][24:26])] = missing[t][27:-2]
else:
missingarray[int(missing[t][24:26])] = missing[t][27:-1]
t = t + 1
else:
missing = ids[i][missingstartpoint + 16: missingendpoint][43:].split(',')
while t < len(missing):
if missing[t][2:][-1:] == '"':
missingarray[missing[t][:2]] = missing[t][2:][:-1]
else:
missingarray[missing[t][:2]] = missing[t][2:]
t = t + 1
self.accidents[ids[i][idpoint + 6: idpoint + 15]] = {
'status': ids[i][statusstartpoint + 8: statusendpoint][-4:-1],
'missing': missingarray,
'name': str(ids[i][namestartpoint + 10: nameendpoint][1:])
.replace("\u00fc", "รผ")
.replace("\u00f6", "รถ")
.replace("\u00d6", "ร")
.replace("\u00df", "ร")
.replace("\u00e4", "รค")
.replace("\u00c4", "ร"),
'vehicle_state': ''
}
i = i + 1
def get_accident(self, accidentid, accident):
mission = self.session.get('https://www.leitstellenspiel.de/missions/' + accidentid)
if not self.parse_cars_needed(mission.text):
return
self.parse_available_cars(mission.text)
if accident['missing'] != {'': ''}:
for count, string in accident['missing'].iteritems():
string = str(string).replace("\u00f6", "oe")
string = string.replace("\u00d6", "Oe")
string = string.replace("\u00fc", "ue")
if string[0] == ' ':
string = string[1:]
t = 0
if string == 'Feuerwehrleute':
self.parse_fireman_at_accident(mission.text)
try:
newcount = (int(count) - int(self.fireman_at_accident)) // 9 + 1
except ValueError:
newcount = 0
while t < newcount:
for carid, cartype in self.cars.items():
if cartype == self.missingcases[string] and carid in self.cars:
self.send_car_to_accident(accidentid, carid)
del self.cars[carid]
print strftime("%H:%M:%S") + ': ' + cartype + ' zu ' + accident['name'] + ' alarmiert'
t = t + 1
break
else:
try:
newcount = int(count)
except ValueError:
newcount = 0
while t < newcount:
for carid, cartype in self.cars.items():
if cartype == self.missingcases[string] and carid in self.cars:
self.send_car_to_accident(accidentid, carid)
del self.cars[carid]
print strftime("%H:%M:%S") + ': ' + cartype + ' zu ' + accident['name'] + ' alarmiert'
t = t + 1
break
else:
if accident['status'] == 'rot':
for key, value in self.cars.items():
if value == 'LF 20/16':
self.send_car_to_accident(accidentid, key)
print strftime("%H:%M:%S") + ': ' + value + ' zu ' + accident['name'] + ' alarmiert'
break
@staticmethod
def parse_cars_needed(html):
tree = fromstring(html)
vehicle_state = tree.xpath('//h4[@id="h2_vehicle_driving"]//text()')
if vehicle_state == ['Fahrzeuge auf Anfahrt']:
return False
else:
return True
def parse_fireman_at_accident(self, html):
tree = fromstring(html)
people = tree.xpath('//div[small[contains(., "Feuerwehrleute")]]/small//text()')
for value in people:
if value[11:-15] == 'Feuerwehrleute':
self.fireman_at_accident = value[38:]
def parse_available_cars(self, html):
tree = fromstring(html)
cars = tree.xpath('//tr[@class="vehicle_select_table_tr"]/@id')
types = tree.xpath('//tr[@class="vehicle_select_table_tr"]/@vehicle_type')
self.cars = {}
for i, value in enumerate(cars):
self.cars[value[24:]] = types[i]
def send_car_to_accident(self, accident, car):
url = 'https://www.leitstellenspiel.de/missions/' + accident + '/alarm'
data = {
'authenticity_token': self.authenticity_token,
'commit': 'Alarmieren',
'next_mission': 0,
'vehicle_ids[]': car
}
self.session.post(url, data=data)
main = Main()
|
import datetime
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Reference:
# https://flask-sqlalchemy.palletsprojects.com/en/2.x/models/
# https://docs.sqlalchemy.org/en/14/core/metadata.html#sqlalchemy.schema.Column
# https://flask-sqlalchemy.palletsprojects.com/en/2.x/models/#many-to-many-relationships
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(128), unique=True, nullable=False)
password = db.Column(db.String(128), nullable=False)
tweets = db.relationship('Tweet', backref='user', cascade="all,delete")
def __init__(self, username: str, password: str):
self.username = username
self.password = password
def serialize(self):
return{
'id': self.id,
'username': self.username
}
likes_table = db.Table(
'likes',
db.Column(
'user_id', db.Integer,
db.ForeignKey('users.id'),
primary_key=True
),
db.Column(
'tweet_id', db.Integer,
db.ForeignKey('tweets.id'),
primary_key=True
),
db.Column(
'created_at', db.DateTime,
default=datetime.datetime.utcnow,
nullable=False
)
)
class Tweet(db.Model):
__tablename__ = 'tweets'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
content = db.Column(db.String(280), nullable=False)
created_at = db.Column(
db.DateTime,
default=datetime.datetime.utcnow,
nullable=False
)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
likes = db.relationship(
'User', secondary=likes_table,
lazy='subquery',
backref=db.backref('liked_tweets', lazy=True)
)
def __init__(self, content: str, user_id: int):
self.content = content
self.user_id = user_id
def serialize(self):
return {
'id': self.id,
'content': self.content,
'created_at': self.created_at.isoformat(),
'user_id': self.user_id
}
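# --- Hypothetical usage sketch (not part of the original models) ---
# Illustrates how these models are typically wired into a Flask app; the
# in-memory sqlite URI and the sample credentials are assumptions for
# illustration only.
def _example_create_user_and_tweet():  # pragma: no cover
    from flask import Flask
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'  # assumed test database
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        db.create_all()
        user = User(username='alice', password='hashed-password')
        db.session.add(user)
        db.session.commit()
        tweet = Tweet(content='hello world', user_id=user.id)
        db.session.add(tweet)
        db.session.commit()
        return user.serialize(), tweet.serialize()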
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing_extensions import Protocol
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
class Step(Protocol):
__type__: str
data: TimeSeriesData
def remover(self, interpolate: bool) -> TimeSeriesData:
...
def transform(self, data: TimeSeriesData) -> object:
...
def fit(
self,
x: Union[pd.DataFrame, np.ndarray],
y: Optional[Union[pd.Series, np.ndarray]],
**kwargs: Any
) -> List[TimeSeriesData]:
...
PipelineStep = Tuple[str, Step]
class Pipeline:
"""
    CuPiK (Customized Pipeline for Kats) is created with a similar mindset
    to the sklearn Pipeline. Users can call multiple methods within the Kats library
    and run them sequentially to perform a series of useful time series processes
    at once.
    Due to the current distribution of the Kats library, we provide the ability to
    apply detectors, transformers and time series models sequentially using CuPiK.
    We also offer a bridge to sklearn: once feature extraction using TsFeatures is
    performed, users can feed the results directly to an sklearn machine learning model.
"""
remove: bool = False
useFeatures: bool = False
extra_fitting_params: Optional[Dict[str, Any]] = None
y: Optional[Union[np.ndarray, pd.Series]] = None
def __init__(self, steps: List[PipelineStep]) -> None:
"""
inputs:
steps: a list of the initialized Kats methods/sklearn machine learning model, with
the format of [('user_defined_method_name', initialized method class)]. User
can use any name for 'user_defined_method_name' for identification purpose
initialized attributes:
steps: same as the "steps" in the inputs
            metadata: a dictionary storing outputs that are not passed to the next step,
                such as results from a detector. These metadata are stored in the format
                "user_defined_method_name": output
            univariate: whether the data fed in is a single univariate time series (True)
                or a list of multiple univariate time series (False)
            functions: a lookup dictionary linking each method type in the steps to the
                processing function inside CuPiK that should be applied
"""
self.steps = steps
self.metadata: Dict[str, str] = {}
self.univariate = False
self.functions: Dict[str, Callable[..., Any]] = { # type: ignore
"detector": self.__detect__,
"transformer": self.__transform__,
"model": self.__model__,
}
def __detect__(
self,
steps: List[Step],
data: List[TimeSeriesData],
extra_params: Dict[str, Any],
) -> Tuple[List[TimeSeriesData], List[object]]:
"""
Internal function for processing the detector steps
inputs:
steps: a list of the duplicated initialized detector. We will be using each duplicated
detector to process one time series data within the data list
data: a list containing time series data to be processed
extra_params: a dictionary holding extra customized parameters to be fed in the detector
outputs:
data: a list of post-processed data for next steps
metadata: outputs from the detectors, like changepoints, outliers, etc.
"""
metadata = []
for i, (s, d) in enumerate(zip(steps, data)):
s.data = d
if not s.data.is_univariate():
msg = "Only support univariate time series, but get {type}.".format(
type=type(s.data.value)
)
logging.error(msg)
raise ValueError(msg)
s.data.time = pd.to_datetime(s.data.time)
if s.__subtype__ == "outlier":
extra_params["pipe"] = True
metadata.append(s.detector(**extra_params))
if (
self.remove and s.__subtype__ == "outlier"
): # outlier removal when the step is outlier detector,
# and user required us to remove outlier
data[i] = s.remover(interpolate=True)
return data, metadata
def __transform__(
self,
steps: List[Step],
data: List[TimeSeriesData],
extra_params: Dict[str, Any],
) -> Tuple[Union[List[TimeSeriesData], List[object]], List[object]]:
"""
Internal function for processing the transformation/transformer steps. We currently only have
tsfeatures as a transformation/transformer step in Kats.
inputs:
steps: a list of the duplicated initialized transformer. We will be using each duplicated
transformer to process one time series data within the data list
data: a list containing time series data to be processed
extra_params: a dictionary holding extra customized parameters to be fed in the transformer
outputs:
            data: a list of post-processed data for the next steps. If the user requires
                the outputs of the transformer, this becomes the transformer's output,
                turning time series data into tabular data; otherwise the data is passed
                through unchanged at the current stage of Kats.
metadata: outputs from the transformer
"""
metadata: List[object] = []
for s, d in zip(steps, data):
metadata.append(s.transform(d))
if self.useFeatures:
return metadata, metadata
else:
return data, metadata
def __model__(
self,
steps: List[Step],
data: List[TimeSeriesData],
extra_params: Dict[str, Any],
) -> Tuple[List[Step], Optional[List[object]]]:
"""
Internal function for processing the modeling step
inputs:
steps: a list of the duplicated initialized time series model in Kats. We will be using
each duplicated model to process one time series data within the data list
data: a list containing time series data to be processed
extra_params: a dictionary holding extra customized parameters to be fed in the model
outputs:
data: a list of fitted time series model
None as the placeholder of metadata
"""
result = []
for s, d in zip(steps, data):
s.data = d
if not isinstance(d.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(d.value)
)
logging.error(msg)
raise ValueError(msg)
s.fit(**extra_params)
result.append(s)
return result, None
def _fit_sklearn_(
self,
step: Step,
data: List[TimeSeriesData],
y: Optional[Union[pd.Series, np.ndarray]],
) -> List[TimeSeriesData]:
"""
Internal function for fitting sklearn model on a tabular data with features
extracted.
inputs:
step: an sklearn model class
data: a list with each item corresponds to an output from the feature extraction
methods in Kats
y: label data for fitting sklearn model
outputs:
step: a fitted sklearn model
"""
assert (type(data) == list) and (
type(data[0]) == dict
), "Require data preprocessed by TsFeatures, please set useFeatures = True"
assert y is not None, "Missing dependent variable"
df = pd.DataFrame(data).dropna(axis=1)
return step.fit(df.values, y)
def __fit__(
self,
n: str,
s: Step,
data: List[TimeSeriesData],
) -> List[
TimeSeriesData
]: # using list output for adaption of current multi-time series scenarios
"""
Internal function for performing the detailed fitting functions
inputs:
n: short for name, "user_defined_method_name"
s: short for step, a Kats method or sklearn model
data: either a list of univariate time series data or a list of dictionaries
including the output acquired using feature extraction methods in Kats
outputs:
data: either a list of post processed univariate time series data or a list
of dictionaries including the output acquired using feature extraction
methods in Kats
"""
y = self.y
if (
str(s.__class__).split()[1][1:8] == "sklearn"
): # if current step is a scikit-learn model
return self._fit_sklearn_(s, data, y)
_steps_ = [s for _ in range(len(data))]
method = s.__type__
extra_params = (self.extra_fitting_params or {}).get(n, {})
data, metadata = self.functions[method](_steps_, data, extra_params)
if metadata is not None:
self.metadata[n] = metadata # saving the metadata of the current step into
# the dictionary of {"user_defined_method_name": corresponding_metadata}
return data
def fit(
self,
data: Union[TimeSeriesData, List[TimeSeriesData]],
params: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> Union[TimeSeriesData, List[TimeSeriesData]]:
"""
This function is the external function for user to fit the pipeline
inputs:
data: a single univariate time series data or a list of multiple univariate
time series data
params: a dictionary with the extra parameters for each step. The dictionary
holds the format of {"user_defined_method_name": {"parameter": value}}
extra key word arguments:
remove: a boolean for telling the pipeline to remove outlier or not
useFeatures: a boolean for telling the pipeline whether to use TsFeatures to process
the data for sklearn models, or merely getting the features as metadata
for other usage
y: label data for fitting sklearn model, an array or a list
outputs:
data: output a single result for univariate data, or a list of results for multiple
univariate time series data fed originally in the format of a list. Determined by
the last step, the output could be processed time series data, or fitted kats/sklearn
model, etc.
"""
# Initialize a place holder for params
if params is None:
params = {}
# Judging if extra functions needed
####
self.remove = kwargs.get("remove", False) # remove outliers or not
self.useFeatures = kwargs.get(
"useFeatures", False
) # do you want to use tsfeatures as transformation or analyzer
self.y = kwargs.get("y", None)
####
# Extra parameters for specific method of each step
self.extra_fitting_params = params
if isinstance(data, list):
univariate = False
else:
# Put univariate data into a list
univariate = self.univariate = True
data = [data]
for (
n,
s,
) in self.steps: # Iterate through each step and perform the internal fitting
# function
data = self.__fit__(n, s, data)
return data[0] if univariate else data
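# --- Hypothetical usage sketch (not part of the original module) ---
# Rather than depending on a specific Kats detector, this sketch defines a
# minimal object satisfying the Step protocol (a 'transformer') and runs it
# through the pipeline, which is enough to show the calling convention. All
# names and the toy series below are illustrative assumptions.
def _example_pipeline():  # pragma: no cover
    class _MeanTransformer:
        __type__ = "transformer"

        def transform(self, data: TimeSeriesData) -> float:
            return float(data.value.mean())

    ts = TimeSeriesData(
        pd.DataFrame({
            "time": pd.date_range("2021-01-01", periods=30, freq="D"),
            "value": np.arange(30, dtype=float),
        })
    )
    pipe = Pipeline([("mean", _MeanTransformer())])
    pipe.fit(ts)
    return pipe.metadata["mean"]  # a one-element list holding the series mean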
|
# coding=UTF-8
# python version 3.5
# os version win10
import os
import sys
def getAllWifi():
wifiAll = os.popen('netsh wlan show profiles').read()
wifiResultArr = []
infoArr = wifiAll.split('\n')
for info in infoArr:
        if info.find('所有用户配置文件') > -1:  # the "All User Profile" line in the Chinese-locale netsh output
wifiArr = info.split(':')
if len(wifiArr) > 1:
name = wifiArr[1].rstrip().lstrip()
if name != '':
wifiResultArr.append(name)
return wifiResultArr
def getKey(wifi=''):
if wifi != '' and wifi is not None:
wifiNameArr = [wifi]
else:
wifiNameArr = getAllWifi()
if wifiNameArr is None:
return
for wifiName in wifiNameArr:
if wifiName != '':
cm = 'netsh wlan show profiles name="%s" key="clear"' % (wifiName)
wifiInfo = os.popen(cm).read()
            wifiKeyArr = wifiInfo.split('关键内容')  # split on the "Key Content" field of the Chinese-locale output
if len(wifiKeyArr) > 1:
lineArr = wifiKeyArr[1].split('\n')
if len(lineArr) > 0:
key = lineArr[0].lstrip()[1:]
print("%s ๅฏ็ ๏ผ%s" % (wifiName, key))
else:
                print('no password found for wifi %s' % wifiName)
wifi = None
if len(sys.argv) > 1:
wifi = sys.argv[1]
if __name__ == '__main__':
getKey(wifi)
|
#: Check if a field that implements storage, or is a runtime constant, is
#: missing its reset value.
MISSING_RESET = 1<<0
#: Check if a field's bit offset is not explicitly specified.
#:
#: Some organizations may want to enforce explicit assignment of bit offsets to
#: avoid unexpected field packing.
IMPLICIT_FIELD_POS = 1<<1
#: Check if a component's address offset is not explicitly assigned.
#:
#: Some organizations may want to enforce explicit assignment of addresses to
#: avoid unintended address map changes.
IMPLICIT_ADDR = 1<<2
#: Check if an instance array's address stride is not a power of two.
STRIDE_NOT_POW2 = 1<<3
#: Enforce that all addressable components are aligned based on their size.
#: Alignment is determined by the component's size rounded up to the next power
#: of two.
#:
#: Strict self-alignment may be desirable since it can simplify address decode
#: logic for hierarchical designs.
#:
#: This rule is a superset of ``STRIDE_NOT_POW2``.
STRICT_SELF_ALIGN = 1<<4
#: Check if an array of registers uses a stride that is not equal to the
#: register's width.
#:
#: Many export formats are unable to natively represent register arrays that are
#: not tightly packed. (IP-XACT, UVM Virtual registers, C arrays, etc..)
SPARSE_REG_STRIDE = 1<<5
#-------------------------------------------------------------------------------
#: Enable all warnings.
ALL = (
MISSING_RESET
| IMPLICIT_FIELD_POS
| IMPLICIT_ADDR
| STRIDE_NOT_POW2
| STRICT_SELF_ALIGN
| SPARSE_REG_STRIDE
)
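# --- Hypothetical usage sketch (not part of the original constants) ---
# The values above are plain bitmasks, so callers can combine and test them
# with bitwise operators; the names below are illustrative only.
def _example_flag_usage():
    enabled_checks = MISSING_RESET | IMPLICIT_ADDR
    assert enabled_checks & MISSING_RESET
    assert not enabled_checks & SPARSE_REG_STRIDE
    assert ALL & STRICT_SELF_ALIGN
    return enabled_checks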
|
from typing import Optional, Tuple
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Attacker:
def __init__(self,
steps: int,
quantize: bool = True,
levels: int = 256,
max_norm: Optional[float] = None,
div_prob: float = 0.9,
loss_amp: float = 4.0,
device: torch.device = torch.device('cpu')) -> None:
self.steps = steps
self.quantize = quantize
self.levels = levels
self.max_norm = max_norm
self.div_prob = div_prob
self.loss_amp = loss_amp
self.device = device
def input_diversity(self, image, low=270, high=299):
if random.random() > self.div_prob:
return image
rnd = random.randint(low, high)
rescaled = F.interpolate(image, size=[rnd, rnd], mode='bilinear')
h_rem = high - rnd
w_rem = high - rnd
pad_top = random.randint(0, h_rem)
pad_bottom = h_rem - pad_top
pad_left = random.randint(0, w_rem)
pad_right = w_rem - pad_left
padded = F.pad(rescaled, [pad_top, pad_bottom, pad_left, pad_right], 'constant', 0)
return padded
def attack(self,
model: nn.Module,
inputs: torch.Tensor,
labels_true: torch.Tensor,
labels_target: torch.Tensor)-> torch.Tensor:
batch_size = inputs.shape[0]
delta = torch.zeros_like(inputs, requires_grad=True)
# setup optimizer
optimizer = optim.SGD([delta], lr=1, momentum=0.9)
# for choosing best results
best_loss = 1e4 * torch.ones(inputs.size(0), dtype=torch.float, device=self.device)
best_delta = torch.zeros_like(inputs)
for _ in range(self.steps):
if self.max_norm:
delta.data.clamp_(-self.max_norm, self.max_norm)
if self.quantize:
delta.data.mul_(self.levels - 1).round_().div_(self.levels - 1)
adv = inputs + delta
div_adv = self.input_diversity(adv)
logits = model(div_adv)
ce_loss_true = F.cross_entropy(logits, labels_true, reduction='none')
ce_loss_target = F.cross_entropy(logits, labels_target, reduction='none')
# fuse targeted and untargeted
loss = self.loss_amp * ce_loss_target - ce_loss_true
is_better = loss < best_loss
best_loss[is_better] = loss[is_better]
best_delta[is_better] = delta.data[is_better]
loss = torch.mean(loss)
optimizer.zero_grad()
loss.backward()
# renorm gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=float('inf'), dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
optimizer.step()
# avoid out of bound
delta.data.add_(inputs)
delta.data.clamp_(0, 1).sub_(inputs)
return inputs + best_delta
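# --- Hypothetical usage sketch (not part of the original module) ---
# Shows the intended calling convention of Attacker.attack(): a classifier, a
# batch of images in [0, 1], the true labels and the target labels. The tiny
# linear model and the random tensors below are placeholders, not a real
# evaluation setup.
def _example_attack():  # pragma: no cover
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 299 * 299, 10))
    attacker = Attacker(steps=10, max_norm=16 / 255, device=torch.device('cpu'))
    images = torch.rand(2, 3, 299, 299)
    labels_true = torch.tensor([0, 1])
    labels_target = torch.tensor([2, 3])
    return attacker.attack(model, images, labels_true, labels_target)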
|
from __future__ import absolute_import
from checkout_sdk.common.enums import Currency
from checkout_sdk.payments.payments import PayoutRequest, PaymentRequestCardDestination
from tests.checkout_test_utils import VisaCard, phone, LAST_NAME, FIRST_NAME, new_uuid, assert_response, retriable
def test_should_request_payout(default_api):
destination = PaymentRequestCardDestination()
destination.name = VisaCard.name
destination.number = VisaCard.number
destination.first_name = FIRST_NAME
destination.last_name = LAST_NAME
destination.expiry_year = VisaCard.expiry_year
destination.expiry_month = VisaCard.expiry_month
destination.billing_address = phone()
destination.phone = phone()
payout_request = PayoutRequest()
payout_request.destination = destination
payout_request.capture = False
payout_request.reference = new_uuid()
payout_request.amount = 5
payout_request.currency = Currency.GBP
payout_request.reference = new_uuid()
payout_response = default_api.payments.request_payout(payout_request)
assert_response(payout_response,
'id',
'reference',
'status',
'customer',
'customer.id')
payment = retriable(callback=default_api.payments.get_payment_details,
payment_id=payout_response.id)
assert_response(payment,
'destination',
'destination.bin',
# 'destination.card_category',
# 'destination.card_type',
# 'destination.issuer',
# 'destination.issuer_country',
# 'destination.product_id',
# 'destination.product_type'
'destination.expiry_month',
'destination.expiry_year',
'destination.last4',
'destination.fingerprint',
'destination.name')
|
from .log import init_logger
init_logger()
|
import os
import yaml
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_squid_is_installed(host):
squid = host.package("squid")
assert squid.is_installed
def test_squid_running_and_enabled(host):
squid = host.service("squid")
assert squid.is_running
assert squid.is_enabled
def test_squid_syntax(host):
squid = host.run("/usr/sbin/squid -k parse").rc
assert squid == 0
def test_squid_process(host):
squid = host.run("/usr/sbin/squid -k check").rc
assert squid == 0
def test_squid_custom_files(host):
    custom_whitelist = yaml.safe_load(open(
        "../resources/files/custom_whitelist.yml"))
for access in custom_whitelist['squid_custom_whitelist']:
# src
cmd = host.run("cat /etc/squid/{}_src".format(access['name']))
assert cmd.rc == 0
files = cmd.stdout.splitlines()
assert files == access['src']
# dest
cmd = host.run("cat /etc/squid/{}_dstdomain".format(access['name']))
assert cmd.rc == 0
files = cmd.stdout.splitlines()
assert files == access['dest']
|
# Functionality so a custom tpose can be made with fk offsets to put into the bind pose
'''
TODO:
* When reposer exists, copy unlocked translate and rotates
* Just copy the values onto the node so the old one can be deleted
* Delete old version, if it exists
* Rename the old one before making the new one
'''
from __future__ import absolute_import, division, print_function
import contextlib
from pymel.core import cmds, delete, duplicate, group, hasAttr, joint, listRelatives, ls, makeIdentity, objExists, parentConstraint, PyNode, showHidden, xform, evalDeferred
from pymel.core import displaySmoothness # noqa for an evalDeferred
from pdil import simpleName
import pdil
from ... import util
from ..._core import find
from ..._lib import proxyskel
def updateReposers(cards=None, missingOnly=False, progress=None):
''' (Re)Make the given cards' reposers.
Args:
cards: Cards to operate on, `None`, the default, operates on all cards.
missingOnly: If `True`, only make missing reposers.
progress: Optional progressWindow passed to `generateReposer()`, see it for configuration.
'''
if not cards:
cards = util.selectedCards()
if not cards:
cards = find.blueprintCards()
valid = getValidReposeCards()
cards = [card for card in cards if not getRCard(card, valid)]
otherCards = find.cardJointBuildOrder()
for c in cards:
otherCards.remove(c)
#with pdil.ui.progressWin(title='Building reposers', max=len(cards) * 2) as prog:
with reposeToBindPose(otherCards):
generateReposer( cards, progress=progress)
# For some reason the shaders are always messed up so burp the display to correct it, and only when evalDeferred
evalDeferred( "displaySmoothness( '{}', divisionsU=0, divisionsV=0, pointsWire=4, pointsShaded=1, polygonObject=1)".format( getReposeContainer().name() ) )
#------------------------------------------------------------------------------
def reposerExists():
''' Returns `True` if repose cards exist.
'''
return bool( getReposeRoots() )
def getValidReposeCards():
''' Return valid repose cards, i.e. excludes the 'old' ones.
'''
return listRelatives( listRelatives( getReposeRoots(), ad=True, type='nurbsSurface' ), p=True )
def getRJoint(bpj):
''' Get repositioned joint from blueprint joint
'''
for plug in bpj.message.listConnections(s=False, d=True, p=True):
if plug.attrName() == 'bpj':
return plug.node()
def getRCard(card, validCards=None):
''' Given a regular bp card, returns the reposeCard from the list of `validCards`
Use `getValidReposeCards()` for `validCards` (this is performance optimization).
'''
validCards = validCards if validCards else getValidReposeCards()
for plug in card.message.listConnections(s=False, d=True, p=True):
if plug.attrName() == 'bpCard':
if plug.node() in validCards:
return plug.node()
def getReposeContainer():
name = 'ReposeContainer'
mainBP = proxyskel.masterGroup()
for obj in listRelatives(mainBP):
if obj.name() == name:
return obj
grp = group(n=name, em=True)
grp.visibility.set(False)
grp.setParent(mainBP)
return grp
"""
def getReposeCardNormal(obj):
a = dt.Vector( xform(obj.cv[0][0], q=True, ws=True, t=True) )
b = dt.Vector( xform(obj.cv[1][0], q=True, ws=True, t=True) )
c = dt.Vector( xform(obj.cv[0][1], q=True, ws=True, t=True) )
v1 = a - b
v2 = c - b
return v1.cross(v2).normal()
def matchWorldAxis(obj, viaOrbit=False):
normal = getReposeCardNormal(obj)
x_abs, y_abs, z_abs = abs(normal)
x, y, z = normal
if x_abs > y_abs and x_abs > z_abs:
#X_abs DOM
pass
elif y_abs > x_abs and y_abs > z_abs:
yRot = dt.Vector( x, 0, z ).angle( dt.Vector(1, 0, 0) )
print(yRot)
elif z_abs > x_abs and z_abs > y_abs:
#Z DOM
pass
"""
def renameReposeObj(obj, targetName, previous):
''' Rename `obj` to `targetName`, suffixing the (possible) existing one and storing transform info.
'''
#print(' Rename {} with prev of {}'.format(obj, previous) )
oldName = targetName.replace('repose', 'old')
if objExists(oldName): # and '_helper' not in oldName:
        # For now, just skip _helpers, though I should give them generated unique names
print(oldName, 'exists, deleting')
delete(oldName)
if objExists(targetName):
old = PyNode(targetName)
old.rename( oldName )
previous = old if not previous else previous
if previous:
addVector(obj, 'prevRot', previous.r.get())
addVector(obj, 'prevTrans', previous.t.get())
addVector(obj, 'prevRotWorld', xform(previous, q=True, ws=True, ro=True))
addVector(obj, 'prevTransWorld', xform(previous, q=True, ws=True, t=True))
obj.rename( targetName )
def reposeLink(srcObj, reposeObj, attr):
    ''' Removes any existing connections (there should be at most one, if any) before hooking up the new object.
    Returns the previously connected object, and a list of attrs on the orig that were unlocked.
'''
existing = [plug for plug in srcObj.message.listConnections(d=True, s=False, p=True) if plug.attrName() == attr]
for plug in existing:
srcObj.message.disconnect(plug)
reposeObj.addAttr(attr, at='message')
srcObj.message >> reposeObj.attr(attr)
unlocked = []
if existing:
unlocked = [attr for attr in [t + a for t in 'tr' for a in 'xyz'] if not existing[0].node().attr(attr).isLocked()]
'''
# Inherit unlocks from previous. Not sure this is as useful with the orbits.
for attr in [t + a for t in 'tr' for a in 'xyz']:
print(existing[0].node().attr(attr), existing[0].node().attr(attr).isLocked())
if not existing[0].node().attr(attr).isLocked():
reposeObj.attr(attr).unlock()
reposeObj.attr(attr).showInChannelBox(True)
'''
return existing[0].node(), unlocked
return None, unlocked
def stripReposerCard(card):
''' Make sure the reposer card doesn't look like a blueprint card.
'''
for attr in [
'fossilRigData',
'outputLeft',
'outputCenter',
'outputRight',
'outputShapeCenterfk',
'outputShapeCenterik',
'outputShapeLeftfk',
'outputShapeLeftik',
'outputShapeRightfk',
'outputShapeRightik',
'fossilRigState',
]:
if card.hasAttr(attr):
card.deleteAttr(attr)
def makeMirrored(reposeJoint):
''' Makes a joint that mirrors the transforms of the given repose joint, possibly returning an already existing one.
'''
if hasAttr(reposeJoint, 'mirrorGroup'):
con = reposeJoint.mirrorGroup.listConnections()
if con:
return con[0]
else:
reposeJoint.addAttr('mirrorGroup', at='message')
root = PyNode(reposeJoint.fullPath().split('|', 2)[1])
print(root)
for child in root.listRelatives():
if child.name() == 'mirrorGroups':
grouper = child
break
else:
grouper = group(em=True, n='mirrorGroups')
grouper.inheritsTransform.set(False)
grouper.setParent(root)
follower = group(em=True)
mirror = group(em=True)
reposeMirror = group(em=True)
pdil.math.multiply(follower.tx, -1) >> mirror.tx
follower.ty >> mirror.ty
follower.tz >> mirror.tz
follower.rx >> mirror.rx
pdil.math.multiply(follower.ry, -1) >> mirror.ry
pdil.math.multiply(follower.rz, -1) >> mirror.rz
parentConstraint(reposeJoint, follower, mo=False)
parentConstraint(mirror, reposeMirror)
reposeMirror.setParent(reposeJoint)
reposeMirror.message >> reposeJoint.mirrorGroup
follower.setParent(grouper)
mirror.setParent(grouper)
return reposeMirror
def generateReposer(cards=None, placeholder=False, progress=None):
    ''' If no cards are specified, a new reposer is built; otherwise it
    rebuilds/adds reposers for the specified cards.
Args:
cards
placeholder
progress: Optional `progressWindow` that will be `.update()`'d twice for
each card, MUST be preconfigured (in case several things are updating)
&&& TODO Verify the cards can be built in any order
'''
global jointMapping # global'd for debugging
suffix = '_placeholder' if placeholder else ''
rJoints = []
rCards = []
unlock = {} # <repose joint or card>: <list of attrs to be re-locked>
    jointMapping = {} # Lazy "bi-directional mapping" of bpj <-> reposeJoint; both are added as keys to each other
# Build all the cards and joints
if not cards:
cards = find.blueprintCards()
# Delete previous roots
for oldRoot in getReposeRoots():
oldRoot.deleteAttr('reposeRoot')
# Otherwise populate the containers with the existing reposer to build/add new stuff.
else:
#allExistingRCards = set( cmds.ls( '*.bpCard', o=True, r=True, l=True ) )
allExistingRJoints = set( cmds.ls( '*.bpj', o=True, r=True, l=True ) )
for oldRoot in getReposeRoots():
joints = cmds.listRelatives( str(oldRoot), f=True, ad=True, type='joint' )
joints = [PyNode(c) for c in allExistingRJoints.intersection(joints)]
for rj in joints:
bpj = rj.bpj.listConnections()[0]
jointMapping[rj] = bpj
jointMapping[bpj] = rj
for card in cards:
if progress:
progress.update()
rCard = duplicate(card, po=0)[0]
showHidden(rCard)
pdil.dagObj.unlock(rCard)
stripReposerCard(rCard)
targetName = simpleName(card, '{}_repose' + suffix)
previous, attrs = reposeLink(card, rCard, 'bpCard') if not placeholder else (None, [])
unlock[rCard] = attrs
renameReposeObj(rCard, targetName, previous)
for child in rCard.listRelatives():
if not child.type() == 'nurbsSurface':
delete(child)
rCards.append(rCard)
makeIdentity(rCard, t=False, r=False, s=True, apply=True)
pdil.dagObj.lockScale( rCard )
for jnt in card.joints:
reposeJoint = joint(None)
targetName = simpleName(jnt, '{}_repose' + suffix)
previous, attrs = reposeLink(jnt, reposeJoint, 'bpj') if not placeholder else (None, [])
unlock[reposeJoint] = attrs
renameReposeObj(reposeJoint, targetName, previous)
pdil.dagObj.matchTo(reposeJoint, jnt)
#assert jnt.info.get('options', {}).get('mirroredSide', False) is False, 'parent to mirrored joints not supported yet'
jointMapping[jnt] = reposeJoint
jointMapping[reposeJoint] = jnt
rJoints.append(reposeJoint)
# Set their parents
for reposeJoint in rJoints:
parent = jointMapping[reposeJoint].parent
        if parent in jointMapping: # Check against joint mapping in case only a few selected cards are being tposed
reposeJoint.setParent( jointMapping[parent] )
reposeContainer = getReposeContainer()
# Put under cards, card pivot to lead joint
for rCard, card in zip(rCards, cards):
if progress:
progress.update()
bpj = card.parentCardJoint
#print('BPJ - - - - ', bpj, bpj in jointMapping)
if bpj in jointMapping:
start = card.start() if card.joints else bpj
#rCard.setParent( getRJoint(bpj) )
pdil.dagObj.unlock(rCard)
#firstBpj = card.joints[0]
#return
isMirrored = card.isCardMirrored()
mirroredSide = card.joints[0].info.get('options', {}).get('mirroredSide')
#print('rCard.mirror', rCard.mirror, 'info:', mirroredSide)
#if rCard.mirror is False and mirroredSide:
if isMirrored is False and card.mirror is False and mirroredSide:
#print('opposite mirror')
rCard.setParent( makeMirrored( jointMapping[bpj] ) )
else:
#print('regular side stuff')
rCard.setParent( jointMapping[bpj] )
#cmds.parent(str(rCard), str(jointMapping[bpj]))
xform(rCard, ws=True, piv=xform(start, q=True, t=True, ws=True) )
pdil.dagObj.lockTrans(rCard)
else:
if not placeholder:
rCard.addAttr('reposeRoot', at='message')
rCard.setParent( reposeContainer )
addVector(rCard, 'origRot', rCard.r.get())
addVector(rCard, 'origTrans', rCard.t.get())
#start = getRJoint(card.start())
start = jointMapping[card.start()]
start.setParent( rCard )
pdil.dagObj.lockTrans( pdil.dagObj.lockScale( start ) )
if rCard in unlock:
for attr in unlock[rCard]:
rCard.attr(attr).unlock()
rCard.attr(attr).showInChannelBox(True)
for reposeJoint in rJoints:
pdil.dagObj.lock(reposeJoint, 'ry rz')
if reposeJoint in unlock:
for attr in unlock[reposeJoint]:
reposeJoint.attr(attr).unlock()
reposeJoint.attr(attr).showInChannelBox(True)
addVector(reposeJoint, 'origRot', reposeJoint.r.get())
addVector(reposeJoint, 'origTrans', reposeJoint.t.get())
'''
children = reposeJoint.listRelatives(type='transform')
if len(children) > 1:
for child in children:
orbit = joint(reposeJoint)
orbit.t.lock()
orbit.s.lock()
renameReposeObj(orbit, simpleName(child, '{}_orbit'), None)
child.setParent(orbit)
'''
def getReposeRoots():
''' Return the top level reposer cards
'''
return [plug.node() for plug in ls('*.reposeRoot')]
def setRot(obj, r):
''' Attempts to set each axis individually.
'''
for axis, val in zip('xyz', r):
try:
obj.attr( 'r' + axis ).set(val)
except Exception:
pass
def setTrans(obj, t):
''' Attempts to set each axis individually.
'''
for axis, val in zip('xyz', t):
try:
obj.attr( 't' + axis ).set(val)
except Exception:
pass
@contextlib.contextmanager
def reposeToBindPose(cards):
''' Temporarily puts the repose cards in their original orientation (to add/edit cards).
'''
validCards = getValidReposeCards()
currentTrans = {}
currentRot = {}
jointRot = {}
for card in cards:
reposeCard = getRCard(card, validCards)
if not reposeCard:
continue
currentRot[reposeCard] = reposeCard.r.get()
currentTrans[reposeCard] = reposeCard.t.get()
setRot(reposeCard, reposeCard.origRot.get())
setTrans(reposeCard, reposeCard.origTrans.get())
for jnt in card.joints:
repose = getRJoint(jnt)
if repose:
jointRot[repose] = repose.r.get()
setRot(repose, repose.origRot.get())
yield
for reposeCard, origRot in currentRot.items():
setRot(reposeCard, origRot )
setTrans(reposeCard, currentTrans[reposeCard] )
for reposeJoint, origRot in jointRot.items():
setRot(reposeJoint, origRot )
class matchReposer(object):
''' Temporarily puts the cards (aka bind pose) in the tpose.
Intended use is as a context manager but steps can be separated for debugging.
'''
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.unmatch()
def __init__(self, cards):
self.relock = []
self.prevPos = {}
self.prevRot = {}
validCards = getValidReposeCards()
for card in cards:
reposeCard = getRCard(card, validCards)
if not reposeCard:
continue
self.prevRot[card] = card.r.get()
rot = xform(reposeCard, q=True, ws=True, ro=True)
# Need to unlock/disconnect rotation, then redo it later (to handle twists aiming at next joint)
xform(card, ws=True, ro=rot)
for jnt in card.joints:
repose = getRJoint(jnt)
if repose:
for axis in 'xyz':
plug = jnt.attr('t' + axis)
if plug.isLocked():
plug.unlock()
self.relock.append(plug)
#if jnt.tx.isLocked():
# jnt.tx.unlock()
# relock.append(jnt)
self.prevPos[jnt] = jnt.t.get()
trans = xform(repose, q=True, t=True, ws=True)
try:
xform(jnt, ws=True, t=trans)
except Exception:
del self.prevPos[jnt]
def unmatch(self):
for jnt, pos in self.prevPos.items():
try:
jnt.t.set(pos)
except Exception:
pass
for plug in self.relock:
plug.lock()
for card, rot in self.prevRot.items():
try:
card.r.set(rot)
except:
print('UNABLE TO RETURN ROTATE', card)
class goToBindPose(object):
''' Puts the rig into the bind pose.
Intended use is as a context manager but steps can be separated for debugging.
'''
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.returnFromPose()
def __init__(self):
# &&& I need some coralary toTPose, which is basically zeroPose but also does uncontrolled joints, just in case
# Joints without controllers still need to be reposed
'''
joints = []
for card in core.findNode.allCards():
joints += card.getOutputJoints()
for j in joints:
if not j.tr.listConnections():
if j.hasAttr('bindZero'):
j.r.set( j.bindZero.get() )
if j.hasAttr( 'bindZeroTr' ):
if not j.hasAttr('tposeTr'):
addVector(j, 'tposeTr', j.t.get()).lock()
j.t.set( j.bindZeroTr.get() )
'''
controls = find.controllers()
self.current = {ctrl: (ctrl.t.get(), ctrl.r.get()) for ctrl in controls }
for ctrl in controls:
if ctrl.hasAttr( 'bindZero' ):
try:
ctrl.r.set( ctrl.bindZero.get() )
except Exception:
pass
if ctrl.hasAttr( 'bindZeroTr' ):
ctrl.t.set( ctrl.bindZeroTr.get() )
def returnFromPose(self):
for ctrl, (pos, rot) in self.current.items():
setRot(ctrl, rot)
setTrans(ctrl, pos)
def addVector(obj, name, val):
''' Adds a double3 attribute to obj, setting the value and returning the plug.
'''
if not obj.hasAttr(name):
obj.addAttr( name, at='double3' )
obj.addAttr( name + 'X', at='double', p=name )
obj.addAttr( name + 'Y', at='double', p=name )
obj.addAttr( name + 'Z', at='double', p=name )
plug = obj.attr(name)
plug.set( val )
return plug
def _mark(card, side):
''' Helper, copies the bind transform from the joint to the controllers on the given card.
'''
mainCtrl = card.getLeadControl(side, 'fk')
controls = [mainCtrl] + [ v for k, v in mainCtrl.subControl.items()]
joints = card.getRealJoints(side=side if side != 'center' else None)
for ctrl, jnt in zip(controls, joints):
if jnt.hasAttr('bindZero') and not pdil.math.isClose( jnt.bindZero.get(), (0, 0, 0) ):
addVector(ctrl, 'bindZero', jnt.bindZero.get()).lock()
'''
if not ctrl.hasAttr('bindZero'):
ctrl.addAttr( 'bindZero', at='double3' )
ctrl.addAttr( 'bindZeroX', at='double', p='bindZero' )
ctrl.addAttr( 'bindZeroY', at='double', p='bindZero' )
ctrl.addAttr( 'bindZeroZ', at='double', p='bindZero' )
ctrl.bindZero.set( jnt.bindZero.get() )
ctrl.bindZero.lock()
'''
if jnt.hasAttr('bindZeroTr') and not pdil.math.isClose( jnt.bindZeroTr.get(), (0, 0, 0) ):
'''
if not ctrl.hasAttr('bindZeroTr'):
ctrl.addAttr( 'bindZeroTr', at='double3' )
ctrl.addAttr( 'bindZeroTrX', at='double', p='bindZeroTr' )
ctrl.addAttr( 'bindZeroTrY', at='double', p='bindZeroTr' )
ctrl.addAttr( 'bindZeroTrZ', at='double', p='bindZeroTr' )
ctrl.bindZeroTr.set( jnt.bindZeroTr.get() )
ctrl.bindZeroTr.lock()
'''
addVector(ctrl, 'bindZeroTr', jnt.bindZeroTr.get()).lock()
def markBindPose(cards=None):
'''
    If any cards were built with the tpose system, mark what the bind pose is on the FK controllers.
    Operates on all cards by default but can be given a specific list.
'''
if not cards:
cards = find.blueprintCards()
for card in cards:
if card.outputCenter.fk:
_mark(card, 'center')
elif card.outputLeft.fk:
_mark(card, 'left')
_mark(card, 'right')
'''
controls = [card.outputLeft.fk] + [ v for k, v in card.outputLeft.fk.subControl.items()]
joints = card.getRealJoints(side='left')
for ctrl, jnt in zip(controls, joints):
if jnt.hasAttr('bindZero') and not core.math.isClose( jnt.bindZero.get(), (0, 0, 0) ):
print(ctrl, jnt)
controls = [card.outputLeft.fk] + [ v for k, v in card.outputRight.fk.subControl.items()]
joints = card.getRealJoints(side='right')
'''
def backportReposition(rCard):
''' Creates several nodes so changes to the repose card are moved back to the original. Requires teardown.
'''
# ??? Unsure if I need to match the pivot and maintain offset? I probably do.
    # The pivot of the real card needs to be in relation to its parent (I think)
# ??? I probably need to clear these out when updating cards, so test that first?
rGroup = group(em=True)
pdil.dagObj.matchTo(rGroup, rCard)
card = pdil.factory._getSingleConnection( rCard, 'bpCard' )
cardGroup = group(em=True)
pdil.dagObj.matchTo(cardGroup, card)
reposeProxy = group(em=True)
reposeProxy.setParent(rGroup)
parentConstraint( rCard, reposeProxy )
cardProxy = group(em=True)
cardProxy.setParent(cardGroup)
reposeProxy.t >> cardProxy.t
reposeProxy.r >> cardProxy.r
parentConstraint(cardProxy, card)
pdil.dagObj.unlock(rCard)
pdil.factory._setSingleConnection(rGroup, 'fossil_backport', rCard)
pdil.factory._setSingleConnection(cardGroup, 'fossil_backport', rCard)
def backportRepositionTeardown(rCard):
card = pdil.factory._getSingleConnection( rCard, 'bpCard' )
delete( card.listRelatives(type='parentConstraint') )
for con in rCard.listConnections(s=False, d=True, p=True):
if con.attrName() == 'fossil_backport':
delete( con.node() )
pdil.dagObj.lock(rCard, 't')
|
import ast
def is_main(node: ast.AST) -> bool:
"""Returns whether a node represents `if __name__ == '__main__':` or
`if '__main__' == __name__:`.
"""
if not isinstance(node, ast.If):
return False
test = node.test
if not isinstance(test, ast.Compare):
return False
if len(test.ops) != 1 or not isinstance(test.ops[0], ast.Eq):
return False
if len(test.comparators) != 1:
return False
left = test.left
right = test.comparators[0]
if isinstance(left, ast.Name):
name = left
elif isinstance(right, ast.Name):
name = right
else:
return False
if isinstance(left, ast.Str):
str_part = left
elif isinstance(right, ast.Str):
str_part = right
else:
return False
if name.id != "__name__":
return False
if not isinstance(name.ctx, ast.Load):
return False
if str_part.s != "__main__":
return False
return True
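# Minimal usage sketch (not part of the original module). It assumes a Python
# version where ast.Str is still available; ast.Str was removed in 3.12, so the
# checks above would need ast.Constant there.
if __name__ == "__main__":
    guarded = ast.parse("if __name__ == '__main__':\n    run()\n").body[0]
    plain = ast.parse("x = 1").body[0]
    print(is_main(guarded))  # True
    print(is_main(plain))    # False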
|
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_firewall as nsx_firewall
import cloudify_nsx.library.nsx_common as common
@operation
def create(**kwargs):
validation_rules = {
"esg_id": {
"required": True
},
"ruleTag": {
"set_none": True
},
"name": {
"set_none": True
},
"source": {
"set_none": True
},
"destination": {
"set_none": True
},
"application": {
"set_none": True
},
"matchTranslated": {
"default": False,
"type": "boolean"
},
"direction": {
"values": [
"in",
"out"
],
"set_none": True
},
"action": {
"required": True,
"values": [
"accept",
"deny",
"reject"
]
},
"enabled": {
"default": True,
"type": "boolean"
},
"loggingEnabled": {
"default": False,
"type": "boolean"
},
"description": {
"set_none": True
}
}
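    # Illustrative 'rule' node properties that would satisfy the validation
    # above (every identifier and value here is hypothetical):
    #
    #   rule:
    #     esg_id: "edge-1"
    #     name: "allow-http-in"
    #     action: "accept"
    #     direction: "in"
    #     enabled: true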
use_existing, firewall_dict = common.get_properties_and_validate(
'rule', kwargs, validation_rules
)
resource_id = ctx.instance.runtime_properties.get('resource_id')
if resource_id:
ctx.logger.info("Reused %s" % resource_id)
return
# credentials
client_session = common.nsx_login(kwargs)
rule_id, resource_id = nsx_firewall.add_firewall_rule(
client_session,
firewall_dict['esg_id'],
firewall_dict['application'],
firewall_dict["direction"],
firewall_dict["name"],
firewall_dict["loggingEnabled"],
firewall_dict["matchTranslated"],
firewall_dict["destination"],
firewall_dict["enabled"],
firewall_dict["source"],
firewall_dict["action"],
firewall_dict["ruleTag"],
firewall_dict["description"]
)
ctx.instance.runtime_properties['resource_id'] = resource_id
ctx.instance.runtime_properties['rule_id'] = rule_id
ctx.logger.info("created %s" % resource_id)
@operation
def delete(**kwargs):
common.delete_object(
nsx_firewall.delete_firewall_rule, 'rule',
kwargs, ['rule_id']
)
|
from math import pi
try:
import openmm.unit as u
except ImportError: # OpenMM < 7.6
import simtk.unit as u
kB = u.BOLTZMANN_CONSTANT_kB * u.AVOGADRO_CONSTANT_NA
# OpenMM constant for Coulomb interactions in OpenMM units
# (openmm/platforms/reference/include/SimTKOpenMMRealType.h)
# TODO: Replace this with an import from openmm.constants once available
E_CHARGE = 1.602176634e-19 * u.coulomb
EPSILON0 = 1e-6*8.8541878128e-12/(u.AVOGADRO_CONSTANT_NA*E_CHARGE**2) * u.farad/u.meter
ONE_4PI_EPS0 = 1/(4*pi*EPSILON0) * EPSILON0.unit # we need it unitless
# Standard-state volume for a single molecule in a box of size (1 L) / (avogadros number).
LITER = 1000.0 * u.centimeters**3
STANDARD_STATE_VOLUME = LITER / (u.AVOGADRO_CONSTANT_NA*u.mole)
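# For reference (approximate, not used above): in OpenMM's md unit system
# ONE_4PI_EPS0 evaluates to about 138.935 kJ*nm/(mol*e**2), and
# STANDARD_STATE_VOLUME to about 1.661 nm**3 per molecule.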
|
import sys
from gencode.gen import *
if __name__ == "__main__" :
p = 'C:\\Users\\ISS-Vendor\\Desktop\\gen\\'
gen = Gen(p,sys.argv)
gen.write()
|
import pytest
import io
import json
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.providers.figshare import metadata
from waterbutler.providers.figshare import provider
from waterbutler.providers.figshare.settings import PRIVATE_IDENTIFIER, MAX_PAGE_SIZE
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
'callback_url': 'http://sup.com/api/v1/project/v8s9q/waterbutler/logs/',
'id': 'fakey',
}
@pytest.fixture
def credentials():
return {
'token': 'freddie',
}
@pytest.fixture
def project_settings():
return {
'container_type': 'project',
'container_id': '13423',
}
@pytest.fixture
def article_settings():
return {
'container_type': 'article',
'container_id': '4037952',
}
@pytest.fixture
def project_provider(auth, credentials, project_settings):
return provider.FigshareProvider(auth, credentials, project_settings)
@pytest.fixture
def article_provider(auth, credentials, article_settings):
return provider.FigshareProvider(auth, credentials, article_settings)
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def list_project_articles():
return [
{ "modified_date": "2016-10-18T12:56:27Z",
"doi": "",
"title": "file_article",
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"created_date": "2016-10-18T12:55:44Z",
"id": 4037952,
"published_date": None
},
{
"modified_date": "2016-10-18T20:47:25Z",
"doi": "",
"title": "folder_article",
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"created_date": "2016-10-18T20:47:25Z",
"id": 4040019,
"published_date": None
}
]
@pytest.fixture
def file_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): file_article. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4037952,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4037952",
"embargo_type": None,
"title": "file_article",
"defined_type": 3,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "file",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def file_metadata():
    return {
"status": "available",
"is_link_only": False,
"name": "file",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}
@pytest.fixture
def folder_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): folder_article. figshare.\n \n Retrieved: 19 27, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4040019,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4040019",
"embargo_type": None,
"title": "folder_article",
"defined_type": 4,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "folder_file.png",
"viewer_type": "image",
"preview_state": "preview_available",
"download_url": "https://ndownloader.figshare.com/files/6517539",
"supplied_md5": "",
"computed_md5": "03dee7cf60f17a8453ccd2f51cbbbd86",
"upload_token": "3f106f31-d62e-40e7-bac8-c6092392142d",
"upload_url": "",
"id": 6517539,
"size": 15584
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T20:47:25Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T20:47:25Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def folder_file_metadata():
    return {
"status": "available",
"is_link_only": False,
"name": "folder_file.png",
"viewer_type": "image",
"preview_state": "preview_available",
"download_url": "https://ndownloader.figshare.com/files/6517539",
"supplied_md5": "",
"computed_md5": "03dee7cf60f17a8453ccd2f51cbbbd86",
"upload_token": "3f106f31-d62e-40e7-bac8-c6092392142d",
"upload_url": "",
"id": 6517539,
"size": 15584
}
@pytest.fixture
def create_article_metadata():
return {
"location": "https://api.figshare.com/v2/account/projects/13423/articles/4055568"
}
@pytest.fixture
def create_file_metadata():
return {
"location": "https://api.figshare.com/v2/account/articles/4055568/files/6530715"}
@pytest.fixture
def get_file_metadata():
return {
"status": "created",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_available",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "",
"computed_md5": "",
"upload_token": "c9d1a465-f3f6-402c-8106-db3493942303",
"upload_url": "https://fup100310.figshare.com/upload/c9d1a465-f3f6-402c-8106-db3493942303",
"id": 6530715,
"size": 7}
@pytest.fixture
def get_upload_metadata():
return {
"token": "c9d1a465-f3f6-402c-8106-db3493942303",
"md5": "",
"size": 1071709,
"name": "6530715/barricade.gif",
"status": "PENDING",
"parts": [{
"partNo": 1,
"startOffset": 0,
"endOffset": 6,
"status": "PENDING",
"locked": False}]}
@pytest.fixture
def upload_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): barricade.gif. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4055568,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4037952",
"embargo_type": None,
"title": "barricade.gif",
"defined_type": 3,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4037952",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
@pytest.fixture
def upload_folder_article_metadata():
return {
"group_resource_id": None,
"embargo_date": None,
"citation": "Baxter, Thomas (): barricade.gif. figshare.\n \n Retrieved: 19 20, Oct 19, 2016 (GMT)",
"embargo_reason": "",
"references": [],
"id": 4040019,
"custom_fields": [],
"size": 0,
"metadata_reason": "",
"funding": "",
"figshare_url": "https://figshare.com/articles/_/4040019",
"embargo_type": None,
"title": "barricade.gif",
"defined_type": 4,
"is_embargoed": False,
"version": 0,
"resource_doi": None,
"confidential_reason": "",
"files": [{
"status": "available",
"is_link_only": False,
"name": "barricade.gif",
"viewer_type": "",
"preview_state": "preview_not_supported",
"download_url": "https://ndownloader.figshare.com/files/6530715",
"supplied_md5": "b3e656f8b0828a31f3ed396a1c868786",
"computed_md5": "b3e656f8b0828a31f3ed396a1c868786",
"upload_token": "878068bf-8cdb-40c9-bcf4-5d8065ac2f7d",
"upload_url": "",
"id": 6530715,
"size": 7
}],
"description": "",
"tags": [],
"created_date": "2016-10-18T12:55:44Z",
"is_active": True,
"authors": [{
"url_name": "_",
"is_active": True,
"id": 2665435,
"full_name": "Thomas Baxter",
"orcid_id": ""
}],
"is_public": False,
"categories": [],
"modified_date": "2016-10-18T12:56:27Z",
"is_confidential": False,
"doi": "",
"license": {
"url": "https://creativecommons.org/licenses/by/4.0/",
"name": "CC-BY",
"value": 1
},
"has_linked_file": False,
"url": "https://api.figshare.com/v2/account/projects/13423/articles/4040019",
"resource_title": None,
"status": "draft",
"published_date": None,
"is_metadata_record": False
}
class TestPolymorphism:
# These should not be passing but are
async def test_project_provider(self, project_settings, project_provider):
assert isinstance(project_provider, provider.FigshareProjectProvider)
assert project_provider.project_id == project_settings['container_id']
async def test_article_provider(self, article_settings, article_provider):
assert isinstance(article_provider, provider.FigshareArticleProvider)
assert article_provider.article_id == article_settings['container_id']
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_contents(self, project_provider, list_project_articles,
file_article_metadata, folder_article_metadata):
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_metadata_url = project_provider.build_url(False, *root_parts,'articles',
str(list_project_articles[0]['id']))
folder_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
str(list_project_articles[1]['id']))
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', folder_metadata_url, body=folder_article_metadata)
path = await project_provider.validate_path('/')
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=folder_metadata_url)
assert result == [
metadata.FigshareFileMetadata(file_article_metadata, file_article_metadata['files'][0]),
metadata.FigshareFolderMetadata(folder_article_metadata)
]
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_article_contents(self, project_provider, list_project_articles,
file_article_metadata, file_metadata):
root_parts = project_provider.root_path_parts
article_id = str(file_article_metadata['id'])
article_name = file_article_metadata['title']
file_id = str(file_metadata['id'])
file_name = file_metadata['name']
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id, 'files', file_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_metadata_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=file_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(file_article_metadata, file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in file_article_metadata['url'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_contents(self, project_provider, list_project_articles,
folder_article_metadata, folder_file_metadata):
root_parts = project_provider.root_path_parts
article_id = str(folder_article_metadata['id'])
article_name = folder_article_metadata['title']
file_id = str(folder_file_metadata['id'])
file_name = folder_file_metadata['name']
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
folder_article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id, 'files', file_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', folder_article_metadata_url,
body=folder_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=folder_file_metadata)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=folder_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(folder_article_metadata, folder_file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == folder_file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in folder_article_metadata['url'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_contents(self, article_provider, folder_article_metadata,
folder_file_metadata):
root_parts = article_provider.root_path_parts
article_id = str(folder_article_metadata['id'])
article_name = folder_article_metadata['title']
file_id = str(folder_file_metadata['id'])
file_name = folder_file_metadata['name']
folder_article_metadata_url = article_provider.build_url(False, *root_parts)
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
print("%%%%%%% HERH?: {}".format(file_metadata_url))
aiohttpretty.register_json_uri('GET', folder_article_metadata_url,
body=folder_article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=folder_file_metadata)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=folder_article_metadata_url)
assert aiohttpretty.has_call(method='GET', uri=file_metadata_url)
expected = metadata.FigshareFileMetadata(folder_article_metadata, folder_file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert result.article_name == article_name
assert result.size == folder_file_metadata['size']
assert result.is_public == (PRIVATE_IDENTIFIER not in folder_article_metadata['url'])
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_upload(self, project_provider, list_project_articles,
create_article_metadata, create_file_metadata,
get_file_metadata, get_upload_metadata, file_stream,
upload_article_metadata):
file_name = 'barricade.gif'
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
validate_article_url = project_provider.build_url(False, *root_parts, 'articles', file_name)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_uri('GET', validate_article_url, status=404)
path = await project_provider.validate_path('/' + file_name)
article_id = str(upload_article_metadata['id'])
create_article_url = project_provider.build_url(False, *root_parts, 'articles')
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(get_file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_article_url, body=create_article_metadata, status=201)
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata, status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', upload_url, body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_article_metadata)
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_article_metadata,
upload_article_metadata['files'][0],
)
assert aiohttpretty.has_call(
method='POST',
uri=create_article_url,
data=json.dumps({
'title': 'barricade.gif',
})
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_upload(self, file_stream, project_provider, list_project_articles,
folder_article_metadata, get_file_metadata,
create_file_metadata, get_upload_metadata,
upload_folder_article_metadata):
file_name = 'barricade.gif'
article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
validate_folder_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
validate_file_url = project_provider.build_url(False, *root_parts, 'articles', article_id,
'files', file_name)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', validate_folder_url, body=folder_article_metadata)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_name))
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(get_file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata,
status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', upload_url, body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_folder_article_metadata)
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_folder_article_metadata,
upload_folder_article_metadata['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_upload(self, file_stream, article_provider, folder_article_metadata,
get_file_metadata, create_file_metadata, get_upload_metadata,
upload_folder_article_metadata):
file_name = 'barricade.gif'
file_id = str(get_file_metadata['id'])
root_parts = article_provider.root_path_parts
validate_file_url = article_provider.build_url(False, *root_parts, 'files', file_name)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = await article_provider.validate_path('/' + file_name)
create_file_url = article_provider.build_url(False, *root_parts, 'files')
file_url = article_provider.build_url(False, *root_parts, 'files', file_id)
get_article_url = article_provider.build_url(False, *root_parts)
upload_url = get_file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url, body=create_file_metadata, status=201)
aiohttpretty.register_json_uri('GET', file_url, body=get_file_metadata)
aiohttpretty.register_json_uri('GET', get_file_metadata['upload_url'], body=get_upload_metadata)
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=upload_folder_article_metadata)
result, created = await article_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
upload_folder_article_metadata,
upload_folder_article_metadata['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_article_download(self, project_provider, file_article_metadata,
list_project_articles, file_metadata):
article_id = str(list_project_articles[0]['id'])
file_id = str(file_article_metadata['files'][0]['id'])
body = b'castle on a cloud'
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles', article_id,
'files', file_id)
article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
download_url = file_metadata['download_url']
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', article_metadata_url, body=file_article_metadata)
aiohttpretty.register_uri('GET', download_url, params={'token': project_provider.token},
body=body, auto_length=True)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download(self, article_provider, file_article_metadata, file_metadata):
body = b'castle on a cloud'
file_id = str(file_metadata['id'])
root_parts = article_provider.root_path_parts
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
article_metadata_url = article_provider.build_url(False, *root_parts)
download_url = file_metadata['download_url']
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', article_metadata_url, body=file_article_metadata)
aiohttpretty.register_uri('GET', download_url, params={'token': article_provider.token},
body=body, auto_length=True)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_delete(self, project_provider, list_project_articles,
file_article_metadata, file_metadata):
file_id = str(file_metadata['id'])
article_id = str(list_project_articles[0]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_url = project_provider.build_url(False, *root_parts, 'articles', article_id, 'files',
file_id)
file_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', file_article_url, body=file_article_metadata)
aiohttpretty.register_uri('DELETE', file_article_url, status=204)
path = await project_provider.validate_path('/{}/{}'.format(article_id, file_id))
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_delete(self, project_provider, list_project_articles,
folder_article_metadata):
article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
folder_article_url = project_provider.build_url(False, *root_parts,'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', folder_article_url, body=folder_article_metadata)
aiohttpretty.register_uri('DELETE', folder_article_url, status=204)
path = await project_provider.validate_path('/{}'.format(article_id))
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=folder_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_delete(self, article_provider, file_metadata):
file_id = str(file_metadata['id'])
file_url = article_provider.build_url(False, *article_provider.root_path_parts, 'files',
file_id)
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_uri('DELETE', file_url, status=204)
path = await article_provider.validate_path('/{}'.format(file_id))
result = await article_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_url)
class TestRevalidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_revalidate_path(self, project_provider, list_project_articles,
file_article_metadata, folder_article_metadata):
file_article_id = str(list_project_articles[0]['id'])
folder_article_id = str(list_project_articles[1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_url = project_provider.build_url(False, *root_parts, 'articles',
file_article_id)
folder_article_url = project_provider.build_url(False, *root_parts, 'articles',
folder_article_id)
print("%%%%%% list_articles_url: {}".format(list_articles_url))
print("%%%%%% file_article_url: {}".format(file_article_url))
print("%%%%%% folder_article_url: {}".format(folder_article_url))
aiohttpretty.register_json_uri('GET', list_articles_url, body=list_project_articles,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_url, body=file_article_metadata)
aiohttpretty.register_json_uri('GET', folder_article_url, body=folder_article_metadata)
path = await project_provider.validate_path('/')
result = await project_provider.revalidate_path(path, '{}'.format('file'), folder=False)
assert result.is_dir is False
assert result.name == 'file'
assert result.identifier == str(file_article_metadata['files'][0]['id'])
|
import logging
from yapsy.IPlugin import IPlugin
import simplejson as json
import configargparse
from mrtarget.modules.GeneData import Gene
from mrtarget.Settings import Config
from mrtarget.common import URLZSource
class HGNC(IPlugin):
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
def print_name(self):
self._logger.info("HGNC gene data plugin")
def merge_data(self, genes, loader, r_server, data_config):
self._logger.info("HGNC parsing - requesting from URL %s", data_config.hgnc_complete_set)
with URLZSource(data_config.hgnc_complete_set).open() as source:
data = json.load(source)
for row in data['response']['docs']:
gene = Gene()
gene.load_hgnc_data_from_json(row)
genes.add_gene(gene)
self._logger.info("STATS AFTER HGNC PARSING:\n" + genes.get_stats())
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import sys
import pymysql
se = requests.Session()  # session used for the simulated login
requests.adapters.DEFAULT_RETRIES = 15
se.mount('http://', HTTPAdapter(max_retries=3))  # retry failed requests
se.mount('https://', HTTPAdapter(max_retries=3))
class Pixiv(object):
def __init__(self):
self.base_url = 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index'
self.login_url = 'https://accounts.pixiv.net/api/login?lang=zh'
self.search_url = 'https://www.pixiv.net/search.php'
self.main_url = 'https://www.pixiv.net'
self.target_url = 'https://www.pixiv.net/member_illust.php?mode=medium&illust_id='
self.headers = {
'Referer': 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
        self.pixiv_id = '835437423@qq.com'  # 2664504212@qq.com
        self.password = 'yjw3616807'  # knxy0616
self.post_key = []
self.return_to = 'https://www.pixiv.net/'
        self.load_path = './search_pic/'  # directory for saving downloaded images
def login(self):
post_key_xml = se.get(self.base_url, headers=self.headers).text
post_key_soup = BeautifulSoup(post_key_xml, 'lxml')
self.post_key = post_key_soup.find('input')['value']
        # build the login request body
data = {
'pixiv_id': self.pixiv_id,
'password': self.password,
'post_key': self.post_key,
'return_to': self.return_to
}
se.post(self.login_url, data=data, headers=self.headers)
def download(self):
pixiv_id=sys.argv[1]
db = pymysql.connect("localhost", "root", "PlanetarAntimony", "Study")
cursor = db.cursor()
sql = "SELECT pixiv_id,original_url FROM pixiv_search WHERE pixiv_id = '%s' " % (pixiv_id)
cursor.execute(sql)
results = cursor.fetchall()
        if results:
original_url=results[0][1]
else:
sql = "SELECT pixiv_id,original_url FROM pixiv_rank WHERE pixiv_id = '%s' " % (pixiv_id)
cursor.execute(sql)
results = cursor.fetchall()
original_url=results[0][1]
print(original_url)
img = se.get(original_url, headers=self.headers)
        with open(self.load_path + pixiv_id + '.jpg', 'wb') as f:  # images must be written in binary mode; text needs different handling
            f.write(img.content)  # save the image
# olocal_url='http://study.imoe.club/Try/search_pic/'+ pixiv_id + '.jpg'
# sql = "INSERT INTO pixiv_search(olocal_url) VALUES ('%s')" % (olocal_url)
if __name__ == '__main__':
pixiv = Pixiv()
pixiv.login()
pixiv.download()
print("System Exit")
|
from django.contrib import admin
from models import License, Submission, Revision, TagCreation
class LicenseAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
class SubmissionAdmin(admin.ModelAdmin):
list_display = ('sub_type', 'slug', 'created_by', 'date_created',
'num_revisions')
list_display_links = ('slug',)
ordering = ['-date_created']
class RevisionAdmin(admin.ModelAdmin):
list_display = ('pk', 'title', 'created_by', 'date_created',
'sub_license', 'item_url', 'rev_id', 'is_displayed')
list_display_links = ('title',)
ordering = ['-date_created']
class TagCreationAdmin(admin.ModelAdmin):
list_display = ('date_created', 'tag', 'created_by', 'revision')
admin.site.register(License, LicenseAdmin)
admin.site.register(Submission, SubmissionAdmin)
admin.site.register(Revision, RevisionAdmin)
admin.site.register(TagCreation, TagCreationAdmin)
|
import sklearn
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
def get_score(model, x, y, plot=True, sparse=50):
y_pred = model.predict(x)
y_pred = np.clip(y_pred, 1.0, 5.0)
mse = mean_squared_error(y, y_pred)
mae = mean_absolute_error(y, y_pred)
medae = median_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
print ('{:.4} \nMSE: {:.4}\nMAE: {:.4}\nMedianAE: {:.4}\nR2 score: {:.4}'.format(model.name, mse, mae, medae, r2))
if plot:
plt.figure(figsize=(20,5))
plt.title(model.name)
plt.ylabel('Score')
plt.plot(y_pred[::sparse])
plt.plot(y[::sparse])
plt.legend(('y_pred', 'y_test'))
plt.show()
return {'mean squared error':mse, 'mean absolute error':mae, 'median absolute error':medae, 'r2 score':r2}
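# Usage sketch (hypothetical model/data names; any estimator exposing
# .predict() plus a .name attribute used in the printout will work):
#
#     from sklearn.linear_model import LinearRegression
#     model = LinearRegression().fit(x_train, y_train)
#     model.name = 'linear baseline'
#     scores = get_score(model, x_test, y_test, plot=False)
#     print(scores['r2 score'])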
|
##list of integers
student_score= [99, 88, 60]
##printing out that list
print(student_score)
##printing all the integers in a range
print(list(range(1,10)))
##printing out all the integers in a range skipping one every time
print(list(range(1,10,2)))
## manipulating a string and printing all the modifications
x = "hello"
y = x.upper()
z = x.title()
print(x, y, z)
|
# Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Package identification routines.
This module contains the :func:`~reprozip.tracer.linux_pkgs.identify_packages`
function that sorts a list of files between their distribution packages,
depending on what Linux distribution we are running on.
Currently supported package managers:
- dpkg (Debian, Ubuntu)
- rpm (Fedora, CentOS, Red Hat)
"""
from __future__ import division, print_function, unicode_literals
import distro
import itertools
import logging
from rpaths import Path
import subprocess
import time
from reprozip.common import Package
from reprozip.utils import iteritems, listvalues
logger = logging.getLogger('reprozip')
magic_dirs = ('/dev', '/proc', '/sys')
system_dirs = ('/bin', '/etc', '/lib', '/sbin', '/usr', '/var', '/run')
class PkgManager(object):
"""Base class for package identifiers.
Subclasses should provide either `search_for_files` or `search_for_file`
which actually identifies the package for a file.
"""
def __init__(self):
# Files that were not part of a package
self.unknown_files = set()
# All the packages identified, with their `files` attribute set
self.packages = {}
def filter_files(self, files):
seen_files = set()
for f in files:
if f.path not in seen_files:
if not self._filter(f):
yield f
seen_files.add(f.path)
def search_for_files(self, files):
nb_pkg_files = 0
for f in self.filter_files(files):
pkgnames = self._get_packages_for_file(f.path)
# Stores the file
if not pkgnames:
self.unknown_files.add(f)
else:
pkgs = []
for pkgname in pkgnames:
if pkgname in self.packages:
pkgs.append(self.packages[pkgname])
else:
pkg = self._create_package(pkgname)
if pkg is not None:
self.packages[pkgname] = pkg
pkgs.append(self.packages[pkgname])
if len(pkgs) == 1:
pkgs[0].add_file(f)
nb_pkg_files += 1
else:
self.unknown_files.add(f)
# Filter out packages with no files
self.packages = {pkgname: pkg
for pkgname, pkg in iteritems(self.packages)
if pkg.files}
logger.info("%d packages with %d files, and %d other files",
len(self.packages),
nb_pkg_files,
len(self.unknown_files))
def _filter(self, f):
# Special files
if any(f.path.lies_under(c) for c in magic_dirs):
return True
# If it's not in a system directory, no need to look for it
if (f.path.lies_under('/usr/local') or
not any(f.path.lies_under(c) for c in system_dirs)):
self.unknown_files.add(f)
return True
return False
def _get_packages_for_file(self, filename):
raise NotImplementedError
def _create_package(self, pkgname):
raise NotImplementedError
# Before Linux 2.6.23, maximum argv is 128kB
MAX_ARGV = 800
class DpkgManager(PkgManager):
"""Package identifier for deb-based systems (Debian, Ubuntu).
"""
def search_for_files(self, files):
# Make a set of all the requested files
requested = dict((f.path, f) for f in self.filter_files(files))
found = {} # {path: pkgname}
# Request a few files at a time so we don't hit the command-line size
# limit
iter_batch = iter(requested)
while True:
batch = list(itertools.islice(iter_batch, MAX_ARGV))
if not batch:
break
proc = subprocess.Popen(['dpkg-query', '-S'] +
[path.path for path in batch],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
for line in out.splitlines():
pkgname, path = line.split(b': ', 1)
path = Path(path.strip())
# 8-bit safe encoding, because this might be a localized error
# message (that we don't care about)
pkgname = pkgname.decode('iso-8859-1')
if ', ' in pkgname: # Multiple packages
found[path] = None
continue
pkgname = pkgname.split(':', 1)[0] # Remove :arch
if path in requested:
if ' ' not in pkgname:
# If we had assigned it to a package already, undo
if path in found:
found[path] = None
# Else assign to the package
else:
found[path] = pkgname
# Remaining files are not from packages
self.unknown_files.update(
f for f in files
if f.path in requested and found.get(f.path) is None)
nb_pkg_files = 0
for path, pkgname in iteritems(found):
if pkgname is None:
continue
if pkgname in self.packages:
package = self.packages[pkgname]
else:
package = self._create_package(pkgname)
self.packages[pkgname] = package
package.add_file(requested.pop(path))
nb_pkg_files += 1
logger.info("%d packages with %d files, and %d other files",
len(self.packages),
nb_pkg_files,
len(self.unknown_files))
def _get_packages_for_file(self, filename):
# This method is not used for dpkg: instead, we query multiple files at
# once since it is faster
assert False
def _create_package(self, pkgname):
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package}\t'
'${Version}\t'
'${Installed-Size}\n',
'-W',
pkgname],
stdout=subprocess.PIPE)
try:
size = version = None
for line in p.stdout:
fields = line.split()
# Removes :arch
name = fields[0].decode('ascii').split(':', 1)[0]
if name == pkgname:
version = fields[1].decode('ascii')
size = int(fields[2].decode('ascii')) * 1024 # kbytes
break
for line in p.stdout: # finish draining stdout
pass
finally:
p.wait()
if p.returncode == 0:
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
class RpmManager(PkgManager):
"""Package identifier for rpm-based systems (Fedora, CentOS).
"""
def _get_packages_for_file(self, filename):
p = subprocess.Popen(['rpm', '-qf', filename.path,
'--qf', '%{NAME}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return None
return [line.strip().decode('iso-8859-1')
for line in out.splitlines()
if line]
def _create_package(self, pkgname):
p = subprocess.Popen(['rpm', '-q', pkgname,
'--qf', '%{VERSION}-%{RELEASE} %{SIZE}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
version, size = out.strip().decode('iso-8859-1').rsplit(' ', 1)
size = int(size)
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
def identify_packages(files):
"""Organizes the files, using the distribution's package manager.
"""
distribution = distro.id()
if distribution in ('debian', 'ubuntu'):
logger.info("Identifying Debian packages for %d files...", len(files))
manager = DpkgManager()
elif (distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
distribution.startswith('red hat')):
logger.info("Identifying RPM packages for %d files...", len(files))
manager = RpmManager()
else:
logger.info("Unknown distribution, can't identify packages")
return files, []
begin = time.time()
manager.search_for_files(files)
logger.debug("Assigning files to packages took %f seconds",
(time.time() - begin))
return manager.unknown_files, listvalues(manager.packages)
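# Usage sketch (illustrative only; `TracedFile` is a hypothetical stand-in for
# reprozip's traced-file records, which expose a `.path` rpaths.Path attribute):
#
#     class TracedFile(object):
#         def __init__(self, path):
#             self.path = Path(path)
#
#     unknown_files, packages = identify_packages([TracedFile(b'/bin/ls')])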
|
'''Editing video and getting images'''
from image_tools import *
import cv2
import numpy as np
def num_frames(filepath):
video = cv2.VideoCapture(filepath)
# return video.get(cv2.CAP_PROP_FRAME_COUNT) # broken because codecs; I think dolphin doesn't encode the frame count
success, _ = video.read()
frame_count = 0
while success:
success, _ = video.read()
frame_count += 1
return frame_count
def trim(frame_start, frame_end, filepath_in, filepath_out):
video_in = cv2.VideoCapture(filepath_in)
video_out = create_similar_writer(video_in, filepath_out)
# frame_num = frame_start
# video_in.set(cv2.CAP_PROP_POS_FRAMES, frame_num-1) # opencv bug, need to do alternative (just .read() a bunch I guess)
#
# success, img = video_in.read()
# while success and frame_num < frame_end:
# video_out.write(img)
#
# success, img = video_in.read()
# frame_num += 1
#
# video_in.release()
### slower but working method
i = 0
for frame in iter_frames(filepath_in):
if i >= frame_end:
break
if i >= frame_start:
video_out.write(frame)
i += 1
video_out.release()
def create_similar_writer(video_capture, filename):
# returns VideoWriter with same size, fps, codec as given VideoCapture
codec, width, height, fps = [video_capture.get(prop)
for prop in (cv2.CAP_PROP_FOURCC,
cv2.CAP_PROP_FRAME_WIDTH,
cv2.CAP_PROP_FRAME_HEIGHT,
cv2.CAP_PROP_FPS)]
# https://stackoverflow.com/questions/61659346/how-to-get-4-character-codec-code-for-videocapture-object-in-opencv
# h = int(828601953.0)
# fourcc = chr(h&0xff) + chr((h>>8)&0xff) + chr((h>>16)&0xff) + chr((h>>24)&0xff)
    api_pref = cv2.CAP_FFMPEG  # opencv python VideoWriter constructor forces 6 args, even though I don't care about api_pref and extra params
return cv2.VideoWriter(filename, api_pref, int(codec), fps, (int(width), int(height)))#, params=[])
def grab_frame(n, filepath):
# returns nth image frame in given video
video = cv2.VideoCapture(filepath)
success, img = video.read()
frame_num = 0
while success and frame_num < n:
success, img = video.read()
frame_num += 1
return img
def iter_frames(filepath):
# returns generator for image frames in given video
video = cv2.VideoCapture(filepath)
success, img = video.read()
while success:
yield img
success, img = video.read()
video.release()
def range_of_frames(filepath, _range):
# returns frames indexed by range
frame_nums = iter_ending_in_none(_range)
next_num = next(frame_nums)
for i, img in enumerate(iter_frames(filepath)):
if next_num is None: # no more frames to check for
break
if i == next_num:
yield img
next_num = next(frame_nums)
def iter_ending_in_none(iterable):
# prevents error on next of last in iterable
# check for None as last item
for item in iter(iterable):
yield item
yield None
def filter_playback(filepath, filter_fn, title='', interval_ms=int(1000 * 1/120)):
# shows video playback in new window, applying filter func to each image frame. Esc to close
for frame in iter_frames(filepath):
cv2.imshow(title, filter_fn(frame))
if cv2.waitKey(interval_ms) & 0xFF == 27: # default timing is close to realtime fps I think, but idk why
break
cv2.destroyAllWindows()
def write_frames(filepath, frames):
example_reader = cv2.VideoCapture('../test/Game_20210408T225110.avi')
writer = create_similar_writer(example_reader, filepath)
for img in frames:
writer.write(img)
writer.release()
def manually_generate_mask(img, fp='./mask.npy'):
# utility tool in new window; click and drag to select pixels. Esc to close.
img_background = upscale_min_to_max_res(img) # rendered to user
mask = np.zeros(MIN_RES, bool) # hidden data being modified
def on_mouse_activated(mouse_x, mouse_y):
# update data
x, y = downscale_pt_max_to_min_res((mouse_x, mouse_y))
mask[x][y] = 1
# update user
top_left, bot_right = upscale_pt_min_to_max_res((x, y))
cv2.rectangle(img_background, top_left, bot_right, (0,0,255), -1)
def mouse_callback(event, mouse_x, mouse_y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
mouse_callback.mouse_down = True
on_mouse_activated(mouse_x, mouse_y)
elif event == cv2.EVENT_LBUTTONUP:
mouse_callback.mouse_down = False
elif mouse_callback.mouse_down and event == cv2.EVENT_MOUSEMOVE:
on_mouse_activated(mouse_x, mouse_y)
mouse_callback.mouse_down = False
window_name = 'click and drag pixels to mask'
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, mouse_callback)
while True:
cv2.imshow(window_name, img_background)
if cv2.waitKey(1) & 0xFF == 27:
break
cv2.destroyAllWindows()
mask = mask.T
np.save(fp, mask)
return mask
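# Usage sketch (file paths are hypothetical):
#
#     trim(100, 200, './in.avi', './out.avi')   # keep frames 100..199
#     img = grab_frame(50, './in.avi')          # frame at index 50 as a numpy array
#     mask = manually_generate_mask(img)        # interactive mask painting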
|
from django.contrib import admin
from django.urls import re_path, include
from django.conf import settings
from django.views.static import serve
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^', include('gym.urls')),
]
if settings.DEBUG:
urlpatterns += [
re_path(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
]
|
from collections.abc import Hashable
print(issubclass(list,object))
print(issubclass(object,Hashable))
print(issubclass(list,Hashable))
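# list sets __hash__ = None, so Hashable's subclass hook rejects it even though
# every class (including list) is a subclass of object.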
"""
output:
True
True
False
"""
|
# Generated by Django 2.2.1 on 2020-10-14 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('call_tracking', '0003_auto_20201014_1603'),
]
operations = [
migrations.RenameField(
model_name='call',
old_name='forwarding_number',
new_name='proxy_number',
),
]
|
import pandas as pd
import numpy as np
import logging
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from config import shuffled_csv
from NN import NN_model, ReLU, Sigmoid, MSE, L1_reg, L2_reg
from NN.utility import batch_train, batch_out, Model_Wrapper
from LevelMethod import LevelMethod, LevelMethod2d, TestFunction
test_2d = False
test_smooth = False
if test_2d:
print("Hello World!")
# 2d version is outdated:
# - bound is not centred in x0
# - does not have memory option
f = LevelMethod2d(bounds = 20)
f.solve(TestFunction(), [-1,-3], plot=False)
else:
data = pd.read_csv(shuffled_csv, index_col=0).to_numpy()
data = data[:100,:]
n_samples = data.shape[0]
X_data = data[:, :10]
Y_data = data[:, 10:]
Y_scaler = StandardScaler()
Y_scaled = Y_scaler.fit_transform(Y_data)
np.random.seed()
if test_smooth:
model = NN_model([10, 5, 5, 2], Sigmoid, MSE)
else:
model = NN_model([10, 20, 20, 2], ReLU, MSE)
# set level to WARNING to avoid printing INFOs
logging.basicConfig(level='INFO')
reg_loss = L1_reg(1e-4)
f = Model_Wrapper(model, X_data, Y_scaled, reg_loss)
if test_smooth:
base_bound = 1
bound_decay = 10
max_iter = 200
loops = 2
max_iter = [max_iter]*loops
else:
base_bound = 1
bound_decay = 1
max_iter = [500]
loops = len(max_iter)
print(
"\nConfiguration:",
f"""
base_bound = {base_bound}
bound_decay = {bound_decay}
max_iter = {max_iter}
loops = {loops}
"""
)
for method in ["MOSEK"]:#, "CLP", "ECOS", "ECOS_BB", "GLPK"]:
print(method)
model.init_weights()
for i in range(loops):
bound = base_bound / (bound_decay ** i)
            solver = LevelMethod(bounds=bound, lambda_=0.9, epsilon=0.01, max_iter=max_iter[i], memory=None, LP_solver=method)  # pass the decayed bound computed above
x = model.Weights
status = solver.solve(f,x)
model.Weights = solver.x_upstar
if status == -1:
print(f"Terminato al loop {i+1}.")
break
times = solver.times
plt.plot(times["step"][1:], label=f"Step duration")
plt.plot(times["LP"][1:], label=f"LP duration - {method}")
plt.plot(times["QP"][1:], label=f"QP duration - MOSEK")
plt.legend(loc="upper left")
plt.show()
print('')
print(f'Exited with status: {status}')
print('')
Y_out = batch_out(model, X_data)
Y_out = Y_scaler.inverse_transform(Y_out)
plt.scatter(Y_data[:,0], Y_data[:,1], s=1)
plt.scatter(Y_out[:,0], Y_out[:,1], s=1)
print('MEE is:')
mee = 0
for y1, y2 in zip(Y_data, Y_out):
mee += np.linalg.norm(y1 - y2)
print(mee/n_samples)
plt.show()
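# Note: the MEE accumulation loop above is equivalent to a single vectorized call:
# mee = np.linalg.norm(Y_data - Y_out, axis=1).mean()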
|
from pyramid.threadlocal import get_current_registry
class ZCAConfiguratorMixin(object):
def hook_zca(self):
""" Call :func:`zope.component.getSiteManager.sethook` with the
argument :data:`pyramid.threadlocal.get_current_registry`, causing
the :term:`Zope Component Architecture` 'global' APIs such as
:func:`zope.component.getSiteManager`,
:func:`zope.component.getAdapter` and others to use the
:app:`Pyramid` :term:`application registry` rather than the Zope
'global' registry."""
from zope.component import getSiteManager
getSiteManager.sethook(get_current_registry)
def unhook_zca(self):
""" Call :func:`zope.component.getSiteManager.reset` to undo the
action of :meth:`pyramid.config.Configurator.hook_zca`."""
from zope.component import getSiteManager
getSiteManager.reset()
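# Typical usage (a sketch; these methods are exposed on pyramid.config.Configurator):
# config = Configurator()
# config.hook_zca()    # zope.component globals now resolve against Pyramid's registry
# ...
# config.unhook_zca()  # restore the Zope global registry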
|
import pandas as pd
import numpy as np
column_data = ["packed",
"highway",
"greennopop",
"water",
"road",
"density"
]
testing_df = pd.read_csv('testing.csv', names=column_data, index_col=None)
test_x = testing_df[["packed",
"highway",
"greennopop",
"water",
"road"]]
test_x = test_x.to_numpy()
test_x = np.reshape(
test_x, (
test_x.shape[0],
1,
test_x.shape[1]
)
)
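# The reshape above yields shape (n_samples, 1, n_features), i.e. one timestep per
# sample -- a layout commonly fed to recurrent layers (an assumption about the
# downstream model, which is not shown here).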
|
"""
This module describes how to load a custom dataset from a single file.
As a custom dataset we will actually use the movielens-100k dataset, but act as
if it were not built-in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from surprise import BaselineOnly
from surprise import Dataset
from surprise import evaluate
from surprise import Reader
# path to dataset file
file_path = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/u.data')
# As we're loading a custom dataset, we need to define a reader. In the
# movielens-100k dataset, each line has the following format:
# 'user item rating timestamp', separated by '\t' characters.
reader = Reader(line_format='user item rating timestamp', sep='\t')
data = Dataset.load_from_file(file_path, reader=reader)
data.split(n_folds=5)
# We'll use an algorithm that predicts baseline estimates.
algo = BaselineOnly()
# Evaluate performances of our algorithm on the dataset.
evaluate(algo, data)
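# Note: surprise.evaluate() and Dataset.split() were removed in newer Surprise
# releases. A sketch of the equivalent call with the model_selection API
# (assuming Surprise >= 1.1):
# from surprise.model_selection import cross_validate
# cross_validate(BaselineOnly(), data, measures=['RMSE', 'MAE'], cv=5, verbose=True)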
|
import sys
import os.path
import itertools
import textwrap
import argparse
from argparse import RawTextHelpFormatter
from math import log
from phanotate_modules.edges import Edge
from phanotate_modules.nodes import Node
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('phanotate').version
except Exception:
__version__ = 'unknown'
def pairwise(iterable):
a = iter(iterable)
return zip(a, a)
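# e.g. list(pairwise(['a', 'b', 'c', 'd'])) -> [('a', 'b'), ('c', 'd')]
# (consumes the iterable in non-overlapping pairs; a trailing odd element is dropped)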
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __repr__(self):
return '{0}-{1}'.format(self.start, self.end)
def __eq__(self, other):
return self.start <= other <= self.end
def is_valid_file(x):
if not os.path.exists(x):
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def get_args():
usage = 'phanotate.py [-opt1, [-opt2, ...]] infile'
parser = argparse.ArgumentParser(description='PHANOTATE: A phage genome annotator', formatter_class=RawTextHelpFormatter, usage=usage)
parser.add_argument('infile', type=is_valid_file, help='input file in fasta format')
parser.add_argument('-o', '--outfile', action="store", default=sys.stdout, type=argparse.FileType('w'), help='where to write the output [stdout]')
parser.add_argument('-f', '--outfmt', action="store", default="tabular", dest='outfmt', help='format of the output [tabular]', choices=['tabular','genbank','fasta'])
parser.add_argument('-d', '--dump', action="store_true")
parser.add_argument('-V', '--version', action='version', version=__version__)
args = parser.parse_args()
return args
def read_fasta(filepath):
my_contigs = dict()
name = ''
seq = ''
with open(filepath, mode="r") as my_file:
for line in my_file:
if(line.startswith(">")):
my_contigs[name] = seq
name = line.split()[0]
seq = ''
else:
seq += line.replace("\n", "").upper()
my_contigs[name] = seq
if '' in my_contigs: del my_contigs['']
return my_contigs
def write_output(id, args, my_path, my_graph, my_orfs):
outfmt = args.outfmt
outfile = args.outfile
try:
my_path = my_path[1:]
    except Exception:
        sys.stdout.write("Error running fastpathz\n")
if(not my_path):
outfile.write("#id:\t" + str(id[1:]) + " NO ORFS FOUND\n")
elif(outfmt == 'tabular'):
last_node = eval(my_path[-1])
outfile.write("#id:\t" + str(id[1:]) + "\n")
outfile.write("#START\tSTOP\tFRAME\tCONTIG\tSCORE\n")
for source, target in pairwise(my_path):
left = eval(source)
right = eval(target)
weight = my_graph.weight(Edge(left,right,0))
if(left.gene == 'tRNA'):
continue
if(left.position == 0 and right.position == last_node.position):
left.position = abs(left.frame)
right.position = '>' + str(left.position+3*int((right.position-left.position)/3)-1)
left.position = '<' + str(left.position)
elif(left.position == 0):
left.position = '<' + str(((right.position+2)%3)+1)
right.position += 2
elif(right.position == last_node.position):
right.position = '>' + str(left.position+3*int((right.position-left.position)/3)-1)
else:
right.position += 2
if(left.type == 'start' and right.type == 'stop'):
outfile.write(str(left.position) + '\t' + str(right.position) + '\t+\t' + id[1:] + '\t' + str(weight) + '\t\n')
elif(left.type == 'stop' and right.type == 'start'):
outfile.write(str(right.position) + '\t' + str(left.position) + '\t-\t' + id[1:] + '\t' + str(weight) + '\t\n')
elif(outfmt == 'genbank'):
last_node = eval(my_path[-1])
outfile.write('LOCUS ' + id[1:])
outfile.write(str(last_node.position-1).rjust(10))
outfile.write(' bp DNA PHG\n')
outfile.write('DEFINITION ' + id[1:] + '\n')
outfile.write('FEATURES Location/Qualifiers\n')
outfile.write(' source 1..' + str(last_node.position-1) + '\n')
for source, target in pairwise(my_path):
#get the orf
left = eval(source)
right = eval(target)
weight = my_graph.weight(Edge(left,right,0))
if(left.gene == 'tRNA' or right.gene == 'tRNA'):
outfile.write(' ' + left.gene.ljust(16))
if(left.frame > 0):
outfile.write(str(left.position) + '..' + str(right.position) + '\n')
else:
outfile.write('complement(' + str(left.position) + '..' + str(right.position) + ')\n')
continue
if(left.frame > 0):
orf = my_orfs.get_orf(left.position, right.position)
else:
orf = my_orfs.get_orf(right.position, left.position)
#properly display the orf
if(not orf.has_start() and not orf.has_stop()):
left.position = '<' + str(((right.position+2)%3)+1)
if(right.position == last_node.position):
right.position = '>' + str(left.position+3*int((right.position-left.position)/3)-1)
else:
right.position += 2
outfile.write(' ' + left.gene.ljust(16))
if(left.type == 'start' and right.type == 'stop'):
outfile.write(str(left.position) + '..' + str(right.position) + '\n')
elif(left.type == 'stop' and right.type == 'start'):
outfile.write('complement(' + str(left.position) + '..' + str(right.position) + ')\n')
outfile.write(' /note="weight=' + '{:.2E}'.format(weight) + ';"\n')
outfile.write('ORIGIN')
i = 0
dna = textwrap.wrap(my_orfs.seq, 10)
for block in dna:
if(i%60 == 0):
outfile.write('\n')
outfile.write(str(i+1).rjust(9))
outfile.write(' ')
outfile.write(block.lower())
else:
outfile.write(' ')
outfile.write(block.lower())
i += 10
outfile.write('\n')
outfile.write('//')
outfile.write('\n')
elif(outfmt == 'fasta'):
last_node = eval(my_path[-1])
for source, target in pairwise(my_path):
left = eval(source)
right = eval(target)
if(left.gene == 'tRNA'):
continue
if(left.frame > 0):
orf = my_orfs.get_orf(left.position, right.position)
else:
orf = my_orfs.get_orf(right.position, left.position)
if(left.gene == 'CDS'):
weight = my_graph.weight(Edge(left,right,0))
if(left.position == 0 and right.position == last_node.position):
left.position = abs(left.frame)
right.position = '>' + str(left.position+3*int((right.position-left.position)/3)-1)
left.position = '<' + str(left.position)
elif(left.position == 0):
left.position = '<' + str(((right.position+2)%3)+1)
right.position += 2
elif(right.position == last_node.position):
right.position = '>' + str(left.position+3*int((right.position-left.position)/3)-1)
else:
right.position += 2
if(left.type == 'start' and right.type == 'stop'):
#outfile.write(str(left.position) + '\t' + str(right.position) + '\t+\t' + id[1:] + '\t' + str(weight) + '\t\n')
outfile.write(id + "." + str(right.position) + " [START=" + str(left.position) + "] [SCORE=" + str(weight) + "]\n")
elif(left.type == 'stop' and right.type == 'start'):
#outfile.write(str(right.position) + '\t' + str(left.position) + '\t-\t' + id[1:] + '\t' + str(weight) + '\t\n')
outfile.write(id + "." + str(left.position) + " [START=" + str(right.position) + "] [SCORE=" + str(weight) + "]\n")
outfile.write(orf.seq)
outfile.write("\n")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import time
import torch
import numpy as np
import cv2
import matplotlib.pyplot as plt
from torch.autograd import Variable
from models import model_create_by_name as model_create
from losses.loss import losses
import utils.utils as utils
import logging
#logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
logging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')
class stereo(object):
def __init__(self, args):
self.args = args
self.use_cuda = torch.cuda.is_available()
# dataloader
self.dataloader = None
# model
self.name = args.net
self.model = model_create(self.name, args.maxdisparity)
if(self.use_cuda):
self.model = self.model.cuda()
#self.model = torch.nn.parallel.DistributedDataParallel(self.model.cuda())
# optim
self.lr = args.lr
self.alpha = args.alpha
self.betas = (args.beta1, args.beta2)
self.optim = torch.optim.Adam(self.model.parameters(), lr=self.lr, betas=self.betas)
#self.optim = torch.optim.RMSprop(model.parameters(), lr=self.lr, alpha=self.alpha)
self.lr_epoch0 = args.lr_epoch0
self.lr_stride = args.lr_stride
# lossfun
maxepoch_weight_adjust = 0 if self.args.mode == 'finetune' else args.lr_epoch0*3//4
# maxepoch_weight_adjust = args.lr_epoch0*3//4
self.lossfun = losses(loss_name=args.loss_name, count_levels=self.model.count_levels, maxepoch_weight_adjust=maxepoch_weight_adjust)
# dirpath of saving weight
self.dirpath = os.path.join(args.output,
"%s_%s" % (args.mode, args.dataset),
"%s_%s" % (args.net, args.loss_name))
#
self.epoch = 0
self.best_prec = np.inf
if(os.path.exists(self.args.path_weight)):
# load pretrained weight
state_dict = torch.load(self.args.path_weight)['state_dict']
self.model.load_state_dict(state_dict)
            msg = 'load pretrained weight successfully: %s \n' % self.args.path_weight
logging.info(msg)
if(self.args.mode in ['train', 'finetune']):
# load checkpoint
self.load_checkpoint()
msg = "[%s] Model name: %s , loss name: %s , updated epoches: %d \n" % (args.mode, args.net, args.loss_name, self.epoch)
logging.info(msg)
def save_checkpoint(self, epoch, best_prec, is_best):
state = {
'epoch': epoch,
'best_prec': best_prec,
'state_dict': self.model.state_dict(),
'optim' : self.optim.state_dict(),
}
utils.save_checkpoint(state, is_best, dirpath=self.dirpath, filename='model_checkpoint.pkl')
if(is_best):
path_save = os.path.join(self.dirpath, 'weight_best.pkl')
torch.save({'state_dict': self.model.state_dict()}, path_save)
def load_checkpoint(self, best=False):
state = utils.load_checkpoint(self.dirpath, best)
if state is not None:
            msg = 'load checkpoint successfully: %s \n' % self.dirpath
logging.info(msg)
self.epoch = state['epoch'] + 1
self.best_prec = state['best_prec']
self.model.load_state_dict(state['state_dict'])
self.optim.load_state_dict(state['optim'])
def lr_adjust(self, optimizer, epoch0, stride, lr0, epoch):
if(epoch < epoch0):
return
n = ((epoch - epoch0)//stride) + 1
        lr = lr0 * (0.5 ** n)  # i.e. halve lr every `stride` epochs
        for param_group in optimizer.param_groups:  # push the updated lr into the optimizer for the next pass
            param_group['lr'] = lr
def accuracy(self, dispL, dispL_gt):
mask_disp = dispL_gt > 0
disp_diff = (dispL_gt - dispL).abs()
# epe
epe = disp_diff[mask_disp].mean()
# d1
mask1 = disp_diff[mask_disp] <= 3
mask2 = (disp_diff[mask_disp] / dispL_gt[mask_disp]) <= 0.05
pixels_good = (mask1 + mask2) > 0
d1 = 100 - 100.0 * pixels_good.sum() / mask_disp.sum()
return d1, epe
def submit(self):
        # directory where test results are saved
dirpath_save = os.path.join('submit', "%s_%s" % (self.args.dataset, self.args.flag_model))
if(not os.path.exists(dirpath_save)):
os.makedirs(dirpath_save)
filepath_save = dirpath_save + '.pkl'
        # if results were already computed, just print them and return
filenames, times, D1s, epes = [], [], [], []
if(os.path.exists(filepath_save)):
data = torch.load(filepath_save)
filenames.extend(data['filename'])
times.extend(data['time'])
D1s.extend(data['D1'])
epes.extend(data['epe'])
for i in range(len(filenames)):
if(len(D1s) == len(filenames)):
print("submit: %s | time: %6.3f, D1: %6.3f, epe: %6.3f" % (filenames[i], times[i], D1s[i], epes[i]))
print(np.mean(times), np.mean(D1s), np.mean(epes))
else:
print("submit: %s | time: %6.3f" % (filenames[i], times[i]))
print(np.mean(times))
return
# switch to evaluate mode
self.model.eval()
# start to predict and save result
time_end = time.time()
for batch_idx, (batch, batch_filenames) in enumerate(self.dataloader_val):
assert batch.shape[2] >= 6
if(self.use_cuda):
batch = batch[:, :7].cuda()
imL = batch[:, :3]
imR = batch[:, 3:6]
imL = Variable(imL, volatile=True, requires_grad=False)
imR = Variable(imR, volatile=True, requires_grad=False)
# compute output
scale_dispLs, dispLs = self.model(imL, imR)
# measure elapsed time
filenames.append(batch_filenames[0])
times.append(time.time() - time_end)
time_end = time.time()
# measure accuracy
if(batch.shape[1] >= 7):
dispL = batch[:, 6:7]
d1, epe = self.accuracy(dispLs[0].data, dispL)
D1s.append(d1)
epes.append(epe)
print("submit: %s | time: %6.3f, D1: %6.3f, epe: %6.3f" % (filenames[-1], times[-1], D1s[-1], epes[-1]))
else:
print("submit: %s | time: %6.3f" % (filenames[-1], times[-1]))
# save predict result
filename_save = filenames[-1].split('.')[0]+'.png'
path_file = os.path.join(dirpath_save, filename_save)
cv2.imwrite(path_file, dispLs[0][0, 0].data.cpu().numpy())
# save final result
data = {
"filename":filenames,
"time":times,
"D1":D1s,
"epe":epes,
}
torch.save(data, filepath_save)
if (len(D1s) > 0):
print(np.mean(times), np.mean(D1s), np.mean(epes))
else:
print(np.mean(times))
def start(self):
args = self.args
if args.mode == 'test':
self.validate()
return
losses, EPEs, D1s, epochs_val, losses_val, EPEs_val, D1s_val = [], [], [], [], [], [], []
path_val = os.path.join(self.dirpath, "loss.pkl")
if(os.path.exists(path_val)):
state_val = torch.load(path_val)
losses, EPEs, D1s, epochs_val, losses_val, EPEs_val, D1s_val = state_val
        # start training the model
plt.figure(figsize=(18, 5))
time_start = time.time()
epoch0 = self.epoch
for epoch in range(epoch0, args.epochs):
self.epoch = epoch
            self.lr_adjust(self.optim, args.lr_epoch0, args.lr_stride, args.lr, epoch)  # custom lr_adjust defined above
self.lossfun.Weight_Adjust_levels(epoch)
msg = 'lr: %.6f | weight of levels: %s' % (self.optim.param_groups[0]['lr'], str(self.lossfun.weight_levels))
logging.info(msg)
# train for one epoch
mloss, mEPE, mD1 = self.train()
losses.append(mloss)
EPEs.append(mEPE)
D1s.append(mD1)
if(epoch % self.args.val_freq == 0) or (epoch == args.epochs-1):
# evaluate on validation set
mloss_val, mEPE_val, mD1_val = self.validate()
epochs_val.append(epoch)
losses_val.append(mloss_val)
EPEs_val.append(mEPE_val)
D1s_val.append(mD1_val)
# remember best prec@1 and save checkpoint
is_best = mD1_val < self.best_prec
self.best_prec = min(mD1_val, self.best_prec)
self.save_checkpoint(epoch, self.best_prec, is_best)
torch.save([losses, EPEs, D1s, epochs_val, losses_val, EPEs_val, D1s_val], path_val)
# plt
m, n = 1, 3
ax1 = plt.subplot(m, n, 1)
ax2 = plt.subplot(m, n, 2)
ax3 = plt.subplot(m, n, 3)
plt.sca(ax1); plt.cla(); plt.xlabel("epoch"); plt.ylabel("Loss")
plt.plot(np.array(losses), label='train'); plt.plot(np.array(epochs_val), np.array(losses_val), label='val'); plt.legend()
plt.sca(ax2); plt.cla(); plt.xlabel("epoch"); plt.ylabel("EPE")
plt.plot(np.array(EPEs), label='train'); plt.plot(np.array(epochs_val), np.array(EPEs_val), label='val'); plt.legend()
plt.sca(ax3); plt.cla(); plt.xlabel("epoch"); plt.ylabel("D1")
plt.plot(np.array(D1s), label='train'); plt.plot(np.array(epochs_val), np.array(D1s_val), label='val'); plt.legend()
plt.savefig("check_%s_%s_%s_%s.png" % (args.mode, args.dataset, args.net, args.loss_name))
time_curr = (time.time() - time_start)/3600.0
time_all = time_curr*(args.epochs - epoch0)/(epoch + 1 - epoch0)
msg = 'Progress: %.2f | %.2f (hour)\n' % (time_curr, time_all)
logging.info(msg)
|
"""
This module plots all extracted features with mean and standard deviation.
:copyright: (c) 2022 by Matthias Muhr, Hochschule-Bonn-Rhein-Sieg
:license: see LICENSE for more details.
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
def save_fig(fig, path, name):
"""
This function saves the fig object in the folder "results\\plots\\param_plots".
Args:
fig (Object): figure to save
path (string): path to root folder
name (string): figures name
"""
fig.tight_layout()
path = path + '\\results\\plots\\param_plots'
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + name.replace('/', ' dev ') + '.jpeg'
print(path)
# plt.show()
fig.savefig(path)
plt.close(fig)
def normalizedata(data, error):
"""
    This function normalises data, including errors, to the range 0 to 1.
    Args:
        data (list): list with data to normalise
        error (list): list of errors corresponding to the data
Returns:
normalised_data (list): normalised data
normalised_error (list): normalised error
"""
normalized_data = (data - np.min(data)) / (np.max(data) - np.min(data))
    normalized_error = (1 / np.max(data)) * error
    # print(normalized_data, normalized_error)
    return normalized_data, normalized_error
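# Example (a sketch with NumPy arrays, which the arithmetic above expects):
# normalizedata(np.array([2.0, 4.0, 6.0]), np.array([0.3, 0.3, 0.3]))
# -> ([0.0, 0.5, 1.0], [0.05, 0.05, 0.05])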
def transform_table(path, df_mean, df_stabw, properties):
"""
This function creates a DataFrame with mean values
    and errors including the units for every feature and calls the plot
function for them.
Args:
path (string): path to store the plots
        df_mean (pandas.DataFrame): DataFrame with means of all features
        df_stabw (pandas.DataFrame): DataFrame with standard deviation of all features
properties (dictionary): dictionary with parameters for processing
"""
params = df_mean.T.index.tolist()
# creating dataframe with means and errors
for param in params:
        # check whether a unit is present
try:
unit = param.split()[1]
except:
unit = '[]'
mean = df_mean[param]
stabw = df_stabw[param]
df_plot = pd.DataFrame({'mean': mean,'stabw': stabw})
# calling plot function
plot_mean(path, df_plot, param, unit, properties)
def plot_mean(path, df_plot, param, unit, properties):
    """
    This function plots the mean value with standard
    deviation for the given DataFrame of a property.
    Args:
        path (string): path to store the plots
        df_plot (pandas.DataFrame): DataFrame with mean and standard deviation of one feature for all samples
        param (string): name of the feature
        unit (string): unit of the feature
        properties (dictionary): dictionary with parameters for processing
    """
    plot_properties = properties['plot_properties']['compare_plots']
colors = properties['colors_samples']
# Create lists for the plot
samples = df_plot.index.tolist()
x_pos = np.arange(len(samples))
mean = df_plot['mean']
error = df_plot['stabw']
# mean, error = normalizedata(mean, error)
fig, ax = plt.subplots()
barlist = ax.bar(x_pos, mean, yerr=error, align='center', alpha=0.5, ecolor='black', capsize=plot_properties['label_size'])
### remove this for new data ###
for sample, i in zip(samples, range(len(samples))):
if sample == ' TNT':
sample = 'TNT'
barlist[i].set_color(colors[sample])
################################
ytitle = 'mean ' + unit
ax.set_ylabel(ytitle)
ax.set_xticks(x_pos)
ax.set_xticklabels(samples)
ax.yaxis.grid(True)
plt.xticks(rotation=45)
plt.ylabel(param.replace('_',' '), fontsize=plot_properties['label_size'])
plt.yticks(fontsize=plot_properties['font_size'])
plt.xticks(fontsize=plot_properties['font_size'])
plt.tight_layout()
# plt.show()
save_fig(fig, path, param)
plt.close()
def plot_features(path, properties):
"""
This function reads and plots the mean and standard deviation files of all characteristics and samples.
Args:
path (string): root path to data
properties (dictionary): dictionary with parameters for processing
"""
path_mean = path + '\\results\\mean.csv'
path_stabw = path + '\\results\\std.csv'
df_mean = pd.read_csv(path_mean, decimal='.', sep=';')
df_stabw = pd.read_csv(path_stabw, decimal='.', sep=';')
df_mean.rename(columns={"Unnamed: 0": "sample"}, inplace=True)
df_stabw.rename(columns={"Unnamed: 0": "sample"}, inplace=True)
df_mean.set_index('sample', inplace=True)
df_stabw.set_index('sample', inplace=True)
transform_table(path, df_mean, df_stabw, properties)
if __name__ == '__main__':
path = 'E:\\Promotion\\Daten\\29.06.21_Paper_reduziert'
plot_features(path)
|
#!/usr/bin/env python3
#
# This is an example script for migrating a single Trello board to an already existing
# Kanboard project (the target project must be created beforehand).
# NB: for the sake of example all provided identifiers/tokens/keys here are randomly generated
# and you have to provide your own to perform migration. Please consult README for details.
#
import re
import pytz
from kanboard import Client as KanboardClient
from trello import TrelloClient
from migrator import Migrator
def _add_label_mappings(registry, pairs):
for regex, value in pairs:
registry[re.compile(regex, re.I)] = value
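# e.g. _add_label_mappings(m.priority_map, [('priority: high', 5)]) stores
# {re.compile('priority: high', re.IGNORECASE): 5}, so label matching is case-insensitive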
class MigrateFactory:
def __init__(self):
self._trello_client = TrelloClient(
api_key='73256545e41ez1101a7x1c2280yf4600',
api_secret='fdfba7d4287279b43f7x60261702c2c19340c6y18178589372d1d69661z6a3c3',
token='1dd336d2e571ae0d9506620be008xy7543a6360443ff28f0ece033cc7345625b',
token_secret='f07b4c4eaedda3e3168050xyz9c4eae1'
)
self._kanboard_client = KanboardClient(
url='https://localhost/jsonrpc.php',
username='user',
password='4a24bc173c208a6f5512637320309wxf2e03af69eb765ec6016a8d7e9f7f'
)
def migrate(self, board_id, project_id):
m = Migrator(self._trello_client, self._kanboard_client, board_id, project_id)
# Customize Trello labels handling: use them to determine task priority and complexity
_add_label_mappings(
m.complexity_map,
[('complexity: low', 2), ('complexity: medium', 5), ('complexity: high', 7)]
)
_add_label_mappings(
m.priority_map,
[('priority: low', 1), ('priority: medium', 3), ('priority: high', 5)]
)
# Explicitly map Trello usernames to Kanboard user ids instead of relying
# on Migrator's attempts to match user by names
# In any case users have to be added to the project beforehand
m.users_map.update({'root': 0, 'admin': 1})
# Workaround for kanboard issues #3653, #3654
# (https://github.com/kanboard/kanboard)
m.kanboard_timezome = pytz.timezone('Europe/Kiev')
# Override attachment size limit
m.attachment_max_size = 2 * 1024 * 1024
m.run()
if __name__ == '__main__':
MigrateFactory().migrate(
board_id='e787e73314d22x516bf115cb',
project_id=3
)
|
# -*- coding: utf-8 -*-
import cv2
import os
import sys
cap = cv2.VideoCapture(0) # creating camera object
def main():
while(cap.isOpened()):
ret, im = cap.read() # reading the frames
        # display the current frame
        cv2.imshow("Show Image", im)
        k = cv2.waitKey(10)
        if k == 27:  # Esc to quit
            break
        # cv2.waitKey(0)  # wait for a key press
        # cv2.destroyAllWindows()  # destroy the window
if __name__ == '__main__':
main()
|
from ..constants import asset_dir
from PIL import Image
import os.path as osp
import os
img_library = {}
def get_loaded_images(img_dict):
for filename in os.listdir(asset_dir):
if filename.endswith(".png"):
image_loc = osp.join(asset_dir, filename)
img = Image.open(image_loc)
mode = img.mode
size = img.size
data = img.tobytes()
img_dict[image_loc] = (data, size, mode)
get_loaded_images(img_library)
|
import requests
import requests.auth
from functools import lru_cache
from tinydb.table import Table
from tinydb import TinyDB, where
from collections import deque
from src.utils import get_logger, get_ttl_hash, get_number_of_seconds_before_time
from time import sleep
try:
from data.config import (
PATH_DB,
USER_AGENT,
HTTP_AUTH_LOGIN,
HTTP_AUTH_PASSWORD,
REDDIT_LOGIN,
REDDIT_PASSWORD,
)
except ImportError:
from pathlib import Path
# Path for db
PATH_DB = Path().cwd() / "data" / "db.json"
    PATH_DB.parent.mkdir(parents=True, exist_ok=True)  # create the data directory, not a directory named db.json
# Bot name - "<platform>:<name>:v<version> (by /u/<username>)"
USER_AGENT = ""
# Reddit client id/secret
HTTP_AUTH_LOGIN = ""
HTTP_AUTH_PASSWORD = ""
# Reddit login/password
REDDIT_LOGIN = ""
REDDIT_PASSWORD = ""
logger = get_logger("reddit_archiver", file_name="reddit_archiver.log")
@lru_cache()
def get_token(ttl_hash: int = None):
"""
Authenticate with Reddit and receive token.
:return: token.
"""
del ttl_hash
client_auth = requests.auth.HTTPBasicAuth(HTTP_AUTH_LOGIN, HTTP_AUTH_PASSWORD)
post_data = {
"grant_type": "password",
"username": REDDIT_LOGIN,
"password": REDDIT_PASSWORD,
}
headers = {"User-Agent": USER_AGENT}
response = requests.post(
"https://www.reddit.com/api/v1/access_token",
auth=client_auth,
data=post_data,
headers=headers,
)
return response.json()["access_token"]
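# Caching note: lru_cache alone would memoise the token forever; passing the ttl_hash
# value (see get_ttl_hash in src.utils) as an argument changes the cache key
# periodically, so a fresh token is requested once the hash rolls over.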
def is_post_in_db(db: Table, post: dict) -> bool:
"""
Check whether db contains the post.
:param db: TinyDB object.
:param post: post to check.
:return: True if post is already saved in db.
"""
if db.search(where("permalink") == post["data"]["permalink"]):
logger.debug('Post "{0}" is in the db.'.format(post["data"]["permalink"]))
return True
logger.debug('Post "{0}" is not in the db.'.format(post["data"]["permalink"]))
return False
def add_posts_to_db(db: Table, posts: deque) -> tuple:
"""
Add posts to db and return number of inserted and skipped entries.
:param db: TinyDB object.
:param posts: Deque of posts to add.
:return: Tuple of inserted and skipped entries counters.
"""
inserted = 0
for post in reversed(posts):
if not is_post_in_db(db, post):
# db.insert(
# {
# "permalink": post["data"]["permalink"],
# "subreddit": post["data"]["subreddit"],
# "title": post["data"]["title"],
# "url": post["data"]["url"],
# }
# )
db.insert(post["data"])
inserted += 1
return inserted, len(posts) - inserted
def get_saved_posts():
db = TinyDB(PATH_DB, sort_keys=True, indent=4).table("reddit_archive")
# Fill out headers.
token = get_token(get_ttl_hash())
headers = {
"Authorization": "bearer {}".format(token),
"User-Agent": USER_AGENT,
}
# Get first batch.
response = requests.get(
"https://oauth.reddit.com/user/{0}/saved".format(REDDIT_LOGIN), headers=headers
)
received_json = response.json()
after = received_json["data"]["after"]
posts = deque(received_json["data"]["children"])
all_posts = posts
# Get next batch if all posts were added and there is a filled after field.
while after and not is_post_in_db(db, posts[-1]):
response = requests.get(
"https://oauth.reddit.com/user/TheKagdar/saved?after={}".format(after),
headers=headers,
)
received_json = response.json()
after = received_json["data"]["after"]
posts = received_json["data"]["children"]
logger.debug("Getting new batch of posts.")
all_posts.extend(posts)
inserted, skipped = add_posts_to_db(db, all_posts)
logger.info(
"{} posts were added to the db. Skipped {} posts.".format(inserted, skipped)
)
if __name__ == "__main__":
wait_time = get_number_of_seconds_before_time(60 * 60 * 16)
logger.info("Sleeping for {0} seconds until 16:00 UTC.".format(wait_time))
sleep(wait_time)
while True:
get_saved_posts()
sleep(60 * 60 * 24)
|
# https://codeforces.com/problemset/problem/144/A
n = int(input())
sh = list(map(int, input().split()))
sh_max = sh.index(max(sh))
sh_min = max(idx for idx, val in enumerate(sh) if val == min(sh))
if sh_max < sh_min:
print(sh_max + n - sh_min - 1)
else:
print(sh_max + n - sh_min - 2)
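# Counting logic: the tallest soldier needs sh_max swaps to reach the front and the
# last shortest needs (n - 1 - sh_min) swaps to reach the back; if the tallest starts
# to the right of the shortest, the two paths share one swap, hence the -2 branch.
# Sample check: n=4, heights 33 44 11 22 -> sh_max=1, sh_min=2 -> 1 + 4 - 2 - 1 = 2.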
|
from m5.SimObject import SimObject
from BaseHarvest import BaseHarvest
from m5.params import *
from m5.proxy import *
class BasicHarvest(BaseHarvest):
type = 'BasicHarvest'
cxx_header = "gem5_hw/basic_harvest.hh"
capacity = Param.Float(50000, "capacity of energy system, default 50000")
|
"""Module providing dataset class for annotated Redwood dataset."""
import json
import os
from typing import TypedDict, Optional
import zipfile
from scipy.spatial.transform import Rotation
import numpy as np
import open3d as o3d
import torch
from PIL import Image
import yoco
from cpas_toolbox import camera_utils, pointset_utils, quaternion_utils, utils
class AnnotatedRedwoodDataset(torch.utils.data.Dataset):
"""Dataset class for annotated Redwood dataset.
Data can be found here:
http://redwood-data.org/3dscan/index.html
    Annotations are part of this repo.
Expected directory format:
{root_dir}/{category_str}/rgbd/{sequence_id}/...
{ann_dir}/{sequence_id}.obj
{ann_dir}/annotations.json
"""
num_categories = 3
category_id_to_str = {
0: "bottle",
1: "bowl",
2: "mug",
}
category_str_to_id = {v: k for k, v in category_id_to_str.items()}
class Config(TypedDict, total=False):
"""Configuration dictionary for annoated Redwood dataset.
Attributes:
root_dir: See AnnotatedRedwoodDataset docstring.
ann_dir: See AnnotatedRedwoodDataset docstring.
mask_pointcloud: Whether the returned pointcloud will be masked.
normalize_pointcloud:
Whether the returned pointcloud and position will be normalized, such
that pointcloud centroid is at the origin.
scale_convention:
Which scale is returned. The following strings are supported:
"diagonal":
Length of bounding box' diagonal. This is what NOCS uses.
"max": Maximum side length of bounding box.
"half_max": Half maximum side length of bounding box.
"full": Bounding box side lengths. Shape (3,).
camera_convention:
Which camera convention is used for position and orientation. One of:
"opengl": x right, y up, z back
"opencv": x right, y down, z forward
Note that this does not influence how the dataset is processed, only the
returned position and quaternion.
orientation_repr:
Which orientation representation is used. Currently only "quaternion"
supported.
remap_y_axis:
If not None, the Redwood y-axis will be mapped to the provided axis.
Resulting coordinate system will always be right-handed.
This is typically the up-axis.
Note that NOCS object models are NOT aligned the same as ShapeNetV2.
To get ShapeNetV2 alignment: -y
One of: "x", "y", "z", "-x", "-y", "-z"
remap_x_axis:
If not None, the original x-axis will be mapped to the provided axis.
Resulting coordinate system will always be right-handed.
Note that NOCS object models are NOT aligned the same as ShapeNetV2.
To get ShapeNetV2 alignment: z
One of: "x", "y", "z", "-x", "-y", "-z"
category_str:
If not None, only samples from the matching category will be returned.
See AnnotatedRedwoodDataset.category_id_to_str for admissible category
strings.
"""
root_dir: str
ann_dir: str
split: str
mask_pointcloud: bool
normalize_pointcloud: bool
scale_convention: str
camera_convention: str
orientation_repr: str
orientation_grid_resolution: int
remap_y_axis: Optional[str]
remap_x_axis: Optional[str]
category_str: Optional[str]
default_config: Config = {
"root_dir": None,
"ann_dir": None,
"mask_pointcloud": False,
"normalize_pointcloud": False,
"camera_convention": "opengl",
"scale_convention": "half_max",
"orientation_repr": "quaternion",
"orientation_grid_resolution": None,
"category_str": None,
"remap_y_axis": None,
"remap_x_axis": None,
}
def __init__(
self,
config: Config,
) -> None:
"""Initialize the dataset.
Args:
config:
Configuration dictionary of dataset. Provided dictionary will be merged
with default_dict. See AnnotatedRedwoodDataset.Config for keys.
"""
config = yoco.load_config(
config, current_dict=AnnotatedRedwoodDataset.default_config
)
self._root_dir = utils.resolve_path(config["root_dir"])
self._ann_dir = utils.resolve_path(config["ann_dir"])
self._check_dirs()
self._camera_convention = config["camera_convention"]
self._mask_pointcloud = config["mask_pointcloud"]
self._normalize_pointcloud = config["normalize_pointcloud"]
self._scale_convention = config["scale_convention"]
self._remap_y_axis = config["remap_y_axis"]
self._remap_x_axis = config["remap_x_axis"]
self._orientation_repr = config["orientation_repr"]
self._load_annotations()
self._camera = camera_utils.Camera(
width=640, height=480, fx=525, fy=525, cx=319.5, cy=239.5
)
def _check_dirs(self) -> None:
if os.path.exists(self._root_dir) and os.path.exists(self._ann_dir):
pass
else:
print(
"REDWOOD75 dataset not found, do you want to download it into the "
"following directories:"
)
print(" ", self._root_dir)
print(" ", self._ann_dir)
while True:
decision = input("(Y/n) ").lower()
if decision == "" or decision == "y":
self._download_dataset()
break
elif decision == "n":
print("Dataset not found. Aborting.")
exit(0)
def _download_dataset(self) -> None:
# Download anns
if not os.path.exists(self._ann_dir):
zip_path = os.path.join(self._ann_dir, "redwood75.zip")
os.makedirs(self._ann_dir, exist_ok=True)
url = (
"https://drive.google.com/u/0/uc?id=1PMvIblsXWDxEJykVwhUk_QEjy4_bmDU"
"-&export=download"
)
utils.download(url, zip_path)
z = zipfile.ZipFile(zip_path)
z.extractall(os.path.join(self._ann_dir, ".."))
z.close()
os.remove(zip_path)
ann_json = os.path.join(self._ann_dir, "annotations.json")
with open(ann_json, "r") as f:
anns_dict = json.load(f)
baseurl = "https://s3.us-west-1.wasabisys.com/redwood-3dscan/rgbd/"
for seq_id in anns_dict.keys():
download_dir = os.path.join(self._root_dir, anns_dict[seq_id]["category"])
os.makedirs(download_dir, exist_ok=True)
zip_path = os.path.join(download_dir, f"{seq_id}.zip")
os.makedirs(os.path.dirname(zip_path), exist_ok=True)
utils.download(baseurl + f"{seq_id}.zip", zip_path)
z = zipfile.ZipFile(zip_path)
out_folder = os.path.join(download_dir, "rgbd", seq_id)
os.makedirs(out_folder, exist_ok=True)
z.extractall(out_folder)
z.close()
os.remove(zip_path)
def _load_annotations(self) -> None:
"""Load annotations into memory."""
ann_json = os.path.join(self._ann_dir, "annotations.json")
with open(ann_json, "r") as f:
anns_dict = json.load(f)
self._raw_samples = []
for seq_id, seq_anns in anns_dict.items():
for pose_ann in seq_anns["pose_anns"]:
self._raw_samples.append(
self._create_raw_sample(seq_id, seq_anns, pose_ann)
)
def _create_raw_sample(
self, seq_id: str, sequence_dict: dict, annotation_dict: dict
) -> dict:
"""Create raw sample from information in annotations file."""
position = torch.tensor(annotation_dict["position"])
orientation_q = torch.tensor(annotation_dict["orientation"])
rgb_filename = annotation_dict["rgb_file"]
depth_filename = annotation_dict["depth_file"]
mesh_filename = sequence_dict["mesh"]
mesh_path = os.path.join(self._ann_dir, mesh_filename)
category_str = sequence_dict["category"]
color_path = os.path.join(
self._root_dir, category_str, "rgbd", seq_id, "rgb", rgb_filename
)
depth_path = os.path.join(
self._root_dir, category_str, "rgbd", seq_id, "depth", depth_filename
)
extents = torch.tensor(sequence_dict["scale"]) * 2
return {
"position": position,
"orientation_q": orientation_q,
"extents": extents,
"color_path": color_path,
"depth_path": depth_path,
"mesh_path": mesh_path,
"category_str": category_str,
}
def __len__(self) -> int:
"""Return number of sample in dataset."""
return len(self._raw_samples)
def __getitem__(self, idx: int) -> dict:
"""Return a sample of the dataset.
Args:
idx: Index of the instance.
Returns:
Sample containing the following keys:
"color"
"depth"
"mask"
"pointset"
"position"
"orientation"
"quaternion"
"scale"
"color_path"
"obj_path"
"category_id"
"category_str"
"""
raw_sample = self._raw_samples[idx]
color = torch.from_numpy(
np.asarray(Image.open(raw_sample["color_path"]), dtype=np.float32) / 255
)
depth = self._load_depth(raw_sample["depth_path"])
instance_mask = self._compute_mask(depth, raw_sample)
pointcloud_mask = instance_mask if self._mask_pointcloud else None
pointcloud = pointset_utils.depth_to_pointcloud(
depth,
self._camera,
mask=pointcloud_mask,
convention=self._camera_convention,
)
# adjust camera convention for position, orientation and scale
position = pointset_utils.change_position_camera_convention(
raw_sample["position"], "opencv", self._camera_convention
)
# orientation / scale
orientation_q, extents = self._change_axis_convention(
raw_sample["orientation_q"], raw_sample["extents"]
)
orientation_q = pointset_utils.change_orientation_camera_convention(
orientation_q, "opencv", self._camera_convention
)
orientation = self._quat_to_orientation_repr(orientation_q)
scale = self._get_scale(extents)
# normalize pointcloud & position
if self._normalize_pointcloud:
pointcloud, centroid = pointset_utils.normalize_points(pointcloud)
position = position - centroid
category_str = raw_sample["category_str"]
sample = {
"color": color,
"depth": depth,
"pointset": pointcloud,
"mask": instance_mask,
"position": position,
"orientation": orientation,
"quaternion": orientation_q,
"scale": scale,
"color_path": raw_sample["color_path"],
"obj_path": raw_sample["mesh_path"],
"category_id": self.category_str_to_id[category_str],
"category_str": category_str,
}
return sample
def _compute_mask(self, depth: torch.Tensor, raw_sample: dict) -> torch.Tensor:
posed_mesh = o3d.io.read_triangle_mesh(raw_sample["mesh_path"])
R = Rotation.from_quat(raw_sample["orientation_q"]).as_matrix()
posed_mesh.rotate(R, center=np.array([0, 0, 0]))
posed_mesh.translate(raw_sample["position"])
posed_mesh.compute_vertex_normals()
gt_depth = torch.from_numpy(_draw_depth_geometry(posed_mesh, self._camera))
mask = gt_depth != 0
# exclude occluded parts from mask
mask[(depth != 0) * (depth < gt_depth - 0.01)] = 0
return mask
def _load_depth(self, depth_path: str) -> torch.Tensor:
"""Load depth from depth filepath."""
depth = torch.from_numpy(
np.asarray(Image.open(depth_path), dtype=np.float32) * 0.001
)
return depth
def _get_scale(self, extents: torch.Tensor) -> float:
"""Return scale from stored sample data and extents."""
if self._scale_convention == "diagonal":
return torch.linalg.norm(extents)
elif self._scale_convention == "max":
return extents.max()
elif self._scale_convention == "half_max":
return 0.5 * extents.max()
elif self._scale_convention == "full":
return extents
else:
raise ValueError(
f"Specified scale convention {self._scale_convention} not supported."
)
def _change_axis_convention(
self, orientation_q: torch.Tensor, extents: torch.Tensor
) -> tuple:
"""Adjust up-axis for orientation and extents.
Returns:
Tuple of position, orienation_q and extents, with specified up-axis.
"""
if self._remap_y_axis is None and self._remap_x_axis is None:
return orientation_q, extents
elif self._remap_y_axis is None or self._remap_x_axis is None:
raise ValueError("Either both or none of remap_{y,x}_axis have to be None.")
rotation_o2n = self._get_o2n_object_rotation_matrix()
remapped_extents = torch.abs(torch.Tensor(rotation_o2n) @ extents)
# quaternion so far: original -> camera
# we want a quaternion: new -> camera
rotation_n2o = rotation_o2n.T
quaternion_n2o = torch.from_numpy(Rotation.from_matrix(rotation_n2o).as_quat())
remapped_orientation_q = quaternion_utils.quaternion_multiply(
orientation_q, quaternion_n2o
) # new -> original -> camera
return remapped_orientation_q, remapped_extents
def _get_o2n_object_rotation_matrix(self) -> np.ndarray:
"""Compute rotation matrix which rotates original to new object coordinates."""
rotation_o2n = np.zeros((3, 3)) # original to new object convention
if self._remap_y_axis == "x":
rotation_o2n[0, 1] = 1
elif self._remap_y_axis == "-x":
rotation_o2n[0, 1] = -1
elif self._remap_y_axis == "y":
rotation_o2n[1, 1] = 1
elif self._remap_y_axis == "-y":
rotation_o2n[1, 1] = -1
elif self._remap_y_axis == "z":
rotation_o2n[2, 1] = 1
elif self._remap_y_axis == "-z":
rotation_o2n[2, 1] = -1
else:
raise ValueError("Unsupported remap_y_axis {self.remap_y}")
if self._remap_x_axis == "x":
rotation_o2n[0, 0] = 1
elif self._remap_x_axis == "-x":
rotation_o2n[0, 0] = -1
elif self._remap_x_axis == "y":
rotation_o2n[1, 0] = 1
elif self._remap_x_axis == "-y":
rotation_o2n[1, 0] = -1
elif self._remap_x_axis == "z":
rotation_o2n[2, 0] = 1
elif self._remap_x_axis == "-z":
rotation_o2n[2, 0] = -1
else:
raise ValueError("Unsupported remap_x_axis {self.remap_y}")
# infer last column
rotation_o2n[:, 2] = 1 - np.abs(np.sum(rotation_o2n, 1)) # rows must sum to +-1
rotation_o2n[:, 2] *= np.linalg.det(rotation_o2n) # make special orthogonal
if np.linalg.det(rotation_o2n) != 1.0: # check if special orthogonal
raise ValueError("Unsupported combination of remap_{y,x}_axis. det != 1")
return rotation_o2n
def _quat_to_orientation_repr(self, quaternion: torch.Tensor) -> torch.Tensor:
"""Convert quaternion to selected orientation representation.
Args:
quaternion:
The quaternion to convert, scalar-last, shape (4,).
Returns:
The same orientation as represented by the quaternion in the chosen
orientation representation.
"""
if self._orientation_repr == "quaternion":
return quaternion
elif self._orientation_repr == "discretized":
index = self._orientation_grid.quat_to_index(quaternion.numpy())
return torch.tensor(
index,
dtype=torch.long,
)
else:
raise NotImplementedError(
f"Orientation representation {self._orientation_repr} is not supported."
)
def load_mesh(self, object_path: str) -> o3d.geometry.TriangleMesh:
"""Load an object mesh and adjust its object frame convention."""
mesh = o3d.io.read_triangle_mesh(object_path)
if self._remap_y_axis is None and self._remap_x_axis is None:
return mesh
elif self._remap_y_axis is None or self._remap_x_axis is None:
raise ValueError("Either both or none of remap_{y,x}_axis have to be None.")
rotation_o2n = self._get_o2n_object_rotation_matrix()
mesh.rotate(
rotation_o2n,
center=np.array([0.0, 0.0, 0.0])[:, None],
)
return mesh
class ObjectError(Exception):
"""Error if something with the mesh is wrong."""
pass
def _draw_depth_geometry(
posed_mesh: o3d.geometry.TriangleMesh, camera: camera_utils.Camera
) -> np.ndarray:
"""Render a posed mesh given a camera looking along z axis (OpenCV convention)."""
# see http://www.open3d.org/docs/latest/tutorial/visualization/customized_visualization.html
# Create visualizer
vis = o3d.visualization.Visualizer()
vis.create_window(width=camera.width, height=camera.height, visible=False)
# Add mesh in correct position
vis.add_geometry(posed_mesh, True)
options = vis.get_render_option()
options.mesh_show_back_face = True
# Set camera at fixed position (i.e., at 0,0,0, looking along z axis)
view_control = vis.get_view_control()
o3d_cam = camera.get_o3d_pinhole_camera_parameters()
o3d_cam.extrinsic = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
)
view_control.convert_from_pinhole_camera_parameters(o3d_cam, True)
# Generate the depth image
vis.poll_events()
vis.update_renderer()
depth = np.asarray(vis.capture_depth_float_buffer())
return depth
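# Example usage (a sketch; the paths are hypothetical and must point at local copies
# of the Redwood data and the annotation files):
# dataset = AnnotatedRedwoodDataset({"root_dir": "~/data/redwood", "ann_dir": "~/data/redwood75"})
# sample = dataset[0]
# print(sample["category_str"], sample["scale"], sample["pointset"].shape)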
|
class Solution:
def sortColors(self, nums) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
        if not nums: return
p0,curr = 0,0
p2 = len(nums) - 1
while curr <= p2:
if nums[curr] == 0:
nums[p0],nums[curr] = nums[curr],nums[p0]
p0 += 1
curr += 1
elif nums[curr] == 2:
nums[p2],nums[curr] = nums[curr],nums[p2]
p2 -= 1
                # after the swap, the element moved into curr has not been examined yet, so curr is not advanced here
else:
curr += 1
test = Solution()
nums = [2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 1, 2]
test.sortColors(nums)
print(nums)
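# This is the classic Dutch national flag partition: p0 tracks where the next 0 goes,
# p2 where the next 2 goes, and curr scans between them in a single in-place pass.
# Expected output for the nums list above: [0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]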
|
import itertools
import datetime
import dateparser
from loguru import logger
from cloudproxy.check import check_alive
from cloudproxy.providers import settings
from cloudproxy.providers.hetzner.functions import list_proxies, delete_proxy, create_proxy
from cloudproxy.providers.settings import config, delete_queue, restart_queue
def hetzner_deployment(min_scaling):
total_proxies = len(list_proxies())
if min_scaling < total_proxies:
logger.info("Overprovisioned: Hetzner destroying.....")
for proxy in itertools.islice(
list_proxies(), 0, (total_proxies - min_scaling)
):
delete_proxy(proxy)
logger.info("Destroyed: Hetzner -> " + str(proxy.public_net.ipv4.ip))
if min_scaling - total_proxies < 1:
logger.info("Minimum Hetzner proxies met")
else:
total_deploy = min_scaling - total_proxies
logger.info("Deploying: " + str(total_deploy) + " Hetzner proxy")
for _ in range(total_deploy):
create_proxy()
logger.info("Deployed")
return len(list_proxies())
def hetzner_check_alive():
ip_ready = []
for proxy in list_proxies():
elapsed = datetime.datetime.now(
datetime.timezone.utc
) - dateparser.parse(str(proxy.created))
if config["age_limit"] > 0:
if elapsed > datetime.timedelta(seconds=config["age_limit"]):
delete_proxy(proxy)
logger.info(
"Recycling proxy, reached age limit -> " + str(proxy.public_net.ipv4.ip)
)
elif check_alive(proxy.public_net.ipv4.ip):
logger.info("Alive: Hetzner -> " + str(proxy.public_net.ipv4.ip))
ip_ready.append(proxy.public_net.ipv4.ip)
else:
if elapsed > datetime.timedelta(minutes=10):
delete_proxy(proxy)
logger.info(
"Destroyed: Hetzner took too long -> " + str(proxy.public_net.ipv4.ip)
)
else:
logger.info("Waiting: Hetzner -> " + str(proxy.public_net.ipv4.ip))
return ip_ready
def hetzner_check_delete():
for proxy in list_proxies():
if proxy.public_net.ipv4.ip in delete_queue or proxy.public_net.ipv4.ip in restart_queue:
delete_proxy(proxy)
logger.info("Destroyed: not wanted -> " + str(proxy.public_net.ipv4.ip))
delete_queue.remove(proxy.public_net.ipv4.ip)
def hetzner_start():
hetzner_check_delete()
hetzner_deployment(settings.config["providers"]["hetzner"]["scaling"]["min_scaling"])
ip_ready = hetzner_check_alive()
return ip_ready
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
def plot_images(images, targets, n_plot=30):
n_rows = n_plot // 6 + ((n_plot % 6) > 0)
fig, axes = plt.subplots(n_rows, 6, figsize=(9, 1.5 * n_rows))
axes = np.atleast_2d(axes)
for i, (image, target) in enumerate(zip(images[:n_plot], targets[:n_plot])):
row, col = i // 6, i % 6
ax = axes[row, col]
ax.set_title('#{} - Label:{}'.format(i, target), {'size': 12})
# plot filter channel in grayscale
ax.imshow(image.squeeze(), cmap='gray', vmin=0, vmax=1)
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
ax.label_outer()
plt.tight_layout()
return fig
def image_channels(red, green, blue, rgb, gray, rows=(0, 1, 2)):
fig, axs = plt.subplots(len(rows), 4, figsize=(15, 5.5))
zeros = np.zeros((5, 5), dtype=np.uint8)
titles1 = ['Red', 'Green', 'Blue', 'Grayscale Image']
titles0 = ['image_r', 'image_g', 'image_b', 'image_gray']
titles2 = ['as first channel', 'as second channel', 'as third channel', 'RGB Image']
idx0 = np.argmax(np.array(rows) == 0)
idx1 = np.argmax(np.array(rows) == 1)
idx2 = np.argmax(np.array(rows) == 2)
for i, m in enumerate([red, green, blue, gray]):
if 0 in rows:
axs[idx0, i].axis('off')
axs[idx0, i].invert_yaxis()
if (1 in rows) or (i < 3):
axs[idx0, i].text(0.15, 0.25, str(m.astype(np.uint8)), verticalalignment='top')
axs[idx0, i].set_title(titles0[i], fontsize=16)
if 1 in rows:
axs[idx1, i].set_title(titles1[i], fontsize=16)
axs[idx1, i].set_xlabel('5x5', fontsize=14)
axs[idx1, i].imshow(m, cmap=plt.cm.gray)
if 2 in rows:
axs[idx2, i].set_title(titles2[i], fontsize=16)
axs[idx2, i].set_xlabel(f'5x5x3 - {titles1[i][0]} only', fontsize=14)
if i < 3:
stacked = [zeros] * 3
stacked[i] = m
axs[idx2, i].imshow(np.stack(stacked, axis=2))
else:
axs[idx2, i].imshow(rgb)
for r in [1, 2]:
if r in rows:
idx = idx1 if r == 1 else idx2
axs[idx, i].set_xticks([])
axs[idx, i].set_yticks([])
for k, v in axs[idx, i].spines.items():
v.set_color('black')
v.set_linewidth(.8)
if 1 in rows:
axs[idx1, 0].set_ylabel('Single\nChannel\n(grayscale)', rotation=0, labelpad=40, fontsize=12)
axs[idx1, 3].set_xlabel('5x5 = 0.21R + 0.72G + 0.07B')
if 2 in rows:
axs[idx2, 0].set_ylabel('Three\nChannels\n(color)', rotation=0, labelpad=40, fontsize=12)
axs[idx2, 3].set_xlabel('5x5x3 = (R, G, B) stacked')
fig.tight_layout()
return fig
def figure5(sbs_logistic, sbs_nn):
fig, axs = plt.subplots(1, 2, figsize=(15, 6))
axs[0].plot(sbs_logistic.losses, 'b--', label='Logistic - Training')
axs[1].plot(sbs_logistic.val_losses, 'r--', label='Logistic - Validation')
axs[0].plot(sbs_nn.losses, 'b', label='3-layer Network - Training', alpha=.5)
axs[1].plot(sbs_nn.val_losses, 'r', label='3-layer Network - Validation', alpha=.5)
axs[0].set_xlabel('Epochs')
axs[0].set_ylabel('Losses')
axs[0].set_ylim([0.45, 0.75])
axs[0].legend()
axs[1].set_xlabel('Epochs')
axs[1].set_ylabel('Losses')
axs[1].set_ylim([0.45, 0.75])
axs[1].legend()
fig.tight_layout()
return fig
def figure7(weights):
fig, axs = plt.subplots(1, 5, figsize=(15, 4))
for i, m in enumerate(weights):
axs[i].imshow(m.reshape(-1, 5).tolist(), cmap='gray')
axs[i].grid(False)
axs[i].set_xticks([])
axs[i].set_yticks([])
axs[i].set_title(r'$w_{0' + str(i) + '}$')
fig.suptitle('Hidden Layer #0')
fig.subplots_adjust(top=0.6)
fig.tight_layout()
return fig
def figure5b(sbs_logistic, sbs_nn, sbs_relu):
fig, axs = plt.subplots(1, 2, figsize=(15, 6))
axs[0].plot(sbs_logistic.losses, 'b--', label='Logistic - Training')
axs[1].plot(sbs_logistic.val_losses, 'r--', label='Logistic - Validation')
axs[0].plot(sbs_nn.losses, 'b', label='3-layer Network - Training', alpha=.5)
axs[1].plot(sbs_nn.val_losses, 'r', label='3-layer Network - Validation', alpha=.5)
axs[0].plot(sbs_relu.losses, 'b', label='ReLU Network - Training', alpha=.8)
axs[1].plot(sbs_relu.val_losses, 'r', label='ReLU Network - Validation', alpha=.8)
axs[0].set_xlabel('Epochs')
axs[0].set_ylabel('Losses')
axs[0].legend()
axs[1].set_xlabel('Epochs')
axs[1].set_ylabel('Losses')
axs[1].legend()
fig.tight_layout()
return fig
def plot_activation(func, name=None):
z = torch.linspace(-5, 5, 1000)
z.requires_grad_(True)
func(z).sum().backward()
sig = func(z).detach()
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
# Move left y-axis and bottim x-axis to centre, passing through (0,0)
if name is None:
try:
name = func.__name__
except AttributeError:
name = ''
if name == 'sigmoid':
ax.set_ylim([0, 1.1])
elif name == 'tanh':
ax.set_ylim([-1.1, 1.1])
elif name == 'relu':
ax.set_ylim([-.1, 5.01])
else:
ax.set_ylim([-1.1, 5.01])
ax.set_xticks(np.arange(-5, 6, 1))
ax.set_xlabel('z')
ax.set_ylabel(r'$\sigma(z)$')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_title(name, fontsize=16)
ax.plot(z.detach().numpy(), sig.numpy(), c='k', label='Activation')
ax.plot(z.detach().numpy(), z.grad.numpy(), c='r', label='Gradient')
ax.legend(loc=2)
fig.tight_layout()
fig.show()
return fig
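# Example usage (a sketch): any differentiable element-wise torch function works, e.g.
# plot_activation(torch.sigmoid, name='sigmoid')
# plot_activation(torch.relu, name='relu')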
def weights_comparison(w_logistic_output, w_nn_equiv):
fig = plt.figure(figsize=(15, 6))
ax0 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
ax1 = plt.subplot2grid((1, 3), (0, 2))
ax0.bar(np.arange(25), w_logistic_output.cpu().numpy().squeeze(), alpha=1, label='Logistic')
ax0.bar(np.arange(25), w_nn_equiv.cpu().numpy().squeeze(), alpha=.5, label='3-layer Network (Composed)')
ax0.set_title('Weights')
ax0.set_xlabel('Parameters')
ax0.set_ylabel('Value')
ax0.legend()
ax1.scatter(w_logistic_output.cpu().numpy(), w_nn_equiv.cpu().numpy(), alpha=.5)
ax1.set_xlabel('Logistic')
ax1.set_ylabel('3-layer network (Composed)')
ax1.set_title('Weights')
ax1.set_xlim([-2, 2])
ax1.set_ylim([-2, 2])
fig.tight_layout()
return fig
|
# Generated by Django 3.0 on 2020-01-18 19:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('neighbor', '0004_auto_20200118_1810'),
]
operations = [
migrations.DeleteModel(
name='Neighborhood',
),
]
|
# Generated by Django 2.0.13 on 2019-04-15 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("users", "0001_initial")]
operations = [
migrations.AddField(
model_name="user",
name="last_user_agent",
field=models.CharField(
blank=True, max_length=255, verbose_name="Last time Used user agent"
),
)
]
|