Dataset schema (39 columns; each record below lists these fields in this order):
hexsha: string, length 40
size: int64, 5 to 2.06M
ext: string, 10 distinct values
lang: string, 1 distinct value
max_stars_repo_path: string, length 3 to 248
max_stars_repo_name: string, length 5 to 125
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k (nullable)
max_stars_repo_stars_event_min_datetime: string, length 24 (nullable)
max_stars_repo_stars_event_max_datetime: string, length 24 (nullable)
max_issues_repo_path: string, length 3 to 248
max_issues_repo_name: string, length 5 to 125
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k (nullable)
max_issues_repo_issues_event_min_datetime: string, length 24 (nullable)
max_issues_repo_issues_event_max_datetime: string, length 24 (nullable)
max_forks_repo_path: string, length 3 to 248
max_forks_repo_name: string, length 5 to 125
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k (nullable)
max_forks_repo_forks_event_min_datetime: string, length 24 (nullable)
max_forks_repo_forks_event_max_datetime: string, length 24 (nullable)
content: string, length 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1

[record] hexsha: 1507c96d9d4f256bc65da807cd5af86c8c25fb94 | size: 6,371 | ext: py | lang: Python
max_*_repo_path: dft/dft-hartree-hydrogen.py | max_*_repo_name: marvinfriede/projects | max_*_repo_head_hexsha: 7050cd76880c8ff0d9de17b8676e82f1929a68e0 | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: 3 (events 2021-04-14T20:15:26.000Z to 2021-04-14T20:20:54.000Z) | max_forks_count: null
content:
#!/bin/env python3
# coding: utf8
'''
My implementation of DFT Assignment 5.1: Hartree energy for H-atom GS
Taught by René Wirnata in 2019/2020.
Links:
https://tu-freiberg.de/fakultaet2/thph/lehre/density-functional-theory
https://github.com/PandaScience/teaching-resources
This script uses the last assignment's code to determine a solution of the
radial Schrödinger equation for the hydrogen ground state (n=1, l=0). After
normalizing, the Hartree potential energy w(r) = r*vh(r) is computed in a
second "integration" step and numerically integrated to the Hartree energy
(~0.3125 Ha). For hydrogen, the homogeneous solution w_hom(r) = beta * r
is not required in order to match the boundary condition (--> beta = 0).
Note that the integration limits (tmin, tmax) and step size (h) need to be
identical for solve_rseq() and solve_poisson() or you must use interpolated
versions of the functions w(r) and u(r) when computing the Hartree energy.
Further, tmin for solve_poisson() should not be smaller than tmin for
solve_rseq(), because extrapolating u(r) beyond the computed data points may
result in errors.
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp, trapz
from scipy.interpolate import interp1d
nsteps = 10000
rmin = 0.000001
rmax = 20
def secant(f, x1=-12345, x2=6789, maxiter=10000, tol=1e-10):
"""secant method; x1 and x2 are crucial for finding the desired root"""
for itr in range(maxiter):
xnew = x2 - (x2 - x1) / (f(x2) - f(x1)) * f(x2)
if abs(xnew - x2) < tol:
break
x1 = x2
x2 = xnew
else:
print("Calculation exceeded maximum number of iterations!")
exit()
return xnew, itr
def trapezoidal(f, a, b, n=10000):
"""trapezoidal rule for numerical integration"""
s = 0.0
h = (b - a) / n
for i in range(0, n):
s += f(a + i * h)
return h * (s + 0.5 * (f(a) + f(b)))
def rad_seq(t, y, energy):
"""returns radial SEQ as system of two 1st order differential equations"""
# input: y = [y1, y2]; return y = [y1', y2']
# y1' = y2; y2' = (...)*y1
return [y[1], (- 2 * (1 / t + energy)) * y[0]]
def initValues(r):
"""initial values for the numerical integration, taken from the exact solution"""
u = 2 * r * np.exp(-r)
uPrime = (1 - r) * 2 * np.exp(-r)
return [u, uPrime]
def solve_rad_seq(energy):
"""wrapper for ODE integration; energy and l as parameter, integration from
rmax to rmin (inwards)"""
sol = solve_ivp(
lambda t, y: rad_seq(t, y, energy),
t_span=[rmax, rmin],
t_eval=np.linspace(rmax, rmin, nsteps),
y0=initValues(rmax))
u = sol.y[0]
r = sol.t
return u[::-1], r[::-1]
def u0(energy):
"""get first value of integrated Schrödinger equation; since the array is
reversed, u[0] corresponds to the u-value at r = 0 (y-intercept); different
energies are passed in by secant method"""
u, r = solve_rad_seq(energy)
return u[0]
def normalize(energy):
"""integrating with calculated energy eigenvalue and normalization"""
u, r = solve_rad_seq(energy)
norm = trapz(u * u, r)
u_norm = u / np.sqrt(norm)
return u_norm, r, norm
def poisson(t, y, u):
"""returns poisson equation w''(t) = - u²(t) / t as system of two 1st order
differential equations"""
# input: y = [y1, y2]; return y = [y1', y2']
# y1' = y2; y2' = - u²(t) / t
return [y[1], -u(t) ** 2 / t]
def solve_poisson(f_int):
"""solve radial poisson equation; input is u(r) from interpolation"""
sol = solve_ivp(
lambda t, y: poisson(t, y, f_int),
t_span=[rmin, rmax],
t_eval=np.linspace(rmin, rmax, nsteps),
y0=[0, 1])
return sol.y[0], sol.t
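# Analytic reference for the result quoted in the module docstring (sketch for
# comparison; the closed form is a standard textbook value, not derived in this
# script): for a hydrogen-like 1s density the Hartree energy is
# E_H = 5*Z/16 Hartree, i.e. 0.3125 Ha for Z = 1, which the numerical
# integration below should approach.
def analytic_hartree_energy(Z=1):
    """Closed-form Hartree energy of a hydrogen-like 1s density (Hartree units)."""
    return 5 * Z / 16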
def main():
# find lowest energy eigenvalue; corresponds to energy of hydrogen atom
root_start_time = time.time()
root, numIter = secant(u0, -0.6, -0.55)
root_exec_time = round((time.time() - root_start_time), 2)
print("Energy of hydrogen atom: {:.5f} Hartree\t\t ({:.2f}s)".format(
root, root_exec_time))
# normalization
norm_start_time = time.time()
u_norm, r, norm = normalize(root)
norm_exec_time = round((time.time() - norm_start_time), 2)
print(
"Normalization done ({:.5f} -> 1)\t\t\t ({:.2f}s)".format(norm, norm_exec_time))
# interpolation of radial SEQ
u_norm_spline = interp1d(r, u_norm)
# solving the radial Poisson equation; the result is w(r) = r * vh(r) for the single-orbital density
w, r = solve_poisson(u_norm_spline)
# adding homogeneous solution, so that boundary conditions are fulfilled
addhom_start_time = time.time()
qtot = 1
beta = (qtot - w[-1]) / r[-1]
w += beta * r
addhom_exec_time = round((time.time() - addhom_start_time), 2)
print("Correction for boundary condition: beta = {:.5f}\t ({:.2f}s)".format(
beta, addhom_exec_time))
# compute hartree energy: Z/2 * Int(dr v_h(r) u²(r)) with v_h(r) = w(r) / r
hartree_start_time = time.time()
e_hartree = 0.5 * trapz(w / r * u_norm * u_norm, r)
hartree_exec_time = round((time.time() - hartree_start_time), 2)
print("Hartree energy for hydrogen: E_h = {:.5f}\t\t ({:.2f}s)".format(
e_hartree, hartree_exec_time))
# compute hartree energy with own trapezoidal method
hartree2_start_time = time.time()
spline = interp1d(r, w / r * u_norm * u_norm)
eh = 0.5 * trapezoidal(spline, r[0], r[-1])
hartree2_exec_time = round((time.time() - hartree2_start_time), 2)
print("Hartree energy for hydrogen: E_h = {:.5f}\t\t ({:.2f}s)".format(
eh, hartree2_exec_time))
# plotting numerical solutions
plt.plot(r, w, "g", lw=2, ls="--", label=r"$w_{\mathrm{num}}(r)$")
plt.plot(r, w / r, "r", lw=2, ls="--", label=r"$v_{\mathrm{num}}(r)$")
plt.plot(r, w / r * u_norm ** 2, "b", lw=2, ls="--",
alpha=0.5, label=r"$v_{\mathrm{num}}(r) \, |u(r)|^2$")
# plotting exact solutions
def w_exact(r):
return - (r + 1) * np.exp(- 2 * r) + 1
def v_exact(r):
return w_exact(r) / r
plt.plot(r, w_exact(r), "g", lw=4, alpha=0.5,
ls="-", label=r"$w_{\mathrm{exact}}(r)$")
plt.plot(r, v_exact(r), "r", lw=4, alpha=0.5,
ls="-", label=r"$v_{\mathrm{exact}}(r)$")
# plot styling
plt.xlabel(r"$r$ in Bohr")
plt.xlim(-0.3, 10)
plt.axhline(y=0, color="k", ls="--", lw="0.5")
plt.axvline(x=0, color="k", ls="--", lw="0.5")
plt.legend(loc="best", fancybox=True, shadow=True)
# plt.show()
if __name__ == "__main__":
main()
[record statistics] avg_line_length: 30.777778 | max_line_length: 86 | alphanum_fraction: 0.644169 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,105 | score_documentation: 0.486906

[record] hexsha: 1507f85202e8ecdff0fe986b123a48f1bb2bac41 | size: 18,714 | ext: py | lang: Python
max_*_repo_path: workflow & analyses notebooks/fukushima_telomere_methods.py | max_*_repo_name: Jared-Luxton/Fukushima-Nuclear-Disaster-Humans | max_*_repo_head_hexsha: 1cb84f63172005f3bd8947d2bca041deaeec90e8 | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (events 2021-05-23T22:06:17.000Z to 2021-05-23T22:06:17.000Z)
content:
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from difflib import SequenceMatcher
import seaborn as sns
from statistics import mean
from ast import literal_eval
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from pygam import LinearGAM, s, l, f
from matplotlib import lines
import six
def extract_boar_teloFISH_as_list(path):
"""
Pulls Kelly's teloFISH data for the 40 boars into a list, to be made into a dataframe
and joined with the main dataframe if possible.
These Excel files take a long time to load. The objective here is to synthesize all the
telomere FISH Excel files into one dataframe, then save that dataframe to a CSV file to be
retrieved later; loading one CSV file containing all the data is much faster than loading
the parts of the whole. Along the way, the teloFISH data is normalized using the controls
internal to each Excel file.
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
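# Sketch of the caching step described in the docstring above (the directory
# and column names here are illustrative assumptions, not taken from the
# original notebook):
# boar_list = extract_boar_teloFISH_as_list('../raw_data/')
# boar_df = pd.DataFrame(boar_list, columns=['Sample ID', 'telo data', 'telo means'])
# boar_df.to_csv('boar_teloFISH_dataframe.csv', index=False)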
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, df):
max_telos = n_cells * telosPercell
half_telos = (n_cells * telosPercell) / 2
if df.size > max_telos:
df_sampled = df.sample(max_telos)
return df_sampled
if df.size > 25 and df.size <= half_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
if df.size > 25 and df.size < max_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
else:
return df
def clean_individ_telos(telo_data):
labels=[6, 172, 338, 504, 670, 836, 1002, 1168, 1334, 1500, 1666, 1832,
1998, 2164, 2330, 2496, 2662, 2828, 2994, 3160, 3326, 3492, 3658, 3824,
3990, 4156, 4322, 4488, 4654, 4820]
labels_offset_by6 = [(x-6) for x in labels]
telo_data = telo_data.drop(labels_offset_by6)
telo_data = pd.to_numeric(telo_data.iloc[:,0], errors='coerce')
telo_data = telo_data.dropna(axis=0, how='any')
telo_data = telo_data.to_frame(name=None)
telo_data = telo_data[(np.abs(stats.zscore(telo_data)) < 3).all(axis=1)]
telo_data = pd.Series(telo_data.iloc[:,0])
telo_data = gen_missing_values_andimpute_or_randomsampledown(30, 160, telo_data)
telo_data.reset_index(drop=True, inplace=True)
return telo_data
def remove_dashes_space_sampleIDs(row):
if '-' in str(row):
row = str(row).replace('-', '').replace(' ', '')
if '_' in str(row):
row = str(row).replace('_', '')
if ' ' in str(row):
row = str(row).replace(' ', '')
if 'gps' in str(row):
row = str(row).replace('gps', '')
if 'GPS' in str(row):
row = str(row).replace('GPS', '')
if 'collar' in str(row):
row = str(row).replace('collar', '')
if 'COLLAR' in str(row):
row = str(row).replace('COLLAR', '')
return str(row)
def readable_snake_df_dummy_variables(snake_df):
Exposure_Status = []
for row in snake_df['Sample ID']:
if row.startswith('C'):
Exposure_Status.append('Control')
elif row.startswith('E'):
Exposure_Status.append('Exposed')
snake_df['Exposure Status'] = Exposure_Status
### making dummy variables for snake exposure status
snake_dum = pd.get_dummies(snake_df['Exposure Status'], prefix='Encoded', drop_first=True)
snake_df['Encoded Exposed'] = snake_dum
return snake_df
def count_shared_sample_IDs(df1, df2, print_names=None):
df1_IDs = set(df1['Sample ID'].unique())
df2_IDs = set(df2['Sample ID'].unique())
# common_IDs = df1_list - (df1_list - df2_list)
common_IDs = list(df1_IDs & df2_IDs)
print(f'The number of sample IDs in common are: {len(common_IDs)}')
if print_names == 'yes' or print_names == 'Yes':
print(f'The sample IDs in common are:\n{common_IDs}')
def average_age_weeks(row):
if '-' in str(row):
numbers = str(row).split('-')
average = (int(numbers[1]) + int(numbers[0])) / len(numbers)
return int(average)
else:
return int(row)
def quartile_cts_rel_to_df1(df1, df2):
df1 = pd.DataFrame(df1)
df2 = pd.DataFrame(df2)
# count how many instances in df2 are below the 0.25 quantile of df1
quartile_1 = df2[df2 <= df1.quantile(0.25)].count()
# count how many instances in df2 are within the 0.25 - 0.75 range quantile of df1
quartile_2_3 = df2[(df2 > df1.quantile(0.25)) & (df2 < df1.quantile(0.75))].count()
# count how many instances in df2 are above 0.75 range quantile of df1
quartile_4 = df2[df2 >= df1.quantile(0.75)].count()
# return counts of values
return int(quartile_1.values), int(quartile_2_3.values), int(quartile_4.values)
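# Illustrative reading of the three counts returned above: with df1 holding the
# pooled telomere values and df2 one boar's values, the counts say how many of
# that boar's telomeres fall below, within, and above the pooled interquartile
# range (the 0.25 and 0.75 quantiles of df1).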
def make_quartiles_columns(total_boar_telos, df):
pos_1, pos_2, pos_3 = 17, 18, 19
sample_id, telo_data = 0, 1
for i, row in df.iterrows():
boar_sample_telos = row[telo_data]
df.iat[i, pos_1], df.iat[i, pos_2], df.iat[i, pos_3] = (quartile_cts_rel_to_df1(total_boar_telos, boar_sample_telos))
return df
def linear_regression_graphs_between_variables(x=None, y=None, data=None,
hue=None, col=None,
hue_order=None, col_order=None,
snake=False):
if 'Binary' in y:
ax=sns.lmplot(x=x, y=y, hue=hue, col=col, data=data, logistic=True,
height=5.5, aspect=1, scatter_kws={"s": 175, "edgecolor":'black'})
else:
ax=sns.lmplot(x=x, y=y, hue=hue, col=col, data=data,
height=5.5, aspect=1, scatter_kws={"s": 175, "edgecolor":'black'})
fig = ax.fig
ax.set_xlabels(x, fontsize=18)
ax.set_xticklabels(fontsize=14)
ax.set_ylabels(y, fontsize=18)
ax.set_yticklabels(fontsize=14)
ax.set_titles(size=14)
# if 'Cortisol' in y:
# ax.set(ylim=(0, 40))
plt.subplots_adjust(top=0.88)
if hue == None and col == None:
fig.suptitle(f'{x} vs.\n {y} in Fukushima Wild Boar', fontsize=18,
)
# ax.savefig(f"../graphs/{x} vs {y}.png", dpi=400)
if snake:
fig.suptitle(f'{x} vs.\n {y} in Fukushima Wild Snake', fontsize=18,
)
# elif hue == 'Sex' and col == 'Sex':
# fig.suptitle(f'{x} vs. {y}\nper Sex in Fukushima Wild Boar', fontsize=16, weight='bold')
# fig.legend(fontsize='large')
# ax.savefig(f"../graphs/{x} vs {y} per sex.png", dpi=400)
def graph_dose_age_vs_telos(df=None, x=None, x2=None, y=None, hue=None,):
f, axes = plt.subplots(1, 2, figsize=(12,5), sharey=False, sharex=False)
# dose vs. telomeres
sns.regplot(x=x, y=y, data=df, ax=axes[0],
# hue=hue,
scatter_kws={'alpha':0.8, 'linewidth':1, 'edgecolor':'black', 's':df['Age (months)']*12, })
axes[0].set_xlabel(x, fontsize=14)
axes[0].set_ylabel(y, fontsize=14)
axes[0].tick_params(labelsize=12)
# age vs. telomeres
sns.regplot(x=x2, y=y, data=df, ax=axes[1],
# hue=hue,
scatter_kws={'alpha':0.8, 'linewidth':1, 'edgecolor':'black', 's':175, })
axes[1].set_xlabel(x2, fontsize=14)
axes[1].set_xlim(-4,55)
axes[1].set_ylabel(y, fontsize=14)
if y == 'Mean Telomere Length (FISH)':
axes[1].set_ylim(0.2,1.6)
if y == 'Mean Telomere Length (qPCR)':
axes[1].set_ylim(0.6,1.8)
axes[1].tick_params(labelsize=12)
def score_linear_regressions(x=None, y=None, data=None, sexes=['Overall']):
for sex in sexes:
if sex == 'Overall':
X_r = data[x].values.reshape(-1, len(x))
y_r = data[y].values.reshape(-1, 1)
regression = LinearRegression().fit(X_r,y_r)
print(f'Linear regression for {x} vs. {y}:\nOverall R2 is {regression.score(X_r, y_r):.4f}\n')
return regression
else:
X_r = data[data['Sex'] == sex][x].values.reshape(-1, len(x))
y_r = data[data['Sex'] == sex][y].values.reshape(-1, 1)
regression = LinearRegression().fit(X_r,y_r)
print(f"Linear regression for {x} vs. {y}:\nR2 for {sex}s is {regression.score(X_r, y_r):.4f}")
return regression
def eval_number(x):
if x > 15:
x = 1
return x
elif x < 15:
x = 0
return x
def score_logistic_regressions(x=None, y=None, data=None):
# for y in y_cols:
sexes = [
# 'Male',
# 'Female',
'Overall']
for sex in sexes:
if sex == 'Overall':
X_r = data[x].values.reshape(-1, 1)
y_r = data[y].values.reshape(-1, )
log_reg = LogisticRegression(solver='lbfgs')
regression = log_reg.fit(X_r,y_r)
print(f'Logistic regression for {x} vs. {y}:\nOverall R2 is {regression.score(X_r, y_r):.4f}\n')
else:
X_r = data[data['Sex'] == sex][x].values.reshape(-1, 1)
y_r = data[data['Sex'] == sex][y].values.reshape(-1, )
regression = LogisticRegression(solver='lbfgs').fit(X_r, y_r)
print(f"Logistic regression for {x} vs. {y}:\nR2 for {sex}s is {regression.score(X_r, y_r):.4f}")
def encode_sex(row):
if row == 'Male':
return 0
elif row == 'Female':
return 1
else:
print(f'ERROR.. row == {row}')
def merge_return_df_cols_interest(dose_df, cortisol_df, cols_of_interest):
merge_dose_cortisol = dose_df.merge(cortisol_df, on=['Sample ID'])
trim_dose_cortisol = merge_dose_cortisol[cols_of_interest].copy()
return trim_dose_cortisol
def enforce_col_types(df):
for col in df.columns:
if col == 'Sample ID' or col == 'Sex':
df[col] = df[col].astype('str')
elif col == 'Age (months)' or col == 'encode sex':
df[col] = df[col].astype('int64')
else:
df[col] = df[col].astype('float64')
def male_or_female(row):
if row == 'M' or row == 'm' or row == 'Male':
return 'Male'
elif row == 'F' or row == 'f' or row == 'Female':
return 'Female'
else:
print(f'error... row == {row}')
return np.NaN
def make_age_class(row):
if row <= 12:
return 'piglet'
elif row > 12 and row < 24:
return 'yearling'
elif row >= 20:
return 'adult'
def linear_regression_scores_X_y(df, y, y_name, dose_types):
"""
specifically for EDA
"""
for Xn in dose_types:
features_list = [[Xn], [Xn, 'Age (months)'], [Xn, 'Age (months)', 'encoded sex']]
for features in features_list:
X = df[features].values.reshape(-1, len(features))
fit_lm = LinearRegression().fit(X, y)
print(f'OLS | {features} vs. {y_name} --> R2: {fit_lm.score(X, y):.4f}')
print('')
return fit_lm
def fit_gam_plot_dependencies(df=None, features=None, target=None,
basis_1=s, basis_2=False, summary=False):
X = df[features]
y = df[target]
if basis_1 and basis_2:
gam = LinearGAM(basis_1(0, lam=60) + basis_2(1, lam=60), fit_intercept=True).fit(X, y)
elif basis_1:
gam = LinearGAM(basis_1(0, lam=60), fit_intercept=True).fit(X, y)
else:
print('no basis called for features.. error')
if summary:
print(gam.summary())
plot_gam_partial_dependencies(gam, features, target)
def plot_gam_partial_dependencies(gam, features, target):
for i, term in enumerate(gam.terms):
if term.isintercept:
continue
XX = gam.generate_X_grid(term=i)
pdep, confi = gam.partial_dependence(term=i, X=XX, width=0.95)
plt.figure()
plt.plot(XX[:, term.feature], pdep)
plt.plot(XX[:, term.feature], confi, c='r', ls='--')
plt.xlabel(f'{features[i]}', fontsize=14)
plt.ylabel(f'{target}', fontsize=14)
plt.title(f'Functional dependence of Y on X', fontsize=14)
plt.show()
def graph_y_vs_dose_age_sex(df=None, x=None, x2=None, x3=None, y=None, hue=None,
dose_x_size='Age (months)', multiplier=12):
f, axes = plt.subplots(1, 3, figsize=(15,5), sharey=True, sharex=False)
fontsize=16
colors = sns.color_palette('Paired', len(df['Sample ID'].unique())),
t = (0.7,)
test = [x + t for x in colors[0]]
# DOSE vs. Y
sns.regplot(x=x, y=y, data=df, ax=axes[0], color=test[4],
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':df[dose_x_size]*multiplier})
# AGE vs. Y
# male O markers
sns.regplot(x=x2, y=y, data=df[df['Sex'] == 'Male'], ax=axes[1], color=test[8], marker='o', fit_reg=False,
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':175,})
# female X markers
sns.regplot(x=x2, y=y, data=df[df['Sex'] == 'Female'], ax=axes[1], color=test[8], marker='X', fit_reg=False,
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':200,})
# plotting just the linear reg
sns.regplot(x=x2, y=y, data=df, ax=axes[1], color=test[8], scatter_kws={'s':0,})
# creating custom legend
handles, labels = [], []
line1 = lines.Line2D([], [], color=test[8], alpha=.8, marker='o', mew=1, mec='black')
line2 = lines.Line2D([], [], color=test[8], alpha=.8, marker='X', mew=1, mec='black')
handles.append(line1)
handles.append(line2)
labels.append('Male')
labels.append('Female')
axes[1].legend(handles, labels, loc='upper right',ncol=1, fancybox=True,
fontsize=fontsize, markerscale=2)
# SEX vs. Y
palette_cust = {'Male':test[0], 'Female':test[10]}
sns.boxplot(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male', 'Female'], data=df, ax=axes[2],)
for patch in axes[2].artists:
r, g, b, a = patch.get_facecolor()
patch.set_facecolor((r, g, b, .6))
sns.swarmplot(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male', 'Female'], data=df, ax=axes[2],
size=12, edgecolor='black', linewidth=1, **{'alpha':0.8})
x_name = 'Reasonable Total Life Time Dose (mGy)'
axes[0].set_xlabel(x_name, fontsize=fontsize)
axes[0].set_ylabel(y, fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[1].set_xlabel(x2, fontsize=fontsize)
axes[1].set_ylabel('', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[2].set_xlabel(x3, fontsize=fontsize)
axes[2].set_ylabel('', fontsize=fontsize)
axes[2].tick_params(labelsize=fontsize)
# axes[0].set_xlim(-50,700)
# axes[1].set_xlim(-4,55)
if y == 'Mean Telomere Length (Telo-FISH)':
axes[0].set_ylim(0.2,1.6)
axes[1].set_ylim(0.2,1.6)
y_name = y
elif y == 'Mean Telomere Length (qPCR)':
axes[0].set_ylim(0.6,1.8)
axes[1].set_ylim(0.6,1.8)
y_name = y
elif y == 'Cortisol (pg/mg)':
axes[0].set_ylim(-3, 35)
y_name = y.replace('/', '')
elif y == 'Average # of dicentrics per cell':
axes[0].set_ylim(-0.005, .065)
y_name = y
plt.tight_layout()
plt.savefig(f'graphs/main figures/{y_name} vs {x} and {x2}.png', dpi=600, bbox_inches='tight')
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='black',
bbox=[0, 0, 1, 1], header_columns=0, path=None,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in six.iteritems(mpl_table._cells):
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
plt.tight_layout()
if path != None:
plt.savefig(path, dpi=600, bbox_inches='tight')
plt.close()
[record statistics] avg_line_length: 35.850575 | max_line_length: 125 | alphanum_fraction: 0.589879 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,958 | score_documentation: 0.211499

[record] hexsha: 1508aa76e743b64f436cbb0a8c19cf6751c48d1b | size: 4,684 | ext: py | lang: Python
max_*_repo_path: src/xia2/cli/report.py | max_*_repo_name: graeme-winter/xia2 | max_*_repo_head_hexsha: e00d688137d4ddb4b125be9a3f37ae00265886c2 | max_*_repo_licenses: ["BSD-3-Clause"] (identical across the stars, issues and forks columns)
max_stars_count: 10 (events 2015-10-30T06:36:55.000Z to 2021-12-10T20:06:22.000Z) | max_issues_count: 528 (events 2015-11-24T08:20:12.000Z to 2022-03-21T21:47:29.000Z) | max_forks_count: 14 (events 2016-03-15T22:07:03.000Z to 2020-12-14T07:13:35.000Z)
content:
import json
import os
import sys
from collections import OrderedDict
import iotbx.phil
import xia2.Handlers.Streams
from dials.util.options import OptionParser
from jinja2 import ChoiceLoader, Environment, PackageLoader
from xia2.Modules.Report import Report
from xia2.XIA2Version import Version
phil_scope = iotbx.phil.parse(
"""\
title = 'xia2 report'
.type = str
prefix = 'xia2'
.type = str
log_include = None
.type = path
include scope xia2.Modules.Analysis.phil_scope
json {
indent = None
.type = int(value_min=0)
}
""",
process_includes=True,
)
help_message = """
"""
def run(args):
usage = "xia2.report [options] scaled_unmerged.mtz"
parser = OptionParser(
usage=usage, phil=phil_scope, check_format=False, epilog=help_message
)
params, options, args = parser.parse_args(
show_diff_phil=True, return_unhandled=True
)
if len(args) == 0:
parser.print_help()
return
unmerged_mtz = args[0]
report = Report.from_unmerged_mtz(unmerged_mtz, params, report_dir=".")
# xtriage
xtriage_success, xtriage_warnings, xtriage_danger = None, None, None
if params.xtriage_analysis:
try:
xtriage_success, xtriage_warnings, xtriage_danger = report.xtriage_report()
except Exception as e:
params.xtriage_analysis = False
print("Exception running xtriage:")
print(e)
json_data = {}
if params.xtriage_analysis:
json_data["xtriage"] = xtriage_success + xtriage_warnings + xtriage_danger
(
overall_stats_table,
merging_stats_table,
stats_plots,
) = report.resolution_plots_and_stats()
json_data.update(stats_plots)
json_data.update(report.batch_dependent_plots())
json_data.update(report.intensity_stats_plots(run_xtriage=False))
json_data.update(report.pychef_plots())
resolution_graphs = OrderedDict(
(k, json_data[k])
for k in (
"cc_one_half",
"i_over_sig_i",
"second_moments",
"wilson_intensity_plot",
"completeness",
"multiplicity_vs_resolution",
)
if k in json_data
)
if params.include_radiation_damage:
batch_graphs = OrderedDict(
(k, json_data[k])
for k in (
"scale_rmerge_vs_batch",
"i_over_sig_i_vs_batch",
"completeness_vs_dose",
"rcp_vs_dose",
"scp_vs_dose",
"rd_vs_batch_difference",
)
)
else:
batch_graphs = OrderedDict(
(k, json_data[k])
for k in ("scale_rmerge_vs_batch", "i_over_sig_i_vs_batch")
)
misc_graphs = OrderedDict(
(k, json_data[k])
for k in ("cumulative_intensity_distribution", "l_test", "multiplicities")
if k in json_data
)
for k, v in report.multiplicity_plots().items():
misc_graphs[k] = {"img": v}
styles = {}
for axis in ("h", "k", "l"):
styles["multiplicity_%s" % axis] = "square-plot"
loader = ChoiceLoader(
[PackageLoader("xia2", "templates"), PackageLoader("dials", "templates")]
)
env = Environment(loader=loader)
if params.log_include:
with open(params.log_include, "rb") as fh:
log_text = fh.read().decode("utf-8")
else:
log_text = ""
template = env.get_template("report.html")
html = template.render(
page_title=params.title,
filename=os.path.abspath(unmerged_mtz),
space_group=report.intensities.space_group_info().symbol_and_number(),
unit_cell=str(report.intensities.unit_cell()),
mtz_history=[h.strip() for h in report.mtz_object.history()],
xtriage_success=xtriage_success,
xtriage_warnings=xtriage_warnings,
xtriage_danger=xtriage_danger,
overall_stats_table=overall_stats_table,
merging_stats_table=merging_stats_table,
cc_half_significance_level=params.cc_half_significance_level,
resolution_graphs=resolution_graphs,
batch_graphs=batch_graphs,
misc_graphs=misc_graphs,
styles=styles,
xia2_version=Version,
log_text=log_text,
)
with open("%s-report.json" % params.prefix, "w") as fh:
json.dump(json_data, fh, indent=params.json.indent)
with open("%s-report.html" % params.prefix, "wb") as fh:
fh.write(html.encode("utf-8", "xmlcharrefreplace"))
def run_with_log():
xia2.Handlers.Streams.setup_logging(
logfile="xia2.report.txt", debugfile="xia2.report-debug.txt"
)
run(sys.argv[1:])
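# Example invocation (assumed, mirroring the usage string above):
#   xia2.report scaled_unmerged.mtz log_include=xia2.txt json.indent=2
# With the default prefix this writes xia2-report.html and xia2-report.json
# to the working directory.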
[record statistics] avg_line_length: 28.216867 | max_line_length: 87 | alphanum_fraction: 0.637916 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 850 | score_documentation: 0.181469

[record] hexsha: 150bff7433b6fabe00d05feee353f17bc33f7d36 | size: 757 | ext: py | lang: Python
max_*_repo_path: minoan_project/minoan_project/urls.py | max_*_repo_name: mtzirkel/minoan | max_*_repo_head_hexsha: 3eadeb1f73acf261e2f550642432ea5c25557ecb | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
from . import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='base.html')),
url(r'^admin/', include(admin.site.urls)),
#login
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
#home
url(r'^home/$', views.home),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[record statistics] avg_line_length: 27.035714 | max_line_length: 89 | alphanum_fraction: 0.698811 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 304 | score_documentation: 0.401585

[record] hexsha: 150c07692f09dbc4c2bc2f82c96435eb48b056d8 | size: 324 | ext: py | lang: Python
max_*_repo_path: algorithm/__init__.py | max_*_repo_name: sirCamp/bioinformatics | max_*_repo_head_hexsha: 2609044c57eba1097263829f9db579cd1825b8bb | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from algorithm.InsertionLengthAlgorithm import InsertionLengthAlgorithm
from algorithm.PhysicalCoverageAlgorithm import PhysicalCoverageAlgorithm
from algorithm.SequenceCoverageAlgorithm import SequenceCoverageAlgorithm
from algorithm.CigarAlgorithm import CigarAlgorithm
from algorithm.KmersAlgorithm import KmersAlgorithm
[record statistics] avg_line_length: 54 | max_line_length: 73 | alphanum_fraction: 0.92284 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

[record] hexsha: 150e69b2f9539045223d00d448f50c262f488903 | size: 1,874 | ext: py | lang: Python
max_*_repo_path: attackMain.py | max_*_repo_name: saurabhK99/substitution-cipher | max_*_repo_head_hexsha: dcf69cd4866ce7408eda6faf03ddd9b601bc3fec | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from tkinter import *
from attack import *
#calls letter frequency attack
def attack(on, cipherTxt):
plainTxt = str()
attack = LetterFrequencyAttack(cipherTxt, on)
for i in range(10):
plainTxt = plainTxt + attack.attack() + '\n\n'
answer.config(text = plainTxt)
#defining main window
root = Tk()
root.title('Letter Frequency Attack')
root.configure(
background='#221b1b',
)
root.option_add('*Font', 'helvetica 12')
root.option_add('*Foreground', 'whitesmoke')
root.option_add('*Background', '#221b1b')
root.option_add('*Entry.HighlightColor', 'whitesmoke')
#key value pairs for radio buttons
types = [
('MONOALPHABETIC_CIPHER', 'MONOALPHABETIC_CIPHER'),
('ADDITIVE_CIPHER', 'ADDITIVE_CIPHER')
]
#variable to store current selection of radio button
attackOn= StringVar()
attackOn.set('MONOALPHABETIC_CIPHER')
Label(root, text='ATTACK ON').grid(row=0, column=0, padx=20)
#radio buttons
for i in range(2):
Radiobutton(
root,
text=types[i][0],
value=types[i][1],
variable=attackOn,
highlightthickness=0,
activebackground='#221b1b',
activeforeground='whitesmoke'
).grid(
row=0,
column=i+1,
padx=20,
pady=20
)
#label to show the result
answer = Label(root, text='ANSWER HERE', wraplength=700, justify=CENTER)
answer.grid(row=1, column=0, columnspan=3, pady=20)
#entry widget to input cipher text to crack
Label(root, text='CIPHER TXT').grid(row=6, column=0)
cipherTxt = Entry(root)
cipherTxt.grid(row=6, column=1, columnspan=2, pady=20)
#button to call attack()
Button(
root,
text='DECRYPT',
justify=CENTER,
command=lambda: attack(
attackOn.get(),
cipherTxt.get()
)
).grid(
row=7,
column=0,
columnspan=3,
pady=20
)
#mainloop of tkinter window
root.mainloop()
[record statistics] avg_line_length: 23.425 | max_line_length: 72 | alphanum_fraction: 0.657417 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 583 | score_documentation: 0.311099

[record] hexsha: 150ef1714addd55d364456c56a5bbe4b9e5b825d | size: 12,703 | ext: py | lang: Python
max_*_repo_path: eden.py | max_*_repo_name: nobesio/eden | max_*_repo_head_hexsha: c301abdc64647fde02e8117ea137db322a804739 | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from random import randint
import copy
# Auxiliary Function for rotating the DNA in each cycle.
def rotate(l,n):
return l[n:] + l[:n]
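# For example, rotate([0, 1, 2, 3], 1) returns [1, 2, 3, 0]; the Interpreter
# below uses this to advance an organism's DNA by one gene per executed cycle.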
# History is the object responsible for accounting all the organisms.
class History:
def __init__(self):
self.orgs = []
def addOrganism(self, org):
self.orgs.append(org)
def getGenepool(self):
genepool = []
genepooldetail = []
for organism in self.orgs:
if not organism.dna in genepool:
genepool.append(organism.dna)
genepooldetail.append([[organism.name], organism.dna, 1])
else:
for unit in genepooldetail:
if unit[1] == organism.dna:
unit[0].append(organism.name)
unit[2] += 1
return genepooldetail
# Organism is the structure for the living organisms.
class Organism:
def __init__(self, name, dna, energy):
self.memory = 0
self.name = name
self.dna = dna
self.energy = energy
self.size = len(dna)
self.age = 0
self.sons = 0
self.parent = ""
def __repr__(self):
return self.name + " E:" + str(self.energy) + "Y:" + str(self.age)
def toAge(self):
self.age += 1
def reportStatus(self):
print("Name: ", self.name)
print("DNA: ", self.dna)
print("Energy: ", self.energy)
print("Size: ", self.size)
print("Age: ", self.age)
def divide(self):
self.sons += 1
son = copy.deepcopy(self)
son.sons = 0
son.parent = self.name
son.name = son.name + "-" + str(self.sons)
son.age = 0
son.energy = 5
self.energy += -5
for x in range(randint(0,10)):
if randint(1,100) > 95:
print("MUTATION!")
if randint(0,1) == 0:
# ADD GEN
son.dna.insert(randint(0,len(son.dna)-1), randint(0,12))
else:
# REMOVE GEN
son.dna.pop(randint(0, len(son.dna)-1))
print(son.dna)
return son
def decreaseEnergy(self):
print("Decreasing from ", self.energy)
self.energy = self.energy - 1
def increaseEnergy(self, energy):
self.energy = self.energy + energy
# QuantumPackages are the "food" of this simulation. The name comes from the concept used in operating systems.
class QuantumPackage:
def __init__(self, quantums):
self.quantums = quantums
def __repr__(self):
return 'QP'
# Enviroment is the class responsible for holding all the living organisms.
class Enviroment:
def __init__(self, size):
self.size = size
self.landscape = [[0 for x in range(size)] for x in range(size)]
def reportStatus(self):
print("LANDSCAPE:")
for row in self.landscape:
print(row)
def getOrganismsCoor(self):
organisms = []
fila = 0
columna = 0
for row in self.landscape:
columna = 0
for element in row:
if isinstance(element,Organism):
organisms.append((fila, columna))
columna += 1
fila += 1
print("FOUND ", len(organisms))
return organisms
def getOrganisms(self):
orgs = []
for row in self.landscape:
for element in row:
if isinstance(element,Organism):
orgs.append(element)
return orgs
def countOrgs(self):
cont = 0
for row in self.landscape:
for element in row:
if isinstance(element, Organism):
cont += 1
return cont
# Time is the class responsible for aging the living organisms.
class Time:
def aging(self, enviroment):
for row in enviroment.landscape:
for element in row:
if isinstance(element, Organism):
element.toAge()
# Death is the class responsible for killing old or starving organisms.
class Death:
def __init__(self):
self.killed = []
def kill(self, enviroment):
fila=0
for row in enviroment.landscape:
columna = 0
for element in row:
if isinstance(element, Organism):
if element.energy <= 0 or element.age > 20:
self.killed.append(element)
print("Killing ", fila, columna)
enviroment.landscape[fila][columna] = 0
columna +=1
fila +=1
# Interpreter is the class that gives life to the organism. It executes the code in their DNA.
class Interpreter:
def interprete(self, enviroment):
def up():
enviroment.landscape[x][y].decreaseEnergy()
print("Move Up" , x, y)
if x > 0:
if enviroment.landscape[x-1][y] == 0:
enviroment.landscape[x-1][y] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
elif isinstance(enviroment.landscape[x-1][y],QuantumPackage):
enviroment.landscape[x][y].increaseEnergy(enviroment.landscape[x-1][y].quantums)
enviroment.landscape[x-1][y] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
def down():
enviroment.landscape[x][y].decreaseEnergy()
print("Move Down", x, y)
if x < enviroment.size-1:
if enviroment.landscape[x+1][y] == 0:
enviroment.landscape[x+1][y] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
elif isinstance(enviroment.landscape[x+1][y],QuantumPackage):
enviroment.landscape[x][y].increaseEnergy(enviroment.landscape[x+1][y].quantums)
enviroment.landscape[x+1][y] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
def right():
enviroment.landscape[x][y].decreaseEnergy()
print("Move Right", x, y)
if y < enviroment.size-1:
if enviroment.landscape[x][y+1] == 0:
enviroment.landscape[x][y+1] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
elif isinstance(enviroment.landscape[x][y+1],QuantumPackage):
enviroment.landscape[x][y].increaseEnergy(enviroment.landscape[x][y+1].quantums)
enviroment.landscape[x][y+1] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
def left():
enviroment.landscape[x][y].decreaseEnergy()
print("Move Left", x, y)
if y > 0:
if enviroment.landscape[x][y-1] == 0:
enviroment.landscape[x][y-1] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
elif isinstance(enviroment.landscape[x][y-1],QuantumPackage):
enviroment.landscape[x][y].increaseEnergy(enviroment.landscape[x][y-1].quantums)
enviroment.landscape[x][y-1] = enviroment.landscape[x][y]
enviroment.landscape[x][y] = 0
def divide():
if enviroment.landscape[x][y].energy > 7:
enviroment.landscape[x][y].decreaseEnergy()
sonX = randint(-1,1)
sonY = randint(-1,1)
if sonX != 0 or sonY != 0:
if x + sonX > 0 and x + sonX < enviroment.size-1:
if y + sonY > 0 and y + sonY < enviroment.size-1:
if enviroment.landscape[x + sonX][y + sonY] == 0:
enviroment.landscape[x + sonX][y + sonY] = enviroment.landscape[x][y].divide()
else:
enviroment.landscape[x][y].decreaseEnergy()
def writeOne():
print("WRITE 1")
enviroment.landscape[x][y].memory = 1
def writeCero():
print("WRITE 0")
enviroment.landscape[x][y].memory = 0
def ifTrue():
print("CHECKING iF TRUE")
if enviroment.landscape[x][y].memory != 1:
enviroment.landscape[x][y].dna = rotate(enviroment.landscape[x][y].dna, 1)
def checkUp():
print("CHECKING UP")
if x > 0:
if enviroment.landscape[x-1][y] != 0:
enviroment.landscape[x][y].memory = 1
else:
enviroment.landscape[x][y].memory = 0
else:
enviroment.landscape[x][y].memory = 0
def checkDown():
print("CHECKING DOWN")
if x < enviroment.size-1:
if enviroment.landscape[x+1][y] != 0:
enviroment.landscape[x][y].memory = 1
else:
enviroment.landscape[x][y].memory = 0
else:
enviroment.landscape[x][y].memory = 0
def checkRight():
print("CHECKING RIGHT")
if y < enviroment.size-1:
if enviroment.landscape[x][y+1] != 0:
enviroment.landscape[x][y].memory = 1
else:
enviroment.landscape[x][y].memory = 0
else:
enviroment.landscape[x][y].memory = 0
def checkLeft():
print("CHECKING LEFT")
if y > 0:
if enviroment.landscape[x][y-1] != 0:
enviroment.landscape[x][y].memory = 1
else:
enviroment.landscape[x][y].memory = 0
else:
enviroment.landscape[x][y].memory = 0
def checkEnergyDivide():
if enviroment.landscape[x][y].energy > 7:
enviroment.landscape[x][y].memory = 1
else:
enviroment.landscape[x][y].memory = 0
options = {0 : up,
1 : down,
2 : right,
3 : left,
4 : divide,
5 : writeOne,
6 : writeCero,
7 : ifTrue,
8 : checkUp,
9 : checkDown,
10 : checkRight,
11 : checkLeft,
12: checkEnergyDivide
}
for organismCoordinates in enviroment.getOrganismsCoor():
x = organismCoordinates[0]
y = organismCoordinates[1]
gen = enviroment.landscape[x][y].dna[0]
enviroment.landscape[x][y].dna = rotate(enviroment.landscape[x][y].dna, 1)
print("executing at ", x, y, "gene ", gen)
options[gen]()
if __name__ == '__main__':
book = History()
earth = Enviroment(10)
earth.reportStatus()
earth.landscape[0][0] = QuantumPackage(10)
earth.landscape[1][1] = Organism("Eva", [8,7,0,9,7,1,10,7,2,11,7,3,12,7,4], 15)
# Populate the Earth
for i in range(0,4):
x = randint(0, earth.size-1)
y = randint(0, earth.size-1)
if earth.landscape[x][y] == 0:
dna = []
for a in range(1,11):
dna.append(randint(0,12))
earth.landscape[x][y] = Organism("Eva"+str(i), dna, 15)
earth.reportStatus()
chronos = Time()
parca = Death()
god = Interpreter()
for i in range(0,200):
if earth.countOrgs() > 0:
print("cycle: ", i)
god.interprete((earth))
chronos.aging(earth)
parca.kill(earth)
earth.reportStatus()
for i in range(1,4):
x = randint(0,9)
y = randint(0,9)
if earth.landscape[x][y] == 0:
earth.landscape[x][y] = QuantumPackage(randint(5,10))
for org in earth.getOrganisms():
if not org in book.orgs:
book.addOrganism(org)
else:
print("EVERYONE DIED IN CYCLE: ", i)
break
print("Living:", len(earth.getOrganisms()))
print("GENEPOOL:", book.getGenepool())
[record statistics] avg_line_length: 37.919403 | max_line_length: 112 | alphanum_fraction: 0.492954 | count_classes: 10,457 | score_classes: 0.823191 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 995 | score_documentation: 0.078328

[record] hexsha: 1512acbfbf9725f996d722bba323e798347b6270 | size: 2,407 | ext: py | lang: Python
max_*_repo_path: examples/example_pipeline.py | max_*_repo_name: madconsulting/datanectar | max_*_repo_head_hexsha: 7177b907c72c92de31fb136740f33c509ed5d499 | max_*_repo_licenses: ["Unlicense"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import os
import datetime
from pathlib import Path
import pandas as pd
import luigi
PROCESSED_DIR = 'processed'
ROLLUP_DIR = 'rollups'
class PrepareDataTask(luigi.Task):
def __init__(self):
super().__init__()
self.last_processed_id = 0
if os.path.exists('last_processed_id.txt'):
try:
with open('last_processed_id.txt', 'r') as f:
self.last_processed_id = int(f.read())
except Exception as e:
print('Error reading last_processed_id.txt')
self.last_id = self.last_processed_id
self.df = pd.read_json('test_data/trip_data.json')
# Simulate only getting the latest (unprocessed).
self.df = self.df[self.df['id'] > self.last_processed_id]
if len(self.df):
self.last_id = int(self.df.iloc[-1]['id'])
def requires(self):
return None
def run(self):
if not os.path.exists(PROCESSED_DIR):
os.makedirs(PROCESSED_DIR)
# Simulate work
#import time
#time.sleep(10)
# Simulate error
#import random
#if random.random() > 0.5:
# raise Exception('Fake error')
output_path = f'{PROCESSED_DIR}/processed_{self.last_id}.parquet'
self.df.to_parquet(output_path)
with open('last_processed_id.txt', 'w') as f:
f.write(f'{self.last_id}')
def output(self):
output_path = f'{PROCESSED_DIR}/processed_{self.last_id}.parquet'
return luigi.LocalTarget(output_path)
class RollupTask(luigi.Task):
date_param = luigi.DateParameter(default=datetime.date.today())
rollup_dir = Path(ROLLUP_DIR)
def _output_path(self):
return f'{ROLLUP_DIR}/rollup_{self.date_param}.parquet'
def requires(self):
return PrepareDataTask()
def run(self):
if not os.path.exists(ROLLUP_DIR):
os.makedirs(ROLLUP_DIR)
data_dir = Path(PROCESSED_DIR)
df = pd.concat(
pd.read_parquet(parquet_file)
for parquet_file in data_dir.glob('*.parquet')
)
# Average travel times
rollup = df.groupby(['origin_id', 'destination_id'])['travel_time'].mean().to_frame()
rollup.to_parquet(self._output_path())
def output(self):
return luigi.LocalTarget(self._output_path())
if __name__ == '__main__':
luigi.run()
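# Example invocation (assumed): build today's rollup locally without a central
# scheduler daemon:
#   python example_pipeline.py RollupTask --local-scheduler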
[record statistics] avg_line_length: 28.317647 | max_line_length: 93 | alphanum_fraction: 0.617366 | count_classes: 2,221 | score_classes: 0.922725 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 597 | score_documentation: 0.248027

[record] hexsha: 151306af1c1480903dd00ab70e45e88f683fbe48 | size: 2,463 | ext: py | lang: Python
max_*_repo_path: scripts/tflite_model_tools/tflite/Metadata.py | max_*_repo_name: LaudateCorpus1/edgeai-tidl-tools | max_*_repo_head_hexsha: d98789769a711e5a3700dfdc20d877073bd87da7 | max_*_repo_licenses: ["CNRI-Python"] (identical across the stars, issues and forks columns)
max_stars_count: 15 (events 2021-09-05T03:43:54.000Z to 2022-03-29T14:17:29.000Z) | max_issues_count: 21 (events 2021-09-01T06:58:31.000Z to 2022-03-31T06:33:15.000Z) | max_forks_count: 6 (events 2021-09-22T06:44:19.000Z to 2022-02-07T06:28:35.000Z)
content:
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Metadata(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsMetadata(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Metadata()
x.Init(buf, n + offset)
return x
@classmethod
def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# Metadata
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Metadata
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Metadata
def Buffer(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
def MetadataStart(builder): builder.StartObject(2)
def MetadataAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def MetadataAddBuffer(builder, buffer): builder.PrependUint32Slot(1, buffer, 0)
def MetadataEnd(builder): return builder.EndObject()
class MetadataT(object):
# MetadataT
def __init__(self):
self.name = None # type: str
self.buffer = 0 # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
metadata = Metadata()
metadata.Init(buf, pos)
return cls.InitFromObj(metadata)
@classmethod
def InitFromObj(cls, metadata):
x = MetadataT()
x._UnPack(metadata)
return x
# MetadataT
def _UnPack(self, metadata):
if metadata is None:
return
self.name = metadata.Name()
self.buffer = metadata.Buffer()
# MetadataT
def Pack(self, builder):
if self.name is not None:
name = builder.CreateString(self.name)
MetadataStart(builder)
if self.name is not None:
MetadataAddName(builder, name)
MetadataAddBuffer(builder, self.buffer)
metadata = MetadataEnd(builder)
return metadata
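# Example round trip with the object API above (sketch; `buf` is assumed to be
# a byte buffer produced from the TFLite flatbuffer schema):
# meta = Metadata.GetRootAsMetadata(buf, 0)
# meta_t = MetadataT.InitFromObj(meta)
# print(meta_t.name, meta_t.buffer)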
[record statistics] avg_line_length: 29.674699 | max_line_length: 131 | alphanum_fraction: 0.657734 | count_classes: 1,967 | score_classes: 0.79862 | count_generators: 0 | score_generators: 0 | count_decorators: 676 | score_decorators: 0.274462 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 197 | score_documentation: 0.079984

[record] hexsha: 15136d40366243c73182b9f6916a6c550042f55f | size: 1,124 | ext: py | lang: Python
max_*_repo_path: kukur/config.py | max_*_repo_name: timeseer-ai/kukur | max_*_repo_head_hexsha: 28210ff0bde396d961b60828782fef56e326b319 | max_*_repo_licenses: ["ECL-2.0", "Apache-2.0"] (identical across the stars, issues and forks columns)
max_stars_count: 2 (events 2021-09-12T08:29:30.000Z to 2022-01-19T19:06:45.000Z) | max_issues_count: 34 (events 2021-03-16T08:21:01.000Z to 2022-03-21T07:30:28.000Z) | max_forks_count: 1 (events 2021-09-12T08:29:34.000Z to 2021-09-12T08:29:34.000Z)
content:
"""Read the Kukur configuration."""
# SPDX-FileCopyrightText: 2021 Timeseer.AI
#
# SPDX-License-Identifier: Apache-2.0
import glob
import toml
class InvalidIncludeException(Exception):
"""Raised when the include configuration is invalid."""
def __init__(self, message: str):
Exception.__init__(self, f"invalid include: {message}")
def from_toml(path):
"""Read the configuration from a TOML file, processing includes."""
config = toml.load(path)
for include_options in config.get("include", []):
if "glob" not in include_options:
raise InvalidIncludeException('"glob" is required')
for include_path in glob.glob(include_options["glob"]):
include_config = toml.load(include_path)
for k, v in include_config.items():
if k not in config:
config[k] = v
elif isinstance(config[k], list):
config[k].append(v)
elif isinstance(config[k], dict):
config[k].update(v)
else:
config[k] = v
return config
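# Example configuration exercising the include mechanism above (file names are
# illustrative only):
#
#   [[include]]
#   glob = "instance/*.toml"
#
# Every file matching the glob is merged into the main configuration: missing
# keys are copied over, existing list values get the included value appended,
# existing tables are updated, and other existing keys are overwritten by the
# included value.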
[record statistics] avg_line_length: 32.114286 | max_line_length: 71 | alphanum_fraction: 0.598754 | count_classes: 204 | score_classes: 0.181495 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 307 | score_documentation: 0.273132

[record] hexsha: 1514c4cab7976c14d2d2ff2686c1ed82e350d931 | size: 3,326 | ext: py | lang: Python
max_*_repo_path: scheduletest.py | max_*_repo_name: ambimanus/appsim | max_*_repo_head_hexsha: 8f60b3a736af8aa7f03435c28aef2685a3dbfbe3 | max_*_repo_licenses: ["MIT"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import time
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import epoch2num
import device_factory
if __name__ == '__main__':
amount = 50
devices = []
for i in range(amount):
device = device_factory.ecopower_4(i, i)
devices.append(device)
start = int(time.mktime(datetime(2010, 1, 2).timetuple()) // 60)
end = int(time.mktime(datetime(2010, 1, 3).timetuple()) // 60)
sample_time = start + 15 * 24
sample_dur = 16
P = [[] for d in devices]
T = [[] for d in devices]
Th = [[] for d in devices]
for now in range(start, sample_time):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
samples = []
for d in devices:
# d.components.sampler.setpoint_density = 0.1
samples.append(d.components.sampler.sample(100, sample_dur))
# samples = [d.components.sampler.sample(100, sample_dur) for d in devices]
schedule = np.zeros(sample_dur)
for idx, device in enumerate(devices):
# min_schedule_idx = np.argmin(np.sum(np.abs(samples[idx]), axis=1))
# device.components.scheduler.schedule = samples[idx][min_schedule_idx]
# schedule += samples[idx][min_schedule_idx]
max_schedule_idx = np.argmax(np.sum(np.abs(samples[idx]), axis=1))
device.components.scheduler.schedule = samples[idx][max_schedule_idx]
schedule += samples[idx][max_schedule_idx]
for now in range(sample_time, end):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
P = np.sum(P, axis=0)
Th = np.sum(Th, axis=0)
T = np.mean(T, axis=0)
ax = plt.subplot(2, 1, 1)
ax.grid(True)
tz = 60 # timezone deviation in minutes
x = epoch2num(np.arange((start + tz) * 60, (end + tz) * 60, 60))
Th = np.reshape(Th, (len(x) // 15, 15)).mean(axis=1)
ax.plot_date(x[::15], Th, color='magenta', label='P$_{th,out}$ (kW)', ls='-',
marker=None)
ax.legend()
ax = plt.subplot(2, 1, 2, sharex=ax)
ax.grid(True)
l1 = ax.plot_date(x, P, label='P$_{el}$ (kW)', ls='-', marker=None)
sched_x = epoch2num(np.arange(
(sample_time + tz) * 60, ((sample_time + tz) + sample_dur * 15) * 60, 60))
l2 = ax.plot_date(sched_x[::15], schedule, color='r', label='Schedule',
ls='-', marker=None)
ax = plt.twinx()
l3 = ax.plot_date(x, T, color='g', label='T (\\textdegree C)', ls='-', marker=None)
lines = l1 + l2 + l3
labels = [l.get_label() for l in lines]
ax.legend(lines, labels)
plt.gcf().autofmt_xdate()
# # Samples plot
# fig, ax = plt.subplots(len(samples))
# if len(samples) == 1:
# ax = [ax]
# for i, sample in enumerate(samples):
# t = np.arange(len(sample[0]))
# for s in sample:
# ax[i].plot(t, s)
plt.show()
[record statistics] avg_line_length: 35.010526 | max_line_length: 88 | alphanum_fraction: 0.585989 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 662 | score_documentation: 0.199038

[record] hexsha: 15165694e2716645ea22f6406f0f303943c423b8 | size: 329 | ext: py | lang: Python
max_*_repo_path: src/genie/libs/parser/iosxe/tests/ShowInstallState/cli/equal/golden_output3_expected.py | max_*_repo_name: ykoehler/genieparser | max_*_repo_head_hexsha: b62cf622c3d8eab77c7b69e932c214ed04a2565a | max_*_repo_licenses: ["Apache-2.0"] (identical across the stars, issues and forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
expected_output = {
"location": {
"R0 R1": {
"auto_abort_timer": "inactive",
"pkg_state": {
1: {
"filename_version": "17.08.01.0.149429",
"state": "U",
"type": "IMG",
}
},
}
}
}
[record statistics] avg_line_length: 23.5 | max_line_length: 60 | alphanum_fraction: 0.31307 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 114 | score_documentation: 0.346505

[record] hexsha: 1516d58cc828bc371a33c9b4a9ca474fdb7eba79 | size: 8,637 | ext: py | lang: Python
max_*_repo_path: lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py | max_*_repo_name: 714627034/Paddle-Lite | max_*_repo_head_hexsha: 015ba88a4d639db0b73603e37f83e47be041a4eb | max_*_repo_licenses: ["Apache-2.0"] (identical across the stars, issues and forks columns)
max_stars_count: 808 (events 2018-04-17T17:43:12.000Z to 2019-08-18T07:39:13.000Z) | max_issues_count: 728 (events 2018-04-18T08:15:25.000Z to 2019-08-16T07:14:43.000Z) | max_forks_count: 364 (events 2018-04-18T17:05:02.000Z to 2019-08-18T03:25:38.000Z)
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize, ConvTransposeOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestConvElementwiseFuse(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.X86, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
#conv or conv_transpose
Transpose = draw(st.sampled_from([True, False]))
#conv param or conv_transpose param
in_shape = draw(
st.lists(
st.integers(
min_value=3, max_value=128),
min_size=3,
max_size=3))
in_shape = [draw(st.integers(min_value=1, max_value=4))] + in_shape
weight_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
paddings = draw(
st.lists(
st.integers(
min_value=0, max_value=2), min_size=2, max_size=2))
dilations = draw(st.sampled_from([[2, 2]]))
groups = draw(st.sampled_from([1, 2, in_shape[1]]))
padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
strides = draw(st.sampled_from([[1, 1], [2, 2]]))
output_padding = draw(
st.sampled_from([[], [
draw(
st.integers(
min_value=0,
max_value=max(strides[0], dilations[0]) - 1)), draw(
st.integers(
min_value=0,
max_value=max(strides[1], dilations[1]) - 1))
]]))
scale_in = draw(st.floats(min_value=0.001, max_value=0.1))
scale_out = draw(st.floats(min_value=0.001, max_value=0.1))
if Transpose:
bias_sample_shape = weight_shape[1] * groups
else:
bias_sample_shape = weight_shape[0]
elementwise_bias_shape = [bias_sample_shape]
conv_out_shape = []
paddings_, dilations_ = UpdatePaddingAndDilation(
in_shape, weight_shape, paddings, dilations, groups,
padding_algorithm, strides)
if Transpose:
assume(in_shape[1] == weight_shape[0])
assume(in_shape[1] % groups == 0) #TODO
if len(output_padding):
assume(output_padding[0] < max(strides[0], dilations_[0]))
assume(output_padding[1] < max(strides[1], dilations_[1]))
conv_out_shape = [in_shape[0], weight_shape[1] * groups]
oh, ow = ConvTransposeOutputSize(in_shape, weight_shape,
dilations_, paddings_, strides)
if len(output_padding):
oh = oh + output_padding[0]
ow = ow + output_padding[1]
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
if len(output_padding):
conv_output_h = (oh + output_padding[0] + paddings[0] +
paddings[1] -
(dilations[0] *
(weight_shape[2] - 1) + 1)) / strides[0] + 1
                conv_output_w = (ow + output_padding[1] + paddings[0] +
paddings[1] -
(dilations[1] *
(weight_shape[3] - 1) + 1)) / strides[1] + 1
assume(in_shape[2] == (int)(conv_output_h))
assume(in_shape[3] == (int)(conv_output_w))
else:
assume(in_shape[1] == weight_shape[1] * groups)
assume(weight_shape[0] % groups == 0)
conv_out_shape = [in_shape[0], weight_shape[0]]
oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_,
paddings_, strides)
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
conv_type = ""
conv_attrs = {}
if Transpose:
conv_type = "conv2d_transpose"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out,
"output_size": [],
"output_padding": output_padding
}
else:
conv_type = "conv2d"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out
}
conv_op = OpConfig(
type=conv_type,
inputs={"Input": ["input_data"],
"Filter": ["filter_data"]},
outputs={"Output": ["conv_output_data"]},
attrs=conv_attrs)
elementwise_add_op = OpConfig(
type="elementwise_add",
inputs={"X": ["conv_output_data"],
"Y": ["add_bias_data"]},
outputs={"Out": ["output_data"]},
attrs={"axis": 1})
ops = [conv_op, elementwise_add_op]
self.ops = ops
program_config = ProgramConfig(
ops=ops,
weights={
"filter_data": TensorConfig(shape=weight_shape),
"add_bias_data": TensorConfig(shape=elementwise_bias_shape)
},
inputs={"input_data": TensorConfig(shape=in_shape)},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
config = CxxConfig()
return self.get_predictor_configs(), [self.ops[0].type], (1e-4, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(
quant=False,
max_examples=500,
passes=["lite_conv_elementwise_fuser_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
| 40.359813
| 125
| 0.554706
| 7,369
| 0.85319
| 0
| 0
| 0
| 0
| 0
| 0
| 1,176
| 0.136158
|
151724d850402f50ae0bbd91cc2f5825d03ab2de
| 22,871
|
py
|
Python
|
cfn_policy_validator/tests/validation_tests/test_resource_validator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 41
|
2021-09-30T01:28:51.000Z
|
2022-03-24T09:42:09.000Z
|
cfn_policy_validator/tests/validation_tests/test_resource_validator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 10
|
2021-09-30T08:13:11.000Z
|
2022-03-22T07:34:41.000Z
|
cfn_policy_validator/tests/validation_tests/test_resource_validator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 3
|
2021-11-29T21:13:30.000Z
|
2022-02-04T12:49:40.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import boto3
import copy
import unittest
from botocore.stub import ANY
from cfn_policy_validator.tests import account_config, offline_only, only_run_for_end_to_end
from cfn_policy_validator.tests.boto_mocks import mock_test_setup, BotoResponse, get_test_mode, TEST_MODE
from cfn_policy_validator.tests.validation_tests import FINDING_TYPE, mock_access_analyzer_resource_setup, \
MockAccessPreviewFinding, MockNoFindings, MockInvalidConfiguration, MockUnknownError, \
MockTimeout, MockValidateResourcePolicyFinding
from cfn_policy_validator.validation.validator import validate_parser_output, Validator
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.output import Output, Policy, Resource
resource_policy_with_no_findings = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': '*',
'Principal': {
'AWS': account_config.account_id
},
'Resource': f'arn:aws:sqs:{account_config.region}:{account_config.account_id}:resource1'
}
]
}
lambda_permissions_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "lambda:InvokeFunction",
"Resource": f"arn:aws:lambda:{account_config.region}:{account_config.account_id}:function:my-function"
}]
}
class BaseResourcePolicyTest(unittest.TestCase):
def setUp(self):
self.output = Output(account_config)
def add_resources_to_output(self, resource_type, resource_policy, resource_policy_2=None, configuration_1=None, configuration_2=None):
if resource_policy_2 is None:
resource_policy_2 = resource_policy
policy1 = Policy('policy1', copy.deepcopy(resource_policy))
resource1 = Resource('resource1', resource_type, policy1, configuration_1)
policy2 = Policy('policy2', copy.deepcopy(resource_policy_2))
resource2 = Resource('resource2', resource_type, policy2, configuration_2)
self.output.Resources = [
resource1,
resource2
]
@only_run_for_end_to_end
def create_archive_rule(self, resource_type_to_archive):
session = boto3.Session(region_name=account_config.region)
self.client = session.client('accessanalyzer')
response = self.client.list_analyzers(type='ACCOUNT')
self.actual_analyzer_name = next((analyzer['name'] for analyzer in response['analyzers'] if analyzer['status'] == 'ACTIVE'))
self.archive_rule_name = 'IgnoreRoleFindings'
self.client.create_archive_rule(
analyzerName=self.actual_analyzer_name,
ruleName='IgnoreRoleFindings',
filter={
'resourceType': {
'eq': [resource_type_to_archive]
}
}
)
@only_run_for_end_to_end
def delete_archive_rule(self):
self.client.delete_archive_rule(analyzerName=self.actual_analyzer_name, ruleName=self.archive_rule_name)
def assert_finding_is_equal(self, actual_finding, expected_policy_name, expected_resource_name, expected_code):
self.assertEqual(expected_policy_name, actual_finding.policyName)
self.assertEqual(expected_resource_name, actual_finding.resourceName)
self.assertEqual(expected_code, actual_finding.code)
def assert_has_findings(self, findings, errors=0, security_warnings=0, warnings=0, suggestions=0):
self.assertEqual(errors, len(findings.errors))
self.assertEqual(security_warnings, len(findings.security_warnings))
self.assertEqual(warnings, len(findings.warnings))
self.assertEqual(suggestions, len(findings.suggestions))
class WhenValidatingResources(BaseResourcePolicyTest):
def setUp(self):
self.output = Output(account_config)
@mock_access_analyzer_resource_setup(
MockUnknownError()
)
@offline_only
def test_unknown_access_preview_failure(self):
policy = Policy('ResourcePolicy', copy.deepcopy(resource_policy_with_no_findings))
resources = [
Resource('resource1', 'AWS::SQS::Queue', policy)
]
validator = Validator(account_config.account_id, account_config.region, account_config.partition)
with self.assertRaises(ApplicationError) as cm:
validator.validate_resources(resources)
self.assertEqual('Failed to create access preview for resource1. Reason: UNKNOWN_ERROR', str(cm.exception))
@mock_access_analyzer_resource_setup(
MockTimeout()
)
@offline_only
def test_unknown_access_preview_timeout(self):
policy = Policy('ResourcePolicy', copy.deepcopy(resource_policy_with_no_findings))
resources = [
Resource('resource1', 'AWS::SQS::Queue', policy)
]
validator = Validator(account_config.account_id, account_config.region, account_config.partition)
validator.maximum_number_of_access_preview_attempts = 2
with self.assertRaises(ApplicationError) as cm:
validator.validate_resources(resources)
self.assertEqual('Timed out after 5 minutes waiting for access analyzer preview to create.', str(cm.exception))
@mock_test_setup(
accessanalyzer=[
BotoResponse(
method='list_analyzers',
service_response={'analyzers': []},
expected_params={'type': 'ACCOUNT'}
),
BotoResponse(
method='create_analyzer',
service_response={'arn': 'arn:aws:access-analyzer:us-east-1:123456789123:analyzer/MyAnalyzer'},
expected_params={'analyzerName': ANY, 'type': 'ACCOUNT'}
)
],
assert_no_pending_responses=True
)
def test_if_no_analyzer_exists_in_account(self):
validator = Validator(account_config.account_id, account_config.region, account_config.partition)
validator.validate_resources([])
@mock_access_analyzer_resource_setup(
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION)
)
def test_with_resource_type_that_is_not_supported_by_access_previews(self):
output = Output(account_config)
policy = Policy('PermissionsPolicy', copy.deepcopy(lambda_permissions_policy_with_findings))
resource = Resource('resource1', 'Lambda', policy)
output.Resources = [resource]
findings = validate_parser_output(output)
self.assert_has_findings(findings, suggestions=1)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[0],
expected_policy_name='PermissionsPolicy',
expected_resource_name='resource1',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
class WhenValidatingResourcesWithNonActiveFindings(BaseResourcePolicyTest):
def setUp(self):
self.output = Output(account_config)
self.create_archive_rule(resource_type_to_archive='AWS::KMS::Key')
def tearDown(self):
self.delete_archive_rule()
@mock_access_analyzer_resource_setup(
MockAccessPreviewFinding(),
MockAccessPreviewFinding(finding_status='ARCHIVED')
)
def test_output_only_includes_active_findings(self):
self.add_resources_to_output('AWS::SQS::Queue', sqs_queue_policy_that_allows_external_access)
policy1 = Policy('policy1', copy.deepcopy(sqs_queue_policy_that_allows_external_access))
resource1 = Resource('resource1', 'AWS::SQS::Queue', policy1)
policy2 = Policy('policy2', copy.deepcopy(kms_key_policy_that_allows_external_access))
resource2 = Resource('resource2', 'AWS::KMS::Key', policy2)
self.output.Resources = [resource1, resource2]
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=1)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EXTERNAL_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockAccessPreviewFinding(finding_status='ARCHIVED'),
MockAccessPreviewFinding(finding_status='ARCHIVED')
)
def test_output_does_not_include_any_findings_when_all_are_archived(self):
self.add_resources_to_output('AWS::KMS::Key', kms_key_policy_that_allows_external_access)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=0)
sqs_queue_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": ["*"]
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": [f'{account_config.account_id}']
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": {"not": "valid"},
"Principal": {
"AWS": [f'{account_config.account_id}']
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
class WhenValidatingSqsQueuePolicy(BaseResourcePolicyTest):
@mock_access_analyzer_resource_setup(
MockAccessPreviewFinding(),
MockAccessPreviewFinding()
)
def test_with_sqs_policy_that_allows_external_access(self):
self.add_resources_to_output('AWS::SQS::Queue', sqs_queue_policy_that_allows_external_access)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=2)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EXTERNAL_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EXTERNAL_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION),
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION)
)
def test_with_sqs_policy_with_findings(self):
self.add_resources_to_output('AWS::SQS::Queue', sqs_queue_policy_with_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, suggestions=2)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockNoFindings(),
MockNoFindings()
)
def test_with_sqs_queue_policy_with_no_findings(self):
self.add_resources_to_output('AWS::SQS::Queue', sqs_queue_policy_with_no_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings)
@mock_access_analyzer_resource_setup(
MockInvalidConfiguration(),
MockInvalidConfiguration()
)
def test_with_invalid_sqs_queue_policy(self):
self.add_resources_to_output('AWS::SQS::Queue', sqs_queue_invalid_policy)
with self.assertRaises(ApplicationError) as cm:
validate_parser_output(self.output)
self.assertIn("Failed to create access preview for resource1. Validate that your trust or resource "
"policy's schema is correct.\nThe following validation findings were detected for this resource:", str(cm.exception))
kms_key_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": {"not": "valid"},
"Resource": "*"
}]
}
class WhenValidatingKmsKeyPolicy(BaseResourcePolicyTest):
@mock_access_analyzer_resource_setup(
MockAccessPreviewFinding(),
MockAccessPreviewFinding()
)
def test_with_kms_policy_that_allows_external_access(self):
self.add_resources_to_output('AWS::KMS::Key', kms_key_policy_that_allows_external_access)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=2)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EXTERNAL_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EXTERNAL_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION),
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION)
)
def test_with_kms_policy_with_findings(self):
self.add_resources_to_output('AWS::KMS::Key', kms_key_policy_with_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, suggestions=2)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockNoFindings(),
MockNoFindings()
)
def test_with_kms_policy_with_no_findings(self):
self.add_resources_to_output('AWS::KMS::Key', kms_key_policy_with_no_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings)
@mock_access_analyzer_resource_setup(
MockInvalidConfiguration(),
MockInvalidConfiguration()
)
def test_with_invalid_kms_policy(self):
self.add_resources_to_output('AWS::KMS::Key', kms_key_invalid_policy)
with self.assertRaises(ApplicationError) as cm:
validate_parser_output(self.output)
self.assertIn("Failed to create access preview for resource1. Validate that your trust or resource "
"policy's schema is correct.\nThe following validation findings were detected for this resource:", str(cm.exception))
def build_s3_bucket_policy_that_allows_external_access(resource_name):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {'AWS': "arn:aws:iam::123456789123:role/MyOtherRole"},
"Action": "*",
"Resource": [f"arn:aws:s3:::{resource_name}", f"arn:aws:s3:::{resource_name}/*"]
}]
}
def build_s3_bucket_policy_with_findings(resource_name):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": ["s3:PutObject", "s3:PutObjectAcl"],
"Resource": [f"arn:aws:s3:::{resource_name}/*"]
}]
}
def build_s3_bucket_policy_with_no_findings(resource_name):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": [f"arn:aws:iam::{account_config.account_id}:root"]},
"Action": ["s3:PutObject", "s3:PutObjectAcl"],
"Resource": [f"arn:aws:s3:::{resource_name}", f"arn:aws:s3:::{resource_name}/*"]
}]
}
s3_bucket_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": [f"arn:aws:iam::{account_config.account_id}:root"]},
"Action": ["s3:PutObject", "s3:PutObjectAcl"],
"Resource": {"not": "valid"}
}]
}
class WhenValidatingS3BucketPolicy(BaseResourcePolicyTest):
@mock_access_analyzer_resource_setup(
MockAccessPreviewFinding(custom_validate_policy_type='AWS::S3::Bucket'),
MockAccessPreviewFinding(custom_validate_policy_type='AWS::S3::Bucket')
)
def test_with_s3_bucket_policy_that_allows_external_access(self):
self.add_resources_to_output('AWS::S3::Bucket',
build_s3_bucket_policy_that_allows_external_access('resource1'),
build_s3_bucket_policy_that_allows_external_access('resource2'))
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=2)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EXTERNAL_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EXTERNAL_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION, custom_resource_type='AWS::S3::Bucket'),
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION, custom_resource_type='AWS::S3::Bucket')
)
def test_with_s3_bucket_policy_with_findings(self):
self.add_resources_to_output('AWS::S3::Bucket',
build_s3_bucket_policy_with_findings('resource1'),
build_s3_bucket_policy_with_findings('resource2'))
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, suggestions=2)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockNoFindings(custom_validate_policy_type='AWS::S3::Bucket'),
MockNoFindings(custom_validate_policy_type='AWS::S3::Bucket')
)
def test_with_s3_bucket_policy_with_no_findings(self):
self.add_resources_to_output('AWS::S3::Bucket',
build_s3_bucket_policy_with_no_findings('resource1'),
build_s3_bucket_policy_with_no_findings('resource2'))
findings = validate_parser_output(self.output)
self.assert_has_findings(findings)
@mock_access_analyzer_resource_setup(
MockInvalidConfiguration(),
MockInvalidConfiguration()
)
def test_with_invalid_s3_bucket_policy(self):
self.add_resources_to_output('AWS::S3::Bucket', s3_bucket_invalid_policy)
with self.assertRaises(ApplicationError) as cm:
validate_parser_output(self.output)
self.assertIn("Failed to create access preview for resource1. Validate that your trust or resource "
"policy's schema is correct.\nThe following validation findings were detected for this resource:", str(cm.exception))
secrets_manager_resource_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": f"arn:aws:iam::777888999444:root"},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": {"not": "valid"},
"Resource": "*"
}]
}
class WhenValidatingSecretsManagerResourcePolicy(BaseResourcePolicyTest):
# This doesn't work because secrets manager uses the default KMS key if no KMS key is provided
# the default KMS key is not publicly accessible, so the secret is therefore not publicly accessible.
# To make this work, we'd need to look up the KMS key from the environment OR from the key policy if it had
# yet to be created
@unittest.skip("Skip until this is supported")
def test_with_secrets_manager_resource_policy_that_allows_external_access(self):
self.add_resources_to_output('AWS::SecretsManager::Secret', secrets_manager_resource_policy_that_allows_external_access)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, security_warnings=2)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EXTERNAL_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.security_warnings[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EXTERNAL_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION),
MockValidateResourcePolicyFinding(code='EMPTY_OBJECT_PRINCIPAL', finding_type=FINDING_TYPE.SUGGESTION)
)
def test_with_secrets_manager_resource_policy_with_findings(self):
self.add_resources_to_output('AWS::SecretsManager::Secret', secrets_manager_resource_policy_with_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings, suggestions=2)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[0],
expected_policy_name='policy1',
expected_resource_name='resource1',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
self.assert_finding_is_equal(
actual_finding=findings.suggestions[1],
expected_policy_name='policy2',
expected_resource_name='resource2',
expected_code='EMPTY_OBJECT_PRINCIPAL'
)
@mock_access_analyzer_resource_setup(
MockNoFindings(),
MockNoFindings()
)
def test_with_secrets_manager_resource_policy_with_no_findings(self):
self.add_resources_to_output('AWS::SecretsManager::Secret', secrets_manager_resource_policy_with_no_findings)
findings = validate_parser_output(self.output)
self.assert_has_findings(findings)
@mock_access_analyzer_resource_setup(
MockInvalidConfiguration(),
MockInvalidConfiguration()
)
def test_with_invalid_secrets_manager_resource_policy(self):
self.add_resources_to_output('AWS::SecretsManager::Secret', secrets_manager_resource_invalid_policy)
with self.assertRaises(ApplicationError) as cm:
validate_parser_output(self.output)
self.assertIn("Failed to create access preview for resource1. Validate that your trust or resource "
"policy's schema is correct.\nThe following validation findings were detected for this resource:", str(cm.exception))
| 33.437135
| 145
| 0.773425
| 17,605
| 0.769752
| 0
| 0
| 15,300
| 0.668969
| 0
| 0
| 5,963
| 0.260723
|
1518a255b1570670a775245440b45ebe73fe295d
| 6,672
|
py
|
Python
|
HDF4_H5_NETCDF/source2.7/h5py/tests/hl/test_datatype.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
h5py/tests/hl/test_datatype.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 13
|
2020-01-28T22:20:14.000Z
|
2022-03-11T23:20:14.000Z
|
h5py/tests/hl/test_datatype.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
"""
Tests for the h5py.Datatype class.
"""
from __future__ import absolute_import
from itertools import count
import numpy as np
import h5py
from ..common import ut, TestCase
class TestVlen(TestCase):
"""
Check that storage of vlen strings is carried out correctly.
"""
def assertVlenArrayEqual(self, dset, arr, message=None, precision=None):
        self.assertTrue(
dset.shape == arr.shape,
"Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
)
for (i, d, a) in zip(count(), dset, arr):
self.assertArrayEqual(d, a, message, precision)
def test_compound(self):
fields = []
fields.append(('field_1', h5py.special_dtype(vlen=str)))
fields.append(('field_2', np.int32))
dt = np.dtype(fields)
self.f['mytype'] = np.dtype(dt)
dt_out = self.f['mytype'].dtype.fields['field_1'][0]
self.assertEqual(h5py.check_dtype(vlen=dt_out), str)
def test_compound_vlen_bool(self):
vidt = h5py.special_dtype(vlen=np.uint8)
def a(items):
return np.array(items, dtype=np.uint8)
f = self.f
dt_vb = np.dtype([
('foo', vidt),
            ('logical', np.bool_)])
vb = f.create_dataset('dt_vb', shape=(4,), dtype=dt_vb)
data = np.array([(a([1,2,3]), True),
(a([1 ]), False),
(a([1,5 ]), True),
(a([], ), False),],
dtype=dt_vb)
vb[:] = data
actual = f['dt_vb'][:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertArrayEqual(data['logical'], actual['logical'])
dt_vv = np.dtype([
('foo', vidt),
('bar', vidt)])
f.create_dataset('dt_vv', shape=(4,), dtype=dt_vv)
dt_vvb = np.dtype([
('foo', vidt),
('bar', vidt),
            ('logical', np.bool_)])
vvb = f.create_dataset('dt_vvb', shape=(2,), dtype=dt_vvb)
dt_bvv = np.dtype([
            ('logical', np.bool_),
('foo', vidt),
('bar', vidt)])
bvv = f.create_dataset('dt_bvv', shape=(2,), dtype=dt_bvv)
data = np.array([(True, a([1,2,3]), a([1,2]) ),
(False, a([]), a([2,4,6])),],
dtype=bvv)
bvv[:] = data
actual = bvv[:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertVlenArrayEqual(data['bar'], actual['bar'])
self.assertArrayEqual(data['logical'], actual['logical'])
def test_compound_vlen_enum(self):
eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
vidt = h5py.special_dtype(vlen=np.uint8)
def a(items):
return np.array(items, dtype=np.uint8)
f = self.f
dt_vve = np.dtype([
('foo', vidt),
('bar', vidt),
('switch', eidt)])
vve = f.create_dataset('dt_vve', shape=(2,), dtype=dt_vve)
data = np.array([(a([1,2,3]), a([1,2]), 1),
(a([]), a([2,4,6]), 0),],
dtype=dt_vve)
vve[:] = data
actual = vve[:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertVlenArrayEqual(data['bar'], actual['bar'])
self.assertArrayEqual(data['switch'], actual['switch'])
def test_vlen_enum(self):
fname = self.mktemp()
arr1 = [[1],[1,2]]
dt1 = h5py.special_dtype(vlen=h5py.special_dtype(
enum=('i', dict(foo=1, bar=2))))
with h5py.File(fname,'w') as f:
df1 = f.create_dataset('test', (len(arr1),), dtype=dt1)
df1[:] = np.array(arr1)
with h5py.File(fname,'r') as f:
df2 = f['test']
dt2 = df2.dtype
arr2 = [e.tolist() for e in df2[:]]
self.assertEqual(arr1, arr2)
self.assertEqual(h5py.check_dtype(enum=h5py.check_dtype(vlen=dt1)),
h5py.check_dtype(enum=h5py.check_dtype(vlen=dt2)))
class TestOffsets(TestCase):
"""
Check that compound members with aligned or manual offsets are handled
correctly.
"""
def test_compound_vlen(self):
vidt = h5py.special_dtype(vlen=np.uint8)
eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
for np_align in (False, True):
dt = np.dtype([
('a', eidt),
('foo', vidt),
('bar', vidt),
('switch', eidt)], align=np_align)
np_offsets = [dt.fields[i][1] for i in dt.names]
for logical in (False, True):
if logical and np_align:
# Vlen types have different size in the numpy struct
self.assertRaises(TypeError, h5py.h5t.py_create, dt,
logical=logical)
else:
ht = h5py.h5t.py_create(dt, logical=logical)
offsets = [ht.get_member_offset(i)
for i in range(ht.get_nmembers())]
if np_align:
self.assertEqual(np_offsets, offsets)
def test_aligned_offsets(self):
dt = np.dtype('i2,i8', align=True)
ht = h5py.h5t.py_create(dt)
self.assertEqual(dt.itemsize, ht.get_size())
self.assertEqual(
[dt.fields[i][1] for i in dt.names],
[ht.get_member_offset(i) for i in range(ht.get_nmembers())]
)
def test_aligned_data(self):
dt = np.dtype('i2,f8', align=True)
data = np.empty(10, dtype=dt)
data['f0'] = np.array(np.random.randint(-100, 100, size=data.size),
dtype='i2')
data['f1'] = np.random.rand(data.size)
fname = self.mktemp()
with h5py.File(fname, 'w') as f:
f['data'] = data
with h5py.File(fname, 'r') as f:
self.assertArrayEqual(f['data'], data)
def test_out_of_order_offsets(self):
dt = np.dtype({
'names' : ['f1', 'f2', 'f3'],
'formats' : ['<f4', '<i4', '<f8'],
'offsets' : [0, 16, 8]
})
data = np.empty(10, dtype=dt)
data['f1'] = np.random.rand(data.size)
        data['f2'] = np.random.randint(-10, 11, data.size)
data['f3'] = np.random.rand(data.size)*-1
fname = self.mktemp()
with h5py.File(fname, 'w') as fd:
fd.create_dataset('data', data=data)
with h5py.File(fname, 'r') as fd:
self.assertArrayEqual(fd['data'], data)
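# A minimal round-trip sketch of the vlen helpers exercised above; the chosen
# base types (str and np.uint8) mirror the assertions in TestVlen and are
# illustrative only.
if __name__ == '__main__':
    _str_vlen = h5py.special_dtype(vlen=str)
    assert h5py.check_dtype(vlen=_str_vlen) == str
    _u8_vlen = h5py.special_dtype(vlen=np.uint8)
    assert h5py.check_dtype(vlen=_u8_vlen) == np.uint8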
| 33.527638
| 78
| 0.508243
| 6,485
| 0.971972
| 0
| 0
| 0
| 0
| 0
| 0
| 772
| 0.115707
|
151937c4e4552fde0563a4d7a5da8405bfdf819f
| 2,278
|
py
|
Python
|
conmon/regex.py
|
flashdagger/conmon
|
c6e75f115ad104ea7ecc7b14618efadefadad2f8
|
[
"MIT"
] | null | null | null |
conmon/regex.py
|
flashdagger/conmon
|
c6e75f115ad104ea7ecc7b14618efadefadad2f8
|
[
"MIT"
] | null | null | null |
conmon/regex.py
|
flashdagger/conmon
|
c6e75f115ad104ea7ecc7b14618efadefadad2f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
from typing import Pattern, Tuple, Iterator, Match, Union, Optional, List, Dict
from conmon.conan import storage_path
DECOLORIZE_REGEX = re.compile(r"[\u001b]\[\d{1,2}m", re.UNICODE)
CONAN_DATA_PATH = re.compile(
r"""(?x)
(?P<path>
([a-zA-Z]:)?
(?P<sep>[\\/])
(?:[\w\-.]+(?P=sep)){5,} # conservative choice of characters in path names
(?:build|package)(?P=sep)
[a-f0-9]{40}
(?P=sep)
)
"""
)
REF_PART_PATTERN = r"\w[\w\+\.\-]{1,50}"
REF_REGEX = re.compile(
rf"""(?x)
(?P<ref>
(?P<name>{REF_PART_PATTERN})/
(?P<version>{REF_PART_PATTERN})
(?:
@
(?:
(?P<user>{REF_PART_PATTERN})/
(?P<channel>{REF_PART_PATTERN})
)?
)?
)
"""
)
def shorten_conan_path(text: str, placeholder=r"...\g<sep>", count=0) -> str:
storage = str(storage_path())
text = CONAN_DATA_PATH.sub(placeholder, text, count=count)
if len(storage) > 20:
text = text.replace(storage, "(storage)")
return text
def compact_pattern(regex: Pattern) -> Tuple[str, int]:
"""take verbose pattern and remove all whitespace and comments"""
flags = regex.flags
# remove inline flags
pattern = re.sub(r"\(\?([aiLmsux])+\)", "", regex.pattern, flags=re.ASCII)
# remove whitespace in verbose pattern
if flags & re.VERBOSE:
pattern = re.sub(r"(?<!\\)\s+|\\(?= )|#[^\n]+\n", "", pattern, flags=re.ASCII)
flags -= re.VERBOSE
return pattern, flags
def finditer(
pattern: Union[Pattern[str], str], string: str, flags=0
) -> Iterator[Tuple[Optional[Match], str]]:
span_end = 0
for match in re.finditer(pattern, string, flags):
yield match, string[span_end : match.start()]
span_end = match.end()
yield None, string[span_end:]
def filter_by_regex(
string: str, mapping: Dict[str, List[Match]], **patterns: Union[Pattern[str], str]
) -> str:
for name, pattern in patterns.items():
matches, strings = zip(*finditer(pattern, string))
string = "".join(strings)
mapping.setdefault(name, []).extend(matches[:-1])
return string
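# A minimal sketch of how finditer() and filter_by_regex() compose; the sample
# string and the "digits" pattern are illustrative only.
if __name__ == "__main__":
    _collected: Dict[str, List[Match]] = {}
    _rest = filter_by_regex("a1b22c", _collected, digits=r"\d+")
    assert _rest == "abc"  # matched spans are cut out of the returned string
    assert [m.group() for m in _collected["digits"]] == ["1", "22"]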
| 28.475
| 87
| 0.565847
| 0
| 0
| 307
| 0.134767
| 0
| 0
| 0
| 0
| 838
| 0.367867
|
15195236d745c09ce968bf6af2311b1a616e1824
| 5,089
|
py
|
Python
|
src/north/cli/gscli/main.py
|
falcacicd/goldstone-mgmt
|
e7348011180e3c2dcd0558636ddc5c21779c7a3f
|
[
"Apache-2.0"
] | null | null | null |
src/north/cli/gscli/main.py
|
falcacicd/goldstone-mgmt
|
e7348011180e3c2dcd0558636ddc5c21779c7a3f
|
[
"Apache-2.0"
] | null | null | null |
src/north/cli/gscli/main.py
|
falcacicd/goldstone-mgmt
|
e7348011180e3c2dcd0558636ddc5c21779c7a3f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sysrepo as sr
import argparse
from prompt_toolkit import PromptSession
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.completion import Completer
import sys
import os
import logging
import asyncio
from .base import Object, InvalidInput, BreakLoop
from .onlp import Platform
from .tai import Transponder
logger = logging.getLogger(__name__)
stdout = logging.getLogger('stdout')
class Root(Object):
XPATH = '/'
def __init__(self, sess):
self.session = sess
super(Root, self).__init__(None)
@self.command()
def platform(line):
if len(line) != 0:
raise InvalidInput('usage: platform[cr]')
return Platform(self.session, self)
@self.command()
def transponder(line):
if len(line) != 0:
raise InvalidInput('usage: transponder[cr]')
return Transponder(self.session, self)
def __str__(self):
return ''
class GoldstoneShellCompleter(Completer):
def __init__(self, context):
self.context = context
def get_completions(self, document, complete_event):
return self.context.completion(document, complete_event)
class GoldstoneShell(object):
def __init__(self, sess=None, default_prompt='> ', prefix=''):
        if sess is None:
conn = sr.SysrepoConnection()
sess = conn.start_session()
self.context = Root(sess)
self.completer = GoldstoneShellCompleter(self.context)
self.default_input = ''
self.default_prompt = default_prompt
self.prefix = prefix
#TODO subscribe to global error message bus
def prompt(self):
c = self.context
l = [str(c)]
while c.parent:
l.insert(0, str(c.parent))
c = c.parent
return self.prefix + ('/'.join(l)[1:] if len(l) > 1 else '') + self.default_prompt
async def exec(self, cmd: list, no_fail=True):
ret = await self.context.exec_async(cmd, no_fail=no_fail)
if ret:
self.context = ret
self.completer.context = ret
self.default_input = ''
def bindings(self):
b = KeyBindings()
@b.add('?')
def _(event):
buf = event.current_buffer
original_text = buf.text
help_msg = event.app.shell.context.help(buf.text)
buf.insert_text('?')
buf.insert_line_below(copy_margin=False)
buf.insert_text(help_msg)
event.app.exit('')
event.app.shell.default_input = original_text
# @b.add(' ')
# def _(event):
# buf = event.current_buffer
# if len(buf.text.strip()) > 0 and len(buf.text) == buf.cursor_position:
# candidates = list(event.app.shell.context.completion(buf.document))
# if len(candidates) == 1:
# c = candidates[0]
# buf.insert_text(c.text[-c.start_position:])
# buf.cancel_completion()
# buf.insert_text(' ')
return b
async def loop_async(shell):
session = PromptSession()
while True:
c = shell.completer
p = shell.prompt()
b = shell.bindings()
session.app.shell = shell
line = await session.prompt_async(p, completer=c, key_bindings=b, default=shell.default_input)
if len(line) > 0:
await shell.exec(line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-c', '--command-string')
parser.add_argument('-k', '--keep-open', action='store_true')
parser.add_argument('-x', '--stdin', action='store_true')
args = parser.parse_args()
formatter = logging.Formatter('[%(asctime)s][%(levelname)-5s][%(name)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
if args.verbose:
console.setLevel(logging.DEBUG)
log = sr.Logs()
log.set_stderr(sr.SR_LL_DBG)
console.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
shf = logging.Formatter('%(message)s')
sh.setFormatter(shf)
stdout.setLevel(logging.DEBUG)
stdout.addHandler(sh)
shell = GoldstoneShell()
async def _main():
if args.stdin or args.command_string:
stream = sys.stdin if args.stdin else args.command_string.split(';')
for line in stream:
try:
await shell.exec(line, no_fail=False)
except InvalidInput as e:
stdout.info('failed to execute: {}'.format(line))
stdout.info(e)
sys.exit(1)
if not args.keep_open:
return
tasks = [loop_async(shell)]
try:
await asyncio.gather(*tasks)
except BreakLoop:
return
asyncio.run(_main())
if __name__ == '__main__':
main()
| 28.751412
| 118
| 0.592847
| 2,679
| 0.52643
| 0
| 0
| 753
| 0.147966
| 1,220
| 0.239733
| 834
| 0.163883
|
1519776f4ef0553b7494300ab7ab52a92881c3de
| 350
|
py
|
Python
|
InsertionSort/selectionSort/selectionsort/selectionSort.py
|
khaledshishani32/data-structures-and-algorithms-python
|
6397ef2467958b100747ef430ddfb3e691a97a0f
|
[
"MIT"
] | null | null | null |
InsertionSort/selectionSort/selectionsort/selectionSort.py
|
khaledshishani32/data-structures-and-algorithms-python
|
6397ef2467958b100747ef430ddfb3e691a97a0f
|
[
"MIT"
] | null | null | null |
InsertionSort/selectionSort/selectionsort/selectionSort.py
|
khaledshishani32/data-structures-and-algorithms-python
|
6397ef2467958b100747ef430ddfb3e691a97a0f
|
[
"MIT"
] | null | null | null |
def selection_sort(my_list):
    for i in range(len(my_list)):
        min_index = i
        for j in range(i + 1, len(my_list)):
            if my_list[min_index] > my_list[j]:
                min_index = j
        my_list[i], my_list[min_index] = my_list[min_index], my_list[i]
    print(my_list)
cus_list = [8, 4, 23, 42, 16, 15]
selection_sort(cus_list)
| 25
| 69
| 0.611429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1519b725bc8e51fd74703c95a095ecb5723fb0b3
| 437
|
py
|
Python
|
tests/creditcrawler_test.py
|
Mivinci/cqupt-piper
|
ce76a4334a2d7a7b75750d7bfac9efa747f968c7
|
[
"MIT"
] | 3
|
2019-09-08T16:22:30.000Z
|
2021-01-23T02:54:10.000Z
|
tests/creditcrawler_test.py
|
Mivinci/cqupt-piper
|
ce76a4334a2d7a7b75750d7bfac9efa747f968c7
|
[
"MIT"
] | 1
|
2020-01-11T05:13:43.000Z
|
2020-01-11T05:13:43.000Z
|
tests/creditcrawler_test.py
|
Mivinci/cqupt-piper
|
ce76a4334a2d7a7b75750d7bfac9efa747f968c7
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from prettytable import PrettyTable
# html = requests.get(
# 'http://jwzx.cqu.pt/student/xkxfTj.php',
# cookies={'PHPSESSID': 'o2r2fpddrj892dp1ntqddcp2hv'}).text
# soup = BeautifulSoup(html, 'html.parser')
# for tr in soup.find('table', {'id': 'AxfTjTable'}).findAll('tr')[1:]:
# tds = tr.findAll('td')
# print(tds[1:5])
table = PrettyTable(['aaa', 'bbb'])
print(table)
| 24.277778
| 71
| 0.665904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.695652
|
1519c99cb202a036f7cd0c6cfb24bf58a516d62b
| 602
|
py
|
Python
|
ClassMethod.py
|
AdarshKvT/python-oop
|
b619226807c3a0b434fe9789952cc86dc8cde9b7
|
[
"Apache-2.0"
] | null | null | null |
ClassMethod.py
|
AdarshKvT/python-oop
|
b619226807c3a0b434fe9789952cc86dc8cde9b7
|
[
"Apache-2.0"
] | null | null | null |
ClassMethod.py
|
AdarshKvT/python-oop
|
b619226807c3a0b434fe9789952cc86dc8cde9b7
|
[
"Apache-2.0"
] | null | null | null |
class Person:
number_of_people = 0
def __init__(self, name):
print("__init__ initiated")
self.name = name
print("calling add_person()")
Person.add_person()
@classmethod
def num_of_people(cls):
print("initiating num_of_person()")
return cls.number_of_people
@classmethod
def add_person(cls):
print("add_person(cls)")
cls.number_of_people += 1
# create an object of person
p1 = Person("KvT")
# creating another instance
p2 = Person("Shin")
# accessing the class method directly
print(Person.num_of_people())
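# num_of_people() returns 2 here: each Person() call above went through add_person().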
| 20.066667
| 43
| 0.647841
| 433
| 0.719269
| 0
| 0
| 224
| 0.372093
| 0
| 0
| 190
| 0.315615
|
1519fb893e14d2984bb652c58400576b1b324256
| 1,117
|
py
|
Python
|
webpack_manifest/templatetags/webpack_manifest_tags.py
|
temoto/python-webpack-manifest
|
bb10dbb718f2b41d8356c983b375b064e220d521
|
[
"MIT"
] | 55
|
2015-11-02T19:50:41.000Z
|
2022-03-06T21:48:36.000Z
|
webpack_manifest/templatetags/webpack_manifest_tags.py
|
temoto/python-webpack-manifest
|
bb10dbb718f2b41d8356c983b375b064e220d521
|
[
"MIT"
] | 7
|
2015-09-16T05:24:37.000Z
|
2018-07-25T23:10:30.000Z
|
webpack_manifest/templatetags/webpack_manifest_tags.py
|
temoto/python-webpack-manifest
|
bb10dbb718f2b41d8356c983b375b064e220d521
|
[
"MIT"
] | 10
|
2016-03-06T16:30:00.000Z
|
2020-08-12T01:41:51.000Z
|
from django import template
from django.conf import settings
from webpack_manifest import webpack_manifest
if not hasattr(settings, 'WEBPACK_MANIFEST'):
raise webpack_manifest.WebpackManifestConfigError('`WEBPACK_MANIFEST` has not been defined in settings')
if 'manifests' not in settings.WEBPACK_MANIFEST:
raise webpack_manifest.WebpackManifestConfigError(
'`WEBPACK_MANIFEST[\'manifests\']` has not been defined in settings'
)
register = template.Library()
@register.simple_tag
def load_webpack_manifest(name):
if name not in settings.WEBPACK_MANIFEST['manifests']:
raise webpack_manifest.WebpackManifestConfigError(
'"%s" has not been defined in `WEBPACK_MANIFEST[\'manifests\']`' % name,
)
conf = settings.WEBPACK_MANIFEST['manifests'][name]
for prop in ('path', 'static_url', 'static_root'):
if prop not in conf:
raise webpack_manifest.WebpackManifestConfigError(
'"%s" has not been defined in `WEBPACK_MANIFEST[\'manifests\'][\'%s\']`' % (prop, name),
)
return webpack_manifest.load(**conf)
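# One possible settings shape accepted by the checks above; the manifest name
# 'main' and the paths are illustrative only.
#
#   WEBPACK_MANIFEST = {
#       'manifests': {
#           'main': {
#               'path': '/var/app/dist/manifest.json',
#               'static_url': '/static/',
#               'static_root': '/var/app/static/',
#           },
#       },
#   }
#
# In a template: {% load webpack_manifest_tags %} followed by
# {% load_webpack_manifest 'main' as manifest %}.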
| 34.90625
| 108
| 0.706356
| 0
| 0
| 0
| 0
| 632
| 0.565801
| 0
| 0
| 339
| 0.303491
|
151a77fa24452704d617da768baec7d8f8f8b186
| 2,668
|
py
|
Python
|
utilities/jaccard_utilities.py
|
jjc2718/netreg
|
292540e911cdfbe18ff6fe0f9bfe8e055053d23c
|
[
"BSD-3-Clause"
] | null | null | null |
utilities/jaccard_utilities.py
|
jjc2718/netreg
|
292540e911cdfbe18ff6fe0f9bfe8e055053d23c
|
[
"BSD-3-Clause"
] | 6
|
2019-07-12T15:52:31.000Z
|
2020-01-13T18:14:41.000Z
|
utilities/jaccard_utilities.py
|
jjc2718/netreg
|
292540e911cdfbe18ff6fe0f9bfe8e055053d23c
|
[
"BSD-3-Clause"
] | 1
|
2019-07-18T18:28:59.000Z
|
2019-07-18T18:28:59.000Z
|
import os
import itertools as it
import pandas as pd
def compute_jaccard(v1, v2):
v1, v2 = set(v1), set(v2)
intersection = v1.intersection(v2)
union = v1.union(v2)
return ((len(intersection) / len(union) if len(union) != 0 else 0),
len(intersection),
len(union))
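# e.g. compute_jaccard([1, 2, 3], [2, 3, 4]) -> (0.5, 2, 4): two shared items
# out of four distinct ones (values here are illustrative only).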
def get_inter_method_similarity(sk_coefs_folds, torch_coefs_folds,
seeds, folds, signal='signal'):
inter_method_sims = []
for seed in seeds:
for fold in folds:
sk_coefs = sk_coefs_folds[signal][seed][fold][0]
sk_genes = sk_coefs_folds[signal][seed][fold][1]
sk_nz_coefs = (sk_coefs != 0)
sk_nz_genes = sk_genes[sk_nz_coefs]
torch_coefs = torch_coefs_folds[signal][seed][fold][0]
torch_genes = torch_coefs_folds[signal][seed][fold][1]
torch_nz_coefs = (torch_coefs != 0)
torch_nz_genes = torch_genes[torch_nz_coefs]
inter_method_sims.append(compute_jaccard(set(sk_nz_genes), set(torch_nz_genes))[0])
return inter_method_sims
def get_intra_method_similarity(sk_coefs_folds, torch_coefs_folds,
seeds, folds, signal='signal'):
intra_method_sims_sk = []
intra_method_sims_torch = []
for seed in seeds:
for f1, f2 in it.combinations(folds, 2):
# first for scikit-learn
sk_coefs_f1 = sk_coefs_folds[signal][seed][f1][0]
sk_genes_f1 = sk_coefs_folds[signal][seed][f1][1]
sk_coefs_f2 = sk_coefs_folds[signal][seed][f2][0]
sk_genes_f2 = sk_coefs_folds[signal][seed][f2][1]
sk_nz_coefs_f1 = (sk_coefs_f1 != 0)
sk_nz_genes_f1 = sk_genes_f1[sk_nz_coefs_f1]
sk_nz_coefs_f2 = (sk_coefs_f2 != 0)
sk_nz_genes_f2 = sk_genes_f2[sk_nz_coefs_f2]
intra_method_sims_sk.append(compute_jaccard(set(sk_nz_genes_f1), set(sk_nz_genes_f2))[0])
# then for torch
torch_coefs_f1 = torch_coefs_folds[signal][seed][f1][0]
torch_genes_f1 = torch_coefs_folds[signal][seed][f1][1]
torch_coefs_f2 = torch_coefs_folds[signal][seed][f2][0]
torch_genes_f2 = torch_coefs_folds[signal][seed][f2][1]
torch_nz_coefs_f1 = (torch_coefs_f1 != 0)
torch_nz_genes_f1 = torch_genes_f1[torch_nz_coefs_f1]
torch_nz_coefs_f2 = (torch_coefs_f2 != 0)
torch_nz_genes_f2 = torch_genes_f2[torch_nz_coefs_f2]
intra_method_sims_torch.append(compute_jaccard(set(torch_nz_genes_f1), set(torch_nz_genes_f2))[0])
return (intra_method_sims_sk, intra_method_sims_torch)
| 44.466667
| 110
| 0.642054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.02099
|
151aa06c987c92f779a676ea9b8988f697c25f28
| 2,600
|
py
|
Python
|
CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py
|
cassio645/Aprendendo-python
|
17a8b5a0e7abc3342d24841ed28093db13d2c130
|
[
"MIT"
] | null | null | null |
CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py
|
cassio645/Aprendendo-python
|
17a8b5a0e7abc3342d24841ed28093db13d2c130
|
[
"MIT"
] | null | null | null |
CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py
|
cassio645/Aprendendo-python
|
17a8b5a0e7abc3342d24841ed28093db13d2c130
|
[
"MIT"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from Interface import Interface
from Interface.Attribute import Attribute
class mytest(Interface):
pass
class C:
def m1(self, a, b):
"return 1"
return 1
def m2(self, a, b):
"return 2"
return 2
# testInstancesOfClassImplements
# YAGNI IC=Interface.impliedInterface(C)
class IC(Interface):
def m1(a, b):
"return 1"
def m2(a, b):
"return 2"
C.__implements__=IC
class I1(Interface):
def ma():
"blah"
class I2(I1): pass
class I3(Interface): pass
class I4(Interface): pass
class A(I1.deferred()):
__implements__=I1
class B:
__implements__=I2, I3
class D(A, B): pass
class E(A, B):
__implements__ = A.__implements__, C.__implements__
class FooInterface(Interface):
""" This is an Abstract Base Class """
foobar = Attribute("fuzzed over beyond all recognition")
def aMethod(foo, bar, bingo):
""" This is aMethod """
def anotherMethod(foo=6, bar="where you get sloshed", bingo=(1,3,)):
""" This is anotherMethod """
def wammy(zip, *argues):
""" yadda yadda """
def useless(**keywords):
""" useless code is fun! """
class Foo:
""" A concrete class """
__implements__ = FooInterface,
foobar = "yeah"
def aMethod(self, foo, bar, bingo):
""" This is aMethod """
return "barf!"
def anotherMethod(self, foo=6, bar="where you get sloshed", bingo=(1,3,)):
""" This is anotherMethod """
return "barf!"
def wammy(self, zip, *argues):
""" yadda yadda """
return "barf!"
def useless(self, **keywords):
""" useless code is fun! """
return "barf!"
foo_instance = Foo()
class Blah:
pass
new = Interface.__class__
FunInterface = new('FunInterface')
BarInterface = new('BarInterface', [FunInterface])
BobInterface = new('BobInterface')
BazInterface = new('BazInterface', [BobInterface, BarInterface])
| 22.033898
| 78
| 0.602308
| 1,517
| 0.583462
| 0
| 0
| 0
| 0
| 0
| 0
| 1,181
| 0.454231
|
151beeecee85f8f8f1854a4eb0eedf92f2702417
| 7,188
|
py
|
Python
|
noise_robust_cobras/noise_robust/datastructures/cycle.py
|
jonassoenen/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | 2
|
2020-07-30T15:09:53.000Z
|
2020-07-31T06:33:36.000Z
|
noise_robust_cobras/noise_robust/datastructures/cycle.py
|
magicalJohn/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | null | null | null |
noise_robust_cobras/noise_robust/datastructures/cycle.py
|
magicalJohn/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | 1
|
2021-12-12T11:11:25.000Z
|
2021-12-12T11:11:25.000Z
|
from collections import defaultdict
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
from noise_robust_cobras.noise_robust.datastructures.constraint_index import (
ConstraintIndex,
)
class Cycle:
"""
A class that represents a valid constraint cycle
attributes:
- constraints: a list of constraints the way they appear in the cycle (starts at a random point in the cycle)
- sorted_constraints: a tuple of constraints that is sorted for __eq__ and __hash__
- number_of_CLs: the number of CL constraints in this cycle
"""
def __init__(self, constraints, composed_from=None, number_of_CLs=None):
assert Cycle.is_valid_constraint_set_for_cycle(constraints)
self.constraints = set(constraints)
self.sorted_constraints = Cycle.sort_constraints(constraints)
self.composed_from = set(composed_from) if composed_from is not None else {self}
if number_of_CLs is None:
self.number_of_CLs = sum(
1 for constraint in constraints if constraint.is_CL()
)
else:
self.number_of_CLs = number_of_CLs
@staticmethod
def compose_multiple_cycles_ordered(cycles):
composed_cycle = cycles[0]
for to_compose in cycles[1:]:
composed_cycle = composed_cycle.compose_with(to_compose)
if composed_cycle is None:
break
return composed_cycle
@staticmethod
def compose_multiple_cycles(cycles):
composed_constraints = set(cycles[0].constraints)
composed_from = set(cycles[0].composed_from)
for to_compose in cycles[1:]:
composed_constraints.symmetric_difference_update(to_compose.constraints)
composed_from.symmetric_difference_update(to_compose.composed_from)
if not Cycle.is_valid_constraint_set_for_cycle(composed_constraints):
return None
return Cycle(composed_constraints, composed_from=composed_from)
@staticmethod
def make_cycle_from_raw_cons(raw_constraints):
constraints = Constraint.raw_constraints_to_constraints(raw_constraints)
return Cycle(constraints)
@staticmethod
def cycle_from_instances(instances):
instances = [int(i) for i in instances]
raw_constraints = list(zip(instances[:-1], instances[1:])) + [
(instances[0], instances[-1])
]
return Cycle.make_cycle_from_raw_cons(raw_constraints)
@staticmethod
def cycle_from_instances_constraint_index(instances, constraint_index):
instances = [int(i) for i in instances]
raw_constraints = list(zip(instances[:-1], instances[1:])) + [
(instances[0], instances[-1])
]
return Cycle(constraint_index.instance_tuples_to_constraints(raw_constraints))
@staticmethod
def is_valid_constraint_set_for_cycle(constraints):
if len(constraints) == 0:
return False
# check if each instance occurs twice
count = defaultdict(lambda: 0)
for constraint in constraints:
count[constraint.i1] += 1
count[constraint.i2] += 1
for key, value in count.items():
if value != 2:
return False
# check if all constraints are connected
all_sets = []
for constraint in constraints:
found_sets = [
s for s in all_sets if constraint.i1 in s or constraint.i2 in s
]
if len(found_sets) == 0:
all_sets.append({constraint.i1, constraint.i2})
elif len(found_sets) == 1:
found_sets[0].update(constraint.get_instance_tuple())
elif len(found_sets) == 2:
found_sets[0].update(found_sets[1])
all_sets.remove(found_sets[1])
return len(all_sets) == 1
def is_valid_cycle(self):
return Cycle.is_valid_constraint_set_for_cycle(self.constraints)
def get_sorted_constraint_list(self):
"""
:return: a list of all constraints in the order by which they appear in the cycle with an arbitrary starting constraints
"""
all_constraints = list(self.constraints)
start_constraint = all_constraints[0]
temp_index = ConstraintIndex()
for constraint in all_constraints[1:]:
temp_index.add_constraint(constraint)
current_list = [(start_constraint.get_instance_tuple(), start_constraint)]
current_instance = start_constraint.i2
while len(temp_index.constraints) > 0:
matching_constraints = temp_index.find_constraints_for_instance(
current_instance
)
if len(matching_constraints) == 1:
matching_constraint = list(matching_constraints)[0]
else:
raise Exception("Not a valid cycle!")
other_instance = matching_constraint.get_other_instance(current_instance)
current_list.append(
((current_instance, other_instance), matching_constraint)
)
current_instance = other_instance
temp_index.remove_constraint(matching_constraint)
# check if the cycle is complete
if start_constraint.i1 != current_instance:
raise Exception("Not a valid cycle!")
return current_list
def compose_with(self, other_cycle):
if len(self.constraints.intersection(other_cycle.constraints)) == 0:
return None
new_constraints = set(self.constraints).symmetric_difference(
other_cycle.constraints
)
if len(new_constraints) == 0:
return None
if not Cycle.is_valid_constraint_set_for_cycle(new_constraints):
return None
new_cycle = Cycle(
new_constraints,
other_cycle.composed_from.symmetric_difference(self.composed_from),
)
return new_cycle
def replace_constraint(self, old_constraint, new_constraint):
assert old_constraint in self.constraints
new_constraints = set(self.constraints)
new_constraints.remove(old_constraint)
new_constraints.add(new_constraint)
return Cycle(new_constraints)
@staticmethod
def sort_constraints(constraints):
return tuple(sorted(constraints))
def is_useful(self):
return self.number_of_CLs <= 2
def is_inconsistent(self):
return self.number_of_CLs == 1
def __iter__(self):
return self.constraints.__iter__()
def __len__(self):
return len(self.constraints)
def __eq__(self, other):
        if other is None:
return False
return self.sorted_constraints == other.sorted_constraints
def __contains__(self, item):
return item in self.constraints
def __hash__(self):
return hash(self.sorted_constraints)
def __repr__(self):
return str(self)
def __str__(self):
# return ",".join([str(constraint) for constraint in self.constraints])
return ",".join([str(con) for _, con in self.get_sorted_constraint_list()])
| 37.243523
| 128
| 0.657763
| 6,964
| 0.968837
| 0
| 0
| 2,784
| 0.387312
| 0
| 0
| 734
| 0.102115
|
151d22605d16726325dce1205b7a8ba505f35329
| 525
|
py
|
Python
|
python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py
|
seLain/codesnippets
|
ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289
|
[
"MIT"
] | null | null | null |
python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py
|
seLain/codesnippets
|
ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289
|
[
"MIT"
] | null | null | null |
python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py
|
seLain/codesnippets
|
ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289
|
[
"MIT"
] | null | null | null |
import unittest
from main import Solution
class TestSolutionMethods(unittest.TestCase):
solution = Solution()
def test_longestCommonPrefix(self):
# leetcode test
self.assertEqual(self.solution.removeDuplicates([1,1,2]), 2)
# customized test
self.assertEqual(self.solution.removeDuplicates([]), 0)
self.assertEqual(self.solution.removeDuplicates([1]), 1)
self.assertEqual(self.solution.removeDuplicates([1,1,2,3,3]), 3)
if __name__ == '__main__':
unittest.main()
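# Usage note (added for clarity): this module expects a sibling main.py defining
# Solution.removeDuplicates(nums), imported above. Run it with `python test.py`
# or `python -m unittest test`.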
| 30.882353
| 72
| 0.693333
| 434
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.08
|
12772bd26a04aaf3f825acfbb2e6f63963b94d81
| 246
|
py
|
Python
|
7KYU/word_splitter.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/word_splitter.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/word_splitter.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
SEPARATOR: list = [':', ',', '*', ';', '#', '|', '+', '%', '>', '?', '&', '=', '!']
def word_splitter(string: str) -> list:
for i in string:
if i in SEPARATOR:
string = string.replace(i, ' ')
return string.split()
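# Minimal usage sketch (illustrative input only): every separator listed above is
# replaced by a space and the result is split on whitespace.
if __name__ == '__main__':
    print(word_splitter('hello:world,this|is;a*test'))  # ['hello', 'world', 'this', 'is', 'a', 'test']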
| 35.142857
| 83
| 0.426829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.170732
|
12781452042b292ed356843d47c2a5e60478909f
| 7,998
|
py
|
Python
|
parsers/sales_order.py
|
njncalub/logistiko
|
74b1d17bc76538de6f5f70c7eca927780d6b4113
|
[
"MIT"
] | null | null | null |
parsers/sales_order.py
|
njncalub/logistiko
|
74b1d17bc76538de6f5f70c7eca927780d6b4113
|
[
"MIT"
] | null | null | null |
parsers/sales_order.py
|
njncalub/logistiko
|
74b1d17bc76538de6f5f70c7eca927780d6b4113
|
[
"MIT"
] | null | null | null |
import csv
from core.exceptions import InvalidFileException
def load_so_item_from_file(path, db_service):
with open(path) as csv_file:
csv_reader = csv.reader(csv_file)
error_msg = 'Missing required header: {}'
for i, row in enumerate(csv_reader, 1):
data = {
'id_sales_order_item': row[0],
'bob_id_sales_order_item': row[1],
'fk_sales_order': row[2],
'fk_sales_order_item_status': row[3],
'fk_delivery_type': row[4],
'unit_price': row[5],
'tax_amount': row[6],
'paid_price': row[7],
'name': row[8],
'sku': row[9],
'created_at': row[10],
'updated_at': row[11],
'last_status_change': row[12],
'original_unit_price': row[13],
'shipping_type': row[14],
'real_delivery_date': row[15],
'bob_id_supplier': row[16],
'is_marketplace': row[17],
}
if i == 1: # check if the header values line up
if not data['id_sales_order_item'] == 'id_sales_order_item':
raise InvalidFileException(
error_msg.format('id_sales_order_item'))
if not data['bob_id_sales_order_item'] == \
'bob_id_sales_order_item':
raise InvalidFileException(
error_msg.format('bob_id_sales_order_item'))
if not data['fk_sales_order'] == 'fk_sales_order':
raise InvalidFileException(
error_msg.format('fk_sales_order'))
if not data['fk_sales_order_item_status'] == \
'fk_sales_order_item_status':
raise InvalidFileException(
error_msg.format('fk_sales_order_item_status'))
if not data['fk_delivery_type'] == 'fk_delivery_type':
raise InvalidFileException(
error_msg.format('fk_delivery_type'))
if not data['unit_price'] == 'unit_price':
raise InvalidFileException(error_msg.format('unit_price'))
if not data['tax_amount'] == 'tax_amount':
raise InvalidFileException(error_msg.format('tax_amount'))
if not data['paid_price'] == 'paid_price':
raise InvalidFileException(error_msg.format('paid_price'))
if not data['name'] == 'name':
raise InvalidFileException(error_msg.format('name'))
if not data['sku'] == 'sku':
raise InvalidFileException(error_msg.format('sku'))
if not data['created_at'] == 'created_at':
raise InvalidFileException(error_msg.format('created_at'))
if not data['updated_at'] == 'updated_at':
raise InvalidFileException(error_msg.format('updated_at'))
if not data['last_status_change'] == 'last_status_change':
raise InvalidFileException(
error_msg.format('last_status_change'))
if not data['original_unit_price'] == 'original_unit_price':
raise InvalidFileException(
error_msg.format('original_unit_price'))
if not data['shipping_type'] == 'shipping_type':
raise InvalidFileException(
error_msg.format('shipping_type'))
if not data['real_delivery_date'] == 'real_delivery_date':
raise InvalidFileException(
error_msg.format('real_delivery_date'))
if not data['bob_id_supplier'] == 'bob_id_supplier':
raise InvalidFileException(
error_msg.format('bob_id_supplier'))
if not data['is_marketplace'] == 'is_marketplace':
raise InvalidFileException(
error_msg.format('is_marketplace'))
else:
process_so_item_data(data=data, db_service=db_service)
print(f'Processed {i} sales order item(s).')
def load_so_item_status_from_file(path, db_service):
with open(path) as csv_file:
csv_reader = csv.reader(csv_file)
error_msg = 'Missing required header: {}'
for i, row in enumerate(csv_reader, 1):
data = {
'id_sales_order_item_status': row[0],
'fk_oms_function': row[1],
'status': row[2],
'desc': row[3],
'deprecated': row[4],
'updated_at': row[5],
}
if i == 1: # check if the header values line up
if not data['id_sales_order_item_status'] == \
'id_sales_order_item_status':
raise InvalidFileException(
error_msg.format('id_sales_order_item_status'))
if not data['fk_oms_function'] == 'fk_oms_function':
raise InvalidFileException(
error_msg.format('fk_oms_function'))
if not data['status'] == 'status':
raise InvalidFileException(error_msg.format('status'))
if not data['desc'] == 'desc':
raise InvalidFileException(error_msg.format('desc'))
if not data['deprecated'] == 'deprecated':
raise InvalidFileException(error_msg.format('deprecated'))
if not data['updated_at'] == 'updated_at':
raise InvalidFileException(error_msg.format('updated_at'))
else:
process_so_item_status_data(data=data, db_service=db_service)
print(f'Processed {i} sales order item status.')
def load_so_item_status_history_from_file(path, db_service):
with open(path) as csv_file:
csv_reader = csv.reader(csv_file)
error_msg = 'Missing required header: {}'
for i, row in enumerate(csv_reader, 1):
data = {
'id_sales_order_item_status_history': row[0],
'fk_sales_order_item': row[1],
'fk_sales_order_item_status': row[2],
'created_at': row[3],
}
if i == 1: # check if the header values line up
if not data['id_sales_order_item_status_history'] == \
'id_sales_order_item_status_history':
raise InvalidFileException(
error_msg.format('id_sales_order_item_status_history'))
if not data['fk_sales_order_item'] == 'fk_sales_order_item':
raise InvalidFileException(
error_msg.format('fk_sales_order_item'))
if not data['fk_sales_order_item_status'] == \
'fk_sales_order_item_status':
raise InvalidFileException(
error_msg.format('fk_sales_order_item_status'))
if not data['created_at'] == 'created_at':
raise InvalidFileException(error_msg.format('created_at'))
else:
process_so_item_status_history_data(data=data,
db_service=db_service)
print(f'Processed {i} sales order item status history.')
def process_so_item_data(data, db_service):
if data['real_delivery_date'] == 'NULL':
data['real_delivery_date'] = None
db_service.add_so_item(**data)
def process_so_item_status_data(data, db_service):
db_service.add_so_item_status(**data)
def process_so_item_status_history_data(data, db_service):
db_service.add_so_item_status_history(**data)
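# Minimal usage sketch (hypothetical objects and file names): db_service is expected to
# expose add_so_item, add_so_item_status and add_so_item_status_history, matching the
# calls above; the CSV paths below are placeholders.
#
# db = SomeDatabaseService()  # hypothetical service object
# load_so_item_status_from_file('sales_order_item_status.csv', db_service=db)
# load_so_item_from_file('sales_order_item.csv', db_service=db)
# load_so_item_status_history_from_file('sales_order_item_status_history.csv', db_service=db)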
| 46.77193
| 79
| 0.542136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,240
| 0.28007
|
1278169f69007b0aff65ad2222788f61228ad8d6
| 8,342
|
py
|
Python
|
maps.py
|
BouncyButton/places-simulator
|
a1f5fc385750af9968cc3c6216ba20f5de4719fd
|
[
"MIT"
] | null | null | null |
maps.py
|
BouncyButton/places-simulator
|
a1f5fc385750af9968cc3c6216ba20f5de4719fd
|
[
"MIT"
] | null | null | null |
maps.py
|
BouncyButton/places-simulator
|
a1f5fc385750af9968cc3c6216ba20f5de4719fd
|
[
"MIT"
] | null | null | null |
import googlemaps
import secret
from datetime import datetime
import requests
import pickle
import time
gmaps = googlemaps.Client(key=secret.PLACES_API_KEY)
# lat = 45.411400
# lon = 11.887491
coordinates = [
(45.411400, 11.887491), # torre archimede
(45.409218, 11.877915), # piazza garibaldi
(45.407698, 11.873351), # piazza dei signori
(45.401403, 11.880813), # basilica di sant'antonio
]
# def find_places():
# results = gmaps.places_nearby(location=(lat, lon), type='bar', radius=500)
# print(len(results))
# return results
def find_places():
place_types = ['bar|restaurant|cafe|night_club']
f = open('maps_data.pickle', "rb")
data = pickle.load(f)
# data = dict()
# data['requests'] = []
f.close()
for lat, lon in coordinates:
for place_type in place_types:
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?" \
"location={1},{2}&radius=500&type={3}&key={0}".format( # &keyword=xxx
secret.PLACES_API_KEY, lat, lon, place_type)
execute_request(url, data)
print("Retrieved {0} item(s).".format(len(data['requests'])))
save_changes(data)
return data
def save_changes(data):
f = open('maps_data_tmp.pickle', "wb")
pickle.dump(data, f)
f.close()
import os
os.replace("maps_data_tmp.pickle", "maps_data.pickle")
def execute_request(url, data):
r = requests.get(url)
if r.status_code == 200:
pass
else:
print("Errore: ", r.status_code)
raise r.status_code
for item in r.json()['results']:
if item['place_id'] not in [place['place_id'] for place in data['requests']]:
data['requests'].append(item)
if r.json().get('next_page_token') is not None \
and r.json()['next_page_token'] is not None \
and r.json()['next_page_token'] != "":
time.sleep(5) # need to wait a bit..
print("new page!")
execute_request("https://maps.googleapis.com/maps/api/place/nearbysearch/json?pagetoken={0}&key={1}".format(
r.json()['next_page_token'], secret.PLACES_API_KEY), data)
# else:
# print(r.json(), "non ha next_page_token")
return r.json()
def reinitialize_data():
f = open('maps_data.pickle', "wb")
data = dict()
data['requests'] = []
pickle.dump(data, f)
f.close()
def read_data():
f = open('maps_data.pickle', "rb")
data = pickle.load(f)
# for item in data['requests']:
# print(item['name'])
print("Found {0} places.".format(len(data['requests'])))
return data
def get_details(place_id, data):
url = "https://maps.googleapis.com/maps/api/place/details/json?place_id={0}&fields=address_component,adr_address," \
"business_status,formatted_address,geometry,icon,name,photo,place_id,plus_code,type,url,utc_offset,vicinity," \
"formatted_phone_number,international_phone_number,opening_hours,website,price_level,rating,review," \
"user_ratings_total&key={1}&language=it".format(
place_id, secret.PLACES_API_KEY)
r = requests.get(url)
if r.status_code == 200:
pass
else:
print("Errore: ", r.status_code)
raise r.status_code
data['details'][place_id] = r.json()
def fill_details(data):
if data.get('details') is None:
data['details'] = dict()
ids = [place['place_id'] for place in data['requests']]
for place_id in ids:
        if data['details'].get(place_id) is None:  # save API calls by reusing cached details
get_details(place_id, data)
save_changes(data)
def word2vec_analysis(labels, weights=None, N=4, translate=True):
import gensim
import numpy as np
print("loading dataset...")
model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
print('dataset loaded :)')
    new_weights = weights  # fall back to the original weights when no translation is applied
    if translate:
def find_longest_word(word_list):
longest_word = max(word_list, key=len)
return longest_word
from googletrans import Translator
translator = Translator()
labels_en = []
new_weights = []
for label, weight in zip(labels, weights):
translated = translator.translate(label, dest='en').text
longest = find_longest_word(translated.split(" "))
try:
_ = model[longest]
labels_en.append(longest)
new_weights.append(weight)
except KeyError:
continue
print("Starting labels: ", labels)
print("Translated labels: ", labels_en)
labels = labels_en
# labels = ['cat', 'dog', 'mouse', 'lately', 'seldom', 'somehow', 'this', 'pencil', 'suitcase', 'pen']
X = np.array([model[label] for label in labels])
print(X.shape)
kmeans_analysis(X, labels, new_weights, N)
def kmeans_analysis(X, labels, weights=None, N=5):
from sklearn.cluster import KMeans
import numpy as np
kmeans = KMeans(n_clusters=N, random_state=0).fit(X, sample_weight=weights)
# labels[X.tolist().index(x)[0]]
clusters = [[] for x in kmeans.cluster_centers_]
for i, label in enumerate(kmeans.labels_):
clusters[label].append((labels[i], X[i]))
for cluster, cluster_center in zip(clusters, kmeans.cluster_centers_):
label_center = None
for point in cluster:
if (point[1] == cluster_center).all():
label_center = point[0]
# else:
# print(point[1], cluster_center)
print("Cluster has {0} item(s):".format(len(cluster)))
for point in cluster:
print(point[0])
def text_analysis(data):
precorpus = []
for item in data['details'].values():
if item['result'].get('reviews') is not None:
reviews = item['result']['reviews']
reviews_text = [x['text'] for x in reviews]
for text in reviews_text:
precorpus.append(text)
print("Found {0} reviews".format(len(precorpus)))
import re
# nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
# nltk.download('wordnet')
from nltk.stem.wordnet import WordNetLemmatizer
##Creating a list of stop words and adding custom stopwords
try:
stop_words = set(stopwords.words("italian"))
except LookupError:
import nltk
nltk.download('wordnet')
nltk.download('stopwords')
stop_words = set(stopwords.words("italian"))
# Creating a list of custom stopwords
    # Created after a visual analysis of word1.png
new_words = ["bar", "molto", "ottimo", "locale", "posto", "ben", "volta", "po", "più", "sempre", "padova", "ottimi",
"poco", "ottima"]
stop_words = stop_words.union(new_words)
corpus = []
for t in precorpus:
# Remove punctuations
text = re.sub('[^a-zA-Zùàèé]', ' ', t)
# Convert to lowercase
text = text.lower()
# remove tags
text = re.sub("</?.*?>", " <> ", text)
# remove special characters and digits
text = re.sub("(\\d|\\W)+", " ", text)
##Convert to list from string
text = text.split()
##Stemming
ps = PorterStemmer()
# Lemmatisation
lem = WordNetLemmatizer()
text = [lem.lemmatize(word) for word in text if not word in
stop_words]
text = " ".join(text)
corpus.append(text)
# Word cloud
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
wordcloud = WordCloud(
background_color='white',
stopwords=stop_words,
max_words=100,
max_font_size=50,
random_state=42
).generate(str(corpus))
print(wordcloud)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
# plt.show()
# plt.close()
fig.savefig("word1.png", dpi=900)
return wordcloud.words_
d = read_data()
occ = text_analysis(d)
word2vec_analysis(occ.keys(), list(occ.values()), N=12, translate=True)
| 29.167832
| 121
| 0.609686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,455
| 0.294118
|
12785f321ec0fa0181c3a4c19bc2048854ea35ad
| 31,231
|
py
|
Python
|
azure-iot-device/tests/iothub/test_sync_handler_manager.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
azure-iot-device/tests/iothub/test_sync_handler_manager.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
azure-iot-device/tests/iothub/test_sync_handler_manager.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import pytest
import threading
import time
from azure.iot.device.common import handle_exceptions
from azure.iot.device.iothub import client_event
from azure.iot.device.iothub.sync_handler_manager import SyncHandlerManager, HandlerManagerException
from azure.iot.device.iothub.sync_handler_manager import MESSAGE, METHOD, TWIN_DP_PATCH
from azure.iot.device.iothub.inbox_manager import InboxManager
from azure.iot.device.iothub.sync_inbox import SyncClientInbox
logging.basicConfig(level=logging.DEBUG)
# NOTE ON TEST IMPLEMENTATION:
# Despite having significant shared implementation between the sync and async handler managers,
# there are not shared tests. This is because while both have the same set of requirements and
# APIs, the internal implementation is different to an extent that it simply isn't really possible
# to test them to an appropriate degree of correctness with a shared set of tests.
# This means we must be very careful to always change both test modules when a change is made to
# shared behavior, or when shared features are added.
# NOTE ON TIMING/DELAY
# Several tests in this module have sleeps/delays in their implementation due to needing to wait
# for things to happen in other threads.
all_internal_receiver_handlers = [MESSAGE, METHOD, TWIN_DP_PATCH]
all_internal_client_event_handlers = [
"_on_connection_state_change",
"_on_new_sastoken_required",
"_on_background_exception",
]
all_internal_handlers = all_internal_receiver_handlers + all_internal_client_event_handlers
all_receiver_handlers = [s.lstrip("_") for s in all_internal_receiver_handlers]
all_client_event_handlers = [s.lstrip("_") for s in all_internal_client_event_handlers]
all_handlers = all_receiver_handlers + all_client_event_handlers
class ThreadsafeMock(object):
"""This class provides (some) Mock functionality in a threadsafe manner, specifically, it
ensures that the 'call_count' attribute will be accurate when the mock is called from another
thread.
It does not cover ALL mock functionality, but more features could be added to it as necessary
"""
def __init__(self):
self.call_count = 0
self.lock = threading.Lock()
def __call__(self, *args, **kwargs):
with self.lock:
self.call_count += 1
@pytest.fixture
def inbox_manager(mocker):
return InboxManager(inbox_type=SyncClientInbox)
@pytest.fixture
def handler():
def some_handler_fn(arg):
pass
return some_handler_fn
@pytest.mark.describe("SyncHandlerManager - Instantiation")
class TestInstantiation(object):
@pytest.mark.it("Initializes handler properties to None")
@pytest.mark.parametrize("handler_name", all_handlers)
def test_handlers(self, inbox_manager, handler_name):
hm = SyncHandlerManager(inbox_manager)
assert getattr(hm, handler_name) is None
@pytest.mark.it("Initializes receiver handler runner thread references to None")
@pytest.mark.parametrize(
"handler_name", all_internal_receiver_handlers, ids=all_receiver_handlers
)
def test_receiver_handler_runners(self, inbox_manager, handler_name):
hm = SyncHandlerManager(inbox_manager)
assert hm._receiver_handler_runners[handler_name] is None
@pytest.mark.it("Initializes client event handler runner thread reference to None")
def test_client_event_handler_runner(self, inbox_manager):
hm = SyncHandlerManager(inbox_manager)
assert hm._client_event_runner is None
@pytest.mark.describe("SyncHandlerManager - .stop()")
class TestStop(object):
@pytest.fixture(
params=[
"No handlers running",
"Some receiver handlers running",
"Some client event handlers running",
"Some receiver and some client event handlers running",
"All handlers running",
]
)
def handler_manager(self, request, inbox_manager, handler):
hm = SyncHandlerManager(inbox_manager)
if request.param == "Some receiver handlers running":
# Set an arbitrary receiver handler
hm.on_message_received = handler
elif request.param == "Some client event handlers running":
# Set an arbitrary client event handler
hm.on_connection_state_change = handler
elif request.param == "Some receiver and some client event handlers running":
# Set an arbitrary receiver and client event handler
hm.on_message_received = handler
hm.on_connection_state_change = handler
elif request.param == "All handlers running":
# NOTE: this sets all handlers to be the same fn, but this doesn't really
# make a difference in this context
for handler_name in all_handlers:
setattr(hm, handler_name, handler)
yield hm
hm.stop()
@pytest.mark.it("Stops all currently running handlers")
def test_stop_all(self, handler_manager):
handler_manager.stop()
for handler_name in all_internal_receiver_handlers:
assert handler_manager._receiver_handler_runners[handler_name] is None
assert handler_manager._client_event_runner is None
@pytest.mark.it(
"Stops only the currently running receiver handlers if the 'receiver_handlers_only' parameter is True"
)
def test_stop_only_receiver_handlers(self, handler_manager):
if handler_manager._client_event_runner is not None:
client_event_handlers_running = True
else:
client_event_handlers_running = False
handler_manager.stop(receiver_handlers_only=True)
# All receiver handlers have stopped
for handler_name in all_internal_receiver_handlers:
assert handler_manager._receiver_handler_runners[handler_name] is None
# If the client event handlers were running, they are STILL running
if client_event_handlers_running:
assert handler_manager._client_event_runner is not None
@pytest.mark.it("Completes all pending handler invocations before stopping the runner(s)")
def test_completes_pending(self, mocker, inbox_manager):
hm = SyncHandlerManager(inbox_manager)
# NOTE: We use two handlers arbitrarily here to show this happens for all handler runners
mock_msg_handler = ThreadsafeMock()
mock_mth_handler = ThreadsafeMock()
msg_inbox = inbox_manager.get_unified_message_inbox()
mth_inbox = inbox_manager.get_method_request_inbox()
for _ in range(200): # sufficiently many items so can't complete quickly
msg_inbox.put(mocker.MagicMock())
mth_inbox.put(mocker.MagicMock())
hm.on_message_received = mock_msg_handler
hm.on_method_request_received = mock_mth_handler
assert mock_msg_handler.call_count < 200
assert mock_mth_handler.call_count < 200
hm.stop()
time.sleep(0.1)
assert mock_msg_handler.call_count == 200
assert mock_mth_handler.call_count == 200
assert msg_inbox.empty()
assert mth_inbox.empty()
@pytest.mark.describe("SyncHandlerManager - .ensure_running()")
class TestEnsureRunning(object):
@pytest.fixture(
params=[
"All handlers set, all stopped",
"All handlers set, receivers stopped, client events running",
"All handlers set, all running",
"Some receiver and client event handlers set, all stopped",
"Some receiver and client event handlers set, receivers stopped, client events running",
"Some receiver and client event handlers set, all running",
"Some receiver handlers set, all stopped",
"Some receiver handlers set, all running",
"Some client event handlers set, all stopped",
"Some client event handlers set, all running",
"No handlers set",
]
)
def handler_manager(self, request, inbox_manager, handler):
# NOTE: this sets all handlers to be the same fn, but this doesn't really
# make a difference in this context
hm = SyncHandlerManager(inbox_manager)
if request.param == "All handlers set, all stopped":
for handler_name in all_handlers:
setattr(hm, handler_name, handler)
hm.stop()
elif request.param == "All handlers set, receivers stopped, client events running":
for handler_name in all_handlers:
setattr(hm, handler_name, handler)
hm.stop(receiver_handlers_only=True)
elif request.param == "All handlers set, all running":
for handler_name in all_handlers:
setattr(hm, handler_name, handler)
elif request.param == "Some receiver and client event handlers set, all stopped":
hm.on_message_received = handler
hm.on_method_request_received = handler
hm.on_connection_state_change = handler
hm.on_new_sastoken_required = handler
hm.stop()
elif (
request.param
== "Some receiver and client event handlers set, receivers stopped, client events running"
):
hm.on_message_received = handler
hm.on_method_request_received = handler
hm.on_connection_state_change = handler
hm.on_new_sastoken_required = handler
hm.stop(receiver_handlers_only=True)
elif request.param == "Some receiver and client event handlers set, all running":
hm.on_message_received = handler
hm.on_method_request_received = handler
hm.on_connection_state_change = handler
hm.on_new_sastoken_required = handler
elif request.param == "Some receiver handlers set, all stopped":
hm.on_message_received = handler
hm.on_method_request_received = handler
hm.stop()
elif request.param == "Some receiver handlers set, all running":
hm.on_message_received = handler
hm.on_method_request_received = handler
elif request.param == "Some client event handlers set, all stopped":
hm.on_connection_state_change = handler
hm.on_new_sastoken_required = handler
hm.stop()
elif request.param == "Some client event handlers set, all running":
hm.on_connection_state_change = handler
hm.on_new_sastoken_required = handler
yield hm
hm.stop()
@pytest.mark.it(
"Starts handler runners for any handler that is set, but does not have a handler runner running"
)
def test_starts_runners_if_necessary(self, handler_manager):
handler_manager.ensure_running()
# Check receiver handlers
for handler_name in all_receiver_handlers:
if getattr(handler_manager, handler_name) is not None:
# NOTE: this assumes the convention of internal names being the name of a handler
# prefixed with a "_". If this ever changes, you must change this test.
assert handler_manager._receiver_handler_runners["_" + handler_name] is not None
# Check client event handlers
for handler_name in all_client_event_handlers:
if getattr(handler_manager, handler_name) is not None:
assert handler_manager._client_event_runner is not None
# don't need to check the rest of the handlers since they all share a runner
break
# ##############
# # PROPERTIES #
# ##############
class SharedHandlerPropertyTests(object):
@pytest.fixture
def handler_manager(self, inbox_manager):
hm = SyncHandlerManager(inbox_manager)
yield hm
hm.stop()
# NOTE: We use setattr() and getattr() in these tests so they're generic to all properties.
# This is functionally identical to doing explicit assignment to a property, it just
# doesn't read quite as well.
@pytest.mark.it("Can be both read and written to")
def test_read_write(self, handler_name, handler_manager, handler):
assert getattr(handler_manager, handler_name) is None
setattr(handler_manager, handler_name, handler)
assert getattr(handler_manager, handler_name) is handler
setattr(handler_manager, handler_name, None)
assert getattr(handler_manager, handler_name) is None
class SharedReceiverHandlerPropertyTests(SharedHandlerPropertyTests):
# NOTE: If there is ever any deviation in the convention of what the internal names of handlers
# are other than just a prefixed "_", we'll have to move this fixture to the child classes so
# it can be unique to each handler
@pytest.fixture
def handler_name_internal(self, handler_name):
return "_" + handler_name
@pytest.mark.it(
"Creates and starts a daemon Thread for the correpsonding handler runner when value is set to a function"
)
def test_thread_created(self, handler_name, handler_name_internal, handler_manager, handler):
assert handler_manager._receiver_handler_runners[handler_name_internal] is None
setattr(handler_manager, handler_name, handler)
assert isinstance(
handler_manager._receiver_handler_runners[handler_name_internal], threading.Thread
)
assert handler_manager._receiver_handler_runners[handler_name_internal].daemon is True
@pytest.mark.it(
"Stops the corresponding handler runner and completes any existing daemon Thread for it when the value is set back to None"
)
def test_thread_removed(self, handler_name, handler_name_internal, handler_manager, handler):
# Set handler
setattr(handler_manager, handler_name, handler)
# Thread has been created and is alive
t = handler_manager._receiver_handler_runners[handler_name_internal]
assert isinstance(t, threading.Thread)
assert t.is_alive()
# Set the handler back to None
setattr(handler_manager, handler_name, None)
# Thread has finished and the manager no longer has a reference to it
assert not t.is_alive()
assert handler_manager._receiver_handler_runners[handler_name_internal] is None
@pytest.mark.it(
"Does not delete, remove, or replace the Thread for the corresponding handler runner, when updated with a new function value"
)
def test_thread_unchanged_by_handler_update(
self, handler_name, handler_name_internal, handler_manager, handler
):
# Set the handler
setattr(handler_manager, handler_name, handler)
        # Thread has been created and is alive
t = handler_manager._receiver_handler_runners[handler_name_internal]
assert isinstance(t, threading.Thread)
assert t.is_alive()
# Set new handler
def new_handler(arg):
pass
setattr(handler_manager, handler_name, new_handler)
assert handler_manager._receiver_handler_runners[handler_name_internal] is t
assert t.is_alive()
@pytest.mark.it(
"Is invoked by the runner when the Inbox corresponding to the handler receives an object, passing that object to the handler"
)
def test_handler_invoked(self, mocker, handler_name, handler_manager, inbox):
# Set the handler
mock_handler = mocker.MagicMock()
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Add an item to corresponding inbox, triggering the handler
mock_obj = mocker.MagicMock()
inbox.put(mock_obj)
time.sleep(0.1)
# Handler has been called with the item from the inbox
assert mock_handler.call_count == 1
assert mock_handler.call_args == mocker.call(mock_obj)
@pytest.mark.it(
"Is invoked by the runner every time the Inbox corresponding to the handler receives an object"
)
def test_handler_invoked_multiple(self, mocker, handler_name, handler_manager, inbox):
# Set the handler
mock_handler = ThreadsafeMock()
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Add 5 items to the corresponding inbox, triggering the handler
for _ in range(5):
inbox.put(mocker.MagicMock())
time.sleep(0.2)
# Handler has been called 5 times
assert mock_handler.call_count == 5
@pytest.mark.it(
"Is invoked for every item already in the corresponding Inbox at the moment of handler removal"
)
def test_handler_resolve_pending_items_before_handler_removal(
self, mocker, handler_name, handler_manager, inbox
):
# Use a threadsafe mock to ensure accurate counts
mock_handler = ThreadsafeMock()
assert inbox.empty()
# Queue up a bunch of items in the inbox
for _ in range(100):
inbox.put(mocker.MagicMock())
# The handler has not yet been called
assert mock_handler.call_count == 0
# Items are still in the inbox
assert not inbox.empty()
# Set the handler
setattr(handler_manager, handler_name, mock_handler)
# The handler has not yet been called for everything that was in the inbox
# NOTE: I'd really like to show that the handler call count is also > 0 here, but
# it's pretty difficult to make the timing work
assert mock_handler.call_count < 100
# Immediately remove the handler
setattr(handler_manager, handler_name, None)
# Wait to give a chance for the handler runner to finish calling everything
time.sleep(0.2)
# Despite removal, handler has been called for everything that was in the inbox at the
# time of the removal
assert mock_handler.call_count == 100
assert inbox.empty()
# Add some more items
for _ in range(100):
inbox.put(mocker.MagicMock())
# Wait to give a chance for the handler to be called (it won't)
time.sleep(0.2)
# Despite more items added to inbox, no further handler calls have been made beyond the
# initial calls that were made when the original items were added
assert mock_handler.call_count == 100
@pytest.mark.it(
"Sends a HandlerManagerException to the background exception handler if any exception is raised during its invocation"
)
def test_exception_in_handler(
self, mocker, handler_name, handler_manager, inbox, arbitrary_exception
):
background_exc_spy = mocker.spy(handle_exceptions, "handle_background_exception")
# Handler will raise exception when called
mock_handler = mocker.MagicMock()
mock_handler.side_effect = arbitrary_exception
# Set handler
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Background exception handler has not been called
assert background_exc_spy.call_count == 0
# Add an item to corresponding inbox, triggering the handler
inbox.put(mocker.MagicMock())
time.sleep(0.1)
# Handler has now been called
assert mock_handler.call_count == 1
# Background exception handler was called
assert background_exc_spy.call_count == 1
e = background_exc_spy.call_args[0][0]
assert isinstance(e, HandlerManagerException)
assert e.__cause__ is arbitrary_exception
@pytest.mark.it(
"Can be updated with a new value that the corresponding handler runner will immediately begin using for handler invocations instead"
)
def test_handler_update_handler(self, mocker, handler_name, handler_manager, inbox):
def handler(arg):
# Invoking handler replaces the set handler with a mock
setattr(handler_manager, handler_name, mocker.MagicMock())
setattr(handler_manager, handler_name, handler)
inbox.put(mocker.MagicMock())
time.sleep(0.1)
# Handler has been replaced with a mock, but the mock has not been invoked
assert getattr(handler_manager, handler_name) is not handler
assert getattr(handler_manager, handler_name).call_count == 0
# Add a new item to the inbox
inbox.put(mocker.MagicMock())
time.sleep(0.1)
# The mock was now called
assert getattr(handler_manager, handler_name).call_count == 1
class SharedClientEventHandlerPropertyTests(SharedHandlerPropertyTests):
@pytest.fixture
def inbox(self, inbox_manager):
return inbox_manager.get_client_event_inbox()
@pytest.mark.it(
"Creates and starts a daemon Thread for the Client Event handler runner when value is set to a function if the Client Event handler runner does not already exist"
)
def test_no_client_event_runner(self, handler_name, handler_manager, handler):
assert handler_manager._client_event_runner is None
setattr(handler_manager, handler_name, handler)
t = handler_manager._client_event_runner
assert isinstance(t, threading.Thread)
assert t.daemon is True
@pytest.mark.it(
"Does not modify the Client Event handler runner thread when value is set to a function if the Client Event handler runner already exists"
)
def test_client_event_runner_already_exists(self, handler_name, handler_manager, handler):
# Add a fake client event runner thread
fake_runner_thread = threading.Thread()
fake_runner_thread.daemon = True
fake_runner_thread.start()
handler_manager._client_event_runner = fake_runner_thread
# Set handler
setattr(handler_manager, handler_name, handler)
# Fake thread was not changed
assert handler_manager._client_event_runner is fake_runner_thread
@pytest.mark.it(
"Does not delete, remove, or replace the Thread for the Client Event handler runner when value is set back to None"
)
def test_handler_removed(self, handler_name, handler_manager, handler):
# Set handler
setattr(handler_manager, handler_name, handler)
# Thread has been created and is alive
t = handler_manager._client_event_runner
assert isinstance(t, threading.Thread)
assert t.is_alive()
# Set the handler back to None
setattr(handler_manager, handler_name, None)
# Thread is still maintained on the manager and alive
assert handler_manager._client_event_runner is t
assert t.is_alive()
@pytest.mark.it(
"Does not delete, remove, or replace the Thread for the Client Event handler runner when updated with a new function value"
)
def test_handler_update(self, handler_name, handler_manager, handler):
# Set handler
setattr(handler_manager, handler_name, handler)
# Thread has been created and is alive
t = handler_manager._client_event_runner
assert isinstance(t, threading.Thread)
assert t.is_alive()
# Set new handler
def new_handler(arg):
pass
setattr(handler_manager, handler_name, new_handler)
# Thread is still maintained on the manager and alive
assert handler_manager._client_event_runner is t
assert t.is_alive()
@pytest.mark.it(
"Is invoked by the runner only when the Client Event Inbox receives a matching Client Event, passing any arguments to the handler"
)
def test_handler_invoked(self, mocker, handler_name, handler_manager, inbox, event):
# Set the handler
mock_handler = mocker.MagicMock()
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Add the event to the client event inbox
inbox.put(event)
time.sleep(0.1)
# Handler has been called with the arguments from the event
assert mock_handler.call_count == 1
assert mock_handler.call_args == mocker.call(*event.args_for_user)
# Add non-matching event to the client event inbox
non_matching_event = client_event.ClientEvent("NON_MATCHING_EVENT")
inbox.put(non_matching_event)
time.sleep(0.1)
# Handler has not been called again
assert mock_handler.call_count == 1
@pytest.mark.it(
"Is invoked by the runner every time the Client Event Inbox receives a matching Client Event"
)
def test_handler_invoked_multiple(self, handler_name, handler_manager, inbox, event):
# Set the handler
mock_handler = ThreadsafeMock()
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Add 5 matching events to the corresponding inbox, triggering the handler
for _ in range(5):
inbox.put(event)
time.sleep(0.2)
# Handler has been called 5 times
assert mock_handler.call_count == 5
@pytest.mark.it(
"Sends a HandlerManagerException to the background exception handler if any exception is raised during its invocation"
)
def test_exception_in_handler(
self, mocker, handler_name, handler_manager, inbox, event, arbitrary_exception
):
background_exc_spy = mocker.spy(handle_exceptions, "handle_background_exception")
# Handler will raise exception when called
mock_handler = mocker.MagicMock()
mock_handler.side_effect = arbitrary_exception
# Set handler
setattr(handler_manager, handler_name, mock_handler)
# Handler has not been called
assert mock_handler.call_count == 0
# Background exception handler has not been called
assert background_exc_spy.call_count == 0
# Add the event to the client event inbox, triggering the handler
inbox.put(event)
time.sleep(0.1)
# Handler has now been called
assert mock_handler.call_count == 1
# Background exception handler was called
assert background_exc_spy.call_count == 1
e = background_exc_spy.call_args[0][0]
assert isinstance(e, HandlerManagerException)
assert e.__cause__ is arbitrary_exception
@pytest.mark.it(
"Can be updated with a new value that the Client Event handler runner will immediately begin using for handler invocations instead"
)
def test_updated_handler(self, mocker, handler_name, handler_manager, inbox, event):
def handler(*args):
# Invoking handler replaces the set handler with a mock
setattr(handler_manager, handler_name, mocker.MagicMock())
setattr(handler_manager, handler_name, handler)
inbox.put(event)
time.sleep(0.1)
# Handler has been replaced with a mock, but the mock has not been invoked
assert getattr(handler_manager, handler_name) is not handler
assert getattr(handler_manager, handler_name).call_count == 0
# Add a new event to the inbox
inbox.put(event)
time.sleep(0.1)
# The mock was now called
assert getattr(handler_manager, handler_name).call_count == 1
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_message_received")
class TestSyncHandlerManagerPropertyOnMessageReceived(SharedReceiverHandlerPropertyTests):
@pytest.fixture
def handler_name(self):
return "on_message_received"
@pytest.fixture
def inbox(self, inbox_manager):
return inbox_manager.get_unified_message_inbox()
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_method_request_received")
class TestSyncHandlerManagerPropertyOnMethodRequestReceived(SharedReceiverHandlerPropertyTests):
@pytest.fixture
def handler_name(self):
return "on_method_request_received"
@pytest.fixture
def inbox(self, inbox_manager):
return inbox_manager.get_method_request_inbox()
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_twin_desired_properties_patch_received")
class TestSyncHandlerManagerPropertyOnTwinDesiredPropertiesPatchReceived(
SharedReceiverHandlerPropertyTests
):
@pytest.fixture
def handler_name(self):
return "on_twin_desired_properties_patch_received"
@pytest.fixture
def inbox(self, inbox_manager):
return inbox_manager.get_twin_patch_inbox()
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_connection_state_change")
class TestSyncHandlerManagerPropertyOnConnectionStateChange(SharedClientEventHandlerPropertyTests):
@pytest.fixture
def handler_name(self):
return "on_connection_state_change"
@pytest.fixture
def event(self):
return client_event.ClientEvent(client_event.CONNECTION_STATE_CHANGE)
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_new_sastoken_required")
class TestSyncHandlerManagerPropertyOnNewSastokenRequired(SharedClientEventHandlerPropertyTests):
@pytest.fixture
def handler_name(self):
return "on_new_sastoken_required"
@pytest.fixture
def event(self):
return client_event.ClientEvent(client_event.NEW_SASTOKEN_REQUIRED)
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .on_background_exception")
class TestSyncHandlerManagerPropertyOnBackgroundException(SharedClientEventHandlerPropertyTests):
@pytest.fixture
def handler_name(self):
return "on_background_exception"
@pytest.fixture
def event(self, arbitrary_exception):
return client_event.ClientEvent(client_event.BACKGROUND_EXCEPTION, arbitrary_exception)
@pytest.mark.describe("SyncHandlerManager - PROPERTY: .handling_client_events")
class TestSyncHandlerManagerPropertyHandlingClientEvents(object):
@pytest.fixture
def handler_manager(self, inbox_manager):
hm = SyncHandlerManager(inbox_manager)
yield hm
hm.stop()
@pytest.mark.it("Is True if the Client Event Handler Runner is running")
def test_client_event_runner_running(self, handler_manager):
# Add a fake client event runner thread
fake_runner_thread = threading.Thread()
fake_runner_thread.daemon = True
fake_runner_thread.start()
handler_manager._client_event_runner = fake_runner_thread
assert handler_manager.handling_client_events is True
@pytest.mark.it("Is False if the Client Event Handler Runner is not running")
def test_client_event_runner_not_running(self, handler_manager):
assert handler_manager._client_event_runner is None
assert handler_manager.handling_client_events is False
| 43.077241
| 170
| 0.700202
| 28,091
| 0.899459
| 3,839
| 0.122923
| 27,766
| 0.889053
| 0
| 0
| 10,436
| 0.334155
|
1278ee593e924b3273cd53898ff8735b235b993e
| 885
|
py
|
Python
|
src/python/Chameleon.Faas/demo/helloworld_grpc_client.py
|
sevenTiny/Seventiny.Cloud.ScriptEngine
|
dda66a7d2ec8c203823e07666314b9d0c8795768
|
[
"Apache-2.0"
] | 2
|
2020-01-17T03:16:42.000Z
|
2020-08-28T04:23:06.000Z
|
src/python/Chameleon.Faas/demo/helloworld_grpc_client.py
|
sevenTiny/Seventiny.Cloud.ScriptEngine
|
dda66a7d2ec8c203823e07666314b9d0c8795768
|
[
"Apache-2.0"
] | null | null | null |
src/python/Chameleon.Faas/demo/helloworld_grpc_client.py
|
sevenTiny/Seventiny.Cloud.ScriptEngine
|
dda66a7d2ec8c203823e07666314b9d0c8795768
|
[
"Apache-2.0"
] | 1
|
2019-12-13T07:02:56.000Z
|
2019-12-13T07:02:56.000Z
|
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
from grpc.beta import implementations
def run():
    # connect to the RPC server
    # TLS connection mode >>>
with open('G:\\DotNet\\SevenTiny.Cloud.FaaS\\Code\\Python\\SevenTiny.Cloud.FaaS.GRpc\\ca\\client.pem', 'rb') as f:
pem = f.read()
creds = implementations.ssl_channel_credentials(
pem, None, None)
channel = implementations.secure_channel('localhost', 5001, creds)
    # TLS connection mode <<<
    # channel = grpc.insecure_channel('localhost:39901')
    # call the RPC service
stub = helloworld_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(helloworld_pb2.HelloRequest(name='czl'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(helloworld_pb2.HelloRequest(name='daydaygo'))
print("Greeter client received: " + response.message)
if __name__ == '__main__':
run()
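# Note (assumption about the build setup): the helloworld_pb2 and helloworld_pb2_grpc
# modules imported above are generated from a helloworld.proto definition, e.g. with
# grpcio-tools:
#
#   python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. helloworld.proto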
| 34.038462
| 118
| 0.701695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 320
| 0.348205
|
1279a170c86c50a1d9aa504d29a7b4fbc15ef3a6
| 2,350
|
py
|
Python
|
tools/pca_outcore.py
|
escorciav/deep-action-proposals
|
c14f512febc1abd0ec40bd3188a83e4ee3913535
|
[
"MIT"
] | 28
|
2017-03-19T12:02:22.000Z
|
2021-07-08T13:49:41.000Z
|
tools/pca_outcore.py
|
escorciav/deep-action-proposals
|
c14f512febc1abd0ec40bd3188a83e4ee3913535
|
[
"MIT"
] | 2
|
2018-05-07T07:43:15.000Z
|
2018-12-14T16:06:48.000Z
|
tools/pca_outcore.py
|
escorciav/deep-action-proposals
|
c14f512febc1abd0ec40bd3188a83e4ee3913535
|
[
"MIT"
] | 7
|
2017-03-19T11:51:21.000Z
|
2020-01-07T11:17:48.000Z
|
#!/usr/bin/env python
"""
PCA done via matrix multiplication out-of-core.
"""
import argparse
import time
import h5py
import hickle as hkl
import numpy as np
def input_parse():
description = 'Compute PCA with A.T * A computation out of core'
p = argparse.ArgumentParser(description=description)
p.add_argument('dsfile', help='HDF5-file with features')
p.add_argument('pcafile', help='HDF5-file with PCA results')
p.add_argument('-ll', '--log_loop', default=500, type=int,
help='Verbose in terms of number of videos')
return p
def main(dsfile, pcafile, t_size=16, t_stride=8, source='c3d_features',
log_loop=100):
print time.ctime(), 'start: loading hdf5'
fid = h5py.File(dsfile, 'r')
video_names = fid.keys()
feat_dim = fid[video_names[0]][source].shape[1]
print time.ctime(), 'finish: loading hdf5'
print time.ctime(), 'start: compute mean'
x_mean, n = np.zeros((1, feat_dim), dtype=np.float32), 0
for i, v in fid.iteritems():
feat = v[source][:]
n += feat.shape[0]
x_mean += feat.sum(axis=0)
x_mean /= n
print time.ctime(), 'finish: compute mean'
def compute_ATA(chunk, f=fid, source=source, mean=x_mean):
feat_dim = f[chunk[0]][source].shape[1]
ATA_c = np.zeros((feat_dim, feat_dim), dtype=np.float32)
for i in chunk:
feat_c = f[i][source][:]
feat_c_ = feat_c - mean
ATA_c += np.dot(feat_c_.T, feat_c_)
return ATA_c
print time.ctime(), 'start: out-of-core matrix multiplication'
j, n_videos = 0, len(video_names)
ATA = np.zeros((feat_dim, feat_dim), dtype=np.float32)
for i, v in fid.iteritems():
feat = v[source][:]
feat_ = feat - x_mean
ATA += np.dot(feat_.T, feat_)
j += 1
if j % log_loop == 0:
print time.ctime(), 'Iteration {}/{}'.format(j, n_videos)
print time.ctime(), 'finish: out-of-core matrix multiplication'
# SVD
print time.ctime(), 'start: SVD in memory'
U, S, _ = np.linalg.svd(ATA)
print time.ctime(), 'finish: SVD in memory'
print time.ctime(), 'serializing ...'
hkl.dump({'x_mean': x_mean, 'U': U, 'S': S, 'n_samples': n}, pcafile)
if __name__ == '__main__':
p = input_parse()
args = p.parse_args()
main(**vars(args))
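# Example invocation (placeholder paths). The script uses Python 2 syntax (print
# statements, dict.iteritems()), so it needs a Python 2 interpreter:
#
#   python2 tools/pca_outcore.py features.hdf5 pca_results.hkl --log_loop 200
#
# The hickle output holds 'x_mean', 'U', 'S' and 'n_samples'; since np.linalg.svd returns
# singular values in descending order, the first k columns of U are the top-k principal
# directions.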
| 30.519481
| 73
| 0.609362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 560
| 0.238298
|
127b202282fe9d7b819fac4de12d835378edbe4e
| 5,680
|
py
|
Python
|
azdev/params.py
|
marstr/azure-cli-dev-tools
|
8b82b1867a425a9a017868c6c1aef2f4bb5aa62b
|
[
"MIT"
] | null | null | null |
azdev/params.py
|
marstr/azure-cli-dev-tools
|
8b82b1867a425a9a017868c6c1aef2f4bb5aa62b
|
[
"MIT"
] | null | null | null |
azdev/params.py
|
marstr/azure-cli-dev-tools
|
8b82b1867a425a9a017868c6c1aef2f4bb5aa62b
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
from knack.arguments import ArgumentsContext
from azdev.completer import get_test_completion
class Flag(object):
pass
def load_arguments(self, _):
with ArgumentsContext(self, '') as c:
c.argument('modules', options_list=['--modules', '-m'], nargs='+', help='Space-separated list of modules to check. Omit to check all.')
c.argument('ci_mode', options_list='--ci', action='store_true', help='Run in CI mode.')
c.argument('private', action='store_true', help='Target the private repo.')
with ArgumentsContext(self, 'setup') as c:
c.argument('cli_path', options_list=['--cli', '-c'], nargs='?', const=Flag, help='Path to an existing Azure CLI repo. Omit value to search for the repo.')
c.argument('ext_repo_path', options_list=['--repo', '-r'], nargs='+', help='Space-separated list of paths to existing Azure CLI extensions repos.')
c.argument('ext', options_list=['--ext', '-e'], nargs='+', help='Space-separated list of extensions to install initially.')
with ArgumentsContext(self, 'test') as c:
c.argument('ci_mode', options_list='--ci', action='store_true', help='Run the tests in CI mode.')
c.argument('discover', options_list='--discover', action='store_true', help='Build an index of test names so that you don\'t need to specify fully qualified test paths.')
c.argument('xml_path', options_list='--xml-path', help='Path and filename at which to store the results in XML format. If omitted, the file will be saved as `test_results.xml` in your `.azdev` directory.')
c.argument('in_series', options_list='--series', action='store_true', help='Disable test parallelization.')
c.argument('run_live', options_list='--live', action='store_true', help='Run all tests live.')
c.positional('tests', nargs='*', help='Space-separated list of tests to run. Can specify test filenames, class name or individual method names.', completer=get_test_completion)
        c.argument('profile', options_list='--profile', help='Run automation against a specific profile. If omitted, the tests will run against the current profile.')
c.argument('pytest_args', nargs=argparse.REMAINDER, options_list=['--pytest-args', '-a'], help='Denotes the remaining args will be passed to pytest.')
c.argument('last_failed', options_list='--lf', action='store_true', help='Re-run the last tests that failed.')
with ArgumentsContext(self, 'coverage') as c:
c.argument('prefix', type=str, help='Filter analysis by command prefix.')
c.argument('report', action='store_true', help='Display results as a report.')
c.argument('untested_params', nargs='+', help='Space-separated list of param dest values to search for (OR logic)')
with ArgumentsContext(self, 'style') as c:
c.positional('modules', nargs='*', help='Space-separated list of modules or extensions to check.')
c.argument('pylint', action='store_true', help='Run pylint.')
c.argument('pep8', action='store_true', help='Run flake8 to check PEP8.')
for scope in ['history', 'version']:
with ArgumentsContext(self, 'verify {}'.format(scope)) as c:
c.positional('modules', nargs='*', help='Space-separated list of modules to check.')
with ArgumentsContext(self, 'verify version') as c:
c.argument('update', action='store_true', help='If provided, the command will update the versions in azure-cli\'s setup.py file.')
with ArgumentsContext(self, 'linter') as c:
c.positional('modules', nargs='*', help='Space-separated list of modules or extensions to check.')
c.argument('rules', options_list=['--rules', '-r'], nargs='+', help='Space-separated list of rules to run. Omit to run all rules.')
c.argument('rule_types', options_list=['--rule-types', '-t'], nargs='+', choices=['params', 'commands', 'command_groups', 'help_entries'], help='Space-separated list of rule types to run. Omit to run all.')
with ArgumentsContext(self, 'perf') as c:
c.argument('runs', type=int, help='Number of runs to average performance over.')
for scope in ['extension add', 'extension remove']:
with ArgumentsContext(self, scope) as c:
c.positional('extensions', metavar='NAME', nargs='+', help='Space-separated list of extension names.')
for scope in ['extension repo add', 'extension repo remove']:
with ArgumentsContext(self, scope) as c:
c.positional('repos', metavar='PATH', nargs='+', help='Space-separated list of paths to Git repositories.')
with ArgumentsContext(self, 'extension update-index') as c:
c.positional('extension', metavar='URL', help='URL to an extension WHL file.')
with ArgumentsContext(self, 'group delete') as c:
c.argument('product', help='Value for tag `product` to mark for deletion.', arg_group='Tag')
c.argument('older_than', type=int, help='Minimum age (in hours) for tag `date` to mark for deletion.', arg_group='Tag')
c.argument('cause', help='Value for tag `cause` to mark for deletion.', arg_group='Tag')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt.')
c.argument('prefixes', options_list=['--prefixes', '-p'], nargs='+', help='Space-separated list of prefixes to filter by.')
| 67.619048
| 214
| 0.659859
| 28
| 0.00493
| 0
| 0
| 0
| 0
| 0
| 0
| 3,171
| 0.558275
|
127b40e7a10ad49a4f232756467391a18976528f
| 1,968
|
py
|
Python
|
gamry_parser/cv.py
|
bcliang/gamry-parser
|
c1dfcf73d973c88ee496f0aa256d99f642ab6013
|
[
"MIT"
] | 6
|
2019-03-14T21:21:13.000Z
|
2022-03-04T19:21:32.000Z
|
gamry_parser/cv.py
|
bcliang/gamry-parser
|
c1dfcf73d973c88ee496f0aa256d99f642ab6013
|
[
"MIT"
] | 34
|
2019-03-11T04:21:51.000Z
|
2022-01-10T21:45:38.000Z
|
gamry_parser/cv.py
|
bcliang/gamry-parser
|
c1dfcf73d973c88ee496f0aa256d99f642ab6013
|
[
"MIT"
] | 5
|
2019-08-11T15:38:30.000Z
|
2021-04-24T20:06:09.000Z
|
import gamry_parser as parser
class CyclicVoltammetry(parser.GamryParser):
"""Load a Cyclic Voltammetry experiment generated in Gamry EXPLAIN format."""
def get_v_range(self):
"""retrieve the programmed voltage scan ranges
Args:
None
Returns:
tuple, containing:
float: voltage limit 1, in V
float: voltage limit 2, in V
"""
assert self.loaded, "DTA file not loaded. Run CyclicVoltammetry.load()"
assert (
"VLIMIT1" in self.header.keys()
), "DTA header file missing VLIMIT1 specification"
assert (
"VLIMIT2" in self.header.keys()
), "DTA header file missing VLIMIT2 specification"
return self.header["VLIMIT1"], self.header["VLIMIT2"]
def get_scan_rate(self):
"""retrieve the programmed scan rate
Args:
None
Returns:
float: the scan rate, in mV/s
"""
assert self.loaded, "DTA file not loaded. Run CyclicVoltammetry.load()"
assert (
"SCANRATE" in self.header.keys()
), "DTA header file missing SCANRATE specification"
return self.header["SCANRATE"]
def get_curve_data(self, curve: int = 0):
"""retrieve relevant cyclic voltammetry experimental data
Args:
curve (int, optional): curve number to return. Defaults to 0.
Returns:
pandas.DataFrame:
- Vf: potential, in V
- Im: current, in A
"""
assert self.loaded, "DTA file not loaded. Run CyclicVoltammetry.load()"
assert curve >= 0, "Invalid curve ({}). Indexing starts at 0".format(curve)
assert (
curve < self.curve_count
), "Invalid curve ({}). File contains {} total curves.".format(
curve, self.curve_count
)
df = self.curves[curve]
return df[["Vf", "Im"]]
| 29.373134
| 83
| 0.571646
| 1,935
| 0.983232
| 0
| 0
| 0
| 0
| 0
| 0
| 1,182
| 0.60061
|
127c2b5fae2468e39370fecece20d2e64788de00
| 11,609
|
py
|
Python
|
comps.py
|
matthewb66/bdconsole
|
edc9a03f93dd782d58ff274ebe5152f7eccecff7
|
[
"MIT"
] | null | null | null |
comps.py
|
matthewb66/bdconsole
|
edc9a03f93dd782d58ff274ebe5152f7eccecff7
|
[
"MIT"
] | null | null | null |
comps.py
|
matthewb66/bdconsole
|
edc9a03f93dd782d58ff274ebe5152f7eccecff7
|
[
"MIT"
] | null | null | null |
import json
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import dash_table
def get_comps_data(bd, projverurl):
print('Getting components ...')
# path = projverurl + "/components?limit=5000"
#
# custom_headers = {'Accept': 'application/vnd.blackducksoftware.bill-of-materials-6+json'}
# resp = hub.execute_get(path, custom_headers=custom_headers)
# if resp.status_code != 200:
# print('component list response ' + str(resp.status_code))
# return None
#
# comps = resp.json()
comps = bd.get_json(projverurl + "/components?limit=5000")
df = pd.json_normalize(comps, record_path=['items'])
for index, comp in enumerate(comps['items']):
df.loc[index, 'json'] = json.dumps(comp)
print('Found ' + str(len(df.index)) + ' Components')
return df, comps['items']
col_data_comps = [
{"name": ['Component'], "id": "componentName"},
{"name": ['Version'], "id": "componentVersionName"},
{"name": ['Ignored'], "id": "ignored"},
# {"name": ['Ignored'], "id": "ignoreIcon"},
{"name": ['Reviewed'], "id": "reviewStatus"},
{"name": ['Policy Violation'], "id": "policyStatus"},
# {"name": ['Policy Status'], "id": "polIcon"},
{"name": ['Usage'], "id": "usages"},
{"name": ['Match Types'], "id": "matchTypes"},
]
def create_compstab(compdata, projname, vername):
global col_data_comps
for col, dtype in compdata.dtypes.items():
if dtype == 'bool':
compdata[col] = compdata[col].astype('str')
return [
dbc.Row(
dbc.Col(html.H2("Components")),
),
dbc.Row(
[
dbc.Col(html.H5("Project: " + projname + " - Version: " + vername), width=8),
dbc.Col(
dcc.Dropdown(
id="sel_comp_action",
options=[
{'label': 'Select Action ...', 'value': 'NOTHING'},
{'label': 'Ignore', 'value': 'IGNORE'},
{'label': 'Unignore', 'value': 'UNIGNORE'},
{'label': 'Set Reviewed', 'value': 'REVIEW'},
{'label': 'Set Unreviewed', 'value': 'UNREVIEW'},
{'label': 'Usage - Source', 'value': 'USAGE_SOURCE'},
{'label': 'Usage - Statically Linked', 'value': 'USAGE_STATIC'},
{'label': 'Usage - Dynamically Linked', 'value': 'USAGE_DYNAMIC'},
{'label': 'Usage - Separate Work', 'value': 'USAGE_SEPARATE'},
{'label': 'Usage - Merely Aggregated', 'value': 'USAGE_AGGREGATED'},
{'label': 'Usage - Implement Standard', 'value': 'USAGE_STANDARD'},
{'label': 'Usage - Prerequisite', 'value': 'USAGE_PREREQUISITE'},
{'label': 'Usage - Dev Tool/Excluded', 'value': 'USAGE_EXCLUDED'},
],
multi=False,
placeholder='Select Action ...'
), width=2,
align='center',
),
dbc.Col(dbc.Button("Selected Rows", id="button_comp_selected",
className="mr-2", size='sm'), width=1),
dbc.Col(dbc.Button("All Filtered Rows", id="button_comp_all",
className="mr-2", size='sm'), width=1),
]
),
dbc.Row(
dbc.Col(
dash_table.DataTable(
id='compstable',
columns=col_data_comps,
style_cell={
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 0,
'font_size': '12px',
},
data=compdata.to_dict('records'),
page_size=30, sort_action='native',
filter_action='native',
row_selectable="multi",
cell_selectable=False,
style_header={'backgroundColor': 'rgb(30, 30, 30)', 'color': 'white'},
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in compdata.to_dict('records')
],
tooltip_duration=None,
style_data_conditional=[
{
'if': {'column_id': 'componentName'},
'width': '30%'
},
{
'if': {'column_id': 'componentVersionName'},
'width': '20%'
},
{
'if': {'column_id': 'ignored'},
'width': '10%'
},
{
'if': {'column_id': 'reviewStatus'},
'width': '10%'
},
{
'if': {'column_id': 'policyStatus'},
'width': '10%'
},
{
'if': {'column_id': 'usages'},
'width': '10%'
},
{
'if': {'column_id': 'matchTypes'},
'width': '10%'
},
{
'if': {
'filter_query': '{policyStatus} = "IN_VIOLATION"',
'column_id': 'policyStatus'
},
'backgroundColor': 'maroon',
'color': 'white'
},
{
'if': {
'filter_query': '{reviewStatus} = "REVIEWED"',
'column_id': 'reviewStatus'
},
'backgroundColor': 'blue',
'color': 'white'
},
{
'if': {
'filter_query': '{ignored} eq "True"',
'column_id': 'ignored'
},
'backgroundColor': 'grey',
'color': 'white'
},
],
sort_by=[{'column_id': 'componentName', 'direction': 'asc'},
{'column_id': 'componentVersionName', 'direction': 'asc'}]
# merge_duplicate_headers=True
),
width=12
),
),
]
def make_comp_toast(message):
"""
Helper function for making a toast. dict id for use in pattern matching
callbacks.
"""
return dbc.Toast(
message,
id={"type": "toast", "id": "toast_comp"},
key='toast_comp',
header="Component Processing",
is_open=True,
dismissable=False,
icon="info",
duration=8000,
)
def compactions(bd, action, origdata, vdata, rows, projverurl):
def do_comp_action(url, cdata):
custom_headers = {'Accept': 'application/vnd.blackducksoftware.bill-of-materials-6+json',
'Content-Type': 'application/vnd.blackducksoftware.bill-of-materials-6+json'}
# putresp = hub.execute_put(url, cdata, custom_headers=custom_headers)
# if not putresp.ok:
# print('Error - cannot update component ' + url)
# return False
# else:
# print('Processed component ' + cdata['componentName'])
# return True
r = bd.session.put(url, json=cdata)
if r.status_code == 200:
print('Processed component ' + cdata['componentName'])
return True
else:
print('Error - cannot update component ' + url)
return False
compaction_dict = {
'IGNORE':
{'field': 'ignored', 'value': True,
'confirmation': 'Ignored', 'display': 'True'},
'UNIGNORE':
{'field': 'ignored', 'value': False,
'confirmation': 'Unignored', 'display': 'False'},
'REVIEW':
{'field': 'reviewStatus', 'value': 'REVIEWED',
'confirmation': 'Set Reviewed', 'display': 'REVIEWED'},
'UNREVIEW':
{'field': 'reviewStatus', 'value': 'NOT_REVIEWED',
'confirmation': 'Set Unreviewed', 'display': 'NOT_REVIEWED'},
'USAGE_SOURCE':
{'field': 'usages', 'value': ['SOURCE_CODE'],
'confirmation': 'Usage Changed', 'display': 'SOURCE_CODE'},
'USAGE_STATIC':
{'field': 'usages', 'value': ['STATICALLY_LINKED'],
'confirmation': 'Usage Changed', 'display': 'STATICALLY_LINKED'},
'USAGE_DYNAMIC':
{'field': 'usages', 'value': ['DYNAMICALLY_LINKED'],
'confirmation': 'Usage Changed', 'display': 'DYNAMICALLY_LINKED'},
'USAGE_SEPARATE':
{'field': 'usages', 'value': ['SEPARATE_WORK'],
'confirmation': 'Usage Changed', 'display': 'SEPARATE_WORK'},
'USAGE_AGGREGATED':
{'field': 'usages', 'value': ['MERELY_AGGREGATED'],
'confirmation': 'Usage Changed', 'display': 'MERELY_AGGREGATED'},
'USAGE_STANDARD':
{'field': 'usages', 'value': ['IMPLEMENTATION_OF_STANDARD'],
'confirmation': 'Usage Changed', 'display': 'IMPLEMENTATION_OF_STANDARD'},
'USAGE_PREREQUISITE':
{'field': 'usages', 'value': ['PREREQUISITE'],
'confirmation': 'Usage Changed', 'display': 'PREREQUISITE'},
'USAGE_EXCLUDED':
{'field': 'usages', 'value': ['DEV_TOOL_EXCLUDED'],
'confirmation': 'Usage Changed', 'display': 'DEV_TOOL_EXCLUDED'},
}
count = 0
confirmation = ''
for row in rows:
thiscomp = vdata[row]
compurl = thiscomp['componentVersion']
#
# Find component in allcomps list
# compdata = next(comp for comp in allcomps if comp["componentVersion"] == compurl)
compdata = json.loads(thiscomp['json'])
if action in compaction_dict.keys():
entry = compaction_dict[action]
foundrow = -1
for origrow, origcomp in enumerate(origdata):
if origcomp['componentVersion'] == vdata[row]['componentVersion']:
foundrow = origrow
break
if foundrow >= 0:
origdata[foundrow][entry['field']] = entry['display']
confirmation = entry['confirmation']
compdata[entry['field']] = entry['value']
thiscompurl = projverurl + '/' + '/'.join(compurl.split('/')[4:])
if do_comp_action(thiscompurl, compdata):
count += 1
toast = ''
if count > 0:
toast = make_comp_toast("{} Components {}".format(count, confirmation))
return origdata, toast
| 41.460714
| 103
| 0.450168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,606
| 0.396761
|
127c9e72b97842964045050d2c4c20f3d0a12a28
| 656
|
py
|
Python
|
CursoemVideoPython/Desafio 35.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 35.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 35.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
'''
Write a program that reads the lengths of three line segments and tells the user
whether or not they can form a triangle.
'''
reta1 = float(input('Enter the length of the first segment: '))
reta2 = float(input('Enter the length of the second segment: '))
reta3 = float(input('Enter the length of the third segment: '))
if reta1 < 0 or reta2 < 0 or reta3 < 0:
    print('\nInvalid value!')
    print('A side length can NOT be negative!')
else:
    if reta1 + reta2 > reta3 and reta1 + reta3 > reta2 and reta2 + reta3 > reta1:
        print('\nThe three segments CAN form a triangle!')
    else:
        print('\nThe three segments can NOT form a triangle!')
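# Illustrative addition (not part of the original exercise): the same triangle
# inequality check wrapped in a reusable function.
def forms_triangle(a: float, b: float, c: float) -> bool:
    """Return True if three non-negative lengths satisfy the triangle inequality."""
    return min(a, b, c) >= 0 and a + b > c and a + c > b and b + c > a
# forms_triangle(3, 4, 5) -> True ; forms_triangle(1, 2, 10) -> False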
| 38.588235
| 85
| 0.689024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 402
| 0.602699
|
127d60f439a2eeaeea97213b05b97e925b002613
| 15,790
|
py
|
Python
|
osprofiler/tests/unit/drivers/test_ceilometer.py
|
charliebr30/osprofiler
|
cffca4e29e373e3f09f2ffdd458761183a851569
|
[
"Apache-2.0"
] | null | null | null |
osprofiler/tests/unit/drivers/test_ceilometer.py
|
charliebr30/osprofiler
|
cffca4e29e373e3f09f2ffdd458761183a851569
|
[
"Apache-2.0"
] | 1
|
2017-04-15T22:16:06.000Z
|
2017-04-15T22:16:06.000Z
|
osprofiler/tests/unit/drivers/test_ceilometer.py
|
shwsun/osprofiler
|
46d29fc5ab8a4068217e399883f39cdd443a7500
|
[
"Apache-2.0"
] | 1
|
2020-02-17T09:48:43.000Z
|
2020-02-17T09:48:43.000Z
|
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler.drivers.ceilometer import Ceilometer
from osprofiler.tests import test
class CeilometerParserTestCase(test.TestCase):
def setUp(self):
super(CeilometerParserTestCase, self).setUp()
self.ceilometer = Ceilometer("ceilometer://",
ceilometer_api_version="2")
def test_build_empty_tree(self):
self.assertEqual([], self.ceilometer._build_tree({}))
def test_build_complex_tree(self):
test_input = {
"2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}},
"1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}},
"21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}},
"22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}},
"11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}},
"113": {"parent_id": "11", "trace_id": "113",
"info": {"started": 3}},
"112": {"parent_id": "11", "trace_id": "112",
"info": {"started": 2}},
"114": {"parent_id": "11", "trace_id": "114",
"info": {"started": 5}}
}
expected_output = [
{
"parent_id": "0",
"trace_id": "1",
"info": {"started": 0},
"children": [
{
"parent_id": "1",
"trace_id": "11",
"info": {"started": 1},
"children": [
{"parent_id": "11", "trace_id": "112",
"info": {"started": 2}, "children": []},
{"parent_id": "11", "trace_id": "113",
"info": {"started": 3}, "children": []},
{"parent_id": "11", "trace_id": "114",
"info": {"started": 5}, "children": []}
]
}
]
},
{
"parent_id": "0",
"trace_id": "2",
"info": {"started": 1},
"children": [
{"parent_id": "2", "trace_id": "21",
"info": {"started": 6}, "children": []},
{"parent_id": "2", "trace_id": "22",
"info": {"started": 7}, "children": []}
]
}
]
result = self.ceilometer._build_tree(test_input)
self.assertEqual(expected_output, result)
def test_get_report_empty(self):
self.ceilometer.client = mock.MagicMock()
self.ceilometer.client.events.list.return_value = []
expected = {
"info": {
"name": "total",
"started": 0,
"finished": None,
"last_trace_started": None
},
"children": [],
"stats": {},
}
base_id = "10"
self.assertEqual(expected, self.ceilometer.get_report(base_id))
def test_get_report(self):
self.ceilometer.client = mock.MagicMock()
results = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock(), mock.MagicMock()]
self.ceilometer.client.events.list.return_value = results
results[0].to_dict.return_value = {
"traits": [
{
"type": "string",
"name": "base_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "host",
"value": "ubuntu"
},
{
"type": "string",
"name": "method",
"value": "POST"
},
{
"type": "string",
"name": "name",
"value": "wsgi-start"
},
{
"type": "string",
"name": "parent_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "project",
"value": "keystone"
},
{
"type": "string",
"name": "service",
"value": "main"
},
{
"type": "string",
"name": "timestamp",
"value": "2015-12-23T14:02:22.338776"
},
{
"type": "string",
"name": "trace_id",
"value": "06320327-2c2c-45ae-923a-515de890276a"
}
],
"raw": {},
"generated": "2015-12-23T10:41:38.415793",
"event_type": "profiler.main",
"message_id": "65fc1553-3082-4a6f-9d1e-0e3183f57a47"}
results[1].to_dict.return_value = {
"traits":
[
{
"type": "string",
"name": "base_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "host",
"value": "ubuntu"
},
{
"type": "string",
"name": "name",
"value": "wsgi-stop"
},
{
"type": "string",
"name": "parent_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "project",
"value": "keystone"
},
{
"type": "string",
"name": "service",
"value": "main"
},
{
"type": "string",
"name": "timestamp",
"value": "2015-12-23T14:02:22.380405"
},
{
"type": "string",
"name": "trace_id",
"value": "016c97fd-87f3-40b2-9b55-e431156b694b"
}
],
"raw": {},
"generated": "2015-12-23T10:41:38.406052",
"event_type": "profiler.main",
"message_id": "3256d9f1-48ba-4ac5-a50b-64fa42c6e264"}
results[2].to_dict.return_value = {
"traits":
[
{
"type": "string",
"name": "base_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "db.params",
"value": "[]"
},
{
"type": "string",
"name": "db.statement",
"value": "SELECT 1"
},
{
"type": "string",
"name": "host",
"value": "ubuntu"
},
{
"type": "string",
"name": "name",
"value": "db-start"
},
{
"type": "string",
"name": "parent_id",
"value": "06320327-2c2c-45ae-923a-515de890276a"
},
{
"type": "string",
"name": "project",
"value": "keystone"
},
{
"type": "string",
"name": "service",
"value": "main"
},
{
"type": "string",
"name": "timestamp",
"value": "2015-12-23T14:02:22.395365"
},
{
"type": "string",
"name": "trace_id",
"value": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"
}
],
"raw": {},
"generated": "2015-12-23T10:41:38.984161",
"event_type": "profiler.main",
"message_id": "60368aa4-16f0-4f37-a8fb-89e92fdf36ff"}
results[3].to_dict.return_value = {
"traits":
[
{
"type": "string",
"name": "base_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "host",
"value": "ubuntu"
},
{
"type": "string",
"name": "name",
"value": "db-stop"
},
{
"type": "string",
"name": "parent_id",
"value": "06320327-2c2c-45ae-923a-515de890276a"
},
{
"type": "string",
"name": "project",
"value": "keystone"
},
{
"type": "string",
"name": "service",
"value": "main"
},
{
"type": "string",
"name": "timestamp",
"value": "2015-12-23T14:02:22.415486"
},
{
"type": "string",
"name": "trace_id",
"value": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"
}
],
"raw": {},
"generated": "2015-12-23T10:41:39.019378",
"event_type": "profiler.main",
"message_id": "3fbeb339-55c5-4f28-88e4-15bee251dd3d"}
results[4].to_dict.return_value = {
"traits":
[
{
"type": "string",
"name": "base_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "host",
"value": "ubuntu"
},
{
"type": "string",
"name": "method",
"value": "GET"
},
{
"type": "string",
"name": "name",
"value": "wsgi-start"
},
{
"type": "string",
"name": "parent_id",
"value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
},
{
"type": "string",
"name": "project",
"value": "keystone"
},
{
"type": "string",
"name": "service",
"value": "main"
},
{
"type": "string",
"name": "timestamp",
"value": "2015-12-23T14:02:22.427444"
},
{
"type": "string",
"name": "trace_id",
"value": "016c97fd-87f3-40b2-9b55-e431156b694b"
}
],
"raw": {},
"generated": "2015-12-23T10:41:38.360409",
"event_type": "profiler.main",
"message_id": "57b971a9-572f-4f29-9838-3ed2564c6b5b"}
expected = {"children": [
{"children": [{"children": [],
"info": {"finished": 76,
"host": "ubuntu",
"meta.raw_payload.db-start": {},
"meta.raw_payload.db-stop": {},
"name": "db",
"project": "keystone",
"service": "main",
"started": 56,
"exception": "None"},
"parent_id": "06320327-2c2c-45ae-923a-515de890276a",
"trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}
],
"info": {"finished": 0,
"host": "ubuntu",
"meta.raw_payload.wsgi-start": {},
"name": "wsgi",
"project": "keystone",
"service": "main",
"started": 0},
"parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
"trace_id": "06320327-2c2c-45ae-923a-515de890276a"},
{"children": [],
"info": {"finished": 41,
"host": "ubuntu",
"meta.raw_payload.wsgi-start": {},
"meta.raw_payload.wsgi-stop": {},
"name": "wsgi",
"project": "keystone",
"service": "main",
"started": 88,
"exception": "None"},
"parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
"trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}],
"info": {
"finished": 88,
"name": "total",
"started": 0,
"last_trace_started": 88
},
"stats": {"db": {"count": 1, "duration": 20},
"wsgi": {"count": 2, "duration": -47}},
}
base_id = "10"
result = self.ceilometer.get_report(base_id)
expected_filter = [{"field": "base_id", "op": "eq", "value": base_id}]
self.ceilometer.client.events.list.assert_called_once_with(
expected_filter, limit=100000)
self.assertEqual(expected, result)
| 37.240566
| 79
| 0.338252
| 15,058
| 0.953642
| 0
| 0
| 0
| 0
| 0
| 0
| 5,559
| 0.352058
|
127dce97d99e34df63ba730d1cd14233e203885a
| 2,271
|
py
|
Python
|
threshold.py
|
jiep/unicode-similarity
|
a32a031f96dce2b8a52a8ff4b5365c768c016fc6
|
[
"MIT"
] | 1
|
2019-02-22T10:31:51.000Z
|
2019-02-22T10:31:51.000Z
|
threshold.py
|
jiep/unicode-similarity
|
a32a031f96dce2b8a52a8ff4b5365c768c016fc6
|
[
"MIT"
] | null | null | null |
threshold.py
|
jiep/unicode-similarity
|
a32a031f96dce2b8a52a8ff4b5365c768c016fc6
|
[
"MIT"
] | 1
|
2020-12-15T15:34:43.000Z
|
2020-12-15T15:34:43.000Z
|
from pathlib import Path
import numpy as np
import pickle
import argparse
import errno
import sys
def file_exists(path):
return Path(path).is_file()
def dir_exists(path):
return Path(path).is_dir()
def remove_extension(x): return x.split('.')[0]
def print_error(type, file):
print(FileNotFoundError(errno.ENOENT,
'The {} {} does not exist'.format(type, file)))
def calculate_threshold(similarity, output='confusables',
threshold=0.8, verbose=False):
lines = [line.rstrip('\n') for line in open(similarity)]
unicode_characters = np.asarray(lines[0].split(' ')[1:])
data = {}
data['threshold'] = threshold
data['characters'] = {}
for l in lines[1:]:
line = l.split(' ')
latin = line[0]
del line[0]
        similarity_row = np.asarray(line, dtype=float)  # np.float was removed in NumPy >= 1.24
indexes = np.where(similarity_row >= threshold)
data['characters'][latin] = unicode_characters[np.asarray(indexes[0])]\
.tolist()
chars = unicode_characters[np.asarray(indexes[0])].tolist()
if(verbose):
print('[{}] {}: {}'.format(len(chars), latin, ','.join(chars)))
output = '{}-{}.pickle'.format(output, int(threshold*100))
with open(output, 'wb') as f:
pickle.dump(data, f)
def main():
parser = argparse.ArgumentParser(description='Filter Unicode characters '
'based on a given threshold '
'between 0 and 1 '
'and a similarity matrix')
parser.add_argument('-s', '--similarity', default='similarities.txt')
parser.add_argument('-t', '--threshold', default=0.8, type=float)
parser.add_argument('-o', '--output', default='confusables')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
similarity = args.similarity
threshold = args.threshold
output = args.output
verbose = args.verbose
if not file_exists(similarity):
print_error('file', similarity)
sys.exit(1)
calculate_threshold(similarity, output, threshold, verbose)
if __name__ == '__main__':
main()
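# Usage sketch (illustrative): the similarity matrix is whitespace-separated,
# with the first line listing the Unicode characters and each following line
# starting with a Latin character followed by its similarity scores. For example
#
#   python threshold.py -s similarities.txt -t 0.9 -o confusables -v
#
# writes confusables-90.pickle containing, for each Latin character, the Unicode
# characters whose similarity is >= 0.9.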
| 28.037037
| 79
| 0.589608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.151475
|
127def7299a4b8a5f141ed18533a55c708f10769
| 1,813
|
py
|
Python
|
y2019/control_loops/python/wrist.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | null | null | null |
y2019/control_loops/python/wrist.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | null | null | null |
y2019/control_loops/python/wrist.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
from aos.util.trapezoid_profile import TrapezoidProfile
from frc971.control_loops.python import control_loop
from frc971.control_loops.python import angular_system
from frc971.control_loops.python import controls
import copy
import numpy
import sys
from matplotlib import pylab
import gflags
import glog
FLAGS = gflags.FLAGS
try:
gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
except gflags.DuplicateFlagError:
pass
# Wrist alone
# 0.1348
# Wrist with ball
# 0.3007
# Wrist with hatch
# 0.446
kWrist = angular_system.AngularSystemParams(
name='Wrist',
motor=control_loop.BAG(),
G=(6.0 / 60.0) * (20.0 / 100.0) * (24.0 / 84.0),
J=0.30,
q_pos=0.20,
q_vel=5.0,
kalman_q_pos=0.12,
kalman_q_vel=2.0,
kalman_q_voltage=4.0,
kalman_r_position=0.05)
kWristBall = copy.copy(kWrist)
kWristBall.J = 0.4007
kWristBall.q_pos = 0.55
kWristBall.q_vel = 5.0
kWristPanel = copy.copy(kWrist)
kWristPanel.J = 0.446
kWristModel = copy.copy(kWrist)
kWristModel.J = 0.1348
def main(argv):
if FLAGS.plot:
R = numpy.matrix([[numpy.pi / 2.0], [0.0]])
angular_system.PlotKick(kWristBall, R, plant_params=kWristBall)
angular_system.PlotMotion(kWristBall, R, plant_params=kWristBall)
# Write the generated constants out to a file.
if len(argv) != 5:
glog.fatal(
'Expected .h file name and .cc file name for the wrist and integral wrist.'
)
else:
namespaces = ['y2019', 'control_loops', 'superstructure', 'wrist']
angular_system.WriteAngularSystem([kWrist, kWristBall, kWristPanel],
argv[1:3], argv[3:5], namespaces)
if __name__ == '__main__':
argv = FLAGS(sys.argv)
glog.init()
sys.exit(main(argv))
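# Usage sketch (illustrative): the generator expects four output file names, two
# for the plain wrist loop and two for the integral wrist loop; the paths below
# are hypothetical.
#
#   python wrist.py wrist_plant.h wrist_plant.cc integral_wrist.h integral_wrist.cc
#
# Passing --plot additionally shows the kick/motion response for the ball profile.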
| 24.835616
| 87
| 0.674021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 314
| 0.173194
|
12810e363b2fde4bb2f563894e88d9b033fc5d56
| 2,666
|
py
|
Python
|
utils/tools.py
|
alipay/Pyraformer
|
84af4dbd93b7b96975b5034f0dde412005260123
|
[
"Apache-2.0"
] | 7
|
2022-03-24T03:42:14.000Z
|
2022-03-27T16:27:31.000Z
|
utils/tools.py
|
alipay/Pyraformer
|
84af4dbd93b7b96975b5034f0dde412005260123
|
[
"Apache-2.0"
] | 1
|
2022-03-17T08:54:42.000Z
|
2022-03-17T08:54:42.000Z
|
utils/tools.py
|
alipay/Pyraformer
|
84af4dbd93b7b96975b5034f0dde412005260123
|
[
"Apache-2.0"
] | 1
|
2022-03-29T16:33:44.000Z
|
2022-03-29T16:33:44.000Z
|
from torch.nn.modules import loss
import torch
import numpy as np
def MAE(pred, true):
return np.mean(np.abs(pred-true))
def MSE(pred, true):
return np.mean((pred-true)**2)
def RMSE(pred, true):
return np.sqrt(MSE(pred, true))
def MAPE(pred, true):
return np.mean(np.abs((pred - true) / true))
def MSPE(pred, true):
return np.mean(np.square((pred - true) / true))
def metric(pred, true):
mae = MAE(pred, true)
mse = MSE(pred, true)
rmse = RMSE(pred, true)
mape = MAPE(pred, true)
mspe = MSPE(pred, true)
    return mae, mse, rmse, mape, mspe
class StandardScaler():
def __init__(self):
self.mean = 0.
self.std = 1.
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
return (data - mean) / std
def inverse_transform(self, data):
mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
return (data * std) + mean
class TopkMSELoss(torch.nn.Module):
def __init__(self, topk) -> None:
super().__init__()
self.topk = topk
self.criterion = torch.nn.MSELoss(reduction='none')
def forward(self, output, label):
losses = self.criterion(output, label).mean(2).mean(1)
losses = torch.topk(losses, self.topk)[0]
return losses
class SingleStepLoss(torch.nn.Module):
""" Compute top-k log-likelihood and mse. """
def __init__(self, ignore_zero):
super().__init__()
self.ignore_zero = ignore_zero
def forward(self, mu, sigma, labels, topk=0):
if self.ignore_zero:
indexes = (labels != 0)
else:
indexes = (labels >= 0)
distribution = torch.distributions.normal.Normal(mu[indexes], sigma[indexes])
likelihood = -distribution.log_prob(labels[indexes])
diff = labels[indexes] - mu[indexes]
se = diff * diff
if 0 < topk < len(likelihood):
likelihood = torch.topk(likelihood, topk)[0]
se = torch.topk(se, topk)[0]
return likelihood, se
def AE_loss(mu, labels, ignore_zero):
if ignore_zero:
indexes = (labels != 0)
else:
indexes = (labels >= 0)
ae = torch.abs(labels[indexes] - mu[indexes])
return ae
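# Minimal self-check sketch (illustrative, not part of the original module):
# exercises the error metrics and a StandardScaler round-trip on random data.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    true = rng.normal(loc=10.0, scale=1.0, size=100)
    pred = true + rng.normal(scale=0.1, size=100)
    print("MAE, MSE, RMSE, MAPE, MSPE:", metric(pred, true))
    scaler = StandardScaler()
    scaler.fit(true)
    assert np.allclose(scaler.inverse_transform(scaler.transform(true)), true)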
| 28.361702
| 112
| 0.62003
| 1,876
| 0.703676
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.01913
|
1282bd510ec173d21c0fd86f0dd67b09824e394a
| 2,772
|
py
|
Python
|
.venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 115
|
2020-06-18T15:00:58.000Z
|
2022-03-02T10:13:19.000Z
|
.venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
.venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 60
|
2020-07-22T14:53:10.000Z
|
2022-03-23T10:17:59.000Z
|
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
class TestTimedeltaIndexShift:
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="T"), idx)
exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="T"), exp)
exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
def test_tdi_shift_int(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(
[
"1 days 01:00:00",
"2 days 01:00:00",
"3 days 01:00:00",
"4 days 01:00:00",
"5 days 01:00:00",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(3, freq="2D 1s")
expected = TimedeltaIndex(
[
"6 days 01:00:03",
"7 days 01:00:03",
"8 days 01:00:03",
"9 days 01:00:03",
"10 days 01:00:03",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
tdi.shift(2)
| 35.538462
| 82
| 0.544372
| 2,625
| 0.94697
| 0
| 0
| 0
| 0
| 0
| 0
| 658
| 0.237374
|
1282edeb2a30864dc3a5aa0e406d5fae2795f292
| 1,974
|
py
|
Python
|
webScraping/Instagram/2a_selenium_corriere.py
|
PythonBiellaGroup/MaterialeSerate
|
58b45ecda7b9a8a298b9ca966d2806618a277372
|
[
"MIT"
] | 12
|
2021-12-12T22:19:52.000Z
|
2022-03-18T11:45:17.000Z
|
webScraping/Instagram/2a_selenium_corriere.py
|
PythonGroupBiella/MaterialeLezioni
|
58b45ecda7b9a8a298b9ca966d2806618a277372
|
[
"MIT"
] | 1
|
2022-03-23T13:58:33.000Z
|
2022-03-23T14:05:08.000Z
|
webScraping/Instagram/2a_selenium_corriere.py
|
PythonGroupBiella/MaterialeLezioni
|
58b45ecda7b9a8a298b9ca966d2806618a277372
|
[
"MIT"
] | 5
|
2021-11-30T19:38:41.000Z
|
2022-01-30T14:50:44.000Z
|
# use selenium to scrape headlines from corriere.it
# pip install selenium
from re import L
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pandas as pd
import time
import sys
HOME = "https://corriere.it"
# open Firefox
driver = webdriver.Firefox()
# navigate to corriere.it
driver.get(HOME)
# In order to extract the information that you’re looking to scrape,
# you need to locate the element’s XPath.
# An XPath is a syntax used for finding any element on a webpage.
# We can see the headline
#<a class="has-text-black" href="https://www.corriere.it/sport/calcio/coppa-italia/22_aprile_19/inter-milan-formazioni-news-risultato-f607f438-bfef-11ec-9f78-c9d279c21b38.shtml">Inter-Milan, doppio Lautaro e Gosens, nerazzurri in finale di Coppa Italia </a>
# --> [@class=”name”]
# all good, but we need to sort out this cookie pop-up first
#driver.find_element_by_xpath("//*[@id='_cpmt-accept']").click()
#WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, '_cpmt-accept'))).click()
#WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div#_cpmt-buttons button#_cpmt-accept"))).click()
time.sleep(5)
# carefully look at the env, we have an iframe here
cookie_iframe = driver.find_element_by_xpath("//iframe[@id='_cpmt-iframe']")
driver.switch_to.frame(cookie_iframe)
print(cookie_iframe)
#driver.switch_to.frame(driver.find_element(By.XPATH("//iframe[@id='_cpmt-iframe']")))
button = driver.find_element_by_id("_cpmt-accept").click()
# back to the main class
driver.get(HOME)
# elements --> find_all
headlines = driver.find_elements_by_xpath('//h4[@class="title-art-hp is-medium is-line-h-106"]')
# here we get all the headlines from the corriere
# we can get the text
for headline in headlines:
print(headline.text)
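# pandas is imported above but not used yet; as a small follow-up sketch (assuming
# the same driver session is still open), collect the headlines into a DataFrame
# and close the browser when done.
df = pd.DataFrame({"headline": [h.text for h in headlines]})
print(df.head())
driver.quit()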
| 44.863636
| 258
| 0.766971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,320
| 0.665994
|
1283922931293c1f0272600761d089b38ea78f4b
| 2,033
|
py
|
Python
|
stolos/tests/test_bin.py
|
sailthru/stolos
|
7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc
|
[
"Apache-2.0"
] | 121
|
2015-01-20T08:58:35.000Z
|
2021-08-08T15:13:11.000Z
|
stolos/tests/test_bin.py
|
sailthru/stolos
|
7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc
|
[
"Apache-2.0"
] | 3
|
2015-01-20T22:19:49.000Z
|
2016-02-10T10:48:11.000Z
|
stolos/tests/test_bin.py
|
sailthru/stolos
|
7b74da527033b2da7f3ccd6d19ed6fb0245ea0fc
|
[
"Apache-2.0"
] | 20
|
2016-02-03T17:08:31.000Z
|
2021-04-19T10:43:28.000Z
|
import os
from subprocess import check_output, CalledProcessError
from nose import tools as nt
from stolos import queue_backend as qb
from stolos.testing_tools import (
with_setup, validate_zero_queued_task, validate_one_queued_task,
validate_n_queued_task
)
def run(cmd, tasks_json_tmpfile, **kwargs):
cmd = (
"set -o pipefail ; STOLOS_TASKS_JSON={tasks_json} {cmd}").format(
cmd=cmd, tasks_json=tasks_json_tmpfile, **kwargs)
rv = check_output(cmd, shell=True, executable="bash", env=os.environ)
return rv
@with_setup
def test_stolos_submit(app1, job_id1, tasks_json_tmpfile):
with nt.assert_raises(CalledProcessError):
run("stolos-submit -h", tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_readd(app1, job_id1, tasks_json_tmpfile):
qb.set_state(app1, job_id1, failed=True)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1),
tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s --readd" % (app1, job_id1),
tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_multiple_jobs(app1, app2, job_id1, job_id2,
tasks_json_tmpfile):
validate_zero_queued_task(app1)
validate_zero_queued_task(app2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
| 36.303571
| 75
| 0.713724
| 0
| 0
| 0
| 0
| 1,474
| 0.725037
| 0
| 0
| 262
| 0.128874
|
1283e6ee8cf196eb827ab2c20c8605ca98bca840
| 12,442
|
py
|
Python
|
senlin/tests/unit/engine/actions/test_create.py
|
chenyb4/senlin
|
8b9ec31566890dc9989fe08e221172d37c0451b4
|
[
"Apache-2.0"
] | null | null | null |
senlin/tests/unit/engine/actions/test_create.py
|
chenyb4/senlin
|
8b9ec31566890dc9989fe08e221172d37c0451b4
|
[
"Apache-2.0"
] | null | null | null |
senlin/tests/unit/engine/actions/test_create.py
|
chenyb4/senlin
|
8b9ec31566890dc9989fe08e221172d37c0451b4
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from senlin.common import consts
from senlin.engine.actions import base as ab
from senlin.engine.actions import cluster_action as ca
from senlin.engine import cluster as cm
from senlin.engine import dispatcher
from senlin.engine import node as nm
from senlin.objects import action as ao
from senlin.objects import cluster as co
from senlin.objects import dependency as dobj
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
@mock.patch.object(cm.Cluster, 'load')
class ClusterCreateTest(base.SenlinTestCase):
def setUp(self):
super(ClusterCreateTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(ab.Action, 'create')
@mock.patch.object(co.Cluster, 'get_next_index')
@mock.patch.object(nm, 'Node')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test__create_nodes_single(self, mock_wait, mock_start, mock_dep,
mock_node, mock_index, mock_action,
mock_update, mock_load):
# prepare mocks
cluster = mock.Mock(id='CLUSTER_ID', profile_id='FAKE_PROFILE',
user='FAKE_USER', project='FAKE_PROJECT',
domain='FAKE_DOMAIN',
config={"node.name.format": "node-$3I"})
mock_index.return_value = 123
node = mock.Mock(id='NODE_ID')
mock_node.return_value = node
mock_load.return_value = cluster
# cluster action is real
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
action.id = 'CLUSTER_ACTION_ID'
mock_wait.return_value = (action.RES_OK, 'All dependents completed')
# node_action is faked
mock_action.return_value = 'NODE_ACTION_ID'
# do it
res_code, res_msg = action._create_nodes(1)
# assertions
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('All dependents completed', res_msg)
mock_index.assert_called_once_with(action.context, 'CLUSTER_ID')
mock_node.assert_called_once_with('node-123',
'FAKE_PROFILE',
'CLUSTER_ID',
context=action.context,
user='FAKE_USER',
project='FAKE_PROJECT',
domain='FAKE_DOMAIN',
index=123, metadata={})
node.store.assert_called_once_with(action.context)
mock_action.assert_called_once_with(action.context, 'NODE_ID',
'NODE_CREATE',
name='node_create_NODE_ID',
cause='Derived Action')
mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'],
'CLUSTER_ACTION_ID')
mock_update.assert_called_once_with(
action.context, 'NODE_ACTION_ID',
{'status': ab.Action.READY})
mock_start.assert_called_once_with()
mock_wait.assert_called_once_with()
self.assertEqual({'nodes_added': ['NODE_ID']}, action.outputs)
@mock.patch.object(co.Cluster, 'get')
def test_create_nodes_zero(self, mock_get, mock_load):
cluster = mock.Mock()
cluster.id = 'FAKE_CLUSTER'
mock_get.return_value = mock.Mock()
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
res_code, res_msg = action._create_nodes(0)
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('', res_msg)
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(ab.Action, 'create')
@mock.patch.object(co.Cluster, 'get_next_index')
@mock.patch.object(nm, 'Node')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test__create_nodes_multiple(self, mock_wait, mock_start, mock_dep,
mock_node, mock_index, mock_action,
mock_update, mock_load):
cluster = mock.Mock(id='01234567-123434',
config={"node.name.format": "node-$3I"})
node1 = mock.Mock(id='01234567-abcdef',
data={'placement': {'region': 'regionOne'}})
node2 = mock.Mock(id='abcdefab-123456',
data={'placement': {'region': 'regionTwo'}})
mock_node.side_effect = [node1, node2]
mock_index.side_effect = [123, 124]
mock_load.return_value = cluster
# cluster action is real
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
action.id = 'CLUSTER_ACTION_ID'
action.data = {
'placement': {
'count': 2,
'placements': [
{'region': 'regionOne'},
{'region': 'regionTwo'}
]
}
}
mock_wait.return_value = (action.RES_OK, 'All dependents completed')
# node_action is faked
mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2']
# do it
res_code, res_msg = action._create_nodes(2)
# assertions
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('All dependents completed', res_msg)
self.assertEqual(2, mock_index.call_count)
self.assertEqual(2, mock_node.call_count)
node1.store.assert_called_once_with(action.context)
node2.store.assert_called_once_with(action.context)
self.assertEqual(2, mock_action.call_count)
self.assertEqual(1, mock_dep.call_count)
update_calls = [
mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}),
mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'})
]
mock_update.assert_has_calls(update_calls)
mock_start.assert_called_once_with()
mock_wait.assert_called_once_with()
self.assertEqual({'nodes_added': [node1.id, node2.id]}, action.outputs)
self.assertEqual({'region': 'regionOne'}, node1.data['placement'])
self.assertEqual({'region': 'regionTwo'}, node2.data['placement'])
mock_node_calls = [
mock.call('node-123', mock.ANY, '01234567-123434',
user=mock.ANY, project=mock.ANY, domain=mock.ANY,
index=123, context=mock.ANY, metadata={},
data={'placement': {'region': 'regionOne'}}),
mock.call('node-124', mock.ANY, '01234567-123434',
user=mock.ANY, project=mock.ANY, domain=mock.ANY,
index=124, context=mock.ANY, metadata={},
data={'placement': {'region': 'regionTwo'}})
]
mock_node.assert_has_calls(mock_node_calls)
cluster.add_node.assert_has_calls([
mock.call(node1), mock.call(node2)])
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(co.Cluster, 'get')
@mock.patch.object(nm, 'Node')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test__create_nodes_multiple_failed_wait(self, mock_wait, mock_start,
mock_dep, mock_node, mock_get,
mock_update, mock_load):
cluster = mock.Mock(id='01234567-123434', config={})
db_cluster = mock.Mock(next_index=1)
mock_get.return_value = db_cluster
node1 = mock.Mock(id='01234567-abcdef', data={})
node2 = mock.Mock(id='abcdefab-123456', data={})
mock_node.side_effect = [node1, node2]
mock_load.return_value = cluster
# cluster action is real
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
action.id = 'CLUSTER_ACTION_ID'
action.data = {
'placement': {
'count': 2,
'placements': [
{'region': 'regionOne'},
{'region': 'regionTwo'}
]
}
}
mock_wait.return_value = (action.RES_ERROR, 'Waiting timed out')
# node_action is faked
n_action_1 = mock.Mock()
n_action_2 = mock.Mock()
self.patchobject(ab, 'Action', side_effect=[n_action_1, n_action_2])
# do it
res_code, res_msg = action._create_nodes(2)
# assertions
self.assertEqual(action.RES_ERROR, res_code)
self.assertEqual('Failed in creating nodes.', res_msg)
def test_do_create_success(self, mock_load):
cluster = mock.Mock(id='FAKE_CLUSTER', ACTIVE='ACTIVE')
cluster.do_create.return_value = True
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
x_create_nodes = self.patchobject(action, '_create_nodes',
return_value=(action.RES_OK, 'OK'))
# do it
res_code, res_msg = action.do_create()
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('Cluster creation succeeded.', res_msg)
x_create_nodes.assert_called_once_with(cluster.desired_capacity)
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CREATE, created_at=mock.ANY)
def test_do_create_failed_create_cluster(self, mock_load):
cluster = mock.Mock(id='FAKE_CLUSTER')
cluster.do_create.return_value = False
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
# do it
res_code, res_msg = action.do_create()
self.assertEqual(action.RES_ERROR, res_code)
self.assertEqual('Cluster creation failed.', res_msg)
cluster.set_status.assert_called_once_with(
action.context, 'ERROR', 'Cluster creation failed.')
def test_do_create_failed_create_nodes(self, mock_load):
cluster = mock.Mock(id='FAKE_ID',)
cluster.do_create.return_value = True
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
# do it
for code in [action.RES_CANCEL, action.RES_TIMEOUT, action.RES_ERROR]:
self.patchobject(action, '_create_nodes',
return_value=(code, 'Really Bad'))
res_code, res_msg = action.do_create()
self.assertEqual(code, res_code)
self.assertEqual('Really Bad', res_msg)
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CREATE)
cluster.eval_status.reset_mock()
def test_do_create_failed_for_retry(self, mock_load):
cluster = mock.Mock(id='FAKE_ID', INIT='INIT')
cluster.do_create.return_value = True
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
self.patchobject(action, '_create_nodes',
return_value=(action.RES_RETRY, 'retry'))
# do it
res_code, res_msg = action.do_create()
self.assertEqual(action.RES_RETRY, res_code)
self.assertEqual('retry', res_msg)
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CREATE)
| 43.201389
| 79
| 0.60987
| 11,379
| 0.914564
| 0
| 0
| 11,418
| 0.917698
| 0
| 0
| 2,553
| 0.205192
|
12848f59193336131bb837186f98da6abb8ba010
| 1,665
|
py
|
Python
|
tests/test_api.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 155
|
2020-07-03T05:09:22.000Z
|
2022-03-28T06:57:39.000Z
|
tests/test_api.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 453
|
2020-07-02T21:21:52.000Z
|
2022-03-31T21:35:36.000Z
|
tests/test_api.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 21
|
2020-09-07T12:13:27.000Z
|
2022-03-26T16:26:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pytest
from wetterdienst import Wetterdienst
@pytest.mark.remote
@pytest.mark.parametrize(
"provider,kind,kwargs",
[
# German Weather Service (DWD)
(
"dwd",
"observation",
{"parameter": "kl", "resolution": "daily", "period": "recent"},
),
("dwd", "forecast", {"parameter": "large", "mosmix_type": "large"}),
# Environment and Climate Change Canada
("eccc", "observation", {"parameter": "daily", "resolution": "daily"}),
],
)
@pytest.mark.parametrize("si_units", (False, True))
def test_api(provider, kind, kwargs, si_units):
""" Test main wetterdienst API """
# Build API
api = Wetterdienst(provider, kind)
# Discover parameters
assert api.discover()
# All stations
request = api(**kwargs, si_units=si_units).all()
stations = request.df
# Check stations DataFrame columns
assert set(stations.columns).issuperset(
{
"station_id",
"from_date",
"to_date",
"height",
"latitude",
"longitude",
"name",
"state",
}
)
# Check that there are actually stations
assert not stations.empty
# Query first DataFrame from values
values = next(request.values.query()).df
# TODO: DWD Forecast has no quality
assert set(values.columns).issuperset(
{"station_id", "parameter", "date", "value", "quality"}
)
assert not values.empty
| 26.015625
| 79
| 0.587988
| 0
| 0
| 0
| 0
| 1,464
| 0.879279
| 0
| 0
| 763
| 0.458258
|
128572fd0692d7bc47b673410cce38c578481632
| 5,803
|
py
|
Python
|
examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | 49
|
2022-03-15T07:28:16.000Z
|
2022-03-31T07:16:15.000Z
|
examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | null | null | null |
examples/sentence_embedding/task_sentence_embedding_sbert_unsupervised_TSDAE.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | null | null | null |
#! -*- coding:utf-8 -*-
# Semantic similarity task, unsupervised: the training set is pretraining text from the web, the dev set is STS-B
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import copy
import random
import numpy as np
random.seed(2022)
np.random.seed(2002)
maxlen = 256
batch_size = 8
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
def collate_fn(batch):
def add_noise(token_ids, del_ratio=0.6):
n = len(token_ids)
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
return list(np.array(token_ids)[keep_or_not])
texts_list = [[] for _ in range(3)]
for text in batch:
token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
texts_list[1].append(token_ids[:-1])
texts_list[2].append(token_ids[1:])
for i, texts in enumerate(texts_list):
texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
return texts_list[:2], texts_list[2].flatten()
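# Illustrative note (not part of the original script): with del_ratio=0.6 the
# add_noise() helper above keeps each inner token with probability 0.4, so an
# encoder input such as
#   [CLS] t1 t2 t3 t4 t5 [SEP]
# may become
#   [CLS] t2 t5 [SEP]
# while the decoder is still trained to reconstruct the full token sequence;
# this deletion noise is the TSDAE denoising objective.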
# Load the dataset
def get_data(filename):
train_data = []
with open(filename, encoding='utf-8') as f:
for row, l in enumerate(f):
            if row == 0:  # skip the header line
continue
text = l.strip().replace(' ', '')
if len(text) > 0:
train_data.append(text)
return train_data
train_data = get_data('F:/Projects/data/corpus/pretrain/film/film.txt')
train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
from task_sentence_embedding_sbert_sts_b__CosineSimilarityLoss import valid_dataloader
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self, pool_method='mean', scale=20.0):
super().__init__()
self.encoder, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, with_mlm=True, return_model_config=True, segment_vocab_size=0)
        self.decoder = self.encoder  # using copy here (or not) decides whether encoder and decoder are two independent models or one shared model
self.pool_method = pool_method
self.scale = scale
def forward(self, token_ids_list):
token_ids1 = token_ids_list[0]
hidden_state1, pool_cls1, _ = self.encoder([token_ids1])
embeddings_a = self.get_pool_emb(hidden_state1, pool_cls1, attention_mask=token_ids1.gt(0).long())
token_ids2 = token_ids_list[1]
_, _, mlm_score2 = self.decoder([token_ids2, embeddings_a.unsqueeze(1), torch.ones_like(token_ids1)[:, 0:1]])
return mlm_score2.reshape(-1, mlm_score2.shape[-1])
def encode(self, token_ids):
self.eval()
with torch.no_grad():
hidden_state, pool_cls, _ = self.encoder([token_ids])
output = self.get_pool_emb(hidden_state, pool_cls, attention_mask=token_ids.gt(0).long())
return output
def get_pool_emb(self, hidden_state, pool_cls, attention_mask):
if self.pool_method == 'cls':
return pool_cls
elif self.pool_method == 'mean':
hidden_state = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hidden_state / attention_mask
elif self.pool_method == 'max':
seq_state = hidden_state * attention_mask[:, :, None]
return torch.max(seq_state, dim=1)
else:
raise ValueError('pool_method illegal')
model = Model().to(device)
# Define the loss and optimizer; custom choices are supported here
model.compile(
loss=nn.CrossEntropyLoss(ignore_index=0),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),  # use a sufficiently small learning rate
)
# Define the evaluation function
def evaluate(data):
embeddings1, embeddings2, labels = [], [], []
for (batch_token1_ids, batch_token2_ids), label in data:
embeddings1.append(model.encode(batch_token1_ids))
embeddings2.append(model.encode(batch_token2_ids))
labels.append(label)
embeddings1 = torch.concat(embeddings1).cpu().numpy()
embeddings2 = torch.concat(embeddings2).cpu().numpy()
labels = torch.concat(labels).cpu().numpy()
cosine_scores = 1 - (paired_cosine_distances(embeddings1, embeddings2))
eval_pearson_cosine, _ = pearsonr(labels, cosine_scores)
return eval_pearson_cosine
class Evaluator(Callback):
    """Evaluate and save the best model."""
def __init__(self):
self.best_val_consine = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_consine = evaluate(valid_dataloader)
if val_consine > self.best_val_consine:
self.best_val_consine = val_consine
# model.save_weights('best_model.pt')
print(f'val_consine: {val_consine:.5f}, best_val_consine: {self.best_val_consine:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader,
epochs=20,
steps_per_epoch=100,
callbacks=[evaluator]
)
else:
model.load_weights('best_model.pt')
| 37.681818
| 196
| 0.689988
| 2,319
| 0.385793
| 0
| 0
| 0
| 0
| 0
| 0
| 975
| 0.162203
|
12867ea275e82f412c64f544501dc211d18fb6b3
| 2,761
|
py
|
Python
|
crowd_anki/export/anki_exporter_wrapper.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 391
|
2016-08-31T21:55:07.000Z
|
2022-03-30T16:30:12.000Z
|
crowd_anki/export/anki_exporter_wrapper.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 150
|
2016-09-01T00:35:35.000Z
|
2022-03-30T23:26:48.000Z
|
crowd_anki/export/anki_exporter_wrapper.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 51
|
2016-09-04T17:02:39.000Z
|
2022-02-04T11:49:10.000Z
|
from pathlib import Path
from .anki_exporter import AnkiJsonExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..config.config_settings import ConfigSettings
from ..utils import constants
from ..utils.notifier import AnkiModalNotifier, Notifier
from ..utils.disambiguate_uuids import disambiguate_note_model_uuids
EXPORT_FAILED_TITLE = "Export failed"
class AnkiJsonExporterWrapper:
"""
Wrapper designed to work with standard export dialog in anki.
"""
key = "CrowdAnki JSON representation"
ext = constants.ANKI_EXPORT_EXTENSION
hideTags = True
includeTags = True
directory_export = True
def __init__(self, collection,
deck_id: int = None,
json_exporter: AnkiJsonExporter = None,
notifier: Notifier = None):
self.includeMedia = True
self.did = deck_id
self.count = 0 # Todo?
self.collection = collection
self.anki_json_exporter = json_exporter or AnkiJsonExporter(collection, ConfigSettings.get_instance())
self.notifier = notifier or AnkiModalNotifier()
# required by anki exporting interface with its non-PEP-8 names
# noinspection PyPep8Naming
def exportInto(self, directory_path):
if self.did is None:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki export works only for specific decks. "
"Please use CrowdAnki snapshot if you want to export "
"the whole collection.")
return
deck = AnkiDeck(self.collection.decks.get(self.did, default=False))
if deck.is_dynamic:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki does not support export for dynamic decks.")
return
# Clean up duplicate note models. See
# https://github.com/Stvad/CrowdAnki/wiki/Workarounds-%E2%80%94-Duplicate-note-model-uuids.
disambiguate_note_model_uuids(self.collection)
# .parent because we receive name with random numbers at the end (hacking around internals of Anki) :(
export_path = Path(directory_path).parent
self.anki_json_exporter.export_to_directory(deck, export_path, self.includeMedia,
create_deck_subdirectory=ConfigSettings.get_instance().export_create_deck_subdirectory)
self.count = self.anki_json_exporter.last_exported_count
def get_exporter_id(exporter):
return f"{exporter.key} (*{exporter.ext})", exporter
def exporters_hook(exporters_list):
exporter_id = get_exporter_id(AnkiJsonExporterWrapper)
if exporter_id not in exporters_list:
exporters_list.append(exporter_id)
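# Behaviour note (illustrative, not part of the original module): exporters_hook()
# is idempotent, because get_exporter_id() returns the same (label, class) tuple on
# every call, so repeated invocations append the CrowdAnki entry only once:
#
#   demo = []
#   exporters_hook(demo)
#   exporters_hook(demo)
#   len(demo) == 1   # label reads "CrowdAnki JSON representation (*<ext>)"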
| 40.014493
| 139
| 0.680913
| 2,122
| 0.768562
| 0
| 0
| 0
| 0
| 0
| 0
| 666
| 0.241217
|
1286fbd5f6c9f344c50efdbd092dd4dcc7eb7bc9
| 1,086
|
py
|
Python
|
shadow/apis/item.py
|
f1uzz/shadow
|
0c2a1308f8bbe77ce4be005153148aac8ea0b4b2
|
[
"MIT"
] | 1
|
2020-09-10T22:31:54.000Z
|
2020-09-10T22:31:54.000Z
|
shadow/apis/item.py
|
f1uzz/shadow
|
0c2a1308f8bbe77ce4be005153148aac8ea0b4b2
|
[
"MIT"
] | 1
|
2020-03-12T15:47:14.000Z
|
2020-09-11T18:46:44.000Z
|
shadow/apis/item.py
|
f1uzz/shadow
|
0c2a1308f8bbe77ce4be005153148aac8ea0b4b2
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
from typing import Optional
import requests
from .patches import Patches
class Item:
"""
Manipulation of static item data
"""
ITEM_URL = f"http://ddragon.leagueoflegends.com/cdn/{Patches.get_current_patch()}/data/en_US/item.json"
items = requests.get(ITEM_URL).json()
@classmethod
@lru_cache()
def id_for_name(cls, name: str) -> Optional[str]:
"""
Finds the id for an item given its name
Returns the id, None if not found
name - full name of item
"""
for item_id, item in cls.items["data"].items():
if item["name"].casefold() == name.casefold():
return item_id
@classmethod
@lru_cache()
def name_for_id(cls, item_id: str) -> Optional[str]:
"""
Finds the name for an item given its id
Returns the name, None if not found
item_id - id of item
"""
for found_item_id, item in cls.items["data"].items():
if found_item_id == item_id:
return item["name"]
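# Usage sketch (illustrative; note that importing this module already performs the
# Data Dragon request at class-definition time):
#
#   Item.id_for_name("Infinity Edge")  # -> the matching item id string, or None
#   Item.name_for_id("1001")           # -> the item name for that id, or None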
| 25.255814
| 107
| 0.598527
| 977
| 0.899632
| 0
| 0
| 750
| 0.690608
| 0
| 0
| 440
| 0.405157
|
128751ef3f270c09dd8bfd854209616c9fbc00a9
| 2,694
|
py
|
Python
|
tests/test_lmdb_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lmdb_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lmdb_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LMDBDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
import tensorflow_io.lmdb as lmdb_io # pylint: disable=wrong-import-position
def test_lmdb_read_from_file():
"""test_read_from_file"""
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_lmdb", "data.mdb")
tmp_path = tempfile.mkdtemp()
filename = os.path.join(tmp_path, "data.mdb")
shutil.copy(path, filename)
num_repeats = 2
lmdb_dataset = lmdb_io.LMDBDataset([filename]).repeat(num_repeats)
ii = 0
for vv in lmdb_dataset:
i = ii % 10
k, v = vv
assert k.numpy() == str(i).encode()
assert v.numpy() == str(chr(ord("a") + i)).encode()
ii += 1
shutil.rmtree(tmp_path)
def test_lmdb_read_from_file_with_batch():
"""test_read_from_file"""
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_lmdb", "data.mdb")
tmp_path = tempfile.mkdtemp()
filename = os.path.join(tmp_path, "data.mdb")
shutil.copy(path, filename)
lmdb_dataset = lmdb_io.LMDBDataset([filename], batch=3)
i = 0
for vv in lmdb_dataset:
k, v = vv
if i < 9:
assert np.alltrue(k.numpy() == [
str(i).encode(),
str(i + 1).encode(),
str(i + 2).encode()])
assert np.alltrue(v.numpy() == [
str(chr(ord("a") + i)).encode(),
str(chr(ord("a") + i + 1)).encode(),
str(chr(ord("a") + i + 2)).encode()])
else:
assert k.numpy() == str(9).encode()
assert v.numpy() == str('j').encode()
i += 3
shutil.rmtree(tmp_path)
if __name__ == "__main__":
  tf.test.main()
| 33.259259
| 80
| 0.655902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,039
| 0.385672
|
128792253fac3bfe35e8e9d68865a244469d6f80
| 5,211
|
py
|
Python
|
recbole/quick_start/quick_start.py
|
RuihongQiu/DuoRec
|
4ebc30d8b7d9465f854867887b127a0bbc38bc31
|
[
"MIT"
] | 16
|
2021-11-03T02:12:49.000Z
|
2022-03-27T05:48:19.000Z
|
recbole/quick_start/quick_start.py
|
RuihongQiu/DuoRec
|
4ebc30d8b7d9465f854867887b127a0bbc38bc31
|
[
"MIT"
] | 2
|
2021-11-21T14:12:25.000Z
|
2022-03-11T03:00:04.000Z
|
recbole/quick_start/quick_start.py
|
RuihongQiu/DuoRec
|
4ebc30d8b7d9465f854867887b127a0bbc38bc31
|
[
"MIT"
] | 4
|
2021-11-25T09:23:41.000Z
|
2022-03-26T11:23:26.000Z
|
# @Time : 2020/10/6
# @Author : Shanlei Mu
# @Email : slmu@ruc.edu.cn
"""
recbole.quick_start
########################
"""
import logging
from logging import getLogger
from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.utils import init_logger, get_model, get_trainer, init_seed
from recbole.utils.utils import set_color
def run_recbole(model=None, dataset=None, config_file_list=None, config_dict=None, saved=True):
r""" A fast running api, which includes the complete process of
training and testing a model on a specified dataset
Args:
model (str): model name
dataset (str): dataset name
config_file_list (list): config files used to modify experiment parameters
config_dict (dict): parameters dictionary used to modify experiment parameters
saved (bool): whether to save the model
"""
# configurations initialization
config = Config(model=model, dataset=dataset, config_file_list=config_file_list, config_dict=config_dict)
# init_seed(config['seed'], config['reproducibility'])
# logger initialization
init_logger(config)
logger = getLogger()
import os
log_dir = os.path.dirname(logger.handlers[0].baseFilename)
config['log_dir'] = log_dir
logger.info(config)
# dataset filtering
dataset = create_dataset(config)
logger.info(dataset)
# dataset splitting
train_data, valid_data, test_data = data_preparation(config, dataset)
# model loading and initialization
model = get_model(config['model'])(config, train_data).to(config['device'])
logger.info(model)
# trainer loading and initialization
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
# model training
best_valid_score, best_valid_result = trainer.fit(
train_data, valid_data, saved=saved, show_progress=config['show_progress']
)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
embedding_matrix = model.item_embedding.weight[1:].cpu().detach().numpy()
svd = TruncatedSVD(n_components=2)
svd.fit(embedding_matrix)
comp_tr = np.transpose(svd.components_)
proj = np.dot(embedding_matrix, comp_tr)
cnt = {}
for i in dataset['item_id']:
if i.item() in cnt:
cnt[i.item()] += 1
else:
cnt[i.item()] = 1
freq = np.zeros(embedding_matrix.shape[0])
for i in cnt:
freq[i-1] = cnt[i]
# freq /= freq.max()
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.scatter(proj[:, 0], proj[:, 1], s=1, c=freq, cmap='viridis_r')
plt.colorbar()
plt.xlim(-2, 2)
plt.ylim(-2, 2)
# plt.axis('square')
# plt.show()
plt.savefig(log_dir + '/' + config['model'] + '-' + config['dataset'] + '.pdf', format='pdf', transparent=False, bbox_inches='tight')
from scipy.linalg import svdvals
svs = svdvals(embedding_matrix)
svs /= svs.max()
np.save(log_dir + '/sv.npy', svs)
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.plot(svs)
# plt.show()
plt.savefig(log_dir + '/svs.pdf', format='pdf', transparent=False, bbox_inches='tight')
# model evaluation
test_result = trainer.evaluate(test_data, load_best_model=saved, show_progress=config['show_progress'])
logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}')
logger.info(set_color('test result', 'yellow') + f': {test_result}')
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
def objective_function(config_dict=None, config_file_list=None, saved=True):
r""" The default objective_function used in HyperTuning
Args:
config_dict (dict): parameters dictionary used to modify experiment parameters
config_file_list (list): config files used to modify experiment parameters
saved (bool): whether to save the model
"""
config = Config(config_dict=config_dict, config_file_list=config_file_list)
init_seed(config['seed'], config['reproducibility'])
logging.basicConfig(level=logging.ERROR)
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)
model = get_model(config['model'])(config, train_data).to(config['device'])
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
best_valid_score, best_valid_result = trainer.fit(train_data, valid_data, verbose=False, saved=saved)
test_result = trainer.evaluate(test_data, load_best_model=saved)
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
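A hedged usage sketch for the two entry points above; the model and dataset names are illustrative placeholders, not values confirmed by this file:
# Illustrative call of run_recbole (model/dataset/config names are placeholders)
result = run_recbole(model='DuoRec', dataset='ml-100k',
                     config_file_list=['config.yaml'])
print(result['best_valid_score'], result['test_result'])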
| 35.209459
| 137
| 0.682978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,749
| 0.335636
|
1287e0c57eb8a30f8e6d4ada3266d63abc50f722
| 4,947
|
py
|
Python
|
inferlo/generic/inference/bucket_renormalization.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 1
|
2022-01-27T18:44:07.000Z
|
2022-01-27T18:44:07.000Z
|
inferlo/generic/inference/bucket_renormalization.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 3
|
2022-01-23T18:02:30.000Z
|
2022-01-27T23:10:51.000Z
|
inferlo/generic/inference/bucket_renormalization.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 1
|
2021-09-03T06:12:57.000Z
|
2021-09-03T06:12:57.000Z
|
# Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
import warnings
import numpy as np
from sklearn.utils.extmath import randomized_svd
from .bucket_elimination import BucketElimination
from .factor import Factor, default_factor_name, product_over_
from .graphical_model import GraphicalModel
from .mini_bucket_elimination import MiniBucketElimination
class BucketRenormalization(MiniBucketElimination):
"""Bucket Renormalization algorithm."""
def __init__(self, model: GraphicalModel, **kwargs):
super(BucketRenormalization, self).__init__(model, **kwargs)
self._initialize_projectors()
def _initialize_projectors(self):
replications = dict()
working_model = self.renormalized_model.copy()
for var in self.elimination_order:
main_rvar = self.variables_replicated_from_[var][-1]
main_projectors = []
for (i, rvar) in enumerate(self.variables_replicated_from_[var]):
if i < len(self.variables_replicated_from_[var]) - 1:
fac = product_over_(*working_model.get_adj_factors(rvar))
replicated_projector = self._get_svd_projector(fac, rvar)
replicated_projector.name = "RP_{}".format(rvar)
projector = replicated_projector.copy()
projector.variables = [main_rvar]
projector.name = "P_{}".format(rvar)
replications[rvar] = (
main_rvar, replicated_projector, projector)
main_projectors.append(projector)
working_model.add_factors_from(
[replicated_projector.copy(), projector.copy()])
self.renormalized_model.add_factors_from(
[replicated_projector, projector])
working_model.contract_variable(rvar)
self.replications = replications
def _optimize(self):
for var in reversed(self.renormalized_elimination_order):
if var in self.replications.keys():
mb_var, projector, mb_projector = self.replications[var]
self.renormalized_model.remove_factors_from(
[projector, mb_projector])
be = BucketElimination(self.renormalized_model)
marginal_factor = be.get_marginal_factor(
elimination_order_method="given",
elimination_order=self.renormalized_elimination_order,
exception_variables=[var, mb_var],
)
new_mb_projector = self._get_svd_projector(marginal_factor,
mb_var)
new_projector = Factor(
name=default_factor_name(),
variables=[var],
log_values=new_mb_projector.log_values,
)
self.renormalized_model.add_factors_from(
[new_projector, new_mb_projector])
self.replications[var] = (
mb_var, new_projector, new_mb_projector)
    def run(self, max_iter=10):
        """Runs the optimization loop; call get_log_z() afterwards to obtain log(Z)."""
for _ in range(max_iter):
self._optimize()
def get_log_z(self):
"""Calculates log Z."""
be = BucketElimination(self.renormalized_model)
logZ = self.base_logZ
logZ += be.run(
elimination_order_method="given",
elimination_order=self.renormalized_elimination_order
)
return logZ
def _get_svd_projector(self, factor, variable):
factor.transpose_by_(
[variable, *sorted(set(factor.variables) - set([variable]))])
flattened_factor_log_values = factor.log_values.reshape(
factor.get_cardinality_for_(variable), -1
)
max_log = np.max(flattened_factor_log_values)
if np.isnan(max_log):
warnings.warn('Got nan in flattened_factor_log_values')
np.nan_to_num(flattened_factor_log_values, copy=False, nan=-np.inf)
max_log = np.max(flattened_factor_log_values)
if not np.isfinite(max_log):
warnings.warn('Got infinite value in flattened_factor_log_values')
max_log = 0.0
flattened_factor_values = np.exp(flattened_factor_log_values - max_log)
U, _, _ = randomized_svd(flattened_factor_values, n_components=1)
# U,_,_ = np.linalg.svd(flattened_factor_values)
u = U[:, 0]
if np.sum(u) < 0:
u = -u
u[u < 0] = 0.0
u /= np.linalg.norm(u)
return Factor(name=default_factor_name(), variables=[variable],
values=u)
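A minimal usage sketch, assuming a GraphicalModel instance (here called gm) has been built elsewhere:
# Sketch only: gm is an existing GraphicalModel; extra MiniBucketElimination
# options could be passed through **kwargs
br = BucketRenormalization(gm)
br.run(max_iter=10)      # iteratively refine the projectors
log_z = br.get_log_z()   # approximate log partition function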
| 41.571429
| 80
| 0.595512
| 4,506
| 0.910855
| 0
| 0
| 0
| 0
| 0
| 0
| 392
| 0.07924
|
1287eefddb9d27db413d1feaac4d915eb6887055
| 5,519
|
py
|
Python
|
oldcode/guestbook111013.py
|
mdreid/dinkylink
|
34370633c9361f6625227440d4aca6ed2b57bfab
|
[
"MIT"
] | 1
|
2015-05-06T20:07:36.000Z
|
2015-05-06T20:07:36.000Z
|
oldcode/guestbook111013.py
|
mdreid/dinkylink
|
34370633c9361f6625227440d4aca6ed2b57bfab
|
[
"MIT"
] | null | null | null |
oldcode/guestbook111013.py
|
mdreid/dinkylink
|
34370633c9361f6625227440d4aca6ed2b57bfab
|
[
"MIT"
] | null | null | null |
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
from sys import argv
import datetime
import pickle
import sys
sys.path.insert(0, 'libs')
import BeautifulSoup
from bs4 import BeautifulSoup
import requests
import json
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape', 'jinja2.ext.loopcontrols'],
autoescape=True)
url = 'http://www.njtransit.com/sf/sf_servlet.srv?hdnPageAction=TrainSchedulesFrom'
pu_code = "124_PRIN"
ny_code = "105_BNTN"
prs = "Princeton"
nyp = "New York Penn Station"
# get date
today = datetime.date.today()
str_date = today.__format__("%m/%d/%Y")
# trip info
toNY_dict = {'selOrigin': pu_code, 'selDestination': ny_code, 'datepicker': str_date, 'OriginDescription': prs, 'DestDescription': nyp}
toPU_dict = {'selOrigin': ny_code, 'selDestination': pu_code, 'datepicker': str_date, 'OriginDescription': nyp, 'DestDescription': prs}
# get to webpage with data for the day
with requests.Session() as re:
toNY = re.post(url, data=toNY_dict)
toPU = re.post(url, data=toPU_dict)
toPUhtml = toPU.text
toNYhtml = toNY.text
# Reads in html text and a destination name and returns a dict of comma-separated train information
def scrape(html,destination):
title = str(today) + str(destination)
soup = BeautifulSoup(html)
    # Improvement: instead of hard-coding index 10, search for the relevant td element directly
# Gather all lines in table
table1 = soup.find_all("tr")
table2 = table1[10] #table1[10] contains the table of interest
table3 = table2.find_all('span')
# Create 7 lists
origin = [] #Times for departure at origin
origintrain = []
transferarrive = [] #Times for arrival at transfer
transferdepart = [] #Time for departure at transfer
transfertrain = [] #Train or bus number
destination = [] #Time of arrival at destination
total = [] #Total time of Travel
#Create 3 Columns of Text File
origin.append("Origin Departure") #Times for departure at origin
origintrain.append("Origin Train")
transferarrive.append("Transfer Arrival") #Times for arrival at transfer
transferdepart.append("Transfer Departure") #Time for departure at transfer
transfertrain.append("Transfer Train or Bus")
destination.append("Destination Arrival") #Time of arrival at destination
total.append("Total Travel Time") #Total time of Travel
#Store 4 columns into 4 lists
    #Regex and pull appropriate data
for i in range(4, len(table3)-3, 4):
#origin.append(str(table3[i].text)[0:len(table3[i].text)])
origin.append(str(table3[i].text)[0:8])
origintrain.append(str(table3[i].text)[-5:])
transferarrive.append(str(table3[i+1].text)[7:15])
transferdepart.append(str(table3[i+1].text)[39:48])
transfertrain.append(str(table3[i+1].text)[-5:])
destination.append(str(table3[i+2].text)[0:len(table3[i+2].text)])
total.append(str(table3[i+3].text)[0:len(table3[i+3].text)])
#text_file = open(str(title) + ".csv", "w")
Dict = {'origin': origin[1:], 'transferarrive' : transferarrive[1:], 'transferdepart': transferdepart[1:], 'destination':destination[1:]}
return Dict
#Create csv files for to Princeton and to New York
toPUDict = scrape(toPUhtml, 'PU')
toNYDict = scrape(toNYhtml, 'NY')
class njdata(ndb.Model):
"""Models an individual Guestbook entry with author, content, and date."""
originstring = ndb.StringProperty(repeated = True)
transferarrivestring = ndb.StringProperty(repeated = True)
transferdepartstring = ndb.StringProperty(repeated = True)
destinationstring = ndb.StringProperty(repeated = True)
date = ndb.DateTimeProperty(auto_now_add=True) #Need date to get most recent data
globalPUDict = {}
class Test123(webapp2.RequestHandler):
def get(self):
toPUdata = njdata()
#toNYdata = njdata()
#toPUdata.content = pickle.dumps(toPUDict)
toPUdata.originstring = toPUDict['origin']
toPUdata.transferarrivestring = toPUDict['transferarrive']
toPUdata.transferdepartstring = toPUDict['transferdepart']
toPUdata.destinationstring = toPUDict['destination']
#Save data into data models
toPUdata.put()
#toNYdata.put()
toPUdata_query = toPUdata.query().order(-njdata.date)
a = toPUdata_query.fetch(1)
global globalPUDict
globalPUDict = {'origin': a[0].originstring, 'transferarrive': a[0].transferarrivestring, 'transferdepart': a[0].transferdepartstring, 'destination': a[0].destinationstring}
self.response.write(globalPUDict)
self.response.write(toPUDict)
class MainPage(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render())
class ToNY(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('toNY.html')
self.response.write(template.render(toNYDict))
class ToPU(webapp2.RequestHandler):
def get(self):
self.response.write(globalPUDict)
template = JINJA_ENVIRONMENT.get_template('toPU.html')
self.response.write(template.render(globalPUDict))
application = webapp2.WSGIApplication([
('/', MainPage),
('/toNY', ToNY),
('/toPU', ToPU),
('/test', Test123),
], debug=True)
| 31.901734
| 181
| 0.698315
| 1,885
| 0.341547
| 0
| 0
| 0
| 0
| 0
| 0
| 1,792
| 0.324697
|
1289c37f5bf5c6f565d40cc79d0b3cb7b6862bc0
| 4,482
|
py
|
Python
|
is_core/tests/crawler.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
is_core/tests/crawler.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
is_core/tests/crawler.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.utils.encoding import force_text
from germanium.tools import assert_true, assert_not_equal
from germanium.test_cases.client import ClientTestCase
from germanium.decorators import login
from germanium.crawler import Crawler, LinkExtractor, HtmlLinkExtractor as OriginalHtmlLinkExtractor
def flatt_list(iterable_value):
flatten_list = []
for val in iterable_value:
if isinstance(val, list):
flatten_list += val
else:
flatten_list.append(val)
return flatten_list
class JSONLinkExtractor(LinkExtractor):
def _extract_web_links(self, data):
return flatt_list(data.values())
def _extract_rest_links(self, data):
links = []
for rest_link in data.values():
if 'GET' in rest_link['methods']:
links += flatt_list([rest_link['url']])
return links
def _extract_from_dict(self, data):
links = []
for key, val in data.items():
if key == '_web_links':
links += self._extract_web_links(val)
elif key == '_rest_links':
links += self._extract_rest_links(val)
elif isinstance(val, (list, tuple)):
links += self._extract_from_list(val)
elif isinstance(val, dict):
links += self._extract_from_dict(val)
return links
def _extract_from_list(self, data):
links = []
for val in data:
if isinstance(val, dict):
links += self._extract_from_dict(val)
elif isinstance(val, (list, tuple)):
links += self._extract_from_list(val)
return links
def extract(self, content):
data = json.loads(content)
if isinstance(data, dict):
links = self._extract_from_dict(data)
elif isinstance(data, (list, tuple)):
links = self._extract_from_list(data)
return links
class HTMLLinkExtractor(OriginalHtmlLinkExtractor):
link_attr_names = ('href', 'src', 'data-resource')
class TextPlainSnippetsExtractor(LinkExtractor):
def extract(self, content):
links = []
try:
data = json.loads(content)
html_extractor = HTMLLinkExtractor()
for html in data.get('snippets', {}).values():
links += html_extractor.extract(html)
except ValueError:
            # If text/plain is not a snippet, return empty links
pass
return links
class CrawlerTestCase(ClientTestCase):
REST_BASE = None
exclude_urls = ()
def get_users(self):
raise NotImplementedError
def get_exlude_urls(self):
return list(self.exclude_urls) + ['/logout/']
@login(users_generator='get_users')
def test_crawler(self):
self.logger.info('\n---------------------------')
self.logger.info('Test crawling with logged user %s' % self.logged_user.user)
tested_urls = []
failed_urls = []
def pre_request(url, referer, headers):
if url.startswith('/api/'):
headers['HTTP_X_FIELDS'] = '_rest_links,_web_links'
if self.REST_BASE:
headers['HTTP_X_BASE'] = str(self.REST_BASE)
return url, headers
def post_response(url, referer, resp, exception):
tested_urls.append(url)
assert_true(exception is None, msg='Received exception %s, url %s' % (force_text(exception), url))
if resp.status_code != 200:
failed_urls.append(url)
self.logger.warning('Response code for url %s from referer %s should be 200 but code is %s, user %s' %
(url, referer, resp.status_code, self.logged_user.user))
assert_not_equal(resp.status_code, 500, msg='Response code for url %s from referer %s is 500, user %s' %
(url, referer, self.logged_user.user))
Crawler(self.c, ('/',), self.get_exlude_urls(), pre_request, post_response,
extra_link_extractors={'application/json; charset=utf-8': JSONLinkExtractor(),
'text/plain': TextPlainSnippetsExtractor(),
'default': HTMLLinkExtractor()}).run()
self.logger.info('Completed with tested %s urls (warnings %s)' % (len(tested_urls), len(failed_urls)))
self.logger.info('---------------------------')
| 34.744186
| 118
| 0.594378
| 3,929
| 0.876618
| 0
| 0
| 1,730
| 0.385988
| 0
| 0
| 574
| 0.128068
|
1289e9a1e3edba91a08623829d6f72757cbc5c8d
| 136
|
py
|
Python
|
example/geometry/admin.py
|
emelianovss-yandex-praktikum/07_pyplus_django_2
|
09bda00f9c8e9fd1ff0f3a483ecb210041d19a48
|
[
"MIT"
] | null | null | null |
example/geometry/admin.py
|
emelianovss-yandex-praktikum/07_pyplus_django_2
|
09bda00f9c8e9fd1ff0f3a483ecb210041d19a48
|
[
"MIT"
] | null | null | null |
example/geometry/admin.py
|
emelianovss-yandex-praktikum/07_pyplus_django_2
|
09bda00f9c8e9fd1ff0f3a483ecb210041d19a48
|
[
"MIT"
] | 2
|
2021-11-27T08:06:35.000Z
|
2021-11-27T13:52:41.000Z
|
from django.contrib import admin
from geometry.models import Shape
@admin.register(Shape)
class AdminShape(admin.ModelAdmin):
...
| 17
| 35
| 0.772059
| 43
| 0.316176
| 0
| 0
| 66
| 0.485294
| 0
| 0
| 0
| 0
|
128a2d7a634e13b30d2d38fc5ac9815e890ebcfe
| 943
|
py
|
Python
|
demo2/demo2_consume2.py
|
YuYanzy/kafka-python-demo
|
fc01ac29230b41fe1821f6e5a9d7226dea9688fe
|
[
"Apache-2.0"
] | 3
|
2021-05-07T01:48:37.000Z
|
2021-09-24T20:53:51.000Z
|
demo2/demo2_consume2.py
|
YuYanzy/kafka-python-demo
|
fc01ac29230b41fe1821f6e5a9d7226dea9688fe
|
[
"Apache-2.0"
] | null | null | null |
demo2/demo2_consume2.py
|
YuYanzy/kafka-python-demo
|
fc01ac29230b41fe1821f6e5a9d7226dea9688fe
|
[
"Apache-2.0"
] | 1
|
2021-05-08T08:46:01.000Z
|
2021-05-08T08:46:01.000Z
|
# -*- coding: utf-8 -*-
# @Author : Ecohnoch(xcy)
# @File : demo2_consume.py
# @Function : TODO
import kafka
demo2_config = {
'kafka_host': 'localhost:9092',
'kafka_topic': 'demo2',
'kafka_group_id': 'demo2_group1'
}
def consume():
consumer = kafka.KafkaConsumer(demo2_config['kafka_topic'],
group_id=demo2_config['kafka_group_id'],
bootstrap_servers=[demo2_config['kafka_host']])
    print('Connected to Kafka.')
for msg in consumer:
this_key_bytes = msg.key
this_val_bytes = msg.value
this_key = str(this_key_bytes, encoding='utf-8')
this_val = str(this_val_bytes, encoding='utf-8')
# msg.key, msg.value, msg.topic, msg.partition, msg.offset
print(this_key, this_val, 'topic: {}, partition: {}, offset: {}'.format(msg.topic, msg.partition, msg.offset))
if __name__ == '__main__':
consume()
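For context, a companion producer sketch (not part of this file) that would feed the consumer above; it reuses demo2_config and the kafka import already present:
# Hypothetical companion producer for the consumer above
producer = kafka.KafkaProducer(bootstrap_servers=[demo2_config['kafka_host']])
producer.send(demo2_config['kafka_topic'], key=b'key1', value=b'value1')
producer.flush()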
| 29.46875
| 118
| 0.604454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 354
| 0.375398
|
128a56c54e5b4a6dbabdff93bd337ad93578a5cd
| 2,280
|
py
|
Python
|
autoscalingsim/scaling/scaling_model/scaling_model.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 6
|
2021-03-10T16:23:10.000Z
|
2022-01-14T04:57:46.000Z
|
autoscalingsim/scaling/scaling_model/scaling_model.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | null | null | null |
autoscalingsim/scaling/scaling_model/scaling_model.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 1
|
2022-01-14T04:57:55.000Z
|
2022-01-14T04:57:55.000Z
|
import json
import pandas as pd
from .application_scaling_model import ApplicationScalingModel
from .platform_scaling_model import PlatformScalingModel
from autoscalingsim.deltarepr.group_of_services_delta import GroupOfServicesDelta
from autoscalingsim.deltarepr.node_group_delta import NodeGroupDelta
from autoscalingsim.utils.error_check import ErrorChecker
class ScalingModel:
"""
Defines the scaling behaviour that does not depend upon the scaling policy.
Scaling model captures unmanaged scaling characteristics such as booting times
for virtual nodes or start-up times for service instances.
Contains two parts related to different resource abstraction levels, viz,
the application scaling model and the platform scaling model.
"""
def __init__(self, services_scaling_config : dict, simulation_step : pd.Timedelta, config_filename : str):
with open(config_filename) as f:
try:
config = json.load(f)
self.platform_scaling_model = PlatformScalingModel(simulation_step)
platform_config = ErrorChecker.key_check_and_load('platform', config)
for platform_i in platform_config:
provider = ErrorChecker.key_check_and_load('provider', platform_i)
nodes_scaling_infos_raw = ErrorChecker.key_check_and_load('nodes', platform_i, 'provider', provider)
self.platform_scaling_model.add_provider(provider, nodes_scaling_infos_raw)
app_config = ErrorChecker.key_check_and_load('application', config)
service_scaling_infos_raw = ErrorChecker.key_check_and_load('services', app_config)
self.application_scaling_model = ApplicationScalingModel(service_scaling_infos_raw, services_scaling_config)
except json.JSONDecodeError:
raise ValueError(f'An invalid JSON when parsing for {self.__class__.__name__}')
def platform_delay(self, node_group_delta : NodeGroupDelta):
return self.platform_scaling_model.delay(node_group_delta)
def application_delay(self, services_group_delta : GroupOfServicesDelta, provider : str = None):
return self.application_scaling_model.delay(services_group_delta, provider)
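A hedged construction sketch; the config filename, the empty services_scaling_config and the step size below are placeholders rather than values taken from the repository:
# Illustrative only: the JSON file must follow the structure parsed in __init__ above
import pandas as pd
scaling_model = ScalingModel(services_scaling_config={},
                             simulation_step=pd.Timedelta(milliseconds=10),
                             config_filename='scaling_model.json')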
| 43.018868
| 124
| 0.744298
| 1,914
| 0.839474
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.220175
|
128b3b5e8ee085ddcb7d0e7d01778d05032f8030
| 1,662
|
py
|
Python
|
src/zojax/filefield/copy.py
|
Zojax/zojax.filefield
|
36d92242dffbd5a7b4ce3c6886d8d5898067245a
|
[
"ZPL-2.1"
] | null | null | null |
src/zojax/filefield/copy.py
|
Zojax/zojax.filefield
|
36d92242dffbd5a7b4ce3c6886d8d5898067245a
|
[
"ZPL-2.1"
] | null | null | null |
src/zojax/filefield/copy.py
|
Zojax/zojax.filefield
|
36d92242dffbd5a7b4ce3c6886d8d5898067245a
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import component, interface
from zc.copy.interfaces import ICopyHook
from data import File, Image
from interfaces import IFile, IImage
@component.adapter(IFile)
@interface.implementer(ICopyHook)
def fileCopyFactory(original):
def factory(location, register):
file = File()
file.filename = original.filename
file.mimeType = original.mimeType
file.disablePreview = original.disablePreview
file.disablePrint = original.disablePrint
def afterCopy(translate):
file.data = original.data
register(afterCopy)
return file
return factory
@component.adapter(IImage)
@interface.implementer(ICopyHook)
def imageCopyFactory(original):
def factory(location, register):
image = Image()
image.filename = original.filename
image.mimeType = original.mimeType
def afterCopy(translate):
image.data = original.data
register(afterCopy)
return image
return factory
| 29.157895
| 78
| 0.642599
| 0
| 0
| 0
| 0
| 860
| 0.517449
| 0
| 0
| 636
| 0.382671
|
128cfb0881a4cb2a09e645ca55b7c92a498aaab7
| 192
|
py
|
Python
|
verbose.py
|
lowrey/myjsonstore
|
4d47f147fa5d86bea5d4e9b0bcab567583a794af
|
[
"MIT"
] | 1
|
2018-07-30T14:17:25.000Z
|
2018-07-30T14:17:25.000Z
|
verbose.py
|
lowrey/myjsonstore
|
4d47f147fa5d86bea5d4e9b0bcab567583a794af
|
[
"MIT"
] | null | null | null |
verbose.py
|
lowrey/myjsonstore
|
4d47f147fa5d86bea5d4e9b0bcab567583a794af
|
[
"MIT"
] | null | null | null |
import sys
verbose = False
def set_v(v):
global verbose
verbose = v
def print_v(s):
if verbose:
print(s)
def write_v(s):
if verbose:
sys.stdout.write(s)
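Typical usage of the helper above:
# Enable verbose mode once, then call the conditional printers anywhere
set_v(True)
print_v("processing started")
write_v("progress: 50%\n")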
| 10.105263
| 27
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
128d0ee6d357971754e6aa9345f8db462e223612
| 1,087
|
py
|
Python
|
app/component_b/command/services.py
|
mirevsky/django-grpc-cqrs-kafka-template
|
31af0bf5d15e393837f937cace90f82a7de26355
|
[
"MIT"
] | 2
|
2022-01-10T19:52:36.000Z
|
2022-03-19T07:34:54.000Z
|
app/component_b/command/services.py
|
mirevsky/django-grpc-cqrs-kafka-template
|
31af0bf5d15e393837f937cace90f82a7de26355
|
[
"MIT"
] | null | null | null |
app/component_b/command/services.py
|
mirevsky/django-grpc-cqrs-kafka-template
|
31af0bf5d15e393837f937cace90f82a7de26355
|
[
"MIT"
] | null | null | null |
import grpc
from google.protobuf import empty_pb2
from django_grpc_framework.services import Service
from component_b.common.serializers import PersonProtoSerializer
from component_b.common.models import PersonModel
class PersonCommandService(Service):
def get_object(self, pk):
try:
return PersonModel.objects.get(pk=pk)
except PersonModel.DoesNotExist:
            self.context.abort(grpc.StatusCode.NOT_FOUND, 'Person:%s not found!' % pk)
def Create(self, request, context):
serializer = PersonProtoSerializer(message=request)
serializer.is_valid(raise_exception=True)
serializer.save()
return serializer.message
def Update(self, request, context):
post = self.get_object(request.id)
serializer = PersonProtoSerializer(post, message=request)
serializer.is_valid(raise_exception=True)
serializer.save()
return serializer.message
def Destroy(self, request, context):
post = self.get_object(request.id)
post.delete()
return empty_pb2.Empty()
| 31.970588
| 84
| 0.706532
| 867
| 0.797608
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.018399
|
128d2e658f8131c779045c3cbeaae1830ec9ef68
| 485
|
py
|
Python
|
Lab 5/course_reader.py
|
kq4hy/CS3240-Lab-Files
|
2611c3185a405da95547434825da9052cd4c6cec
|
[
"MIT"
] | null | null | null |
Lab 5/course_reader.py
|
kq4hy/CS3240-Lab-Files
|
2611c3185a405da95547434825da9052cd4c6cec
|
[
"MIT"
] | null | null | null |
Lab 5/course_reader.py
|
kq4hy/CS3240-Lab-Files
|
2611c3185a405da95547434825da9052cd4c6cec
|
[
"MIT"
] | null | null | null |
__author__ = 'kq4hy'
import csv
import sqlite3
def load_course_database(db_name, csv_filename):
conn = sqlite3.connect(db_name)
with conn:
curs = conn.cursor()
with open(csv_filename, 'rU') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
sql_cmd = "insert into coursedata values(?, ?, ?, ?, ?, ?, ?)"
curs.execute(sql_cmd, row)
load_course_database('course1.db', 'seas-courses-5years.csv')
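A small follow-up sketch showing how the loaded rows could be inspected; the coursedata column names are not shown here, so the query stays generic:
# Read back a few of the rows inserted above
def show_sample(db_name):
    conn = sqlite3.connect(db_name)
    with conn:
        curs = conn.cursor()
        for row in curs.execute("select * from coursedata limit 5"):
            print(row)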
| 28.529412
| 78
| 0.610309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.206186
|
128e53da4b600437f498e3a40b34bc75e174bc07
| 117
|
py
|
Python
|
marshmallow_helpers/__init__.py
|
hilearn/marsh-enum
|
2003ed850b076cd9d29a340ee44abe1c73aadc66
|
[
"MIT"
] | null | null | null |
marshmallow_helpers/__init__.py
|
hilearn/marsh-enum
|
2003ed850b076cd9d29a340ee44abe1c73aadc66
|
[
"MIT"
] | null | null | null |
marshmallow_helpers/__init__.py
|
hilearn/marsh-enum
|
2003ed850b076cd9d29a340ee44abe1c73aadc66
|
[
"MIT"
] | null | null | null |
from .enum_field import EnumField, RegisteredEnum # noqa
from .marsh_schema import attr_with_schema, derive # noqa
| 39
| 58
| 0.811966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.102564
|
128e7777e186dad8ff8ca443386abd102aa7f54e
| 1,492
|
py
|
Python
|
Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py
|
MeqdadDev/ai-robotics-cv-iot-mini-projects
|
0c591bc495c95aa95d436e51f38e55bf510349ac
|
[
"MIT"
] | null | null | null |
Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py
|
MeqdadDev/ai-robotics-cv-iot-mini-projects
|
0c591bc495c95aa95d436e51f38e55bf510349ac
|
[
"MIT"
] | null | null | null |
Weather Station using DHT Sensor with Raspberry Pi and ThingSpeak Platform/Weather Station - ThingSpeak - Raspberry Pi.py
|
MeqdadDev/ai-robotics-cv-iot-mini-projects
|
0c591bc495c95aa95d436e51f38e55bf510349ac
|
[
"MIT"
] | 1
|
2022-03-29T07:41:23.000Z
|
2022-03-29T07:41:23.000Z
|
'''
IoT Mini Project
Weather Station using DHT Sensor and Raspberry Pi with ThingSpeak Platform
Code Sample: Interfacing DHT22 with Raspberry Pi and sending the data to an IoT Platform (ThingSpeak Platform)
'''
from time import sleep
# import Adafruit_DHT # Not supported library
import adafruit_dht
from board import *
import requests
# After creating your account on ThingSpeak platform, put your channel id below
channel_id = 12345
write_key = 'WriteYourKeyAsString.......' # Put your write key here
# D4 = GPIO4 / D17 = GPIO17 ...etc.
SENSOR_PIN = D17
def get_measurements():
dht22 = adafruit_dht.DHT22(SENSOR_PIN, use_pulseio=False)
temperature = dht22.temperature
humidity = dht22.humidity
print(f"Humidity= {humidity:.2f}")
print(f"Temperature= {temperature:.2f}°C")
return temperature, humidity
def sendData(temp, humidity):
url = 'https://api.thingspeak.com/update'
params = {'key': write_key, 'field1': temp, 'field2': humidity}
res = requests.get(url, params=params)
if __name__ == "__main__":
while True:
# 15 seconds is the minimum time for the free account on ThingSpeak
sleep(15)
try:
temperature, humidity = get_measurements()
except:
            print("Error: Can't get the sensor values, check out your wiring connection.")
            continue  # no fresh reading available, skip this send cycle
try:
sendData(temperature, humidity)
except:
print("Error: Can't push the sensor values to ThingSpeak server.")
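For reference, the values pushed above can be read back through ThingSpeak's read API; the read key below is a placeholder string:
# Hypothetical read-back of the most recent feeds from the same channel
read_url = f'https://api.thingspeak.com/channels/{channel_id}/feeds.json'
res = requests.get(read_url, params={'api_key': 'YourReadKeyAsString', 'results': 2})
print(res.json())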
| 29.84
| 110
| 0.690349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 749
| 0.501674
|
128e873ecfed93a46701bf97c5bfb7c6ee49fa55
| 931
|
py
|
Python
|
Demo2_PageObjectModel/features/steps/PageObject_Registration.py
|
imademethink/imademethink_python_selenium_demo
|
cc364bda00e75eb9115c680ddea5e2fbca1d7acb
|
[
"BSD-4-Clause"
] | 2
|
2019-04-05T05:09:14.000Z
|
2020-07-21T16:06:53.000Z
|
Demo2_PageObjectModel/features/steps/PageObject_Registration.py
|
imademethink/Python_Selenium_Demo
|
cc364bda00e75eb9115c680ddea5e2fbca1d7acb
|
[
"BSD-4-Clause"
] | 1
|
2020-01-08T08:15:42.000Z
|
2020-01-08T08:15:42.000Z
|
Demo2_PageObjectModel/features/steps/PageObject_Registration.py
|
imademethink/Python_Selenium_Demo
|
cc364bda00e75eb9115c680ddea5e2fbca1d7acb
|
[
"BSD-4-Clause"
] | 4
|
2018-04-13T08:28:53.000Z
|
2018-12-30T20:35:19.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from page_objects import PageObject, PageElement
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
delay_min = 3 # sec
delay_medium = 5 # sec
delay_max = 9 # sec
class RegistrationPage(PageObject):
text_box_first_name = PageElement(id_='customer.firstName')
button_submit_form = PageElement(css="input[value='Register']")
form_submit_result_message = PageElement(id_='customer.lastName.errors')
def method_registration_page_clean_database(self, current_web_driver,):
self.text_box_first_name = 'name_first'
self.button_submit_form.click()
WebDriverWait(current_web_driver,delay_medium).until(expected_conditions.visibility_of(self.form_submit_result_message))
return
| 35.807692
| 128
| 0.784103
| 553
| 0.593985
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.148228
|
128eba5345a78af068fb819342cfe180d8d296fd
| 53
|
py
|
Python
|
Tests/TestData/HOSimulation/HOTrialWavefunction/config.py
|
McCoyGroup/RynLib
|
8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d
|
[
"MIT"
] | 1
|
2019-05-04T00:34:11.000Z
|
2019-05-04T00:34:11.000Z
|
Tests/TestData/HOSimulation/HOTrialWavefunction/config.py
|
McCoyGroup/RynLib
|
8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d
|
[
"MIT"
] | null | null | null |
Tests/TestData/HOSimulation/HOTrialWavefunction/config.py
|
McCoyGroup/RynLib
|
8d7e119ebbd3da4c8b0efb49facba9ff1cbaa09d
|
[
"MIT"
] | 1
|
2020-03-04T22:47:09.000Z
|
2020-03-04T22:47:09.000Z
|
config = dict(
module="HOTrialWavefunction.py"
)
| 13.25
| 35
| 0.698113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 24
| 0.45283
|
128f728bec79cfe03c54bf8f06695117449e7c5a
| 5,771
|
py
|
Python
|
python/ucloud/import_data.py
|
oldthreefeng/miscellany
|
8d3c7a14b53929d752c7356c85ae6681000cd526
|
[
"MIT"
] | 1
|
2019-01-04T07:44:08.000Z
|
2019-01-04T07:44:08.000Z
|
python/ucloud/import_data.py
|
oldthreefeng/miscellany
|
8d3c7a14b53929d752c7356c85ae6681000cd526
|
[
"MIT"
] | null | null | null |
python/ucloud/import_data.py
|
oldthreefeng/miscellany
|
8d3c7a14b53929d752c7356c85ae6681000cd526
|
[
"MIT"
] | 2
|
2018-12-10T12:55:38.000Z
|
2019-01-04T07:43:55.000Z
|
#!/usr/bin/python2
import sys
import os
import redis
import time
import datetime
string_keys = []
hash_keys = []
list_keys = []
set_keys = []
zset_keys = []
def import_string(source, dest):
print "Begin Import String Type"
keys_count = len(string_keys)
print "String Key Count is:", keys_count
pipeSrc = source.pipeline(transaction=False)
pipeDst = dest.pipeline(transaction=False)
index = 0
pipe_size = 1000
while index < keys_count:
old_index = index
num = 0
while (index < keys_count) and (num < pipe_size):
pipeSrc.get(string_keys[index])
index += 1
num += 1
results = pipeSrc.execute()
for value in results:
pipeDst.set(string_keys[old_index], value)
old_index += 1
pipeDst.execute()
def import_hash(source, dest):
print "Begin Import Hash Type"
keys_count = len(hash_keys)
print "Hash Key Count is:", keys_count
pipeSrc = source.pipeline(transaction=False)
pipeDst = dest.pipeline(transaction=False)
for key in hash_keys:
hkeys = source.hkeys(key)
keys_count = len(hkeys)
index = 0
pipe_size = 1000
while index < keys_count:
old_index = index
num = 0
while (index < keys_count) and (num < pipe_size):
pipeSrc.hget(key, hkeys[index])
index += 1
num += 1
results = pipeSrc.execute()
for value in results:
pipeDst.hset(key, hkeys[old_index], value)
old_index += 1
pipeDst.execute()
def import_set(source, dest):
print "Begin Import Set Type"
keys_count = len(set_keys)
print "Set Key Count is:", keys_count
pipeDst = dest.pipeline(transaction=False)
for key in set_keys:
sValues = source.smembers(key)
value_count = len(sValues)
index = 0
pipe_size = 1000
while index < value_count:
old_index = index
num = 0
while (index < value_count) and (num < pipe_size):
pipeDst.sadd(key, sValues.pop())
index += 1
num += 1
pipeDst.execute()
def import_zset(source, dest):
print "Begin Import ZSet Type"
keys_count = len(zset_keys)
print "ZSet Key Count is:", keys_count
pipeSrc = source.pipeline(transaction=False)
pipeDst = dest.pipeline(transaction=False)
for key in zset_keys:
zset_size = source.zcard(key)
index = 0
pipe_size = 1000
while index < zset_size:
members = source.zrange(key, index, index + pipe_size)
index += len(members)
for member in members:
pipeSrc.zscore(key, member)
scores = pipeSrc.execute()
i = 0
for member in members:
pipeDst.zadd(key, member, scores[i])
i += 1
pipeDst.execute()
def import_list(source, dest):
print "Begin Import List Type"
keys_count = len(list_keys)
print "List Key Count is:", keys_count
pipeDst = dest.pipeline(transaction=False)
for key in list_keys:
list_size = source.llen(key)
index = 0
pipe_size = 1000
while index < list_size:
results = source.lrange(key, index, index + pipe_size)
index += len(results)
for value in results:
pipeDst.rpush(key, value)
pipeDst.execute()
def read_type_keys(source):
keys = source.keys()
keys_count = len(keys)
print "Key Count is:", keys_count
pipe = source.pipeline(transaction=False)
# for key in keys:
index = 0
pipe_size = 5000
while index < keys_count:
old_index = index
num = 0
while (index < keys_count) and (num < pipe_size):
pipe.type(keys[index])
index += 1
num += 1
results = pipe.execute()
for type in results:
if type == "string":
string_keys.append(keys[old_index])
elif type == "list":
list_keys.append(keys[old_index])
elif type == "hash":
hash_keys.append(keys[old_index])
elif type == "set":
set_keys.append(keys[old_index])
elif type == "zset":
zset_keys.append(keys[old_index])
else:
print keys[old_index], " is not find when TYPE"
old_index += 1
if __name__ == '__main__':
config = {
"source": ['10.4.1.91:0', '10.4.13.124:0', '10.4.12.16:0', '10.4.2.250:0'],
"dest": ['127.0.0.1:11', '127.0.0.1:12', '127.0.0.1:2', '127.0.0.1:1']
}
start = datetime.datetime.now()
for group in zip(config["source"], config["dest"]):
print group
SrcIP = group[0].split(':')[0]
SrcPort = 6379
DstIP = group[1].split(':')[0]
DstPort = 6379
DstDB = group[1].split(':')[1]
source = redis.Redis(host=SrcIP, port=SrcPort)
dest = redis.Redis(host=DstIP, port=DstPort, db=DstDB)
print "Begin Read Keys"
read_type_keys(source)
print "String Key Count is:", len(string_keys)
print "Set Key Count is:", len(set_keys)
print "ZSet Key Count is:", len(zset_keys)
print "List Key Count is:", len(list_keys)
print "Hash Key Count is:", len(hash_keys)
import_string(source, dest)
import_hash(source, dest)
import_list(source, dest)
import_set(source, dest)
import_zset(source, dest)
stop = datetime.datetime.now()
diff = stop - start
    print "Finished, elapsed time:", str(diff)
| 30.21466
| 83
| 0.562468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 624
| 0.108127
|
128ffa30d0305f7d87c64ef11d99dcfb6d3e311f
| 5,990
|
py
|
Python
|
kinlin/core/strategy.py
|
the-lay/kinlin
|
ce7c95d46d130049e356104ba77fad51bc59fb3f
|
[
"MIT"
] | null | null | null |
kinlin/core/strategy.py
|
the-lay/kinlin
|
ce7c95d46d130049e356104ba77fad51bc59fb3f
|
[
"MIT"
] | null | null | null |
kinlin/core/strategy.py
|
the-lay/kinlin
|
ce7c95d46d130049e356104ba77fad51bc59fb3f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
from enum import Enum
from typing import List, Callable, Any
from tqdm import tqdm
from .model import Model
from .dataset import Dataset
from .experiment import Experiment
from .callback import Callback
class TrainingEvents(Enum):
START = 'on_start'
FINISH = 'on_finish'
TRAINING_EPOCH_START = 'on_training_epoch_start'
TRAINING_EPOCH_FINISH = 'on_training_epoch_finish'
TRAINING_BATCH_START = 'on_training_batch_start'
TRAINING_BATCH_FINISH = 'on_training_batch_finish'
VALIDATION_EPOCH_START = 'on_validation_epoch_start'
VALIDATION_EPOCH_FINISH = 'on_validation_epoch_finish'
VALIDATION_BATCH_START = 'on_validation_batch_start'
VALIDATION_BATCH_FINISH = 'on_validation_batch_finish'
TESTING_START = 'on_testing_start'
TESTING_FINISH = 'on_testing_finish'
TESTING_BATCH_START = 'on_testing_batch_start'
TESTING_BATCH_FINISH = 'on_testing_batch_finish'
class TrainingStrategy:
def __init__(self, model: Model, dataset: Dataset, optimizer: torch.optim.Optimizer, experiment: Experiment = None,
callbacks: List[Callback] = None):
# properties
self.model: Model = model
self.dataset: Dataset = dataset
self.optimizer: torch.optim.Optimizer = optimizer
#self.experiment: Experiment = experiment
        self.callbacks: List[Callback] = callbacks if callbacks is not None else []
# parallelize network depending on experiment settings
# if len(self.experiment.devices) > 1:
# self.network = nn.DataParallel(self.model.network, device_ids=self.experiment.devices)
# else:
self.network = self.model.network
# event handler
self.handlers = {k: [] for k in TrainingEvents}
# register events
for event in TrainingEvents:
# model events
self.on_event(event, getattr(self.model, event.value))
# callback events
for c in self.callbacks:
self.on_event(event, getattr(c, event.value))
def on_event(self, event: TrainingEvents, handler: Callable):
self.handlers[event].append(handler)
def emit(self, event: TrainingEvents, *args, **kwargs):
for handler in self.handlers[event]:
handler(*args, **kwargs)
def training_epoch(self, epoch: int) -> None:
raise NotImplementedError
def validation_epoch(self, epoch: int) -> None:
raise NotImplementedError
def test(self) -> None:
raise NotImplementedError
def __call__(self, n_epochs: int = 1, validation: bool = True, verbose: bool = True):
if verbose:
print(f'Training{" and validating" if validation else ""}'
f' for {n_epochs} {"epochs" if n_epochs > 1 else "epoch"}')
self.model.print_summary()
self.dataset.print_summary()
print(f'Optimizer: {self.optimizer.__class__.__name__}\n'
f'\tLearning rate: {self.optimizer.param_groups[0]["lr"]}')
print(f'Callbacks: {", ".join(c.__class__.__name__ for c in self.callbacks)}')
for epoch in range(n_epochs):
print(f'\nEpoch {epoch}:')
self.training_epoch(epoch)
if validation:
self.validation_epoch(epoch)
class SupervisedTraining(TrainingStrategy):
def training_epoch(self, epoch: int) -> None:
self.model.network.train()
self.emit(TrainingEvents.TRAINING_EPOCH_START, epoch, self.model)
with tqdm(self.dataset.training_dataloader(), desc='Training', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.TRAINING_BATCH_START, batch, batch_id, epoch)
loss, y_pred, y_true = self.model.training_fn(batch, batch_id, epoch)
self.model.backprop_fn(loss, self.optimizer)
self.emit(TrainingEvents.TRAINING_BATCH_FINISH, batch, batch_id, epoch,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.TRAINING_EPOCH_FINISH, epoch, self.model)
def validation_epoch(self, epoch: int) -> None:
self.model.network.eval()
self.model.network.train(False)
with torch.no_grad():
self.emit(TrainingEvents.VALIDATION_EPOCH_START, epoch, self.model)
with tqdm(self.dataset.validation_dataloader(), desc='Validation', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.VALIDATION_BATCH_START, batch, batch_id, epoch)
loss, y_pred, y_true = self.model.validation_fn(batch, batch_id, epoch)
self.emit(TrainingEvents.VALIDATION_BATCH_FINISH, batch, batch_id, epoch,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.VALIDATION_EPOCH_FINISH, epoch, self.model)
def test(self) -> None:
self.model.network.eval()
self.model.network.train(False)
with torch.no_grad():
self.emit(TrainingEvents.TESTING_START)
with tqdm(self.dataset.validation_dataloader(), desc='Testing', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.TESTING_BATCH_START, batch, batch_id)
loss, y_pred, y_true = self.model.validation_fn(batch, batch_id, -1)
self.emit(TrainingEvents.TESTING_BATCH_FINISH, batch, batch_id,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.TESTING_FINISH)
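A hedged end-to-end sketch, assuming concrete Model, Dataset and Callback instances (my_model, my_dataset, my_callbacks) are available from elsewhere in the package:
# Illustrative wiring of the supervised strategy (placeholders, not a confirmed API usage)
optimizer = torch.optim.Adam(my_model.network.parameters(), lr=1e-3)
strategy = SupervisedTraining(my_model, my_dataset, optimizer, callbacks=my_callbacks)
strategy(n_epochs=5, validation=True)  # runs training_epoch/validation_epoch per epoch
strategy.test()                        # single evaluation pass over the validation loader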
| 40.748299
| 119
| 0.645576
| 5,724
| 0.955593
| 0
| 0
| 0
| 0
| 0
| 0
| 1,050
| 0.175292
|
1290da62e7e73de3c4c75ef861a9d5a9bcbe1f4b
| 2,924
|
py
|
Python
|
tests/test_utils.py
|
jamesmcclain/pystac
|
993b54f5a10b0d55db18dbda81c5ad7acc06d921
|
[
"Apache-2.0"
] | 1
|
2018-08-04T05:24:58.000Z
|
2018-08-04T05:24:58.000Z
|
tests/test_utils.py
|
jamesmcclain/pystac
|
993b54f5a10b0d55db18dbda81c5ad7acc06d921
|
[
"Apache-2.0"
] | 4
|
2017-12-11T22:15:44.000Z
|
2018-06-15T15:20:34.000Z
|
tests/test_utils.py
|
jamesmcclain/pystac
|
993b54f5a10b0d55db18dbda81c5ad7acc06d921
|
[
"Apache-2.0"
] | 5
|
2018-06-15T14:51:50.000Z
|
2019-08-22T05:33:55.000Z
|
import unittest
from pystac.utils import (make_relative_href, make_absolute_href,
is_absolute_href)
class UtilsTest(unittest.TestCase):
def test_make_relative_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('/a/b/c/d/catalog.json', '/a/b/c/catalog.json',
'./d/catalog.json'),
('/a/b/catalog.json', '/a/b/c/catalog.json', '../catalog.json'),
('/a/catalog.json', '/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/b/c/d/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', './d/catalog.json'),
('http://stacspec.org/a/b/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://cogeo.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json'),
('http://stacspec.org/a/catalog.json',
'https://stacspec.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json')
]
for source_href, start_href, expected in test_cases:
actual = make_relative_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_make_absolute_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./z/item.json', '/a/b/c/catalog.json', '/a/b/c/z/item.json'),
('../item.json', '/a/b/c/catalog.json', '/a/b/item.json'),
('item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./z/item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/z/item.json'),
('../item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/item.json')
]
for source_href, start_href, expected in test_cases:
actual = make_absolute_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_is_absolute_href(self):
# Test cases of (href, expected)
test_cases = [('item.json', False), ('./item.json', False),
('../item.json', False), ('/item.json', True),
('http://stacgeo.org/item.json', True)]
for href, expected in test_cases:
actual = is_absolute_href(href)
self.assertEqual(actual, expected)
| 46.412698
| 77
| 0.548906
| 2,794
| 0.95554
| 0
| 0
| 0
| 0
| 0
| 0
| 1,474
| 0.504104
|
1290db3be5d147e6281013adc1419767bcf94d89
| 1,322
|
py
|
Python
|
services/web/manage.py
|
EMBEDDIA/ULR_NER_REST
|
520accbced155a43543969f8a0a96a02c0b2d46d
|
[
"MIT"
] | null | null | null |
services/web/manage.py
|
EMBEDDIA/ULR_NER_REST
|
520accbced155a43543969f8a0a96a02c0b2d46d
|
[
"MIT"
] | 3
|
2020-04-24T11:38:40.000Z
|
2021-12-03T09:01:17.000Z
|
services/web/manage.py
|
EMBEDDIA/ULR_NER_REST
|
520accbced155a43543969f8a0a96a02c0b2d46d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Michael Herman
# Copyright (c) 2020 Vid Podpečan
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from flask.cli import FlaskGroup
from flask_cors import CORS
from project import flask_app
CORS(flask_app)
cli = FlaskGroup(flask_app)
if __name__ == "__main__":
cli()
#flask_app.run(debug=True)
| 45.586207
| 96
| 0.781392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,140
| 0.861678
|
12916103d8a5f146e7baa8906defb115aac95a11
| 5,737
|
py
|
Python
|
GUI/PopUps/ExportPopUp.py
|
iagerogiannis/Image_to_plot
|
15c01c50dcd23dfd187069145b3f2fdc06ed73a9
|
[
"BSD-3-Clause"
] | null | null | null |
GUI/PopUps/ExportPopUp.py
|
iagerogiannis/Image_to_plot
|
15c01c50dcd23dfd187069145b3f2fdc06ed73a9
|
[
"BSD-3-Clause"
] | null | null | null |
GUI/PopUps/ExportPopUp.py
|
iagerogiannis/Image_to_plot
|
15c01c50dcd23dfd187069145b3f2fdc06ed73a9
|
[
"BSD-3-Clause"
] | null | null | null |
from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton
from GUI.CustomWidgets.PathFileLineEdit import PathFileLineEdit
from GUI.CustomWidgets.InputField import InputField
class ExportPopUp(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.selected_shape_ids = []
self.selected_shape_names = []
self.selected_axis_system_id = None
self.dx_division = True
self.filepath = ""
self.setWindowTitle("Export Data")
self.axis_group = QGroupBox(self)
self.axis_group.setTitle("Axis System")
axis_systems = [axis_system["Name"] for axis_system in self.parent.workspace.shapes_tree.get_axis_systems()]
self.axis_system_ids = [axis_system["id"] for axis_system in self.parent.workspace.shapes_tree.get_axis_systems()]
self.axis_systems = QComboBox(self.axis_group)
self.axis_systems.addItem("(Choose Axis System)")
for axis_system in axis_systems:
self.axis_systems.addItem(axis_system)
self.axis_layout = QVBoxLayout(self.axis_group)
self.axis_layout.addWidget(self.axis_systems)
self.axis_group.setLayout(self.axis_layout)
self.shapes_group = QGroupBox(self)
self.shapes_group.setTitle("Shapes")
shapes = [shape["Name"] for shape in self.parent.workspace.shapes_tree.get_shapes()]
self.shape_ids = [shape["id"] for shape in self.parent.workspace.shapes_tree.get_shapes()]
self.shapes = []
for shape in shapes:
self.add_shape(shape)
self.shapes_layout = QGridLayout(self.shapes_group)
self.arrange_shapes_layout()
self.shapes_group.setLayout(self.shapes_layout)
self.options_group = QGroupBox(self)
self.options_group.setTitle("Spline Options")
self.radio_buttons = [QRadioButton(self.options_group) for i in range(2)]
self.radio_buttons[0].dx_division = True
self.radio_buttons[0].setText("dx Division")
self.radio_buttons[0].setChecked(True)
self.radio_buttons[1].dx_division = False
self.radio_buttons[1].setText("dt Division")
for radio in self.radio_buttons:
radio.toggled.connect(self.handle_radio_toggled)
self.num_of_divisions_value = 200
self.num_of_divisions = InputField(self.options_group, "Number of Points", str(self.num_of_divisions_value),
10, [2, 1], 170)
self.options_layout = QGridLayout(self.options_group)
for i in range(2):
self.options_layout.addWidget(self.radio_buttons[i], 0, i)
self.options_layout.addWidget(self.num_of_divisions, 1, 0, 1, 2)
self.options_group.setLayout(self.options_layout)
self.export_group = QGroupBox(self)
self.export_group.setTitle("File Export")
self.filepath_line_edit = PathFileLineEdit(self.export_group, "Export File", filename="plot_data",
filters="Excel Workbook (*.xlsx);; CSV (Comma Delimited) (*.csv)")
self.export_layout = QVBoxLayout(self.export_group)
self.export_layout.addWidget(self.filepath_line_edit)
self.export_group.setLayout(self.export_layout)
self.export_button = QPushButton(self)
self.export_button.setText("Export")
self.export_button.pressed.connect(self.handle_export)
self.layout = QVBoxLayout(self)
self.layout.addWidget(self.axis_group)
self.layout.addWidget(self.shapes_group)
self.layout.addWidget(self.options_group)
self.layout.addWidget(self.export_group)
self.layout.addWidget(self.export_button)
self.setLayout(self.layout)
self.setFixedSize(380, 300 + 10 * (len(self.shapes) + len(self.shapes) % 2))
self.show()
def handle_radio_toggled(self):
radio_button = self.sender()
if radio_button.isChecked():
self.dx_division = radio_button.dx_division
def arrange_shapes_layout(self):
i = 0
n = len(self.shapes)
rows = divmod(n + 1, 2)[0]
for shape in self.shapes:
col, row = divmod(i, rows)
self.shapes_layout.addWidget(shape, row, col)
i += 1
def add_shape(self, shape_name):
self.shapes.append(QCheckBox(shape_name))
def handle_export(self):
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
self.selected_shape_ids = [self.shape_ids[i] for i in range(len(self.shapes)) if self.shapes[i].isChecked()]
self.selected_shape_names = [shape.text() for shape in self.shapes if shape.isChecked()]
if self.axis_systems.currentIndex() == 0:
QMessageBox.warning(self, "Error", "Please select an Axis System!")
elif len(self.selected_shape_ids) == 0:
QMessageBox.warning(self, "Error", "Please select an least one graph for export!")
elif self.filepath_line_edit.text() == "":
QMessageBox.warning(self, "Error", "Please define file path!")
elif not is_int(self.num_of_divisions.value):
            QMessageBox.warning(self, "Error", "Please enter a valid number of points!")
else:
self.filepath = self.filepath_line_edit.text()
self.num_of_divisions_value = int(self.num_of_divisions.value)
self.selected_axis_system_id = self.axis_system_ids[self.axis_systems.currentIndex() - 1]
self.accept()
def closeEvent(self, a0):
self.reject()
| 40.401408
| 134
| 0.656092
| 5,483
| 0.955726
| 0
| 0
| 0
| 0
| 0
| 0
| 399
| 0.069549
|
1291ab8aed0db6cb7b1e8e05e5e25b1e6da39aea
| 7,993
|
py
|
Python
|
cwltool/update.py
|
PlatformedTasks/PLAS-cwl-tes
|
5e66a5f9309906d1e8caa0f7148b8517a17f840d
|
[
"Apache-2.0"
] | null | null | null |
cwltool/update.py
|
PlatformedTasks/PLAS-cwl-tes
|
5e66a5f9309906d1e8caa0f7148b8517a17f840d
|
[
"Apache-2.0"
] | null | null | null |
cwltool/update.py
|
PlatformedTasks/PLAS-cwl-tes
|
5e66a5f9309906d1e8caa0f7148b8517a17f840d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import copy
import re
from typing import (Any, Callable, Dict, List, MutableMapping, MutableSequence,
Optional, Tuple, Union)
from functools import partial
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad import validate
from schema_salad.ref_resolver import Loader # pylint: disable=unused-import
from six import string_types
from six.moves import urllib
from typing_extensions import Text
from schema_salad.sourceline import SourceLine
from .loghandler import _logger
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from .utils import visit_class, visit_field, aslist
def v1_0to1_1(doc, loader, baseuri): # pylint: disable=unused-argument
# type: (Any, Loader, Text) -> Tuple[Any, Text]
"""Public updater for v1.0 to v1.1."""
doc = copy.deepcopy(doc)
rewrite = {
"http://commonwl.org/cwltool#WorkReuse": "WorkReuse",
"http://arvados.org/cwl#ReuseRequirement": "WorkReuse",
"http://commonwl.org/cwltool#TimeLimit": "ToolTimeLimit",
"http://commonwl.org/cwltool#NetworkAccess": "NetworkAccess",
"http://commonwl.org/cwltool#InplaceUpdateRequirement": "InplaceUpdateRequirement",
"http://commonwl.org/cwltool#LoadListingRequirement": "LoadListingRequirement"
}
def rewrite_requirements(t): # type: (MutableMapping[Text, Union[Text, Dict[Text, Any]]]) -> None
if "requirements" in t:
for r in t["requirements"]:
if isinstance(r, MutableMapping):
if r["class"] in rewrite:
r["class"] = rewrite[r["class"]]
else:
raise validate.ValidationException(
"requirements entries must be dictionaries: {} {}.".format(
type(r), r))
if "hints" in t:
for r in t["hints"]:
if isinstance(r, MutableMapping):
if r["class"] in rewrite:
r["class"] = rewrite[r["class"]]
else:
raise validate.ValidationException(
"hints entries must be dictionaries: {} {}.".format(
type(r), r))
if "steps" in t:
for s in t["steps"]:
if isinstance(s, MutableMapping):
rewrite_requirements(s)
else:
raise validate.ValidationException(
"steps entries must be dictionaries: {} {}.".format(
type(s), s))
def update_secondaryFiles(t, top=False):
# type: (Any, bool) -> Union[MutableSequence[MutableMapping[Text, Text]], MutableMapping[Text, Text]]
if isinstance(t, CommentedSeq):
new_seq = copy.deepcopy(t)
for index, entry in enumerate(t):
new_seq[index] = update_secondaryFiles(entry)
return new_seq
elif isinstance(t, MutableSequence):
return CommentedSeq([update_secondaryFiles(p) for p in t])
elif isinstance(t, MutableMapping):
return t
elif top:
return CommentedSeq([CommentedMap([("pattern", t)])])
else:
return CommentedMap([("pattern", t)])
def fix_inputBinding(t): # type: (Dict[Text, Any]) -> None
for i in t["inputs"]:
if "inputBinding" in i:
ib = i["inputBinding"]
for k in list(ib.keys()):
if k != "loadContents":
_logger.warning(SourceLine(ib, k).makeError(
"Will ignore field '{}' which is not valid in {} "
"inputBinding".format(k, t["class"])))
del ib[k]
visit_class(doc, ("CommandLineTool","Workflow"), rewrite_requirements)
visit_class(doc, ("ExpressionTool","Workflow"), fix_inputBinding)
visit_field(doc, "secondaryFiles", partial(update_secondaryFiles, top=True))
upd = doc
if isinstance(upd, MutableMapping) and "$graph" in upd:
upd = upd["$graph"]
for proc in aslist(upd):
proc.setdefault("hints", CommentedSeq())
proc["hints"].insert(0, CommentedMap([("class", "NetworkAccess"),( "networkAccess", True)]))
proc["hints"].insert(0, CommentedMap([("class", "LoadListingRequirement"),("loadListing", "deep_listing")]))
if "cwlVersion" in proc:
del proc["cwlVersion"]
return (doc, "v1.1")
def v1_1_0dev1to1_1(doc, loader, baseuri): # pylint: disable=unused-argument
# type: (Any, Loader, Text) -> Tuple[Any, Text]
return (doc, "v1.1")
UPDATES = {
u"v1.0": v1_0to1_1,
u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
DEVUPDATES = {
u"v1.0": v1_0to1_1,
u"v1.1.0-dev1": v1_1_0dev1to1_1,
u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
ALLUPDATES = UPDATES.copy()
ALLUPDATES.update(DEVUPDATES)
INTERNAL_VERSION = u"v1.1"
def identity(doc, loader, baseuri): # pylint: disable=unused-argument
# type: (Any, Loader, Text) -> Tuple[Any, Union[Text, Text]]
"""Default, do-nothing, CWL document upgrade function."""
return (doc, doc["cwlVersion"])
def checkversion(doc, # type: Union[CommentedSeq, CommentedMap]
metadata, # type: CommentedMap
enable_dev # type: bool
):
    # type: (...) -> Tuple[CommentedMap, Text]
    """Check the validity of the version of the given CWL document.
Returns the document and the validated version string.
"""
cdoc = None # type: Optional[CommentedMap]
if isinstance(doc, CommentedSeq):
if not isinstance(metadata, CommentedMap):
raise Exception("Expected metadata to be CommentedMap")
lc = metadata.lc
metadata = copy.deepcopy(metadata)
metadata.lc.data = copy.copy(lc.data)
metadata.lc.filename = lc.filename
metadata[u"$graph"] = doc
cdoc = metadata
elif isinstance(doc, CommentedMap):
cdoc = doc
else:
raise Exception("Expected CommentedMap or CommentedSeq")
version = metadata[u"cwlVersion"]
cdoc["cwlVersion"] = version
if version not in UPDATES:
if version in DEVUPDATES:
if enable_dev:
pass
else:
keys = list(UPDATES.keys())
keys.sort()
raise validate.ValidationException(
u"Version '%s' is a development or deprecated version.\n "
"Update your document to a stable version (%s) or use "
"--enable-dev to enable support for development and "
"deprecated versions." % (version, ", ".join(keys)))
else:
raise validate.ValidationException(
u"Unrecognized version %s" % version)
return (cdoc, version)
def update(doc, loader, baseuri, enable_dev, metadata):
# type: (Union[CommentedSeq, CommentedMap], Loader, Text, bool, Any) -> CommentedMap
if isinstance(doc, CommentedMap):
if metadata.get("http://commonwl.org/cwltool#original_cwlVersion") \
or doc.get("http://commonwl.org/cwltool#original_cwlVersion"):
return doc
(cdoc, version) = checkversion(doc, metadata, enable_dev)
originalversion = copy.copy(version)
nextupdate = identity # type: Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]
while nextupdate:
(cdoc, version) = nextupdate(cdoc, loader, baseuri)
nextupdate = ALLUPDATES[version]
cdoc[u"cwlVersion"] = version
metadata[u"cwlVersion"] = version
metadata[u"http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
cdoc[u"http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
return cdoc
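# --- Editor's note (illustration only, not part of cwltool) -------------------
# update() walks ALLUPDATES from the document's declared version until the table
# yields None. A stand-alone toy with the same control flow, using plain dicts:
#
#     steps = {"v1.0": lambda d: (dict(d, upgraded=True), "v1.1"), "v1.1": None}
#     doc, version = {"cwlVersion": "v1.0"}, "v1.0"
#     while steps[version] is not None:
#         doc, version = steps[version](doc)
#     # now version == "v1.1" and doc["upgraded"] is True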
| 39.181373
| 116
| 0.600025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,832
| 0.35431
|
129258b78096fc56ca7d44ecd92404b8c97448a2
| 2,072
|
py
|
Python
|
plottify/plottify.py
|
neutrinoceros/plottify
|
21f4858dabe1228559a8beb385f134ccfb25321e
|
[
"MIT"
] | null | null | null |
plottify/plottify.py
|
neutrinoceros/plottify
|
21f4858dabe1228559a8beb385f134ccfb25321e
|
[
"MIT"
] | null | null | null |
plottify/plottify.py
|
neutrinoceros/plottify
|
21f4858dabe1228559a8beb385f134ccfb25321e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.lines import Line2D
def autosize(fig=None, figsize=None):
## Take current figure if no figure provided
if fig is None:
fig = plt.gcf()
if figsize is None:
## Get size of figure
figsize = fig.get_size_inches()
else:
## Set size of figure
fig.set_size_inches(figsize)
## Make font sizes proportional to figure size
fontsize_labels = figsize[0] * 5
fontsize_ticks = fontsize_labels / 2
scatter_size = (figsize[0] * 1.5) ** 2
linewidth = figsize[0]
axes = fig.get_axes()
for ax in axes:
## Set label font sizes
for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
item.set_fontsize(fontsize_labels)
## Set tick font sizes
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fontsize_ticks)
## Set line widths
plot_objs = [child for child in ax.get_children() if isinstance(child, Line2D)]
for plot_obj in plot_objs:
plot_obj.set_linewidth(linewidth)
## Set scatter point sizes
plot_objs = [
child
for child in ax.get_children()
if isinstance(child, collections.PathCollection)
]
for plot_obj in plot_objs:
plot_obj.set_sizes([scatter_size])
## Set tight layout
plt.tight_layout()
if __name__ == "__main__":
import numpy as np
from plottify import autosize
import matplotlib.pyplot as plt
n = 100
x = np.random.uniform(low=-5, high=5, size=n)
y = x + np.random.normal(scale=0.5, size=n)
for size in [3, 10, 20]:
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Default")
plt.show()
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Autosized")
autosize()
plt.show()
| 26.227848
| 87
| 0.598456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.1361
|
12928ccd7dc4a56b7be40e6eb4668aed89dd266b
| 8,546
|
py
|
Python
|
ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py
|
DistinctWind/ManimProjects
|
6318643afcc24574cbd9a0a45ff0d913d4711b13
|
[
"MIT"
] | 2
|
2020-03-15T01:27:09.000Z
|
2020-03-20T02:08:09.000Z
|
ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py
|
DistinctWind/ManimProjects
|
6318643afcc24574cbd9a0a45ff0d913d4711b13
|
[
"MIT"
] | null | null | null |
ocular_algorithm/0x04_BasicRecurrenceAndRecursion.py
|
DistinctWind/ManimProjects
|
6318643afcc24574cbd9a0a45ff0d913d4711b13
|
[
"MIT"
] | null | null | null |
from re import S
from manimlib import *
import sys
import os
from tqdm.std import tqdm
sys.path.append(os.getcwd())
from utils.imports import *
class Opening(Scene):
def construct(self):
        title = Text("Basic Recurrence and Recursion", font='msyh')
self.play(Write(title), run_time=2)
self.wait()
self.play(FadeOut(title))
self.wait()
return super().construct()
class BeginningIntroduction(Scene):
def construct(self):
RecurrenceFormula = Tex(
r"a_1=1 ,\quad a_n=a_{n-1}+1"
)
GeneralFormula = Tex(
r"a_n=n"
)
VGroup(RecurrenceFormula, GeneralFormula).arrange(DOWN, buff=LARGE_BUFF)
self.play(Write(RecurrenceFormula))
self.wait()
self.play(Write(GeneralFormula))
self.wait()
RecurrenceFormula.target = Tex(
r"a_n=\begin{cases}1&{n=1,2,}\\a_{n-1}+a_{n-2}&n\geq3.\end{cases}"
).replace(RecurrenceFormula).scale(1.25).shift(UP*.5)
GeneralFormula.target = Tex(
r"a_n=\frac{1}{\sqrt{5}}\left[\left(\frac{1+\sqrt{5}}{2}\right)^n-\left(\frac{1-\sqrt{5}}{2}\right)^n\right]"
).next_to(RecurrenceFormula.target, DOWN, buff=LARGE_BUFF)
self.play(MoveToTarget(RecurrenceFormula), MoveToTarget(GeneralFormula))
self.wait()
self.play(
FadeOut(GeneralFormula),
RecurrenceFormula.animate.move_to(ORIGIN)
)
self.wait()
Fib = [1, 1]
for i in range(2, 2022):
Fib.append(Fib[i-1]+Fib[i-2])
Fib_eq = []
for i in tqdm(range(2021)):
Fib_eq.append(Text("a["+str(i+1)+"]"))
VGroup(*Fib_eq).arrange(DOWN).next_to(RecurrenceFormula, DOWN)
self.play(*[Write(_Fib_eq) for _Fib_eq in Fib_eq], run_time=2)
self.wait()
self.play(self.camera.frame.animate.move_to(Fib_eq[-1].get_center()), run_time=10)
self.wait()
self.play(*[FadeOut(_mobjects) for _mobjects in self.mobjects])
self.wait()
return super().construct()
class RecurrenceFibIntroduction(Scene):
def construct(self):
        title = Text("Fibonacci Sequence", font='DengXian')
self.play(Write(title))
self.wait()
subtitle = Text("Fibonacci", font='DengXian')
subtitle.next_to(title, DOWN)
self.play(Write(subtitle))
subtitle.target = Text("Fib", font='DengXian').next_to(title, DOWN)
self.play(MoveToTarget(subtitle))
self.wait()
subtitle.target = Text("fib", font='DengXian').next_to(title, DOWN)
self.play(MoveToTarget(subtitle))
self.wait()
self.play(FadeOut(subtitle))
self.wait()
self.play(title.animate.to_edge(UP).scale(0.75))
RecurrenceFormula = Tex(
r"a_n=\begin{cases}1&{n=1,2,}\\a_{n-1}+a_{n-2}&n\geq3.\end{cases}"
).scale(1.25).shift(UP*.5)
GeneralFormula = Tex(
r"a_n=\frac{1}{\sqrt{5}}\left[\left(\frac{1+\sqrt{5}}{2}\right)^n-\left(\frac{1-\sqrt{5}}{2}\right)^n\right]"
).next_to(RecurrenceFormula, DOWN, buff=LARGE_BUFF)
self.play(Write(RecurrenceFormula), Write(GeneralFormula))
self.wait()
self.play(FadeOut(RecurrenceFormula), FadeOut(GeneralFormula))
self.wait()
seq = Sequence([0 for i in range(10)]).move_to(ORIGIN)
seq.on_show(self)
seq.write(1, 1, self)
seq.write(2, 1, self)
for pos in range(3, 11):
seq.activate(pos, self)
seq.write(pos, seq.get_val(pos-1)+seq.get_val(pos-2), self)
self.play(*[FadeOut(_mobject) for _mobject in self.mobjects])
return super().construct()
class RecursionFibIntroduction(Scene):
def construct(self):
        title = Text("Fibonacci Sequence", font='DengXian')
        subtitle = Text("(Recursive Solution)", font='DengXian')
subtitle.scale(0.75).next_to(title, DOWN, buff=MED_SMALL_BUFF)
self.play(
Write(title),
Write(subtitle)
)
self.wait()
self.play(
FadeOut(title),
FadeOut(subtitle)
)
seq = Sequence([1, 1, 0, 0, 0])
main_call = seq.cells[rid(5)].copy().next_to(seq, DOWN, buff=MED_LARGE_BUFF)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(main_call))
return super().construct()
class trying1(Scene):
def construct(self):
tex = Tex("a=1")
self.play(Write(tex))
return super().construct()
class trying2(Scene):
def construct(self):
hello = Tex("1")
rec = Rectangle()
f_always(rec.move_to, hello.get_center)
self.play(Write(hello))
self.play(ShowCreation(rec))
self.play(hello.animate.shift(2*RIGHT+UP))
class trying3(Scene):
def construct(self):
cell = Cell(1234567890, 7)
self.play(ShowCreation(cell))
self.play(*cell.write(1))
return super().construct()
class trying4(Scene):
def construct(self):
seq = Sequence([1, 3, 5, 2, 4, 6])
self.play(ShowCreation(seq), GrowArrow(seq.arrow))
seq.activate(4, self)
seq.activate(6, self)
seq.write(3, 123456, self)
seq.write(6, 123456, self)
seq.write(2, 1345, self)
seq.write(3, 1, self)
return super().construct()
class trying5(Scene):
def construct(self):
depth_bar = DepthBar()
self.play(ShowCreation(depth_bar))
self.play(depth_bar.deepen())
return super().construct()
class trying6(Scene):
def construct(self):
self.camera.frame.shift(DOWN)
seq = Sequence([1, 2, 3]).shift(UP)
main_caller = seq.get_cell(3).copy()
tree = CallTree(main_caller)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(tree.depth_bar))
self.play(ShowCreation(tree))
to_caller = seq.get_cell(2).copy()
to_caller.next_to(main_caller, DOWN)
self.play(*tree.extent(main_caller, seq.get_cell(2).copy(), 2))
self.play(*tree.compose())
self.play(*tree.extent(main_caller, seq.get_cell(1).copy(), 2))
self.play(*tree.compose())
return super().construct()
class trying7(Scene):
def construct(self):
rec = Rectangle()
cir = Circle()
rec.to_edge(LEFT)
cir.move_to(UP*2, RIGHT*3)
self.play(ShowCreation(rec), ShowCreation(cir))
self.play(cir.animate.align_to(rec, UP))
return super().construct()
class trying8(Scene):
def construct(self):
rec = Rectangle().shift(DOWN)
cir = Circle().shift(DOWN).to_edge(RIGHT)
self.play(ShowCreation(cir))
self.wait()
self.play(cir.animate.shift(x_shift(cir)))
return super().construct()
class trying9(Scene):
def construct(self):
rec = Rectangle().shift(LEFT*2)
cir = Circle().shift(RIGHT*2)
arrow = always_redraw(lambda :Arrow(rec.get_right(), cir.get_left()))
self.play(ShowCreation(rec), ShowCreation(cir))
self.play(GrowArrow(arrow))
self.play(rec.animate.shift(UP))
self.play(cir.animate.shift(DOWN+RIGHT*2))
return super().construct()
class trying10(Scene):
def construct(self):
seq = Sequence([1, 2, 3, 4, 5]).to_edge(UP)
main_caller = seq.get_cell(3).copy()
tree = CallTree(main_caller).next_to(seq, DOWN)
tree.depth_bar.align_to(seq, UP)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(tree.depth_bar))
self.play(ShowCreation(tree))
to_caller = seq.get_cell(2).copy()
to_caller.next_to(main_caller, DOWN)
self.play(*tree.extent(main_caller, seq.get_cell(2).copy(), 2))
self.play(*tree.compose())
self.play(*tree.extent(main_caller, seq.get_cell(1).copy(), 2))
self.play(*tree.compose())
self.play(self.camera.frame.animate.shift(DOWN))
self.play(*tree.extent(tree.get_cell(2, 1), seq.get_cell(4).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 1), seq.get_cell(5).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 2), seq.get_cell(4).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 2), seq.get_cell(5).copy(), 3))
self.play(*tree.compose())
return super().construct()
| 32.371212
| 121
| 0.589165
| 8,412
| 0.97905
| 0
| 0
| 0
| 0
| 0
| 0
| 567
| 0.065992
|
1292ffb60fd870f5e14b52506ec687c6761bed39
| 299
|
py
|
Python
|
utility.py
|
Ming-desu/POKEMING
|
2def3b47e7c08b71885f14944bffe105a63cc12a
|
[
"MIT"
] | null | null | null |
utility.py
|
Ming-desu/POKEMING
|
2def3b47e7c08b71885f14944bffe105a63cc12a
|
[
"MIT"
] | null | null | null |
utility.py
|
Ming-desu/POKEMING
|
2def3b47e7c08b71885f14944bffe105a63cc12a
|
[
"MIT"
] | null | null | null |
# POKEMING - GON'NA CATCH 'EM ALL
# -- A simple hack 'n slash game in console
# -- This class is handles all utility related things
class Utility:
    # Show an important game message and wait for the player to acknowledge it
    @staticmethod
    def pause(message):
print(message)
input('Press any key to continue.')
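# A minimal usage sketch (editor's example, the message text is made up):
#   Utility.pause('A wild POKEMING appeared!')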
| 37.375
| 55
| 0.665552
| 164
| 0.548495
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.705686
|
12932a6f23a6e9331d41a53f62dfc3d9f6482d92
| 2,057
|
py
|
Python
|
gpv2/data/lessons/mil.py
|
michalsr/gpv2
|
00a22b311dbaeefb04e1df676eb6ae3373d8d4b5
|
[
"Apache-2.0"
] | null | null | null |
gpv2/data/lessons/mil.py
|
michalsr/gpv2
|
00a22b311dbaeefb04e1df676eb6ae3373d8d4b5
|
[
"Apache-2.0"
] | null | null | null |
gpv2/data/lessons/mil.py
|
michalsr/gpv2
|
00a22b311dbaeefb04e1df676eb6ae3373d8d4b5
|
[
"Apache-2.0"
] | null | null | null |
import logging
import sys
from typing import Union, Optional, Dict, Any, List
from dataclasses import dataclass, replace
from exp.ours import file_paths
from exp.ours.boosting import MaskSpec
from exp.ours.data.dataset import Dataset, Task
from exp.ours.data.gpv_example import GPVExample
from exp.ours.models.model import PredictionArg
from os.path import join, exists
from exp.ours.util.py_utils import int_to_str
from utils.io import load_json_object, dump_json_object
import numpy as np
ID_LIST = set([0])
LAST_ID = 0
@dataclass
class MILExample:
"""
Consists of positive and negative examples for different classes
"""
gpv_id: str
image_id: Union[int, str]
answer: str
query: str
correct_answer: str
rel_query: str
@property
def task(self):
return Task.MIL
def get_gpv_id(self):
return self.gpv_id
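# Editor's sketch (hypothetical field values) of constructing a MILExample directly:
#   ex = MILExample(gpv_id="mil-0", image_id=123, answer="dog",
#                   query="what animal is this?", correct_answer="dog",
#                   rel_query="is there a dog?")
#   ex.task          # -> Task.MIL
#   ex.get_gpv_id()  # -> "mil-0"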
@Dataset.register("mil")
class MILDataset(Dataset):
def __init__(self, split: str,):
self.split = split
def get_task(self) -> Task:
return Task.MIL
def load(self) -> List[MILExample]:
instances = load_mil(self.split)
return instances
def _intern(x):
if x is None:
return None
return sys.intern(x)
def load_mil(split):
#file = join(file_paths.WEBQA_DIR, split + "_image_info.json")
#file = file_paths.IMAGECONTRAST_DIR+'/train_large_2.json'
#file = '/data/michal5/gpv/text_contrast/train_large.json'
if split == 'small':
file = '/data/michal5/gpv/lessons/mil_small.json'
else:
file = '/data/michal5/gpv/lessons/mil_train.json'
#file = '/data/michal5/gpv/lessons/mil_small.json'
logging.info(f"Loading mil data from {file}")
raw_instances = load_json_object(file)
out = []
for i, x in enumerate(raw_instances):
if isinstance(x["image"], dict):
image_id = x["image"]["image_id"]
else:
image_id = x["image"]
ex = MILExample(gpv_id=x['gpv_id'],image_id=image_id,answer=x['answer'],
query=x['query'],correct_answer=x['correct'],rel_query=x['rel_query']
)
out.append(ex)
return out
| 21.206186
| 76
| 0.701507
| 556
| 0.270297
| 0
| 0
| 592
| 0.287798
| 0
| 0
| 506
| 0.245989
|
12932d615b9cdc4848ccdf491cf3ec6f30e667d0
| 6,968
|
py
|
Python
|
creel_portal/api/filters/FN024_Filter.py
|
AdamCottrill/CreelPortal
|
5ec867c4f11b4231c112e8209116b6b96c2830ec
|
[
"MIT"
] | null | null | null |
creel_portal/api/filters/FN024_Filter.py
|
AdamCottrill/CreelPortal
|
5ec867c4f11b4231c112e8209116b6b96c2830ec
|
[
"MIT"
] | null | null | null |
creel_portal/api/filters/FN024_Filter.py
|
AdamCottrill/CreelPortal
|
5ec867c4f11b4231c112e8209116b6b96c2830ec
|
[
"MIT"
] | null | null | null |
import django_filters
from ...models import FN024
from .filter_utils import NumberInFilter, ValueInFilter
class FN024SubFilter(django_filters.FilterSet):
    """A filterset that allows us to select subsets of net set objects by
net set attributes."""
prd = ValueInFilter(field_name="prd")
prd__not = ValueInFilter(field_name="prd", exclude=True)
prdtm0 = django_filters.TimeFilter(field_name="prdtm0", help_text="format: HH:MM")
prdtm0__gte = django_filters.TimeFilter(
field_name="prdtm0", lookup_expr="gte", help_text="format: HH:MM"
)
prdtm0__lte = django_filters.TimeFilter(
field_name="prdtm0", lookup_expr="lte", help_text="format: HH:MM"
)
prdtm1 = django_filters.TimeFilter(field_name="prdtm1", help_text="format: HH:MM")
prdtm1__gte = django_filters.TimeFilter(
field_name="prdtm1", lookup_expr="gte", help_text="format: HH:MM"
)
prdtm1__lte = django_filters.TimeFilter(
field_name="prdtm1", lookup_expr="lte", help_text="format: HH:MM"
)
prd_dur__gte = django_filters.NumberFilter(field_name="prd_dur", lookup_expr="gte")
prd_dur__lte = django_filters.NumberFilter(field_name="prd_dur", lookup_expr="lte")
class Meta:
model = FN024
fields = [
"prd",
"prdtm0",
"prdtm1",
"prd_dur",
]
class FN024Filter(FN024SubFilter):
"""Extends the FN024SubFilter to include additional fields that
are associated with parent objects.
"""
# FN011 ATTRIBUTES
year = django_filters.CharFilter(
field_name="daytype__season__creel__year", lookup_expr="exact"
)
year__gte = django_filters.NumberFilter(
field_name="daytype__season__creel__year", lookup_expr="gte"
)
year__lte = django_filters.NumberFilter(
field_name="daytype__season__creel__year", lookup_expr="lte"
)
year__gt = django_filters.NumberFilter(
field_name="daytype__season__creel__year", lookup_expr="gt"
)
year__lt = django_filters.NumberFilter(
field_name="daytype__season__creel__year", lookup_expr="lt"
)
prj_date0 = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date0", help_text="format: yyyy-mm-dd"
)
prj_date0__gte = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date0",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
prj_date0__lte = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date0",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
prj_date1 = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date1", help_text="format: yyyy-mm-dd"
)
prj_date1__gte = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date1",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
prj_date1__lte = django_filters.DateFilter(
field_name="daytype__season__creel__prj_date1",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
prj_cd = ValueInFilter(field_name="daytype__season__creel__prj_cd")
prj_cd__not = ValueInFilter(
field_name="daytype__season__creel__prj_cd", exclude=True
)
prj_cd__like = django_filters.CharFilter(
field_name="daytype__season__creel__prj_cd", lookup_expr="icontains"
)
prj_cd__not_like = django_filters.CharFilter(
field_name="daytype__season__creel__prj_cd",
lookup_expr="icontains",
exclude=True,
)
prj_cd__endswith = django_filters.CharFilter(
field_name="daytype__season__creel__prj_cd", lookup_expr="endswith"
)
prj_cd__not_endswith = django_filters.CharFilter(
field_name="daytype__season__creel__prj_cd",
lookup_expr="endswith",
exclude=True,
)
prj_nm__like = django_filters.CharFilter(
field_name="daytype__season__creel__prj_nm", lookup_expr="icontains"
)
prj_nm__not_like = django_filters.CharFilter(
field_name="daytype__season__creel__prj_nm",
lookup_expr="icontains",
exclude=True,
)
prj_ldr = django_filters.CharFilter(
field_name="daytype__season__creel__prj_ldr__username", lookup_expr="iexact"
)
contmeth = ValueInFilter(field_name="daytype__season__creel__contmeth")
contmeth__not = ValueInFilter(
field_name="daytype__season__creel__contmeth", exclude=True
)
lake = ValueInFilter(field_name="daytype__season__creel__lake__abbrev")
lake__not = ValueInFilter(
field_name="daytype__season__creel__lake__abbrev", exclude=True
)
ssn_date0 = django_filters.DateFilter(
field_name="daytype__season__ssn_date0", help_text="format: yyyy-mm-dd"
)
ssn_date0__gte = django_filters.DateFilter(
field_name="daytype__season__ssn_date0",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
ssn_date0__lte = django_filters.DateFilter(
field_name="daytype__season__ssn_date0",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
ssn_date1 = django_filters.DateFilter(
field_name="daytype__season__ssn_date1", help_text="format: yyyy-mm-dd"
)
ssn_date1__gte = django_filters.DateFilter(
field_name="daytype__season__ssn_date1",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
ssn_date1__lte = django_filters.DateFilter(
field_name="daytype__season__ssn_date1",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
ssn = ValueInFilter(field_name="daytype__season__ssn")
ssn__not = ValueInFilter(field_name="daytype__season__ssn", exclude=True)
ssn__like = django_filters.CharFilter(
field_name="daytype__season__ssn", lookup_expr="icontains"
)
ssn__not_like = django_filters.CharFilter(
field_name="daytype__season__ssn", lookup_expr="icontains", exclude=True
)
ssn_des = ValueInFilter(field_name="daytype__season__ssn_des")
ssn_des__not = ValueInFilter(field_name="daytype__season__ssn_des", exclude=True)
ssn_des__like = django_filters.CharFilter(
field_name="daytype__season__ssn_des", lookup_expr="icontains"
)
ssn_des__not_like = django_filters.CharFilter(
field_name="daytype__season__ssn_des", lookup_expr="icontains", exclude=True
)
dtp = ValueInFilter(field_name="daytype__dtp")
dtp__not = ValueInFilter(field_name="daytype__dtp", exclude=True)
dtp_nm__like = django_filters.CharFilter(
field_name="daytype__dtp_nm", lookup_expr="icontains"
)
dtp_nm__not_like = django_filters.CharFilter(
field_name="daytype__dtp_nm", lookup_expr="icontains", exclude=True
)
class Meta:
model = FN024
fields = [
"prd",
"prdtm0",
"prdtm1",
"prd_dur",
]
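# Editor's usage sketch (assumes a configured Django project with FN024 data loaded):
#   qs = FN024Filter({"year__gte": "2010", "prd_dur__lte": "2"},
#                    queryset=FN024.objects.all()).qs
#   # qs now holds sampling periods from 2010 onward with prd_dur <= 2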
| 33.180952
| 87
| 0.695896
| 6,855
| 0.983783
| 0
| 0
| 0
| 0
| 0
| 0
| 2,149
| 0.30841
|
1295c606d9e77831f602309b8cf0e51374c22061
| 7,148
|
py
|
Python
|
modules/utils.py
|
PaulLerner/deep_parkinson_handwriting
|
806f34eaa6c5dde2a8230a07615c69e0873c0535
|
[
"MIT"
] | 2
|
2021-01-19T02:47:32.000Z
|
2021-05-20T08:29:36.000Z
|
modules/utils.py
|
PaulLerner/deep_parkinson_handwriting
|
806f34eaa6c5dde2a8230a07615c69e0873c0535
|
[
"MIT"
] | null | null | null |
modules/utils.py
|
PaulLerner/deep_parkinson_handwriting
|
806f34eaa6c5dde2a8230a07615c69e0873c0535
|
[
"MIT"
] | 2
|
2021-01-23T18:20:19.000Z
|
2021-08-09T03:53:32.000Z
|
import numpy as np
from time import time
import matplotlib.pyplot as plt
measure2index={"y-coordinate":0,"x-coordinate":1,"timestamp":2, "button_status":3,"tilt":4, "elevation":5,"pressure":6}
index2measure=list(measure2index.keys())
task2index={"spiral":0,"l":1,"le":2 ,"les":3,"lektorka" :4,"porovnat":5,"nepopadnout":6, "tram":7}
index2task=list(task2index.keys())
max_lengths=[16071, 4226, 6615, 6827, 7993, 5783, 4423, 7676]#max length per task
token_lengths=[16071,1242,1649,1956]#max length per token
stroke_lengths=[16071,752,1104,1476,3568,2057,2267,1231]#max length per stroke (either on paper or in air)
stroke_avg_plus_std=[2904,277,363,411,484,346,324,218]#stroke avg length + stroke avg length std
max_strokes=[25,15,15,21,29,43,35, 67]#max n° of strokes per task (in air + on paper)
plot2index={"loss":0,"accuracy":1}
index2plot= list(plot2index.keys())
on_paper_value=1.0#on_paper_stroke iff button_status==1.0
one_hot=np.identity(8)
def downsample(task,factor=2):
downsampled=[point for i,point in enumerate(task) if i%factor==0]
downsampled=np.array(downsampled)
return downsampled
def upsample(task):
upsampled=[]
for i,point in enumerate(task[:-1]):
upsampled.append(point)
upsampled.append(np.mean(task[i:i+2],axis=0))
upsampled=np.array(upsampled)
    # /!\ np.around the button_status column after resampling !!
upsampled[:,measure2index["button_status"]]=np.around(upsampled[:,measure2index["button_status"]])
return upsampled
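# Editor's sanity check (toy array, not real handwriting data): with a (10, 7)
# signal x, downsample(x, factor=2).shape[0] == 5 and upsample(x).shape[0] == 18,
# i.e. lengths go n -> ceil(n/factor) and n -> 2*(n-1).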
def get_significance(p):
    """used to print the significance of a statistical test given a p-value"""
if p<0.01:
significance="***"
elif p<0.05:
significance="**"
elif p<0.1:
significance="*"
else:
significance="_"
return significance
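# e.g. get_significance(0.003) -> "***", get_significance(0.03) -> "**",
#      get_significance(0.07) -> "*",   get_significance(0.2)  -> "_"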
def CorrectPool(out_size,current_pool):
"""makes convolved size divisible by pooling kernel"""
ratio=out_size/current_pool
if (ratio)%1==0:#whole number
return int(current_pool)
else:
whole_ratio=round(ratio)
if whole_ratio==0:
whole_ratio+=1
return int(out_size/whole_ratio)
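# e.g. CorrectPool(out_size=10, current_pool=4) -> 5: 10/4 is not a whole number,
# so the kernel becomes int(out_size / round(10/4)) = int(10/2) = 5, which divides 10 exactly.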
def CorrectHyperparameters(input_size,seq_len,hidden_size,conv_kernel,pool_kernel ,padding=0,
stride=1,dilation=1, dropout=0.0,output_size=1,n_seq=1):
"""makes convolved size divisible by pooling kernel and computes size of sequence after convolutions"""
out_size=seq_len
print("seq_len :",out_size)
for i, (h,c,p,pad,d) in enumerate(list(zip(hidden_size,conv_kernel,pool_kernel,padding,dilation))):
print("layer",i+1)
in_size=out_size
out_size=get_out_size(out_size,pad,d,c,stride=1)
print("\tafter conv{} :{}".format(i+1,out_size))
if out_size<1:
c=(in_size-1)//d+1
out_size=get_out_size(in_size,pad,d,c,stride=1)
print("\t\tupdate c. after conv{} :{}".format(i+1,out_size))
conv_kernel[i]=c
pool_kernel[i]=CorrectPool(out_size,p)
out_size=get_out_size(out_size,padding=0,dilation=1,kernel_size=pool_kernel[i],stride=pool_kernel[i])
print("\tafter pool{} :{}".format(i+1,out_size))
out_size*=hidden_size[-1]
print("after flatting",out_size)
return input_size,out_size,hidden_size,conv_kernel,pool_kernel ,padding,stride,dilation, dropout,output_size
def wrong_len_gen(data,good_len):
"""used for splitting tasks into tokens"""
for i,s in enumerate(data):
if len(s) != good_len:
yield i
def get_out_size(in_size,padding,dilation,kernel_size,stride):
"""computes output size after a conv or a pool layer"""
return (in_size+2*padding-dilation*(kernel_size-1)-1)//stride +1
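# e.g. a length-100 input through a kernel-3, stride-1 convolution:
#   get_out_size(100, padding=0, dilation=1, kernel_size=3, stride=1)   # -> 98
# followed by a 2-wide, stride-2 pooling: get_out_size(98, 0, 1, 2, 2)  # -> 49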
def min_max_scale(data,min_=0,max_=1):
    return (max_-min_)*(data-np.min(data))/(np.max(data)-np.min(data))+min_
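# e.g. min_max_scale(np.array([2., 4., 6.])) -> array([0., 0.5, 1.])
#      (with the default min_=0, max_=1)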
def count_params(model):
"""returns (total n° of parameters, n° of trainable parameters)"""
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_params, trainable_params
def plot_task(task,measure2index=measure2index):
plt.plot(task[:,measure2index["x-coordinate"]],task[:,measure2index["y-coordinate"]])
plt.xlabel("x-coordinate")
plt.ylabel("y-coordinate")
def plot_measures(task,subplot=False,figsize=(6,4),index2measure=index2measure):
plt.figure(figsize=figsize)
for i,measure in enumerate(index2measure):
if subplot:
plt.subplot(3,3,i+1)
plt.plot(task[:,i],label=measure)
plt.xlabel("timesteps")
plt.ylabel(measure)
plt.legend()
def return_metrics(tp,tn,fp,fn):
accuracy= (tp+tn)/(tp+tn+fp+fn)
sensitivity = tp/(tp+fn) if (tp+fn) != 0 else 0.0 #without condition positives the sensitivity should be 0
specificity = tn/(tn+fp) if (tn+fp)!= 0 else 0.0 #idem
ppv = tp/(tp+fp) if tp+fp != 0 else 0.0 #without predicted positives the ppv should be 0
npv = tn/(tn+fn) if tn+fn !=0 else 0.0 #idem
return accuracy,sensitivity,specificity,ppv,npv
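# e.g. return_metrics(tp=8, tn=5, fp=2, fn=1)
#   -> accuracy 13/16 = 0.8125, sensitivity 8/9, specificity 5/7, ppv 8/10, npv 5/6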
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected.')
def flat_list(nested_list):
    return [item for sublist in nested_list for item in sublist]
def timeSince(since):
now = time()
s = now - since
m = np.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def ReshapeAndVote(model_train_predictions,round_before_voting=True):
"""used to fuse the predictions of n_models models after n_CV CV"""
n_CV=len(model_train_predictions[0])
n_models=len(model_train_predictions)
if round_before_voting:
reshaped_train_predictions=[[np.around(model_train_predictions[i][j]) for i in range(n_models)] for j in range(n_CV)]
else:
reshaped_train_predictions=[[model_train_predictions[i][j] for i in range(n_models)] for j in range(n_CV)]
voted_train_predictions=[np.around(np.mean(reshaped_train_predictions[i],axis=0)) for i in range(n_CV)]
return voted_train_predictions
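# Editor's illustration: with 3 models and a single CV fold predicting
# [0.4, 0.8], [0.6, 0.9] and [0.2, 0.7], the per-model rounding gives
# [0, 1], [1, 1], [0, 1]; averaging across models and rounding again yields
# the majority vote [0., 1.] for that fold.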
def confusion_matrix(y_true,y_pred):
if len(y_true)!=len(y_pred):
raise ValueError("y_true and y_pred should have the same shape, got {} and {}, respectively".format(len(y_true),len(y_pred)))
tn, fp, fn, tp=0,0,0,0
false_i=[]
for i, (target, pred) in enumerate(list(zip(y_true,y_pred))):
if target==0:#condition negative
if pred==0:
tn+=1
elif pred==1:
fp+=1
false_i.append(i)
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
elif target==1:#condition positive
if pred==0:
fn+=1
false_i.append(i)
elif pred ==1:
tp+=1
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
else:
raise ValueError("target should either be 0 or 1, got {}".format(target))
return tn, fp, fn, tp, false_i
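# e.g. confusion_matrix([1, 0, 1, 0], [1, 0, 0, 0]) -> (2, 0, 1, 1, [2]):
#   two true negatives, no false positives, one false negative (index 2), one true positive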
| 42.047059
| 133
| 0.663123
| 0
| 0
| 163
| 0.022794
| 0
| 0
| 0
| 0
| 1,552
| 0.217033
|
1296326732d0f3f0616b1b674348b31dbce55859
| 574
|
py
|
Python
|
Mundo2/Desafio039.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo2/Desafio039.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo2/Desafio039.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
import datetime
datenasc = int(input('Insert your year of birth: '))
atualdate = str(datetime.date.today())[0:4]
datestr = int(atualdate)
datefinal = datestr - datenasc
print(datefinal)
if datefinal < 18:
    print(f'You are {datefinal} years old. {18-datefinal} year(s) left until you must enlist in the army.')
elif datefinal == 18:
    print(f'You turn 18 this year, in {atualdate}. '
          f'It is time to serve your country.\nGet your documents ready.')
else:
    print(f'You are already {datefinal} years old, so you are past the enlistment age.')
| 41
| 101
| 0.728223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.566265
|
1296680de0a376242d8b5859461295d893d5f13c
| 4,180
|
py
|
Python
|
local_test/test_pullparser.py
|
rmoskal/e-springpad
|
d2c1dfbae63a29737d9cfdee571704b7a5e85bd5
|
[
"MIT"
] | 1
|
2017-01-10T17:12:25.000Z
|
2017-01-10T17:12:25.000Z
|
local_test/test_pullparser.py
|
rmoskal/e-springpad
|
d2c1dfbae63a29737d9cfdee571704b7a5e85bd5
|
[
"MIT"
] | null | null | null |
local_test/test_pullparser.py
|
rmoskal/e-springpad
|
d2c1dfbae63a29737d9cfdee571704b7a5e85bd5
|
[
"MIT"
] | null | null | null |
__author__ = 'rob'
import unittest
import logging
import evernotebookparser
from xml.etree import ElementTree
import re
class TestNotebookParser(unittest.TestCase):
def setUp(self):
self.o = evernotebookparser.NotebookParser2("../Quotes.enex")
def test_parsing2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
self.assertEquals(32,len(results))
def test_re(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note>Barthes, Roland<br clear="none"/> Sade, Fourier, Loyola: p.7.<br clear="none"/>
<br clear="none"/>Motto: It is a matter of bringing into daily life a fragment of the unintelligible formulas that emanate from a text we admire.
<br clear="none"/></en-note>"""
self.assertEquals(data.find('<en-note>'),133)
self.assertEquals(data.find('</en-note>'),410)
self.assertEquals(data[133],'<')
data = evernotebookparser.extract(data)
self.assertTrue(data.startswith("Barthes"))
def test_construction1(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[0];
self.assertEquals(["B"],item['tags'])
self.assertTrue(item['content'].startswith("Barthes"))
def test_construction2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[1];
self.assertEquals(['O'],item['tags'])
class TestNotebookParser2(unittest.TestCase):
def setUp(self):
self.o = evernotebookparser.NotebookParser2("../test.enex")
def test_parsing2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
self.assertEquals(2,len(results))
def test_construction1(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[0];
self.assertTrue(item['content'].startswith("<div>"))
self.assertFalse("url" in item)
def test_construction2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[1];
self.assertTrue(item['content'].startswith("<div>"))
self.assertTrue("url" in item)
self.assertEquals(item['url'],"http://mostmedia.com")
class TestNotebookMac(unittest.TestCase):
def setUp(self):
self.o = evernotebookparser.NotebookParser2("../Travel.enex")
def test_parsing2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
self.assertEquals(4,len(results))
def test_construction1(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[0];
self.assertTrue(item['content'].startswith("<div>"))
self.assertFalse("url" in item)
def test_construction2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[1];
self.assertTrue(item['content'].startswith("<div>"))
class TestDavids(unittest.TestCase):
def setUp(self):
self.o = evernotebookparser.NotebookParser2("../recipes.enex")
def test_parsing2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
self.assertEquals(49,len(results))
def test_construction1(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[0];
        print(item['content'])
#self.assertTrue(item['content'].startswith("<div>"))
#self.assertFalse("url" in item)
def test_construction2(self):
results = [];
self.o.get_items(lambda x: results.append(x))
item = results[1];
#self.assertTrue(item['content'].startswith("<div>"))
| 32.403101
| 157
| 0.570335
| 4,039
| 0.966268
| 0
| 0
| 0
| 0
| 0
| 0
| 818
| 0.195694
|
1296f3adb86af7c4bde450922af6cd40c775ef6d
| 6,872
|
py
|
Python
|
test/test_sysroot_compiler.py
|
prajakta-gokhale/cross_compile
|
cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4
|
[
"Apache-2.0"
] | null | null | null |
test/test_sysroot_compiler.py
|
prajakta-gokhale/cross_compile
|
cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4
|
[
"Apache-2.0"
] | null | null | null |
test/test_sysroot_compiler.py
|
prajakta-gokhale/cross_compile
|
cbdc94ed5b25d6fc336aa5c0faa2838d9ce61db4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the `create_cc_sysroot.py` script."""
import getpass
from pathlib import Path
from typing import Tuple
from cross_compile.sysroot_compiler import DockerConfig
from cross_compile.sysroot_compiler import Platform
from cross_compile.sysroot_compiler import QEMU_DIR_NAME
from cross_compile.sysroot_compiler import ROS_DOCKERFILE_NAME
from cross_compile.sysroot_compiler import SYSROOT_DIR_NAME
from cross_compile.sysroot_compiler import SysrootCompiler
import pytest
def _default_docker_kwargs() -> dict:
return {
'arch': 'aarch64',
'os': 'ubuntu',
'rosdistro': 'dashing',
'sysroot_base_image': '035662560449.dkr.ecr.us-east-2.amazonaws.com/cc-tool:'
'aarch64-bionic-dashing-fastrtps-prebuilt',
'docker_network_mode': 'host',
'sysroot_nocache': False,
}
@pytest.fixture
def platform_config() -> Platform:
return Platform(
arch='aarch64',
os='ubuntu',
rosdistro='dashing',
rmw='fastrtps')
@pytest.fixture
def docker_config() -> DockerConfig:
return DockerConfig(**_default_docker_kwargs())
def setup_mock_sysroot(path: Path) -> Tuple[Path, Path]:
"""Create mock directories to correctly construct the SysrootCreator."""
sysroot_dir = path / SYSROOT_DIR_NAME
sysroot_dir.mkdir()
ros_workspace_dir = sysroot_dir / 'ros_ws'
ros_workspace_dir.mkdir()
qemu_dir = sysroot_dir / QEMU_DIR_NAME
qemu_dir.mkdir()
qemu_binary_mock = qemu_dir / 'qemu'
qemu_binary_mock.ensure()
docker_ws_dir = sysroot_dir / ROS_DOCKERFILE_NAME
docker_ws_dir.ensure()
return sysroot_dir, ros_workspace_dir
def test_get_workspace_image_tag(platform_config):
"""Make sure the image tag is created correctly."""
image_tag = platform_config.get_workspace_image_tag()
test_tag = '{}/{}:latest'.format(getpass.getuser(), str(platform_config))
assert isinstance(image_tag, str)
assert image_tag == test_tag
def test_docker_config_args(docker_config):
"""Make sure the Docker configuration is setup correctly."""
args = _default_docker_kwargs()
test_config_string = (
'Base Image: {}\n'
'Network Mode: {}\n'
'Caching: {}'
).format(
args['sysroot_base_image'], args['docker_network_mode'], args['sysroot_nocache']
)
config_string = str(docker_config)
assert isinstance(config_string, str)
assert config_string == test_config_string
def test_sysroot_compiler_constructor(
platform_config, docker_config, tmpdir):
"""Test the SysrootCompiler constructor assuming valid path setup."""
# Create mock directories and files
sysroot_dir, ros_workspace_dir = setup_mock_sysroot(tmpdir)
sysroot_compiler = SysrootCompiler(
str(tmpdir), 'ros_ws', platform_config,
docker_config, None)
assert isinstance(sysroot_compiler.get_build_setup_script_path(), Path)
assert isinstance(sysroot_compiler.get_system_setup_script_path(), Path)
def test_sysroot_compiler_tree_validation(platform_config, docker_config, tmpdir):
"""
Ensure that the SysrootCompiler constructor validates the workspace.
Start with empty directory and add one piece at a time, expecting failures until
all parts are present.
"""
kwargs = {
'cc_root_dir': str(tmpdir),
'ros_workspace_dir': 'ros_ws',
'platform': platform_config,
'docker_config': docker_config,
'custom_setup_script_path': None,
}
# There's no 'sysroot' at all yet
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
sysroot_dir = tmpdir / SYSROOT_DIR_NAME
sysroot_dir.mkdir()
# ROS2 ws and qemu dirs are missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
ros_workspace_dir = sysroot_dir / 'ros_ws'
ros_workspace_dir.mkdir()
# qemu dirs are missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
qemu_dir = sysroot_dir / QEMU_DIR_NAME
qemu_dir.mkdir()
# the qemu binary is still missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
qemu_binary_mock = qemu_dir / 'qemu'
qemu_binary_mock.ensure()
# everything is present now
compiler = SysrootCompiler(**kwargs)
assert compiler
def verify_base_docker_images(arch, os, rosdistro, image_name):
"""Assert correct base image is generated."""
sysroot_base_image = None
docker_network_mode = 'host'
sysroot_nocache = 'False'
assert DockerConfig(
arch, os, rosdistro, sysroot_base_image,
docker_network_mode, sysroot_nocache).base_image == image_name
def test_get_docker_base_image():
"""Test that the correct base docker image is used for all arguments."""
verify_base_docker_images('aarch64', 'ubuntu', 'dashing', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'ubuntu', 'eloquent', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'ubuntu', 'kinetic', 'arm64v8/ubuntu:xenial')
verify_base_docker_images('aarch64', 'ubuntu', 'melodic', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'debian', 'dashing', 'arm64v8/debian:stretch')
verify_base_docker_images('aarch64', 'debian', 'eloquent', 'arm64v8/debian:buster')
verify_base_docker_images('aarch64', 'debian', 'kinetic', 'arm64v8/debian:jessie')
verify_base_docker_images('aarch64', 'debian', 'melodic', 'arm64v8/debian:stretch')
verify_base_docker_images('armhf', 'ubuntu', 'dashing', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'ubuntu', 'eloquent', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'ubuntu', 'kinetic', 'arm32v7/ubuntu:xenial')
verify_base_docker_images('armhf', 'ubuntu', 'melodic', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'debian', 'dashing', 'arm32v7/debian:stretch')
verify_base_docker_images('armhf', 'debian', 'eloquent', 'arm32v7/debian:buster')
verify_base_docker_images('armhf', 'debian', 'kinetic', 'arm32v7/debian:jessie')
verify_base_docker_images('armhf', 'debian', 'melodic', 'arm32v7/debian:stretch')
| 37.551913
| 88
| 0.721478
| 0
| 0
| 0
| 0
| 273
| 0.039726
| 0
| 0
| 2,698
| 0.392608
|
1297e5fb738245835e074daab17948395423d0ba
| 2,083
|
py
|
Python
|
estimate.py
|
farr/galmassproxy
|
f4a1c7acc19d130a6f57030bceef03c993a7170c
|
[
"MIT"
] | null | null | null |
estimate.py
|
farr/galmassproxy
|
f4a1c7acc19d130a6f57030bceef03c993a7170c
|
[
"MIT"
] | null | null | null |
estimate.py
|
farr/galmassproxy
|
f4a1c7acc19d130a6f57030bceef03c993a7170c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
r"""estimate.py
Use to estimate masses based on observed proxy values (and associated
errors) from a pre-calibrated generative model for the mass-proxy
relationship. The estimates will be returned as samples (fair draws)
from the model's posterior on the mass given the proxy observation.
This program expects the proxy data in a file with at least 'proxy'
and 'dp' column headers, followed by observed proxy values and
relative errors in those columns:
proxy dp
p1 dp1
...
The output will have one row for each proxy measurement, with one
column for each draw from the mass posterior for that system:
m1_draw m1_draw ...
m2_draw m2_draw ...
...
"""
import argparse
import bz2
import numpy as np
import os.path as op
import pickle
import posterior as pos
import plotutils.runner as pr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--caldir', metavar='DIR', required=True, help='directory with calibration data')
parser.add_argument('--proxyfile', metavar='FILE', required=True, help='proxy observations')
parser.add_argument('--output', metavar='FILE', default='masses.dat.bz2', help='mass posterior draws')
args = parser.parse_args()
runner = pr.load_runner(args.caldir)
with bz2.BZ2File(op.join(args.caldir, 'logpost.pkl.bz2'), 'r') as inp:
logpost = pickle.load(inp)
flatchain = runner.thin_chain[:,-16:,:].reshape((-1, runner.chain.shape[2]))
data = np.genfromtxt(args.proxyfile, names=True)
ms = []
for log_p, dp in zip(np.log(data['proxy']), data['dp']):
mdraws = []
for p in flatchain:
((log_m, log_p_est), (var_log_m, var_log_p)) = \
logpost.mass_proxy_estimate(p, log_p, dp)
mdraws.append(np.exp(np.random.normal(loc=log_m, scale=np.sqrt(var_log_m))))
ms.append(mdraws)
ms = np.array(ms)
fname = args.output
fbase, fext = op.splitext(fname)
if not (fext == '.bz2'):
fname = fname + '.bz2'
with bz2.BZ2File(fname, 'w') as out:
np.savetxt(out, ms)
| 30.632353
| 106
| 0.683629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 873
| 0.419107
|
129824738bfae0f0fbd02b667cf74972ac9ca42e
| 143
|
py
|
Python
|
scripts/python/printings.py
|
samk-ai/cmd-tools-course-materials
|
fa3615df7ae70bbc701661bdeef588cbbf17be97
|
[
"MIT"
] | null | null | null |
scripts/python/printings.py
|
samk-ai/cmd-tools-course-materials
|
fa3615df7ae70bbc701661bdeef588cbbf17be97
|
[
"MIT"
] | null | null | null |
scripts/python/printings.py
|
samk-ai/cmd-tools-course-materials
|
fa3615df7ae70bbc701661bdeef588cbbf17be97
|
[
"MIT"
] | null | null | null |
str1 = "Python"
str2 = "Python"
print("\nMemory location of str1 =", hex(id(str1)))
print("Memory location of str2 =", hex(id(str2)))
print()
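# Editor's note: CPython typically interns short identifier-like string literals,
# so str1 and str2 usually refer to the same object and the two addresses match.
# This is an implementation detail, not a language guarantee.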
| 23.833333
| 51
| 0.657343
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.503497
|
12990c8712d2523d8e2f0753d7b1faee0bbfa287
| 353
|
py
|
Python
|
plots_lib/architecture_config.py
|
cmimprota/ASL-SIFT
|
e6e489e9cc06746e2ab8cd11193fc9fc0112e5df
|
[
"Zlib"
] | 1
|
2021-12-30T14:59:43.000Z
|
2021-12-30T14:59:43.000Z
|
plots_lib/architecture_config.py
|
cmimprota/ASL-SIFT
|
e6e489e9cc06746e2ab8cd11193fc9fc0112e5df
|
[
"Zlib"
] | null | null | null |
plots_lib/architecture_config.py
|
cmimprota/ASL-SIFT
|
e6e489e9cc06746e2ab8cd11193fc9fc0112e5df
|
[
"Zlib"
] | 1
|
2021-04-12T11:13:32.000Z
|
2021-04-12T11:13:32.000Z
|
config = dict()
config['fixed_cpu_frequency'] = "@ 3700 MHz"
config['frequency'] = 3.7e9
config['maxflops_sisd'] = 2
config['maxflops_sisd_fma'] = 4
config['maxflops_simd'] = 16
config['maxflops_simd_fma'] = 32
config['roofline_beta'] = 64 # According to WikiChip (Skylake)
config['figure_size'] = (20,9)
config['save_folder'] = '../all_plots/'
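# Editor's sketch (assumption: maxflops_* are flops/cycle and roofline_beta is
# bytes/cycle): the roofline ridge point in flops/byte for a given peak would be
#   config['maxflops_simd_fma'] / config['roofline_beta']   # = 0.5 flops/byte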
| 29.416667
| 69
| 0.691218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.569405
|
129b2012dab2f92bc6a116945f46ccc5481200f2
| 562
|
py
|
Python
|
telemetry_f1_2021/generate_dataset.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | 4
|
2022-02-21T16:36:09.000Z
|
2022-03-28T06:50:54.000Z
|
telemetry_f1_2021/generate_dataset.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | null | null | null |
telemetry_f1_2021/generate_dataset.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | 2
|
2022-02-17T19:25:04.000Z
|
2022-02-23T04:16:16.000Z
|
import cx_Oracle
from oracledb import OracleJSONDatabaseConnection
import json
jsondb = OracleJSONDatabaseConnection()
connection = jsondb.get_connection()
connection.autocommit = True
soda = connection.getSodaDatabase()
x_collection = soda.createCollection('f1_2021_weather')
all_data = list()
for doc in x_collection.find().getCursor():
content = doc.getContent()
all_data.append(content)
print('Data length: {}'.format(len(all_data)))
with open("weather.json", 'w') as outfile:
outfile.write(json.dumps(all_data, indent=4))
outfile.close()
| 24.434783
| 55
| 0.765125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.090747
|
129b447d8e3a2e21029c717a45661b4dd2311adc
| 8,257
|
py
|
Python
|
UserPage.py
|
muath22/BookStore
|
db5b30e540de311931b234e71937ace3db9750c8
|
[
"MIT"
] | 9
|
2018-09-13T10:43:34.000Z
|
2021-05-05T08:51:52.000Z
|
UserPage.py
|
muath22/BookStore
|
db5b30e540de311931b234e71937ace3db9750c8
|
[
"MIT"
] | 4
|
2018-09-13T10:09:32.000Z
|
2021-03-20T00:03:10.000Z
|
UserPage.py
|
muath22/BookStore
|
db5b30e540de311931b234e71937ace3db9750c8
|
[
"MIT"
] | 5
|
2020-02-26T13:54:03.000Z
|
2021-01-06T09:38:56.000Z
|
from Tkinter import *
import ttk
import BuyBook
import BookInformationPage
import Message
class UserPage(object):
def __init__(self, root, color, font, dbConnection, userInfo):
for child in root.winfo_children():
child.destroy()
self.root = root
self.color = color
self.font = font
self.dbConnection = dbConnection
self.userInfo = userInfo
self.screen_width = self.root.winfo_screenwidth() * 3 / 4
self.screen_height = self.root.winfo_screenheight() * 3 / 4
self.gui_init()
def gui_init(self):
self.up_frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
height=self.screen_height / 8,
width=self.screen_width)
self.up_frame.grid_propagate(0)
self.up_frame.pack(side=TOP, expand=True, fill=BOTH)
self.down_frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
height=self.screen_height * 7 / 8,
width=self.screen_width)
self.down_frame.grid_propagate(0)
self.down_frame.pack(side=TOP, expand=True, fill=BOTH)
self.profileFrame = ProfileFrame(self.up_frame, self.screen_width / 2,
self.screen_height / 8, self.color,
self.font, self.userInfo)
self.logoutFrame = LogOutFrame(
self.root, self.up_frame, self.screen_width / 2,
self.screen_height / 8, self.color, self.font, self.dbConnection)
self.booksInfoFrame = BuyedBooks(
self.down_frame, self.screen_width, self.screen_height * 7 / 8,
self.color, self.font, self.dbConnection, self.userInfo)
class ProfileFrame(object):
def __init__(self, root, width, height, color, font, userInfo):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.userInfo = userInfo
self.gui_init()
def gui_init(self):
self.frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
bd=5,
relief=RAISED,
width=self.width,
height=self.height)
self.frame.pack(expand=True, side=LEFT, fill=BOTH)
self.frame.grid_propagate(0)
profile_info = self.extract_profile()
self.profileLabel = Label(
self.frame, text=profile_info, font=self.font, bg=self.color)
self.profileLabel.place(relx=0.5, rely=0.5, anchor='center')
def extract_profile(self):
userInfo = "\n".join(self.userInfo.values())
return userInfo
class LogOutFrame(object):
def __init__(self, parent, root, width, height, color, font, dbConnection):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.parent = parent
self.dbConnection = dbConnection
self.gui_init()
def gui_init(self):
self.frame = Frame(
self.root,
cursor='hand1',
bd=5,
relief=RAISED,
bg=self.color,
width=self.width,
height=self.height)
self.frame.pack(side=LEFT, expand=True, fill=BOTH)
self.frame.grid_propagate(0)
self.logout_button = Button(
self.frame, text="LogOut", font=self.font, borderwidth=5)
self.logout_button.place(relx=0.5, rely=0.5, anchor='center')
self.logout_button.bind("<Button-1>", self.__logOutAction)
def __logOutAction(self, event):
self.dbConnection.close()
for child in self.parent.winfo_children():
child.destroy()
self.parent.destroy()
class BuyedBooks(object):
def __init__(self, root, width, height, color, font, dbConnection,
userInfo):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.dbConnection = dbConnection
self.userInfo = userInfo
self.gui_init()
def gui_init(self):
frame_up = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 1 / 12)
frame_up.grid_propagate(0)
frame_up.pack(side=TOP, expand=True, fill=BOTH)
frame_middle = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 10 / 12)
frame_middle.grid_propagate(0)
frame_middle.pack(side=TOP, expand=True, fill=BOTH)
frame_down = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 1 / 12)
frame_down.grid_propagate(0)
frame_down.pack(side=TOP, expand=True, fill=BOTH)
self.uploadedFilesLabel = Label(
frame_up, text="BuyedBooks", font=self.font, bg=self.color)
self.uploadedFilesLabel.place(relx=0.5, rely=0.5, anchor='center')
self.booksDisplay = ttk.Treeview(
frame_middle,
columns=('#1', '#2', '#3', '#4', '#5'),
height=20,
show='headings',
padding=(1, 1, 1, 1))
self.booksDisplay.heading('#1', text='Title')
self.booksDisplay.heading('#2', text='Author')
self.booksDisplay.heading('#3', text='Genre')
self.booksDisplay.heading('#4', text='Quantity')
self.booksDisplay.heading('#5', text='Review Score')
self.booksDisplay.column('#1', stretch=True, width=self.width / 5)
self.booksDisplay.column('#2', stretch=True, width=self.width / 5)
self.booksDisplay.column('#3', stretch=True, width=self.width / 5)
self.booksDisplay.column('#4', stretch=True, width=self.width / 5)
self.booksDisplay.column('#5', stretch=True, width=self.width / 5)
self.booksDisplay.pack(side=TOP, fill=BOTH, expand=True)
#self.booksDisplay.grid(row=5, columnspan=4, sticky='nw')
#self.booksDisplay.place(relx=0.5, rely=0.5, anchor='center')
self.booksDisplayStyle = ttk.Style()
self.booksDisplayStyle.configure(
"Treeview", font=self.font, rowheight=50)
self.booksDisplayStyle.configure("Treeview.Heading", font=self.font)
#bind treeview to mouse click
self.booksDisplay.bind("<ButtonRelease-1>", self.__bookInfo)
self.booksDisplay.tag_configure(
"tagBook", background="white", foreground="red", font=self.font)
self.addNewBookButton = Button(
frame_down, text="Buy new book", font=self.font)
self.addNewBookButton.place(relx=0.5, rely=0.5, anchor='center')
self.addNewBookButton.bind("<Button-1>", self.__buyNewBook)
self.__display_availableBooks()
def __buyNewBook(self, event):
new_window = Toplevel(self.root)
BuyBook.BuyBook(new_window, self.color, self.font, self.dbConnection,
self.userInfo)
new_window.wait_window()
self.__display_availableBooks()
def __bookInfo(self, event):
selectedItem = self.booksDisplay.focus()
valueItem = self.booksDisplay.item(selectedItem)['values']
bookName=valueItem[0]
new_window = Toplevel(self.root)
newBookInfo = BookInformationPage.BookInformation(
new_window, self.color, self.dbConnection, valueItem[0], self.userInfo)
new_window.wait_window()
self.__display_availableBooks()
def __display_availableBooks(self):
for child in self.booksDisplay.get_children():
self.booksDisplay.delete(child)
cursor = self.dbConnection.cursor()
args = (self.userInfo['userName'], )
cursor.callproc('getUsersBooks', args)
for result in cursor.stored_results():
books = result.fetchall()
for book in books:
self.booksDisplay.insert(
'', 'end', values=book, tags='tagBook')
cursor.close()
| 32.128405
| 84
| 0.592588
| 8,155
| 0.987647
| 0
| 0
| 0
| 0
| 0
| 0
| 523
| 0.06334
|
129b4ea5990948782bef80ca4f25a0a104636e5b
| 775
|
py
|
Python
|
migrations/versions/1b57e397deea_initial_migration.py
|
sicness9/BugHub
|
2af45b0840757f7826927d4fefc0e626fef136e1
|
[
"FTL"
] | null | null | null |
migrations/versions/1b57e397deea_initial_migration.py
|
sicness9/BugHub
|
2af45b0840757f7826927d4fefc0e626fef136e1
|
[
"FTL"
] | null | null | null |
migrations/versions/1b57e397deea_initial_migration.py
|
sicness9/BugHub
|
2af45b0840757f7826927d4fefc0e626fef136e1
|
[
"FTL"
] | null | null | null |
"""initial migration
Revision ID: 1b57e397deea
Revises:
Create Date: 2021-12-20 20:57:14.696646
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1b57e397deea'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'users', 'roles', ['role_id'], ['id'])
op.create_foreign_key(None, 'users', 'teams', ['team_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'users', type_='foreignkey')
op.drop_constraint(None, 'users', type_='foreignkey')
# ### end Alembic commands ###
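A migration like this is typically applied and rolled back through Alembic's command API; below is a minimal sketch, assuming an alembic.ini file sits alongside the migrations/ directory.

from alembic import command
from alembic.config import Config


def apply_latest(ini_path="alembic.ini"):
    # Run any pending upgrade() steps, including the foreign-key additions above.
    command.upgrade(Config(ini_path), "head")


def roll_back_one(ini_path="alembic.ini"):
    # Step back one revision, which runs the matching downgrade().
    command.downgrade(Config(ini_path), "-1")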
| 25
| 70
| 0.676129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.554839
|
129b54403eb231e9102fbf7abe8cda7f3996ce5b
| 5,596
|
py
|
Python
|
app/utility/base_planning_svc.py
|
scottctaylor12/caldera
|
4e81aaaf0ed592232a0474dda36ea2fd505da0de
|
[
"Apache-2.0"
] | null | null | null |
app/utility/base_planning_svc.py
|
scottctaylor12/caldera
|
4e81aaaf0ed592232a0474dda36ea2fd505da0de
|
[
"Apache-2.0"
] | null | null | null |
app/utility/base_planning_svc.py
|
scottctaylor12/caldera
|
4e81aaaf0ed592232a0474dda36ea2fd505da0de
|
[
"Apache-2.0"
] | null | null | null |
import copy
import itertools
import re
from base64 import b64decode
from app.utility.base_service import BaseService
from app.utility.rule import RuleSet
class BasePlanningService(BaseService):
async def trim_links(self, operation, links, agent):
"""
Trim links in supplied list. Where 'trim' entails:
- adding all possible test variants
- removing completed links (i.e. agent has already completed)
- removing links that did not have template fact variables replaced by fact values
:param operation:
:param links:
:param agent:
:return: trimmed list of links
"""
links[:] = await self.add_test_variants(links, agent, operation)
links = await self.remove_completed_links(operation, agent, links)
links = await self.remove_links_missing_facts(links)
links = await self.remove_links_missing_requirements(links, operation.all_relationships())
self.log.debug('Created %d links for %s' % (len(links), agent.paw))
return links
async def add_test_variants(self, links, agent, operation):
"""
Create a list of all possible links for a given phase
:param links:
:param agent:
:param operation:
:return: updated list of links
"""
group = agent.group
for link in links:
decoded_test = self.decode(link.command, agent, group)
variables = re.findall(r'#{(.*?)}', decoded_test, flags=re.DOTALL)
if variables:
agent_facts = await self._get_agent_facts(operation, agent.paw)
relevant_facts = await self._build_relevant_facts(variables, operation, agent_facts)
valid_facts = await RuleSet(rules=operation.rules).apply_rules(facts=relevant_facts[0])
for combo in list(itertools.product(*valid_facts)):
copy_test = copy.deepcopy(decoded_test)
copy_link = copy.deepcopy(link)
variant, score, used = await self._build_single_test_variant(copy_test, combo)
copy_link.command = self.encode_string(variant)
copy_link.score = score
copy_link.used.extend(used)
links.append(copy_link)
else:
link.command = self.encode_string(decoded_test)
return links
@staticmethod
async def remove_completed_links(operation, agent, links):
"""
Remove any links that have already been completed by the operation for the agent
:param operation:
:param links:
:param agent:
:return: updated list of links
"""
completed_links = [l.command for l in operation.chain
if l.paw == agent.paw and (l.finish or l.status == l.states["DISCARD"])]
links[:] = [l for l in links if l.command not in completed_links]
return links
@staticmethod
async def remove_links_missing_facts(links):
"""
Remove any links that did not have facts encoded into command
:param links:
:return: updated list of links
"""
links[:] = [l for l in links if
not re.findall(r'#{(.*?)}', b64decode(l.command).decode('utf-8'), flags=re.DOTALL)]
return links
async def remove_links_missing_requirements(self, links, relationships):
links[:] = [l for l in links if await self._do_enforcements(l, relationships)]
return links
""" PRIVATE """
@staticmethod
async def _build_single_test_variant(copy_test, combo):
"""
Replace all variables with facts from the combo to build a single test variant
"""
score, used = 0, list()
for var in combo:
score += (score + var.score)
used.append(var)
copy_test = copy_test.replace('#{%s}' % var.trait, var.value.strip())
return copy_test, score, used
@staticmethod
def _is_fact_bound(fact):
return not fact['link_id']
@staticmethod
async def _build_relevant_facts(variables, operation, agent_facts):
"""
Create a list of ([fact, value, score]) tuples for each variable/fact
"""
facts = operation.all_facts()
relevant_facts = []
for v in variables:
variable_facts = []
for fact in [f for f in facts if f.trait == v]:
if fact.trait.startswith('host'):
if fact.unique in agent_facts:
variable_facts.append(fact)
else:
variable_facts.append(fact)
relevant_facts.append(variable_facts)
return relevant_facts
@staticmethod
async def _get_agent_facts(operation, paw):
"""
get facts for given agent
"""
agent_facts = []
for link in [l for l in operation.chain if l.paw == paw]:
for f in link.facts:
agent_facts.append(f.unique)
return agent_facts
async def _do_enforcements(self, link, relationships):
"""
enforce any defined requirements on the link
"""
for req_inst in link.ability.requirements:
requirements_info = dict(module=req_inst.module, enforcements=req_inst.relationships[0])
requirement = await self.load_module('Requirement', requirements_info)
if not requirement.enforce(link.used, relationships):
return False
return True
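The core of add_test_variants is collecting #{trait} placeholders and expanding them over the Cartesian product of candidate fact values. A self-contained sketch of that idea, using plain strings and dicts instead of caldera's Link/Fact objects (the trait names and values below are purely illustrative):

import itertools
import re


def expand_variants(command_template, facts):
    """Return one command string per combination of fact values for the placeholders."""
    traits = re.findall(r'#{(.*?)}', command_template, flags=re.DOTALL)
    if not traits:
        return [command_template]
    candidates = [facts.get(trait, []) for trait in traits]
    variants = []
    for combo in itertools.product(*candidates):
        variant = command_template
        for trait, value in zip(traits, combo):
            variant = variant.replace('#{%s}' % trait, value)
        variants.append(variant)
    return variants


print(expand_variants('ping #{host.ip} -c #{count}',
                      {'host.ip': ['10.0.0.1', '10.0.0.2'], 'count': ['1']}))
# ['ping 10.0.0.1 -c 1', 'ping 10.0.0.2 -c 1']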
| 38.593103
| 103
| 0.606862
| 5,438
| 0.971766
| 0
| 0
| 2,439
| 0.435847
| 5,150
| 0.9203
| 1,370
| 0.244818
|
129c2cba3840cfd8f3de73d2239ee04d334e5bc9
| 215
|
py
|
Python
|
pyclid/__init__.py
|
Kaundur/pyclid
|
c59865fed9120b76cba6e41a84653256ac3072ee
|
[
"MIT"
] | 2
|
2019-02-12T11:31:04.000Z
|
2021-12-31T10:39:01.000Z
|
pyclid/__init__.py
|
Kaundur/pyclid
|
c59865fed9120b76cba6e41a84653256ac3072ee
|
[
"MIT"
] | null | null | null |
pyclid/__init__.py
|
Kaundur/pyclid
|
c59865fed9120b76cba6e41a84653256ac3072ee
|
[
"MIT"
] | null | null | null |
import math
from pyclid.vector import *
from pyclid.matrix import *
from pyclid.quaternion import *
#from pyclid.vector import vector
#from pyclid.quaternion import quaternion
#from pyclid.matrix import matrix
| 16.538462
| 41
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.497674
|
129c738a3288c017144786e45c751a99bdb4acea
| 2,939
|
py
|
Python
|
tools/gen_histograms.py
|
mistajuliax/pbrt-v3-IILE
|
afda605d92517d2396e494d81465ead22d0c25e1
|
[
"BSD-2-Clause"
] | 16
|
2018-10-12T15:29:22.000Z
|
2022-03-16T11:24:10.000Z
|
tools/gen_histograms.py
|
mistajuliax/pbrt-v3-IILE
|
afda605d92517d2396e494d81465ead22d0c25e1
|
[
"BSD-2-Clause"
] | 16
|
2018-02-02T11:49:36.000Z
|
2018-04-21T09:07:08.000Z
|
tools/gen_histograms.py
|
giuliojiang/pbrt-v3-IISPT
|
b9be01096293ab0f50b14b9043556c93ff9e07ec
|
[
"BSD-2-Clause"
] | 2
|
2018-12-12T08:49:43.000Z
|
2019-12-03T12:20:04.000Z
|
import os
rootdir = os.path.abspath(os.path.join(__file__, "..", ".."))
mldir = os.path.join(rootdir, "ml")
import sys
sys.path.append(mldir)
import pfm
import iispt_transforms
import math
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
# =============================================================================
# Conf
NUM_BUCKETS = 100
INPUTDIR = "/home/gj/git/pbrt-v3-IISPT-dataset-indirect/breakfast"
SELECTOR = "p"
GAMMA_VALUE = 1.8
NORMALIZATION_INTENSITY = 3.807115077972
# =============================================================================
# Script
flist = []
for f in os.listdir(INPUTDIR):
fpath = os.path.join(INPUTDIR, f)
if f.startswith(SELECTOR) and f.endswith(".pfm"):
flist.append(fpath)
def histogram(images, plotname):
valmax = None
valmin = None
vals = []
for img in images:
height, width, _ = img.get_shape()
for y in range(height):
for x in range(width):
pixel = img.get_rgb(x, y)
for v in pixel:
if (valmax is None) or (v > valmax):
valmax = v
if v > 0.0:
if (valmin is None) or (v < valmin):
valmin = v
vals.append(v)
print("min {} max {}".format(valmin, valmax))
# Create buckets data structures
rng = valmax - valmin
step = rng / NUM_BUCKETS
buckets_starts = [0] * NUM_BUCKETS
buckets = [0] * NUM_BUCKETS
for i in range(NUM_BUCKETS):
buckets_starts[i] = valmin + (i * step)
# Populate buckets
for v in vals:
# Compute its bucket index
bindex = int(math.floor((v - valmin)/(float(step))))
# Exclude left-end out of bounds but include right-end
if bindex >= NUM_BUCKETS:
bindex = NUM_BUCKETS - 1
if bindex >= 0:
buckets[bindex] += 1
# Print buckets
for i in range(len(buckets)):
print("{} - {}".format(buckets_starts[i], buckets[i]))
# Plot
data = [
go.Bar(
x=buckets_starts,
y=buckets
)
]
plotly.offline.plot(
{
"data": data,
"layout": go.Layout(title=plotname)
}
)
# Generate histogram for raw data
standard_imgs = []
for fpath in flist:
standard_imgs.append(pfm.load(fpath))
histogram(standard_imgs, "Raw intensity")
# Generate histogram after log transform
log_imgs = []
for fpath in flist:
img = pfm.load(fpath)
img.map(iispt_transforms.LogTransform())
log_imgs.append(img)
histogram(log_imgs, "Log transform")
# Generate histogram after log + gamma transform
lg_imgs = []
for fpath in flist:
img = pfm.load(fpath)
img.normalize_log_gamma(NORMALIZATION_INTENSITY, GAMMA_VALUE)
lg_imgs.append(img)
histogram(lg_imgs, "Log + Gamma transform")
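The manual bucket loop in histogram() reproduces what numpy's binning already provides; a minimal cross-check sketch, with random values standing in for the collected pixel data since the .pfm inputs are not available here:

import numpy as np

vals_check = np.random.rand(10000) * 5.0  # placeholder for the collected pixel values
counts, edges = np.histogram(vals_check, bins=100,  # 100 matches NUM_BUCKETS above
                             range=(float(vals_check.min()), float(vals_check.max())))
for start, count in zip(edges[:-1], counts):
    print("{} - {}".format(start, count))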
| 25.780702
| 79
| 0.555971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 611
| 0.207894
|
129ced52ad5bddf6d93136148de2d32cf2de02ec
| 4,762
|
py
|
Python
|
crownstone_uart/core/uart/UartBridge.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
crownstone_uart/core/uart/UartBridge.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
crownstone_uart/core/uart/UartBridge.py
|
RicArch97/crownstone-lib-python-uart
|
c0aaf1415936e5e622aa6395fdac4f88ebcf82bf
|
[
"MIT"
] | null | null | null |
import logging
import sys
import threading
import serial
import serial.tools.list_ports
from crownstone_uart.Constants import UART_READ_TIMEOUT, UART_WRITE_TIMEOUT
from crownstone_uart.core.UartEventBus import UartEventBus
from crownstone_uart.core.uart.UartParser import UartParser
from crownstone_uart.core.uart.UartReadBuffer import UartReadBuffer
from crownstone_uart.topics.SystemTopics import SystemTopics
_LOGGER = logging.getLogger(__name__)
class UartBridge(threading.Thread):
def __init__(self, port, baudrate, writeChunkMaxSize=0):
self.baudrate = baudrate
self.port = port
self.writeChunkMaxSize = writeChunkMaxSize
self.serialController = None
self.started = False
self.running = True
self.parser = UartParser()
self.eventId = UartEventBus.subscribe(SystemTopics.uartWriteData, self.write_to_uart)
threading.Thread.__init__(self)
def __del__(self):
self.stop()
def run(self):
self.start_serial()
self.start_reading()
def stop(self):
self.running = False
UartEventBus.unsubscribe(self.eventId)
self.parser.stop()
def start_serial(self):
_LOGGER.debug(F"UartBridge: Initializing serial on port {self.port} with baudrate {self.baudrate}")
try:
self.serialController = serial.Serial()
self.serialController.port = self.port
self.serialController.baudrate = int(self.baudrate)
self.serialController.timeout = UART_READ_TIMEOUT
self.serialController._write_timeout = UART_WRITE_TIMEOUT
self.serialController.open()
        except (OSError, serial.SerialException, KeyboardInterrupt):
self.stop()
def start_reading(self):
readBuffer = UartReadBuffer()
self.started = True
_LOGGER.debug(F"Read starting on serial port.{self.port} {self.running}")
try:
while self.running:
bytesFromSerial = self.serialController.read()
if bytesFromSerial:
# clear out the entire read buffer
if self.serialController.in_waiting > 0:
additionalBytes = self.serialController.read(self.serialController.in_waiting)
bytesFromSerial = bytesFromSerial + additionalBytes
readBuffer.addByteArray(bytesFromSerial)
# print("Cleaning up UartBridge")
        except (OSError, serial.SerialException):
_LOGGER.info("Connection to USB Failed. Retrying...")
except KeyboardInterrupt:
self.running = False
_LOGGER.debug("Closing serial connection.")
# close the serial controller
self.serialController.close()
self.serialController = None
# remove the event listener pointing to the old connection
UartEventBus.unsubscribe(self.eventId)
self.started = False
UartEventBus.emit(SystemTopics.connectionClosed, True)
def write_to_uart(self, data):
_LOGGER.debug(f"write_to_uart: {data}")
if self.serialController is not None and self.started:
try:
if self.writeChunkMaxSize == 0:
self.serialController.write(data)
else:
# writing in chunks solves issues writing to certain JLink chips. A max chunkSize of 64 was found to work well for our case.
chunkSize = self.writeChunkMaxSize
index = 0
while (index*chunkSize) < len(data):
chunkedData = data[index*chunkSize:chunkSize*(index+1)]
index += 1
self.serialController.write(chunkedData)
UartEventBus.emit(SystemTopics.uartWriteSuccess, data)
except serial.SerialTimeoutException as e:
UartEventBus.emit(SystemTopics.uartWriteError, {"message":"Timeout on uart write.", "error": e})
except serial.SerialException as e:
UartEventBus.emit(SystemTopics.uartWriteError, {"message":"SerialException occurred during uart write", "error": e})
except OSError as e:
UartEventBus.emit(SystemTopics.uartWriteError, {"message":"OSError occurred during uart write.", "error": e})
except Exception as e:
UartEventBus.emit(SystemTopics.uartWriteError, {"message": "Unknown Exception during uart write.", "error": e})
except:
e = sys.exc_info()[0]
UartEventBus.emit(SystemTopics.uartWriteError, {"message":"Unknown error during uart write.", "error": e})
else:
self.stop()
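The chunked-write branch of write_to_uart can be exercised in isolation; here is a minimal sketch with the serial port replaced by a list so the chunk boundaries are visible (64 bytes mirrors the chunk size the comment above reports working well for JLink chips):

def chunked(data, chunk_size=64):
    # Same slicing logic as the loop in write_to_uart above.
    index = 0
    while index * chunk_size < len(data):
        yield data[index * chunk_size:chunk_size * (index + 1)]
        index += 1


parts = list(chunked(bytes(range(200))))
print([len(part) for part in parts])  # -> [64, 64, 64, 8]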
| 40.355932
| 144
| 0.640277
| 4,306
| 0.904242
| 0
| 0
| 0
| 0
| 0
| 0
| 768
| 0.161277
|
129d3359e74cfc680cc1a6d1b0edd803c1383270
| 20,753
|
py
|
Python
|
data-batch-treatment/test_agg_script/locations.py
|
coder-baymax/taxi-poc-aws
|
4be8021873ee6b58b2dba5a5d41df12cdd3b67fc
|
[
"MIT"
] | null | null | null |
data-batch-treatment/test_agg_script/locations.py
|
coder-baymax/taxi-poc-aws
|
4be8021873ee6b58b2dba5a5d41df12cdd3b67fc
|
[
"MIT"
] | null | null | null |
data-batch-treatment/test_agg_script/locations.py
|
coder-baymax/taxi-poc-aws
|
4be8021873ee6b58b2dba5a5d41df12cdd3b67fc
|
[
"MIT"
] | null | null | null |
class Location:
def __init__(self, location_id, borough, zone, lat, lng):
self.location_id = location_id
self.borough = borough
self.zone = zone
self.lat = lat
self.lng = lng
@property
def json(self):
return {
"location_id": self.location_id,
"borough": self.borough,
"zone": self.zone,
"lat": self.lat,
"lng": self.lng
}
Locations = [
Location(1, "EWR", "Newark Airport", 40.6895314, -74.1744624),
Location(2, "Queens", "Jamaica Bay", 40.6056632, -73.8713099),
Location(3, "Bronx", "Allerton/Pelham Gardens", 40.8627726, -73.84343919999999),
Location(4, "Manhattan", "Alphabet City", 40.7258428, -73.9774916),
Location(5, "Staten Island", "Arden Heights", 40.556413, -74.1735044),
Location(6, "Staten Island", "Arrochar/Fort Wadsworth", 40.6012117, -74.0579185),
Location(7, "Queens", "Astoria", 40.7643574, -73.92346189999999),
Location(8, "Queens", "Astoria Park", 40.7785364, -73.92283359999999),
Location(9, "Queens", "Auburndale", 40.7577672, -73.78339609999999),
Location(10, "Queens", "Baisley Park", 40.6737751, -73.786025),
Location(11, "Brooklyn", "Bath Beach", 40.6038852, -74.0062078),
Location(12, "Manhattan", "Battery Park", 40.703141, -74.0159996),
Location(13, "Manhattan", "Battery Park City", 40.7115786, -74.0158441),
Location(14, "Brooklyn", "Bay Ridge", 40.6263732, -74.0298767),
Location(15, "Queens", "Bay Terrace/Fort Totten", 40.7920899, -73.7760996),
Location(16, "Queens", "Bayside", 40.7585569, -73.7654367),
Location(17, "Brooklyn", "Bedford", 40.6872176, -73.9417735),
Location(18, "Bronx", "Bedford Park", 40.8700999, -73.8856912),
Location(19, "Queens", "Bellerose", 40.7361769, -73.7137365),
Location(20, "Bronx", "Belmont", 40.8534507, -73.88936819999999),
Location(21, "Brooklyn", "Bensonhurst East", 40.6139307, -73.9921833),
Location(22, "Brooklyn", "Bensonhurst West", 40.6139307, -73.9921833),
Location(23, "Staten Island", "Bloomfield/Emerson Hill", 40.6074525, -74.0963115),
Location(24, "Manhattan", "Bloomingdale", 40.7988958, -73.9697795),
Location(25, "Brooklyn", "Boerum Hill", 40.6848689, -73.9844722),
Location(26, "Brooklyn", "Borough Park", 40.6350319, -73.9921028),
Location(27, "Queens", "Breezy Point/Fort Tilden/Riis Beach", 40.5597687, -73.88761509999999),
Location(28, "Queens", "Briarwood/Jamaica Hills", 40.7109315, -73.81356099999999),
Location(29, "Brooklyn", "Brighton Beach", 40.5780706, -73.9596565),
Location(30, "Queens", "Broad Channel", 40.6158335, -73.8213213),
Location(31, "Bronx", "Bronx Park", 40.8608544, -73.8706278),
Location(32, "Bronx", "Bronxdale", 40.8474697, -73.8599132),
Location(33, "Brooklyn", "Brooklyn Heights", 40.6959294, -73.9955523),
Location(34, "Brooklyn", "Brooklyn Navy Yard", 40.7025634, -73.9697795),
Location(35, "Brooklyn", "Brownsville", 40.665214, -73.9125304),
Location(36, "Brooklyn", "Bushwick North", 40.6957755, -73.9170604),
Location(37, "Brooklyn", "Bushwick South", 40.7043655, -73.9383476),
Location(38, "Queens", "Cambria Heights", 40.692158, -73.7330753),
Location(39, "Brooklyn", "Canarsie", 40.6402325, -73.9060579),
Location(40, "Brooklyn", "Carroll Gardens", 40.6795331, -73.9991637),
Location(41, "Manhattan", "Central Harlem", 40.8089419, -73.9482305),
Location(42, "Manhattan", "Central Harlem North", 40.8142585, -73.9426617),
Location(43, "Manhattan", "Central Park", 40.7812199, -73.9665138),
Location(44, "Staten Island", "Charleston/Tottenville", 40.5083408, -74.23554039999999),
Location(45, "Manhattan", "Chinatown", 40.7157509, -73.9970307),
Location(46, "Bronx", "City Island", 40.8468202, -73.7874983),
Location(47, "Bronx", "Claremont/Bathgate", 40.84128339999999, -73.9001573),
Location(48, "Manhattan", "Clinton East", 40.7637581, -73.9918181),
Location(49, "Brooklyn", "Clinton Hill", 40.6896834, -73.9661144),
Location(50, "Manhattan", "Clinton West", 40.7628785, -73.9940134),
Location(51, "Bronx", "Co-Op City", 40.8738889, -73.82944440000001),
Location(52, "Brooklyn", "Cobble Hill", 40.686536, -73.9962255),
Location(53, "Queens", "College Point", 40.786395, -73.8389657),
Location(54, "Brooklyn", "Columbia Street", 40.6775239, -74.00634409999999),
Location(55, "Brooklyn", "Coney Island", 40.5755438, -73.9707016),
Location(56, "Queens", "Corona", 40.7449859, -73.8642613),
Location(57, "Queens", "Corona", 40.7449859, -73.8642613),
Location(58, "Bronx", "Country Club", 40.8391667, -73.8197222),
Location(59, "Bronx", "Crotona Park", 40.8400367, -73.8953489),
Location(60, "Bronx", "Crotona Park East", 40.8365344, -73.8933509),
Location(61, "Brooklyn", "Crown Heights North", 40.6694022, -73.9422324),
Location(62, "Brooklyn", "Crown Heights South", 40.6694022, -73.9422324),
Location(63, "Brooklyn", "Cypress Hills", 40.6836873, -73.87963309999999),
Location(64, "Queens", "Douglaston", 40.76401509999999, -73.7433727),
Location(65, "Brooklyn", "Downtown Brooklyn/MetroTech", 40.6930987, -73.98566339999999),
Location(66, "Brooklyn", "DUMBO/Vinegar Hill", 40.70371859999999, -73.98226830000002),
Location(67, "Brooklyn", "Dyker Heights", 40.6214932, -74.00958399999999),
Location(68, "Manhattan", "East Chelsea", 40.7465004, -74.00137370000002),
Location(69, "Bronx", "East Concourse/Concourse Village", 40.8255863, -73.9184388),
Location(70, "Queens", "East Elmhurst", 40.7737505, -73.8713099),
Location(71, "Brooklyn", "East Flatbush/Farragut", 40.63751329999999, -73.9280797),
Location(72, "Brooklyn", "East Flatbush/Remsen Village", 40.6511399, -73.9181602),
Location(73, "Queens", "East Flushing", 40.7540534, -73.8086418),
Location(74, "Manhattan", "East Harlem North", 40.7957399, -73.93892129999999),
Location(75, "Manhattan", "East Harlem South", 40.7957399, -73.93892129999999),
Location(76, "Brooklyn", "East New York", 40.6590529, -73.8759245),
Location(77, "Brooklyn", "East New York/Pennsylvania Avenue", 40.65845729999999, -73.8904498),
Location(78, "Bronx", "East Tremont", 40.8453781, -73.8909693),
Location(79, "Manhattan", "East Village", 40.7264773, -73.98153370000001),
Location(80, "Brooklyn", "East Williamsburg", 40.7141953, -73.9316461),
Location(81, "Bronx", "Eastchester", 40.8859837, -73.82794710000002),
Location(82, "Queens", "Elmhurst", 40.737975, -73.8801301),
Location(83, "Queens", "Elmhurst/Maspeth", 40.7294018, -73.9065883),
Location(84, "Staten Island", "Eltingville/Annadale/Prince's Bay", 40.52899439999999, -74.197644),
Location(85, "Brooklyn", "Erasmus", 40.649649, -73.95287379999999),
Location(86, "Queens", "Far Rockaway", 40.5998931, -73.74484369999999),
Location(87, "Manhattan", "Financial District North", 40.7077143, -74.00827869999999),
Location(88, "Manhattan", "Financial District South", 40.705123, -74.0049259),
Location(89, "Brooklyn", "Flatbush/Ditmas Park", 40.6414876, -73.9593998),
Location(90, "Manhattan", "Flatiron", 40.740083, -73.9903489),
Location(91, "Brooklyn", "Flatlands", 40.6232714, -73.9321664),
Location(92, "Queens", "Flushing", 40.7674987, -73.833079),
Location(93, "Queens", "Flushing Meadows-Corona Park", 40.7400275, -73.8406953),
Location(94, "Bronx", "Fordham South", 40.8592667, -73.8984694),
Location(95, "Queens", "Forest Hills", 40.718106, -73.8448469),
Location(96, "Queens", "Forest Park/Highland Park", 40.6960418, -73.8663024),
Location(97, "Brooklyn", "Fort Greene", 40.6920638, -73.97418739999999),
Location(98, "Queens", "Fresh Meadows", 40.7335179, -73.7801447),
Location(99, "Staten Island", "Freshkills Park", 40.5772365, -74.1858183),
Location(100, "Manhattan", "Garment District", 40.7547072, -73.9916342),
Location(101, "Queens", "Glen Oaks", 40.7471504, -73.7118223),
Location(102, "Queens", "Glendale", 40.7016662, -73.8842219),
Location(103, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(104, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(105, "Manhattan", "Governor's Island/Ellis Island/Liberty Island", 40.6892494, -74.04450039999999),
Location(106, "Brooklyn", "Gowanus", 40.6751161, -73.9879753),
Location(107, "Manhattan", "Gramercy", 40.7367783, -73.9844722),
Location(108, "Brooklyn", "Gravesend", 40.5918636, -73.9768653),
Location(109, "Staten Island", "Great Kills", 40.5543273, -74.156292),
Location(110, "Staten Island", "Great Kills Park", 40.5492367, -74.1238486),
Location(111, "Brooklyn", "Green-Wood Cemetery", 40.6579777, -73.9940634),
Location(112, "Brooklyn", "Greenpoint", 40.7304701, -73.95150319999999),
Location(113, "Manhattan", "Greenwich Village North", 40.7335719, -74.0027418),
Location(114, "Manhattan", "Greenwich Village South", 40.7335719, -74.0027418),
Location(115, "Staten Island", "Grymes Hill/Clifton", 40.6189726, -74.0784785),
Location(116, "Manhattan", "Hamilton Heights", 40.8252793, -73.94761390000001),
Location(117, "Queens", "Hammels/Arverne", 40.5880813, -73.81199289999999),
Location(118, "Staten Island", "Heartland Village/Todt Hill", 40.5975007, -74.10189749999999),
Location(119, "Bronx", "Highbridge", 40.836916, -73.9271294),
Location(120, "Manhattan", "Highbridge Park", 40.8537599, -73.9257492),
Location(121, "Queens", "Hillcrest/Pomonok", 40.732341, -73.81077239999999),
Location(122, "Queens", "Hollis", 40.7112203, -73.762495),
Location(123, "Brooklyn", "Homecrest", 40.6004787, -73.9565551),
Location(124, "Queens", "Howard Beach", 40.6571222, -73.8429989),
Location(125, "Manhattan", "Hudson Sq", 40.7265834, -74.0074731),
Location(126, "Bronx", "Hunts Point", 40.8094385, -73.8803315),
Location(127, "Manhattan", "Inwood", 40.8677145, -73.9212019),
Location(128, "Manhattan", "Inwood Hill Park", 40.8722007, -73.9255549),
Location(129, "Queens", "Jackson Heights", 40.7556818, -73.8830701),
Location(130, "Queens", "Jamaica", 40.702677, -73.7889689),
Location(131, "Queens", "Jamaica Estates", 40.7179512, -73.783822),
Location(132, "Queens", "JFK Airport", 40.6413111, -73.77813909999999),
Location(133, "Brooklyn", "Kensington", 40.63852019999999, -73.97318729999999),
Location(134, "Queens", "Kew Gardens", 40.705695, -73.8272029),
Location(135, "Queens", "Kew Gardens Hills", 40.724707, -73.8207618),
Location(136, "Bronx", "Kingsbridge Heights", 40.8711235, -73.8976328),
Location(137, "Manhattan", "Kips Bay", 40.74232920000001, -73.9800645),
Location(138, "Queens", "LaGuardia Airport", 40.7769271, -73.8739659),
Location(139, "Queens", "Laurelton", 40.67764, -73.7447853),
Location(140, "Manhattan", "Lenox Hill East", 40.7662315, -73.9602312),
Location(141, "Manhattan", "Lenox Hill West", 40.7662315, -73.9602312),
Location(142, "Manhattan", "Lincoln Square East", 40.7741769, -73.98491179999999),
Location(143, "Manhattan", "Lincoln Square West", 40.7741769, -73.98491179999999),
Location(144, "Manhattan", "Little Italy/NoLiTa", 40.7230413, -73.99486069999999),
Location(145, "Queens", "Long Island City/Hunters Point", 40.7485587, -73.94964639999999),
Location(146, "Queens", "Long Island City/Queens Plaza", 40.7509846, -73.9402762),
Location(147, "Bronx", "Longwood", 40.8248438, -73.8915875),
Location(148, "Manhattan", "Lower East Side", 40.715033, -73.9842724),
Location(149, "Brooklyn", "Madison", 40.60688529999999, -73.947958),
Location(150, "Brooklyn", "Manhattan Beach", 40.57815799999999, -73.93892129999999),
Location(151, "Manhattan", "Manhattan Valley", 40.7966989, -73.9684247),
Location(152, "Manhattan", "Manhattanville", 40.8169443, -73.9558333),
Location(153, "Manhattan", "Marble Hill", 40.8761173, -73.9102628),
Location(154, "Brooklyn", "Marine Park/Floyd Bennett Field", 40.58816030000001, -73.8969745),
Location(155, "Brooklyn", "Marine Park/Mill Basin", 40.6055157, -73.9348698),
Location(156, "Staten Island", "Mariners Harbor", 40.63677010000001, -74.1587547),
Location(157, "Queens", "Maspeth", 40.7294018, -73.9065883),
Location(158, "Manhattan", "Meatpacking/West Village West", 40.7342331, -74.0100622),
Location(159, "Bronx", "Melrose South", 40.824545, -73.9104143),
Location(160, "Queens", "Middle Village", 40.717372, -73.87425),
Location(161, "Manhattan", "Midtown Center", 40.7314658, -73.9970956),
Location(162, "Manhattan", "Midtown East", 40.7571432, -73.9718815),
Location(163, "Manhattan", "Midtown North", 40.7649516, -73.9851039),
Location(164, "Manhattan", "Midtown South", 40.7521795, -73.9875438),
Location(165, "Brooklyn", "Midwood", 40.6204388, -73.95997779999999),
Location(166, "Manhattan", "Morningside Heights", 40.8105443, -73.9620581),
Location(167, "Bronx", "Morrisania/Melrose", 40.824545, -73.9104143),
Location(168, "Bronx", "Mott Haven/Port Morris", 40.8022025, -73.9166051),
Location(169, "Bronx", "Mount Hope", 40.8488863, -73.9051185),
Location(170, "Manhattan", "Murray Hill", 40.7478792, -73.9756567),
Location(171, "Queens", "Murray Hill-Queens", 40.7634996, -73.8073261),
Location(172, "Staten Island", "New Dorp/Midland Beach", 40.5739937, -74.1159755),
Location(173, "Queens", "North Corona", 40.7543725, -73.8669188),
Location(174, "Bronx", "Norwood", 40.8810341, -73.878486),
Location(175, "Queens", "Oakland Gardens", 40.7408584, -73.758241),
Location(176, "Staten Island", "Oakwood", 40.563994, -74.1159754),
Location(177, "Brooklyn", "Ocean Hill", 40.6782737, -73.9108212),
Location(178, "Brooklyn", "Ocean Parkway South", 40.61287799999999, -73.96838620000001),
Location(179, "Queens", "Old Astoria", 40.7643574, -73.92346189999999),
Location(180, "Queens", "Ozone Park", 40.6794072, -73.8507279),
Location(181, "Brooklyn", "Park Slope", 40.6710672, -73.98142279999999),
Location(182, "Bronx", "Parkchester", 40.8382522, -73.8566087),
Location(183, "Bronx", "Pelham Bay", 40.8505556, -73.83333329999999),
Location(184, "Bronx", "Pelham Bay Park", 40.8670144, -73.81006339999999),
Location(185, "Bronx", "Pelham Parkway", 40.8553279, -73.8639594),
Location(186, "Manhattan", "Penn Station/Madison Sq West", 40.7505045, -73.9934387),
Location(187, "Staten Island", "Port Richmond", 40.63549140000001, -74.1254641),
Location(188, "Brooklyn", "Prospect-Lefferts Gardens", 40.6592355, -73.9533895),
Location(189, "Brooklyn", "Prospect Heights", 40.6774196, -73.9668408),
Location(190, "Brooklyn", "Prospect Park", 40.6602037, -73.9689558),
Location(191, "Queens", "Queens Village", 40.7156628, -73.7419017),
Location(192, "Queens", "Queensboro Hill", 40.7429383, -73.8251741),
Location(193, "Queens", "Queensbridge/Ravenswood", 40.7556711, -73.9456723),
Location(194, "Manhattan", "Randalls Island", 40.7932271, -73.92128579999999),
Location(195, "Brooklyn", "Red Hook", 40.6733676, -74.00831889999999),
Location(196, "Queens", "Rego Park", 40.72557219999999, -73.8624893),
Location(197, "Queens", "Richmond Hill", 40.6958108, -73.8272029),
Location(198, "Queens", "Ridgewood", 40.7043986, -73.9018292),
Location(199, "Bronx", "Rikers Island", 40.79312770000001, -73.88601),
Location(200, "Bronx", "Riverdale/North Riverdale/Fieldston", 40.89961830000001, -73.9088276),
Location(201, "Queens", "Rockaway Park", 40.57978629999999, -73.8372237),
Location(202, "Manhattan", "Roosevelt Island", 40.76050310000001, -73.9509934),
Location(203, "Queens", "Rosedale", 40.6584068, -73.7389596),
Location(204, "Staten Island", "Rossville/Woodrow", 40.5434385, -74.19764409999999),
Location(205, "Queens", "Saint Albans", 40.6895283, -73.76436880000001),
Location(206, "Staten Island", "Saint George/New Brighton", 40.6404369, -74.090226),
Location(207, "Queens", "Saint Michaels Cemetery/Woodside", 40.7646761, -73.89850419999999),
Location(208, "Bronx", "Schuylerville/Edgewater Park", 40.8235967, -73.81029269999999),
Location(209, "Manhattan", "Seaport", 40.70722629999999, -74.0027431),
Location(210, "Brooklyn", "Sheepshead Bay", 40.5953955, -73.94575379999999),
Location(211, "Manhattan", "SoHo", 40.723301, -74.0029883),
Location(212, "Bronx", "Soundview/Bruckner", 40.8247566, -73.8710929),
Location(213, "Bronx", "Soundview/Castle Hill", 40.8176831, -73.8507279),
Location(214, "Staten Island", "South Beach/Dongan Hills", 40.5903824, -74.06680759999999),
Location(215, "Queens", "South Jamaica", 40.6808594, -73.7919103),
Location(216, "Queens", "South Ozone Park", 40.6764003, -73.8124984),
Location(217, "Brooklyn", "South Williamsburg", 40.7043921, -73.9565551),
Location(218, "Queens", "Springfield Gardens North", 40.6715916, -73.779798),
Location(219, "Queens", "Springfield Gardens South", 40.6715916, -73.779798),
Location(220, "Bronx", "Spuyten Duyvil/Kingsbridge", 40.8833912, -73.9051185),
Location(221, "Staten Island", "Stapleton", 40.6264929, -74.07764139999999),
Location(222, "Brooklyn", "Starrett City", 40.6484272, -73.88236119999999),
Location(223, "Queens", "Steinway", 40.7745459, -73.9037477),
Location(224, "Manhattan", "Stuy Town/Peter Cooper Village", 40.7316903, -73.9778494),
Location(225, "Brooklyn", "Stuyvesant Heights", 40.6824166, -73.9319933),
Location(226, "Queens", "Sunnyside", 40.7432759, -73.9196324),
Location(227, "Brooklyn", "Sunset Park East", 40.65272, -74.00933479999999),
Location(228, "Brooklyn", "Sunset Park West", 40.65272, -74.00933479999999),
Location(229, "Manhattan", "Sutton Place/Turtle Bay North", 40.7576281, -73.961698),
Location(230, "Manhattan", "Times Sq/Theatre District", 40.759011, -73.9844722),
Location(231, "Manhattan", "TriBeCa/Civic Center", 40.71625299999999, -74.0122396),
Location(232, "Manhattan", "Two Bridges/Seward Park", 40.7149056, -73.98924699999999),
Location(233, "Manhattan", "UN/Turtle Bay South", 40.7571432, -73.9718815),
Location(234, "Manhattan", "Union Sq", 40.7358633, -73.9910835),
Location(235, "Bronx", "University Heights/Morris Heights", 40.8540855, -73.9198498),
Location(236, "Manhattan", "Upper East Side North", 40.7600931, -73.9598414),
Location(237, "Manhattan", "Upper East Side South", 40.7735649, -73.9565551),
Location(238, "Manhattan", "Upper West Side North", 40.7870106, -73.9753676),
Location(239, "Manhattan", "Upper West Side South", 40.7870106, -73.9753676),
Location(240, "Bronx", "Van Cortlandt Park", 40.8972233, -73.8860668),
Location(241, "Bronx", "Van Cortlandt Village", 40.8837203, -73.89313899999999),
Location(242, "Bronx", "Van Nest/Morris Park", 40.8459682, -73.8625946),
Location(243, "Manhattan", "Washington Heights North", 40.852476, -73.9342996),
Location(244, "Manhattan", "Washington Heights South", 40.8417082, -73.9393554),
Location(245, "Staten Island", "West Brighton", 40.6270298, -74.10931409999999),
Location(246, "Manhattan", "West Chelsea/Hudson Yards", 40.7542535, -74.0023331),
Location(247, "Bronx", "West Concourse", 40.8316761, -73.9227554),
Location(248, "Bronx", "West Farms/Bronx River", 40.8430609, -73.8816001),
Location(249, "Manhattan", "West Village", 40.73468, -74.0047554),
Location(250, "Bronx", "Westchester Village/Unionport", 40.8340447, -73.8531349),
Location(251, "Staten Island", "Westerleigh", 40.616296, -74.1386767),
Location(252, "Queens", "Whitestone", 40.7920449, -73.8095574),
Location(253, "Queens", "Willets Point", 40.7606911, -73.840436),
Location(254, "Bronx", "Williamsbridge/Olinville", 40.8787602, -73.85283559999999),
Location(255, "Brooklyn", "Williamsburg (North Side)", 40.71492, -73.9528472),
Location(256, "Brooklyn", "Williamsburg (South Side)", 40.70824229999999, -73.9571487),
Location(257, "Brooklyn", "Windsor Terrace", 40.6539346, -73.9756567),
Location(258, "Queens", "Woodhaven", 40.6901366, -73.8566087),
Location(259, "Bronx", "Woodlawn/Wakefield", 40.8955885, -73.8627133),
Location(260, "Queens", "Woodside", 40.7532952, -73.9068973),
Location(261, "Manhattan", "World Trade Center", 40.7118011, -74.0131196),
Location(262, "Manhattan", "Yorkville East", 40.7762231, -73.94920789999999),
Location(263, "Manhattan", "Yorkville West", 40.7762231, -73.94920789999999)
]
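A small sketch of how this table can be queried: pick the zone whose centroid is closest to a query coordinate. Plain squared differences in degrees are used here, which is only a rough approximation rather than a geodesic distance; it relies on the Locations list defined above.

def nearest_location(lat, lng, locations=Locations):
    # Squared-degree distance is fine for a coarse nearest-centroid lookup.
    return min(locations, key=lambda loc: (loc.lat - lat) ** 2 + (loc.lng - lng) ** 2)


hit = nearest_location(40.758, -73.9855)
print(hit.location_id, hit.borough, hit.zone)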
| 72.562937
| 112
| 0.679131
| 451
| 0.021732
| 0
| 0
| 226
| 0.01089
| 0
| 0
| 7,271
| 0.350359
|
129d53076c9002e63bb6e233e94f66b83a1c9e37
| 114
|
py
|
Python
|
main.py
|
viniciuslimafernandes/interpolation
|
1aff08cba6026143fd267a0c648bad8975ae5d74
|
[
"MIT"
] | null | null | null |
main.py
|
viniciuslimafernandes/interpolation
|
1aff08cba6026143fd267a0c648bad8975ae5d74
|
[
"MIT"
] | null | null | null |
main.py
|
viniciuslimafernandes/interpolation
|
1aff08cba6026143fd267a0c648bad8975ae5d74
|
[
"MIT"
] | null | null | null |
import math
from utils import *
def main():
showHome()
option = chooseOption()
handleOption(option)
main()
| 12.666667
| 25
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
129e3285af4caf68d1f91b717a406d9814f4383d
| 222
|
py
|
Python
|
tests/helper.py
|
blehers/PyViCare
|
e74b854afe6678f30c05bdef5e642ab66d1c0b6a
|
[
"Apache-2.0"
] | null | null | null |
tests/helper.py
|
blehers/PyViCare
|
e74b854afe6678f30c05bdef5e642ab66d1c0b6a
|
[
"Apache-2.0"
] | null | null | null |
tests/helper.py
|
blehers/PyViCare
|
e74b854afe6678f30c05bdef5e642ab66d1c0b6a
|
[
"Apache-2.0"
] | null | null | null |
import os
import simplejson as json
def readJson(fileName):
test_filename = os.path.join(os.path.dirname(__file__), fileName)
with open(test_filename, mode='rb') as json_file:
return json.load(json_file)
| 24.666667
| 69
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.018018
|
129f44f6dc7578a9b45f3abd7e3b50f1fe3a4274
| 1,999
|
py
|
Python
|
examples/client-example.py
|
pkalemba/python-warp10client
|
25a9b446a217066a7d6c39aeb7d19d1be93a7688
|
[
"BSD-3-Clause"
] | 8
|
2017-11-20T13:31:58.000Z
|
2021-07-13T08:34:52.000Z
|
examples/client-example.py
|
pkalemba/python-warp10client
|
25a9b446a217066a7d6c39aeb7d19d1be93a7688
|
[
"BSD-3-Clause"
] | 2
|
2017-11-20T21:16:16.000Z
|
2017-12-11T13:56:44.000Z
|
examples/client-example.py
|
regel/python-warp10client
|
bee380513d899ae7c55a26e43a8914f8c29b5279
|
[
"BSD-3-Clause"
] | 4
|
2017-11-21T07:51:01.000Z
|
2020-04-07T12:03:23.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import daiquiri
from time import time
import warp10client
LOG = daiquiri.getLogger(__name__)
warp10_api_url = '' # Add here backend url where metrics are stored
read_token = '' # Add here your metrics read token
write_token = '' # Add here your metrics write token
# To get metrics:
metric_get = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
'aggregate': {
'type': 'mean',
'span': 1000000 * 3600,
},
'timestamp': {
'start': "2017-01-01T00:00:00.000Z",
'end': "2018-01-01T00:00:00.000Z"
}
# 'timestamp': { 'end': "2018-01-01T00:00:00.000Z" }
# 'timestamp': { 'start': None, 'end': None }
}
# To write metrics:
metric_write = {
'name': 'cpu_util_mjozefcz',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
'unit': '%',
},
'position': {
'longitude': None,
'latitude': None,
'elevation': None,
'timestamp': time() * 1000 * 1000,
},
'value': 11,
}
# To check metrics
metric_check = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
}
# arguments need to authorize in metrics backend
kwargs = {
'write_token': write_token,
'read_token': read_token,
'warp10_api_url': warp10_api_url,
}
client = warp10client.Warp10Client(**kwargs)
# Consider to create timeseries, new object with included metrics as each point
# Thats goooood idea.
metric_get_test = client.get(metric_get)
metric_exists = client.exists(metric_check)
metric_obj = warp10client.Metric(**metric_write)
metric_send = client.set(metric_write)
# delete method is not yet implemented
# metric_send = client.delete(metric_write)
| 24.9875
| 79
| 0.64032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,127
| 0.563782
|
12a0170295fb80e383d69995765e135510da8362
| 3,094
|
py
|
Python
|
ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py
|
H-Grobben/micropython
|
fce96b11f3ff444c1ac24501db465dbe9e5902bf
|
[
"MIT"
] | null | null | null |
ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py
|
H-Grobben/micropython
|
fce96b11f3ff444c1ac24501db465dbe9e5902bf
|
[
"MIT"
] | null | null | null |
ports/stm32/boards/NUCLEO_WB55/rfcore_makefirmware.py
|
H-Grobben/micropython
|
fce96b11f3ff444c1ac24501db465dbe9e5902bf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This script obfuscates the ST wireless binaries so they can be safely copied
# to the flash filesystem and not be accidentally discovered by the FUS during
# an update. See more information (and the corresponding de-obfuscation) in
# rfcore_firmware.py as well as instructions on how to use.
import os
import struct
import sys
# Must match rfcore_firmware.py.
_OBFUSCATION_KEY = 0x0573B55AA
_FIRMWARE_FILES = {
"stm32wb5x_FUS_fw_1_0_2.bin": "fus_102.bin",
"stm32wb5x_FUS_fw.bin": "fus_112.bin",
"stm32wb5x_BLE_HCILayer_fw.bin": "ws_ble_hci.bin",
}
def main(src_path, dest_path):
for src_file, dest_file in _FIRMWARE_FILES.items():
src_file = os.path.join(src_path, src_file)
dest_file = os.path.join(dest_path, dest_file)
if not os.path.exists(src_file):
print("Unable to find: {}".format(src_file))
continue
sz = 0
with open(src_file, "rb") as src:
with open(dest_file, "wb") as dest:
while True:
b = src.read(4)
if not b:
break
(v,) = struct.unpack("<I", b)
v ^= _OBFUSCATION_KEY
dest.write(struct.pack("<I", v))
sz += 4
print("Written {} ({} bytes)".format(dest_file, sz))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: {} src_path dest_path".format(sys.argv[0]))
print()
print(
'"src_path" should be the location of the ST binaries from https://github.com/STMicroelectronics/STM32CubeWB/tree/master/Projects/STM32WB_Copro_Wireless_Binaries/STM32WB5x'
)
print(
'"dest_path" will be where fus_102.bin, fus_110.bin, and ws_ble_hci.bin will be written to.'
)
sys.exit(1)
main(sys.argv[1], sys.argv[2])
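Because the transform is a word-wise XOR, running the same loop over an obfuscated file restores the original; a minimal sketch of that inverse, reusing _OBFUSCATION_KEY and the struct import from the script above and masking to 32 bits so the packed word always fits:

def deobfuscate(src_file, dest_file, key=_OBFUSCATION_KEY):
    # XOR with the same key undoes the obfuscation word by word.
    size = 0
    with open(src_file, "rb") as src, open(dest_file, "wb") as dest:
        while True:
            word = src.read(4)
            if not word:
                break
            (value,) = struct.unpack("<I", word)
            dest.write(struct.pack("<I", (value ^ key) & 0xFFFFFFFF))
            size += 4
    return size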
| 38.675
| 184
| 0.671946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,999
| 0.646089
|
12a080db56a168dea64d817c232a427dfdd87858
| 1,081
|
py
|
Python
|
universal/spiders/universalSpider.py
|
universalscraper/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | 2
|
2017-01-14T20:09:24.000Z
|
2019-09-23T09:26:23.000Z
|
universal/spiders/universalSpider.py
|
scraperize/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | null | null | null |
universal/spiders/universalSpider.py
|
scraperize/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | null | null | null |
import scrapy
import yaml
class universalSpider(scrapy.Spider):
name = "universal"
parameters = None
def __init__(self, *args, **kwargs):
worker = kwargs.get("worker")
if not worker:
exit("You must specify worker name : -a worker=name")
        with open("./workers/" + worker + ".yml", "r") as worker_file:
            self.parameters = yaml.safe_load(worker_file)
super(universalSpider, self).__init__(*args, **kwargs)
self.start_urls = self.parameters["urls"]
self.allowed_domains = self.parameters["domains"]
def parse(self, response):
wrapper = "html"
if "wrapper" in self.parameters and "css" in self.parameters["wrapper"]:
wrapper = self.parameters["wrapper"]["css"]
for item in response.css(wrapper):
data = {}
for columnName in self.parameters["columns"]:
columnOptions = self.parameters["columns"][columnName]
                data[columnName] = str(item.css(columnOptions["css"]).extract_first().strip().encode(self.parameters["charset"]))
yield data
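The spider only works if the worker YAML file provides the keys read above (urls, domains, charset, wrapper, columns). A hedged sketch of such a file, parsed the same way; every URL and CSS selector below is a placeholder, not taken from any real worker:

import yaml

example_worker = """
urls:
  - https://example.com/catalogue
domains:
  - example.com
charset: utf-8
wrapper:
  css: "article.product"
columns:
  title:
    css: "h3 a::text"
  price:
    css: "p.price::text"
"""

parameters = yaml.safe_load(example_worker)
print(parameters["columns"]["title"]["css"])  # -> h3 a::text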
| 29.216216
| 130
| 0.60592
| 1,053
| 0.974098
| 541
| 0.500463
| 0
| 0
| 0
| 0
| 177
| 0.163737
|
12a0f3a1d45fe59fa067cf5c06c3bffbb58f6bd1
| 11,715
|
py
|
Python
|
environments/IPP_BO_Ypacarai.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | null | null | null |
environments/IPP_BO_Ypacarai.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | null | null | null |
environments/IPP_BO_Ypacarai.py
|
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs
|
23b9b181499a4b06f2ca2951c002359c1959e727
|
[
"MIT"
] | null | null | null |
import warnings
import gym
import matplotlib.pyplot as plt
import numpy as np
from skopt.acquisition import gaussian_ei
from environments.groundtruthgenerator import GroundTruth
warnings.simplefilter("ignore", UserWarning)
from skopt.learning.gaussian_process import gpr, kernels
class ContinuousBO(gym.Env):
environment_name = "Continuous Informative Path Planning"
def __init__(self, scenario_map, initial_position=None, battery_budget=100, resolution=1, seed=0):
self.id = "Continuous BO Ypacarai"
# Map of the environment #
self.scenario_map = scenario_map
# Environment boundaries
self.map_size = self.scenario_map.shape
self.map_lims = np.array(self.map_size) - 1
# Action space and sizes #
self.action_space = gym.spaces.Box(low=np.array([0, 0]), high=np.array([1, 1]))
self.action_size = 2
# Internal state of the Markov Decision Process #
self.state = None
self.next_state = None
self.reward = None
self.done = False
# Create the ground truth object #
self.gt = GroundTruth(1 - self.scenario_map, 1, max_number_of_peaks=5)
# Initial position, referred as a [X,Y] vector #
self.initial_position = initial_position
self.step_count = 0
# Penalization for a collision in the reward funcion #
self.collision_penalization = 10
# Seed for deterministic replication #
self.seed = seed
np.random.seed(self.seed)
# Battery budget
self.battery = battery_budget
# Battery cost -> Cost of the battery per 1m movement
# This is calculated approximately using the long size of the map ~ 15km in the Ypacarai case
self.battery_cost = 100 / np.max(self.map_size) / resolution
# Gaussian Process parameters
# GP with RBF kernel of 10% long-size of the map lengthscale (in pixels)
self.gp = gpr.GaussianProcessRegressor(kernel=kernels.RBF(0.1 * np.min(self.map_size)), alpha=1e-7)
        # Matrix of (num_samples, features): num_samples: number of positions where samples have been taken
        # features: each of the dimensions, i.e., the features are y, x
self.train_inputs = None
# Solution vector: y = f(x)
self.train_targets = None
        # Matrix of (num_pos, features): num_pos: number of positions the ASV can occupy
        # features: each of the dimensions, i.e., the features are y, x
# [[2 17]
# [2 18]
# ...
# [y x]
# ...
# [54 31]
# [54 32]]
self.possible_locations = np.asarray(np.where(self.scenario_map == 1)).reshape(2, -1).T
# Generate vector of possible gt (speeds up the reward MSE process)
self.target_locations = None
        # Vector!! of dimension 1 row x (m*n) columns representing the previous uncertainty map
self.current_std = None
        # Vector!! of dimension 1 row x (m*n) columns representing the previous mean map
self.current_mu = None
# Current MSE for reward
self.previous_mse = None
self._max_step_distance = np.min(self.map_size)
# Initial position #
# The initial position HAS PIXEL UNITS:
# The action spaces translates to PIXEL UNITS TO FORM THE STATE
self.position = None
self.place_agent()
self.reset()
def max_step_distance(self):
        # this could be turned into a function of the length scale
return 0.2 * self._max_step_distance
# return lambda * np.exp(self.gp.kernel_.theta[0])
def reset(self):
""" Reset the internal parameters of the environment. """
# Place the agent in the initial point depending if it is defined a fixed initial position.
self.place_agent()
# Reset the battery
self.battery = 100
# Reset the internal MPD variables
self.reward = None
self.done = False
self.next_state = None
self.step_count = 0
# Generate another gt -> changes internally self.gt.normalized_z
# normalized_z is also achievable using self.gt.read()
self.gt.reset()
# Reset Gaussian Process parameters #
# Generate the first input X
self.train_inputs = np.array([self.position]).reshape(-1, 2)
# Evaluate the environment in this particular initial point
self.train_targets = np.array([self.gt.read(self.position)])
# Fit the Gaussian Process
self.gp.fit(self.train_inputs, self.train_targets)
# Generate the uncertainty map
self.current_mu, self.current_std = self.gp.predict(self.possible_locations, return_std=True)
# Fill vector of possible gt (speeds up the reward MSE process)
self.target_locations = [self.gt.read(pos) for pos in self.possible_locations]
# Calculate first MSE
self.previous_mse = (np.square(self.current_mu - self.target_locations)).mean()
# Process the state
self.process_state()
return self.state
def place_agent(self):
""" Place the agent in a random place. """
if self.initial_position is None:
indx = np.random.randint(0, len(self.possible_locations))
self.position = self.possible_locations[indx]
else:
self.position = np.copy(self.initial_position)
def process_state(self):
""" Process the state """
"""
state[0] -> position
state[1] -> boundaries
state[2] -> features
"""
state = np.zeros(shape=(4, self.scenario_map.shape[0], self.scenario_map.shape[1])).astype(float)
# State - position #
state[0, self.position[0], self.position[1]] = 1
# State - boundaries #
state[1] = np.copy(self.scenario_map)
# State - old standard deviation
state[2][self.possible_locations[:, 0], self.possible_locations[:, 1]] = self.current_std
# State - old standard deviation
state[3][self.possible_locations[:, 0], self.possible_locations[:, 1]] = self.current_mu
self.state = state
def render(self, **kwargs):
""" Render the state for visualization purposes. Outputs the stacked rgb resultant. """
red = np.copy(self.state[1]) + (1 - self.scenario_map)
        # todo: add state[3]
green = np.copy(self.state[2]) + (1 - self.scenario_map)
blue = np.copy(self.state[0]) + (1 - self.scenario_map)
rgb = np.stack((red, green, blue), axis=-1)
fig, axs = plt.subplots(1, 4, figsize=(15, 3))
axs[0].imshow(self.state[0])
axs[0].set_title('Position')
axs[1].imshow(self.state[1])
axs[1].set_title('Navigation map')
axs[2].imshow(self.state[2])
axs[2].plot(self.train_inputs[:, 1], self.train_inputs[:, 0], 'xr')
axs[2].set_title('$\\sigma(x)$')
axs[3].imshow(self.state[3])
axs[3].plot(self.train_inputs[:, 1], self.train_inputs[:, 0], 'xr')
axs[3].set_title('$\\mu(x)$')
plt.show()
return rgb
def action2vector(self, desired_action):
""" Translate a desired action into a pixel velocity vector. """
desired_distance = self.max_step_distance() * desired_action[0]
desired_angle = 2 * 3.141592 * desired_action[1]
return np.array(
[-desired_distance * np.sin(desired_angle),
desired_distance * np.cos(desired_angle)]
)
def step(self, desired_action):
""" Process an action, generates the new state and the reward to that action. """
self.step_count += 1
next_position = self.action2vector(desired_action) + self.position # The next intended position
next_position = np.clip(next_position, (0, 0), self.map_lims) # Clip the intended position to be inside the map
next_position = np.floor(next_position).astype(int) # Discrete
if self.scenario_map[next_position[0], next_position[1]] == 1: # If the next position is navigable ...
valid = True
else:
valid = False
if valid:
distance = np.linalg.norm(next_position - self.position) # Compute the intended travel distance IN PIXELS
self.position = next_position # Update the position
self.battery -= distance * self.battery_cost # Compute the new battery level
self.train_inputs = np.vstack([self.train_inputs, self.position]) # Store the new sampling point
self.train_targets = np.append(self.train_targets, self.gt.read(self.position))
self.gp.fit(self.train_inputs, self.train_targets) # Fit the stored sampled points
else:
distance = np.linalg.norm(next_position - self.position) # If not valid, it consumes the intended battery
self.battery -= distance * self.battery_cost
self.compute_reward(valid) # Reward function evaluation
self.process_state() # Generate the new state
# Check the episodic-end condition
self.done = self.battery <= self.battery_cost
return self.state, self.reward, self.done, None
def compute_reward(self, valid):
r = 0
if not valid:
r -= self.collision_penalization
else:
self.current_mu, self.current_std = self.gp.predict(self.possible_locations, return_std=True)
r = (self.previous_mse - (np.square(self.current_mu - self.target_locations)).mean()) / self.previous_mse
self.previous_mse = (np.square(self.current_mu - self.target_locations)).mean()
self.reward = r
def get_action_using_bo(_env):
all_acq = gaussian_ei(_env.possible_locations, _env.gp, np.min(_env.train_targets), xi=1.0)
best_loc = _env.possible_locations[np.where(all_acq == np.max(all_acq))][0]
vect_dist = np.subtract(best_loc, _env.position)
ang = (np.arctan2(vect_dist[0], -vect_dist[1]) + np.pi) / (2 * np.pi)
    # determine the distance and then find the ratio with respect to the max distance (normalize)
dist_ = np.exp(_env.gp.kernel_.theta[0]) * 0.375 / _env.max_step_distance()
if dist_ > 1.0:
dist_ = 1.0
acq_state = np.zeros(_env.map_size)
acq_state[_env.possible_locations[:, 0], _env.possible_locations[:, 1]] = all_acq
# plt.figure()
# plt.imshow(acq_state)
# plt.plot(_env.train_inputs[:, 1], _env.train_inputs[:, 0], 'xr')
# plt.plot(best_loc[1], best_loc[0], '^y')
# plt.plot(_env.position[1], _env.position[0], 'xb')
# action = _env.action2vector([dist_, ang]) + _env.position
# print("best: ", best_loc, "pos : ", _env.position, "dist: ", vect_dist, "next: ", action)
# plt.plot(action[1], action[0], '^b')
return [dist_, ang]
if __name__ == "__main__":
""" Test to check the wall-time for an episode to run and the average number of steps per episode """
my_map = np.genfromtxt('YpacaraiMap_big.csv', delimiter=',').astype(int) / 255
env = ContinuousBO(scenario_map=my_map, resolution=1)
# env.render()
import time
t0 = time.time()
for i in range(100):
env.reset()
d = False
print('Episode ', i)
avg_r_ep = 0
while not d:
a = get_action_using_bo(env)
s, r_, d, _ = env.step(a)
avg_r_ep += r_
if r_ == -10:
print("collision")
# env.render()
print('Number of steps: ', env.step_count)
    print((time.time() - t0) / 100, ' seconds per iteration')
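For a quick sanity check of the action convention, the [distance-ratio, angle-ratio] pair used throughout the environment maps to a pixel displacement exactly as in action2vector; a standalone sketch with an arbitrary 20-pixel step limit:

import numpy as np


def action_to_vector(action, max_step=20.0):
    # Mirrors ContinuousBO.action2vector: scaled distance plus angle given in turns.
    distance = max_step * action[0]
    angle = 2.0 * np.pi * action[1]
    return np.array([-distance * np.sin(angle), distance * np.cos(angle)])


print(action_to_vector([1.0, 0.0]))   # full step along +x (columns): about [0, 20]
print(action_to_vector([0.5, 0.25]))  # half step rotated a quarter turn: about [-10, 0]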
| 38.284314
| 120
| 0.626376
| 9,525
| 0.81306
| 0
| 0
| 0
| 0
| 0
| 0
| 3,924
| 0.334955
|
12a151b9a4e765ed24ceecf3aa9bec0771ac3589
| 5,281
|
py
|
Python
|
utils/metrics.py
|
0b3d/Image-Map-Embeddings
|
a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f
|
[
"MIT"
] | 2
|
2022-02-11T06:05:35.000Z
|
2022-03-14T02:10:31.000Z
|
utils/metrics.py
|
0b3d/Image-Map-Embeddings
|
a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f
|
[
"MIT"
] | null | null | null |
utils/metrics.py
|
0b3d/Image-Map-Embeddings
|
a9fc65ac92094bcfcd0f19a3604f0b9d8bd3174f
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
class NumpyMetrics():
def __init__(self, metric='euclidean'):
self.metric = metric
def rank(self, x,y, x_labels, y_labels):
distances = pairwise_distances(x,y,self.metric)
batch_size = x_labels.shape[0]
sorted_distances_indices = np.argsort(distances, axis=1)
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size))
retrived_labels = np.take(labels_matrix, sorted_distances_indices)
labels_equal = np.equal(np.expand_dims(y_labels,axis=1), retrived_labels)
rank = np.argmax(labels_equal.astype(float), axis=1) + 1
return rank
def elements_by_class(self, x_labels):
'''Count the total of elements of each class in the eval set
Return unique_labels -> A numpy array with the index of the labels
count -> Number of elements of each class in the test set
'''
unique_labels = np.unique(x_labels) # Make and array of unique labels
label_matrix = np.equal(np.expand_dims(unique_labels, axis=1), np.expand_dims(x_labels, axis=0)) #shape [No.classes,1],[1,Eval_size] -> [No_classes,Eval_size]
count = label_matrix.sum(axis=1)
return unique_labels,count
def true_positives(self, distances, x_labels, y_labels, k):
'''
Find the k nearest y given x, then check if the label of y correnspond to x, and accumulate.
'''
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
        tp = np.sum(labels_equal[:,0:k], axis=1) # Appears where it should appear (relevant item retrieved within the top k)
return tp
def false_negative(self, distances, x_labels, y_labels, k):
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
fn = np.sum(labels_equal[:,k:], axis=1)
return fn
def false_positives(self, distances, x_labels, y_labels, k):
        'Retrieved within the top k but should not have been'
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
labels_not_equal = np.logical_not(labels_equal)
fp = np.sum(labels_not_equal[:,0:k], axis=1)
return fp
def precision_at_k(self, x,y, x_labels, y_labels, k):
        ''' The ability of a classifier model to identify only the relevant points.
Precision = true_positives /(true_positives + false_positives) '''
distances = pairwise_distances(x,y,self.metric)
tp = self.true_positives(distances, x_labels, y_labels, k)
#fp = self.false_positives(distances, x_labels, y_labels, k)
fn = self.false_negative(distances, x_labels, y_labels, k)
fp = np.minimum(k - tp, fn)
precision_at_k = tp / (tp + fp)
return precision_at_k
def recall_at_k(self, x, y, x_labels, y_labels, k):
        '''
        Percentage of the total relevant results correctly retrieved by the algorithm.
        The ability of a model to find all relevant cases within a dataset:
        Recall = true_positives / (true_positives + false_negatives)
        Here it reflects the ability of the model to retrieve a relevant pair from one domain given a query from the other domain.
        '''
distances = pairwise_distances(x,y,self.metric)
tp = self.true_positives(distances, x_labels, y_labels, k)
fn = self.false_negative(distances, x_labels, y_labels, k)
fn = np.minimum(fn,k-tp)
recall_at_k = tp / (tp + fn)
return recall_at_k
def average_rank_at_k(self, x, y, labels):
rank = self.rank(x,y,labels, labels)
for k in [1,5,10,20,50,100,500,5000]:
percentage = (rank <= k).sum() / x.shape[0]
            print('  Top {:d}: {:.3f}'.format(k, percentage))
def rank_curve(self, x, y, labels):
rank = self.rank(x,y,labels,labels)
print("Average rank", rank.mean())
count_percentage = np.zeros((x.shape[0]), dtype=float)
for i in range(x.shape[0]):
count_percentage[i] = (rank <= i+1).sum() / x.shape[0]
plt.plot(count_percentage)
plt.show()
plt.waitforbuttonpress()
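# A minimal usage sketch, not part of the original file: it assumes two aligned sets of
# embeddings x and y in which row i of x and row i of y describe the same item, since that
# is how the label matrices above are indexed; the data below is random and purely illustrative.
if __name__ == '__main__':
    # Hypothetical smoke test: 8 aligned pairs of 4-dimensional embeddings.
    rng = np.random.default_rng(0)
    x = rng.normal(size=(8, 4))
    y = x + 0.01 * rng.normal(size=(8, 4))  # y is a slightly perturbed copy of x
    labels = np.arange(8)                   # one label per aligned pair
    metrics = NumpyMetrics(metric='euclidean')
    print('ranks:', metrics.rank(x, y, labels, labels))
    print('mean precision@1:', metrics.precision_at_k(x, y, labels, labels, k=1).mean())
    print('mean recall@5:', metrics.recall_at_k(x, y, labels, labels, k=5).mean())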
| 51.271845
| 166
| 0.667487
| 5,175
| 0.979928
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.263018
|
12a1ccdc2c994161fe55e1738031ece8631b2305
| 693
|
py
|
Python
|
tests/bugs/test-200908181430.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 123
|
2015-01-12T06:43:22.000Z
|
2022-03-20T18:06:46.000Z
|
tests/bugs/test-200908181430.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 103
|
2015-01-08T18:35:57.000Z
|
2022-01-18T01:44:14.000Z
|
tests/bugs/test-200908181430.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 54
|
2015-02-15T17:12:00.000Z
|
2022-03-07T23:02:32.000Z
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="foo"/>
</xs:schema>'''
from pyxb.exceptions_ import *
import unittest
class TestTrac_200908181430 (unittest.TestCase):
def testParsing (self):
self.assertRaises(pyxb.SchemaValidationError, pyxb.binding.generate.GeneratePython, schema_text=xsd)
if __name__ == '__main__':
unittest.main()
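# For contrast, a hedged sketch that is not part of the original test: the empty simpleType
# above is rejected because XSD requires a restriction, list, or union child; adding one
# should let GeneratePython succeed.
xsd_ok = '''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
  <xs:simpleType name="foo">
    <xs:restriction base="xs:string"/>
  </xs:simpleType>
</xs:schema>'''
# Expected (assumption, not asserted by the original test): no SchemaValidationError here.
# code = pyxb.binding.generate.GeneratePython(schema_text=xsd_ok)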
| 25.666667
| 108
| 0.735931
| 185
| 0.266955
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.268398
|
12a26d1b84cfd62fa98cec13a5aa4a115ddadb78
| 779
|
py
|
Python
|
bin/print_data_structure.py
|
JohanComparat/pyEmerge
|
9b5bfa01959d48ea41221609b8f375f27e3e39ff
|
[
"Unlicense"
] | null | null | null |
bin/print_data_structure.py
|
JohanComparat/pyEmerge
|
9b5bfa01959d48ea41221609b8f375f27e3e39ff
|
[
"Unlicense"
] | null | null | null |
bin/print_data_structure.py
|
JohanComparat/pyEmerge
|
9b5bfa01959d48ea41221609b8f375f27e3e39ff
|
[
"Unlicense"
] | null | null | null |
import sys
ii = int(sys.argv[1])
env = sys.argv[2]
# python3 print_data_structure.py 22 MD10
import glob
import os
import numpy as n
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(ii, env)
iterate.open_snapshots()
def print_attr(h5item):
for attr in h5item:
print(attr, h5item[attr])
def print_all_key(h5item):
for key in h5item.keys():
print('========================================')
print(key, h5item[key])
print('- - - - - - - - - - - - - - - - - - - - ')
print_attr(h5item[key])
def print_data_structure(h5item):
print('+ + + + + + + HEADER + + + + + + + + +')
print_attr(h5item.attrs)
print('\n')
print('+ + + + + + + DATA + + + + + + + + + +')
print_all_key(h5item)
print_data_structure(iterate.f0)
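# A minimal standalone sketch, not part of the original script: the helper functions above
# only rely on .attrs and .keys(), so they can be pointed at any HDF5 file opened with h5py;
# 'snapshot.h5' is a hypothetical file name, and the file is assumed to contain groups at
# the top level, as the Emerge snapshots are assumed to.
import h5py
with h5py.File('snapshot.h5', 'r') as f:  # hypothetical file name
    print_data_structure(f)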
| 23.606061
| 55
| 0.56611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.27086
|
12a383eaf645019cefa1dc9f3842290ed2752e23
| 1,999
|
py
|
Python
|
setup.py
|
ljdursi/mergevcf
|
b400385936417c6e517d3c7daec8b9ca6389c51f
|
[
"MIT"
] | 25
|
2015-06-22T15:30:32.000Z
|
2021-05-13T14:59:18.000Z
|
setup.py
|
ljdursi/mergevcf
|
b400385936417c6e517d3c7daec8b9ca6389c51f
|
[
"MIT"
] | 7
|
2015-08-14T11:20:35.000Z
|
2021-05-18T17:48:38.000Z
|
setup.py
|
ljdursi/mergevcf
|
b400385936417c6e517d3c7daec8b9ca6389c51f
|
[
"MIT"
] | 6
|
2017-04-17T18:35:43.000Z
|
2018-05-15T21:47:13.000Z
|
# based on https://github.com/pypa/sampleproject
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='mergevcf',
version='1.0.1',
description='Merge VCF calls',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ljdursi/mergevcf',
# Author details
author='Jonathan Dursi',
author_email='Jonathan.Dursi@oicr.on.ca',
# Choose your license
license='GPL',
classifiers=[
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
keywords='merge vcfs',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['pyvcf'],
test_suite='tests',
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
entry_points={
'console_scripts': [
'mergevcf=mergevcf:main',
],
},
)
| 26.653333
| 85
| 0.617309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,250
| 0.625313
|
12a4188c00b7c8a1abdb2f2f512a6ed7085ea497
| 1,291
|
py
|
Python
|
tests/test_coders.py
|
GlobalFishingWatch/pipe-tools
|
34dff591997bb2c25e018df86d13a9d42972032b
|
[
"Apache-2.0"
] | 1
|
2018-05-26T20:10:51.000Z
|
2018-05-26T20:10:51.000Z
|
tests/test_coders.py
|
GlobalFishingWatch/pipe-tools
|
34dff591997bb2c25e018df86d13a9d42972032b
|
[
"Apache-2.0"
] | 37
|
2017-10-22T12:00:59.000Z
|
2022-02-08T19:17:58.000Z
|
tests/test_coders.py
|
GlobalFishingWatch/pipe-tools
|
34dff591997bb2c25e018df86d13a9d42972032b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import six
import ujson
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline as _TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.coders import typecoders
from apache_beam.typehints import Dict, Union
from pipe_tools.coders import JSONDictCoder
from pipe_tools.coders import JSONDict
from pipe_tools.generator import MessageGenerator
class MyType():
pass
@pytest.mark.filterwarnings('ignore:Using fallback coder:UserWarning')
@pytest.mark.filterwarnings('ignore:The compiler package is deprecated and removed in Python 3.x.:DeprecationWarning')
class TestCoders():
def test_JSONDictCoder(self):
records = [
{},
{'a': 1, 'b': 2, 'c': None},
{"test":None},
]
coder = JSONDictCoder()
for r in records:
assert r == coder.decode(coder.encode(r))
def test_type_hints(self):
messages = MessageGenerator()
source = beam.Create(messages)
assert source.get_output_type() == Dict[six.binary_type, Union[float, int]]
with _TestPipeline() as p:
result = (
p | beam.Create(messages)
)
p.run()
| 26.346939
| 118
| 0.676995
| 635
| 0.491867
| 0
| 0
| 801
| 0.620449
| 0
| 0
| 145
| 0.112316
|