max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
generate_visualizations.py | IsaiahPressman/Kaggle_Santa_2020 | 0 | 12759651 | from jupyterthemes import jtplot
import numpy as np
import os
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import filters
from textwrap import wrap
import torch
import vectorized_agents as va
import vectorized_env as ve
jtplot.style()
DEVICE = torch.device('cuda')
if DEVICE == torch.device('cpu'):
os.environ['OMP_NUM_THREADS'] = '4'
n_envs = 50
else:
os.environ['OMP_NUM_THREADS'] = '8'
n_envs = 200
ENV_KWARGS = dict(
n_envs=n_envs,
env_device=DEVICE,
out_device=DEVICE,
reward_type=ve.EVERY_STEP_EV_ZEROSUM
)
# Saved-model names that make up the multi-observation ensembles below.
all_ensemble_names = ['a3c_agent_small_8_32', 'awac_agent_small_8_64_32_1_norm', 'a3c_agent_small_8_64_32_2']

# Candidate agents to evaluate as player 1.  The commented-out entries
# are alternative ensemble configurations kept around for experiments.
PLAYER_1s = [
    #va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=False, deterministic_policy=True),
    #va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=False, deterministic_policy=True),
    #va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=False, deterministic_policy=True),
    va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=False, deterministic_policy=True),
    #va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=True, deterministic_policy=True),
    #va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=True, deterministic_policy=True),
    ### LEFT OFF HERE:
    #va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=True, deterministic_policy=True),
    va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=True, deterministic_policy=True),
    # va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
    # va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
    # va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
    # va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', device=DEVICE, deterministic_policy=True),
    # va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
    # va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False)
]
# Benchmark opponents for player 2: heuristic baselines plus previously
# saved RL agents and ensembles.
PLAYER_2s = [
    va.BasicThompsonSampling(),
    va.PullVegasSlotMachines(),
    va.PullVegasSlotMachinesImproved(),
    va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
    # NOTE(review): no device= argument here, unlike its siblings -- this
    # relies on SavedRLAgent's default device; confirm it is intentional.
    va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', deterministic_policy=True),
    va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False),
    #va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
    va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
    #va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
]
def wrap_title(title):
    """Hard-wrap a plot title into lines of at most 55 characters."""
    wrapped_lines = wrap(title, 55, break_long_words=True)
    return '\n'.join(wrapped_lines)
if __name__ == '__main__':
    # Play every PLAYER_1 candidate against every PLAYER_2 benchmark and
    # save a 2x2 figure of diagnostic plots per matchup.
    for player_1 in PLAYER_1s:
        for player_2 in PLAYER_2s:
            if player_1 == player_2:
                continue
            # p1_score is player_1's winrate; rewards_over_time holds the
            # per-step expected advantage (steps x games after squeeze).
            p1_score, rewards_over_time = va.run_vectorized_vs(player_1, player_2, display_out=True, **ENV_KWARGS)
            rewards_over_time = rewards_over_time.cpu().numpy().squeeze()
            # Axis limits for the cumulative / per-step advantage plots.
            cumulative_ymax = 10
            expected_ymax = 0.10
            # Percentiles 0, 10, ..., 100 used by the spread plots.
            q = np.linspace(0., 100., 11)
            colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
            n_rows = 2
            n_cols = 2
            fig, axes = plt.subplots(n_rows, n_cols, figsize=(8 * n_cols, 8 * n_rows))
            fig_title = (f'{player_1.name} -vs- {player_2.name}\n'
                         f'{p1_score * 100.:.2f}% winrate over {rewards_over_time.shape[1]} games')
            fig.suptitle(wrap_title(fig_title))
            axes = axes.ravel()
            # Plot 1: cumulative expected advantage, averaged over games.
            axes[0].plot(np.cumsum(rewards_over_time.mean(axis=1)))
            axes[0].set_ylim((-cumulative_ymax, cumulative_ymax))
            axes[0].set_title(wrap_title(f"{player_1.name} cumulative expected advantage"))
            # Plot 2: per-step advantage, smoothed with a moving average.
            window_size = 50
            axes[1].plot(filters.uniform_filter1d(rewards_over_time.mean(axis=1), window_size, mode='constant'))
            axes[1].set_ylim((-expected_ymax, expected_ymax))
            axes[1].set_title(wrap_title(f"{player_1.name} per-step expected advantage"))
            # Plot 3: cumulative advantage percentiles across games.
            for i, quantile, val in zip(
                    range(len(q)),
                    q,
                    np.percentile(np.cumsum(rewards_over_time,
                                            axis=0),
                                  q, axis=1)):
                # Mirror colors around the median and fade the extremes.
                color_idx = int(abs((len(q) - 1.) / 2. - i))
                axes[2].plot(val, label=f'Percentile: {quantile:.0f}',
                             color=colors[color_idx],
                             alpha=1. / (color_idx + 1),
                             # linewidth=3./(color_idx+1)
                             )
            axes[2].set_ylim((-cumulative_ymax * 5, cumulative_ymax * 5))
            if len(q) <= 5:
                axes[2].legend()
            axes[2].set_title(wrap_title(f"{player_1.name} cumulative expected advantage (percentiles)"))
            # Plot 4: smoothed per-step advantage percentiles across games.
            for i, quantile, val in zip(
                    range(len(q)),
                    q,
                    np.percentile(filters.uniform_filter1d(rewards_over_time,
                                                           window_size * 5,
                                                           mode='reflect',
                                                           axis=0),
                                  q, axis=1)):
                color_idx = int(abs((len(q) - 1.) / 2. - i))
                axes[3].plot(val, label=f'Percentile: {quantile:.0f}',
                             color=colors[color_idx],
                             alpha=1. / (color_idx + 1),
                             # linewidth=3./(color_idx+1)
                             )
            axes[3].set_ylim((-expected_ymax, expected_ymax))
            if len(q) <= 5:
                axes[3].legend()
            axes[3].set_title(wrap_title(f"{player_1.name} per-step expected advantage over time (percentiles)"))
            plt.tight_layout(rect=[0., 0., 1., 0.9])
            # Build short, filesystem-friendly names for both players.
            p_names_abbrev = []
            for p in (player_1, player_2):
                if type(p) == va.SavedRLAgent:
                    p_names_abbrev.append(p.agent_name)
                    if p.name.endswith('_deterministic'):
                        p_names_abbrev[-1] += '_deterministic'
                    else:
                        p_names_abbrev[-1] += '_stochastic'
                elif type(p) in (va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
                    if type(p) == va.SavedRLAgentEnsemble:
                        p_names_abbrev.append(f'ensemble_{p.ensemble_name}')
                    else:
                        p_names_abbrev.append(f'multiObsEnsemble_{p.ensemble_name}')
                    if p.ensemble_model.weight_logits:
                        p_names_abbrev[-1] += '_weight_logits'
                    else:
                        p_names_abbrev[-1] += '_weight_probs'
                    if p.name.endswith('_deterministic'):
                        p_names_abbrev[-1] += '_deterministic'
                    else:
                        p_names_abbrev[-1] += '_stochastic'
                else:
                    p_names_abbrev.append(p.name)
            save_fig_title = f'{p_names_abbrev[0]}__{p_names_abbrev[1]}'
            # Saved RL agents get a per-agent subfolder under saved_figures.
            if type(player_1) in (va.SavedRLAgent, va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
                save_fig_folder = f'saved_figures/{p_names_abbrev[0]}'
            else:
                save_fig_folder = 'saved_figures'
            # NOTE(review): mkdir(exist_ok=True) does not create parents --
            # assumes 'saved_figures' already exists when a subfolder is used.
            Path(save_fig_folder).mkdir(exist_ok=True)
            fig.savefig(f'{save_fig_folder}/{save_fig_title}.png', dpi=100)
            plt.close(fig)
| 1.742188 | 2 |
ml-progress-bot/ckpt_learning_curves.py | yell/kaggle-camera | 6 | 12759652 | <reponame>yell/kaggle-camera<gh_stars>1-10
import os
import glob
import torch
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def latest_ckpt_learning_curves(dirpath, **kwargs):
    """Plot learning curves for the most recently created ``*.ckpt`` file.

    Parameters
    ----------
    dirpath : str
        Directory to search for checkpoint files.
    **kwargs
        Forwarded to :func:`ckpt_learning_curves`.

    Returns
    -------
    Last validation accuracy reported by :func:`ckpt_learning_curves`.

    Raises
    ------
    FileNotFoundError
        If *dirpath* contains no ``*.ckpt`` files.  (Previously ``max()``
        raised an opaque ``ValueError: max() arg is an empty sequence``.)
    """
    ckpts = glob.glob(os.path.join(dirpath, '*.ckpt'))
    if not ckpts:
        raise FileNotFoundError(
            "no '*.ckpt' files found in {!r}".format(dirpath))
    # Pick the newest checkpoint by filesystem creation time.
    last_ckpt = max(ckpts, key=os.path.getctime)
    return ckpt_learning_curves(last_ckpt, **kwargs)
def ckpt_learning_curves(ckpt_path, b=8, gamma=0., **plt_learning_curves_kwargs):
    """Load a training checkpoint and plot its learning curves.

    Parameters
    ----------
    ckpt_path : str
        Path to a ``torch.save``-d checkpoint with keys 'train_loss',
        'train_acc', 'val_loss' and 'val_acc'.
    b : int
        Number of recorded training batches per validation epoch; the
        training history is averaged in groups of *b*.
    gamma : float
        EMA smoothing factor passed to :func:`ema`; 0 disables smoothing.
    **plt_learning_curves_kwargs
        Forwarded to :func:`plot_learning_curves` ('min_acc' and
        'filepath' get defaults if not supplied).

    Returns
    -------
    The last (smoothed) validation accuracy.
    """
    checkpoint = torch.load(ckpt_path, map_location={'cuda:0': 'cpu'})
    train_loss_history = checkpoint['train_loss']
    train_acc_history = checkpoint['train_acc']
    val_loss_history = [np.mean(v) for v in checkpoint['val_loss']]
    val_acc_history = checkpoint['val_acc']
    # Keep only complete validation epochs.  Floor division is required:
    # the original ``len(...) / b`` returns a float on Python 3, which
    # breaks both the slices and the reshape below.
    n = len(train_loss_history) // b
    train_loss_history = train_loss_history[:b * n]
    train_acc_history = train_acc_history[:b * n]
    val_loss_history = val_loss_history[:n]
    val_acc_history = val_acc_history[:n]
    # Average each batch's recorded losses, then average groups of b
    # batches down to one value per epoch.
    train_loss_history = [np.mean(v) for v in train_loss_history]
    train_loss_history = np.asarray(train_loss_history).reshape(-1, b).mean(axis=1).tolist()
    # train_loss_history = [[v] for v in train_loss_history]
    train_acc_history = np.asarray(train_acc_history).reshape(-1, b).mean(axis=1).tolist()
    # Smooth all four curves; plot_learning_curves expects the training
    # loss as a list of per-epoch lists, hence the [[v] ...] wrapping.
    train_loss_history = [[v] for v in ema(train_loss_history, gamma)]
    train_acc_history = ema(train_acc_history, gamma)
    val_loss_history = ema(val_loss_history, gamma)
    val_acc_history = ema(val_acc_history, gamma)
    plt_learning_curves_kwargs.setdefault('min_acc', 0.1)
    plt_learning_curves_kwargs.setdefault('filepath', 'learning_curves.png')
    # noinspection PyTypeChecker
    plot_learning_curves(train_loss_history, train_acc_history,
                         val_loss_history, val_acc_history, **plt_learning_curves_kwargs)
    return val_acc_history[-1]
def ema(w, gamma=0.9):
    """Return the exponential moving average of sequence *w*.

    ``gamma`` is the smoothing factor: 0 returns *w* unchanged, values
    close to 1 smooth heavily.  *w* must be non-empty.

    Fix: the original used ``xrange``, which does not exist on Python 3
    (NameError); iterate over the tail of the sequence directly instead.
    """
    w_ema = [w[0]]
    for value in w[1:]:
        w_ema.append(gamma * w_ema[-1] + (1. - gamma) * value)
    return w_ema
def plot_learning_curves(l, a, vl, va, last_epochs=None, min_loss=0., max_loss=25., min_acc=0.4, filepath=None):
    """Plot train/validation loss and accuracy on a shared epoch axis.

    Parameters
    ----------
    l : list of list of float
        Per-epoch training losses; each entry holds that epoch's
        per-batch values (entries are assumed to share one length).
    a : list of float
        Per-epoch training accuracies.
    vl : list of float
        Per-epoch validation losses.
    va : list of float
        Per-epoch validation accuracies.
    last_epochs : int or None
        If given, only the most recent ``last_epochs`` epochs are shown.
    min_loss, max_loss : float
        Bounds applied to the loss axis.
    min_acc : float
        Lower bound applied to the accuracy axis.
    filepath : str or None
        If given, the figure is saved there ('.png' appended if missing).
    """
    n_batches = len(l[0])
    n_epochs = len(l)
    # x: one point per epoch; z: one point per training batch.  z has
    # (n_epochs - 1) * n_batches points because epoch 0 is skipped when
    # the batch-level training-loss curve is plotted below.
    x = np.linspace(1., n_epochs, n_epochs, endpoint=True)
    z = np.linspace(1., n_epochs, (n_epochs - 1) * n_batches, endpoint=True)
    if last_epochs:
        l = l[-last_epochs:]
        a = a[-last_epochs:]
        vl = vl[-last_epochs:]
        va = va[-last_epochs:]
        x = x[-last_epochs:]
        z = z[-((last_epochs - 1) * n_batches):]
    plt.close("all")
    from matplotlib import rcParams
    rcParams.update({'figure.autolayout': True})
    sns.set_style("whitegrid")
    fig, ax = plt.subplots(figsize=(20, 10))
    # Second y-axis for accuracy, sharing the epoch x-axis.
    ax2 = ax.twinx()
    # Paint the loss axis' horizontal gridlines white (effectively hidden).
    yl = ax.get_ygridlines()
    for yl_ in yl:
        yl_.set_color('w')
    # Per-epoch mean training loss (only used by the commented-out plot).
    l_mean = [np.mean(l_) for l_ in l]
    # Point markers only for short histories, to avoid clutter.
    marker = 'o' if len(vl) < 100 else None
    L1 = ax.plot(z, np.concatenate(l[1:]), color='#5a053f', lw=2, label='training loss')
    # ax.plot(x, l_mean, color='r', lw=2, marker='o', label='training loss mean')
    L2 = ax.plot(x, vl, color='#e6155a', lw=2, marker=marker, label='validation loss')
    # Cap the loss axis at max_loss or the observed maximum (epoch 0 of
    # both histories is excluded from the bound).
    ax.set_ylim([min_loss, min(max_loss, max(max(max(l[1:])), max(vl[1:])))])
    ax.set_xlim([1, n_epochs])
    L3 = ax2.plot(x, a, color='#124f90', lw=2, marker=marker, label='training accuracy')
    L4 = ax2.plot(x, va, color='#6dbb30', lw=2, marker=marker, label='validation accuracy')
    ax2.set_ylim([max(min_acc, min(min(a), min(va))), 1.])
    # Draw a solid black frame around the plot area.
    ax2.spines['left'].set_color('black')
    ax2.spines['left'].set_linewidth(2)
    ax2.spines['right'].set_color('black')
    ax2.spines['right'].set_linewidth(2)
    ax2.spines['top'].set_color('black')
    ax2.spines['top'].set_linewidth(2)
    ax2.spines['bottom'].set_color('black')
    ax2.spines['bottom'].set_linewidth(2)
    ax.tick_params(labelsize=16)
    ax2.tick_params(labelsize=16)
    ax.set_title('Learning curves', fontsize=27, weight='bold', y=1.03)
    ax.set_ylabel('loss', fontsize=23)
    ax2.set_ylabel('accuracy', fontsize=23)
    ax.set_xlabel('epoch', fontsize=23)
    # Merge the legends of both y-axes into one box.
    Ls = L1 + L2 + L3 + L4
    labs = [l.get_label() for l in Ls]
    leg = plt.legend(Ls, labs, loc='lower left', fontsize=18, frameon=True)
    leg.get_frame().set_edgecolor('k')
    leg.get_frame().set_linewidth(2)
    if filepath:
        if not filepath.endswith('.png'):
            filepath += '.png'
        fig.savefig(filepath, dpi=fig.dpi)
    plt.close(fig)
if __name__ == '__main__':
    # Example invocation against a locally stored checkpoint.
    ckpt_learning_curves('../kaggle-camera/models/0.8826-1384.ckpt')
| 1.992188 | 2 |
generate_tree.py | cfpb/regulations-parser | 36 | 12759653 | import codecs
import sys
from regparser.tree.appendix.tree import trees_from as appendix_trees
from regparser.tree.interpretation import build as build_interp_tree
from regparser.tree.reg_text import build_reg_text_tree
from regparser.tree.struct import NodeEncoder
from regparser.tree.supplement import find_supplement_start
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: python generate_tree.py path/to/reg.txt part"
print " e.g.: python generate_tree.py rege.txt 1005"
exit()
with codecs.open(sys.argv[1], encoding='utf-8') as f:
reg = unicode(f.read())
interp = reg[find_supplement_start(reg):]
part = int(sys.argv[2])
reg_tree = build_reg_text_tree(reg, part)
interp_tree = build_interp_tree(interp, part)
appendix_trees = appendix_trees(reg, part, reg_tree.label)
reg_tree.children.extend(appendix_trees)
reg_tree.children.append(interp_tree)
print NodeEncoder().encode(reg_tree)
| 2.5625 | 3 |
tests/test_common.py | liminleitt/phausamanne | 94 | 12759654 | from unittest import TestCase
import numpy as np
import xarray as xr
from xarray.testing import assert_equal, assert_allclose
import numpy.testing as npt
from sklearn_xarray import wrap
from sklearn.base import clone
from sklearn.preprocessing import StandardScaler, KernelCenterer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVC
from tests.mocks import (
DummyEstimator,
DummyTransformer,
ReshapingEstimator,
)
class EstimatorWrapperTests(TestCase):
    """Tests of the wrapper's attribute/parameter handling internals."""

    def setUp(self):
        # Synthetic Dataset: a 2-D and a 3-D variable sharing the
        # 'sample' and 'feat_1' dims, plus a 2-D 'dummy' coordinate.
        self.X = xr.Dataset(
            {
                "var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
                "var_3d": (
                    ["sample", "feat_1", "feat_2"],
                    np.random.random((100, 10, 10)),
                ),
            },
            {
                "sample": range(100),
                "feat_1": range(10),
                "feat_2": range(10),
                "dummy": (["sample", "feat_1"], np.random.random((100, 10))),
            },
        )

    def test_update_restore_dims(self):
        # Wrapper collapsing ('feat_1', 'feat_2') into a new 'feature'
        # dim; _update_dims/_restore_dims should round-trip the dim names.
        estimator = wrap(
            ReshapingEstimator(new_shape=(-1, 0, 5)),
            reshapes={"feature": ["feat_1", "feat_2"]},
        )
        X = self.X.var_3d
        estimator.fit(X)
        X_out = estimator.estimator_.transform(X.values)
        dims_new = estimator._update_dims(X, X_out)
        Xt = xr.DataArray(X_out, dims=dims_new)
        assert dims_new == ["sample", "feature"]
        Xr_out = estimator.estimator_.inverse_transform(X_out)
        dims_old = estimator._restore_dims(Xt, Xr_out)
        assert dims_old == ["sample", "feat_1", "feat_2"]

    def test_update_coords(self):
        # TODO: coordinate updating is not covered yet.
        pass

    def test_params(self):
        # get_params of the wrapper should be the estimator's params plus
        # the wrapper-specific keys.
        estimator = StandardScaler(with_mean=False)
        params = estimator.get_params()
        params.update(
            {"estimator": estimator, "reshapes": None, "sample_dim": None}
        )
        # check params set in constructor
        wrapper = wrap(estimator)
        self.assertEqual(wrapper.get_params(), params)
        self.assertEqual(wrapper.with_mean, False)
        # check params set by attribute
        wrapper.with_std = False
        params.update({"with_std": False})
        self.assertEqual(wrapper.get_params(), params)
        # check params set with set_params
        wrapper.set_params(copy=False)
        params.update({"copy": False})
        self.assertEqual(wrapper.get_params(), params)

    def test_attributes(self):
        # Fitted attributes (mean_) should be forwarded from the wrapped
        # estimator regardless of the input type.
        estimator = wrap(StandardScaler())
        # check pass-through wrapper
        estimator.fit(self.X.var_2d.values)
        npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
        # check DataArray wrapper
        estimator.fit(self.X.var_2d)
        npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
        # check Dataset wrapper
        estimator.fit(self.X.var_2d.to_dataset())
        npt.assert_allclose(
            estimator.mean_["var_2d"],
            estimator.estimator_dict_["var_2d"].mean_,
        )
class PublicInterfaceTests(TestCase):
    """End-to-end tests of the wrapper's public fit/predict/transform API."""

    def setUp(self):
        # Synthetic Dataset: a 2-D and a 3-D variable plus a 2-D 'dummy'
        # coordinate (used to check coordinate propagation).
        self.X = xr.Dataset(
            {
                "var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
                "var_3d": (
                    ["sample", "feat_1", "feat_2"],
                    np.random.random((100, 10, 10)),
                ),
            },
            {
                "sample": range(100),
                "feat_1": range(10),
                "feat_2": range(10),
                "dummy": (["sample", "feat_1"], np.random.random((100, 10))),
            },
        )

    def test_dummy_estimator(self):
        # Identity estimator: predictions should equal the input.
        estimator = wrap(DummyEstimator())
        # test DataArray
        X_da = self.X.var_2d
        estimator.fit(X_da)
        yp = estimator.predict(X_da)
        assert_equal(yp, X_da)
        # test Dataset
        X_ds = self.X
        estimator.fit(X_ds)
        yp = estimator.predict(X_ds)
        assert_equal(yp, X_ds)

    def test_dummy_transformer(self):
        # Identity transformer: transform should equal the input.
        estimator = wrap(DummyTransformer())
        # test DataArray
        X_da = self.X.var_2d
        estimator.fit(X_da)
        yp = estimator.transform(X_da)
        assert_equal(yp, X_da)
        # test Dataset
        X_ds = self.X
        estimator.fit(X_ds)
        yp = estimator.transform(X_ds)
        assert_equal(yp, X_ds)

    def test_wrapped_transformer(self):
        # transform followed by inverse_transform should round-trip.
        estimator = wrap(StandardScaler())
        # test DataArray
        X_da = self.X.var_2d
        estimator.partial_fit(X_da)
        assert_allclose(
            X_da, estimator.inverse_transform(estimator.transform(X_da))
        )
        # test Dataset
        X_ds = self.X.var_2d.to_dataset()
        estimator.fit(X_ds)
        assert_allclose(
            X_ds, estimator.inverse_transform(estimator.transform(X_ds))
        )

    def test_ndim_dummy_estimator(self):
        # Identity estimator on a 3-D variable.
        estimator = wrap(DummyEstimator())
        # test DataArray
        X_da = self.X.var_3d
        estimator.fit(X_da)
        yp = estimator.predict(X_da)
        assert_equal(yp, X_da)
        # test Dataset
        X_ds = self.X
        estimator.fit(X_ds)
        yp = estimator.predict(X_ds)
        assert_equal(yp, X_ds)

    def test_reshaping_estimator(self):
        # Estimator shrinking 'feat_1' from 10 to 2 columns; the 'dummy'
        # coordinate must be reduced accordingly.
        estimator = wrap(
            ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
        )
        # test DataArray
        X_da = self.X.var_2d
        y = X_da[:, :2].drop("feat_1")
        y["dummy"] = y.dummy[:, 0]
        estimator.fit(X_da)
        yp = estimator.predict(X_da)
        assert_allclose(yp, y)
        # test Dataset
        X_ds = self.X.var_2d.to_dataset()
        y = X_ds.var_2d[:, :2].drop("feat_1")
        y["dummy"] = y.dummy[:, 0]
        estimator.fit(X_ds)
        yp = estimator.predict(X_ds).var_2d
        assert_allclose(yp, y)

    def test_reshaping_transformer(self):
        estimator = wrap(
            ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
        )
        # test DataArray
        # NOTE(review): fits the 3-D variable with a 2-D new_shape --
        # confirm this matches ReshapingEstimator's intended semantics.
        X_da = self.X.var_3d
        y = X_da[:, :2].drop("feat_1")
        y["dummy"] = y.dummy[:, 0]
        estimator.fit(X_da)
        yp = estimator.transform(X_da)
        assert_allclose(yp, y)
        # test Dataset
        X_ds = self.X.var_2d.to_dataset()
        y = X_ds.var_2d[:, :2].drop("feat_1")
        y["dummy"] = y.dummy[:, 0]
        estimator.fit(X_ds)
        yp = estimator.transform(X_ds).var_2d
        assert_allclose(yp, y)

    def test_reshaping_estimator_singleton(self):
        # new_shape=(-1, 0) collapses 'feat_1' away entirely.
        estimator = wrap(
            ReshapingEstimator(new_shape=(-1, 0)), reshapes="feat_1"
        )
        # test DataArray
        X_da = self.X.var_2d
        y = X_da[:, 0].drop("feat_1")
        estimator.fit(X_da)
        yp = estimator.predict(X_da)
        assert_allclose(yp, y)
        # test Dataset
        X_ds = self.X
        y = X_ds.var_2d[:, 0].drop("feat_1")
        estimator.fit(X_ds)
        yp = estimator.predict(X_ds).var_2d
        assert_allclose(yp, y)

    def test_ndim_reshaping_estimator(self):
        # Collapse ('feat_1', 'feat_2') into a single 'feature' dim of 5.
        estimator = wrap(
            ReshapingEstimator(new_shape=(-1, 5, 0)),
            reshapes={"feature": ["feat_1", "feat_2"]},
        )
        # test DataArray
        X_da = self.X.var_3d
        Xt = (
            X_da[:, :5, 0]
            .drop(["feat_1", "feat_2"])
            .rename({"feat_1": "feature"})
        )
        Xt["dummy"] = Xt.dummy[:, 0]
        estimator.fit(X_da)
        Xt_da = estimator.transform(X_da)
        estimator.inverse_transform(Xt_da)
        assert_allclose(Xt_da, Xt)
        # test Dataset
        X_ds = self.X.var_3d.to_dataset()
        y = X_ds.var_3d[:, :5, 0].drop(["feat_1", "feat_2"])
        y = y.rename({"feat_1": "feature"})
        y["dummy"] = y.dummy[:, 0]
        estimator.fit(X_ds)
        yp = estimator.predict(X_ds).var_3d
        assert_allclose(yp, y)

    def test_sample_dim(self):
        # PCA reduces 'feat_1' from 10 to 5 components; only the output
        # shapes are checked here.
        from sklearn.decomposition import PCA
        estimator = wrap(
            PCA(n_components=5), reshapes="feat_1", sample_dim="sample"
        )
        # test DataArray
        X_da = self.X.var_2d
        Xt_da = estimator.fit_transform(X_da)
        Xr_da = estimator.inverse_transform(Xt_da)
        npt.assert_equal(Xt_da.shape, (100, 5))
        npt.assert_equal(Xr_da.shape, (100, 10))
        # test Dataset
        X_ds = self.X.var_2d.to_dataset()
        Xt = estimator.fit_transform(X_ds)
        npt.assert_equal(Xt.var_2d.shape, (100, 5))

    def test_score(self):
        # Smoke test: fit/score must run on both input types.
        from sklearn.linear_model import LinearRegression
        estimator = wrap(LinearRegression, reshapes="feat_1")
        # test DataArray
        X_da = self.X.var_2d
        y = np.random.random(100)
        estimator.fit(X_da, y)
        estimator.score(X_da, y)
        # test Dataset
        X_ds = self.X.var_2d.to_dataset()
        wrapper = estimator.fit(X_ds, y)
        wrapper.score(X_ds, y)

    def test_partial_fit(self):
        # Once fitted on one input type, partial_fit must reject others.
        estimator = wrap(StandardScaler())
        # check pass-through wrapper
        estimator.partial_fit(self.X.var_2d.values)
        assert hasattr(estimator, "mean_")
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X.var_2d)
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X)
        # check DataArray wrapper
        estimator = clone(estimator)
        estimator.partial_fit(self.X.var_2d)
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X.var_2d.values)
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X)
        assert hasattr(estimator, "mean_")
        # check Dataset wrapper
        estimator = clone(estimator)
        estimator.partial_fit(self.X.var_2d.to_dataset())
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X.var_2d.values)
        with self.assertRaises(ValueError):
            estimator.partial_fit(self.X.var_2d)
        assert hasattr(estimator, "mean_")
def test_classifier():
    """Smoke-test wrapping of sklearn classifiers."""
    # wrappers don't pass check_estimator anymore because estimators
    # "should not set any attribute apart from parameters during init"
    wrapped_lr = wrap(LogisticRegression)
    for expected_attr in ("predict", "decision_function"):
        assert hasattr(wrapped_lr, expected_attr)

    wrapped_lr = wrap(LogisticRegression)
    assert hasattr(wrapped_lr, "C")

    wrapped_svc = wrap(SVC(probability=True))
    # check_estimator(svc_proba) fails because the wrapper is not excluded
    # from tests that are known to fail for SVC...
    for expected_attr in ("predict_proba", "predict_log_proba"):
        assert hasattr(wrapped_svc, expected_attr)
def test_regressor():
    """Smoke-test wrapping of sklearn regressors."""
    compat_wrapper = wrap(LinearRegression, compat=True)
    for expected_attr in ("predict", "score"):
        assert hasattr(compat_wrapper, expected_attr)

    plain_wrapper = wrap(LinearRegression)
    assert hasattr(plain_wrapper, "normalize")
def test_transformer():
    """Smoke-test wrapping of sklearn transformers."""
    wrap(KernelCenterer, compat=True)

    centerer = wrap(KernelCenterer)
    assert hasattr(centerer, "transform")

    scaler = wrap(StandardScaler)
    # check_estimator(ss) fails because the wrapper is not excluded
    # from tests that are known to fail for StandardScaler...
    for expected_attr in ("partial_fit", "inverse_transform", "fit_transform"):
        assert hasattr(scaler, expected_attr)
| 2.46875 | 2 |
test/on_yubikey/test_cli_openpgp.py | amake/yubikey-manager | 0 | 12759655 | import unittest
from .framework import cli_test_suite
@cli_test_suite
def additional_tests(ykman_cli):
    """Build OpenPGP CLI test cases bound to the given ``ykman_cli`` runner.

    Returns a list of ``unittest.TestCase`` subclasses whose methods drive
    the CLI through the closed-over ``ykman_cli`` callable.
    """
    class TestOpenPGP(unittest.TestCase):
        def setUp(self):
            # Start every test from factory-default OpenPGP state.
            ykman_cli('openpgp', 'reset', '-f')

        def test_openpgp_info(self):
            output = ykman_cli('openpgp', 'info')
            self.assertIn('OpenPGP version:', output)

        def test_openpgp_reset(self):
            output = ykman_cli('openpgp', 'reset', '-f')
            self.assertIn(
                'Success! All data has been cleared and default PINs are set.',
                output)

    return [TestOpenPGP]
| 2.625 | 3 |
hashes.py | SortAnon/PonySorter | 3 | 12759656 | <reponame>SortAnon/PonySorter<filename>hashes.py
# Number of processed episodes per season of Friendship is Magic.
# Season 9 currently stops at episode 21 (episodes 22-26 are pending).
_EPISODES_PER_SEASON = {
    1: 26, 2: 26, 3: 13, 4: 26, 5: 26, 6: 26, 7: 26, 8: 26, 9: 21,
}

# Maps internal episode/movie ids (e.g. "fim_s01e01") to human-readable
# display names.  Generated from the table above instead of 200+ literal
# entries; insertion order (seasons ascending, episodes ascending, then
# the Equestria Girls movies) matches the original hand-written dict.
friendly_names = {
    "fim_s%02de%02d" % (season, episode): "Season %d, Episode %d" % (season, episode)
    for season, n_episodes in sorted(_EPISODES_PER_SEASON.items())
    for episode in range(1, n_episodes + 1)
}
friendly_names.update({
    "eqg_original_movie": "Equestria Girls (Original)",
    "eqg_friendship_games": "Equestria Girls: Friendship Games",
    "eqg_legend_of_everfree": "Equestria Girls: Legend of Everfree",
    "eqg_rainbow_rocks": "Equestria Girls: Rainbow Rocks",
})
# SHA-1 digests (40-char lowercase hex) of the "original" audio/video releases,
# keyed by episode/movie id (fim_sXXeYY = Friendship is Magic season/episode,
# eqg_* = Equestria Girls movies).
# "unknown" marks entries whose hash was not available when this table was built.
# NOTE(review): "<KEY>" values look like redaction/anonymization artifacts from
# the data dump, not real hashes — TODO confirm against the upstream source.
original_hashes = {
    "fim_s01e01": "5735ea49c43ea56109171cde2547a42cecb839cc",
    "fim_s01e02": "8d61b132aaff821b0b93fbff5baf74a017b93942",
    "fim_s01e03": "6c5e832dbdc91aec2159bd517a818c6aa1057613",
    "fim_s01e04": "b18eb5b02993dfed9cd383e54207119ee5cb51e4",
    "fim_s01e05": "7d7461461e39dd807ad5ace457d762c65075e62e",
    "fim_s01e06": "94138a8ed565a1d39ab43cbf9244720372287d66",
    "fim_s01e07": "a416681e16e9d8397e9ae06ecdfd9d2386e8f8f1",
    "fim_s01e08": "fedfadaa3d737e59fe1617407f80157258858004",
    "fim_s01e09": "75c31f8555f4b6a931b41185dd9aa645a6b5d9df",
    "fim_s01e10": "868a4050fd56eb397b25624a6bd6efea7b05f4d1",
    "fim_s01e11": "d9c603f4073e796454935eb855205f424b2bc0b0",
    "fim_s01e12": "9aa15d4cc4d6a1596b1b964add7398149cff5d4d",
    "fim_s01e13": "aa94e28636d9ad224b71c544a1ad84598a4899f6",
    "fim_s01e14": "6ea73fc00e5626669269c207b3ee0e52f018e382",
    "fim_s01e15": "701f2862a9aeefca70f6d314c0589c7a8a179ccc",
    "fim_s01e16": "dfcb90fb26d55641b554268d1d93efaa11ad285c",
    "fim_s01e17": "cc867f314d68564ce48e0bcad9b57a7af80a1520",
    "fim_s01e18": "e54ae83f44efd7dcab1bafa88eb6f3a6bd7f6c2a",
    "fim_s01e19": "96ca30cedca8170dad9611da02f774980ad83737",
    "fim_s01e20": "f4dbd0df7214583e0848360c89a8bde432c91b6d",
    "fim_s01e21": "ec5b1b9036ddce7cd62c0af9fb0c96a4dd85a52a",
    "fim_s01e22": "89aae028e1658c083c18a99216d1a549b30bce21",
    "fim_s01e23": "b18d2ab28435fbafb90046328e77751b8ca8cf6f",
    "fim_s01e24": "2e72f04bb686ea6273ebdda7f712ae32d76d0594",
    "fim_s01e25": "225228dcba4fdc08601b4b48cb182a9bc5d841f8",
    "fim_s01e26": "69030f04a1395db8a732149d552c7f9a5f95cad7",
    "fim_s02e01": "b188a4b7b3d9e74fa26dcb573e29216f450d677f",
    "fim_s02e02": "9072ea4af815c04c86fe00884aa55acde51c8de8",
    "fim_s02e03": "bc9bcab5e626e649185a237485cd6c45044febac",
    "fim_s02e04": "0bf3465a2adb91829d8fe16d8354f4dea9a96a4f",
    "fim_s02e05": "4d3033f9b443ebf892db563ecc183b15ad171134",
    "fim_s02e06": "e80a27201a93226b99377e5f3c7d60a337bfb974",
    "fim_s02e07": "5fd93be8ea58d26ed2eac7aee6cab6ea6d606da8",
    "fim_s02e08": "0f4197de4d3e5e29bb916c68c989c42003a3cae7",
    "fim_s02e09": "4aec2125d344f7651a25869f6fefe2d340cf255c",
    "fim_s02e10": "2df0e6d01d8510db000065baa2d028240d7dae49",
    "fim_s02e11": "7586a9586ffd57ddacda6201abd1e919cc6a782f",
    "fim_s02e12": "dd027fe0d956c851f8d4b633cfc56e0b5b24f786",
    "fim_s02e13": "667ef565d8fd3fa82dcd54cf2b469a5e1ae85c5e",
    "fim_s02e14": "69d1e1a4610eba20343287f2fd5a3419b74fb6e7",
    "fim_s02e15": "feedd3600151fa1834f9ff73363ce8931f7b5ec6",
    "fim_s02e16": "d2bb3a3f5daa5553b5be15894b4e209b211ea178",
    "fim_s02e17": "4524d942ca3834230e9766ba1e4ad4e51681ed0c",
    "fim_s02e18": "6ac536af39ec9a3096296af9da5559ae4a5d8523",
    "fim_s02e19": "82e77f72f7b67126ae7fd85e87cccb1d6fc586ff",
    "fim_s02e20": "333994b8309e760d0eaeaa793b18020b5b7acd9c",
    "fim_s02e21": "1b085707a89006b05290cc4df7e29030b42c85d0",
    "fim_s02e22": "cee734596a7c3f57e7c87e8542746142745375c7",
    "fim_s02e23": "e698ae4ec212fbef527914f5eeb4ee6cae8060fd",
    "fim_s02e24": "14b3bb367891df57cbd39aa9f34309d6a1658884",
    "fim_s02e25": "b6e1cc5ec6bcd7c9a14e901a278c7eb940ea93e1",
    "fim_s02e26": "201d11c9eea301bb85f222b392f88289d1b966d9",
    "fim_s03e01": "4408659e47d65176add7e46137f67b1756ea0265",
    "fim_s03e02": "32ace5c7e56f0dc92d192ccc3c5c5eda4ff32f29",
    "fim_s03e03": "60422fc559b26daeb7392b9c7839c6787e1bbdf0",
    "fim_s03e04": "51487978bdf587624f56abeb18e98703bb738ab1",
    "fim_s03e05": "ebd85277ee14163e7f5ec690197e617346032bd3",
    "fim_s03e06": "36c4ac7e393b268ce76cf7f834a52bcdd170a22c",
    "fim_s03e07": "7faadb4e07ea50f9660d4d29c5ec98e5080da2e2",
    "fim_s03e08": "3b4001fce0ab49313f59afcecfdb3944564b3682",
    "fim_s03e09": "5c045b5532829d502adcc9839cd22b961f5e7342",
    "fim_s03e10": "8212168e108028e5e4906a4fabba10530f2550a9",
    "fim_s03e11": "520d1362576df396c8db428fe430b4b14260346c",
    "fim_s03e12": "<KEY>",
    "fim_s03e13": "b9c1e54800c8965a2b3a3ab73ba8e1a3b24dd58b",
    "fim_s04e01": "fd420d33b9f256868ee5e379eb9c8a2bc3b40b09",
    "fim_s04e02": "73b72f84f85ee26ee9c2728b933c86d7d370a69b",
    "fim_s04e03": "5bedfc8eb95c9b692daa4a806ff35e6d5c215d1b",
    "fim_s04e04": "4f6aa4d45802b3304f04e6a4e3a7456933878a21",
    "fim_s04e05": "a03e2eb3d089408e3b37d3cd258736e81ee2e2cb",
    "fim_s04e06": "678bd6d70839761b9a4271af8cf55e526c12e6ca",
    "fim_s04e07": "ca0cd431366b9d29282f1cafed3c4b1194670a89",
    "fim_s04e08": "81f8192af3404dc563ba9d5567e89e781201226a",
    "fim_s04e09": "aad68c1cb09aa95482e22cfc9fd4495445cfe1e6",
    "fim_s04e10": "7be7993cc1a2ea9866f0ee6350765c0973fbab48",
    "fim_s04e11": "a7476b04649ab027f6419d439b7d7add52342145",
    "fim_s04e12": "977825b271c87e55d760ef3f93663f1491c16fcb",
    "fim_s04e13": "3d0fc66b9f550f84d599ae26a0561888620b83a2",
    "fim_s04e14": "ef3e76f455176c085d74f5571fbe20e378e2db15",
    "fim_s04e15": "26ef4519511b99b13d112dca008f8933bb0417ef",
    "fim_s04e16": "09155205ae4c06db7fc224016f3c529b2d72aeeb",
    "fim_s04e17": "d4516745d73a6c6b06922367370d9b60c1e9c9e2",
    "fim_s04e18": "0166151f5eb32b945fc0a7c87a83661193dc8a7e",
    "fim_s04e19": "714b02bc8baf1d892cd7a40d1846e2a98e17f771",
    "fim_s04e20": "a541743d1e4c68fec52ba8890ffde23b53d76df1",
    "fim_s04e21": "92ce42d2b108fe9adf0b6be24e91d8f7772a2b97",
    "fim_s04e22": "e274ef133ad4b64751737407ff2d56fafd404669",
    "fim_s04e23": "19983b86893a51ff9a9c7dfd975724dcba07f6fd",
    "fim_s04e24": "477c881d8da11102e280363b61a65fb3da41eecb",
    "fim_s04e25": "51d3e59472d7393a46303f7f7be084929242145b",
    "fim_s04e26": "97444afe8ab97a9d31586eb812a2fcd239fd055a",
    "fim_s05e01": "5dbb872d9bff9054a28e7438558d6a1f8264ff91",
    "fim_s05e02": "c3f15e75538b723a08fd602faba7714bd6e6d812",
    "fim_s05e03": "4621407df2da79bd229df24599f5caff56b88ea1",
    "fim_s05e04": "f839303bc2689ec96d61910ae6c7cf5c209616c7",
    "fim_s05e05": "5115b64a61e0819007f6382425f397ebe48a06de",
    "fim_s05e06": "0eb12c6d0baa38299b6afe70450e1c4bf02001fb",
    "fim_s05e07": "2178552485f41cb2fc30a6e0b5e0b6ff70129ef9",
    "fim_s05e08": "c5240a27e9956e388ab3aaa75078db473cf4f755",
    "fim_s05e09": "def459ef5462a3ae16aee3d7897eeeb272f366fc",
    "fim_s05e10": "b9c0b00bb44280c5a882ab86d65d62c864cd4aca",
    "fim_s05e11": "c8b22e0bb8d1e8db700ab334b7e1af487028abd1",
    "fim_s05e12": "d368b0c504d66794db1c576e2665f9e0bdc26149",
    "fim_s05e13": "9205df05a50ccebe7ef943dd753add0c113977e4",
    "fim_s05e14": "1f85660cc8a5eb6f02995b9d6791a2a693c78c15",
    "fim_s05e15": "8a5e01c14f3506c24c62204d57e882b40e77fbad",
    "fim_s05e16": "935236f72c771c29f37102ef039e91d6650ccb0b",
    "fim_s05e17": "e0ae2f283a214a454c1f5dc2d7f5266fcf06b625",
    "fim_s05e18": "aec9d8082e0b5d44df44530ac925eaf06d9eb36c",
    "fim_s05e19": "897e301efe7314d8b9badce6a976201aeb4a85bc",
    "fim_s05e20": "0bf2590abedcc2273813e641e19f6a85da0e868f",
    "fim_s05e21": "319945de56b444a1b576158af1a85f93ee1cce47",
    "fim_s05e22": "958dcd5354a6a1f92b9f7f825fa621a58a4f24b3",
    "fim_s05e23": "5ba88f8254ef41fc344e1cc4e19696e1dda8ed4f",
    "fim_s05e24": "a6f4b609f03c7c3db10f139ae9777b31faa87ade",
    "fim_s05e25": "2a457cace44f83d75d75a97d51ef8f7610b1ee5e",
    "fim_s05e26": "6109d956ffa8ef0e5654101188ceac10e0e4b00a",
    "fim_s06e01": "2f5a0741966fd0a65f39f6a46cf7e211c4abd615",
    "fim_s06e02": "772a81ef3a8bc4de77e9844a222024b7c6556902",
    "fim_s06e03": "c1776d478795ef7c1a80163df1d2577020fd67c1",
    "fim_s06e04": "31df7f9e1b14fe9e2cfda4bc374d42b5f6410ee8",
    "fim_s06e05": "63ae25a890c4ce775dd1e468cce9ff53ce1555d6",
    "fim_s06e06": "01eee5ecc47f9194252b0e6a79e3eab1e7c967bf",
    "fim_s06e07": "fb37ddb83fd33fb21f7ec531d2ab22ee553bfcff",
    "fim_s06e08": "e59cf84e2bda3c737e8e493eeacd8cc03226ed62",
    "fim_s06e09": "af76a47b6a4cc4f70c90b5a7a01198c36f9cefe2",
    "fim_s06e10": "64d83601772b03136d008755ac489d7db6f8782a",
    "fim_s06e11": "ee4b97ba3d04e45fb95f0bdd3489d2e89082fe46",
    "fim_s06e12": "624eb125ab329d0cbb6753e0485b3e898ec74f2a",
    "fim_s06e13": "265a77bbec7ffda9c4bc984a771eac89046d3db4",
    "fim_s06e14": "60f7e86420539011d092374e1fb9146f8a795108",
    "fim_s06e15": "b8c38b7727fe2711d5b27f5fd128e4f43f1230c6",
    "fim_s06e16": "e4e7c18f04dcfe19e6a122edd69dd0705b05815e",
    "fim_s06e17": "d75ffe2da2f92f8f80671d6a2f6de5ec41909f99",
    "fim_s06e18": "fb7b89e4a0984b3a31387199bc1e760e5e8e34fc",
    "fim_s06e19": "e768fb304bc6d8de89496d20649a068c9c482419",
    "fim_s06e20": "50b39be291c5d541587ec717df9703430f848266",
    "fim_s06e21": "617546695b583ece594330c591a6e4427eaaf77a",
    "fim_s06e22": "303d856ed659c809aab459684e1a94d1e4df5f76",
    "fim_s06e23": "a32aa10d18294a43fd3d6df25a129117f3261397",
    "fim_s06e24": "25fce908c96c825fd88e5b300f237eb34ec267b5",
    "fim_s06e25": "0aa2b8bcdc0d515ce645abebf96cf50eabbb2d68",
    "fim_s06e26": "e28ae456662b264cb791a071d9d8c9fca1b128c6",
    "fim_s07e01": "c2edfa142bb6c91446a3a747c49c9f3ee2234b9d",
    "fim_s07e02": "de9c1ca38abce51ed605d86bc6d85daf8fbe595a",
    "fim_s07e03": "585e527dd175d89f58723725d5aa1d4d27949616",
    "fim_s07e04": "f94a53d4ca0ff914b35de88f6488d5d0e666ce3b",
    "fim_s07e05": "58695b0e626411828275efd146d1e2c0953cb054",
    "fim_s07e06": "ed0d957a8e7f5b35835c9b617159dbef72378c6d",
    "fim_s07e07": "e75b261b0420f2051558f5d9b7c5d427ce997cbe",
    "fim_s07e08": "b1a72ee905e56567e9277247aee5ae78ce0ae3af",
    "fim_s07e09": "59f1d24cdb1b89775ee1c08e697c534b30ee09c0",
    "fim_s07e10": "fd83d40fcaf077b47fc3e953fcd71a9b55a5d230",
    "fim_s07e11": "c9a867d1ddc812b691b50cd413aa74d854cbe69f",
    "fim_s07e12": "df3ab652a7c120292069664886c72a05f6c3d31e",
    "fim_s07e13": "637041b3645af31dd7467b566f512b491582f59e",
    "fim_s07e14": "58ebd77188a430a23d63d36c317d40563265448c",
    "fim_s07e15": "029953957f3f5f96430b9900286671c45ed5029f",
    "fim_s07e16": "ebd3f72254542a3ab9bd05c8e8833f5ba4961d9e",
    "fim_s07e17": "c85dbaec5fc5508269cc1a68d8bc2d0fd09a1c5d",
    "fim_s07e18": "710e6e92c6e755ef6fb833b74b8335d2c4ae1855",
    "fim_s07e19": "4bc91bc47cc7c1d49682cea2ca5ea0897a14792a",
    "fim_s07e20": "1066134a286afa3ad42da6d40d800d4d70d10bb8",
    "fim_s07e21": "438f7eb81b4ef8ef20376bb87a3b07a938354066",
    "fim_s07e22": "b14f3aabbdafd30cdd91c18b4c8fd31ff6f50e8f",
    "fim_s07e23": "35e529712b734da8d2e4026f1e7297c064bb5686",
    "fim_s07e24": "a546204cece37896e6a49df7f53850586bb395ce",
    "fim_s07e25": "cd3624133eeac941a5a6e26912f9416a762017ef",
    "fim_s07e26": "7eb71360fafe1fb1f397b3e1ec5023e1a877e575",
    "fim_s08e01": "076cc70aeedf3b775070a174d24899c99bba48e7",
    "fim_s08e02": "ef55ff3f9e8be2c295687a9c05e607ef3902b74f",
    "fim_s08e03": "5d389fd6b7eb480b02f1a42e97ad496349c95ce4",
    "fim_s08e04": "c309a04a82c5d6b3a805de4ba6fdb1f2a7463283",
    "fim_s08e05": "83ba497a8e0e6c3cb0e030a0cc45ca2c412f264d",
    "fim_s08e06": "bb1a1e6aba6414d3d54e42dba64f636b6ccca1f4",
    "fim_s08e07": "cd90c566bec22350c83c53aa9892b3a31ad6c69a",
    "fim_s08e08": "7e100b9b553136604e411863d1b01f20b731c747",
    "fim_s08e09": "4360eba6f9068ddda8534a034e3eaada342bac97",
    "fim_s08e10": "94ad320e05ce30402d8f22b014f3004d76edfa6b",
    "fim_s08e11": "03b9ff19482c8db8c700d547025a23b8ca0c9b74",
    "fim_s08e12": "304d0723742c7fc10ef5d44868daef6684a5f674",
    "fim_s08e13": "3531ea698101b35a0823a843725e51500b6d70f2",
    "fim_s08e14": "4830050ce62167e61391b6cece11057caac273e6",
    "fim_s08e15": "8a2fd1e83457459fe29c548df94de3461d46adfd",
    "fim_s08e16": "2fb4a3ecc5062a1b5e7515c1f5e87f45151ca319",
    "fim_s08e17": "27bc30641357df42cae1a8622a6653c29540b550",
    "fim_s08e18": "c5f759656652a3c4745e2c25e90dae59442f46df",
    "fim_s08e19": "cb8b76cfcae9ca0496df5c3e8570e927093bce79",
    "fim_s08e20": "362db545da4fbaabcf31db03151f106ac66fecc1",
    "fim_s08e21": "c397c7a2c59f905e30d8654ebcb86417bc349dfc",
    "fim_s08e22": "9e790fb4b4796af8e8c9a7fd4a12cd69628243ba",
    "fim_s08e23": "19df7e2a55ca6141ac8708d8552fca36e19b593b",
    "fim_s08e24": "214f2c55f0d2bd625ed32d98bfda086c90d282b8",
    "fim_s08e25": "ee960deb72558e988f896e6e4bee03972ac598c7",
    "fim_s08e26": "af21facb8c787fe81b4fc03f7de529a87f425540",
    "fim_s09e01": "b42a1efd4656e1a935dbdd83bd5090323ec1f3c1",
    "fim_s09e02": "515fb107f552dfc34b85102276336c77a9daca37",
    "fim_s09e03": "6a584f94467f7df36ba970c2136f824abcdc8c16",
    "fim_s09e04": "72b9a3dc0493b064b12500bffaea6f7cf9206f41",
    "fim_s09e05": "c5540523c71796cc073836e82aca115a4a1c79ba",
    "fim_s09e06": "847bc311062d64baf047478323cc5aae20993eb9",
    "fim_s09e07": "6b5a6224ba2d52df20b73e2357a5c78facb0a60f",
    "fim_s09e08": "2a7fa34a6ddb7e8ee2263756f4d315bc323af94e",
    "fim_s09e09": "c60d111f27ea50bdb886adc71a8c5f946bfce280",
    "fim_s09e10": "8adfc1e86af3ec220c8d379a8a397588d98e11a6",
    "fim_s09e11": "40fb13de7799c29f8bf6f41090a54d24d49233a4",
    "fim_s09e12": "8cceb7e03154c46c61c6a0691180a654a8fe268d",
    "fim_s09e13": "7700e2e51cb7c8e634f7cabe28f066c4b1f0f72a",
    "fim_s09e14": "736f62c0c276f6aa1f911517b82d294dfce4b876",
    "fim_s09e15": "6a1ddf8ba3c0cd2713522f7582b16a0b48828a41",
    "fim_s09e16": "18b5e2fa57b4f82b76f3338a5ca44e95a289659e",
    "fim_s09e17": "d319eedfebff339b116985eeec7db529f952c9de",
    "fim_s09e18": "baa11cd12bee95a5bf2719b8f7bbe1fa3fad60f5",
    "fim_s09e19": "1757cfd03e199649d11d566d2c74f9a52d660bc8",
    "fim_s09e20": "5ba76c741cae8f83fb5ade47b946ba5925224577",
    "fim_s09e21": "beb7a4d66acd631f2891d2491702db673f026935",
    # Season 9 tail episodes had no original-release hash at build time.
    "fim_s09e22": "unknown",
    "fim_s09e23": "unknown",
    "fim_s09e24": "unknown",
    "fim_s09e25": "unknown",
    "fim_s09e26": "unknown",
    "eqg_original_movie": "a7d91653a1e68c7c1be95cb6f20d334c66266e98",
    "eqg_friendship_games": "41d5a6e45d13cde4220720e843ad5285d9ab95ff",
    "eqg_legend_of_everfree": "0ae355e182f7ad116127dcede68c50f247db6876",
    "eqg_rainbow_rocks": "3e9bccff77192b5acfce95d334805027f1de83e4",
}
# SHA-1 digests of the "izo" variant of each release, keyed by the same ids as
# `original_hashes`. NOTE(review): the meaning of "izo" (presumably an
# isolated/alternate track) is not evident from this file — confirm upstream.
# "<KEY>" values look like redaction artifacts from the data dump — TODO confirm.
izo_hashes = {
    "fim_s01e01": "f6a9024c2d5f1b98ed6f65ffb9e633b731633ca1",
    "fim_s01e02": "837028fbc12f6998da620bd687093cb71da44300",
    "fim_s01e03": "46cb8d1bdf8a59dbc38a080f7c1ee69bcf6ebe50",
    "fim_s01e04": "01fd19c33a7cfebae29630469f2b63f00e61ab67",
    "fim_s01e05": "7ed175f523796df134fe4a91cd924f495e7fc6f0",
    "fim_s01e06": "b44630407e4ba8aeecb9c9bce3094578a5989be5",
    "fim_s01e07": "c606a59baf8ea44667cdc1ef32dd0a8b439aa832",
    "fim_s01e08": "f9ece1c16c067f935f472ca44de2b42cbd4ce72c",
    "fim_s01e09": "081c9ad4947c28f111c64a2d0dfa3dbb122865a5",
    "fim_s01e10": "77ea25d4e5d6fbed37c87ef51498465c47850809",
    "fim_s01e11": "55b6a431b0fdb124106dc3ec3c9b34426780c7be",
    "fim_s01e12": "92d010ba3ac2b8fef2971ad95fa60b3d88b2dce3",
    "fim_s01e13": "39c55e57ade0e1dd75ac9f1d9528ebc9eb4049c7",
    "fim_s01e14": "ff9c70f14c1e9e17c90e5e74f0e6c830662e6641",
    "fim_s01e15": "c06c7e448eee5390c203692550deaa56c7ca99fa",
    "fim_s01e16": "f95e0b3e36bf56ad987a338d896c41ecfb5d6ef2",
    "fim_s01e17": "337e1d6a9d60b516ad4c1c252a2b3b43b110cd4a",
    "fim_s01e18": "f6adcde8831ca4801cd455c864e47ecf001becbd",
    "fim_s01e19": "bc91d89bf4361306ee03fbd66161f2c09e932180",
    "fim_s01e20": "077ef96ebe6b62f7b8dfbad054ccf48eeaa639ad",
    "fim_s01e21": "e79c648baffc2889e1bcda8bb1e6180ed229bdd0",
    "fim_s01e22": "d05cbf7e0062b23692542cfef4cb4509e1d1bb7c",
    "fim_s01e23": "6a6edc7f89bb1f5d297b9163abde7b60218b2f72",
    "fim_s01e24": "bd7bf5fca6f75306db9d06546a8151cdc8fa2af4",
    "fim_s01e25": "1f323ccc45e5ed180b91636cafaec314070eeb45",
    "fim_s01e26": "6c9f96dac12594083cadb1c520d9e9e238127414",
    "fim_s02e01": "961f47fc2d8617cb513a5c6e963ab5405583b24f",
    "fim_s02e02": "9cf031d0f8f5fa891a73f086417e3bf9f42f9ccc",
    "fim_s02e03": "12685c6cf5f1fac7e9114e78f091f42ee5f52b7d",
    "fim_s02e04": "3c789884ec1d340e55413a813bd39dab4290ef38",
    "fim_s02e05": "618049dbb04f4c9325f93854e9dd5bf984a459a4",
    "fim_s02e06": "de634231e70c2f23316944086f2c543c97c19a9f",
    "fim_s02e07": "4866c807dadb346607332b25c08530931967b4e3",
    "fim_s02e08": "d1f1885afd3eccc191b586ff7217be732b0d17a5",
    "fim_s02e09": "d52d0c06cec5ffabc441f2d390ea8260b63b8d47",
    "fim_s02e10": "9a9845afa5a4b5b64b418294debc45221042ba4f",
    "fim_s02e11": "f307bf2614e1ce8e19e581a683694a7d37f318c3",
    "fim_s02e12": "49bc1e0b89c944a5408d7d757779f130ff533ed6",
    "fim_s02e13": "5e56f840d7d5fbcf4c0724f9fbf6cbf5f00e56f5",
    "fim_s02e14": "ac86b17bfc3d9341dab3e6282506ead5326c3c70",
    "fim_s02e15": "1dcb2629a203f0a4f758dfee25930fe18f228a9b",
    "fim_s02e16": "c17161d4f77d44be1f8c72af6a15df630253ac53",
    "fim_s02e17": "b08637bc0489faaf9e1e7392536a6c218fadc2ab",
    "fim_s02e18": "a1191dd442d88bd50037cc8000eeb30b559c53e3",
    "fim_s02e19": "d9de8c47d06b6a922ba058b72ebfa816f302c573",
    "fim_s02e20": "fb72d51dab61f932313261a9a424f432e14df024",
    "fim_s02e21": "319344dd49593b60b2f9188deaac64c7207a91e4",
    "fim_s02e22": "3dacdacb0831dbf23ea02bc3edaa923b14c0216e",
    "fim_s02e23": "0415b042a97b52f6d4ee68a7dab90e6032160ab0",
    "fim_s02e24": "a0a48b6e4609e0c089966043ccdae842cfad7401",
    "fim_s02e25": "9f52e54d5273baafc607bc2e6503c5b4283a202e",
    "fim_s02e26": "5cd79ebce7f9302b4fd581c42d8f2ebb9d4dbf11",
    "fim_s03e01": "ad581fa4b222d653f8f067bf073251aad5c508f5",
    "fim_s03e02": "26043fa9c1ffd15ce10a32975a81cd0eb024a635",
    "fim_s03e03": "4ccc98ae5ae3a6845973f05414ee9d5f6bd106e3",
    "fim_s03e04": "2d028f89db0ab5ecf72126af65a0580d72f37fd8",
    "fim_s03e05": "a0c2edcc17bb07d07988199d4b8699ae1311cf92",
    "fim_s03e06": "fd7bdcd134c4e1a1039b58943143cd42c16daf22",
    "fim_s03e07": "06c566eb542db2fa6591e7d0be03d0588ffc72ce",
    "fim_s03e08": "fae0c07f7cdd4e071648477e861cf1e16a0bb705",
    "fim_s03e09": "3753c33c68f01934bc189ec017317f2bcbd70dd6",
    "fim_s03e10": "82844bb1ebabac572a239d5c08bc50ac602cc4b5",
    "fim_s03e11": "7cbc0294c8fd6412cd3f60f4d9dfde5a3a4ecae1",
    "fim_s03e12": "ba91ccd6ecb94859e895c8f3340ff4323ea8739f",
    "fim_s03e13": "b2bab2e7fa9e171aefcf0996b0987b4af25f16fe",
    "fim_s04e01": "0b8da7a6025a14aa2a2376d2519052fe883247cf",
    "fim_s04e02": "16dc2f4e35644a3b6df37ca3270eafa27fbc1dab",
    "fim_s04e03": "0bab184d4d58e520ea7a2ef54232a3f439076f83",
    "fim_s04e04": "3e971bd50fd6801a69169c81c9b22d2054b9102e",
    "fim_s04e05": "4efdae5536326d27db57cea43c8ffb9b486b2cbf",
    "fim_s04e06": "edb0561371fc453e6fe2474b2401948daab43333",
    "fim_s04e07": "4bbf58fdd9bc3f33376a44ccf164e8b33e14449e",
    "fim_s04e08": "f2a5c2ab930579c52aab347e003b6b4bb72c96b6",
    "fim_s04e09": "7d2f532d3540cd1147c9c3427e0f9a3bd6431162",
    "fim_s04e10": "f1f1ca515bd1bf1d462c570921c3486ebe99e5ff",
    "fim_s04e11": "4964977a3956c359d8800ee0f73a65bca0713805",
    "fim_s04e12": "ef639444d94cb91f057be13feb7d6107011d1c63",
    "fim_s04e13": "df95a0c61c2eaed4ea3a22d054a14d3533b5e61c",
    "fim_s04e14": "308cb00a6ab8bd4458759627063b64cff9e71b2b",
    "fim_s04e15": "19ac49592509505adb2e9cd6bdacb5c6e4ea3fcb",
    "fim_s04e16": "68f5b5df443dd44f31ba98b797b799022e4b9f58",
    "fim_s04e17": "06351f66834b2149ce3e4207af795d01f59986d7",
    "fim_s04e18": "afadfb72c758af6722df942ceb117ff59e26ca83",
    "fim_s04e19": "c48728696083634780d169c0334ef7570ff9b24c",
    "fim_s04e20": "bda66b67600367b1a79368e160d94f3f8132bfc3",
    "fim_s04e21": "36676b142a4765e1b4503067bae814245e5f9d9b",
    "fim_s04e22": "de86d1413b2d0f6d218f36fa19816d087c8fffda",
    "fim_s04e23": "06654109453e40c0a771f3f6f931c45638836eeb",
    "fim_s04e24": "8f0b8efe53b924ede6174b81fc2981accb24a126",
    "fim_s04e25": "ef09917da342d41a82a56c8688aa8de4fdaeca02",
    "fim_s04e26": "be4f396dff757242c5eaab50a72e1fe5d1f53223",
    "fim_s05e01": "dadbddb82ef59384c14c8ff341db3eff97f24ca8",
    "fim_s05e02": "210dcfc2ae2f3d81ae141c0fe53df47c8a9ab59d",
    "fim_s05e03": "78416fd1698876844ab838e55075c7d6224c9cc4",
    "fim_s05e04": "f5e84b08970e3b473361617abdc1f6a0bbe17792",
    "fim_s05e05": "304ed96576a36cd5646f1bcbe74279cd594871b3",
    "fim_s05e06": "83cee832d68583db83e6066380ecd0086ca2f1b8",
    "fim_s05e07": "1a64148801f604cf419f4edd3f0900af1f292112",
    "fim_s05e08": "6ef331a7f08cd0923f2f0326bf0d4fa0c17626f0",
    "fim_s05e09": "f7db68bf6e74be8ae26fe7d0c069db66dc48225f",
    "fim_s05e10": "d8d915e674acab272a146af517629e6d45a2e5c9",
    "fim_s05e11": "c07f5e9b1be669b59c28ce5aa26eb67546dd074f",
    "fim_s05e12": "6ea0cf770a228b9537c3c14e1382dd14b50997f7",
    "fim_s05e13": "27609192ad83af804295d8ae98c00ab8fc22eb5f",
    "fim_s05e14": "38a6b06c2ab0b3fc4fd7f238f89fe8bd277d28ef",
    "fim_s05e15": "4688d8c2050235dd5eebf7aa0099812eb5aeca34",
    "fim_s05e16": "71b2c6c682e142bbd53e80adc298d6b5c6f54123",
    "fim_s05e17": "a9ef2e4f7bbad929bd21f98563d07adfaab6129e",
    "fim_s05e18": "806444a811e91498d3cbfb001811cb21d548d4a8",
    "fim_s05e19": "f862eb7c844ae1f6c721e3fde703488b0e022dc2",
    "fim_s05e20": "253dc31d2279bfb7ec24228042702ed9e8f27a9a",
    "fim_s05e21": "beba681c7bf7e04f5537b9dda8d26eea81ad6abc",
    "fim_s05e22": "4f1118498df9a8a098b945843e2d4708099da8b1",
    "fim_s05e23": "700b3b57686af13fe370a509f7fe49c93fd12cb6",
    "fim_s05e24": "eff6b8292ce12f9427a1612e1c5736fada62f071",
    "fim_s05e25": "e0b85c17a79cb694cfedfe41c9f269ace64354ef",
    "fim_s05e26": "94173ecac9fa6d993b9e321c24a21d37b5295bdf",
    "fim_s06e01": "f9abcbff1c6559363dc2d01b1b131b1fd7652075",
    "fim_s06e02": "4570cb5c8f677081856503e9473b829af9ea5279",
    "fim_s06e03": "6da71d196d87b5a0d71a0d6cbc8d0739e6f08df3",
    "fim_s06e04": "a1e365ea7e0a5eee9d3284dc59345ceab167d8a0",
    "fim_s06e05": "717aca95a8877a9b5b5aaa4224c8eb595efbc8f8",
    "fim_s06e06": "5a1422a00d41575285b653b087fd72c2880452b5",
    "fim_s06e07": "e73b105b48d9554661558f0296bc4c34cb33c237",
    "fim_s06e08": "dda06b88279e0d2bbc536526af548d48b5e4f1e4",
    "fim_s06e09": "289234b9563758f0ced37eac3e6ed43e9bf2dd49",
    "fim_s06e10": "0368cfdd86f97d4ba49076cb164452e3aa920beb",
    "fim_s06e11": "11ab453a3f70f147952d35e9b4158920fc112524",
    "fim_s06e12": "9b4182df2d0e21170e23552796bcfcd33ecad1f1",
    "fim_s06e13": "b93b9faf3daa40b569c7d1200175a981dcc27335",
    "fim_s06e14": "46d41de3ce8e1811225930d27691c4de2049de85",
    "fim_s06e15": "fcb2fa148c53badc896b38e3b7ca9a9697ac063b",
    "fim_s06e16": "f3ae9395e43b6f3690795d8ab187af799e87cf29",
    "fim_s06e17": "498828cb2eee52431057d8b6728eccfb372df368",
    "fim_s06e18": "<KEY>",
    "fim_s06e19": "8b6e00372b21906b74161141745cbb8643f937d5",
    "fim_s06e20": "2ea96c422a1d980cbc431da6df61cbf2bbb00ecf",
    "fim_s06e21": "fee21ca21647b474b10c57cd95b915f9454e3e4c",
    "fim_s06e22": "fd05dd1eafabb6ad23294f087400415428083952",
    "fim_s06e23": "e6b38a84d01530fb1710417c37b14edb9ceb0318",
    "fim_s06e24": "1d569bd9e83b93165a321d7f1137cc4f856e2f28",
    "fim_s06e25": "0988ecb76b172b1e0f2ab83c8a81a2b21d90cb23",
    "fim_s06e26": "7b255c9d24419c79b61cb03e1aa1c51ee869a59b",
    "fim_s07e01": "13552a4e583122cb929c335f2aabb96868ebe8bf",
    "fim_s07e02": "7168f16470d158d98e77cd2f956d3c3aeed950f0",
    "fim_s07e03": "f9d114d4a6ba8728ab1e66e2c72c2ab49afdb425",
    "fim_s07e04": "a4863d754430365e8f8352f1ea281b33152f62ec",
    "fim_s07e05": "f7e82abd7dfb363f7cc5d88c056ff39bf88fc91a",
    "fim_s07e06": "6f7d7a949963cf603d94a1d2f6ea2faba40d6ec0",
    "fim_s07e07": "f554058057385bf47b8e44247296bdecd74245d7",
    "fim_s07e08": "34a440cba25b5c0d28c4a4519fd666291c4eacb5",
    "fim_s07e09": "0a80082dcb8261751db62316624ddb1a5413d084",
    "fim_s07e10": "33959c2ac2e4210fe679a9d74ed035baa32ff581",
    "fim_s07e11": "f262360d1b534b9893277f17065efbc006e86597",
    "fim_s07e12": "decde67c0c63c957c89b3cc4814ec62010e0390f",
    "fim_s07e13": "a1fd8783e398141ab29969c968a9ef8d926a2761",
    "fim_s07e14": "538abc0eb0513c9d2915b7c226d63949977c8b45",
    "fim_s07e15": "ce4422c88246c21768f4188843824dc48bf2da30",
    "fim_s07e16": "ae4ba22190cdf32599bad2d1fa67371e31fa1bc5",
    "fim_s07e17": "c6a47aab6a11fccb4846ddabf650a47ed3ad95d9",
    "fim_s07e18": "800d3ba5074eb59556ff3d2d929fe55f33662467",
    "fim_s07e19": "49566b3604ef3414064cc5f6e2ddb71716f3c55e",
    "fim_s07e20": "856e6dc2c66f4c352fdb48625eae6cf8f8e8d0bf",
    "fim_s07e21": "55d99812da760cd528a735887e822517e06a30f4",
    "fim_s07e22": "62801071748a63e688e939b306d78b51f5f8e824",
    "fim_s07e23": "3a70ac5a28606d4e92fb9fec484997c5c45454bc",
    "fim_s07e24": "c8b532184c09ecb0f9eb468b18746edb95553053",
    "fim_s07e25": "da047c5cf0eb2e93cd163775fe1729e64ad3b5ca",
    "fim_s07e26": "b69f4c32c3717abef52fbf43e0f7d95e5ce0a9ae",
    "fim_s08e01": "695f7365f9fbb7df059f38911ef397c2525ddd0f",
    "fim_s08e02": "976df812644dbbaa5905dd9e6f2b712bfb9bce5a",
    "fim_s08e03": "4f8f3f236ad697147aa002d545907ce6427c1ef2",
    "fim_s08e04": "b3faf68eaf99deec4f5ef9ea162b5ec6df0412ff",
    "fim_s08e05": "7bdd85dec8e09fda1ec57cf56ce5cafc75188b31",
    "fim_s08e06": "6a53c91e3dd22ddc069d5cf85225d3ec9a141e2e",
    "fim_s08e07": "5b788645db606822e99d6188cbad5a06556bcd80",
    "fim_s08e08": "<KEY>",
    "fim_s08e09": "1c01cfc679b442b2f05184327ca15fa0bb54a04c",
    "fim_s08e10": "8d680da1acdd9c8636fb6d093e3cae5e1072916c",
    "fim_s08e11": "3acc0bd1c3bd9ad1924a7baad0ae52d224f1b98f",
    "fim_s08e12": "814e5e20a1d5cb11b2d40630d8d9f09aadf0f367",
    "fim_s08e13": "f153eb6197d9587f10d5ef21b2564ecce8a0869c",
    "fim_s08e14": "003127da6e5d687a916e8f0facb99130f34d6856",
    "fim_s08e15": "c1db00dfe88352f6d09e90825ccf20aedda29416",
    "fim_s08e16": "bd5879b90204a8e45534f8f1021aeb05085b0cfb",
    "fim_s08e17": "d5d073670b0053b023bf7c82ba33bc58ae064e81",
    "fim_s08e18": "8bac4e2877cbdb46df87f1ca61c4f78ca41cb491",
    "fim_s08e19": "7d7cb95831868838738a12a215b04e97ab7d15d4",
    "fim_s08e20": "f631a709607562db55fc15b36657ef4ecddcc039",
    "fim_s08e21": "9690385208ee229254b635af82a09fa2ab9828c4",
    "fim_s08e22": "4379434a499ec49628607955e9d711d001c2c709",
    "fim_s08e23": "9d1bbd5ffa936a38dd50f819ee0ffa61bb8ce9b7",
    "fim_s08e24": "ae1aa3fa3ad40e000d3e4ce4775d665ff9f54cda",
    "fim_s08e25": "d51e3fe09bfcf10efcb7e81db41075d1afd48476",
    "fim_s08e26": "db77fb88f9de6d48f1abd197f366d481be9b76c6",
    "fim_s09e01": "697b309edad2fea1ac2d21e6c1d6e17dcedcabdb",
    "fim_s09e02": "756036b6d4932190a97b08c3578a5fd67fce328d",
    "fim_s09e03": "44a1060f5bf3d587e7bf85ad0dd172fefa636a83",
    "fim_s09e04": "430e5a5756053b09780400e1cb4bdad0662f492b",
    "fim_s09e05": "5d0fdc9c8dc60bdff56aec4c031de671f639749b",
    "fim_s09e06": "bb38c9d23c41df9c22668da7bf535988c3f1356f",
    "fim_s09e07": "ca180f475678b55150b92382edd7ce1c4350467d",
    "fim_s09e08": "e01251baa48012eb5a75e7798ca0f7970b08bbd6",
    "fim_s09e09": "f7d6a40d4c4405a96fdad66917fbb1b013b3d0aa",
    "fim_s09e10": "5c726e521b2160c19b9010fab42103e181e60ed5",
    "fim_s09e11": "54c9aedfe15155519c363c10133dd6d2277ad751",
    "fim_s09e12": "60f163a0092527684966532789bc2d90b8ee6986",
    "fim_s09e13": "6008d29554185bd36deec16b72368101b791fda3",
    "fim_s09e14": "44b896f80f0393559ee001e05c64df30a5dda905",
    "fim_s09e15": "6d880d843f8adecd6ec664128c1d355c486f2753",
    "fim_s09e16": "682776123da12ab638f3af15a75d7915dab38b4d",
    "fim_s09e17": "79e62638d6628c7ba97af6f6187a864e15787a7f",
    "fim_s09e18": "2974b45463f9ac41b4a29f63d9b0d013e311771e",
    "fim_s09e19": "8989d8a96822f29839bc3330487d7586aa927d37",
    "fim_s09e20": "088134fe57889434089515c889e590f33afeb099",
    "fim_s09e21": "7d6749aeb470e71c0e8bd51e3760ae9562f6579d",
    "fim_s09e22": "a164e1f92185d6fb51f1b5a25196c2377498ea43",
    "fim_s09e23": "3094600e69bd6934e925f58cb95a2430d1839f54",
    "fim_s09e24": "ded8ba2b9887a14597e41ec7256d9f45b0f61abc",
    "fim_s09e25": "ebab1fc09dadb040388d89486aeef1b75885b0b5",
    "fim_s09e26": "2ba14c1d174eb33155dd3f6ebace2279ba8b4ef6",
    "eqg_original_movie": "edea58393759cf74326e05d0b0a821e7ff54dc03",
    "eqg_friendship_games": "9619ab463a28b5498490ed1e43ff83ba247ac309",
    "eqg_legend_of_everfree": "6b65399c961f71afd2dfa2189d493a412ee3300a",
    "eqg_rainbow_rocks": "b5a8d331a5e6b37931282207874b334d488d562d",
}
# SHA-1 digests of the "unmix" variant of each release, keyed by the same ids
# as `original_hashes`. NOTE(review): "unmix" presumably denotes an unmixed
# (source-separated) track — confirm upstream. Seasons 7-9 (episodes 1-21) were
# not yet processed when this table was built, hence the "unknown" runs.
unmix_hashes = {
    "fim_s01e01": "104d5aaa0c37225c200ad7b83fcf63034f05522f",
    "fim_s01e02": "e0d7b62bb2291a3d692f3feccdeefa184e80e7c1",
    "fim_s01e03": "c29cb960fae5e435016e02fa107ea1bcdbc75eae",
    "fim_s01e04": "19a6c63c6ebf8be3e040003e8c08627df98e5a44",
    "fim_s01e05": "9ef84b6c65927182d21b3cc3d4b2a847e6c16c18",
    "fim_s01e06": "50594babaf65ec42243c4a9800ee1bfebc6bc7d8",
    "fim_s01e07": "eb05c17e53a166ae660d1f3da6e90b8f14b7c79a",
    "fim_s01e08": "41577ab4d5a133397e55c03a747d41f4658b4481",
    "fim_s01e09": "59a587efd4f2292c9969f0e391e54ecf9d7cb594",
    "fim_s01e10": "df48e4108bfb89a9e6b490b07bf04dbd1bc3494b",
    "fim_s01e11": "907907fdc17d15f7e65d8ca1e079095b508b18a6",
    "fim_s01e12": "5a8007535d7b0925f077fde57a63791b71bad523",
    "fim_s01e13": "d6e26ed11d68262ebd32becca6529509f97c3a58",
    "fim_s01e14": "a5784986a38730fc1fb430ad0f0272fd0c7d0cf4",
    "fim_s01e15": "7fd50b75fe4a02337c25f8ff694e390f268a9456",
    "fim_s01e16": "0344d2ae7ee8f2f6461798e2900ed0ad3bd0b11d",
    "fim_s01e17": "28b9582d04a27dd13b0557730cf0eadaaa2cd629",
    "fim_s01e18": "6e84cf840ba430e9527f71ef2a0dc8ce6c218875",
    "fim_s01e19": "4a9a544a5663a6d7ac731a9083c9cce71aefdb6b",
    "fim_s01e20": "fa7733b6ab981829b4466c26f185738509ec062e",
    "fim_s01e21": "388cc179f81c44d23aee3fcffec64b17d3c10f01",
    "fim_s01e22": "e46ae2a8f92e82be3172ffd7d25f491de7cd0289",
    "fim_s01e23": "661828a394087d716376dc0139b61137389ae5de",
    "fim_s01e24": "b227ac1c89f3a25dc4522118750a58e978475b9c",
    "fim_s01e25": "2004b2aa9015498e1f2d8454856e5883bfbb3315",
    "fim_s01e26": "f66ca26045de350188d476ab5b501408c551acba",
    "fim_s02e01": "44b3fe6e76e60b7e70ef85aed2c0b2756c69cef7",
    "fim_s02e02": "35fff48ca55ecb1e601b57d896330900a55cbc92",
    "fim_s02e03": "87e1d81dcb3cffbca12550e143cc922e6a4a68b1",
    "fim_s02e04": "e5463604e47ddcc6c15552e65c4a8ae75ba55730",
    "fim_s02e05": "33149416515e8a3f04e1db6f636afecd20e4572c",
    "fim_s02e06": "8516c352f688e3be76cd9db8ca709d5dd62dc5bf",
    "fim_s02e07": "00f060faae1f1e0b6b82361b4c0fc4ddde90fd1d",
    "fim_s02e08": "059f6d9b21d6e78d7ff2593f01a032f045bb3246",
    "fim_s02e09": "153a013f94d0272ccaee7a4197f353a0756b917e",
    "fim_s02e10": "13de5f5c20c11bcf6afdf01a76db934f370c5afa",
    "fim_s02e11": "1f1b0e38ec868d3d976444b6b28548e023fd2508",
    "fim_s02e12": "cc9a39c32d44632161968b5b5babd9a38bc9b385",
    "fim_s02e13": "00ccf5ce90f50db65a4aeec6a8b6c75f056e9e19",
    "fim_s02e14": "b737ccfc861470abbc6e9e1d8d4d11dae78cfe8f",
    "fim_s02e15": "9e03f0e03c39f797f16211b086aea4f660f72684",
    "fim_s02e16": "ae02167827b0a27794154534fcb16c489df6418c",
    "fim_s02e17": "4a002a75c00b34ca9cf932a5cf7987b7abf84292",
    "fim_s02e18": "8fe1a7765ddf8ab1b062ea7ddb968d1c92569876",
    "fim_s02e19": "c4f73cec0071d37a021d96a0c7e8e8269cfa6b21",
    "fim_s02e20": "a8a392cbfe39a7f478f308dde7774f5264da1bef",
    "fim_s02e21": "c19ce429444c7862f6de83fcee8479ede4d2de0b",
    "fim_s02e22": "9054930ee5a9403da56794aa30ee8a9b8bc66c54",
    "fim_s02e23": "96baecf46098eccca9b26a587f8e588c622a0443",
    "fim_s02e24": "d304f143b81026f93628bc26f3018442d6b494b4",
    "fim_s02e25": "258b39c68661c89a7a3f564f0827382c365ac824",
    "fim_s02e26": "58df95dbcd6998ace149d16479add90f670b5156",
    "fim_s03e01": "197d03426d4bbdebff15e7723bb95f5cffccd230",
    "fim_s03e02": "3623a1cb7dea3c5fd43ed5b5e77c91f83d7ced4b",
    "fim_s03e03": "4df72e4dc9580acf9a9c598d5fbcb36e1f1429f7",
    "fim_s03e04": "213ba1e62e23931e58dc4ebb5f4b2dfd735f47ca",
    "fim_s03e05": "564a8c54b13320aa982ff2209565ef4750564194",
    "fim_s03e06": "ed1b6fb5b071ab56a78492e54875b082c0c8a074",
    "fim_s03e07": "a37fc358a86679130e9a4ff8baf0ba55ccf28b56",
    "fim_s03e08": "e050d4d8d14c26c6ebb859b01a4569d871fcd2b0",
    "fim_s03e09": "229dbc841e643c820653af1e7f3bd14f07ef1e1b",
    "fim_s03e10": "50f5e34647763ab9b007c7e86d0d7be94c297845",
    "fim_s03e11": "df0c039168e062c9dd57e77c974e810255dceb4f",
    "fim_s03e12": "3bf02a42574ea2a703639adddb3614172d50a525",
    "fim_s03e13": "788d4bc2660f27bf51fa57c469155d4e3a4488e4",
    "fim_s04e01": "b538e685b444022c8fce4916da0d422d13f6e576",
    "fim_s04e02": "aa205fdfd60da95fc4f99fffba883c567421dab1",
    "fim_s04e03": "fe64dcf231ccc42722fc855a3e74ae3bbbf0e643",
    "fim_s04e04": "001c014fe5324332f8d00f012efb35fefa47f533",
    "fim_s04e05": "e8a338e557652d21b161b8026dd5768e05976027",
    "fim_s04e06": "321426038fc6cc0bc8ddd086a79c3946a27cd436",
    "fim_s04e07": "611e78559560a6b63a099fc3397ef3a8eb0db4c7",
    "fim_s04e08": "3a463dd8573a4eb51dabd2efb28cd099681e110e",
    "fim_s04e09": "de37b8d59676f5f598ea6bda080077a371fbf601",
    "fim_s04e10": "9f40be459463d5513ba274189548c2b0b3552d36",
    "fim_s04e11": "bfa1dc10e4c64bdefe2c7ebe0fc2ffb9f4f1e443",
    "fim_s04e12": "13adfc2233f9a005b6bf708e7abd0b93faba828b",
    "fim_s04e13": "3fc37f27939d951f313111a9f792715731ed059d",
    "fim_s04e14": "88a29e567214de2c72b8b5a11466ea8f2e2b5c96",
    "fim_s04e15": "2728093f445fd3a21d0ac86598cf07f9b20fc45d",
    "fim_s04e16": "9f720280324c0d491fc75f179d69ca4965f46821",
    "fim_s04e17": "77437fa49ab73a880859c76ee4b6b522e60e5b5e",
    "fim_s04e18": "ee79772dd96bb4cf78e53879168b52044a80f83a",
    "fim_s04e19": "3a93bca017d5fe1f28a1f89c10df0abc49a33e42",
    "fim_s04e20": "8481897fa44d02624b3a8799ceba22a1b9087060",
    "fim_s04e21": "74f6f18d61e52673b99ba1c1379fcad2a1125899",
    "fim_s04e22": "cec016654cdd2f7c73864229f54760ea20b86c8a",
    "fim_s04e23": "1f894ae6cf86a865988b72b3d1a5060cf2f89a86",
    "fim_s04e24": "c6a4fe51123ba3ce0b0716df21e9989e8b555ade",
    "fim_s04e25": "204666928675f6b9bff9e92ef3b2176099180e9b",
    "fim_s04e26": "47de6d5d70bb61fc746a47d28fc53854d2be441b",
    "fim_s05e01": "2cafa7cc6ca0414c294c824d63c3a433e07b2f13",
    "fim_s05e02": "69260d52ed666d0dc97544ed5019e1914d674c53",
    "fim_s05e03": "5c7db95c1f78f2e7425096b67ce534fc872e6618",
    "fim_s05e04": "91da34c761324628156d1db94b5a06b77a327c41",
    "fim_s05e05": "ee74a927b5a5419fe9e851a132fe2d814f699f10",
    "fim_s05e06": "84d7d6c7cce9a4cca502d9c8a24ed0be862ea702",
    "fim_s05e07": "2c128bf9d45e75ffddd452ac2f20abd553203641",
    "fim_s05e08": "2954354ce57c7fedd61d372e8fa908731e3407bb",
    "fim_s05e09": "317ac1df8d316d175fca6e1715bb9f97cfc1ca13",
    "fim_s05e10": "f977396916f65a96ca0aa40fee7345d3ce0d34a8",
    "fim_s05e11": "1df05e8363c41ba68f5eaa2af0c22e29998d066d",
    "fim_s05e12": "bc08072dc6be4e21dae03db1464ae0b83f657a7f",
    "fim_s05e13": "674758bd23d8ba73d14858580b60fa5062e28fe0",
    "fim_s05e14": "047799ab041a30d774938d0dc2672c34935371f8",
    "fim_s05e15": "66ec2eb2ce01dc382cc32c9c6530374e8a96a938",
    "fim_s05e16": "4bc308416d420583a69642bfa83c8235fb84e1e1",
    "fim_s05e17": "8fe15d113baf7a5e140f53729aa11a4722aaec64",
    "fim_s05e18": "9ff2bf125cf6a07af325cb5cbba3d17b2579c498",
    "fim_s05e19": "af9a63a629b1fc154e600ab058878457bdc799e1",
    "fim_s05e20": "ea0635bf7757e53300a07d56887e4b1857cde93c",
    "fim_s05e21": "3fa37e113e81c54667db87204d68958cb4dfb50f",
    "fim_s05e22": "b5f54f2e4736b4e1be4d537d8d0edaa219ca16c7",
    "fim_s05e23": "40bce23e28e12686b667c431072f925b01bdf817",
    "fim_s05e24": "f08b14a1e6e41dccc4315aade503ed91ce2f71b3",
    "fim_s05e25": "85272b7929fa219f503ca08637f04b52458b2636",
    "fim_s05e26": "abe7acfacc78bba55e4da71eab592383e8c2010d",
    "fim_s06e01": "bb554e4b036f19bfe98b0e11db71732204e71747",
    "fim_s06e02": "9bcd6ba7d6da353e49da88ee9702a1c9e7b58f9d",
    "fim_s06e03": "44f278c1c3b1642f92dae0f1dc9d1869cb5edd3e",
    "fim_s06e04": "a23fc5a9b0171265d4c37debd3e6888b7d5cfadd",
    "fim_s06e05": "717c3c0a63515266079cafe0d0076cedf3eeb2c9",
    "fim_s06e06": "0c7f49103a41190b115f8d4d4a4134c10d03c441",
    "fim_s06e07": "dc1011e5ee80a45544ee9d9c5edd6bca2e0b9b10",
    "fim_s06e08": "70ebfee1498e73c6e0432e279cda32efc6c611d7",
    "fim_s06e09": "0ecdf3735a03bb49075e94221e840815e960e736",
    "fim_s06e10": "eac675ef07f63907353d758cc5dccf82c75bcf9c",
    "fim_s06e11": "43bd3f861e85bc7e21dc4523e112d82a3193edd0",
    "fim_s06e12": "cee4185a9957ecbd00b6b6f374fa56715ef04fcd",
    "fim_s06e13": "e040a7ec92cf7139a415a5e27bff994c8604c320",
    "fim_s06e14": "b1f071de18a25d9ee6f70674ff3c00379ed44e09",
    "fim_s06e15": "75473364424e5b4dd91e918657e41b5d4e7f0b61",
    "fim_s06e16": "a59a451317257474359cc85e523892e94c2f393c",
    "fim_s06e17": "0bd07609e7209aac921e21d9f33f6a8b16172d65",
    "fim_s06e18": "3d16b21da67c674735b61adddb831a0e2240d3b9",
    "fim_s06e19": "3594101ff21359203054973ead72905fd09b5680",
    "fim_s06e20": "562c8a5c548deef2a915c8fdd9f3ab069e1222ae",
    "fim_s06e21": "38a558ff7551b56511cb9644dfe2ceb7b0c635ec",
    "fim_s06e22": "5735868115acbe5c18d0370592eeff689402a3b3",
    "fim_s06e23": "9a40a8b6b6c304d286f34d2cee80535dc46c618a",
    "fim_s06e24": "8987aaca1bb4395673093c28ec00023b65abf451",
    "fim_s06e25": "f33e490c411fe249d90be1e23b455f422bbfea7d",
    "fim_s06e26": "70a737ebe36d85801294b2d22a462bf41a211a77",
    "fim_s07e01": "unknown",
    "fim_s07e02": "unknown",
    "fim_s07e03": "unknown",
    "fim_s07e04": "unknown",
    "fim_s07e05": "unknown",
    "fim_s07e06": "unknown",
    "fim_s07e07": "unknown",
    "fim_s07e08": "unknown",
    "fim_s07e09": "unknown",
    "fim_s07e10": "unknown",
    "fim_s07e11": "unknown",
    "fim_s07e12": "unknown",
    "fim_s07e13": "unknown",
    "fim_s07e14": "unknown",
    "fim_s07e15": "unknown",
    "fim_s07e16": "unknown",
    "fim_s07e17": "unknown",
    "fim_s07e18": "unknown",
    "fim_s07e19": "unknown",
    "fim_s07e20": "unknown",
    "fim_s07e21": "unknown",
    "fim_s07e22": "unknown",
    "fim_s07e23": "unknown",
    "fim_s07e24": "unknown",
    "fim_s07e25": "unknown",
    "fim_s07e26": "unknown",
    "fim_s08e01": "unknown",
    "fim_s08e02": "unknown",
    "fim_s08e03": "unknown",
    "fim_s08e04": "unknown",
    "fim_s08e05": "unknown",
    "fim_s08e06": "unknown",
    "fim_s08e07": "unknown",
    "fim_s08e08": "unknown",
    "fim_s08e09": "unknown",
    "fim_s08e10": "unknown",
    "fim_s08e11": "unknown",
    "fim_s08e12": "unknown",
    "fim_s08e13": "unknown",
    "fim_s08e14": "unknown",
    "fim_s08e15": "unknown",
    "fim_s08e16": "unknown",
    "fim_s08e17": "unknown",
    "fim_s08e18": "unknown",
    "fim_s08e19": "unknown",
    "fim_s08e20": "unknown",
    "fim_s08e21": "unknown",
    "fim_s08e22": "unknown",
    "fim_s08e23": "unknown",
    "fim_s08e24": "unknown",
    "fim_s08e25": "unknown",
    "fim_s08e26": "unknown",
    "fim_s09e01": "unknown",
    "fim_s09e02": "unknown",
    "fim_s09e03": "unknown",
    "fim_s09e04": "unknown",
    "fim_s09e05": "unknown",
    "fim_s09e06": "unknown",
    "fim_s09e07": "unknown",
    "fim_s09e08": "unknown",
    "fim_s09e09": "unknown",
    "fim_s09e10": "unknown",
    "fim_s09e11": "unknown",
    "fim_s09e12": "unknown",
    "fim_s09e13": "unknown",
    "fim_s09e14": "unknown",
    "fim_s09e15": "unknown",
    "fim_s09e16": "unknown",
    "fim_s09e17": "unknown",
    "fim_s09e18": "unknown",
    "fim_s09e19": "unknown",
    "fim_s09e20": "unknown",
    "fim_s09e21": "unknown",
    "fim_s09e22": "eec65346f3f84a2200a5e5918e89d91a2a549a29",
    "fim_s09e23": "1426f4afd21cc478d175cc89d342d4cb42d358b7",
    "fim_s09e24": "14ac91b7eda5698feed15ec53084aee5cf0fb571",
    "fim_s09e25": "60cbea49cec1cd14c94921c0990a42df754abf51",
    "fim_s09e26": "9b2d98c2eb5e336a97e49b3e35bffee39238414c",
    "eqg_original_movie": "5ecf9491a5808e7b32accabdf7c57273ad197192",
    "eqg_friendship_games": "31097404640cced077169c41e7cdf925ced9f165",
    "eqg_legend_of_everfree": "3104471033ef47b7cd303812dfeb720bd0d63f8a",
    "eqg_rainbow_rocks": "ff4e528045e846f2221229a3f38bababd5c0b733",
}
| 2.078125 | 2 |
jpgrep/test/test_util.py | momijiame/jpgrep | 2 | 12759657 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import sys
import pickle
import nose
from nose.tools.trivial import eq_
from nose.tools.trivial import ok_
from jpgrep.util import binary2unicode
from jpgrep.util import FileObjectWrapper
from jpgrep.util import ByteWrapper
class Test_binary2unicode(object):
    def test(self):
        """Decode a UTF-8 byte string back into the original unicode text."""
        original = u'吾輩は猫である'
        encoded = original.encode('utf-8')
        decoded = binary2unicode(encoded)
        eq_(decoded, original)
class Test_FileObjectWrapper(object):
    """Tests for FileObjectWrapper: construction from a path, an open file
    object and sys.stdin, plus pickle round-trips."""
    def test_str(self):
        """Build a wrapper from a filesystem path."""
        wrapper = FileObjectWrapper('/dev/null')
        eq_(wrapper.name, '/dev/null')
        with wrapper.file as f:
            f.read(1)
    def test_file(self):
        """Build a wrapper from an already-open file object."""
        f = open('/dev/null')
        wrapper = FileObjectWrapper(f)
        eq_(wrapper.name, '/dev/null')
        with wrapper.file as f:
            f.read(1)
    def test_std(self):
        """Build a wrapper from sys.stdin."""
        wrapper = FileObjectWrapper(sys.stdin)
        eq_(wrapper.name, '<stdin>')
        ok_(hasattr(wrapper.file, 'read'))
        with wrapper.file as _:
            pass
    def test_pickle(self):
        """Pickle and unpickle a path-based wrapper; it must still open."""
        wrapper = FileObjectWrapper('/dev/null')
        binary = pickle.dumps(wrapper)
        restored_object = pickle.loads(binary)
        eq_(restored_object.name, '/dev/null')
        with restored_object.file as f:
            f.read(1)
    def test_pickle_std(self):
        """Pickle round-trip of a sys.stdin-based wrapper."""
        wrapper = FileObjectWrapper(sys.stdin)
        binary = pickle.dumps(wrapper)
        restored_object = pickle.loads(binary)
        ok_(hasattr(restored_object.file, 'read'))
    def test_pickle_unicode_file(self):
        """Extract bytes from a file opened in text mode."""
        # NOTE(review): despite the name, no pickling happens here -- this
        # checks ByteWrapper + FileObjectWrapper composition; confirm intent.
        message = u'こんにちは、世界'
        file_ = io.StringIO(message)
        byte_wrapper = ByteWrapper(file_)
        file_wrapper = FileObjectWrapper(byte_wrapper)
        with file_wrapper.file as f:
            binary = f.read()
        expect = message.encode(encoding='utf-8')
        eq_(expect, binary)
class Test_ByteWrapper(object):
    def test(self):
        """Read UTF-8 bytes out of a text-mode file-like object."""
        message = u'こんにちは、世界'
        file_ = io.StringIO(message)
        wrapper = ByteWrapper(file_)
        with wrapper as f:
            data = f.read()
        expect = message.encode('utf-8')
        eq_(data, expect)
# Allow running this module's tests directly with nose (verbose, no capture).
if __name__ == '__main__':
    nose.main(argv=['nosetests', '-s', '-v'], defaultTest=__file__)
| 2.625 | 3 |
tests/v100/teest_rpc_edition.py | octabytes/quran | 0 | 12759658 | <filename>tests/v100/teest_rpc_edition.py
import grpc
import quran.endpoints.grpc.entity_pb2 as entity_proto
import quran.endpoints.grpc.shared_pb2 as shared_entity
import quran.endpoints.grpc.edition_pb2_grpc as edition_rpc
# Module-level gRPC channel/stub shared by all tests below; assumes an
# Edition service is already listening on localhost:50051 (insecure, test-only).
channel = grpc.insecure_channel("localhost:50051")
stub = edition_rpc.EditionStub(channel)
def _check_created_edition(res, edition, expected_type):
    """Assert a CreateEdition response echoes the request fields.

    Name/translator/format are expected back title-cased by the service;
    id, language and direction are echoed verbatim.
    """
    assert res.data.edition.id == edition.id
    assert res.data.edition.language == edition.language
    assert res.data.edition.name == edition.name.title()
    assert res.data.edition.translator == edition.translator.title()
    assert res.data.edition.type == expected_type
    assert res.data.edition.format == edition.format.title()
    assert res.data.edition.direction == edition.direction


def test_create_edition():
    """Create three editions (two Arabic, one Translation) and verify each
    response; the original repeated the same seven assertions three times."""
    edition = entity_proto.EditionEntity(id='edition-1', language='en', name='edition-name-1',
                                         translator='edition-english-name-1', type='Arabic', format='format-1',
                                         direction='direction-1')
    _check_created_edition(stub.CreateEdition(edition), edition, 'Arabic')

    # Second insert
    edition = entity_proto.EditionEntity(id='edition-2', language='en', name='edition-name-1',
                                         translator='edition-english-name-1', type='Arabic', format='format-2',
                                         direction='direction-2')
    _check_created_edition(stub.CreateEdition(edition), edition, 'Arabic')

    # Third insert with type Translation
    edition = entity_proto.EditionEntity(id='edition-3', language='en', name='edition-name-3',
                                         translator='edition-english-name-3', type='Translation', format='format-1',
                                         direction='direction-1')
    _check_created_edition(stub.CreateEdition(edition), edition, 'Translation')
def test_get_all_edition():
    """Stream every edition; each must be English and at least 3 must exist."""
    response = stub.GetAll(shared_entity.EmptyMessage())
    seen = 0
    for item in response.data.edition:
        assert item.language in ['en']
        seen += 1
    assert seen >= 3
def test_find_edition_by_id():
    """Look up a single edition by its id and check the echoed fields."""
    response = stub.FindEditionById(shared_entity.IDRequest(id='edition-1'))
    found = response.data.edition
    assert found.id == 'edition-1'
    assert found.name == 'Edition-Name-1'
def test_find_edition_by_language():
    """Every edition matching a language filter reports that language."""
    stream = stub.FindEditionByLanguage(shared_entity.NameRequest(name='en'))
    assert all(item.language == 'en' for item in stream.data.edition)
def test_find_edition_by_name():
    """Name lookup is case-normalized to title case by the service."""
    stream = stub.FindEditionByName(shared_entity.NameRequest(name='edition-name-1'))
    assert all(item.name == 'Edition-Name-1' for item in stream.data.edition)
def test_find_edition_by_translator():
    """Translator lookup is case-normalized to title case by the service."""
    stream = stub.FindEditionByTranslator(shared_entity.NameRequest(name='edition-english-name-1'))
    assert all(item.translator == 'Edition-English-Name-1' for item in stream.data.edition)
def test_find_edition_by_type():
    """All editions returned for a type filter carry that type."""
    stream = stub.FindEditionByType(shared_entity.NameRequest(name='Arabic'))
    assert all(item.type == 'Arabic' for item in stream.data.edition)
def test_find_edition_by_format():
    """Format lookup is case-normalized to title case by the service."""
    stream = stub.FindEditionByFormat(shared_entity.NameRequest(name='format-1'))
    assert all(item.format == 'Format-1' for item in stream.data.edition)
| 2.203125 | 2 |
src/hello_client.py | JonReinhold/Rube-GoldBerg-Hello-World | 0 | 12759659 | import socket
# Resolve the loopback host and use the fixed demo port of the echo server.
ip = socket.gethostbyname('localhost.localdomain')
port = 10000
buffer_size = 1024

# Read the payload to send; sockets need bytes, so encode once here.
with open("payload.c", "r") as file:
    message = file.read().encode()

# A context manager guarantees the socket is closed even if connect/send/recv
# raises (the original closed it manually and leaked it on error).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((ip, port))
    s.send(message)
    data = s.recv(buffer_size)

# Decode the reply properly instead of str(bytes) plus slicing off "b'...'",
# which mangled quotes, newlines and any non-ASCII escape sequences.
print(data.decode())
| 2.65625 | 3 |
tokenizer_tools/transform/conllz_to_conll.py | lanSeFangZhou/tokenizer_tools | 0 | 12759660 | from tokenizer_tools.conllz.iterator_reader import read_conllz_iterator
from tokenizer_tools.conll.writer import write_conll
def conllz_to_conll(conllz_file, conll_file):
    """Convert CoNLL-Z input to CoNLL format.

    Reads sentences lazily from ``conllz_file`` and writes each sentence's
    word lines paired with its first attribute column to ``conll_file``.
    """
    sentence_iterator = read_conllz_iterator(conllz_file)
    # Build (word_lines, first-attribute) pairs; a comprehension replaces
    # the original manual append loop.
    conll_data = [
        (sentence.word_lines, sentence.get_attribute_by_index(0))
        for sentence in sentence_iterator
    ]
    write_conll(conll_data, conll_file)
| 3.046875 | 3 |
make_images.py | TactileUniverse/3D-Printed-Galaxy-Software | 3 | 12759661 | #!/usr/bin/env pythonw
import numpy as np
from astropy.visualization import stretch, interval
from astropy.io import fits
from astropy import wcs
from reproject import reproject_interp
from matplotlib import pyplot as plt
def scaleImage(image, a=1, stretch_type='asinh'):
    """Stretch *image* for display, clipping to the 10--99.95 percentile range.

    Parameters
    ----------
    image : array-like
        Input pixel data.
    a : float
        Softening parameter for the 'log' and 'asinh' stretches.
    stretch_type : {'log', 'asinh', 'sqrt'}
        Which astropy stretch to apply.

    Raises
    ------
    ValueError
        For an unknown ``stretch_type`` (the original fell through and
        crashed later with UnboundLocalError).
    """
    region = interval.AsymmetricPercentileInterval(10., 99.95)
    if stretch_type == 'log':
        scale = stretch.LogStretch(a=a)
    elif stretch_type == 'asinh':
        scale = stretch.AsinhStretch(a=a)
    elif stretch_type == 'sqrt':
        scale = stretch.SqrtStretch()
    else:
        raise ValueError('unknown stretch_type: {0!r}'.format(stretch_type))
    # Composing stretch + interval normalizes to [0, 1] then stretches.
    image_scaled = (scale + region)(image)
    return image_scaled
def removeNaN(data):
    """Zero out every non-finite (NaN / +-inf) entry of *data* in place."""
    data[~np.isfinite(data)] = 0
def make_images(base, index_cut=1300, filters='gri', gzip=False, **kwargs):
    """Write one grayscale PNG per filter plus a 3-band false-color PNG.

    base: filename prefix; each input must be named '{base}-{filter}.fits'.
    index_cut: number of pixels cropped from every image edge (array index).
    filters: three filter letters, mapped to blue/green/red in that order.
    gzip: if True, read '{name}.fits.gz' instead of '{name}.fits'.
    kwargs: forwarded to scaleImage (e.g. a=, stretch_type=).
    """
    hdus = []
    images_scaled = []
    for fdx, filt in enumerate(filters):
        file_name = '{0}-{1}.fits'.format(base, filt)
        if gzip:
            file_name += '.gz'
        hdu = fits.open(file_name)
        # Crop the data and keep the WCS consistent with the crop.
        w = wcs.WCS(hdu[0].header)
        newf = fits.PrimaryHDU()
        newf.data = hdu[0].data[index_cut:-index_cut, index_cut:-index_cut]
        newf.header = hdu[0].header
        newf.header.update(w[index_cut:-index_cut, index_cut:-index_cut].to_header())
        hdus.append(newf)
        if fdx > 0:
            scidata, footprint = reproject_interp(newf, hdus[0].header)
        # NOTE(review): the reprojected result above is immediately
        # overwritten here, so bands are never actually aligned to the
        # first band's WCS -- confirm whether this line should be inside
        # an `else:` (or removed) so the reprojection takes effect.
        scidata = newf.data
        scidata[scidata < 0] = 0
        image = scaleImage(scidata, **kwargs)
        removeNaN(image)
        images_scaled.append(image)
        plt.imsave('{0}_{1}_{2}.png'.format(base, filt, kwargs.get('stretch_type', 'asinh')), image, cmap='Greys_r', origin='lower')
    # Stack the three scaled bands into RGB (reverse filter order) and clip.
    RGB_image = np.zeros([images_scaled[0].shape[0], images_scaled[0].shape[1], 3])
    RGB_image[:, :, 0] = images_scaled[2]
    RGB_image[:, :, 1] = images_scaled[1]
    RGB_image[:, :, 2] = images_scaled[0]
    RGB_image[RGB_image > 1] = 1
    RGB_image[RGB_image < 0] = 0
    plt.imsave('{0}_{1}_{2}.png'.format(base, filters, kwargs.get('stretch_type', 'asinh')), RGB_image, origin='lower')
# CLI entry point: parse arguments and build the images.
# Fixes user-facing help-text typos: "corp" -> "crop", "streact" -> "stretch".
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Create single band and false color images from fits files'
    )
    parser.add_argument(
        'base_name',
        type=str,
        help='the base name of the fits files (note: all files must be named `{base_name}-{filter_letter}`)'
    )
    parser.add_argument(
        '-c',
        '--crop',
        type=int,
        default=1,
        help='an integer used to crop the fits images (by index of array)'
    )
    parser.add_argument(
        '-f',
        '--filters',
        type=str,
        default='gri',
        choices=['gri', 'rbi', 'ugr'],
        help='a three letter string representing the filters contained in each fits file'
    )
    parser.add_argument(
        '-a',
        type=float,
        default=0.1,
        help='the `a` parameter used in the stretch function'
    )
    parser.add_argument(
        '-s',
        '--stretch',
        type=str,
        default='asinh',
        choices=['asinh', 'log', 'sqrt'],
        help='the type of stretch to use for the fits image'
    )
    parser.add_argument(
        '-g',
        '--gzip',
        action='store_true',
        help='use this flag if the input files are gzipped'
    )
    args = parser.parse_args()
    make_images(
        args.base_name,
        index_cut=args.crop,
        filters=args.filters,
        gzip=args.gzip,
        a=args.a,
        stretch_type=args.stretch
    )
| 2.125 | 2 |
modules/models/conv2dretina.py | janko-petkovic/retinet | 0 | 12759662 | <filename>modules/models/conv2dretina.py
# A custom conv2d mimicking the retinic color pathway segregation:
# while L and M pipelines interact with each other (in an antagonistic
# center-surround fashion in the human visual system), the S route
# is kept separate, following the dedicated koniocellular pathway.
# The module is basically a wrapper of a
# nn.Conv2D(in_channels = 3,
# out_channels = 3,
# kernel_size = xxxxxx, <- you can choose how big your
# receptive field is
# stride = 1,
# padding = int((kernel_size-1)/2))
# where the first two outputs don't see the third input and the third
# output doesn't see the first two inputs.
# CAVEAT: to train this module it is necessary to use the custom Trainer
# rememberting the retitrain = True flag. If not the segregations will not
# be considered during parameter update.
import torch
import torch.nn as nn
class Conv2dRetina(nn.Module):
    """3-channel conv layer mimicking retinal color-pathway segregation.

    Wraps ``nn.Conv2d(3, 3, kernel_size, stride=1, padding=(kernel_size-1)//2)``
    so spatial dimensions are preserved ("same" padding).

    NOTE(review): the pathway-splitting call (_split_color_pathways) is
    currently commented out in __init__, so no segregation is applied --
    confirm whether it should be re-enabled.
    """

    def __init__(self, kernel_size):
        """kernel_size: receptive-field size; must be odd for same padding."""
        super().__init__()
        # (kernel_size - 1) / 2 is only an integer for odd kernels.  The
        # original raised in exactly this even-kernel case but with a message
        # claiming the kernel "must be even"; fixed to say odd, and raise
        # ValueError (a subclass of Exception, so existing handlers still work).
        if kernel_size % 2 == 0:
            raise ValueError("Kernel size must be odd")
        padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(in_channels=3,
                              out_channels=3,
                              kernel_size=kernel_size,
                              stride=1,
                              padding=padding)
        #self._split_color_pathways(kernel_size)

    # different cones have different pathways: L and M interact
    # while S cones are segregated to the koniocellular pathway
    def _split_color_pathways(self, kernel_size):
        """Zero cross-channel weights: outputs 0/1 ignore input channel 2
        (S cones); output 2 ignores input channels 0/1 (L/M cones)."""
        for i, cube in enumerate(self.conv.weight):
            if i != 2:
                cube.data[2] = torch.zeros(1, kernel_size, kernel_size)
            else:
                cube.data[:2] = torch.zeros(2, kernel_size, kernel_size)

    def forward(self, x):
        """Apply the 3->3 convolution; (N, 3, H, W) -> (N, 3, H, W)."""
        return self.conv(x)
# testing section
# testing section
def test(channels):
    """Smoke-test: build a Conv2dRetina and print its layer summary.

    NOTE(review): ``channels`` is passed as Conv2dRetina's *kernel_size*,
    the locally-built ``input`` tensor is never fed to the model, and the
    summary always uses a (1, 3, 28, 28) input -- confirm these are intended.
    """
    from pytorch_model_summary import summary  # third-party, imported lazily
    model = Conv2dRetina(channels)
    input = torch.zeros(1,channels,28,28)
    print("\n","-"*70)
    print(f"Input shape: {input.shape}")
    print(summary(model,
                  torch.zeros(1,3,28,28),
                  show_input=False))
# CLI: `python conv2dretina.py <kernel_size>` runs the smoke test above.
if __name__ == "__main__":
    import sys
    channels = int(sys.argv[1])
    test(channels)
    print("\nTesting successful!\n")
| 3.109375 | 3 |
integration-tests/bin_command.py | luispedro/SemiBin | 25 | 12759663 | import os
import subprocess
# Test different input formats
for ifile, odir in [
('input.fasta', 'output_bin_fa'),
('input.fasta.gz', 'output_bin_gz'),
('input.fasta.bz2', 'output_bin_bz2'),
('input.fasta.xz', 'output_bin_xz'),
]:
odir = f'test-outputs/{odir}'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--model', 'test/bin_data/model.h5',
'-i', f'test/bin_data/{ifile}',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(f'{odir}/output_bins')) > 0
assert len(os.listdir(f'{odir}/output_recluster_bins')) > 0
ifile = 'input.fasta'
odir = 'test-outputs/no_recluster'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--no-recluster',
'--model', 'test/bin_data/model.h5',
'-i', f'test/bin_data/{ifile}',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(f'{odir}/output_bins')) > 0
assert not os.path.exists(f'{odir}/output_recluster_bins')
# Different pretrained models
for env,odir in [
('human_gut', 'output_human_gut'),
('dog_gut', 'output_dog_gut'),
('ocean', 'output_ocean'),
]:
odir = f'test-outputs/{odir}'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--environment', env,
'-i', 'test/bin_data/input.fasta.xz',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(odir+'/output_bins')) > 0
assert len(os.listdir(odir+'/output_recluster_bins')) > 0
| 1.898438 | 2 |
Exercicios/exe091.py | EmersonLCruz/Python | 0 | 12759664 | <gh_stars>0
# Exercise: four players each roll a die with random results.  Store the
# results in a dictionary, then print a ranking sorted so the player with
# the highest roll wins.
from random import randint
jogadores = {'j1':randint(1,6),'j2':randint(1,6),'j3':randint(1,6),'j4':randint(1,6)}
aux = 0
print('='*5,'Jogadores Lançam os dados','='*5)
for i,j in enumerate(jogadores):
    print(f'O jogador {i+1} tirou {jogadores[j]}')
print('='*10,'Classificação','='*10)
for i in sorted(jogadores, key = jogadores.get, reverse=True):# sort player names by rolled value, descending
    aux += 1
    print(f'{aux}º posição {i} com resultado {jogadores[i]}')
| 3.46875 | 3 |
hutch_python/options.py | cristinasewell/hutch-python | 0 | 12759665 | <reponame>cristinasewell/hutch-python
"""
This module defines options and settings the user may opt to change.
"""
from pcdsdevices.interface import set_engineering_mode
def set_default_options():
    """Apply hutch-python's default user-facing option values."""
    # Default to verbose (non-engineering) tab-completion behavior.
    set_engineering_mode(False)
def load_options(cache):
    """Register user-tweakable options in the session namespace.

    cache: the hutch-python namespace loader; called once to expose the
    callable and once via ``.doc`` to attach its help text.  Defaults are
    applied immediately afterwards.
    """
    cache(set_engineering_mode=set_engineering_mode)
    cache.doc(set_engineering_mode='Tab interface verbosity settings.')
    set_default_options()
| 2.140625 | 2 |
camomilla/models/__init__.py | lotrekagency/camomilla | 4 | 12759666 | <filename>camomilla/models/__init__.py
from .article import *
from .category import *
from .content import *
from .media import *
from .page import *
from .tag import *
| 1.15625 | 1 |
ampel/aux/filter/PrimitiveTypeArrayFilter.py | AmpelProject/Ampel-core | 5 | 12759667 | <filename>ampel/aux/filter/PrimitiveTypeArrayFilter.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/aux/filter/PrimitiveTypeArrayFilter.py
# License : BSD-3-Clause
# Author : vb <<EMAIL>>
# Date : 14.01.2020
# Last Modified Date: 18.06.2020
# Last Modified By : vb <<EMAIL>>
from typing import List, Sequence, TypeVar
from ampel.model.aux.FilterCriterion import FilterCriterion
from ampel.aux.filter.AbsLogicOperatorFilter import AbsLogicOperatorFilter
T = TypeVar("T", int, str, float, str)
class PrimitiveTypeArrayFilter(AbsLogicOperatorFilter[T]):
    """
    Filter a sequence of primitive values by applying each FilterCriterion's
    comparison operator element-wise; logical combination (all_of/any_of) is
    handled by the AbsLogicOperatorFilter base class.

    In []: f = PrimitiveTypeArrayFilter(filters={'operator': '>', 'value': 2})
    In []: f.apply([1, 2, 3, 4])
    Out[]: [3, 4]
    In []: f = PrimitiveTypeArrayFilter(filters={
        'all_of': [
            {'operator': '>', 'value': 2},
            {'operator': '<', 'value': 4}
        ]
    })
    In []: f.apply([1, 2, 3, 4])
    Out[]: [3]
    """

    @staticmethod
    def _apply_filter(args: Sequence[T], f: FilterCriterion) -> List[T]:
        # Keep only elements satisfying f.operator(element, f.value);
        # f.operator is a binary callable (e.g. operator.gt).
        return [s for s in args if f.operator(s, f.value)]
| 2.375 | 2 |
dags/box2lake_sensor.py | shermanflan/airflow-local | 2 | 12759668 | """
## box2lake_sensor.py
Example using Box.com API.
- Demonstrates a Box sensor for file availability before proceeding with ETL.
### References
Box APIs used
- REST: https://developer.box.com/reference/
- Python SDK: https://box-python-sdk.readthedocs.io/en/stable/boxsdk.html
"""
from datetime import datetime, timedelta
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import (
KubernetesPodOperator
)
from airflow.kubernetes.secret import Secret
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from bsh_azure.sensors.box_sensor import BoxSensor, BoxItemType
# Default task arguments applied to every task in the DAG below; the
# commented-out keys document further knobs available per task.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email': ['<EMAIL>'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(seconds=5),
    # 'retry_exponential_backoff': True,
    'queue': 'airq2',
    'catchup': False,
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
    # 'wait_for_downstream': False,
    # 'dag': dag,
    # 'sla': timedelta(hours=2),
    # 'execution_timeout': timedelta(minutes=30),
    # 'on_failure_callback': some_function,
    # 'on_success_callback': some_other_function,
    # 'on_retry_callback': another_function,
    # 'sla_miss_callback': yet_another_function,
    # 'trigger_rule': 'all_success'
}
# DAG: wait for two Box.com files (daily + weekly reports), then run a
# Kubernetes pod that copies them to ADLS, then print the timestamp.
with DAG('box2lake_sensor',
         default_args=default_args,
         description='Example using Box.com api',
         schedule_interval="0 0 * * *", # "0 0 * * *" or "@daily" or timedelta(hours=2)
         start_date=days_ago(1),
         tags=['azure', 'aks', 'box.com']
         ) as dag:
    dag.doc_md = __doc__

    # Poll Box until the daily report file exists (poke every 5s, 10 min max).
    wait_for_box_daily = BoxSensor(
        task_id='wait_for_daily_box_task',
        box_item_path='Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November/Branch Scheduled Hours Breakdown_11_15_2020.xlsx',
        box_item_type=BoxItemType.FILE,
        poke_interval=5,
        timeout=600,
        mode='poke'
    )

    # Poll Box until the weekly report file exists (5 min max).
    wait_for_box_weekly = BoxSensor(
        task_id='wait_for_weekly_box_task',
        box_item_path='Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13/Telephony Usage By Branch 11.13.2020.xlsx',
        box_item_type=BoxItemType.FILE,
        poke_interval=5,
        timeout=300,
        mode='poke'
    )

    # Run the box2adls container in-cluster; configuration is passed via
    # env vars, and secrets are mounted as an env var plus a volume.
    box2adls_pod_task = KubernetesPodOperator(
        task_id="box2adls_pod_task",
        namespace='airflow-tls',
        service_account_name='airflow-rbac',
        name='boxflow',
        image='rkoH1pVL.azurecr.io/box2adls:latest',
        image_pull_policy='Always',
        labels={'name': 'boxflow', 'instance': 'boxflow-pod',
                'version': '1.0.0', 'component': 'batch-service',
                'part-of': 'pods'},
        env_vars={
            "SIMMER": "False",
            "BROKER_URL": "redis://airflow-redis-service:6379/0",
            "BOX_CONFIG": "/opt/airflow/box-sec/box-auth",
            "BOX_FOLDER_PATH": "Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November",
            "BOX_FOLDER_PATH2": "Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13",
            "BOX_FILE_MASK": "Branch Scheduled Hours Breakdown_11_14_2020.xlsx",
            "BOX_FILE_MASK2": "Telephony Usage By Branch 11.13.2020.xlsx",
            "BOX_FILE_RENAME": "Branch Scheduled Hours Breakdown_af-on-k8s.xlsx",
            "WS_PREV_NAME": "PriorMonth",
            "WS_CURR_NAME": "CurrentMonth",
            "WS_NEXT_NAME": "NextMonth",
            "BOX_FILE_RENAME2": "Telephony Usage By Branch_af-on-k8s.xlsx",
            "WS_HIDDEN_NAME": "{0} Tele Stats",
            "WS_HIDDEN_RENAME": "Tele Stats",
            "LAKE_ACCOUNT_NAME": "airflowstoragesandbox",
            # "LAKE_ACCOUNT_KEY": "",
            "LAKE_CONTAINER_NAME": "enterprisedata",
            "LAKE_FOLDER_PATH": "Raw/BOX Reports"
        },
        secrets=[
            Secret(deploy_type='env', deploy_target='LAKE_ACCOUNT_KEY',
                   secret='az-file-secret', key='azurestorageaccountkey'),
            Secret(deploy_type='volume', deploy_target='/opt/airflow/box-sec',
                   secret='box-secret', key=None)
        ],
        resources={
            'request_memory': '200Mi', 'request_cpu': '200m',
            'limit_memory': '2Gi', 'limit_cpu': '2000m'
        },
        in_cluster=True,
        is_delete_operator_pod=True,
        get_logs=True,
        log_events_on_failure=True
        # config_file='/opt/airflow/dags/config/kube.config',
        # NOTE: this will not work until 1.10.13
        # pod_template_file='/opt/airflow/dags/config/aks-geonames.yaml'
    )

    # body = """
    # Log: <a href="{{ ti.log_url }}">Link</a><br>
    # Host: {{ ti.hostname }}<br>
    # Log file: {{ ti.log_filepath }}<br>
    # Mark success: <a href="{{ ti.mark_success_url }}">Link</a><br>
    # """
    #
    # email_task = EmailOperator(
    #     task_id= 'email_task',
    #     to='<EMAIL>',
    #     subject="Test from Airflow: {{ ti.xcom_pull(task_ids='wait_for_box_daily') }}",
    #     html_content=body,
    #     pool='utility_pool',
    # )

    print_date2 = BashOperator(
        task_id='print_date2',
        bash_command="echo {{ ts }}"
    )

    # Both sensors must succeed before the pod runs; timestamp prints last.
    [wait_for_box_daily, wait_for_box_weekly] >> box2adls_pod_task
    box2adls_pod_task >> print_date2
| 2.421875 | 2 |
challenges/left_join/test_left_join.py | seattlechem/data-structures-and-algorithms | 0 | 12759669 | """."""
import pytest
from .hash_table import HashTable as HT
from .left_join import left_join
def test_left_join_true(six_key_ht, five_key_ht):
    """A key present only in the left table keeps its value paired with None."""
    result = left_join(six_key_ht, five_key_ht)
    assert result.get('cost') == (0, None)
def test_both_empty_hash_table():
    """Joining two empty tables yields a table with only empty buckets."""
    hash1 = HT()
    hash2 = HT()
    result = left_join(hash1, hash2)
    # NOTE(review): range(0, 1023) inspects buckets 0..1022 only; if the
    # table actually has 1024 buckets the last one is never checked --
    # confirm the intended bucket count.
    for i in range(0, 1023):
        assert result.buckets[i]._len == 0
def test_value_error(five_key_ht):
    """left_join with a single argument raises ValueError with a clear message."""
    with pytest.raises(ValueError) as err:
        left_join(five_key_ht)
    # Compare against the exception's message: ``err`` is an ExceptionInfo
    # object, so the original ``assert err == '...'`` was always False.
    assert str(err.value) == 'At least one input must be HashTable'
def test_type_error_one(six_key_ht):
    """A non-HashTable second argument raises TypeError with a clear message."""
    with pytest.raises(TypeError) as err:
        left_join(six_key_ht, 15)
    # ``err`` is an ExceptionInfo, so compare the wrapped exception's message
    # (the original ``assert err == '...'`` was always False).
    assert str(err.value) == 'Input must be HashTable.'
| 2.90625 | 3 |
backend/models/metrics.py | anujkumar93/metrics-mvp | 0 | 12759670 | import math
import pytz
import sys
import time
from datetime import date
from . import wait_times, util, arrival_history, trip_times, errors, constants, timetables, routeconfig
import pandas as pd
import numpy as np
# Represents a range of days with a time range within each day.
# RouteMetrics can calculate various statistics over a range.
class Range:
    """A set of dates plus an optional within-day time window.

    Attributes mirror the constructor arguments; a start/end time string of
    None means no filter on that side of the window.
    """
    def __init__(self, dates: list, start_time_str: str, end_time_str: str, tz: pytz.timezone):
        self.dates = dates # list of datetime.date objects
        self.start_time_str = start_time_str # if None, no start time filter
        self.end_time_str = end_time_str # if None, no end time filter
        self.tz = tz
# RouteMetrics allows computing various metrics for a particular route,
# such as headways, wait times, and trip times,
# including over various date and time ranges.
#
# It caches the arrival history and data frames so that the different
# metrics calculations can reuse the same arrivals data without
# needing to reload it from disk each time.
#
class RouteMetrics:
def __init__(self, agency_id, route_id):
self.agency_id = agency_id
self.route_id = route_id
self.arrival_histories = {}
self.data_frames = {}
self.timetables = {}
def get_arrival_history(self, d):
if d in self.arrival_histories:
return self.arrival_histories[d]
print(f'loading arrival history for route {self.route_id} on {d}', file=sys.stderr)
try:
self.arrival_histories[d] = history = arrival_history.get_by_date(self.agency_id, self.route_id, d)
except FileNotFoundError as ex:
print(f'Arrival history not found for route {self.route_id} on {d}', file=sys.stderr)
history = arrival_history.ArrivalHistory(self.agency_id, self.route_id, {});
return history
def get_history_data_frame(self, d, direction_id=None, stop_id=None):
key = f'history_{str(d)}_{stop_id}_{direction_id}'
if key in self.data_frames:
return self.data_frames[key]
history = self.get_arrival_history(d)
print(f'loading data frame {key} for route {self.route_id}', file=sys.stderr)
df = history.get_data_frame(stop_id=stop_id, direction_id=direction_id)
self.data_frames[key] = df
return df
def get_timetable(self, d):
if d not in self.timetables.keys():
self.timetables[d] = timetables.get_by_date(self.agency_id, self.route_id, d)
return self.timetables[d]
def get_timetable_data_frame(self, d, direction_id=None, stop_id=None):
timetable = self.get_timetable(d)
timetable_key = f'timetable_{str(d)}_{stop_id}_{direction_id}'
if timetable_key not in self.data_frames:
self.data_frames[timetable_key] = timetable.get_data_frame(stop_id=stop_id, direction_id=direction_id)
return self.data_frames[timetable_key]
def get_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_wait_time_stats(self, direction_id, stop_id, rng: Range, get_data_frame):
wait_stats_arr = []
for d in rng.dates:
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
df = get_data_frame(d, stop_id=stop_id, direction_id=direction_id)
departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
wait_stats = wait_times.get_stats(departure_time_values, start_time, end_time)
wait_stats_arr.append(wait_stats)
if len(wait_stats_arr) == 1:
return wait_stats_arr[0]
else:
return wait_times.combine_stats(wait_stats_arr)
def get_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'TIME')
def get_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'DEPARTURE_TIME')
def get_scheduled_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'TIME')
def get_scheduled_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'DEPARTURE_TIME')
def _get_count(self, direction_id, stop_id, rng: Range, get_data_frame, time_field):
if stop_id is None:
return None
count = 0
for d in rng.dates:
df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
df = df[df[time_field] >= start_time]
if end_time is not None:
df = df[df[time_field] < end_time]
count += len(df)
return count
def get_departure_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'DEPARTURE_TIME')
def get_arrival_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'TIME')
def _get_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range, time_field):
if stop_id is None:
return None
compared_timetable_arr = []
now = time.time()
for d in rng.dates:
stop_timetable = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
stop_arrivals = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
scheduled_time_values = np.sort(stop_timetable[time_field].values)
actual_time_values = np.sort(stop_arrivals[time_field].values)
comparison_df = timetables.match_schedule_to_actual_times(
scheduled_time_values,
actual_time_values,
early_sec = early_sec,
late_sec = late_sec,
)
comparison_df[time_field] = scheduled_time_values
if len(comparison_df) and comparison_df[time_field].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df[time_field] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df[time_field] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df[time_field] < end_time]
compared_timetable_arr.append(comparison_df)
return pd.concat(compared_timetable_arr)
def get_headway_schedule_deltas(self, direction_id, stop_id, rng: Range):
headway_delta_arr = []
now = time.time()
for d in rng.dates:
timetable_df = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
history_df = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
departure_time_values = np.sort(history_df['DEPARTURE_TIME'].values)
scheduled_departure_time_values = np.sort(timetable_df['DEPARTURE_TIME'].values)
comparison_df = timetables.match_actual_times_to_schedule(
departure_time_values,
scheduled_departure_time_values
)
comparison_df['DEPARTURE_TIME'] = departure_time_values
comparison_df['headway'] = np.r_[np.nan, compute_headway_minutes(departure_time_values)]
comparison_df = comparison_df[np.isfinite(comparison_df['headway'].values) & np.isfinite(comparison_df['closest_scheduled_headway'].values)]
if len(comparison_df) and comparison_df['DEPARTURE_TIME'].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < end_time]
headway_delta = comparison_df['headway'].values - comparison_df['closest_scheduled_headway'].values
headway_delta_arr.append(headway_delta)
return np.concatenate(headway_delta_arr)
def get_scheduled_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_timetable_data_frame)
def get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_history_data_frame)
def _get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range, get_data_frame):
completed_trips_arr = []
if end_stop_id is None:
return None
is_loop = False
route_config = routeconfig.get_route_config(self.agency_id, self.route_id)
if route_config is not None:
if direction_id is not None:
dir_info = route_config.get_direction_info(direction_id)
else:
direction_ids = route_config.get_directions_for_stop(start_stop_id)
dir_info = route_config.get_direction_info(direction_ids[0]) if len(direction_ids) > 0 else None
if dir_info is not None:
is_loop = dir_info.is_loop()
for d in rng.dates:
s1_df = get_data_frame(d, stop_id=start_stop_id, direction_id=direction_id)
s2_df = get_data_frame(d, stop_id=end_stop_id, direction_id=direction_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] < end_time]
completed_trip_times = trip_times.get_completed_trip_times(
s1_df['TRIP'].values,
s1_df['DEPARTURE_TIME'].values,
s2_df['TRIP'].values,
s2_df['TIME'].values,
is_loop = is_loop
)
completed_trips_arr.append(completed_trip_times)
return np.concatenate(completed_trips_arr)
def get_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_headways(self, direction_id, stop_id, rng: Range, get_data_frame):
    """Compute headways (minutes between consecutive departures) at one stop
    over every date in *rng*.

    *get_data_frame* selects the data source (history vs. timetable).
    Returns a flat numpy array of headway values in minutes.
    """
    headway_min_arr = []
    for d in rng.dates:
        df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
        start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
        end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
        # Departure times must be sorted before computing successive gaps.
        departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
        headway_min = compute_headway_minutes(departure_time_values, start_time, end_time)
        headway_min_arr.append(headway_min)
    if not headway_min_arr:
        # np.concatenate raises ValueError on an empty list; an empty date
        # range simply has no headways.
        return np.array([])
    return np.concatenate(headway_min_arr)
def compute_headway_minutes(time_values, start_time=None, end_time=None):
    """Return the gaps, in minutes, between consecutive timestamps.

    *time_values* must be a sorted array of epoch seconds.  Only departures
    in [start_time, end_time) contribute a headway; each headway is measured
    back to the previous departure (which may lie before start_time).
    """
    start_index = 0 if start_time is None else np.searchsorted(time_values, start_time, 'left')
    end_index = len(time_values) if end_time is None else np.searchsorted(time_values, end_time, 'left')
    # Every headway needs a predecessor, so never start at index 0,
    # and keep the window non-inverted.
    start_index = max(start_index, 1)
    end_index = max(end_index, start_index)
    # Pairwise difference between each departure and the one before it.
    return (time_values[start_index:end_index] - time_values[start_index - 1 : end_index - 1]) / 60
| 2.84375 | 3 |
project/data_science/__init__.py | RiccardoNizzolo/prime_num | 1 | 12759671 | from flask.config import Config
import os

# Dotted path of the data-science settings class; overridable via env var.
ds_settings = os.getenv(
    "DS_SETTINGS", "project.config.data_science_config.DsDevelopmentConfig"
)
# Flask Config with no root path; populate it from the configured object.
ds_config=Config(None)
ds_config.from_object(ds_settings)
src/pretix/plugins/banktransfer/mt940.py | abrock/pretix | 1 | 12759672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013, <NAME>
# Copyright (c) 2014-2015, <NAME>
# Copyright (c) 2013-2015, B2CK
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""a parser for MT940 files
"""
__version__ = '0.2'
__all__ = ['MT940', 'rabo_description', 'abn_amro_description',
    'ing_description']

import datetime
import re
from collections import defaultdict, namedtuple
from decimal import Decimal

# Maps a logical section name to the SWIFT field tag(s) that introduce it.
# ':28C:' is the alternate spelling of the statement/sequence number field.
SECTIONS = {
    'begin': [':940:'],
    'statement': [':20:'],
    'account': [':25:'],
    'information': [':28:', ':28C:'],
    'start_balance': [':60F:'],
    'transaction': [':61:'],
    'description': [':86:'],
    'end_balance': [':62F:'],
}
def _parse_date(date):
return datetime.datetime.strptime(date, '%y%m%d').date()
def _parse_amount(amount, sign='C'):
amount = Decimal(amount.replace(',', '.'))
if sign in ('D', 'RC'):
return -amount
return amount
TRANSACTION_RE = re.compile(r"""
(?P<date>\d{6})
(?P<booking>\d{4})?
(?P<sign>D|C|RC|RD)
(?P<code>\w)?? # ING skips this mandatory field
(?P<amount>(\d|,){1,15})
(?P<id>\w{4})
(?P<reference>.{0,34})""", re.VERBOSE)
class MT940(object):
    """Parser for an MT940 bank-statement file.

    Parsing happens in the constructor; the resulting ``Statement``
    namedtuples are collected in ``self.statements``.
    """

    def __init__(self, name):
        """Parse the MT940 file at path *name*."""
        self.statements = []
        # 'rU' universal-newlines mode was removed in Python 3.11; plain
        # text mode already performs newline translation.
        with open(name, 'r') as f:
            values = defaultdict(str)
            transactions = []
            for line in self._readline(f):
                # `.iteritems()` is Python 2 only; also avoid shadowing the
                # *name* parameter with the section name.
                for section_name, sections in SECTIONS.items():
                    if section_name == 'begin':
                        continue
                    for section in sections:
                        if line.startswith(section):
                            if section_name in values and section_name == 'statement':
                                # A new ':20:' tag starts the next statement;
                                # flush the one accumulated so far.
                                self._set_statement(values, transactions)
                            if section_name.endswith('_balance'):
                                values[section_name] = self._get_balance(
                                    line[len(section):])
                            elif section_name == 'transaction':
                                transactions.append(
                                    self._get_transaction(line[len(section):]))
                            elif section_name == 'description':
                                # Attach the ':86:' text as the last element of
                                # the most recent transaction tuple.
                                transactions[-1] = (transactions[-1][:-1]
                                    + (line[len(section):],))
                            else:
                                values[section_name] += line[len(section):]
            if values:
                self._set_statement(values, transactions)

    @staticmethod
    def _readline(f):
        """Yield logical records: continuation lines (not starting with ':'
        or '-') are joined to the previous tag line with newlines."""
        buf = []
        for line in f:
            line = line.strip('\n')
            if buf:
                if (line.startswith(':')
                        or line.startswith('-')):
                    yield '\n'.join(buf)
                    del buf[:]
            buf.append(line)
        if buf:
            yield '\n'.join(buf)

    @staticmethod
    def _get_balance(balance):
        """Parse a :60F:/:62F: balance field: sign, YYMMDD date, currency, amount."""
        date = _parse_date(balance[1:7])
        amount = _parse_amount(balance[10:], balance[0])
        return Balance(date=date, amount=amount, currency=balance[7:10])

    @staticmethod
    def _get_transaction(transaction):
        """Parse a :61: statement line (plus optional second info line)."""
        lines = transaction.splitlines()
        if len(lines) == 1:
            transaction, = lines
            additional_data = None
        else:
            transaction, additional_data = lines
        transaction = TRANSACTION_RE.match(transaction)
        date = _parse_date(transaction.group('date'))
        if transaction.group('booking'):
            # The 4-digit booking date (MMDD) borrows the year from the value date.
            booking = _parse_date(
                transaction.group('date')[:2]
                + transaction.group('booking'))
        else:
            booking = None
        amount = _parse_amount(transaction.group('amount'),
            transaction.group('sign'))
        id_ = transaction.group('id')
        reference = transaction.group('reference')
        # '//' separates the customer reference from the bank's own reference.
        reference, _, institution_reference = reference.partition('//')
        return (date, booking, amount, id_, reference,
            institution_reference, additional_data, '')

    def _set_statement(self, values, transactions):
        """Finalize the accumulated fields into a Statement and reset state."""
        self.statements.append(
            Statement(
                transactions=[Transaction(*t) for t in transactions],
                **values))
        values.clear()
        del transactions[:]
# One parsed statement: header fields plus its list of Transaction tuples.
Statement = namedtuple('Statement', ['statement', 'account', 'information',
    'start_balance', 'transactions', 'end_balance'])
# Opening/closing balance from the :60F:/:62F: fields.
Balance = namedtuple('Balance', ['date', 'amount', 'currency'])
# A single :61: statement line; 'description' holds the following :86: field.
Transaction = namedtuple('Transaction', ['date', 'booking', 'amount', 'id',
    'reference', 'institution_reference', 'additional_data',
    'description'])
def _find_swift_tags(tags, description):
values = {}
for tag, name in tags:
if description.startswith(tag):
description = description[len(tag):]
try:
i = description.index('/')
except ValueError:
i = len(description)
values[name] = description[:i]
description = description[i:]
if not description:
break
return values
# Rabobank's :86: description tags, in the order they appear in the field.
RABO_TAGS = [
    ('/MARF/', 'marf'),
    ('/EREF/', 'eref'),
    ('/PREF/', 'pref'),
    ('/BENM/', 'benm'),
    ('/ORDP/', 'ordp'),
    ('/NAME/', 'name'),
    ('/ID/', 'id'),
    ('/ADDR/', 'addr'),
    ('/REMI/', 'remi'),
    ('/CDTRREFTP//CD/SCOR/ISSR/CUR/CDTRREF/', 'cdtrref'),
    ('/CSID/', 'csid'),
    ('/ISDT/', 'isdt'),
    ('/RTRN/', 'rtrn'),
]
def rabo_description(description):
    "Return a dictionary with Rabobank-specific information parsed from *description*"
    # Continuation lines are joined before tag extraction.
    description = ''.join(description.splitlines())
    return _find_swift_tags(RABO_TAGS, description)
ABN_AMRO_ACCOUNT = re.compile(r"""
^([0-9]{1,3}\.[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,3})""", re.VERBOSE)
ABN_AMRO_GIRO = re.compile(r"""
^GIRO\ +([0-9]+)""", re.VERBOSE)
ABN_AMRO_TAGS = [
('/TRTP/', 'trtp'),
('/IBAN/', 'iban'),
('/BIC/', 'bic'),
('/CSID', 'csid'),
('/NAME/', 'name'),
('/REMI/', 'remi'),
('/EREF/', 'eref'),
('/ORDP//ID/', 'ordp'),
('/BENM//ID/', 'benm'),
]
def abn_amro_description(description):
    "Return a dictionary with ABN AMRO-specific information parsed from *description*"
    description = ''.join(description.splitlines())
    values = {}
    m = ABN_AMRO_ACCOUNT.match(description)
    if m:
        values['account'] = m.group(1).replace('.', '')
    # NOTE(review): a GIRO match overwrites a dotted-account match above;
    # the two forms are presumably mutually exclusive — confirm.
    m = ABN_AMRO_GIRO.match(description)
    if m:
        values['account'] = m.group(1)
    values.update(_find_swift_tags(ABN_AMRO_TAGS, description))
    return values
# Capturing split pattern: re.split keeps the tag names between the values.
ING_TAGS = re.compile(r'/(RTRN|EREF|PREF|MARF|CSID|CNTP|REMI|PURP|ULT[CD])/')
# tag -> (output key, ordered subfield names); an empty subfield list means
# the tag's value is stored as a plain string.
ING_TAGS_DEFINITION = {
    'RTRN': ('rtrn', []),
    'EREF': ('eref', []),
    'PREF': ('pref', []),
    'MARF': ('marf', []),
    'CSID': ('csid', []),
    'CNTP': ('cntp', ['account_number', 'bic', 'name', 'city']),
    'REMI': ('remi', ['code', 'issuer', 'remittance_info']),
    'PURP': ('purp', []),
    'ULTC': ('ultc', ['name', 'id']),
    'ULTD': ('ultd', ['name', 'id']),
}
def ing_description(description):
    "Return a dictionary with ING-specific information parsed from *description*"
    description = ''.join(description.splitlines())
    values = {}
    # re.split with a capturing group yields [prefix, tag, value, tag, value, ...];
    # drop the prefix, then consume (tag, value) pairs by zipping one iterator
    # with itself.
    ing_tags = iter(ING_TAGS.split(description)[1:])
    for tag, tag_value in zip(ing_tags, ing_tags):
        tag_value = tag_value[:-1]  # strip the trailing '/' delimiter
        name, subfields = ING_TAGS_DEFINITION[tag]
        if not subfields:
            values[name] = tag_value
            continue
        values[name] = {}
        if 'name' in subfields or 'remittance_info' in subfields:
            # 'name'/'remittance_info' are free text that may itself contain
            # '/', so split a fixed number of fields from the left before the
            # free-text field and from the right after it, leaving it intact.
            special_tag = 'name' if 'name' in subfields else 'remittance_info'
            tag_idx = subfields.index(special_tag)
            subtags = tag_value.split('/', tag_idx)
            for sf_name, sf_value in zip(subfields[:tag_idx], subtags[:-1]):
                values[name][sf_name] = sf_value
            subtags = subtags[-1].rsplit('/', len(subfields) - tag_idx - 1)
            for sf_name, sf_value in zip(subfields[tag_idx:], subtags):
                values[name][sf_name] = sf_value
        else:
            subtags = tag_value.split('/')
            for sf_name, sf_value in zip(subfields, subtags):
                values[name][sf_name] = sf_value
    return values
| 1.46875 | 1 |
pox/04_learning.py | pedrobellotti/pox | 0 | 12759673 | # Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#ovs-vsctl -- --id=@ft create Flow_Table flow_limit=100 overflow_policy=refuse -- set Bridge br0 flow_tables=0=@ft
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.lib.addresses import IPAddr, EthAddr
log = core.getLogger()

# Learning table: MAC address -> switch port where it was last seen.
tabela_mac = {}
def _handle_ConnectionUp (event):
    """Log when a switch connects to the controller."""
    ####################### MAIN RULES #############################
    # Forwarding rule toward the controller (kept disabled):
    #msgc = of.ofp_flow_mod()
    #msgc.match.in_port = 3
    #msgc.priority = 2
    #msgc.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
    #event.connection.send(msgc)
    # Special-case log for one known datapath (the "UL" switch).
    if (dpidToStr(event.dpid) == '00-e0-4c-2a-33-4f'):
        log.info("Switch UL conectado.")
    else:
        log.info("Switch %s conectado.", dpidToStr(event.dpid))
def _handle_PacketIn (event):
    """Learning-switch packet-in handler.

    Learns the source MAC -> ingress port mapping, then either installs an
    exact-match flow toward a known destination or floods the packet.
    """
    global tabela_mac
    packet = event.parsed  # This is the parsed packet data.
    if not packet.parsed:
        log.warning("Pacote incompleto!")
        return
    packet_in = event.ofp  # The actual ofp_packet_in message.
    # Learn the ingress port for this source MAC if it is new.
    if packet.src not in tabela_mac:
        log.info("Aprendendo: MAC " + str(packet.src) + " esta na porta " + str(packet_in.in_port))
        tabela_mac[packet.src] = packet_in.in_port
    try:
        porta = tabela_mac[packet.dst]  # Destination port (KeyError if unknown)
        log.info(str(packet.dst) + " e um MAC conhecido. Instalando regra: porta " + str(packet_in.in_port) + "->" + str(porta))
        msg = of.ofp_flow_mod()
        if (packet.find('arp')):
            tipo = 0x0806
        else:
            tipo = 0x0800
        msg.match.dl_type = tipo
        msg.match.in_port = packet_in.in_port  # Ingress port
        msg.match.dl_src = packet.src  # Source MAC
        msg.match.dl_dst = packet.dst  # Destination MAC
        # packet.next walks up one layer each time:
        # packet = link layer, packet.next = network layer,
        # packet.next.next = transport layer.  Packets without these layers
        # raise AttributeError here and fall through to the flood branch.
        msg.match.nw_src = packet.next.srcip  # Source IP
        msg.match.nw_dst = packet.next.dstip  # Destination IP
        msg.match.nw_proto = packet.next.protocol  # IP protocol
        msg.match.tp_src = packet.next.next.srcport  # Transport source port
        msg.match.tp_dst = packet.next.next.dstport  # Transport destination port
        msg.priority = 10
        msg.actions.append(of.ofp_action_output(port = porta))  # Output port
        event.connection.send(msg)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; Exception still covers the expected KeyError (unknown
        # destination MAC) and AttributeError (non-IP/non-TCP-UDP payload).
        log.info(str(packet.dst) + " nao e um MAC conhecido, enviando pacote para todos")
        porta = of.OFPP_FLOOD  # Send out every port (of.OFPP_ALL also works)
        msg = of.ofp_packet_out()
        msg.actions.append(of.ofp_action_output(port = porta))
        msg.data = packet_in
        msg.in_port = event.port
        event.connection.send(msg)
def launch ():
    """POX entry point: register the OpenFlow event handlers."""
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
    core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
    log.info("Executando codigo...")
aula02.py | melqrozendo/Projeto-codigo-em-Python | 0 | 12759674 | #Aula02
#operadores aritmetricos
print(5+8)
print(10-5)
print(5*3)
print(17/3)
print(2**3)
#calculando a parte inteira da divisão
print(5//2)
print(5%2) | 3.046875 | 3 |
tests/bugs/core_5705_test.py | reevespaul/firebird-qa | 0 | 12759675 | <filename>tests/bugs/core_5705_test.py
#coding:utf-8
#
# id: bugs.core_5705
# title: Store precision of DECFLOAT in RDB$FIELDS
# decription:
# Checked on LI-T4.0.0.940.
#
# tracker_id: CORE-5705
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 4.0
# resources: None

# No output-normalization substitutions needed for this test.
substitutions_1 = []

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# Create two DECFLOAT domains and read their stored precision back from
# RDB$FIELDS (CORE-5705: DECFLOAT precision must be persisted).
test_script_1 = """
set list on;
set count on;
create domain dm_df16 as decfloat(16);
create domain dm_df34 as decfloat(34);
commit;
select rdb$field_name, rdb$field_precision
from rdb$fields
where rdb$field_name in (upper('dm_df16'), upper('dm_df34'))
order by 1;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
RDB$FIELD_NAME DM_DF16
RDB$FIELD_PRECISION 16
RDB$FIELD_NAME DM_DF34
RDB$FIELD_PRECISION 34
Records affected: 2
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
    """Run the isql script and compare normalized output to the expectation."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| 1.75 | 2 |
depccg/allennlp/dataset/ja_supertagging_dataset.py | masashi-y/depccg | 75 | 12759676 | <filename>depccg/allennlp/dataset/ja_supertagging_dataset.py
import json
import logging
import random
from typing import Dict, List
import numpy
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (
ArrayField,
MetadataField,
SequenceLabelField,
TextField,
)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from depccg import utils
from depccg.tools.ja.data import convert_ccgbank_to_json
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def read_dataset_ccgbank_or_json(file_path: str):
    """Load supertagging instances from a JSON dump or a CCGBank AUTO file.

    Non-JSON files are assumed to be CCGBank trees and are converted on the fly.
    """
    if not utils.is_json(file_path):
        logger.info(f"Reading trees in auto file at: {file_path}")
        json_data = convert_ccgbank_to_json(file_path)
    else:
        logger.info(f"Reading instances from lines in json file at: {file_path}")
        with open(file_path, "r") as data_file:
            json_data = json.load(data_file)
    logger.info(f"loaded {len(json_data)} instances")
    return json_data
@DatasetReader.register("ja_supertagging_dataset")
class JaSupertaggingDatasetReader(DatasetReader):
    """AllenNLP dataset reader for Japanese CCG supertagging/dependency data."""

    def __init__(self, token_indexers: Dict[str, TokenIndexer] = None) -> None:
        super().__init__()
        # Default to plain single-id token indexing when none is configured.
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

    @overrides
    def _read(self, file_path):
        """Yield one Instance per (sentence, (tags, deps)) entry in the file."""
        json_data = read_dataset_ccgbank_or_json(cached_path(file_path))
        for instance in json_data:
            sentence, labels = instance
            tags, deps = labels
            yield self.text_to_instance(sentence, tags, deps)

    @overrides
    def text_to_instance(
        self,
        sentence: str,
        tags: List[str] = None,
        deps: List[int] = None,
        weight: float = 1.0,
    ) -> Instance:
        """Build an Instance from a whitespace-tokenized sentence.

        *tags*/*deps* are the per-token supertags and head indices; both must
        be present for label fields to be attached.  *weight* is a per-instance
        loss weight.
        """
        tokens = [Token(token) for token in sentence.split(" ")]
        token_field = TextField(tokens, self._token_indexers)
        metadata = MetadataField({"words": sentence})
        weight = ArrayField(numpy.array([weight], "f"))
        fields = {
            "words": token_field,
            "metadata": metadata,
            "weight": weight,
        }
        if tags is not None and deps is not None:
            fields["head_tags"] = SequenceLabelField(
                tags, token_field, label_namespace="head_tags"
            )
            fields["head_indices"] = SequenceLabelField(
                deps, token_field, label_namespace="head_indices"
            )
        result = Instance(fields)
        # Log ~1% of instances as a cheap sanity check of the data pipeline.
        if random.random() <= 0.01:
            logger.info(str(result))
        return result
| 2.171875 | 2 |
aries-backchannels/vcx/vcx_backchannel.py | AdamJLemmon/aries-agent-test-harness | 0 | 12759677 | <gh_stars>0
import asyncio
import asyncpg
import functools
import json
import logging
import os
import random
import subprocess
import sys
from timeit import default_timer
from ctypes import cdll
from time import sleep
from aiohttp import (
web,
ClientSession,
ClientRequest,
ClientResponse,
ClientError,
ClientTimeout,
)
from agent_backchannel import AgentBackchannel, default_genesis_txns, RUN_MODE, START_TIMEOUT
from utils import require_indy, flatten, log_json, log_msg, log_timer, output_reader, prompt_loop, file_ext, create_uuid
from storage import store_resource, get_resource, delete_resource, pop_resource, get_resources
from vcx.api.connection import Connection
from vcx.api.credential_def import CredentialDef
from vcx.api.issuer_credential import IssuerCredential
from vcx.api.proof import Proof
from vcx.api.schema import Schema
from vcx.api.utils import vcx_agent_provision
from vcx.api.vcx_init import vcx_init_with_config
from vcx.state import State, ProofState
LOGGER = logging.getLogger(__name__)

MAX_TIMEOUT = 5

# Paths to the agency binary and python sources differ between local runs
# and container runs.
DEFAULT_BIN_PATH = "../venv/bin"
DEFAULT_PYTHON_PATH = ".."

if RUN_MODE == "docker":
    DEFAULT_BIN_PATH = "./bin"
    DEFAULT_PYTHON_PATH = "."
elif RUN_MODE == "pwd":
    DEFAULT_BIN_PATH = "./bin"
    DEFAULT_PYTHON_PATH = "."

# Template config passed to vcx_agent_provision; $-placeholders and the
# agency_url are filled in at runtime (see VCXAgentBackchannel.start_vcx):
# 'agency_url': URL of the agency
# 'agency_did': public DID of the agency
# 'agency_verkey': public verkey of the agency
# 'wallet_name': name for newly created encrypted wallet
# 'wallet_key': encryption key for encoding wallet
# 'payment_method': method that will be used for payments
provisionConfig = {
    'agency_url': 'http://$DOCKERHOST:$AGENCY_PORT',
    'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
    'agency_verkey': '<KEY>',
    'wallet_name': 'faber_wallet',
    'wallet_key': '123',
    'payment_method': 'null',
    'enterprise_seed': '000000000000000000000000Trustee1',
    'protocol_type': '2.0',
    'communication_method': 'aries'
}
def state_text(connection_state):
    """Translate a vcx connection State into the backchannel's state name."""
    state_names = {
        State.OfferSent: "invitation",
        State.RequestReceived: "request",
        State.Unfulfilled: "response",
        State.Accepted: "active",
    }
    if connection_state in state_names:
        return state_names[connection_state]
    # Any other state is reported verbatim.
    return str(connection_state)
class VCXAgentBackchannel(AgentBackchannel):
    """Aries test-harness backchannel backed by libvcx and an indy agency
    sub-process.

    HTTP operations from the harness are translated into vcx Connection API
    calls; live Connection objects are kept in the shared resource store.
    """

    def __init__(
        self,
        ident: str,
        http_port: int,
        admin_port: int,
        genesis_data: str = None,
        # NOTE(review): mutable default; harmless here since it is only
        # forwarded to super().__init__, but worth confirming upstream.
        params: dict = {}
    ):
        super().__init__(
            ident,
            http_port,
            admin_port,
            genesis_data,
            params
        )

    def rewrite_config(self, input_config, output_config, mappings):
        """Substitute our specific config parameters"""
        print("Writing config file:", output_config)
        with open(input_config,"r") as in_file:
            with open(output_config,"w") as out_file:
                config = in_file.read()
                for k, v in mappings.items():
                    config = config.replace(k, v)
                out_file.write(config)

    async def start_vcx(self):
        """Start the agency sub-process, provision an agent/wallet, and
        initialize libvcx with the resulting configuration."""
        payment_plugin = cdll.LoadLibrary('libnullpay' + file_ext())
        payment_plugin.nullpay_init()

        print("Start vcx agency process")
        # http port is the main agency port
        # admin_port and admin_port+1 are used by the agency server
        # we need to rewrite these to the config file
        input_config = "./vcx_agency/agency_config.json.template"
        self.output_config = "./vcx_agency/agency_config.json"
        agency_host = os.getenv("DOCKERHOST") or "host.docker.internal"
        self.rewrite_config(
            input_config,
            self.output_config,
            {
                "$DOCKERHOST": agency_host,
                "$AGENCY_PORT": str(self.http_port),
                "$AGENCY_ADDRESS_1": str(self.admin_port),
                "$AGENCY_ADDRESS_2": str(self.admin_port+1),
            }
        )
        await self.start_vcx_agency()

        print("Provision an agent and wallet, get back configuration details")
        provisionConfig["agency_url"] = "http://localhost:" + str(self.admin_port)
        config = await vcx_agent_provision(json.dumps(provisionConfig))
        config = json.loads(config)
        # Set some additional configuration options specific to faber
        config['institution_name'] = 'Faber'
        config['institution_logo_url'] = 'http://robohash.org/234'
        config['genesis_path'] = 'genesis_txn.txt'
        # libvcx reads the genesis transactions from a file on disk.
        with open(config['genesis_path'], "w") as f_genesis:
            f_genesis.write(self.genesis_data)

        print("Initialize libvcx with new configuration")
        await vcx_init_with_config(json.dumps(config))
        pass

    async def make_agent_POST_request(
        self, op, rec_id=None, data=None, text=False, params=None
    ) -> (int, str):
        """Dispatch a POST from the test harness; only the 'connection'
        topic is supported.  Returns (http_status, json_body)."""
        if op["topic"] == "connection":
            operation = op["operation"]
            if operation == "create-invitation":
                connection_id = create_uuid()

                connection = await Connection.create(connection_id)
                await connection.connect('{"use_public_did": true}')
                invitation = await connection.invite_details(False)

                store_resource(connection_id, "connection", connection)
                connection_dict = await connection.serialize()

                resp_status = 200
                resp_text = json.dumps({"connection_id": connection_id, "invitation": invitation, "connection": connection_dict})

                return (resp_status, resp_text)

            elif operation == "receive-invitation":
                connection_id = create_uuid()

                connection = await Connection.create_with_details(connection_id, json.dumps(data))
                await connection.connect('{"use_public_did": true}')
                connection_state = await connection.update_state()

                store_resource(connection_id, "connection", connection)
                connection_dict = await connection.serialize()

                resp_status = 200
                resp_text = json.dumps({"connection_id": connection_id, "invitation": data, "connection": connection_dict})

                return (resp_status, resp_text)

            elif (operation == "accept-invitation"
                or operation == "accept-request"
                or operation == "remove"
                or operation == "start-introduction"
                or operation == "send-ping"
            ):
                # These operations need no explicit action in vcx; refreshing
                # the connection state is sufficient.
                connection_id = rec_id
                connection = get_resource(rec_id, "connection")
                if connection:
                    # wait for a small period just in case ...
                    await asyncio.sleep(0.1)

                    # make sure we have latest & greatest connection state
                    await connection.update_state()
                    store_resource(connection_id, "connection", connection)
                    connection_dict = await connection.serialize()
                    connection_state = await connection.get_state()

                    resp_status = 200
                    resp_text = json.dumps({"connection_id": rec_id, "state": state_text(connection_state), "connection": connection_dict})

                    return (resp_status, resp_text)

        return (404, '404: Not Found\n\n'.encode('utf8'))

    async def make_agent_GET_request(
        self, op, rec_id=None, text=False, params=None
    ) -> (int, str):
        """Dispatch a GET from the test harness ('status' and 'connection'
        topics).  Returns (http_status, json_body)."""
        if op["topic"] == "status":
            status = 200 if self.ACTIVE else 418
            status_msg = "Active" if self.ACTIVE else "Inactive"
            return (status, json.dumps({"status": status_msg}))

        elif op["topic"] == "connection":
            if rec_id:
                log_msg("Getting connection for", rec_id)
                connection = get_resource(rec_id, "connection")
                if connection:
                    connection_dict = await connection.serialize()
                    connection_state = await connection.get_state()

                    resp_status = 200
                    resp_text = json.dumps({"connection_id": rec_id, "state": state_text(connection_state), "connection": connection_dict})

                    return (resp_status, resp_text)
            else:
                # No record id: list every stored connection.
                log_msg("Getting connections")
                connections = get_resources("connection")
                log_msg(connections)
                ret_connections = []
                for connection_id in connections:
                    connection = connections[connection_id]
                    connection_dict = await connection.serialize()
                    connection_state = await connection.get_state()
                    ret_connections.append({"connection_id": connection_id, "state": state_text(connection_state), "connection": connection_dict})

                resp_status = 200
                resp_text = json.dumps(ret_connections)
                log_msg(resp_status, resp_text)
                return (resp_status, resp_text)

        log_msg("Returning 404")
        return (404, '404: Not Found\n\n'.encode('utf8'))

    async def make_agent_GET_request_response(
        self, topic, rec_id=None, text=False, params=None
    ) -> (int, str):
        """Poll-style GET: refresh the connection state before serializing."""
        if topic == "connection" and rec_id:
            connection = get_resource(rec_id, "connection")
            connection_state = await connection.update_state()
            store_resource(rec_id, "connection", connection)

            resp_status = 200
            connection_dict = await connection.serialize()
            resp_text = json.dumps({"connection_id": rec_id, "connection": connection_dict})

            return (resp_status, resp_text)

        return (404, '404: Not Found\n\n'.encode('utf8'))

    def _process(self, args, env, loop):
        """Spawn the agency sub-process and wire its stdout/stderr into
        background reader threads that forward to handle_output."""
        proc = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            encoding="utf-8",
        )
        loop.run_in_executor(
            None,
            output_reader,
            proc.stdout,
            functools.partial(self.handle_output, source="stdout"),
        )
        loop.run_in_executor(
            None,
            output_reader,
            proc.stderr,
            functools.partial(self.handle_output, source="stderr"),
        )
        return proc

    def get_agent_args(self):
        """Command-line arguments for the agency binary (its config file)."""
        result = [self.output_config,]
        return result

    def get_process_args(self, bin_path: str = None):
        """Full argv for the agency sub-process."""
        #TODO aca-py needs to be in the path so no need to give it a cmd_path
        cmd_path = "indy-dummy-agent"
        if bin_path is None:
            bin_path = DEFAULT_BIN_PATH
        if bin_path:
            cmd_path = os.path.join(bin_path, cmd_path)
        return list(flatten((cmd_path, self.get_agent_args())))

    async def start_vcx_agency(
        self, python_path: str = None, bin_path: str = None, wait: bool = True
    ):
        """Launch the agency sub-process; optionally wait for it to come up."""
        my_env = os.environ.copy()
        python_path = DEFAULT_PYTHON_PATH if python_path is None else python_path
        if python_path:
            my_env["PYTHONPATH"] = python_path

        agent_args = self.get_process_args(bin_path)

        # start agent sub-process
        self.log(f"Starting agent sub-process ...")
        loop = asyncio.get_event_loop()
        self.proc = await loop.run_in_executor(
            None, self._process, agent_args, my_env, loop
        )
        if wait:
            # Give the agency a few seconds to start listening.
            await asyncio.sleep(5.0)

    def _terminate(self):
        """Terminate the agency sub-process; raise if it does not exit promptly."""
        if self.proc and self.proc.poll() is None:
            self.proc.terminate()
            try:
                self.proc.wait(timeout=0.5)
                self.log(f"Exited with return code {self.proc.returncode}")
            except subprocess.TimeoutExpired:
                msg = "Process did not terminate in time"
                self.log(msg)
                raise Exception(msg)

    async def terminate(self):
        """Shut down the sub-process, HTTP client session, and webhook site."""
        loop = asyncio.get_event_loop()
        if self.proc:
            await loop.run_in_executor(None, self._terminate)
        await self.client_session.close()
        if self.webhook_site:
            await self.webhook_site.stop()
async def main(start_port: int, show_timing: bool = False, interactive: bool = True):
    """Fetch genesis transactions, start the backchannel + vcx agent, and
    run until exit (interactive prompt or Ctrl-C)."""
    genesis = await default_genesis_txns()
    if not genesis:
        print("Error retrieving ledger genesis transactions")
        sys.exit(1)

    agent = None
    try:
        agent = VCXAgentBackchannel(
            "vcx", start_port+1, start_port+2, genesis_data=genesis
        )

        # start backchannel (common across all types of agents)
        await agent.listen_backchannel(start_port)

        # TODO start VCX agent sub-process
        await agent.register_did()
        await agent.start_vcx()
        agent.activate()

        # now wait ...
        if interactive:
            async for option in prompt_loop(
                "(X) Exit? [X] "
            ):
                if option is None or option in "xX":
                    break
        else:
            print("Press Ctrl-C to exit ...")
            # NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9
            # (use asyncio.all_tasks()); confirm the targeted Python version.
            remaining_tasks = asyncio.Task.all_tasks()
            await asyncio.gather(*remaining_tasks)

    finally:
        terminated = True
        try:
            if agent:
                await agent.terminate()
        except Exception:
            LOGGER.exception("Error terminating agent:")
            terminated = False

    await asyncio.sleep(0.1)

    if not terminated:
        os._exit(1)
def str2bool(v):
    """Parse a boolean CLI flag value for argparse.

    Accepts real bools unchanged plus the usual textual spellings
    ("yes"/"no", "true"/"false", "t"/"f", "y"/"n", "1"/"0", any case).

    Raises:
        argparse.ArgumentTypeError: for any unrecognized value, so argparse
            reports a clean usage error instead of a traceback.
    """
    # Imported locally: at module scope argparse is only imported under the
    # __main__ guard, so referencing it here would NameError when this module
    # is imported rather than run as a script.
    import argparse

    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Runs a VCX demo agent.")
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        default=8050,
        metavar=("<port>"),
        help="Choose the starting port number to listen on",
    )
    parser.add_argument(
        "-i",
        "--interactive",
        type=str2bool,
        default=True,
        metavar=("<interactive>"),
        help="Start agent interactively",
    )
    args = parser.parse_args()

    # Fail fast if the indy shared library is not installed.
    require_indy()

    try:
        asyncio.get_event_loop().run_until_complete(main(args.port, interactive=args.interactive))
    except KeyboardInterrupt:
        os._exit(1)
pret/redtools/insert_object_data.py | etdv-thevoid/pokemon-rgb-enhanced | 1 | 12759678 | <filename>pret/redtools/insert_object_data.py
from __future__ import print_function
from __future__ import absolute_import
#author: <NAME> <<EMAIL>>
#date: 2012-01-05
#insert object data into pokered.asm
from . import extract_maps
from .pretty_map_headers import map_name_cleaner, object_data_pretty_printer, make_object_label_name, make_text_label, map_constants
from .analyze_incbins import asm, offset_to_pointer, find_incbin_to_replace_for, split_incbin_line_into_three, generate_diff_insert, load_asm, isolate_incbins, process_incbins
from . import analyze_incbins
import os, sys
import subprocess
spacing = " "  # indentation unit for emitted asm (NOTE(review): unused in this chunk — confirm)
def insert_object(map_id):
    """Replace the INCBIN covering one map's object data in pokered.asm with
    pretty-printed asm, apply the diff as a patch, and rebuild to verify."""
    # NOTE: `map` and `object` shadow builtins; kept for fidelity.
    map = extract_maps.map_headers[map_id]
    object = map["object_data"]
    size = extract_maps.compute_object_data_size(object)
    address = int(map["object_data_pointer"], 16)

    line_number = find_incbin_to_replace_for(address)
    if line_number == None:
        print("skipping object data for map " + str(map["id"]) + " at " + map["object_data_pointer"] + " for " + str(size) + " bytes.")
        return

    newlines = split_incbin_line_into_three(line_number, address, size)
    object_asm = object_data_pretty_printer(map_id)

    newlines = newlines.split("\n")
    if len(newlines) == 2: index = 0 #replace the 1st line with new content
    elif len(newlines) == 3: index = 1 #replace the 2nd line with new content

    newlines[index] = object_asm

    if len(newlines) == 3 and newlines[2][-2:] == "$0":
        #get rid of the last incbin line if it is only including 0 bytes
        del newlines[2]
        #note that this has to be done after adding in the new asm
    newlines = "\n".join(line for line in newlines)

    diff = generate_diff_insert(line_number, newlines)
    print(diff)
    print("... Applying diff.")

    #write the diff to a file
    fh = open("temp.patch", "w")
    fh.write(diff)
    fh.close()

    #apply the patch
    os.system("patch ../pokered.asm temp.patch")

    #remove the patch
    os.system("rm temp.patch")

    #confirm it's working
    subprocess.check_call("cd ../; make clean; LC_CTYPE=UTF-8 make", shell=True)
def insert_all_objects():
    """Insert object data for every known map, refreshing the incbin caches
    after each patch so line numbers stay accurate."""
    for map_id in extract_maps.map_headers.keys():
        if map_id not in extract_maps.bad_maps:
            insert_object(map_id)

            # Reset and reload analyze_incbins state: each applied patch
            # shifts line numbers in pokered.asm.
            analyze_incbins.asm = None
            analyze_incbins.incbin_lines = []
            analyze_incbins.processed_incbins = {}
            load_asm()
            isolate_incbins()
            process_incbins()
if __name__ == "__main__":
    #load map headers and object data
    extract_maps.load_rom()
    extract_maps.load_map_pointers()
    extract_maps.read_all_map_headers()

    #load incbins
    load_asm()
    isolate_incbins()
    process_incbins()

    #insert_object(1)
    insert_all_objects()
| 2.359375 | 2 |
Arrays and Matrix/P27 - removeElement.py | HarshOza36/LeetCode_Problems | 0 | 12759679 | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
# Simple brute force
# while val in nums:
# nums.remove(val)
# return (len(nums))
# Trying a 2 pointer approach which should be faster
k = 0
for i in range(len(nums)):
if(nums[i] != val):
nums[k] = nums[i]
k+=1
return k | 3.375 | 3 |
Surf_counter/spot_urls.py | SimonHollands/crowdfactor3 | 2 | 12759680 |
class SpotUrls:
    """Static catalog of surf-cam URLs and Surfline session parameters."""
    # Surfline session credentials (placeholders redacted at source).
    token='<PASSWORD>'
    CFID='459565'
    # Sample cam-rewind MP4 clips for the Venice Beach close cam.
    venice_morning_good='https://camrewinds.cdn-surfline.com/live/wc-venicebeachclose.stream.20191103T162900647.mp4'
    venice_static='https://camrewinds.cdn-surfline.com/live/wc-venicebeachclose.stream.20191027T235900139.mp4'
    # Per-spot video-rewind endpoints (session ids embedded in the query string).
    lookup={'breakwater': 'http://www.surfline.com/surfdata/video-rewind/video_rewind.cfm?id=150603&CFID=459565&CFTOKEN=<PASSWORD>',
        'topanga': 'http://www.surfline.com/surfdata/video-rewind/video_rewind.cfm?id=150605&CFID=491164&CFTOKEN=<PASSWORD>'}
    # Per-spot mp4 URL prefixes; a timestamp suffix is appended by callers.
    lookupmp4={'breakwater': "https://camrewinds.cdn-surfline.com/live/wc-venicebeachclose.stream.",
        'topanga': "https://camrewinds.cdn-surfline.com/live/wc-topangaclose.stream."}
| 1.382813 | 1 |
tests/unit/core/metrics/test_regression_metrics.py | cswarth/whylogs | 603 | 12759681 | import os
import pandas as pd
import pytest
from whylogs.core.metrics.regression_metrics import RegressionMetrics
from whylogs.proto import RegressionMetricsMessage
# Absolute path to the repository-level "testdata" directory, four levels
# above this test module.
TEST_DATA_PATH = os.path.abspath(
    os.path.join(
        os.path.realpath(os.path.dirname(__file__)),
        os.pardir,
        os.pardir,
        os.pardir,
        os.pardir,
        "testdata",
    )
)
def my_test():
    """A fresh RegressionMetrics has zero accumulators and None metrics.

    NOTE(review): the name does not match pytest's ``test_*`` convention,
    so this function is never collected or run -- confirm and rename if
    that is unintended.
    """
    regmet = RegressionMetrics()
    assert regmet.count == 0
    assert regmet.sum_diff == 0.0
    assert regmet.sum2_diff == 0.0
    assert regmet.sum_abs_diff == 0.0
    # With no observations, every derived metric is undefined.
    assert regmet.mean_squared_error() is None
    assert regmet.mean_absolute_error() is None
    assert regmet.root_mean_squared_error() is None
def test_load_parquet():
    """Metrics computed from the 2021-02-12 fixture match precomputed
    values and survive a protobuf round trip."""
    # Expected values precomputed for this fixture file.
    mean_absolute_error = 85.94534216005789
    mean_squared_error = 11474.89611670205
    root_mean_squared_error = 107.12094154133472
    regmet = RegressionMetrics()
    df = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet")))
    regmet.add(df["predictions"].to_list(), df["targets"].to_list())
    assert regmet.count == len(df["predictions"].to_list())
    assert regmet.mean_squared_error() == pytest.approx(mean_squared_error, 0.01)
    assert regmet.mean_absolute_error() == pytest.approx(mean_absolute_error, 0.01)
    assert regmet.root_mean_squared_error() == pytest.approx(root_mean_squared_error, 0.01)
    # Round-tripping through protobuf must preserve every metric exactly.
    msg = regmet.to_protobuf()
    new_regmet = RegressionMetrics.from_protobuf(msg)
    assert regmet.count == new_regmet.count
    assert regmet.mean_squared_error() == new_regmet.mean_squared_error()
    assert regmet.root_mean_squared_error() == new_regmet.root_mean_squared_error()
    assert regmet.mean_absolute_error() == new_regmet.mean_absolute_error()
def test_empty_protobuf_should_return_none():
    """Deserializing an empty message yields None, not an empty metrics object."""
    empty_message = RegressionMetricsMessage()
    assert RegressionMetrics.from_protobuf(empty_message) is None
def test_merging():
    """Merging metrics from two days equals accumulating both days into one."""
    regmet_sum = RegressionMetrics()
    regmet = RegressionMetrics(prediction_field="predictions", target_field="targets")
    df = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet")))
    regmet.add(df["predictions"].to_list(), df["targets"].to_list())
    regmet_sum.add(df["predictions"].to_list(), df["targets"].to_list())
    regmet_2 = RegressionMetrics(prediction_field="predictions", target_field="targets")
    df_2 = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-13.parquet")))
    regmet_2.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
    regmet_sum.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
    # merge() of the per-day metrics must agree with the accumulated total.
    merged_reg_metr = regmet.merge(regmet_2)
    assert merged_reg_metr.count == regmet_sum.count
    assert merged_reg_metr.mean_squared_error() == pytest.approx(regmet_sum.mean_squared_error(), 0.001)
    assert merged_reg_metr.root_mean_squared_error() == pytest.approx(regmet_sum.root_mean_squared_error(), 0.001)
    assert merged_reg_metr.mean_absolute_error() == pytest.approx(regmet_sum.mean_absolute_error(), 0.001)
| 2.484375 | 2 |
src/python/shared/output_utils.py | dsyme/ADBench | 58 | 12759682 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
PRECISION = 8  # number of significant digits after the decimal point in output
def objective_file_name(output_prefix, input_basename, module_basename):
    """Path of the objective ("F") output file for the given run."""
    return "{}{}_F_{}.txt".format(output_prefix, input_basename, module_basename)
def jacobian_file_name(output_prefix, input_basename, module_basename):
    """Path of the Jacobian ("J") output file for the given run."""
    return "{}{}_J_{}.txt".format(output_prefix, input_basename, module_basename)
def time_to_string(objective_time, derivative_time):
    """Format the two timings in scientific notation, one per line."""
    lines = [
        np.format_float_scientific(t, unique=False, precision=PRECISION)
        for t in (objective_time, derivative_time)
    ]
    return "\n".join(lines)
def save_time_to_file(filepath, objective_time, derivative_time):
    """Write the objective/derivative timings to *filepath*, overwriting it."""
    # `with` guarantees the handle is closed even if the write raises
    # (the previous open()/close() pair leaked it on failure).
    with open(filepath, "w") as out:
        out.write(time_to_string(objective_time, derivative_time))
def value_to_string(value):
    """Render *value* in scientific notation with PRECISION digits."""
    formatted = np.format_float_scientific(value, unique=False, precision=PRECISION)
    return formatted
def save_value_to_file(filepath, value):
    """Write a single scalar *value* to *filepath*, overwriting it."""
    # Context manager closes the file even when the write fails.
    with open(filepath, "w") as out:
        out.write(value_to_string(value))
def save_vector_to_file(filepath, gradient):
    """Write *gradient* to *filepath*, one formatted value per line."""
    # Context manager closes the file even when a write fails.
    with open(filepath, "w") as out:
        for value in gradient:
            out.write(value_to_string(value) + '\n')
def save_jacobian_to_file(filepath, jacobian):
    """Write *jacobian* row-major: one line per row, values tab-separated."""
    # Context manager closes the file even when a write fails; join also
    # handles an empty row, where the old `row[0]` access raised IndexError.
    with open(filepath, "w") as out:
        for row in jacobian:
            out.write('\t'.join(value_to_string(value) for value in row))
            out.write('\n')
def save_errors_to_file(filepath, reprojection_error, zach_weight_error):
    """Write the BA reprojection and Zach-weight error vectors, each under
    its own header line, one value per line."""
    # Context manager closes the file even when a write fails.
    with open(filepath, "w") as out:
        out.write("Reprojection error:\n")
        for value in reprojection_error:
            out.write(value_to_string(value) + '\n')
        out.write("Zach weight error:\n")
        for value in zach_weight_error:
            out.write(value_to_string(value) + '\n')
def save_sparse_j_to_file(filepath, J):
    """Write the sparse Jacobian *J* (objects with nrows/ncols/rows/cols/vals):
    dimensions, then row indices, then column indices, then the values."""
    # Context manager closes the file even when a write fails.
    with open(filepath, "w") as out:
        out.write(f"{J.nrows} {J.ncols}\n")
        out.write(f"{len(J.rows)}\n")
        for row in J.rows:
            out.write(f"{row} ")
        out.write('\n')
        out.write(f"{len(J.cols)}\n")
        for column in J.cols:
            out.write(f"{column} ")
        out.write('\n')
        for value in J.vals:
            out.write(value_to_string(value) + ' ')
blogapp/admin.py | heyylateef/lateeflab | 0 | 12759683 | from django.contrib import admin
from .models import Blogpost
from django_summernote.admin import SummernoteModelAdmin
# Previous plain ModelAdmin registration, kept for reference:
# class BlogpostAdmin(admin.ModelAdmin):
#     list_display = ('title', 'slug', 'status','created_on')
#     list_filter = ("status",)
#     search_fields = ['title', 'content']
#     prepopulated_fields = {'slug': ('title',)}
class BlogpostAdmin(SummernoteModelAdmin):
    """Admin for Blogpost using the Summernote WYSIWYG editor on `content`."""
    summernote_fields = ('content',)
    # Columns shown in the admin changelist.
    list_display = ('title', 'slug', 'status', 'updated_on', 'created_on')
    list_filter = ("status",)
    search_fields = ['title', 'content']
    # Auto-fill the slug from the title while typing.
    prepopulated_fields = {'slug': ('title',)}
admin.site.register(Blogpost, BlogpostAdmin)
| 1.960938 | 2 |
utilities/html_creator.py | nerji/FF-Bingo | 1 | 12759684 | from bs4 import BeautifulSoup as bs
import json
import uuid
class htmlCreator:
    """Render a 5x5 bingo board to a standalone HTML file.

    The board description is a JSON string with keys:
      - "spaces": labels for the 25 cells
      - "free_spaces": list whose first entry labels the free cell
      - "free space coordinates": (x, y) position of the free cell
    """

    def generate_html_file(self, jsonObject):
        """Parse *jsonObject* (a JSON string), fill the HTML template and
        save the result under ./output_folder/."""
        soup = self.__getTemplateFileData()
        jsonData = json.loads(jsonObject)
        self.__appendDivs(soup, jsonData)
        self.__saveFile(soup)

    def __saveFile(self, soup):
        # Unique file name per run so repeated generations never collide.
        myuuid = uuid.uuid4()
        resultFilename = "./output_folder/bingo-{}.html".format(myuuid)
        with open(resultFilename, "w") as file:
            file.write(str(soup))

    def __appendDivs(self, soup, jsonObject):
        """Append one grid-item div per board cell to the template's
        first <div> container."""
        container = soup.div
        freeSpaceLocation = self.__getFreeSpaceLocation(jsonObject)
        for i in range(0, 25):
            tag = soup.new_tag("div")
            tag["class"] = "grid-item"
            tag["id"] = "element{}".format(i)
            # Fixed: compare by value, not identity. "is not" only worked
            # by accident thanks to CPython's small-int caching.
            if i != freeSpaceLocation:
                tag.string = jsonObject['spaces'][i]
            else:
                tag["class"] += " free-item"
                tag.string = jsonObject['free_spaces'][0]
            container.append(tag)

    def __getFreeSpaceLocation(self, jsonObject):
        """Convert the (x, y) free-space coordinates to a 0-24 grid index
        (row-major, 5 columns)."""
        print(jsonObject)
        x, y = jsonObject['free space coordinates']
        return x + y * 5

    def __getTemplateFileData(self):
        """Load and parse the HTML board template."""
        with open("./resources/templates/websiteTemplate.html") as file:
            txt = file.read()
        return bs(txt, "lxml")
| 2.71875 | 3 |
BasicBlocksPyew/gcluster.py | JoeyJiao/peach | 10 | 12759685 | #!/usr/bin/env python
"""
A program's clusterization tool based on Pyew
Copyright (C) 2010, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, sys
from hashlib import sha256
from pyew_core import CPyew
def primes(n):
    """Return a list of all primes <= n (odd-only sieve of Eratosthenes).

    NOTE(review): Python 2 code -- relies on list-returning range() and
    integer "/" division; do not run under Python 3 unmodified.
    """
    if n==2: return [2]
    elif n<2: return []
    # s holds the odd candidates 3, 5, ..., n; composites are zeroed out.
    s=range(3,n+1,2)
    mroot = n ** 0.5
    half=(n+1)/2-1
    i=0
    m=3
    while m <= mroot:
        if s[i]:
            # Cross off multiples of m starting at m*m.
            j=(m*m-3)/2
            s[j]=0
            while j<half:
                s[j]=0
                j+=m
        i=i+1
        m=2*i+3
    return [2]+[x for x in s if x]
class CAdjacencyList(object):
    """Compare two programs by the adjacency lists of their call graphs.

    Edges are recorded as pairs of per-function statistics tuples rather
    than addresses, so structurally similar functions can match across
    different binaries. (Python 2 code.)
    """

    def __init__(self, data):
        # data: list of {sha256: pyew-instance} dicts, one per binary.
        self.data = data
        self.adjacency_lists = {}

    def createAdjacencyList(self, pyew):
        """Build (caller_stats, callee_stats) edge pairs starting from the
        entry point and any exports, then walking callees transitively."""
        al = []
        ep = pyew.ep
        try:
            l = pyew.exports.keys()
            l.append(pyew.ep)
        except:
            print "Error:", sys.exc_info()[1]
            l = [pyew.ep]
        functions = []
        # Seed edges from the entry point / exports.
        for ep in l:
            if pyew.functions.has_key(ep):
                fep = pyew.functions[ep]
                for c in fep.connections:
                    if c in pyew.functions:
                        if c not in functions:
                            functions.append(c)
                        al.append((pyew.function_stats[ep], pyew.function_stats[c]))
        # Walk the remaining call graph, visiting each callee once.
        dones = []
        while len(functions) > 0:
            addr = functions.pop()
            f = pyew.functions[addr]
            for c in f.connections:
                if c in pyew.functions and c not in dones:
                    functions.append(c)
                    dones.append(c)
                    al.append((pyew.function_stats[addr], pyew.function_stats[c]))
        return al

    def getSimilarity(self, s1, s2):
        """Return a percentage difference metric between two edge sets."""
        m = max(len(s1), len(s2))
        diff1 = len(s1.difference(s2))
        diff2 = len(s2.difference(s1))
        diff = (diff1 + diff2)*100./m
        simil1 = len(s1.intersection(s2))
        simil = simil1*100. / m
        # Normalize the raw difference against (similarity + difference).
        metric = simil + diff
        diff = diff * 100. / metric
        return diff

    def compareTwoSets(self, set1, set2):
        """Return 0 for identical adjacency lists, else the %% difference."""
        pyew1 = set1.values()[0]
        pyew2 = set2.values()[0]
        al1 = self.createAdjacencyList(pyew1)
        al2 = self.createAdjacencyList(pyew2)
        if al1 == al2:
            return 0
        else:
            s1 = set(al1)
            s2 = set(al2)
            diff = len(s1.difference(s2)) + len(s2.difference(s1))
            total = max(len(s1), len(s2))
            simil = diff * 100. / total
            return simil

    def cluster(self):
        """Compare the two loaded programs (only pairwise mode supported)."""
        if len(self.data) == 2:
            set1 = self.data[0]
            set2 = self.data[1]
            return self.compareTwoSets(set1, set2)
class CPrimesCluster(object):
    """Compare programs via a product-of-primes hash.

    Each distinct function (by its nodes/edges/cyclomatic-complexity
    tuple) contributes the prime indexed by its complexity; equal products
    mean structurally identical programs. (Python 2 code.)
    """

    def __init__(self, data):
        self.primes = primes(1024*1024)
        self.data = data

    def generateHash(self, pyew):
        """Return (prime-product value, list of distinct stat tuples)."""
        val = 1.
        dones = []
        primes_done = []
        for f in pyew.functions:
            nodes, edges, cc = pyew.function_stats[f]
            # Only non-trivial functions (cc > 1), each distinct stat
            # tuple counted once; each prime multiplied in at most once.
            if cc > 1 and (nodes, edges, cc) not in dones:
                p = self.primes[cc]
                if p not in primes_done:
                    val *= p
                    primes_done.append(p)
                dones.append((nodes, edges, cc))
        return val, dones

    def compareManySets(self, sets):
        """Print a CSV of pairwise differences for all loaded programs."""
        files = {}
        primes = {}
        values = {}
        print "File1;File2;Difference"
        for s in sets:
            pyew = s.values()[0]
            val, prime = self.generateHash(pyew)
            hash = sha256(pyew.getBuffer()).hexdigest()
            primes[hash] = prime
            values[hash] = val
            files[hash] = pyew.filename
            del pyew
        dones = []
        size = len(primes)
        for h1 in values:
            for h2 in values:
                # Skip self-comparison and already-reported pairs.
                if h1 == h2 or (h1, h2) in dones or (h2, h1) in dones:
                    continue
                if values[h1] == values[h2]:
                    print "%s;%s;0" % (files[h1], files[h2])
                    dones.append((h1, h2))
                    dones.append((h2, h1))
                else:
                    dones.append((h1, h2))
                    dones.append((h2, h1))
                    s1 = set(primes[h1])
                    s2 = set(primes[h2])
                    diff = self.getSimilarity(s1, s2)
                    print "%s;%s;%f" % (files[h1], files[h2], diff)

    def getSimilarity(self, s1, s2):
        """Return a percentage difference metric between two stat-tuple sets."""
        m = max(len(s1), len(s2))
        diff1 = len(s1.difference(s2))
        diff2 = len(s2.difference(s1))
        diff = (diff1 + diff2)*100./m
        simil1 = len(s1.intersection(s2))
        simil = simil1*100. / m
        metric = simil + diff
        diff = diff * 100. / metric
        return diff

    def compareTwoSets(self, set1, set2):
        """Return 0 for equal prime products, else the %% difference."""
        pyew1 = set1.values()[0]
        val1, primes1 = self.generateHash(pyew1)
        pyew2 = set2.values()[0]
        val2, primes2 = self.generateHash(pyew2)
        s1 = set(primes1)
        s2 = set(primes2)
        if val1 == val2:
            return 0
        else:
            diff = self.getSimilarity(s1, s2)
            return diff

    def cluster(self):
        """Dispatch to pairwise or many-set comparison based on data size."""
        if len(self.data) == 2:
            set1 = self.data[0]
            set2 = self.data[1]
            return self.compareTwoSets(set1, set2)
        else:
            return self.compareManySets(self.data)
class CExpertCluster(object):
    """Hand-weighted 'expert' distance built from per-program cyclomatic
    complexity statistics. (Python 2 code.)"""

    def __init__(self, data):
        self.data = data

    def compareTwoSets(self, set1, set2):
        # Get the ciclomatic complexity statistical data of the 2 samples
        ccs1 = set1.values()[0].program_stats["ccs"]
        ccs2 = set2.values()[0].program_stats["ccs"]
        avg_cc_distance = abs(ccs1["avg"] - ccs2["avg"])
        max_cc_distance = abs(ccs1["max"] - ccs2["max"])
        min_cc_distance = abs(ccs1["min"] - ccs2["min"])
        total_functions = abs(len(set1.values()[0].functions) - len(set2.values()[0].functions))
        # Weighted sum; the average complexity dominates the score.
        difference = avg_cc_distance*0.5 + \
                     max_cc_distance*0.3 + \
                     min_cc_distance*0.1 + \
                     total_functions*0.1
        return difference

    def cluster(self):
        """Compare the two loaded programs."""
        set1 = self.data[0]
        set2 = self.data[1]
        return self.compareTwoSets(set1, set2)
class CGraphCluster(object):
    """Driver: analyze queued binaries with Pyew and run the three
    comparison strategies (expert, primes, adjacency lists). (Python 2.)"""

    def __init__(self):
        self.clear()
        self.deep = True     # enable Pyew's deep code analysis
        self.timeout = 0     # 0 = no analysis timeout

    def addFile(self, filename):
        """Queue a file for analysis."""
        self.files.append(filename)

    def clear(self):
        """Reset queued files, results and analyzed data."""
        self.files = []
        self.results = []
        self.data = []

    def processFile(self, filename):
        """Analyze one file with Pyew; keep it only if it is PE or ELF."""
        #print "[+] Analyzing file %s" % filename
        pyew = CPyew(batch=True)
        pyew.deepcodeanalysis = self.deep
        pyew.analysis_timeout = 0
        pyew.loadFile(filename)
        if pyew.format in ["PE", "ELF"]:
            hash = sha256(pyew.getBuffer()).hexdigest()
            self.data.append({hash:pyew})
        else:
            sys.stderr.writelines("Not a PE/ELF file")
            sys.stderr.flush()

    def comparePrimes(self):
        """Run the prime-product comparison and print the verdict."""
        cluster = CPrimesCluster(self.data)
        val = cluster.cluster()
        if val == 0:
            print "Primes system: Programs are 100% equals"
        else:
            print "Primes system: Programs differs in", val, "% percent"

    def compareAdjacencyLists(self):
        """Run the adjacency-list comparison and print the verdict."""
        cluster = CAdjacencyList(self.data)
        val = cluster.cluster()
        if val == 0:
            print "ALists system: Programs are 100% equals"
        else:
            print "ALists System: Programs differs in %f%%" % val

    def compareExpert(self):
        """Run the expert-weighted comparison, print and return the score."""
        cluster = CExpertCluster(self.data)
        val = cluster.cluster()
        if val == 0:
            print "Expert system: Programs are 100% equals"
        else:
            print "Expert system: Programs differs in %f%s" % (round(val, 1), "%")
        return val

    def processFiles(self):
        """Analyze every queued file."""
        for f in self.files:
            self.processFile(f)
def main(prog1, prog2):
    """Compare two binaries with all three strategies and print results."""
    cluster = CGraphCluster()
    cluster.addFile(prog1)
    cluster.addFile(prog2)
    cluster.processFiles()
    cluster.compareExpert()
    cluster.comparePrimes()
    cluster.compareAdjacencyLists()
def compareDirectory(path):
    """Analyze every file under *path* (or the single file) and print one
    colon-separated CSV row of statistics per binary. (Python 2 code.)"""
    cluster = CGraphCluster()
    cprimes = CPrimesCluster([])
    alist = CAdjacencyList([])
    if os.path.isdir(path):
        for root, dirs, files in os.walk(path, topdown=False):
            for name in files:
                fname = os.path.join(root, name)
                cluster.addFile(fname)
    else:
        cluster.addFile(path)
    cluster.processFiles()
    print "hash:filename:primes_hash:nodes_total:nodes_max:nodes_avg:nodes_min:edges_total:edges_max:edges_avg:edges_min:ccs_total:ccs_max:ccs_avg:ccs_min:functions:adjacency_list"
    for x in cluster.data:
        hash = x.keys()[0]
        pyew = x.values()[0]
        data = ""
        for stat in pyew.program_stats:
            # Decimal commas so the values survive the colon-separated format.
            data = data + ":".join(map(str, pyew.program_stats[stat].values())).replace(".", ",") + ":"
        phash, dones = cprimes.generateHash(pyew)
        # NOTE(review): `alist.adjacency_lists` is a dict attribute, so
        # calling it here raises TypeError -- likely meant
        # `alist.createAdjacencyList(pyew)`; confirm before relying on this.
        print "%s:%s:%s:%s%d:%s" % (hash, pyew.f.name, str(phash.as_integer_ratio()[0]), data, len(pyew.functions), str(alist.adjacency_lists(pyew)))
def usage():
    """Print command-line help for the two supported modes."""
    print "Usage:", sys.argv[0], "<prog 1> <prog 2> | <directory>"
    print
    print "When comparing 2 binaries the difference between them is printed out."
    print "When comparing a directory, a csv file with all the relevant data is printed out."
    print
    print "Examples:"
    print "%s /bin/ls /bin/cp" % sys.argv[0]
    print "%s /bin" % sys.argv[0]
    print
if __name__ == "__main__":
    # No args: help; two args: pairwise compare; one arg: directory CSV mode.
    if len(sys.argv) == 1:
        usage()
    elif len(sys.argv) == 3:
        main(sys.argv[1], sys.argv[2])
    else:
        compareDirectory(sys.argv[1])
| 2.640625 | 3 |
incrowd/incrowd/production_settings.py | incrowdio/incrowd | 4 | 12759686 | MAILGUN_ACCESS_KEY = 'ACCESS-KEY'
MAILGUN_SERVER_NAME = 'SERVER-NAME'
| 1.046875 | 1 |
src/nncomp_molecule/criterions/focal_loss.py | k-fujikawa/Kaggle-BMS-Molecular-Translation | 3 | 12759687 | <filename>src/nncomp_molecule/criterions/focal_loss.py
import torch
import nncomp.registry as R
@R.CriterionRegistry.add
class FocalLoss(torch.nn.Module):
    """Focal loss (multi-class, via one-hot BCE-with-logits).

    Down-weights well-classified positions by (1 - p_t)^gamma and applies
    the alpha positive/negative balance; positions whose target equals
    ``ignore_index`` contribute zero.
    """

    def __init__(self, alpha=0.25, gamma=2.0, ignore_index=0):
        super().__init__()
        self.alpha = alpha                # weight on the positive class
        self.gamma = gamma                # focusing exponent
        self.ignore_index = ignore_index  # target id excluded from the loss
        self.lossfunc = torch.nn.BCEWithLogitsLoss(reduction="none")

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        """Return the scalar loss.

        The indexing ``(target != ...)[:, :, None]`` implies input is
        3-D (batch, seq, n_classes) and target is (batch, seq) int ids --
        assumption inferred from the code; confirm against callers.
        """
        n_output = input.shape[-1]
        # One-hot encode targets on the same device as the logits.
        one_hot_target = torch.eye(n_output, device=input.device)[target]
        bceloss = self.lossfunc(input, one_hot_target)
        probs = torch.sigmoid(input)
        # Probability the model assigns to the ground-truth side (p_t).
        probs_gt = torch.where(
            one_hot_target == 1,
            probs,
            1 - probs,
        )
        modulator = torch.pow(1 - probs_gt, self.gamma)
        # alpha on positives, (1 - alpha) on negatives.
        weighted_loss = torch.where(
            one_hot_target == 1,
            self.alpha * modulator * bceloss,
            (1 - self.alpha) * modulator * bceloss
        )
        # Zero out positions whose target is the padding/ignore index.
        weighted_loss = torch.where(
            (target != self.ignore_index)[:, :, None].expand(input.shape),
            weighted_loss,
            torch.zeros_like(weighted_loss),
        )
        # Sum over sequence and class dims, then average over the batch.
        weighted_loss = weighted_loss.sum(dim=(1, 2))
        weighted_loss = weighted_loss.mean()
        return weighted_loss
@R.CriterionRegistry.add
class FocalLossEx(torch.nn.Module):
    """Focal loss variant (multi-class, via one-hot BCE-with-logits).

    NOTE(review): with the stray debugger hook removed (see below) this is
    functionally identical to FocalLoss; kept as a separate registry entry
    for configuration compatibility.
    """

    def __init__(self, alpha=0.25, gamma=2.0, ignore_index=0):
        super().__init__()
        self.alpha = alpha                # weight on the positive class
        self.gamma = gamma                # focusing exponent
        self.ignore_index = ignore_index  # target id excluded from the loss
        self.lossfunc = torch.nn.BCEWithLogitsLoss(reduction="none")

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        """Return the scalar loss.

        The indexing ``(target != ...)[:, :, None]`` implies input is
        3-D (batch, seq, n_classes) and target is (batch, seq) int ids --
        assumption inferred from the code; confirm against callers.
        """
        n_output = input.shape[-1]
        one_hot_target = torch.eye(n_output, device=input.device)[target]
        bceloss = self.lossfunc(input, one_hot_target)
        probs = torch.sigmoid(input)
        # Probability the model assigns to the ground-truth side (p_t).
        probs_gt = torch.where(
            one_hot_target == 1,
            probs,
            1 - probs,
        )
        modulator = torch.pow(1 - probs_gt, self.gamma)
        # Removed a leftover `breakpoint()` call here: it dropped every
        # forward pass into an interactive debugger, hanging training.
        weighted_loss = torch.where(
            one_hot_target == 1,
            self.alpha * modulator * bceloss,
            (1 - self.alpha) * modulator * bceloss
        )
        # Zero out positions whose target is the padding/ignore index.
        weighted_loss = torch.where(
            (target != self.ignore_index)[:, :, None].expand(input.shape),
            weighted_loss,
            torch.zeros_like(weighted_loss),
        )
        weighted_loss = weighted_loss.sum(dim=(1, 2))
        weighted_loss = weighted_loss.mean()
        return weighted_loss
| 1.984375 | 2 |
spatial_lda/__init__.py | MagdalenaMat/spatial_lda | 12 | 12759688 | from .online_lda import LatentDirichletAllocation
| 0.996094 | 1 |
Exercise05/5-18.py | ywyz/IntroducingToProgrammingUsingPython | 0 | 12759689 | <reponame>ywyz/IntroducingToProgrammingUsingPython
'''
@Date: 2019-09-09 19:17:47
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-09-09 19:17:47
'''
# Read an integer and print its prime factorization by trial division.
# NOTE(review): eval() on raw user input executes arbitrary code;
# int(input(...)) would be safer if only integers are expected.
number = eval(input("Enter the number: "))
fNumber = 2
print(number, "can be decomposed into :")
while fNumber <= number:
    if number % fNumber == 0:
        # Floor division keeps `number` an exact int; the original true
        # division ("/") turned it into a float, losing precision for
        # large inputs and printing float comparisons thereafter.
        number = number // fNumber
        print(fNumber, end=" ")
    else:
        fNumber += 1
| 3.609375 | 4 |
tests/mutate/test_remove_repeated_time_signatures.py | gilbertohasnofb/auxjad | 6 | 12759690 | import abjad
import auxjad
def test_remove_repeated_time_signatures_01():
    """A time signature identical to the previous one is removed."""
    staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
    abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
    abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            \time 3/8
            c'4
            d'8
        }
        """
    )
    auxjad.mutate.remove_repeated_time_signatures(staff[:])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            c'4
            d'8
        }
        """
    )
def test_remove_repeated_time_signatures_02():
    """A repeated signature is removed even with an unmarked measure between."""
    staff = abjad.Staff(r"c'4 d'8 | e'4. | c'4 d'8")
    abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
    abjad.attach(abjad.TimeSignature((3, 8)), staff[3])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            e'4.
            \time 3/8
            c'4
            d'8
        }
        """
    )
    auxjad.mutate.remove_repeated_time_signatures(staff[:])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            e'4.
            c'4
            d'8
        }
        """
    )
def test_remove_repeated_time_signatures_03():
    """Repeated signatures are removed across chords and inside tuplets."""
    staff = abjad.Staff([abjad.Note("c'2"),
                         abjad.Chord("<d' f'>2"),
                         abjad.Tuplet((2, 3), "g2 a2 b2"),
                         ])
    abjad.attach(abjad.TimeSignature((2, 2)), staff[0])
    abjad.attach(abjad.TimeSignature((2, 2)), staff[2][0])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 2/2
            c'2
            <d' f'>2
            \times 2/3
            {
                \time 2/2
                g2
                a2
                b2
            }
        }
        """
    )
    auxjad.mutate.remove_repeated_time_signatures(staff[:])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 2/2
            c'2
            <d' f'>2
            \times 2/3
            {
                g2
                a2
                b2
            }
        }
        """
    )
def test_remove_repeated_time_signatures_04():
    """An explicit 4/4 matching the prevailing (default) meter is removed,
    leaving the staff with no \time command at all."""
    staff = abjad.Staff(r"c'2 d'2 | e'2 d'2")
    abjad.attach(abjad.TimeSignature((4, 4)), staff[2])
    auxjad.mutate.remove_repeated_time_signatures(staff[:])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            c'2
            d'2
            e'2
            d'2
        }
        """
    )
def test_remove_repeated_time_signatures_05():
    """The function is also reachable via the abjad.mutate namespace
    (auxjad monkey-patches it in) with identical behaviour to test 01."""
    staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
    abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
    abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            \time 3/8
            c'4
            d'8
        }
        """
    )
    abjad.mutate.remove_repeated_time_signatures(staff[:])
    assert abjad.lilypond(staff) == abjad.String.normalize(
        r"""
        \new Staff
        {
            \time 3/8
            c'4
            d'8
            c'4
            d'8
        }
        """
    )
| 2.46875 | 2 |
tests/unit/datetime_/test_datetime_.py | matthewgdv/subtypes | 1 | 12759691 | import pytest
import datetime as dt
from subtypes import DateTime
@pytest.fixture
def example_datetime():
    """A fixed DateTime (1994-03-24 12:30:15) shared by the tests below."""
    return DateTime(1994, 3, 24, 12, 30, 15)
class TestDateTime:
    """Tests for subtypes.DateTime.

    Most cases are placeholders (`assert True`) kept in sync with the
    implementation's method list -- the "# synced" markers track that.
    """

    def test___str__(self):  # synced
        assert True

    def test_shift(self, example_datetime):  # synced
        # Shifting by mixed positive/negative offsets lands on 2020-01-01.
        assert example_datetime.shift(years=26, months=-2, days=-23, hours=-12, minutes=-30, seconds=-15) == DateTime(2020, 1, 1)

    def test_date(self):  # synced
        assert True

    def test_time(self):  # synced
        assert True

    def test_to_stdlib(self):  # synced
        assert True

    def test_to_isoformat(self, example_datetime):  # synced
        assert example_datetime.to_isoformat() == "1994-03-24 12:30:15"

    def test_to_format(self):  # synced
        assert True

    def test_from_datetime(self, example_datetime):  # synced
        # A stdlib datetime converts to an equal DateTime.
        assert example_datetime == DateTime.from_datetime(dt.datetime(1994, 3, 24, 12, 30, 15))

    def test_from_isoformat(self):  # synced
        assert True

    def test_from_format(self):  # synced
        assert True

    def test_from_string(self):  # synced
        assert True

    def test_from_parts(self):  # synced
        assert True

    def test_infer(self):  # synced
        assert True

    def test_TimeZone(self):  # synced
        assert True

    def test_Hour(self):  # synced
        assert True

    def test_Minute(self):  # synced
        assert True

    def test_Second(self):  # synced
        assert True

    def test_MicroSecond(self):  # synced
        assert True
| 2.71875 | 3 |
Scripts/convolve_BTC.py | pjuckem/GRTD | 3 | 12759692 | <filename>Scripts/convolve_BTC.py
import numpy as np
import pandas as pd
import scipy.optimize as so
import scipy.stats as ss
class Tracer(object):
    """Hold a dated tracer-input series plus derived time axes.

    `tracer_input_df` is a DataFrame indexed by date; `tr` names the
    tracer column of interest.
    """

    def __init__(self, tracer_input_df, tr):
        self.tracer_input_df = tracer_input_df
        self.tr = tr
        self.tr_size = tracer_input_df.shape[0]
        idx = tracer_input_df.index
        self.input_date = idx
        self.start_date = idx.min()
        jdays = idx.to_julian_date()
        self.julian_time = jdays
        # Days elapsed since the first sample.
        self.elapsed_time = jdays - jdays[0]

    def pad_tracer(self, rtd_size):
        """Pad the tracer series with `rtd_size` constant background
        values (first/last observed value) on each side."""
        padded = np.zeros(self.tr_size + rtd_size * 2)
        self.lbackground = self.tracer_input_df.loc[self.tracer_input_df.index[0], self.tr]
        self.rbackground = self.tracer_input_df.loc[self.tracer_input_df.index[-1], self.tr]
        padded[:rtd_size] = self.lbackground
        padded[rtd_size:rtd_size + self.tr_size] = self.tracer_input_df.loc[:, self.tr]
        padded[rtd_size + self.tr_size:] = self.rbackground
        self.tr_pad = padded
class Sample_gdf(Tracer):
    """Load sampled-well observations from a model workspace shapefile.

    NOTE(review): this class appears broken as written and needs review:
      - ``super().__init__(self, tracer_input_df, tr)`` passes ``self``
        explicitly (super() already binds it) AND references
        ``tracer_input_df``/``tr``, which are not parameters of this
        ``__init__`` -- this raises NameError at construction.
      - ``gp`` (presumably geopandas) and ``os`` are used but not
        visibly imported in this file.
    """

    def __init__(self, model_ws):
        super().__init__(self, tracer_input_df, tr)
        self.tracer_input_df = tracer_input_df
        self.model_ws = model_ws
        # Well-sample shapefile produced alongside the WEL package.
        self.src = os.path.join(self.model_ws, 'WEL', 'sample_gdf.shp')
        self.sample_gdf = gp.read_file(self.src)
        self.sample_gdf['STAID'] = self.sample_gdf.STAID.astype(np.int64())
        self.sample_gdf['DATES'] = pd.to_datetime(self.sample_gdf['DATES'])
        self.sample_gdf.index = self.sample_gdf['DATES']

    def extract_well(self, well, sa_in):
        """Pull the dated samples of column *sa_in* for station id *well*
        and compute sample times elapsed since the tracer start date."""
        self.well = well
        self.sa_in = sa_in
        self.well_data = self.sample_gdf.loc[self.sample_gdf.STAID == well, ['DATES', 'NetworkTyp', 'SuCode', self.sa_in]]
        self.well_data = self.well_data.dropna(axis=0, how='any', inplace=False)
        self.well_data['jdate'] = self.well_data.index.to_julian_date()
        self.elapsed_sample_time = self.well_data['jdate'] - self.start_date.to_julian_date()
class Resepy(object):
    """Evaluate a fitted two-component residence-time distribution (RTD).

    NOTE(review): relies on a module-level ``dist`` (presumably a
    scipy.stats distribution) that is not defined in this file's visible
    scope -- confirm where it comes from.
    """

    def __init__(self, por, freq):
        self.por = por    # porosity scale applied to travel times
        self.freq = freq  # sampling interval for the discretized pmf

    def get_fit(self, well_dict, method):
        """Evaluate the fitted mixture pdf for *method*'s parameters and
        discretize it into a normalized pmf on a regular time grid."""
        # compute the pdf in log space
        self.px = np.linspace(0.1, 10, 50000, endpoint=True)
        # Map log-space support to real travel times (years -> days via
        # 365.25, scaled by porosity) -- assumption inferred; confirm.
        self.pxp = np.exp(self.px) * self.por * 365.25
        self.p = well_dict['par'][method]
        self.py = self.explicit_pdf(self.px, *self.p)
        self.xi = np.arange(0, 1E+04 * self.freq, self.freq)
        self.yi = np.interp(self.xi, self.pxp, self.py)
        # Normalize to a probability mass function.
        self.pmf = self.yi / self.yi.sum()
        self.rtd_size = self.pmf.shape[0]

    def explicit_pdf(self, t, sh_1, lo_1, sc_1, sh_2, lo_2, sc_2, fy):
        """Mixture pdf: fy * pdf1 + (1 - fy) * pdf2.

        NOTE(review): the locals are named _cdf_* but are computed with
        dist.pdf -- the names are misleading, the math uses pdfs.
        """
        _cdf_1 = dist.pdf(t, sh_1, lo_1, sc_1)
        _cdf_2 = dist.pdf(t, sh_2, lo_2, sc_2)
        return fy * _cdf_1 + (1 - fy) * _cdf_2

    def dk(self, thalf):
        """Apply radioactive decay with half-life *thalf* to the pmf."""
        self.pmfdk = self.pmf * np.exp(-self.xi * np.log(2) / thalf)
PolyEngine/Scripts/InitializeSubmodules.py | PiotrMoscicki/PolyEngine | 65 | 12759693 | import argparse
import os
import git
from enum import Enum
# Locate the repository root (two levels above this script) and open it
# with GitPython.
script_location = os.path.dirname(__file__)
repo_path = os.path.abspath(os.sep.join([script_location, '..', '..']))
print('Using git repository location {}'.format(repo_path))

repo = git.Repo(repo_path)
repo_submodules = repo.submodules
# Per-submodule patches live in ThirdParty/patches/<submodule>.patch.
patches_folder = os.sep.join([repo_path, 'PolyEngine', 'ThirdParty', 'patches'])

for submodule in repo_submodules:
    sub_name = os.path.basename(os.path.normpath(submodule.path))
    print('Initializing submodule [{}] in path: {}'.format(sub_name, submodule.path))
    # force=True discards any local submodule changes.
    submodule.update(init=True, force=True)
    patch_name = os.sep.join([patches_folder, '{}.patch'.format(sub_name)])
    # Apply patch if needed
    if os.path.isfile(patch_name):
        print('Applying patch to submodule {} found in {}'.format(sub_name,patch_name))
        sub_repo = submodule.module()
        sub_repo.git.reset(['--hard']) # Reset first
        sub_repo.git.apply([patch_name]) # Apply patch
| 2.3125 | 2 |
engine/tilemap.py | polowis/GodOfWar | 0 | 12759694 | <filename>engine/tilemap.py
"""
Author: <NAME>
Date Created: 15 April 2021
Date Last Changed: 23 April 2021
This file is for creating a console map, responsible for handling character movement
and encoutering monsters
Files read: map.json, mobs.json
"""
import json
import random
from entity.constants import ENEMY_OBJECTS
from engine.battle import Battle
# Probability of each rarity tier when an encountered mob is rolled;
# the weights sum to 1.0.
MOBS_RARITY_CHANCE = {
    'common': 0.6,
    'uncommon': 0.25,
    'rare': 0.12,
    'boss': 0.03
}
class Tilemap(object):
    """Construct a tilemap object that is responsible for map drawing and
    movement functions.

    The map is a grid of single-character tiles; the player occupies one
    tile and moves one step at a time, possibly triggering mob battles.
    """
    def __init__(self, player, scene, map_file: str, map_level: int, blocked_objects=['■']):
        self.player = player
        self.scene = scene
        self.map_file = map_file          # path to the JSON map definition
        self.map_level = map_level        # which mapLevel entry to load
        # NOTE: default is shared across instances (mutable default) but is
        # only ever read, never mutated, so this is safe as written.
        self.blocked_objects = blocked_objects
        self.player_symbol = '◉'
        self.pathway_symbol = '□'
        # current coordinates of entity
        self.current_x = 0
        self.current_y = 0
        self.mob_spawn_rate = 30          # percent chance of an encounter
        self.mob_holders = []             # cached purpose
        self.mob_data = {}

    def construct(self):
        """
        Construct the text based map. This will load JSON map file provided
        It will also try to locate the charater position in the map. Return at position (0, 0)
        if no player postion found.
        """
        with open(self.map_file, 'r') as f:
            data = json.load(f)
            for i in data['map']:
                if i['mapLevel'] == self.map_level:
                    self.map_grid = i['grid']
        self.locate_player_position()
        self.load_mobs()

    def set_player_symbol(self, symbol):
        """Set the character that symbolises player on the map"""
        self.player_symbol = symbol
        return self

    def set_pathway_symbol(self, symbol):
        """Set the character that symbolises pathway (walkable area) on the map"""
        self.pathway_symbol = symbol
        return self

    @property
    def player_position(self):
        """return the player position as tuple formatted: (x_position, y_position)"""
        return (self.current_x, self.current_y)

    def locate_player_position(self):
        """Locate the player postion and update the current position of the player"""
        x = 0
        y = 0
        for i in range(len(self.map_grid)):
            for j in range(len(self.map_grid[i])):
                if self.map_grid[i][j] == self.player_symbol:
                    y = i
                    x = j
        self.current_x = x
        self.current_y = y
        return None

    def display_current_map(self):
        """
        Display the current map
        Note: This is the current map not the original map
        """
        for grid in self.map_grid:
            self.scene.write(grid)
        self.scene.write("")

    def cannot_move(self, x, y) -> bool:
        """Return True when the tile at (x, y) is blocked (not walkable)."""
        return self.map_grid[y][x] in self.blocked_objects

    def move_player_right(self):
        """Move the entity to the right of the map by 1 step"""
        if self.cannot_move(self.current_x + 1, self.current_y):
            self.scene.write("\n You cannot go that way")
            self.display_current_map()
        else:
            self.update_map_data(self.current_x, self.current_y, self.pathway_symbol)
            self.update_map_data(self.current_x + 1, self.current_y, self.player_symbol)
            self.current_x += 1
            self.display_current_map()
        return self

    def move_player_left(self):
        """Move the entity to the left of the map by 1 step"""
        if self.cannot_move(self.current_x - 1, self.current_y):
            self.scene.write(" \n You cannot go that way")
            self.display_current_map()
        else:
            self.update_map_data(self.current_x, self.current_y, self.pathway_symbol)
            self.update_map_data(self.current_x - 1, self.current_y, self.player_symbol)
            self.current_x -= 1
            self.display_current_map()
        return self

    def move_player_up(self):
        """Move the entity to the top of the map by 1 step"""
        if self.cannot_move(self.current_x, self.current_y - 1):
            self.scene.write("\n You cannot go that way")
            self.display_current_map()
        else:
            self.update_map_data(self.current_x, self.current_y, self.pathway_symbol)
            self.update_map_data(self.current_x, self.current_y - 1, self.player_symbol)
            self.current_y -= 1
            self.display_current_map()
        return self

    def move_player_down(self):
        """Move the entity to the bottom of the map by 1 step"""
        # FIX: the condition was inverted ("if not self.cannot_move"),
        # which blocked movement onto free tiles and allowed walking
        # through walls. Now matches the other three move methods.
        if self.cannot_move(self.current_x, self.current_y + 1):
            self.scene.write("\n You cannot go that way!!")
            self.display_current_map()
        else:
            self.update_map_data(self.current_x, self.current_y, self.pathway_symbol)
            self.update_map_data(self.current_x, self.current_y + 1, self.player_symbol)
            self.current_y += 1
            self.display_current_map()
        return self

    def update_map_data(self, x_value, y_value, value_to_update):
        """Update data at given coordinate"""
        self.map_grid[y_value][x_value] = value_to_update
        return None

    def load_mobs(self):
        """Open mobs.json file and load the appropriate mobs into python
        dictionaries for performance purpose."""
        with open('data/mobs.json', 'r') as file:
            data = json.load(file)
            for i in data['mobs_rarity']:
                if i['map_level'] == self.map_level:
                    self.mob_rarity = i
            for mob in data['mobs']:
                if mob['name'] in self.mob_rarity.values():
                    self.mob_holders.append(mob)
            self.mob_data = data

    def get_encounter_mob_name(self):
        """Return an enemy name, rolled according to MOBS_RARITY_CHANCE."""
        rarity_type = random.choices(list(MOBS_RARITY_CHANCE.keys()), list(MOBS_RARITY_CHANCE.values()), k=1)[0]
        return self.mob_rarity[rarity_type]

    def create_mob(self, mob_name):
        """
        Create an enemy object
        """
        for mob in self.mob_holders:
            if mob['name'] == mob_name:
                enemy_class = ENEMY_OBJECTS[mob['class'].lower()]
                enemy = enemy_class(mob_name, self.scene)
                enemy.exp = random.randint(mob['min_exp'], mob['max_exp'])
                return enemy

    def start_battle(self):
        """Start battle against 1-3 randomly rolled mobs."""
        number_of_mob = random.randint(1, 3)
        mob_list = []
        for i in range(number_of_mob):
            mob_name = self.get_encounter_mob_name()
            if mob_name is not None:
                mob = self.create_mob(mob_name)
                mob_list.append(mob)
        battle = Battle(self.player, mob_list, self.scene)
        battle.set_mob_data(self.mob_data)
        battle.begin()
        return None

    @property
    def encounter_mobs(self):
        """Return boolean value whether or not the player encounters monster"""
        return random.randint(1, 100) <= self.mob_spawn_rate
| 3.078125 | 3 |
network_construction/algorithm.py | YoungLemon/Project-KnowNet | 4 | 12759695 | <filename>network_construction/algorithm.py
# encoding=utf-8
# import ssl
import nltk
import nltk.stem
from nltk.tokenize import WordPunctTokenizer
from nltk.corpus import wordnet as wn
# from nltk.corpus import brown
# from nltk.corpus import conll2000
from rake_nltk import Rake
from textblob import TextBlob
from textblob import Word
# from textblob.wordnet import VERB
# from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import word2vec
# import collections
# ssl._create_default_https_context = ssl._create_unverified_context
# nltk.download()
# this class is used for chunk.
class UnigramChunker(nltk.ChunkParserI):
    """
    Unigram chunker.

    Learns, from a training set of chunked sentences, the most likely
    chunk (IOB) tag for each POS tag, then uses that mapping to chunk
    new sentences.
    """
    def __init__(self, train_sents):
        """
        Constructor.
        :param train_sents: list of nltk Tree objects (chunked sentences)
        """
        train_data = []
        for sent in train_sents:
            # Convert the Tree object into IOB triples [(word, tag, IOB-tag), ...]
            conlltags = nltk.chunk.tree2conlltags(sent)
            # Keep only (POS tag, IOB tag) pairs: we learn POS -> IOB.
            ti_list = [(t, i) for w, t, i in conlltags]
            train_data.append(ti_list)
        # Train a unigram tagger mapping POS tags to IOB chunk tags.
        self.__tagger = nltk.UnigramTagger(train_data)

    def parse(self, tokens):
        """
        Chunk a sentence.
        :param tokens: list of (word, POS-tag) pairs
        :return: nltk Tree object with the chunk structure
        """
        # Extract the POS tags.
        tags = [tag for (word, tag) in tokens]
        # Tag the POS sequence with chunk (IOB) labels.
        ti_list = self.__tagger.tag(tags)
        # Extract the IOB tags.
        iob_tags = [iob_tag for (tag, iob_tag) in ti_list]
        # Recombine into CoNLL-style triples.
        conlltags = [(word, pos, iob_tag) for ((word, pos), iob_tag) in zip(tokens, iob_tags)]
        return nltk.chunk.conlltags2tree(conlltags)
# test_sents = conll2000.chunked_sents("test.txt", chunk_types=["NP"])
# train_sents = conll2000.chunked_sents("train.txt", chunk_types=["NP"])
# unigram_chunker = UnigramChunker(train_sents)
# print(unigram_chunker.evaluate(test_sents))
# rake-nltk
# Uses stopwords for english from NLTK, and all puntuation characters by
# default
# r = Rake()
# Extraction given the text.
# r.extract_keywords_from_text("Python is a high-level, general-purpose programming language.")
# print(r.get_ranked_phrases())
# print(r.get_ranked_phrases_with_scores())
# print(r.get_word_degrees())
# print(r.get_word_frequency_distribution())
# Extraction given the list of strings where each string is a sentence.
# r.extract_keywords_from_sentences(["Uses stopwords for english from NLTK, and all puntuation
# characters by","Uses stopwords for english from NLTK, and all puntuation characters by"])
# text-blob http://textblob.readthedocs.io/en/dev/quickstart.html
# text-blob wordnet interface http://www.nltk.org/howto/wordnet.html
# w = Word("octopi")
# print(w.lemmatize())
# w = Word("went")
# print(w.lemmatize("v"))
# WordNet Integration
# 'And now for something completely different'
def extract_keyword(text):
    """Return RAKE keyword tokens for each sentence of `text`, in order.

    PERF: the Rake instance is now created once and reused —
    extract_keywords_from_text() recomputes its state on every call,
    so per-sentence construction was pure overhead.
    """
    r = Rake()
    result = []
    for sentence in splitSentence(text):
        r.extract_keywords_from_text(sentence)
        result += list(r.get_word_degrees().keys())
    return result

def extract_keyword2(text):
    """Return RAKE keyword tokens for `text` treated as one unit."""
    r = Rake()
    r.extract_keywords_from_text(text)
    return list(r.get_word_degrees().keys())
def extract_word(text):
    """Tokenize `text` into words with NLTK's default tokenizer."""
    return nltk.word_tokenize(text)

def extract_noun(text):
    """Collect NN/NNP-tagged tokens from every sentence of `text`."""
    nouns = []
    for sentence in splitSentence(text):
        nouns.extend(extract_noun2(sentence))
    return nouns

def extract_noun2(text):
    """Collect NN/NNP-tagged tokens from `text` tagged as one chunk."""
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    return [word for word, tag in tagged if tag in ("NN", "NNP")]

def extract_word_freq(text):
    """Return an nltk.FreqDist over the tokens of `text`."""
    return nltk.FreqDist(nltk.word_tokenize(text))
def extract_adj(text):
    """Collect JJ-tagged tokens from every sentence of `text`."""
    adjectives = []
    for sentence in splitSentence(text):
        adjectives.extend(extract_adj2(sentence))
    return adjectives

def extract_adj2(text):
    """Collect JJ-tagged tokens from `text` tagged as one chunk."""
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    return [word for word, tag in tagged if tag == "JJ"]

def extract_verb(text):
    """Collect VB-tagged tokens from every sentence of `text`."""
    verbs = []
    for sentence in splitSentence(text):
        verbs.extend(extract_verb2(sentence))
    return verbs

def extract_verb2(text):
    """Collect VB-tagged tokens from `text` tagged as one chunk.

    NOTE(review): only the base-form tag "VB" is matched; inflected verb
    tags (VBD, VBG, VBN, VBP, VBZ) are ignored — confirm intended.
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    return [word for word, tag in tagged if tag == "VB"]
def extract_noun_phrase(text):
    """Collect TextBlob noun phrases sentence by sentence."""
    phrases = []
    for sentence in splitSentence(text):
        phrases.extend(TextBlob(sentence).noun_phrases)
    return phrases

def extract_noun_phrase2(text):
    """Return TextBlob noun phrases for `text` as a whole."""
    return TextBlob(text).noun_phrases
def extract_ner(text):
    """Approximate named entities as NNP-tagged tokens, per sentence.

    NOTE(review): this is a proper-noun heuristic, not true NER — the
    nltk.ne_chunk approach exists only in the commented-out history.
    """
    entities = []
    for sentence in splitSentence(text):
        entities.extend(extract_ner2(sentence))
    return entities

def extract_ner2(text):
    """Approximate named entities as NNP-tagged tokens in `text`."""
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    return [word for word, tag in tagged if tag == "NNP"]
def splitSentence(paragraph):
    """Split `paragraph` into sentences with the English punkt model."""
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    return tokenizer.tokenize(paragraph)

def wordtokenizer(sentence):
    """Tokenize a sentence on word/punctuation boundaries."""
    return WordPunctTokenizer().tokenize(sentence)

def para2senc2words(text):
    """Split `text` into sentences and each sentence into word lists."""
    return [wordtokenizer(sentence) for sentence in splitSentence(text)]
def _co_relation_pairs(text, extractor):
    """Shared helper for the extract_relation_*_co family.

    Per sentence: extract terms with `extractor`, deduplicate while
    preserving first-occurrence order, and emit every unordered pair
    as an (a, b, "co") co-occurrence relation.

    Replaces six identical copies of this logic; dict.fromkeys also
    turns the O(n^2) membership dedupe into O(n).
    """
    relations = []
    for sentence in splitSentence(text):
        terms = list(dict.fromkeys(extractor(sentence)))
        for i in range(len(terms) - 1):
            for j in range(i + 1, len(terms)):
                relations.append((terms[i], terms[j], "co"))
    return relations

def extract_relation_noun_co(text):
    """Sentence-level co-occurrence relations between nouns."""
    return _co_relation_pairs(text, extract_noun2)

def extract_relation_noun_phrase_co(text):
    """Sentence-level co-occurrence relations between noun phrases."""
    return _co_relation_pairs(text, extract_noun_phrase2)

def extract_relation_keyword_co(text):
    """Sentence-level co-occurrence relations between RAKE keywords."""
    return _co_relation_pairs(text, extract_keyword2)

def extract_relation_adj_co(text):
    """Sentence-level co-occurrence relations between adjectives."""
    return _co_relation_pairs(text, extract_adj2)

def extract_relation_verb_co(text):
    """Sentence-level co-occurrence relations between verbs."""
    return _co_relation_pairs(text, extract_verb2)

def extract_relation_ner_co(text):
    """Sentence-level co-occurrence relations between NNP tokens."""
    return _co_relation_pairs(text, extract_ner2)
def _wordnet_relations(terms):
    """Shared helper for the extract_relation_*_wordnet family.

    Deduplicates `terms` (first-occurrence order) and emits
    (a, b, "wordnet", similarity) for every unordered pair whose
    WordNet path similarity is truthy.

    PERF: the similarity is now computed once per pair; the previous
    code called wordnet_similarity() twice (guard + value).
    """
    unique_terms = list(dict.fromkeys(terms))
    relations = []
    for i in range(len(unique_terms) - 1):
        for j in range(i + 1, len(unique_terms)):
            similarity = wordnet_similarity(unique_terms[i], unique_terms[j])
            if similarity:
                relations.append((unique_terms[i], unique_terms[j], "wordnet", similarity))
    return relations

def extract_relation_noun_wordnet(text):
    """WordNet-similarity relations between the nouns of `text`."""
    return _wordnet_relations(extract_noun(text))

def extract_relation_adj_wordnet(text):
    """WordNet-similarity relations between the adjectives of `text`."""
    return _wordnet_relations(extract_adj(text))

def extract_relation_verb_wordnet(text):
    """WordNet-similarity relations between the verbs of `text`."""
    return _wordnet_relations(extract_verb(text))

def extract_relation_keyword_wordnet(text):
    """WordNet-similarity relations between the RAKE keywords of `text`."""
    return _wordnet_relations(extract_keyword(text))
def word2vec_initialize(text):
    """Train a fresh Word2Vec model on `text` and save it to knowledgeB.model.

    NOTE(review): `size=` is the gensim<4 parameter name (renamed to
    `vector_size` in gensim 4.x) — confirm the pinned gensim version.
    """
    # sent = para2senc2words(text)
    # print(sent)
    # open('corpus.txt','w').write(text)
    # sentences = word2vec.Text8Corpus("corpus.txt")
    sentences = para2senc2words(text)
    model = word2vec.Word2Vec(sentences, size=100, window=5, min_count=1, workers=4)
    model.save("knowledgeB.model")
    return 0
# Continue training the saved model on new text.
# FIX for the old "this function doesn't work" note:
#   1) the vocabulary must be expanded with build_vocab(update=True)
#      before training on unseen words,
#   2) total_examples=1 under-reported the corpus size,
#   3) the updated model was never saved, so word2vec_result() kept
#      loading the stale file.
def word2vec_trainmore(text):
    sentences = para2senc2words(text)
    model = word2vec.Word2Vec.load("knowledgeB.model")
    model.build_vocab(sentences, update=True)
    model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
    model.save("knowledgeB.model")
    return 0
def word2vec_result(word):
    """Load the saved model and return the vector for `word`.

    Raises KeyError if `word` is not in the model's vocabulary.
    NOTE(review): `model[word]` is gensim<4 syntax (4.x requires
    `model.wv[word]`) — confirm the pinned gensim version.
    """
    new_model = word2vec.Word2Vec.load("knowledgeB.model")
    # print(new_model[word])
    return new_model[word]
# Stemming: fishing -> fish, shops -> shop
def word_stem(word):
    """Return the English Snowball stem of `word`, or `word` itself
    when the stem is empty (falsy)."""
    s = nltk.stem.SnowballStemmer('english')
    # PERF: the stem was previously computed twice (guard + return).
    stemmed = s.stem(word)
    return stemmed or word
# Lemmatization: octopi -> octopus
def word_lemmatized(word):
    """Return the TextBlob lemma of `word` (noun by default)."""
    return Word(word).lemmatize()
# Return all WordNet synsets containing the word; a word can belong to many.
def wordnet_synsets(word):
    """Return wn.synsets(word), or the sentinel 1 when there are none.

    NOTE(review): callers (wordnet_similarity) test `!= 1`; an empty
    list would be more idiomatic but would change the contract.
    """
    # PERF: wn.synsets() was previously called twice on the hit path.
    synsets = wn.synsets(word)
    return synsets if synsets else 1
# Given a synset name, return all lemma names in that synset.
def wordnet_lemma_names(wordset):
    """Return the lemma names of the synset identified by `wordset`.

    FIX: lemma_names is a method in modern NLTK; without the call
    parentheses this returned a bound method, not the name list.
    """
    return wn.synset(wordset).lemma_names()
def wordnet_similarity(word1, word2):
    """Path similarity between the first synsets of the stemmed words.

    Returns 0 when either word has no synsets. May return None when
    WordNet finds no connecting path (path_similarity's contract).
    """
    # PERF: each synset list was previously computed twice (guard + use).
    synsets1 = wordnet_synsets(word_stem(word1))
    synsets2 = wordnet_synsets(word_stem(word2))
    if synsets1 != 1 and synsets2 != 1:
        return synsets1[0].path_similarity(synsets2[0])
    return 0
# if __name__ == '__main__':
# print(para2senc2words('I am very excited about the next generation of Apple products. But
# I am csk! So I am not afraid of you. I am very excited about the next generation of
# Apple products. But I am csk! So I am not afraid of you.'))
# word2vec_initialize("I am very excited about the ne
# xt generation of Apple products. But I am csk! So I am not afraid of you.")
# word2vec_trainmore("And now for something completely different.")
# word2vec_trainmore("I hate Apple products.")
# word2vec_result("I")
# extract_word_freq("Hello world, I am csk")
# model = word2vec.Word2Vec.load("knowledgeB.model")
# print(model.similarity("I","Apple"))
# print(word_stem('shops'))
# print(word_stem('chicken'))
| 2.703125 | 3 |
src/MMSA/trains/singleTask/MISA.py | dumpmemory/MMSA | 1 | 12759696 | <gh_stars>1-10
import logging
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from ...utils import MetricsTop, dict_to_str
logger = logging.getLogger('MMSA')
class MISA():
    """Training/evaluation driver for the MISA multimodal model.

    Combines the task loss with MISA's auxiliary losses: difference
    (orthogonality), reconstruction, and a similarity loss that is
    either CMD or an adversarial domain loss (args.use_cmd_sim).
    """
    def __init__(self, args):
        self.args = args
        # Task loss: regression -> MSE, classification -> cross-entropy.
        self.criterion = nn.MSELoss() if args.train_mode == 'regression' else nn.CrossEntropyLoss()
        self.loss_diff = DiffLoss()
        self.loss_recon = MSE()
        self.loss_cmd = CMD()
        # FIX: referenced by get_domain_loss() but never defined — any run
        # with args.use_cmd_sim == False crashed with AttributeError.
        self.domain_loss_criterion = nn.CrossEntropyLoss()
        self.metrics = MetricsTop(args.train_mode).getMetics(args.dataset_name)

    def do_train(self, model, dataloader, return_epoch_results=False):
        """Train with gradient accumulation until early stopping.

        Saves the best model (by args.KeyEval on the validation split)
        to args.model_save_path. Returns per-epoch results only when
        return_epoch_results is True, otherwise None.
        """
        self.model = model
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=self.args.learning_rate)
        # initialize results
        epochs, best_epoch = 0, 0
        if return_epoch_results:
            epoch_results = {
                'train': [],
                'valid': [],
                'test': []
            }
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        while True:
            epochs += 1
            # train
            y_pred, y_true = [], []
            model.train()
            train_loss = 0.0
            left_epochs = self.args.update_epochs
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    # using accumulated gradients: zero only at window start
                    if left_epochs == self.args.update_epochs:
                        optimizer.zero_grad()
                    left_epochs -= 1
                    text = batch_data['text'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    vision = batch_data['vision'].to(self.args.device)
                    labels = batch_data['labels']['M'].to(self.args.device)
                    if self.args.train_mode == 'classification':
                        labels = labels.view(-1).long()
                    else:
                        labels = labels.view(-1, 1)
                    # forward
                    outputs = model(text, audio, vision)['M']
                    # compute loss; the auxiliary terms read tensors the
                    # forward pass stashed on model.Model
                    cls_loss = self.criterion(outputs, labels)
                    diff_loss = self.get_diff_loss()
                    domain_loss = self.get_domain_loss()
                    recon_loss = self.get_recon_loss()
                    cmd_loss = self.get_cmd_loss()
                    if self.args.use_cmd_sim:
                        similarity_loss = cmd_loss
                    else:
                        similarity_loss = domain_loss
                    loss = cls_loss + \
                        self.args.diff_weight * diff_loss + \
                        self.args.sim_weight * similarity_loss + \
                        self.args.recon_weight * recon_loss
                    # backward
                    loss.backward()
                    if self.args.grad_clip != -1.0:
                        torch.nn.utils.clip_grad_value_([param for param in model.parameters() if param.requires_grad], self.args.grad_clip)
                    # store results
                    train_loss += loss.item()
                    y_pred.append(outputs.cpu())
                    y_true.append(labels.cpu())
                    if not left_epochs:
                        optimizer.step()
                        left_epochs = self.args.update_epochs
                if not left_epochs:
                    # flush the last partial accumulation window
                    optimizer.step()
            train_loss = train_loss / len(dataloader['train'])
            pred, true = torch.cat(y_pred), torch.cat(y_true)
            train_results = self.metrics(pred, true)
            logger.info(
                f"TRAIN-({self.args.model_name}) [{epochs - best_epoch}/{epochs}/{self.args.cur_seed}] >> loss: {round(train_loss, 4)} {dict_to_str(train_results)}"
            )
            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            cur_valid = val_results[self.args.KeyEval]
            isBetter = cur_valid <= (best_valid - 1e-6) if min_or_max == 'min' else cur_valid >= (best_valid + 1e-6)
            # save best model
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # epoch results
            if return_epoch_results:
                train_results["Loss"] = train_loss
                epoch_results['train'].append(train_results)
                epoch_results['valid'].append(val_results)
                test_results = self.do_test(model, dataloader['test'], mode="TEST")
                epoch_results['test'].append(test_results)
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                return epoch_results if return_epoch_results else None

    def do_test(self, model, dataloader, mode="VAL", return_sample_results=False):
        """Evaluate `model` on `dataloader`; returns the metrics dict
        (plus ids/predictions/labels when return_sample_results)."""
        model.eval()
        y_pred, y_true = [], []
        eval_loss = 0.0
        if return_sample_results:
            ids, sample_results = [], []
            all_labels = []
            features = {
                "Feature_t": [],
                "Feature_a": [],
                "Feature_v": [],
                "Feature_f": [],
            }
        with torch.no_grad():
            with tqdm(dataloader) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    labels = batch_data['labels']['M'].to(self.args.device)
                    if self.args.train_mode == 'classification':
                        labels = labels.view(-1).long()
                    else:
                        labels = labels.view(-1, 1)
                    outputs = model(text, audio, vision)
                    if return_sample_results:
                        ids.extend(batch_data['id'])
                        # TODO: add features
                        # for item in features.keys():
                        #     features[item].append(outputs[item].cpu().detach().numpy())
                        all_labels.extend(labels.cpu().detach().tolist())
                        preds = outputs["M"].cpu().detach().numpy()
                        sample_results.extend(preds.squeeze())
                    loss = self.criterion(outputs['M'], labels)
                    eval_loss += loss.item()
                    y_pred.append(outputs['M'].cpu())
                    y_true.append(labels.cpu())
        eval_loss = eval_loss / len(dataloader)
        pred, true = torch.cat(y_pred), torch.cat(y_true)
        eval_results = self.metrics(pred, true)
        eval_results["Loss"] = round(eval_loss, 4)
        logger.info(f"{mode}-({self.args.model_name}) >> {dict_to_str(eval_results)}")
        if return_sample_results:
            eval_results["Ids"] = ids
            eval_results["SResults"] = sample_results
            # for k in features.keys():
            #     features[k] = np.concatenate(features[k], axis=0)
            eval_results['Features'] = features
            eval_results['Labels'] = all_labels
        return eval_results

    def get_domain_loss(self,):
        """Adversarial similarity loss: classify which modality each
        shared representation came from (0=text, 1=vision, 2=audio)."""
        if self.args.use_cmd_sim:
            return 0.0
        # Predicted domain labels (stashed by the forward pass)
        domain_pred_t = self.model.Model.domain_label_t
        domain_pred_v = self.model.Model.domain_label_v
        domain_pred_a = self.model.Model.domain_label_a
        # True domain labels.
        # FIX: the device lives on self.args, not on the trainer itself —
        # `self.device` raised AttributeError.
        domain_true_t = torch.LongTensor([0] * domain_pred_t.size(0)).to(self.args.device)
        domain_true_v = torch.LongTensor([1] * domain_pred_v.size(0)).to(self.args.device)
        domain_true_a = torch.LongTensor([2] * domain_pred_a.size(0)).to(self.args.device)
        # Stack up predictions and true labels
        domain_pred = torch.cat((domain_pred_t, domain_pred_v, domain_pred_a), dim=0)
        domain_true = torch.cat((domain_true_t, domain_true_v, domain_true_a), dim=0)
        return self.domain_loss_criterion(domain_pred, domain_true)

    def get_cmd_loss(self,):
        """CMD similarity loss averaged over the three modality pairs."""
        if not self.args.use_cmd_sim:
            return 0.0
        # losses between shared states
        loss = self.loss_cmd(self.model.Model.utt_shared_t, self.model.Model.utt_shared_v, 5)
        loss += self.loss_cmd(self.model.Model.utt_shared_t, self.model.Model.utt_shared_a, 5)
        loss += self.loss_cmd(self.model.Model.utt_shared_a, self.model.Model.utt_shared_v, 5)
        loss = loss / 3.0
        return loss

    def get_diff_loss(self, ):
        """Orthogonality loss between private/shared and across privates."""
        shared_t = self.model.Model.utt_shared_t
        shared_v = self.model.Model.utt_shared_v
        shared_a = self.model.Model.utt_shared_a
        private_t = self.model.Model.utt_private_t
        private_v = self.model.Model.utt_private_v
        private_a = self.model.Model.utt_private_a
        # Between private and shared
        loss = self.loss_diff(private_t, shared_t)
        loss += self.loss_diff(private_v, shared_v)
        loss += self.loss_diff(private_a, shared_a)
        # Across privates
        loss += self.loss_diff(private_a, private_t)
        loss += self.loss_diff(private_a, private_v)
        loss += self.loss_diff(private_t, private_v)
        return loss

    def get_recon_loss(self, ):
        """Mean reconstruction (MSE) loss across the three modalities."""
        loss = self.loss_recon(self.model.Model.utt_t_recon, self.model.Model.utt_t_orig)
        loss += self.loss_recon(self.model.Model.utt_v_recon, self.model.Model.utt_v_orig)
        loss += self.loss_recon(self.model.Model.utt_a_recon, self.model.Model.utt_a_orig)
        loss = loss / 3.0
        return loss
class MSE(nn.Module):
    """Mean squared error between a prediction and a target tensor."""

    def __init__(self):
        super(MSE, self).__init__()

    def forward(self, pred, real):
        # Average of the squared element-wise differences.
        squared_error = torch.pow(real - pred, 2)
        return torch.sum(squared_error) / torch.numel(squared_error)
class SIMSE(nn.Module):
    """Scale-invariant MSE: squared sum of errors divided by n^2."""

    def __init__(self):
        super(SIMSE, self).__init__()

    def forward(self, pred, real):
        error = real - pred
        count = torch.numel(error.data)
        return torch.sum(error).pow(2) / (count ** 2)
class DiffLoss(nn.Module):
    """Orthogonality ("difference") loss between two representation batches.

    Zero-centers each feature across the batch, L2-normalizes each row,
    and penalizes the mean squared cross-correlation between the two sets.
    """

    def __init__(self):
        super(DiffLoss, self).__init__()

    def forward(self, input1, input2):
        batch_size = input1.size(0)
        input1 = input1.view(batch_size, -1)
        input2 = input2.view(batch_size, -1)

        # Zero mean across the batch.
        # FIX: use the canonical `keepdim` keyword — the numpy-style
        # `keepdims` spelling is not accepted by all torch versions.
        input1_mean = torch.mean(input1, dim=0, keepdim=True)
        input2_mean = torch.mean(input2, dim=0, keepdim=True)
        input1 = input1 - input1_mean
        input2 = input2 - input2_mean

        # Row-wise L2 normalization (norms detached; eps avoids /0).
        input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
        input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-6)
        input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
        input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-6)

        diff_loss = torch.mean((input1_l2.t().mm(input2_l2)).pow(2))
        return diff_loss
class CMD(nn.Module):
    """
    Central Moment Discrepancy between two sample batches.
    Adapted from https://github.com/wzell/cmd/blob/master/models/domain_regularizer.py
    """

    def __init__(self):
        super(CMD, self).__init__()

    def forward(self, x1, x2, n_moments):
        # First moment: distance between the batch means.
        mean1 = torch.mean(x1, 0)
        mean2 = torch.mean(x2, 0)
        centered1 = x1 - mean1
        centered2 = x2 - mean2
        total = self.matchnorm(mean1, mean2)
        # Higher central moments, orders 2..n_moments.
        for order in range(2, n_moments + 1):
            total = total + self.scm(centered1, centered2, order)
        return total

    def matchnorm(self, x1, x2):
        # Euclidean distance between two moment vectors.
        return torch.sum(torch.pow(x1 - x2, 2)) ** 0.5

    def scm(self, sx1, sx2, k):
        # Distance between the k-th central moments of the two batches.
        moment1 = torch.mean(torch.pow(sx1, k), 0)
        moment2 = torch.mean(torch.pow(sx2, k), 0)
        return self.matchnorm(moment1, moment2)
| 2.234375 | 2 |
setup.py | Security-Banana-Group/MoA | 0 | 12759697 | <reponame>Security-Banana-Group/MoA<filename>setup.py
import os
import sys

# setuptools is required here: the old distutils fallback could never work —
# distutils.core has no find_packages and does not support entry_points.
from setuptools import setup, find_packages

setup(name='MoA',
      version='0.1',
      description='Tools for testing a network',
      # FIX: the setup() keyword is `url`, not `urls`.
      url='https://github.com/Security-Banana-Group/MoA',
      author='<NAME>',
      author_email='<EMAIL>',
      packages=find_packages(),
      scripts=[],
      license='MIT',
      include_package_data=False,
      package_data={'': ['*.txt', '*.rst']},
      zip_safe=False,
      entry_points={
          'console_scripts': [
              'pytrace = moa.trace.exec:run_trace'
          ],
      })

# NOTE(review): BIN_PATH is defined but never used in this script —
# confirm whether an install step was meant to consume it.
BIN_PATH = '/usr/local/bin'
| 1.421875 | 1 |
crawlster/helpers/http/requests.py | vladcalin/crawlster | 1 | 12759698 | <gh_stars>1-10
import requests
import requests.auth
import requests.exceptions
from crawlster.helpers.base import BaseHelper
from crawlster.helpers.http.request import (
HttpRequest, GetRequest, PostRequest)
from crawlster.helpers.http.response import HttpResponse
class RequestsHelper(BaseHelper):
    """Helper for making HTTP requests using the requests library.

    Tracks crawl statistics (request count, bytes up/down, errors) on
    the attached crawler's stats object.
    """
    name = 'http'
    # Stat keys used with self.crawler.stats.
    STAT_DOWNLOAD = 'http.download'
    STAT_UPLOAD = 'http.upload'
    STAT_REQUESTS = 'http.requests'
    STAT_HTTP_ERRORS = 'http.errors'

    def __init__(self):
        super(RequestsHelper, self).__init__()
        # Created lazily in initialize(); shared for connection reuse.
        self.session = None

    def initialize(self):
        """Initializes the session used for making requests"""
        self.session = requests.session()

    def open(self, http_request: HttpRequest):
        """Opens a given HTTP request.

        Args:
            http_request (HttpRequest):
                The crawlster.helpers.http.request.HttpRequest instance
                with the required info for making the request

        Returns:
            crawlster.helpers.http.response.HttpResponse, or None when
            the request raised a requests.RequestException (the error
            is recorded in stats and logged).

        NOTE(review): no timeout is passed to session.request, so a
        stalled server can block indefinitely — confirm intended.
        """
        self.crawler.stats.incr(self.STAT_REQUESTS)
        try:
            resp = self.session.request(
                http_request.method, http_request.url,
                http_request.query_params,
                http_request.data, http_request.headers
            )
            http_resp = HttpResponse(
                http_request, resp.status_code, resp.headers, resp.content
            )
            self.crawler.stats.incr(self.STAT_DOWNLOAD,
                                    by=self._compute_resp_size(http_resp))
            self.crawler.stats.incr(self.STAT_UPLOAD,
                                    by=self._compute_req_size(http_request))
            return http_resp
        except requests.exceptions.RequestException as e:
            self.crawler.stats.add(self.STAT_HTTP_ERRORS, e)
            self.crawler.log.error(str(e))

    def get(self, url, query_params=None, headers=None):
        """Makes a GET request"""
        return self.open(
            GetRequest(url, query_params or {}, headers or {})
        )

    def post(self, url, data=None, query_params=None, headers=None):
        """Makes a POST request"""
        return self.open(PostRequest(url, data, query_params, headers))

    def patch(self, url, data=None, query_params=None, headers=None):
        """Makes a PATCH request"""
        return self.open(
            HttpRequest(url, 'PATCH', query_params, data, headers))

    def delete(self, url, data=None, query_params=None, headers=None):
        """Makes a DELETE request"""
        return self.open(
            HttpRequest(url, 'DELETE', query_params, data, headers))

    def options(self, url, query_params=None, headers=None):
        """Makes an OPTIONS request"""
        return self.open(
            HttpRequest(url, 'OPTIONS', query_params, None, headers))

    def _compute_resp_size(self, response):
        # Body length only; headers are not counted.
        return len(response.body)

    def _compute_req_size(self, request):
        # Body length only; falls back to 0 for body-less requests.
        return len(request.data or '')
| 3.046875 | 3 |
inscript/urls.py | alanbato/inscribetec | 0 | 12759699 | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("reportes", views.reportes, name="reportes"),
path(
"courses_by/<str:teacher_name>", views.teacher_courses, name="teacher_courses"
),
path(
"courses_of/<str:subject_name>", views.subject_courses, name="subject_courses"
),
path(
"classrooms_at/<str:time_slot>",
views.available_classrooms,
name="available_classrooms",
),
path("teaching_at/<str:time_slot>", views.busy_teachers, name="busy_teachers"),
path(
"not_teaching_at/<str:time_slot>",
views.available_teachers,
name="available_teachers",
),
path(
"course_at/<str:day>/<str:classroom>", views.which_course, name="which_course"
),
path(
"check_classroom/<str:classroom>/<str:time_slot>",
views.validate_slot,
name="validate_slot",
),
]
| 2.109375 | 2 |
pycaw/etherscan/__init__.py | Unique-Divine/crypto-apis | 0 | 12759700 | <reponame>Unique-Divine/crypto-apis
"""TODO module docs for pycaw.etherscan"""
from pycaw.etherscan import types
from pycaw.etherscan import etherscan_connector
from pycaw.etherscan import token_info_connector
EtherscanConnector = etherscan_connector.EtherscanConnector
TokenInfoConnector = token_info_connector.TokenInfoConnector
# TokenInfoConnector.__doc__ =
"""TODO doc"""
InternalMsgCall = types.InternalMsgCall
NormalTx = types.NormalTx
TxReceipt = types.TxReceipt
__all__ = ['EtherscanConnector', 'TokenInfoConnector']
| 1.789063 | 2 |
api/int_news/views.py | sp-team-lutsk/docker_polls_group | 0 | 12759701 | <filename>api/int_news/views.py
from rest_framework import status
from django.http import Http404
from rest_framework.response import Response
from rest_framework.views import APIView
from int_news.serializers import NewsIntSerializer
from int_news.models import NewsInt
from utils.decorators import permission, permissions
from ext_news.serializers import SetNewsSerializer
from utils.permissions import AllowAny
from utils.views import get_int_news
class PostUpdInt(APIView):
    """Retrieve, replace, or delete a single internal news item by id."""
    permission_classes = [AllowAny, ]
    serializer_class = NewsIntSerializer
    queryset = NewsInt.objects.all()
    lookup_field = 'id'

    def get(self, request, id, *args, **kwargs):
        """Return the serialized news item with the given id."""
        news = get_int_news(id)
        serializer = NewsIntSerializer(news)
        return Response(serializer.data)

    def put(self, request, id, *args, **kwargs):
        """Replace the news item with the request payload; 400 on invalid data."""
        news = get_int_news(id)
        serializer = NewsIntSerializer(news, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, id):
        """Delete the news item with the given id."""
        news = get_int_news(id)
        news.delete()
        return Response({'Status': 'OK'}, status=status.HTTP_200_OK)
class News_Bulk(APIView):
    """Bulk operations on internal news: list, create, update-all, delete-all."""
    permission_classes = [AllowAny, ]
    serializer_class = NewsIntSerializer
    queryset = NewsInt.objects.none()

    def get(self, request, *args, **kwargs):
        """Return every internal news item."""
        news = NewsInt.objects.all()
        serializer = NewsIntSerializer(news, many=True)
        return Response(serializer.data)

    def post(self, request, *args, **kwargs):
        """Create one news item from the request payload; 400 on invalid data."""
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        news_saved = serializer.save()
        return Response(
            data={"success": "News '{}' created successfully".format((news_saved))},
            status=status.HTTP_201_CREATED)

    def put(self, request, *args, **kwargs):
        """Apply the request payload to every news item.

        FIX: the response was previously returned from inside the loop,
        so only the first item was ever updated. (The pointless
        `self.serializer_class = NewsIntSerializer` reassignment in
        post/put was also dropped — it is already the class attribute.)
        """
        for news_item in NewsInt.objects.all():
            serializer = NewsIntSerializer(news_item, data=request.data)
            if serializer.is_valid(raise_exception=True):
                serializer.save()
        return Response(data={"200": "OK"}, status=status.HTTP_200_OK)

    def delete(self, request, *args, **kwargs):
        """Delete every internal news item."""
        news = NewsInt.objects.all()
        news.delete()
        return Response({'Status': 'OK'}, status=status.HTTP_200_OK)
class ModeratorCheckNewsAPIView(APIView):
    """Let a moderator flag a news item as checked/unchecked."""
    queryset = NewsInt.objects.none()
    permission_classes = [AllowAny, ]
    serializer_class = SetNewsSerializer

    def post(self, request, *args, **kwargs):
        """Set `is_checked` on the news item identified by payload `id`.

        FIX: the bare broad `except:` also swallowed save()/programming
        errors and reported them as 404. Only the lookup failures
        (missing row, malformed id) now map to 404; an unused
        serializer local was removed.
        """
        try:
            news = NewsInt.objects.get(id=request.data.get('id'))
        except (NewsInt.DoesNotExist, ValueError, TypeError):
            return Response(data={"News": "Not Found"}, status=status.HTTP_404_NOT_FOUND)
        check = request.data.get('status')
        news.is_checked = check
        news.save()
        return Response(data={"is_checked": "{}".format(str(check))}, status=status.HTTP_200_OK)
| 2.0625 | 2 |
hdm/core/dao/netezza_jdbc.py | hashmapinc/hdm | 1 | 12759702 | <reponame>hashmapinc/hdm
# Copyright © 2020 Hashmap, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jaydebeapi as connector
import yaml
from hdm.core.dao.netezza import Netezza
from hdm.core.utils.project_config import ProjectConfig
class NetezzaJDBC(Netezza):
    """Netezza DAO over JDBC (jaydebeapi), configured from the HDM profile YAML."""

    def _validate_configuration(self) -> bool:
        """Return True when the profile entry for this connection has every
        key a JDBC connection needs, including the nested driver name/path.

        NOTE(review): assumes the YAML contains hdm_env() and
        self._connection_name — a missing entry raises KeyError instead
        of returning False. Confirm callers expect that.
        """
        with open(f"{ProjectConfig.hdm_home()}/{ProjectConfig.profile_path()}", 'r') as stream:
            conn_conf = yaml.safe_load(stream)[ProjectConfig.hdm_env()][self._connection_name]
            required_keys = ['host', 'port', 'database', 'user', 'password', 'driver']
            is_valid = all([key in conn_conf.keys() for key in required_keys])
            if is_valid:
                # The 'driver' mapping itself must name the JDBC class and jar path.
                required_keys = ['name', 'path']
                return all([key in conn_conf['driver'].keys() for key in required_keys])
            return is_valid

    def _get_connection_config(self, config: dict):
        """Build the jaydebeapi connection parameters from a profile entry."""
        return dict(driver_name=config['driver']['name'],
                    driver_location=config['driver']['path'],
                    connection_string=f"jdbc:netezza://{config['host']}:{config['port']}/{config['database']}",
                    user=config['user'],
                    password=config['password'])

    def _connect_by_connector(self, config: dict) -> None:
        """Open and return a jaydebeapi connection using the prepared config."""
        return connector.connect(config['driver_name'],
                                 config['connection_string'],
                                 {
                                     'user': config['user'],
                                     'password': config['password']
                                 },
                                 jars=config['driver_location'])
| 2.03125 | 2 |
tests/classification/test_osvgp_classification.py | wjmaddox/online_gp | 31 | 12759703 | <reponame>wjmaddox/online_gp<filename>tests/classification/test_osvgp_classification.py
import unittest
from online_gp.datasets.classification import Banana
from online_gp.models.online_svgp_classifier import OnlineSVGPClassifier
from online_gp.models.stems import Identity
import torch
class TestOnlineSVGPClassifier(unittest.TestCase):
    """Accuracy smoke tests for OnlineSVGPClassifier on the Banana dataset."""

    def test_batch_classification(self):
        """Offline (batch) training should reach >= 0.85 test accuracy."""
        datasets = Banana()
        train_dataset, test_dataset = datasets.train_dataset, datasets.test_dataset
        train_x, train_y = train_dataset[:]
        test_x, test_y = test_dataset[:]
        # Scale both splits by the *original* train max-abs; test_x must be
        # normalized before train_x itself is rescaled — order matters.
        test_x = test_x / train_x.abs().max(0)[0]
        train_x = train_x / train_x.abs().max(0)[0]
        input_dim = train_x.size(-1)
        stem = Identity(input_dim)  # no feature extractor; raw inputs
        num_inducing = 128
        lr = 1e-2
        classifier = OnlineSVGPClassifier(stem, train_x, num_inducing, lr, streaming=False)
        if torch.cuda.is_available():
            classifier = classifier.cuda()
        classifier.fit(train_x, train_y, num_epochs=100)
        test_pred = classifier.predict(test_x)
        test_acc = test_pred.eq(test_y).float().mean()
        self.assertGreaterEqual(test_acc, 0.85)

    def test_online_classification(self):
        """Streaming (predict-then-update) training should reach >= 0.65
        cumulative accuracy and >= 0.75 final test accuracy."""
        datasets = Banana()
        train_dataset, test_dataset = datasets.train_dataset, datasets.test_dataset
        train_x, train_y = train_dataset[:]
        test_x, test_y = test_dataset[:]
        # Same scaling order caveat as the batch test.
        test_x = test_x / train_x.abs().max(0)[0]
        train_x = train_x / train_x.abs().max(0)[0]
        num_train, input_dim = train_x.shape
        stem = Identity(input_dim)
        num_inducing = 128
        lr = 1e-2
        beta = 1e-3
        num_update_steps = 1
        batch_size = 1
        # Stream the training data one example at a time.
        train_x = torch.chunk(train_x, train_x.size(0) // batch_size)
        train_y = torch.chunk(train_y, train_y.size(0) // batch_size)
        classifier = OnlineSVGPClassifier(stem, train_x[0], num_inducing, lr, streaming=True, beta=beta,
                                          num_update_steps=num_update_steps, learn_inducing_locations=True)
        if torch.cuda.is_available():
            classifier = classifier.cuda()
        correct = 0
        # Prequential evaluation: predict each point before updating on it.
        for t, (x, y) in enumerate(zip(train_x, train_y)):
            pred_y = classifier.predict(x)
            classifier.update(x, y)
            correct += pred_y.eq(y).sum().float().item()
        cum_acc = correct / num_train
        self.assertGreaterEqual(cum_acc, 0.65)
        test_pred = classifier.predict(test_x)
        test_acc = test_pred.eq(test_y).float().mean()
        self.assertGreaterEqual(test_acc, 0.75)
| 2.359375 | 2 |
inquire_sql_backend/semantics/embeddings/vector_models.py | PervasiveWellbeingTech/inquire-web-backend | 1 | 12759704 | <reponame>PervasiveWellbeingTech/inquire-web-backend
from functools import partial
# from gensim.models.wrappers.fasttext import FastText
import numpy as np
from sentence_transformers import SentenceTransformer
from inquire_sql_backend.semantics.embeddings.glove_wrapper import GloveWrapper
from inquire_sql_backend.semantics.embeddings.util import tokenize, stopwords
import pickle as pkl
import logging
log = logging.getLogger(__name__)
# These should point to the pkl files that were specified as the output file in the "finalize_lstm_model" script
LSTM_PATHS = {
    "lstm_bc": '/commuter/inquire_data_root/bookCorpus/lstm/finalized_lstm_glove_bc.pkl',
    "lstm_lj": "/commuter/inquire_data_root/livejournal_sample/lstm/finalized_lstm_lj_glove.pkl",
}
# These paths should always point at the plain text files that have, on each line, a word followed by its vector
GLOVE_PATHS = {
    "commoncrawl": "/commuter/inquire_data_root/default/model/glove.840B.300d.txt",
    "glove_bc": "/commuter/inquire_data_root/bookCorpus/glove/vectors.txt",
    "glove_lj": "/commuter/inquire_data_root/livejournal_sample/glove/vectors.txt"
}
# FASTTEXT_PATH = "/commuter/bookCorpus/fasttext/model.300.bin"
# Lazily-populated caches: spacy pipelines by language, GloVe wrappers and
# LSTM models by model name (filled by the _get_* helpers below).
_nlp = {}
# _fasttext = None
_glove_wrapped = {}
_lstm_model = {}
# NOTE(review): unlike the lazy caches above, the BERT model is loaded
# eagerly at import time (on CPU), which makes importing this module slow
# even when BERT is never used — confirm this is intended.
bert_model = SentenceTransformer('bert-base-nli-mean-tokens',device="cpu")
_encode = None  # just to make the theano imports optional
def _get_lstm(model_name):
    """Lazily load and cache the pickled LSTM encoder for *model_name*.

    Also imports the skipthought ``encode`` function on first use, so the
    theano-dependent imports stay optional until an LSTM model is needed.
    """
    global _lstm_model
    global _encode
    if model_name not in _lstm_model:
        from python_skipthought_training.training.tools import encode
        _encode = encode
        with open(LSTM_PATHS[model_name], "rb") as inf:
            log.debug("Loading LSTM model from %s" % LSTM_PATHS[model_name])
            model = pkl.load(inf)
            _lstm_model[model_name] = model
    return _lstm_model[model_name]
def _get_glove(model_name):
    """Lazily build and cache a GloveWrapper for *model_name*."""
    global _glove_wrapped
    cached = _glove_wrapped.get(model_name)
    if cached is None:
        cached = GloveWrapper(path=GLOVE_PATHS[model_name])
        _glove_wrapped[model_name] = cached
    return cached
# def _get_fasttext():
# global _fasttext
# if _fasttext is None:
# log.debug("Loading fasttext model..")
# _fasttext = FastText.load_fasttext_format(FASTTEXT_PATH)
# return _fasttext
def _get_nlp(lang):
    """Lazily load and cache the spacy pipeline for *lang* (e.g. "en")."""
    if lang not in _nlp:
        import spacy
        log.debug("Loading %s spacy pipeline.." % lang)
        _nlp[lang] = spacy.load(lang)
    return _nlp[lang]
def vector_embed_sentence_spacy(sentences, batch=False, tokenized=False):
    """Embed sentence(s) as the mean of their spacy word vectors.

    :param sentences: a single sentence (str or token list) or, when
        *batch* is True, a list of them.
    :param batch: if True, return a list of vectors instead of one vector.
    :param tokenized: if True the input sentences are token lists; they are
        re-joined because spacy applies its own tokenization anyway.
    :returns: one mean vector (or None when no token had a vector), or a
        list of such values when *batch* is True.
    """
    if not batch:
        sentences = [sentences]
    nlp = _get_nlp("en")
    res = []
    for sent in sentences:
        if tokenized:
            sent = " ".join(sent)  # undo tokenization since spacy does it anyway
        # Bug fix: spacy Language objects have no ``.tokenize`` method; the
        # pipeline is applied by calling the object, which yields a Doc of
        # tokens.
        tokens = nlp(sent)
        vecs = [word.vector for word in tokens if word.has_vector]
        if not vecs:
            res.append(None)
        else:
            res.append(np.array(vecs).mean(0))
    if batch:
        return res
    return res[0]
def vector_embed_sentence_glove(sentences, model_name, batch=False, tokenized=False):
    """Embed sentence(s) as the mean of their GloVe word vectors.

    :param sentences: a single sentence (str or token list) or, when
        *batch* is True, a list of them.
    :param model_name: key into GLOVE_PATHS selecting the GloVe model.
    :param tokenized: if True each sentence is already a token list.
    :returns: one mean vector (or None when no token had a vector), or a
        list of such values when *batch* is True.
    """
    if not batch:
        sentences = [sentences]
    # Loop-invariant hoisted: fetch the (cached) model once, not once per
    # sentence.
    glove = _get_glove(model_name)
    res = []
    for sent in sentences:
        tokens = sent if tokenized else tokenize(sent)
        w_vecs = [glove[word] for word in tokens]
        w_vecs = [v for v in w_vecs if v is not None]
        if not w_vecs:
            res.append(None)
        else:
            sent_vector = np.array(w_vecs).mean(0)
            res.append(sent_vector)
    if batch:
        return res
    return res[0]
# One-shot flag so the "don't pass tokenized data" warning is only logged once.
warned = False
def vector_embed_sentence_lstm(sentences, model_name, batch=False, tokenized=False):
    """Embed sentence(s) with the cached skipthought LSTM encoder.

    :param sentences: a single sentence (str) or, when *batch* is True, a
        list of them.
    :param model_name: key into LSTM_PATHS selecting the LSTM model.
    :param tokenized: if True the input is token lists; they are re-joined,
        because the LSTM applies its own tokenization rules.
    :returns: one vector, or a list/array of vectors when *batch* is True.
    """
    global warned
    global _encode
    if not batch:
        sentences = [sentences]
    if tokenized:
        # special case for LSTM: we need to undo the tokenization
        if not warned:
            # logging.warn is a deprecated alias; use warning().
            log.warning("Don't pass tokenized data to LSTM! Has its own tokenization rules.")
            warned = True
        sentences = [" ".join(sent) for sent in sentences]
    lstm = _get_lstm(model_name=model_name)
    res = _encode(lstm, sentences, use_norm=True, batch_size=4096, verbose=False)
    if batch:
        return res
    return res[0]
# def vector_embed_sentence_fasttext(sentence):
# tokens = tokenize(sentence)
# ft = _get_fasttext()
# vecs = []
# for word in tokens:
# try:
# v = ft[word]
# vecs.append(v)
# except KeyError:
# pass
#
# if not vecs:
# return None
# return np.array(vecs).mean(0)
def vector_embed_sentence_bert(sentences, model_name, batch=False, tokenized=False):
    """Embed sentence(s) with the module-level SentenceTransformer model.

    :param sentences: a single sentence (str) or, when *batch* is True, a
        list of them.
    :param model_name: accepted for signature compatibility with the other
        embedders but unused — the global ``bert_model`` is always used.
    :param tokenized: if True a one-time warning is logged; the input is
        passed through unchanged because BERT tokenizes internally.
    :returns: one embedding, or the model's batch of embeddings when
        *batch* is True.
    """
    global bert_model
    # Bug fix: ``warned`` is assigned below, so without this declaration
    # Python treated it as a function-local and ``if not warned`` raised
    # UnboundLocalError whenever tokenized=True.
    global warned
    if not batch:
        sentences = [sentences]
    if tokenized:
        # BERT applies its own tokenization; warn once and pass raw text.
        if not warned:
            log.warning("Don't pass tokenized data to BERT ! Has its own tokenization rules.")
            warned = True
    res = bert_model.encode(sentences)
    if batch:
        return res
    return res[0]
# THIS IS THE CENTRAL LIST OF ALL MODELS
# Maps a model name to a callable(sentences, batch=..., tokenized=...) that
# returns sentence embedding(s); see the vector_embed_sentence_* functions.
VECTOR_EMBEDDERS = {
    "default": partial(vector_embed_sentence_glove, model_name="commoncrawl"),
    "spacy": vector_embed_sentence_spacy,
    # "fasttext": vector_embed_sentence_fasttext,
    "lstm_bc": partial(vector_embed_sentence_lstm, model_name="lstm_bc"),
    "lstm_lj": partial(vector_embed_sentence_lstm, model_name="lstm_lj"),
    "glove_lj": partial(vector_embed_sentence_glove, model_name="glove_lj"),
    "glove_bc": partial(vector_embed_sentence_glove, model_name="glove_bc"),
    # model_name is accepted but unused by vector_embed_sentence_bert.
    "bert": partial(vector_embed_sentence_bert, model_name="bert")
}
week2/week2_lab4.py | mdegasperis/pynet | 0 | 12759705 | #! /usr/bin/env python
from snmp_helper import snmp_get_oid,snmp_extract
import getpass
def main():
'''
Prompt user for IP address and community string.
Returns output for SysDescr and SysName MIBs.
'''
# For reference
# ip_addr = '172.16.58.3'
# community = 'galileo'
# Prompt for IP address and sanitize of trailing spaces
ip_addr = raw_input("Please enter an IP: ")
ip_addr = ip_addr.strip()
# Prompt for community string, text will be masked using getpass
community = getpass.getpass(prompt="Enter Community String: ")
# Tuples for each router
rt1 = (ip_addr, community, 7961)
rt2 = (ip_addr, community, 8061)
# List of routers
device_list = [rt1, rt2]
# List of OIDS: SysName, SysDescr
oid_list = ['1.3.6.1.2.1.1.5.0', '1.3.6.1.2.1.1.1.0']
for device in device_list:
print "\n********************"
for oid in oid_list:
snmp_data = snmp_get_oid(rt1, oid)
output = snmp_extract(snmp_data)
print output
print "********************"
print
if __name__ == '__main__':
main()
| 3.015625 | 3 |
zcore/action/__init__.py | razaibi/autocode | 0 | 12759706 | from . import service
from . import generator_service
# Explicit public API of the package.
__all__ = ['service', 'generator_service']
Compiler/compilerLib.py | GnarlyMshtep/DORAM | 0 | 12759707 | from Compiler.program import Program
from .GC import types as GC_types
import sys
import re, tempfile, os
def run(args, options):
    """Compile a file and output a Program object.

    If options.merge_opens is set to True, will attempt to merge any
    parallelisable open instructions.

    When options.flow_optimization is set, simple ``for i in range(n):``
    and ``if cond:`` statements in the source are rewritten into
    ``@for_range_opt`` / ``@if_(e)`` decorated closures before execution.

    :param args: argument list forwarded to Program (first entry is the
        input file).
    :param options: compiler options object.
    :returns: the finalized Program.
    """
    prog = Program(args, options)
    VARS['program'] = prog
    if options.binary:
        VARS['sint'] = GC_types.sbitintvec.get_type(int(options.binary))
        VARS['sfix'] = GC_types.sbitfixvec
        for i in 'cint', 'cfix', 'cgf2n', 'sintbit', 'sgf2n', 'sgf2nint', \
            'sgf2nuint', 'sgf2nuint32', 'sgf2nfloat', 'sfloat', 'cfloat', \
            'squant':
            del VARS[i]
    print('Compiling file', prog.infile)
    # Bug fix: removed a stray ``f = open(prog.infile, 'rb')`` here — the
    # handle was never used and never closed (resource leak).
    changed = False
    if options.flow_optimization:
        output = []
        if_stack = []
        for line in open(prog.infile):
            # Pop finished if-blocks once the indentation no longer matches.
            if if_stack and not re.match(if_stack[-1][0], line):
                if_stack.pop()
            m = re.match(
                r'(\s*)for +([a-zA-Z_]+) +in +range\(([0-9a-zA-Z_]+)\):',
                line)
            if m:
                output.append('%s@for_range_opt(%s)\n' % (m.group(1),
                                                          m.group(3)))
                output.append('%sdef _(%s):\n' % (m.group(1), m.group(2)))
                changed = True
                continue
            m = re.match(r'(\s*)if(\W.*):', line)
            if m:
                if_stack.append((m.group(1), len(output)))
                output.append('%s@if_(%s)\n' % (m.group(1), m.group(2)))
                output.append('%sdef _():\n' % (m.group(1)))
                changed = True
                continue
            m = re.match(r'(\s*)elif\s+', line)
            if m:
                # NOTE(review): CompilerError does not appear to be imported
                # in this module — confirm it is provided elsewhere.
                raise CompilerError('elif not supported')
            if if_stack:
                m = re.match('%selse:' % if_stack[-1][0], line)
                if m:
                    # Retroactively turn the matching @if_ into @if_e so the
                    # else branch attaches to it.
                    start = if_stack[-1][1]
                    ws = if_stack[-1][0]
                    output[start] = re.sub(r'^%s@if_\(' % ws, r'%s@if_e(' % ws,
                                           output[start])
                    output.append('%s@else_\n' % ws)
                    output.append('%sdef _():\n' % ws)
                    continue
            output.append(line)
        if changed:
            # Write the transformed source to a temp file for execution.
            infile = tempfile.NamedTemporaryFile('w+', delete=False)
            for line in output:
                infile.write(line)
            infile.seek(0)
        else:
            infile = open(prog.infile)
    else:
        infile = open(prog.infile)
    # make compiler modules directly accessible
    sys.path.insert(0, 'Compiler')
    # create the tapes
    exec(compile(infile.read(), infile.name, 'exec'), VARS)
    if changed and not options.debug:
        os.unlink(infile.name)
    prog.finalize()
    if prog.req_num:
        print('Program requires at most:')
        for x in prog.req_num.pretty():
            print(x)
    if prog.verbose:
        print('Program requires:', repr(prog.req_num))
        print('Cost:', 0 if prog.req_num is None else prog.req_num.cost())
        print('Memory size:', dict(prog.allocated_mem))
    return prog
| 2.53125 | 3 |
contest/abc033/C.py | mola1129/atcoder | 0 | 12759708 | <reponame>mola1129/atcoder
def count_nonzero_terms(s):
    """Count the '+'-separated terms of *s* that contain no '0' digit."""
    return sum('0' not in term for term in s.split('+'))


def main():
    # Bug fix: the original inner loop relied on the loop variable ``i``
    # after ``break``/loop-exit (``if i == len(f) - 1``), which is fragile
    # (NameError on an empty first term, stale value on later empty terms)
    # and also left an unused ``n = len(s)``.
    s = input()
    print(count_nonzero_terms(s))


if __name__ == '__main__':
    main()
| 3.328125 | 3 |
lfp4uda/local_feature_alignment.py | nogmat/master_AIC_advanced_ML | 1 | 12759709 | #!/bin/python
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
class LocalFeatureAlignment(tf.keras.layers.Layer):
    """Keras layer aligning local-feature residuals to their best codeword.

    Inputs are ``[distance, similarities]`` with shapes
    ``(batch, i, j, k, d)`` and ``(batch, i, j, k)``; for every spatial
    location the residual of the most-similar codeword is selected and the
    codeword index is appended to it.
    """

    def __init__(self, **kwargs):
        # Bug fix: the original called ``super(...).__init__(self, **kwargs)``,
        # passing the layer instance as the first positional argument of
        # tf.keras.layers.Layer.__init__ (which lands in ``trainable``).
        super(LocalFeatureAlignment, self).__init__(**kwargs)

    def call(self, inputs):
        distance, similarities = inputs
        _, i, j, k, d = distance.shape
        _, i, j, k_ = similarities.shape
        # Both inputs must agree on the number of codewords k.
        assert(k == k_)
        # Flatten the spatial grid: (batch, i*j, k, d).
        distance = array_ops.reshape(
            distance,
            (array_ops.shape(distance)[0],)+(i*j, k, d))
        # Index of the most similar codeword per location: (batch, i*j, 1).
        argmx = tf.cast(
            array_ops.reshape(
                tf.keras.backend.argmax(similarities),
                (array_ops.shape(similarities)[0],)+(i*j, 1)),
            dtype=tf.int32)
        ones = tf.cast(tf.keras.backend.ones_like(argmx), dtype=tf.int32)
        # Pair each location index with its argmax codeword index so that
        # gather_nd can pick one residual per location.
        selector = tf.concat(
            [tf.math.multiply(
                ones,
                tf.keras.backend.reshape(tf.range(i*j), shape=(i*j, 1))),
                argmx], axis=-1)
        residuals = tf.gather_nd(distance, selector, batch_dims=1)
        # Append the chosen codeword index (as float) to each residual.
        aligned_residuals = tf.concat(
            [residuals, tf.cast(argmx, dtype=tf.float32)],
            axis=-1)
        return [aligned_residuals]
| 2.296875 | 2 |
zerver/webhooks/splunk/tests.py | DD2480-group7-2020/zulip | 1 | 12759710 | <reponame>DD2480-group7-2020/zulip
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class SplunkHookTests(WebhookTestCase):
    """Exercise the Splunk saved-search webhook integration.

    Each test posts a JSON fixture to the webhook URL and checks the
    rendered Zulip topic and message, including the fallback text used
    when individual alert fields are missing.
    """
    STREAM_NAME = 'splunk'
    URL_TEMPLATE = "/api/v1/external/splunk?api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = 'splunk'
    def test_splunk_search_one_result(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        # define the expected message contents
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [sudo](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        # using fixture named splunk_search_one_result, execute this test
        self.send_and_test_stream_message('search_one_result',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_short_search_name(self) -> None:
        # don't provide a topic so the search name is used instead
        expected_topic = u"This search's name isn't that long"
        expected_message = """
Splunk alert from saved search:
* **Search**: [This search's name isn't that long](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('short_search_name',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_long_search_name(self) -> None:
        # don't provide a topic so the search name is used instead
        # (overly long names are truncated with an ellipsis)
        expected_topic = u"this-search's-got-47-words-37-sentences-58-words-we-wanna..."
        expected_message = """
Splunk alert from saved search:
* **Search**: [this-search's-got-47-words-37-sentences-58-words-we-wanna-know-details-of-the-search-time-of-the-search-and-any-other-kind-of-thing-you-gotta-say-pertaining-to-and-about-the-search-I-want-to-know-authenticated-user's-name-and-any-other-kind-of-thing-you-gotta-say](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('long_search_name',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_missing_results_link(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [sudo](Missing results_link)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('missing_results_link',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_missing_search_name(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [Missing search_name](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('missing_search_name',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_missing_host(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [sudo](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: Missing host
* **Source**: `/var/log/auth.log`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('missing_host',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_missing_source(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [sudo](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `Missing source`
* **Raw**: `Jan 4 11:14:32 myserver sudo: pam_unix(sudo:session): session closed for user root`
""".strip()
        self.send_and_test_stream_message('missing_source',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def test_splunk_missing_raw(self) -> None:
        self.url = self.build_webhook_url(topic=u"New Search Alert")
        expected_topic = u"New Search Alert"
        expected_message = """
Splunk alert from saved search:
* **Search**: [sudo](http://example.com:8000/app/search/search?q=%7Cloadjob%20rt_scheduler__admin__search__sudo_at_1483557185_2.2%20%7C%20head%201%20%7C%20tail%201&earliest=0&latest=now)
* **Host**: myserver
* **Source**: `/var/log/auth.log`
* **Raw**: `Missing _raw`
""".strip()
        self.send_and_test_stream_message('missing_raw',
                                          expected_topic,
                                          expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def get_body(self, fixture_name: str) -> str:
        """Load the named JSON fixture from the splunk fixtures directory."""
        return self.webhook_fixture_data("splunk", fixture_name, file_type="json")
| 1.960938 | 2 |
tencentcloud/platform/v20190314/platform_client.py | FatAnker/tencentcloud-sdk-python | 0 | 12759711 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.platform.v20190314 import models
class PlatformClient(AbstractClient):
    """Client for the tencentcloud ``platform`` API, version 2019-03-14.

    Each method serializes a request model, calls the endpoint, and either
    deserializes the response into the matching response model or raises a
    TencentCloudSDKException built from the service error.

    NOTE(review): the ``except`` fallbacks below use ``e.message``, which
    only exists on Python 2 exceptions; under Python 3 this raises
    AttributeError and masks the original error — confirm the intended
    Python version.
    """
    _apiVersion = '2019-03-14'
    _endpoint = 'platform.tencentcloudapi.com'
    def DescribePasswords(self, request):
        """Fetch device passwords from the password vault.

        :param request: Request structure for the DescribePasswords call.
        :type request: :class:`tencentcloud.platform.v20190314.models.DescribePasswordsRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.DescribePasswordsResponse`
        """
        try:
            params = request._serialize()
            body = self.call("DescribePasswords", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.DescribePasswordsResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
    def ModifyPassword(self, request):
        """Modify a device's password.

        :param request: Request structure for the ModifyPassword call.
        :type request: :class:`tencentcloud.platform.v20190314.models.ModifyPasswordRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.ModifyPasswordResponse`
        """
        try:
            params = request._serialize()
            body = self.call("ModifyPassword", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.ModifyPasswordResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
    def Passwords(self, request):
        """Fetch passwords.

        :param request: Request structure for the Passwords call.
        :type request: :class:`tencentcloud.platform.v20190314.models.PasswordsRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.PasswordsResponse`
        """
        try:
            params = request._serialize()
            body = self.call("Passwords", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.PasswordsResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
    def QueryPasswords(self, request):
        """Query passwords.

        :param request: Request structure for the QueryPasswords call.
        :type request: :class:`tencentcloud.platform.v20190314.models.QueryPasswordsRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.QueryPasswordsResponse`
        """
        try:
            params = request._serialize()
            body = self.call("QueryPasswords", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.QueryPasswordsResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
    def ResetPassword(self, request):
        """Reset a password-vault device's password.

        :param request: Request structure for the ResetPassword call.
        :type request: :class:`tencentcloud.platform.v20190314.models.ResetPasswordRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.ResetPasswordResponse`
        """
        try:
            params = request._serialize()
            body = self.call("ResetPassword", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.ResetPasswordResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
    def SetPasswordTypes(self, request):
        """Re-set the password type (long-term or temporary) and modify the password.

        :param request: Request structure for the SetPasswordTypes call.
        :type request: :class:`tencentcloud.platform.v20190314.models.SetPasswordTypesRequest`
        :rtype: :class:`tencentcloud.platform.v20190314.models.SetPasswordTypesResponse`
        """
        try:
            params = request._serialize()
            body = self.call("SetPasswordTypes", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.SetPasswordTypesResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(e.message, e.message)
tests/modules/epp/test_epp_login.py | bladeroot/heppy | 20 | 12759712 | <filename>tests/modules/epp/test_epp_login.py
#!/usr/bin/env python
import unittest
from ..TestCase import TestCase
class TestEppLogin(TestCase):
    """Render tests for the EPP ``login`` command.

    Bug fix: several request parameters below had been mangled by an
    automated credential scrubber (``<PASSWORD>`` placeholders); the real
    values are restored from the expected XML in the same tests
    (``2fooBar`` / ``bar-FOO2``).
    """
    def test_render_epp_login_request_min(self):
        # Minimal login: defaults for version/lang, no extensions.
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
    <command>
        <login>
            <clID>ClientX</clID>
            <pw>2fooBar</pw>
            <options>
                <version>1.0</version>
                <lang>en</lang>
            </options>
            <svcs>
                <objURI>urn:ietf:params:xml:ns:obj1</objURI>
                <objURI>urn:ietf:params:xml:ns:obj2</objURI>
                <objURI>urn:ietf:params:xml:ns:obj3</objURI>
            </svcs>
        </login>
        <clTRID>AA-00</clTRID>
    </command>
</epp>''', {
            'command': 'epp:login',
            'clID': 'ClientX',
            'pw': '2fooBar',
            'objURIs': [
                'urn:ietf:params:xml:ns:obj1',
                'urn:ietf:params:xml:ns:obj2',
                'urn:ietf:params:xml:ns:obj3'
            ],
        })
    def test_render_epp_login_request(self):
        # Full login: password change, explicit version/lang, extensions.
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
    <command>
        <login>
            <clID>ClientX</clID>
            <pw>2fooBar</pw>
            <newPW>bar-FOO2</newPW>
            <options>
                <version>4.2</version>
                <lang>ua</lang>
            </options>
            <svcs>
                <objURI>urn:ietf:params:xml:ns:obj1</objURI>
                <objURI>urn:ietf:params:xml:ns:obj2</objURI>
                <objURI>urn:ietf:params:xml:ns:obj3</objURI>
                <svcExtension>
                    <extURI>http://custom/obj1ext-1.0</extURI>
                    <extURI>http://custom/obj1ext-2.0</extURI>
                </svcExtension>
            </svcs>
        </login>
        <clTRID>AA-00</clTRID>
    </command>
</epp>''', {
            'command': 'epp:login',
            'clID': 'ClientX',
            'pw': '2fooBar',
            'newPW': 'bar-FOO2',
            'version': 4.2,
            'lang': 'ua',
            'objURIs': [
                'urn:ietf:params:xml:ns:obj1',
                'urn:ietf:params:xml:ns:obj2',
                'urn:ietf:params:xml:ns:obj3'
            ],
            'extURIs': [
                'http://custom/obj1ext-1.0',
                'http://custom/obj1ext-2.0'
            ]
        })
    def test_render_epp_login_request_alt(self):
        # Same request using the alternative login/password parameter names.
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
    <command>
        <login>
            <clID>ClientX</clID>
            <pw>2fooBar</pw>
            <newPW>bar-FOO2</newPW>
            <options>
                <version>4.2</version>
                <lang>ua</lang>
            </options>
            <svcs>
                <objURI>urn:ietf:params:xml:ns:obj1</objURI>
                <objURI>urn:ietf:params:xml:ns:obj2</objURI>
                <objURI>urn:ietf:params:xml:ns:obj3</objURI>
                <svcExtension>
                    <extURI>http://custom/obj1ext-1.0</extURI>
                    <extURI>http://custom/obj1ext-2.0</extURI>
                </svcExtension>
            </svcs>
        </login>
        <clTRID>AA-00</clTRID>
    </command>
</epp>''', {
            'command': 'epp:login',
            'login': 'ClientX',
            'password': '2fooBar',
            'newPassword': 'bar-FOO2',
            'version': 4.2,
            'lang': 'ua',
            'objURIs': [
                'urn:ietf:params:xml:ns:obj1',
                'urn:ietf:params:xml:ns:obj2',
                'urn:ietf:params:xml:ns:obj3'
            ],
            'extURIs': [
                'http://custom/obj1ext-1.0',
                'http://custom/obj1ext-2.0'
            ]
        })
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.625 | 3 |
stdimage/utils.py | IgorCode/django-stdimage | 0 | 12759713 | from django.core.files.storage import default_storage
from .models import StdImageField, StdImageFieldFile
def pre_delete_delete_callback(sender, instance, **kwargs):
    """On model deletion, delete the files stored in every StdImageField.

    ``delete(False)`` removes the file without re-saving the model.
    """
    image_fields = (
        field for field in instance._meta.fields
        if isinstance(field, StdImageField)
    )
    for image_field in image_fields:
        getattr(instance, image_field.name).delete(False)
def pre_save_delete_callback(sender, instance, **kwargs):
    """Before saving, delete old files on StdImageFields whose value changed.

    Compares the stored row's field values with the instance about to be
    saved and deletes the superseded files (without re-saving the model).
    """
    if not instance.pk:
        return
    # Robustness fix: use filter().first() instead of get(). pre_save also
    # fires for brand-new rows that already carry a pk (e.g. UUIDField
    # defaults), where get() would raise DoesNotExist.
    obj = sender.objects.filter(pk=instance.pk).first()
    if obj is None:
        return
    for field in instance._meta.fields:
        if isinstance(field, StdImageField):
            obj_field = getattr(obj, field.name)
            instance_field = getattr(instance, field.name)
            if obj_field and obj_field != instance_field:
                obj_field.delete(False)
def render_variations(file_name, variations, replace=False,
                      storage=default_storage, field_class=StdImageFieldFile):
    """Render all variations for a given field.

    :param file_name: storage-relative name of the source image.
    :param variations: mapping of variation name -> variation definition;
        only the definitions are used, the names are ignored here.
    :param replace: whether to overwrite an existing rendition.
    """
    # Iterate values directly: the original looped over .items() and
    # discarded the key on every iteration.
    for variation in variations.values():
        field_class.render_variation(
            file_name, variation, replace, storage
        )
| 2.25 | 2 |
backend/app/server/routes/verify.py | MyrrhDev/SocioMark | 10 | 12759714 | from fastapi import APIRouter, Body, Depends
from ..models.post import ResponseModel
from ..controllers.auth import auth_handler
from ..controllers.verify import verify_post
router = APIRouter()


@router.post("/", response_description="Verify the post's authenticity")
async def verify_post_data(post_id: str = Body(..., embed=True), current_user=Depends(auth_handler.auth_wrapper)):
    """Check a post's authenticity and wrap the result in a ResponseModel.

    Requires an authenticated user; the verification result itself is
    returned either way, only the message differs.
    """
    verified_post = await verify_post(post_id)
    message = (
        "Verification successful!"
        if verified_post["is_authentic"]
        else "Verification failed!"
    )
    return ResponseModel(verified_post, message)
| 2.4375 | 2 |
uqcsbot/scripts/ascii.py | dhood/uqcsbot | 38 | 12759715 | <reponame>dhood/uqcsbot<filename>uqcsbot/scripts/ascii.py
from uqcsbot import bot, Command
from requests import get
from requests.exceptions import RequestException
from uqcsbot.utils.command_utils import loading_status
import random
# User-facing messages for the various failure modes of !asciify.
NO_QUERY_MESSAGE = "Can't ASCIIfy nothing... try `!asciify <TEXT>`"
BOTH_OPTIONS_MESSAGE = "Font can only be random OR specified"
ERROR_MESSAGE = "Trouble with HTTP Request, can't ASCIIfy :("
NO_FONT_MESSAGE = "Cannot find the specified font in the fontslist."
# artii API endpoints: text rendering and the list of available fonts.
ASCII_URL = "http://artii.herokuapp.com/make?text="
FONT_URL = "http://artii.herokuapp.com/fonts_list"
@bot.on_command("asciify")
@loading_status
def handle_asciify(command: Command):
"""
`!asciify [--fontslist] [--randomfont | --<CUSTOM FONT>] <TEXT>` - Returns ASCIIfyed text.
`--fontslist` also returns a URL to available fonts,
`--randomfont` returns, well... a random font.
A custom font from the fonts list can also be specified.
"""
# Makes sure the query is not empty
if not command.has_arg():
bot.post_message(command.channel_id, NO_QUERY_MESSAGE)
return
command_args = command.arg.split()
random_font = False
custom_font = False
return_fonts = False
# check for font list option
if '--fontslist' in command_args:
return_fonts = True
command_args.remove('--fontslist')
# check for random font option
if '--randomfont' in command_args:
random_font = True
command_args.remove('--randomfont')
# check for custom font option
fontslist = get_fontslist()
if not fontslist:
bot.post_message(command.channel_id, ERROR_MESSAGE)
return
for i in command_args:
if '--' in i:
if i.strip('--') in fontslist:
custom_font = True
selected_font = i.strip('--')
command_args.remove(i)
break
else:
bot.post_message(command.channel_id, NO_FONT_MESSAGE)
return
# check for invalid options
if random_font and custom_font:
bot.post_message(command.channel_id, BOTH_OPTIONS_MESSAGE)
return
if not command_args:
text = None
else:
text = ' '.join(command_args)
# asciification
if text is None:
bot.post_message(command.channel_id, NO_QUERY_MESSAGE)
ascii_text = None
else:
if random_font:
font = get_random_font()
elif custom_font:
font = selected_font
else:
font = None
ascii_text = asciify(text, font)
if ascii_text is None:
bot.post_message(command.channel_id, ERROR_MESSAGE)
return
# message posts
if return_fonts:
bot.post_message(command.channel_id, FONT_URL)
if ascii_text:
bot.post_message(command.channel_id, ascii_text)
else:
return
return
def asciify(text: str, font: str) -> str:
    """Fetch ASCII art for *text* (optionally in *font*) from the artii API.

    Returns the art wrapped in a Slack code block, or None if the HTTP
    request fails.
    """
    from urllib.parse import quote
    try:
        # Robustness fix: percent-encode the user-supplied values so spaces
        # and special characters survive in the query string (the original
        # interpolated raw text into the URL).
        if font is not None:
            url = ASCII_URL + quote(text) + '&font=' + quote(font)
        else:
            url = ASCII_URL + quote(text)
        resp = get(url)
        ascii_text = f"```\n{resp.text}\n```"
        return ascii_text
    except RequestException:
        return None
def get_random_font() -> str:
    """Pick a random font name, or None if the fonts list is unavailable."""
    fonts = get_fontslist()
    if not fonts:
        return None
    return random.choice(tuple(fonts))
def get_fontslist() -> set:
    """Fetch the set of available artii font names.

    Returns None if the HTTP request fails.
    """
    try:
        # Consistency fix: use the module-level FONT_URL constant instead of
        # duplicating the URL string here.
        resp = get(FONT_URL)
        fontslist = set(resp.text.split())
        return fontslist
    except RequestException:
        return None
| 2.859375 | 3 |
pyexam/exporter.py | ruial/pyexam | 0 | 12759716 | <filename>pyexam/exporter.py
import logging
import os
import subprocess
from functools import lru_cache
from pathlib import Path
from typing import Optional
from jinja2 import Environment, FileSystemLoader, PackageLoader
from . import Exam
logger = logging.getLogger(__name__)
@lru_cache
def get_jinja_env(template_dir: Optional[str]) -> Environment:
# only load each template once if multiple exams rendered
loader = (FileSystemLoader(template_dir) if template_dir
else PackageLoader('pyexam', 'templates'))
env = Environment(loader=loader)
env.globals['render_question'] = question_renderer(env)
return env
def question_renderer(env: Environment):
def render_question(question: dict, solution: bool = False, is_part: bool = False,) -> str:
question_type = question['type']
template = env.get_template(f'{question_type}.j2')
return template.render(question | {'is_part': is_part, 'solution': solution})
return render_question
def render(exam: Exam, solution: bool = False, template_dir: Optional[str] = None) -> str:
env = get_jinja_env(template_dir)
template = env.get_template('exam.j2') # templates are cached
return template.render(exam.dict() | {'solution': solution})
def export(exam: Exam, dir: str, file_name: str, solution: bool, template_dir: Optional[str]) -> None:
Path(dir).mkdir(parents=False, exist_ok=True)
file_path = os.path.join(dir, file_name)
rendered = render(exam, solution, template_dir)
with open(file_path, 'w') as file:
file.write(rendered)
def export_latex(exam: Exam, dir: str, solution: bool = False, template_dir: Optional[str] = None) -> None:
export(exam, dir, f'{exam.name}.tex', solution, template_dir)
def export_pdf(exam: Exam, dir: str, solution: bool = False, template_dir: Optional[str] = None) -> None:
    """Write the exam .tex file and compile it to PDF with latexmk.

    Raises CalledProcessError when latexmk exits non-zero.
    """
    export_latex(exam, dir, solution, template_dir)
    cmd = ['latexmk', '-interaction=nonstopmode', '-pdf', f'{exam.name}.tex']
    completed = subprocess.run(cmd, capture_output=True, cwd=dir)
    # latexmk is chatty; keep its output at debug level only.
    logger.debug(f'stdout {completed.stdout.decode("utf-8")}')
    if completed.stderr:
        logger.debug(f'stderr {completed.stderr.decode("utf-8")}')
    completed.check_returncode()
| 2.453125 | 2 |
prism/core/modules/timer.py | ii-Python/Prism-v3 | 3 | 12759717 | # Copyright 2021-xx iiPython
# Modules
from typing import Union
from datetime import datetime
from secrets import token_hex
# Timer class
class Timer(object):
    """Token-based stopwatch: start() hands out an id, end() reports elapsed time."""

    def __init__(self) -> None:
        self._st_times = {}  # timer id -> start datetime
        self._ret_keys = {"s": lambda x: x, "ms": lambda x: float(x) * 1000}

    def start(self) -> str:
        """Begin a new timer and return its unique id token."""
        new_id = token_hex(26)
        self._st_times[new_id] = datetime.now()
        return new_id

    def end(self, timer_id: str, return_as: str = "s", as_int: bool = False) -> Union[str, int]:
        """Stop timer *timer_id* and return the elapsed time.

        return_as: "s" (seconds) or "ms" (milliseconds).
        as_int:    return a rounded int instead of a string.
        Raises RuntimeError for an unknown timer id.
        """
        if timer_id not in self._st_times:
            raise RuntimeError("invalid timer id: '{}'".format(timer_id))
        started = self._st_times.pop(timer_id)

        # Handle return value
        elapsed_s = round((datetime.now() - started).total_seconds(), 2)
        converted = self._ret_keys[return_as](elapsed_s)
        return int(round(converted)) if as_int else str(converted)
# Initialization
# Module-level shared Timer instance; import and reuse this singleton
# rather than constructing per-call Timers.
timer = Timer()
| 2.703125 | 3 |
src/settings.py | uadson/pandas | 0 | 12759718 | <gh_stars>0
import os

# Project root: the directory the process was started from.
# NOTE(review): os.getcwd() depends on where the interpreter is launched,
# not on this file's location; if the package directory is intended,
# os.path.dirname(__file__) would be more robust — TODO confirm.
BASE_DIR = os.getcwd()

# Absolute path of the 'database' folder and the file names inside it.
# NOTE(review): os.listdir raises FileNotFoundError at import time if the
# folder does not exist.
database = os.path.join(BASE_DIR, 'database')
datalist = os.listdir(database)
| 1.6875 | 2 |
html_parsing/get_population_from_wikidata.py | DazEB2/SimplePyScripts | 117 | 12759719 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
from bs4 import BeautifulSoup
def get_populations(url: str) -> dict:
    """Scrape population figures from a Wikidata item page.

    Returns a mapping of year -> population string, e.g.
    {2012: '1,375,198,619', 2010: '1,359,755,102', ...}.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # P1082 is the Wikidata property identifier for "population".
    population_node = soup.select_one('#P1082')

    populations = {}
    # Walk the statement rows in the column next to "population".
    for statement in population_node.select('.wikibase-statementview'):
        # Small trick: take only the first two snak values — these are the
        # people count and the date.
        count_node, date_node = statement.select('.wikibase-snakview-value')[:2]
        count_text = count_node.text.strip()
        date_text = date_node.text.strip()
        # The year is the last whitespace-separated token:
        # "1 July 2012" -> 2012, "2010" -> 2010.
        populations[int(date_text.split()[-1])] = count_text
    return populations
def get_population_by_year(populations: dict, year: int) -> str:
    """Look up *year* in *populations*; return -1 when the year is absent."""
    if year in populations:
        return populations[year]
    return -1
# Convenience wrapper around get_population_by_year: first scrapes the
# populations from *url*, then returns the value recorded for *year*.
def get_population_from_url_by_year(url: str, year: int) -> str:
    return get_population_by_year(get_populations(url), year)
if __name__ == '__main__':
    # Demo: population history of China (Wikidata item Q148).
    url = 'https://www.wikidata.org/wiki/Q148'
    populations = get_populations(url)
    print(populations)  # {2012: '1,375,198,619', 2010: '1,359,755,102', 2015: '1,397,028,553', ...

    # Print the data sorted by key, in ascending order.
    for year in sorted(populations):
        print("{}: {}".format(year, populations[year]))
    # 2010: 1,359,755,102
    # 2011: 1,367,480,264
    # 2012: 1,375,198,619
    # 2013: 1,382,793,212
    # 2014: 1,390,110,388
    # 2015: 1,397,028,553
    # 2016: 1,403,500,365
    # 2017: 1,409,517,397

    print(get_population_by_year(populations, 2012))  # 1,375,198,619
    print(get_population_by_year(populations, 2013))  # 1,382,793,212
    print(get_population_by_year(populations, 2014))  # 1,390,110,388
| 3.53125 | 4 |
spectral_cube/tests/test_projection.py | Jiangxuejian/spectral-cube | 0 | 12759720 | <reponame>Jiangxuejian/spectral-cube<gh_stars>0
from __future__ import print_function, absolute_import, division
import warnings
import pytest
import numpy as np
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from radio_beam import Beam, Beams
from .helpers import assert_allclose
from .test_spectral_cube import cube_and_raw
from ..spectral_cube import SpectralCube
from ..masks import BooleanArrayMask
from ..lower_dimensional_structures import (Projection, Slice, OneDSpectrum,
VaryingResolutionOneDSpectrum)
from ..utils import SliceWarning, WCSCelestialError
from . import path
# set up for parametrization
# Lower-dimensional-object (LDO) classes exercised by the shared tests.
LDOs = (Projection, Slice, OneDSpectrum)
LDOs_2d = (Projection, Slice,)

# Fixture data: 2x2 / 12x12 images and 2- / 12-element spectra, all 1 Jy.
two_qty_2d = np.ones((2,2)) * u.Jy
twelve_qty_2d = np.ones((12,12)) * u.Jy
two_qty_1d = np.ones((2,)) * u.Jy
twelve_qty_1d = np.ones((12,)) * u.Jy

# Tuples zipped against LDOs / LDOs_2d in the parametrize decorators below.
data_two = (two_qty_2d, two_qty_2d, two_qty_1d)
data_twelve = (twelve_qty_2d, twelve_qty_2d, twelve_qty_1d)
data_two_2d = (two_qty_2d, two_qty_2d,)
data_twelve_2d = (twelve_qty_2d, twelve_qty_2d,)
def load_projection(filename):
    """Read the primary HDU of *filename* and build a Projection from it.

    Returns the (projection, hdu) pair so tests can inspect the raw header.
    """
    hdu = fits.open(filename)[0]
    return Projection.from_hdu(hdu), hdu
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs_2d, data_two_2d))
def test_slices_of_projections_not_projections(LDO, data):
    """Slicing below 2 dimensions must not return the LDO class itself."""
    # slices of projections that have <2 dimensions should not be projections
    p = LDO(data, copy=False)
    assert not isinstance(p[0,0], LDO)
    assert not isinstance(p[0], LDO)
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs_2d, data_twelve_2d))
def test_copy_false(LDO, data):
    """copy=False must share memory: mutating the input mutates the LDO."""
    # copy the data so we can manipulate inplace without affecting other tests
    image = data.copy()
    p = LDO(image, copy=False)
    image[3,4] = 2 * u.Jy
    assert_allclose(p[3,4], 2 * u.Jy)
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_write(LDO, data, tmpdir):
    """Smoke test: each LDO can be written to a FITS file without error."""
    p = LDO(data)
    p.write(tmpdir.join('test.fits').strpath)
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs_2d, data_twelve_2d))
def test_preserve_wcs_to(LDO, data):
    """Unit conversion via .to() must preserve the WCS (regression #256)."""
    # regression for #256
    image = data.copy()
    p = LDO(image, copy=False)
    image[3,4] = 2 * u.Jy
    p2 = p.to(u.mJy)
    assert_allclose(p[3,4], 2 * u.Jy)
    # NOTE(review): likely intended p2[3,4] here; the assert passes either way
    # because assert_allclose converts units (2 Jy == 2000 mJy) — confirm.
    assert_allclose(p[3,4], 2000 * u.mJy)
    assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_multiplication(LDO, data):
    """Scalar multiplication keeps unit and WCS (regression #265)."""
    # regression: 265
    p = LDO(data, copy=False)
    p2 = p * 5
    assert p2.unit == u.Jy
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value == 5)
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_unit_division(LDO, data):
    """Dividing by a unit composes units and keeps WCS (regression #265)."""
    # regression: 265
    image = data
    p = LDO(image, copy=False)
    p2 = p / u.beam
    assert p2.unit == u.Jy/u.beam
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs_2d, data_twelve_2d))
def test_isnan(LDO, data):
    """np.isnan on an LDO returns a plain (unitless) boolean mask."""
    # Check that np.isnan strips units
    image = data.copy()
    image[5,6] = np.nan
    p = LDO(image, copy=False)
    mask = np.isnan(p)
    assert mask.sum() == 1
    assert not hasattr(mask, 'unit')
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_self_arith(LDO, data):
    """Self addition/subtraction preserve the WCS and give 2 / 0 values."""
    image = data
    p = LDO(image, copy=False)
    p2 = p + p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==2)
    p2 = p - p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==0)
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_self_arith_with_beam(LDO, data):
    """Arithmetic must also propagate an attached beam unchanged."""
    exp_beam = Beam(1.0 * u.arcsec)
    image = data
    p = LDO(image, copy=False)
    p = p.with_beam(exp_beam)
    p2 = p + p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==2)
    assert p2.beam == exp_beam
    p2 = p - p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==0)
    assert p2.beam == exp_beam
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_VRODS_wrong_beams_shape():
    '''
    Check that passing Beams with a different shape than the data
    is caught.
    '''
    # 3 beams vs 12 data points -> constructor must raise ValueError.
    exp_beams = Beams(np.arange(1, 4) * u.arcsec)
    p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
                                      beams=exp_beams)
def test_VRODS_with_beams():
    """Beams round-trip through the constructor and with_beams()."""
    exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
    p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
    assert (p.beams == exp_beams).all()
    new_beams = Beams(np.arange(2, twelve_qty_1d.size + 2) * u.arcsec)
    p = p.with_beams(new_beams)
    assert np.all(p.beams == new_beams)
def test_VRODS_slice_with_beams():
    """Slicing a varying-resolution spectrum slices its beams in lockstep."""
    exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
    p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
                                      wcs=WCS(naxis=1),
                                      beams=exp_beams)
    assert np.all(p[:5].beams == exp_beams[:5])
def test_VRODS_arith_with_beams():
    """Arithmetic on a varying-resolution spectrum keeps the per-channel beams."""
    exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
    p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
    p2 = p + p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==2)
    assert np.all(p2.beams == exp_beams)
    p2 = p - p
    assert hasattr(p2, '_wcs')
    assert p2.wcs == p.wcs
    assert np.all(p2.value==0)
    assert np.all(p2.beams == exp_beams)
def test_onedspectrum_specaxis_units():
    """spectral_axis.unit must reflect the WCS cunit."""
    test_wcs = WCS(naxis=1)
    test_wcs.wcs.cunit = ["m/s"]
    test_wcs.wcs.ctype = ["VELO-LSR"]
    p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
    assert p.spectral_axis.unit == u.Unit("m/s")
def test_onedspectrum_with_spectral_unit():
    """with_spectral_unit converts the axis unit and rescales its values."""
    test_wcs = WCS(naxis=1)
    test_wcs.wcs.cunit = ["m/s"]
    test_wcs.wcs.ctype = ["VELO-LSR"]
    p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
    p_new = p.with_spectral_unit(u.km/u.s)
    assert p_new.spectral_axis.unit == u.Unit("km/s")
    np.testing.assert_equal(p_new.spectral_axis.value,
                            1e-3*p.spectral_axis.value)
def test_onedspectrum_input_mask_type():
    """OneDSpectrum accepts a numpy mask, a MaskBase, or None equivalently."""
    test_wcs = WCS(naxis=1)
    test_wcs.wcs.cunit = ["m/s"]
    test_wcs.wcs.ctype = ["VELO-LSR"]
    np_mask = np.ones(twelve_qty_1d.shape, dtype=bool)
    np_mask[1] = False
    bool_mask = BooleanArrayMask(np_mask, wcs=test_wcs,
                                 shape=np_mask.shape)
    # numpy array
    p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
                     mask=np_mask)
    assert (p.mask.include() == bool_mask.include()).all()
    # MaskBase
    p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
                     mask=bool_mask)
    assert (p.mask.include() == bool_mask.include()).all()
    # No mask
    ones_mask = BooleanArrayMask(np.ones(twelve_qty_1d.shape, dtype=bool),
                                 wcs=test_wcs, shape=np_mask.shape)
    p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
                     mask=None)
    assert (p.mask.include() == ones_mask.include()).all()
def test_slice_tricks():
    """Broadcasting a spectrum against a slice warns; .array does not."""
    test_wcs_1 = WCS(naxis=1)
    test_wcs_2 = WCS(naxis=2)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    im = Slice(twelve_qty_2d, wcs=test_wcs_2)
    with warnings.catch_warnings(record=True) as w:
        new = spec[:,None,None] * im[None,:,:]
    assert new.ndim == 3
    # two warnings because we're doing BOTH slices!
    assert len(w) == 2
    assert w[0].category == SliceWarning
    with warnings.catch_warnings(record=True) as w:
        new = spec.array[:,None,None] * im.array[None,:,:]
    assert new.ndim == 3
    assert len(w) == 0
def test_array_property():
    """.array returns a bare ndarray (no Quantity wrapper)."""
    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    arr = spec.array
    # these are supposed to be the same object, but the 'is' tests fails!
    assert spec.array.data == spec.data
    assert isinstance(arr, np.ndarray)
    assert not isinstance(arr, u.Quantity)
def test_quantity_property():
    """.quantity returns a Quantity, not an LDO subclass."""
    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    arr = spec.quantity
    # these are supposed to be the same object, but the 'is' tests fails!
    assert spec.array.data == spec.data
    assert isinstance(arr, u.Quantity)
    assert not isinstance(arr, OneDSpectrum)
def test_projection_with_beam(data_55):
    """A beam can be supplied via HDU, meta dict, header, or keyword."""
    exp_beam = Beam(1.0 * u.arcsec)
    proj, hdu = load_projection(data_55)
    # uses from_hdu, which passes beam as kwarg
    assert proj.beam == exp_beam
    assert proj.meta['beam'] == exp_beam
    # load beam from meta
    exp_beam = Beam(1.5 * u.arcsec)
    meta = {"beam": exp_beam}
    new_proj = Projection(hdu.data, wcs=proj.wcs, meta=meta)
    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam
    # load beam from given header
    exp_beam = Beam(2.0 * u.arcsec)
    header = hdu.header.copy()
    header = exp_beam.attach_to_header(header)
    new_proj = Projection(hdu.data, wcs=proj.wcs, header=header,
                          read_beam=True)
    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam
    # load beam from beam object
    exp_beam = Beam(3.0 * u.arcsec)
    header = hdu.header.copy()
    del header["BMAJ"], header["BMIN"], header["BPA"]
    new_proj = Projection(hdu.data, wcs=proj.wcs, header=header,
                          beam=exp_beam)
    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam
    # Slice the projection with a beam and check it's still there
    assert new_proj[:1, :1].beam == exp_beam
def test_ondespectrum_with_beam():
    """Beam attachment paths (meta / header / keyword) for OneDSpectrum."""
    exp_beam = Beam(1.0 * u.arcsec)
    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    # load beam from meta
    meta = {"beam": exp_beam}
    new_spec = OneDSpectrum(spec.data, wcs=spec.wcs, meta=meta)
    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam
    # load beam from given header
    hdu = spec.hdu
    exp_beam = Beam(2.0 * u.arcsec)
    header = hdu.header.copy()
    header = exp_beam.attach_to_header(header)
    new_spec = OneDSpectrum(hdu.data, wcs=spec.wcs, header=header,
                            read_beam=True)
    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam
    # load beam from beam object
    exp_beam = Beam(3.0 * u.arcsec)
    header = hdu.header.copy()
    new_spec = OneDSpectrum(hdu.data, wcs=spec.wcs, header=header,
                            beam=exp_beam)
    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam
    # Slice the spectrum with a beam and check it's still there
    assert new_spec[:1].beam == exp_beam
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs, data_twelve))
def test_ldo_attach_beam(LDO, data):
    """with_beam returns a new object; the original keeps its beam."""
    exp_beam = Beam(1.0 * u.arcsec)
    newbeam = Beam(2.0 * u.arcsec)
    p = LDO(data, copy=False, beam=exp_beam)
    new_p = p.with_beam(newbeam)
    assert p.beam == exp_beam
    assert p.meta['beam'] == exp_beam
    assert new_p.beam == newbeam
    assert new_p.meta['beam'] == newbeam
@pytest.mark.parametrize(('LDO', 'data'),
                         zip(LDOs_2d, data_two_2d))
def test_projection_from_hdu(LDO, data):
    """Round trip: LDO -> HDU -> from_hdu preserves the data."""
    p = LDO(data, copy=False)
    hdu = p.hdu
    p_new = LDO.from_hdu(hdu)
    assert (p == p_new).all()
def test_projection_subimage(data_55):
    """subimage by pixel or world coords gives equal cutouts; beam is kept."""
    proj, hdu = load_projection(data_55)
    proj1 = proj.subimage(xlo=1, xhi=3)
    proj2 = proj.subimage(xlo=24.06269 * u.deg,
                          xhi=24.06206 * u.deg)
    assert proj1.shape == (5, 2)
    assert proj2.shape == (5, 2)
    assert proj1.wcs.wcs.compare(proj2.wcs.wcs)
    assert proj.beam == proj1.beam
    assert proj.beam == proj2.beam
    proj3 = proj.subimage(ylo=1, yhi=3)
    proj4 = proj.subimage(ylo=29.93464 * u.deg,
                          yhi=29.93522 * u.deg)
    assert proj3.shape == (2, 5)
    assert proj4.shape == (2, 5)
    assert proj3.wcs.wcs.compare(proj4.wcs.wcs)
    # No limits -> identity cutout.
    proj5 = proj.subimage()
    assert proj5.shape == proj.shape
    assert proj5.wcs.wcs.compare(proj.wcs.wcs)
    assert np.all(proj5.value == proj.value)
def test_projection_subimage_nocelestial_fail(data_255_delta, use_dask):
    """subimage must raise when the moment WCS lacks two spatial axes."""
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    proj = cube.moment0(axis=1)
    with pytest.raises(WCSCelestialError,
                       match="WCS does not contain two spatial axes."):
        proj.subimage(xlo=1, xhi=3)
@pytest.mark.parametrize('LDO', LDOs_2d)
def test_twod_input_mask_type(LDO):
    """2D LDOs accept a numpy mask, a MaskBase, or None equivalently."""
    test_wcs = WCS(naxis=2)
    test_wcs.wcs.cunit = ["deg", "deg"]
    test_wcs.wcs.ctype = ["RA---SIN", 'DEC--SIN']
    np_mask = np.ones(twelve_qty_2d.shape, dtype=bool)
    np_mask[1] = False
    bool_mask = BooleanArrayMask(np_mask, wcs=test_wcs,
                                 shape=np_mask.shape)
    # numpy array
    p = LDO(twelve_qty_2d, wcs=test_wcs,
            mask=np_mask)
    assert (p.mask.include() == bool_mask.include()).all()
    # MaskBase
    p = LDO(twelve_qty_2d, wcs=test_wcs,
            mask=bool_mask)
    assert (p.mask.include() == bool_mask.include()).all()
    # No mask
    ones_mask = BooleanArrayMask(np.ones(twelve_qty_2d.shape, dtype=bool),
                                 wcs=test_wcs, shape=np_mask.shape)
    p = LDO(twelve_qty_2d, wcs=test_wcs,
            mask=None)
    assert (p.mask.include() == ones_mask.include()).all()
@pytest.mark.xfail
def test_mask_convolve():
    """Known failure: astropy convolve chokes on our mask=None convention."""
    # Numpy is fundamentally incompatible with the objects we have created.
    # np.ma.is_masked(array) checks specifically for the array's _mask
    # attribute. We would have to refactor deeply to correct this, and I
    # really don't want to do that because 'None' is a much more reasonable
    # and less dangerous default for a mask.
    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    assert spec.mask is False
    from astropy.convolution import convolve,Box1DKernel
    convolve(spec, Box1DKernel(3))
def test_convolve():
    """Smoothing with a width-1 kernel is the identity."""
    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
    from astropy.convolution import Box1DKernel
    specsmooth = spec.spectral_smooth(Box1DKernel(1))
    np.testing.assert_allclose(spec, specsmooth)
def test_spectral_interpolate():
    """Interpolating a linear ramp onto a finer grid stays linear."""
    test_wcs_1 = WCS(naxis=1)
    test_wcs_1.wcs.cunit[0] = 'GHz'
    spec = OneDSpectrum(np.arange(12)*u.Jy, wcs=test_wcs_1)
    new_xaxis = test_wcs_1.wcs_pix2world(np.linspace(0,11,23), 0)[0] * u.Unit(test_wcs_1.wcs.cunit[0])
    new_spec = spec.spectral_interpolate(new_xaxis)
    np.testing.assert_allclose(new_spec, np.linspace(0,11,23)*u.Jy)
def test_spectral_interpolate_with_mask(data_522_delta, use_dask):
    """Masked channels interpolate to NaN; axis reversal is handled."""
    hdu = fits.open(data_522_delta)[0]
    # Swap the velocity axis so indiff < 0 in spectral_interpolate
    hdu.header["CDELT3"] = - hdu.header["CDELT3"]
    cube = SpectralCube.read(hdu, use_dask=use_dask)
    mask = np.ones(cube.shape, dtype=bool)
    mask[:2] = False
    masked_cube = cube.with_mask(mask)
    spec = masked_cube[:, 0, 0]
    # midpoint between each position
    sg = (spec.spectral_axis[1:] + spec.spectral_axis[:-1])/2.
    result = spec.spectral_interpolate(spectral_grid=sg[::-1])
    # The output makes CDELT3 > 0 (reversed spectral axis) so the masked
    # portion are the final 2 channels.
    # NOTE(review): np.NaN is removed in numpy 2.0 — prefer np.nan.
    np.testing.assert_almost_equal(result.filled_data[:].value,
                                   [0.0, 0.5, np.NaN, np.NaN])
def test_spectral_interpolate_reversed(data_522_delta, use_dask):
    """Interpolating onto a reversed grid returns that grid as the new axis."""
    cube, data = cube_and_raw(data_522_delta, use_dask=use_dask)
    # Reverse spectral axis
    sg = cube.spectral_axis[::-1]
    spec = cube[:, 0, 0]
    result = spec.spectral_interpolate(spectral_grid=sg)
    np.testing.assert_almost_equal(sg.value, result.spectral_axis.value)
def test_spectral_interpolate_with_fillvalue(data_522_delta, use_dask):
    """Out-of-range grid points take the supplied fill_value."""
    cube, data = cube_and_raw(data_522_delta, use_dask=use_dask)
    # Step one channel out of bounds.
    sg = ((cube.spectral_axis[0]) -
          (cube.spectral_axis[1] - cube.spectral_axis[0]) *
          np.linspace(1,4,4))
    spec = cube[:, 0, 0]
    result = spec.spectral_interpolate(spectral_grid=sg,
                                       fill_value=42)
    np.testing.assert_almost_equal(result.value,
                                   np.ones(4)*42)
def test_spectral_units(data_255_delta, use_dask):
    """Spectral unit survives slicing and conversion (regression #391)."""
    # regression test for issue 391
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    sp = cube[:,0,0]
    assert sp.spectral_axis.unit == u.km/u.s
    assert sp.header['CUNIT1'] == 'km s-1'
    sp = cube.with_spectral_unit(u.m/u.s)[:,0,0]
    assert sp.spectral_axis.unit == u.m/u.s
    assert sp.header['CUNIT1'] in ('m s-1', 'm/s')
def test_repr_1d(data_255_delta, use_dask):
    """repr of a spectrum and its slices identifies the class."""
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    sp = cube[:,0,0]
    print(sp)
    print(sp[1:-1])
    assert 'OneDSpectrum' in sp.__repr__()
    assert 'OneDSpectrum' in sp[1:-1].__repr__()
def test_1d_slices(data_255_delta, use_dask):
    """Reductions on spectrum slices agree with cube-axis reductions."""
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    sp = cube[:,0,0]
    assert sp.max() == cube.max(axis=0)[0,0]
    assert not isinstance(sp.max(), OneDSpectrum)
    sp = cube[:-1,0,0]
    assert sp.max() == cube[:-1,:,:].max(axis=0)[0,0]
    assert not isinstance(sp.max(), OneDSpectrum)
@pytest.mark.parametrize('method',
                         ('min', 'max', 'std', 'mean', 'sum', 'cumsum',
                          'nansum', 'ptp', 'var'),
                         )
def test_1d_slice_reductions(method, data_255_delta, use_dask):
    """Each numpy-style reduction on a spectrum matches the cube result."""
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    sp = cube[:,0,0]
    # Some reductions only exist on the spectrum, not the cube.
    if hasattr(cube, method):
        assert getattr(sp, method)() == getattr(cube, method)(axis=0)[0,0]
    else:
        getattr(sp, method)()
    assert hasattr(sp, '_fill_value')
    assert 'OneDSpectrum' in sp.__repr__()
    assert 'OneDSpectrum' in sp[1:-1].__repr__()
def test_1d_slice_round(data_255_delta, use_dask):
    """round() preserves values, fill value, and class identity."""
    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
    sp = cube[:,0,0]
    assert all(sp.value.round() == sp.round().value)
    assert hasattr(sp, '_fill_value')
    assert hasattr(sp.round(), '_fill_value')
    assert 'OneDSpectrum' in sp.round().__repr__()
    assert 'OneDSpectrum' in sp[1:-1].round().__repr__()
def test_LDO_arithmetic(data_vda, use_dask):
    """Scalar arithmetic applies to both raw and filled data."""
    cube, data = cube_and_raw(data_vda, use_dask=use_dask)
    sp = cube[:,0,0]
    spx2 = sp * 2
    assert np.all(spx2.value == sp.value*2)
    assert np.all(spx2.filled_data[:].value == sp.value*2)
def test_beam_jtok_2D(data_advs, use_dask):
    """Jy/beam -> K conversion works with explicit and implicit equivalencies."""
    cube, data = cube_and_raw(data_advs, use_dask=use_dask)
    cube._meta['BUNIT'] = 'Jy / beam'
    cube._unit = u.Jy / u.beam
    plane = cube[0]
    freq = cube.with_spectral_unit(u.GHz).spectral_axis[0]
    equiv = plane.beam.jtok_equiv(freq)
    jtok = plane.beam.jtok(freq)
    Kplane = plane.to(u.K, equivalencies=equiv, freq=freq)
    np.testing.assert_almost_equal(Kplane.value,
                                   (plane.value * jtok).value)
    # test that the beam equivalencies are correctly automatically defined
    Kplane = plane.to(u.K, freq=freq)
    np.testing.assert_almost_equal(Kplane.value,
                                   (plane.value * jtok).value)
def test_basic_arrayness(data_adv, use_dask):
    """np.array/asarray/asanyarray on spectra and slices match the raw data."""
    cube, data = cube_and_raw(data_adv, use_dask=use_dask)
    assert cube.shape == data.shape
    spec = cube[:,0,0]
    assert np.all(np.asanyarray(spec).value == data[:,0,0])
    assert np.all(np.array(spec) == data[:,0,0])
    assert np.all(np.asarray(spec) == data[:,0,0])
    # These are commented out because it is presently not possible to convert
    # projections to masked arrays
    # assert np.all(np.ma.asanyarray(spec).value == data[:,0,0])
    # assert np.all(np.ma.asarray(spec) == data[:,0,0])
    # assert np.all(np.ma.array(spec) == data[:,0,0])
    slc = cube[0,:,:]
    assert np.all(np.asanyarray(slc).value == data[0,:,:])
    assert np.all(np.array(slc) == data[0,:,:])
    assert np.all(np.asarray(slc) == data[0,:,:])
    # assert np.all(np.ma.asanyarray(slc).value == data[0,:,:])
    # assert np.all(np.ma.asarray(slc) == data[0,:,:])
    # assert np.all(np.ma.array(slc) == data[0,:,:])
def test_spatial_world_extrema_2D(data_522_delta, use_dask):
    """A plane's world extrema agree with its parent cube's."""
    hdu = fits.open(data_522_delta)[0]
    cube = SpectralCube.read(hdu, use_dask=use_dask)
    plane = cube[0]
    assert (cube.world_extrema == plane.world_extrema).all()
    assert (cube.longitude_extrema == plane.longitude_extrema).all()
    assert (cube.latitude_extrema == plane.latitude_extrema).all()
@pytest.mark.parametrize('view', (np.s_[:, :],
                                  np.s_[::2, :],
                                  np.s_[0]))
def test_spatial_world(view, data_adv, use_dask):
    """plane.world[view] matches an independent all_pix2world computation."""
    p = path(data_adv)
    # d = fits.getdata(p)
    # wcs = WCS(p)
    # c = SpectralCube(d, wcs)
    c = SpectralCube.read(p, use_dask=use_dask)
    plane = c[0]
    wcs = plane.wcs
    shp = plane.shape
    inds = np.indices(plane.shape)
    pix = np.column_stack([i.ravel() for i in inds[::-1]])
    world = wcs.all_pix2world(pix, 0).T
    world = [w.reshape(shp) for w in world]
    world = [w[view] * u.Unit(wcs.wcs.cunit[i])
             for i, w in enumerate(world)][::-1]
    w2 = plane.world[view]
    for result, expected in zip(w2, world):
        assert_allclose(result, expected)
    # Test world_flattened here, too
    # TODO: Enable once 2D masking is a thing
    w2_flat = plane.flattened_world(view=view)
    for result, expected in zip(w2_flat, world):
        print(result.shape, expected.flatten().shape)
        assert_allclose(result, expected.flatten())
| 2.046875 | 2 |
whapCloud.py | andregtorres/whatsapp-scripts | 0 | 12759721 |
#!/usr/bin/env python
#Generates a wordcloud from a exported whatsapp chat
#3/06/2018
from os import path
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
import emoji
import re
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
#d = path.dirname(__file__)
d="./"
# Read the whole text.
text=""
#f= open(path.join(d, '../wappStats/310118.txt'))
f= open(path.join(d, 'agatka.txt'))
i=0
# Each exported line looks like "dd/mm/yy, HH:MM - Sender: message".
for line in f:
    if i ==0:
        # Debug print showing how the first line gets parsed.
        print(line, datetime.strptime(line[0:15].rstrip(" "),'%d/%m/%y, %H:%M'), line[18:],"".join(line[18:].split(": ")[1:]).rstrip("\n"))
    try:
        date=datetime.strptime(line[0:15].rstrip(" "),'%d/%m/%y, %H:%M')
        restOfLine="".join(line[18:].split(": ")[1:]).rstrip("\n")
    except: #messages with \n (date fails) -- continuation lines carry no timestamp prefix
        restOfLine=line.rstrip("\n")
    if (restOfLine != "<Media omitted>"):
        for word in restOfLine.split():
            decode= word#.decode('utf-8')  NOTE(review): py2 leftover; 'word' is already str
            good= True
            # Skip any word that contains an emoji character.
            for c in decode:
                if c in emoji.UNICODE_EMOJI:
                    good= False
                    break
            if good:
                text+=(''.join(decode))
                text+=(" ")
    i+=1
#print(i)
#print text
# Chat-specific stopwords on top of the wordcloud defaults.
stopwords = set(STOPWORDS)
stopwords.add("ye")
stopwords.add("know")
stopwords.add("one")
stopwords.add("lot")
stopwords.add("tell")
stopwords.add("say")
stopwords.add("think")
stopwords.add("yes")
stopwords.add("will")
stopwords.add("maybe")
stopwords.add("even")
stopwords.add("still")
stopwords.add("now")
stopwords.add("really")
stopwords.add("later")
#stopwords.add("ok")
#stopwords.add("going")
stopwords.add("go")
#stopwords.add("well")
stopwords.add("nd")
#stopwords.add("yeah")
stopwords.add("got")
stopwords.add("'m'")
stopwords.add("o")
stopwords.add("ut")
stopwords.add("ou")
stopwords.add("ricardo")
stopwords.add("aby")
stopwords.add("'m'")  # NOTE(review): duplicate of the earlier "'m'" add
stopwords.add("t's")
#print stopwords
# Initializing Dictionary
dic = {}
# Count number of times each word comes up in list of words (in dictionary)
for word in text.split():
    if word.lower() not in stopwords:
        if word not in dic:
            dic[word] = 0
        dic[word] += 1
# word_freq is built for inspection/debugging; it is not used below.
word_freq = []
for key, value in dic.items():
    word_freq.append((value, key))
word_freq.sort(reverse=True)
# read the mask image
h_mask = np.array(Image.open(path.join(d, "blue.png")))
wc = WordCloud(background_color="white", max_words=10000, mask=h_mask,
               stopwords=stopwords)
# generate word cloud
#print wc.process_text(text)
#print dic
wc.generate_from_frequencies(dic)
#wc.generate_from_frequencies(wc.process_text(text))
#wc.generate(text)
# create coloring from image
image_colors = ImageColorGenerator(h_mask)
# store to file
wc.recolor(color_func=image_colors).to_file(path.join(d, "wordcloudtest.png"))
# show
#plt.imshow(wc, interpolation='bilinear')
plt.imshow(wc.recolor(color_func=image_colors), interpolation="bilinear")
plt.axis("off")
#plt.figure()
#plt.imshow(h_mask, cmap=plt.cm.gray, interpolation='bilinear')
#plt.axis("off")
plt.show()
| 2.8125 | 3 |
infra/services/master_manager_launcher/test/desired_state_parser_test.py | eunchong/infra | 0 | 12759722 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import os
import unittest
from infra.libs.buildbot import master
from infra_libs.time_functions import timestamp
from infra_libs.utils import temporary_directory
from infra.services.master_manager_launcher import desired_state_parser
from testing_support import auto_stub
# Directory holding the JSON fixture files used by the file-loading tests.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')

# UNIX timestamp corresponding to 500 seconds past epoch.
# (Each UNIX_TIMESTAMP_XXXX below is the ISO-8601 Zulu string for XXXX
# seconds past the UNIX epoch.)
UNIX_TIMESTAMP_0500 = '1970-01-01T00:08:20Z'
UNIX_TIMESTAMP_1000 = '1970-01-01T00:16:40Z'
UNIX_TIMESTAMP_4000 = '1970-01-01T01:06:40Z'
UNIX_TIMESTAMP_5000 = '1970-01-01T01:23:20Z'
UNIX_TIMESTAMP_6000 = '1970-01-01T01:40:00Z'
UNIX_TIMESTAMP_7000 = '1970-01-01T01:56:40Z'
UNIX_TIMESTAMP_8000 = '1970-01-01T02:13:20Z'
class TestDesiredStateValidation(auto_stub.TestCase):
  """Validation tests for desired-master-state configs.

  'Now' is frozen at 5000 seconds past epoch, so transitions at 4000/6000
  straddle the present while 6000/8000 lie entirely in the future.
  """

  def setUp(self):
    super(TestDesiredStateValidation, self).setUp()
    # Freeze time so transition-time comparisons are deterministic.
    self.mock(timestamp, 'utcnow_ts', lambda: 5000)

  def _stateConfig(self, states, **params):
    """Wrap *states* (and optional master_params) in a minimal config dict."""
    c = {
        'version': desired_state_parser.VERSION,
        'master_states': {
            'master.chromium.fyi': states,
        },
    }
    if params:
      c['master_params'] = {
          'master.chromium.fyi': params,
      }
    return c

  def testValidState(self):
    desired_state_parser.validate_desired_master_state(self._stateConfig(
        [
            {'desired_state': 'running',
             'transition_time_utc': UNIX_TIMESTAMP_4000},
            {'desired_state': 'offline',
             'transition_time_utc': UNIX_TIMESTAMP_6000},
        ],
        drain_timeout_sec=1300,
        builder_filters=[
            r'^valid$',
        ],
    ))

  def testValidStateZulu(self):
    desired_state_parser.validate_desired_master_state(self._stateConfig([
        {'desired_state': 'running',
         'transition_time_utc': UNIX_TIMESTAMP_4000},
        {'desired_state': 'offline',
         'transition_time_utc': UNIX_TIMESTAMP_6000},
    ]))

  def testNoDesiredState(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'transition_time_utc': UNIX_TIMESTAMP_4000},
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
      ]))

  def testNoTransitionTime(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'running',
           'transition_time_utc': UNIX_TIMESTAMP_4000},
          {'desired_state': 'offline'},
      ]))

  def testTransitionTimeInvalid(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'running', 'transition_time_utc': 'boats'},
          {'desired_state': 'offline', 'transition_time_utc': 'llama'},
      ]))

  def testNotSorted(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
          {'desired_state': 'running',
           'transition_time_utc': UNIX_TIMESTAMP_4000},
      ]))

  def testNotSortedZulu(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
          {'desired_state': 'running',
           'transition_time_utc': UNIX_TIMESTAMP_4000},
      ]))

  def testInvalidState(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'pajamas',
           'transition_time_utc': UNIX_TIMESTAMP_4000},
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
      ]))

  def testUncertainPresent(self):
    # All transitions are in the future, so the current state is undefined.
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'running',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_8000},
      ]))

  def testUnknownKeyPresent(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig(
          [
              {'desired_state': 'running',
               'transition_time_utc': UNIX_TIMESTAMP_4000},
              {'desired_state': 'offline',
               'transition_time_utc': UNIX_TIMESTAMP_6000},
          ],
          unknown_key=1337,
      ))

  def testNonNumericDrainTimeout(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig(
          [
              {'desired_state': 'running',
               'transition_time_utc': UNIX_TIMESTAMP_4000},
              {'desired_state': 'offline',
               'transition_time_utc': UNIX_TIMESTAMP_6000},
          ],
          drain_timeout_sec='abc',
      ))

  def testInvalidBuilderFilter(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig(
          [
              {'desired_state': 'running',
               'transition_time_utc': UNIX_TIMESTAMP_4000},
              {'desired_state': 'offline',
               'transition_time_utc': UNIX_TIMESTAMP_6000},
          ],
          builder_filters=[
              r'+invalid-regex+',
          ],
      ))

  def testDifferentVersion(self):
    # Confirm that the configuration loads.
    c = self._stateConfig([
        {'desired_state': 'running',
         'transition_time_utc': UNIX_TIMESTAMP_4000},
        {'desired_state': 'offline',
         'transition_time_utc': UNIX_TIMESTAMP_6000},
    ])
    desired_state_parser.validate_desired_master_state(c)

    # Modify the version to invalidate it.
    c['version'] = 'test'
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(c)

  def testValidFile(self):
    desired_state_parser.load_desired_state_file(
        os.path.join(DATA_DIR, 'valid.json'))

  def testValidPrevVersion(self):
    desired_state_parser.load_desired_state_file(
        os.path.join(DATA_DIR, 'valid_prev_version.json'))

  def testInvalidFile(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.load_desired_state_file(
          os.path.join(DATA_DIR, 'invalid.json'))

  def testBrokenFile(self):
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.load_desired_state_file(
          os.path.join(DATA_DIR, 'broken.json'))

  def testIllegallyManaged(self):
    # A manually-managed master must not also carry a state schedule.
    with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
      desired_state_parser.validate_desired_master_state(self._stateConfig([
          {'desired_state': 'running',
           'transition_time_utc': UNIX_TIMESTAMP_4000},
          {'desired_state': 'offline',
           'transition_time_utc': UNIX_TIMESTAMP_6000},
      ],
          manually_managed='<EMAIL>',
      ))
class TestMasterStateLookup(unittest.TestCase):
    """Tests for desired_state_parser.get_master_state time-based lookup."""

    STATE_CONFIG = [
        {'desired_state': 'pajamas', 'transition_time_utc': UNIX_TIMESTAMP_4000},
        {'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000},
    ]

    def testUnknownPast(self):
        """Before the first transition there is no known state."""
        self.assertIsNone(
            desired_state_parser.get_master_state(self.STATE_CONFIG, now=300))

    def testMiddle(self):
        """Between transitions, the earlier entry is in effect."""
        self.assertEqual(
            desired_state_parser.get_master_state(self.STATE_CONFIG, now=4500),
            self.STATE_CONFIG[0])

    def testEnd(self):
        """After the last transition, the final entry is in effect."""
        self.assertEqual(
            desired_state_parser.get_master_state(self.STATE_CONFIG, now=8000),
            self.STATE_CONFIG[1])
class TestHostnameLookup(auto_stub.TestCase):
    """Tests for resolving which masters on a host to trigger or ignore."""

    def setUp(self):
        super(TestHostnameLookup, self).setUp()
        # The host claims two public and two internal masters.
        self.mock(master, 'get_mastermap_for_host', lambda _x, _y: [
            {'dirname': 'master.chromium', 'internal': False},
            {'dirname': 'master.chromium.fyi', 'internal': False},
            {'dirname': 'master.supersecret', 'internal': True},
            {'dirname': 'master.ultrasecret', 'internal': True},
        ])

    def testHostnameLookup(self):
        """Test that selected masters are triggered and all else are ignored."""
        desired_state = {
            'version': desired_state_parser.VERSION,
            'master_states': {
                'master.chromium.fyi': [
                    {'desired_state': 'running',
                     'transition_time_utc': UNIX_TIMESTAMP_4000},
                ],
                'master.supersecret': [
                    {'desired_state': 'running',
                     'transition_time_utc': UNIX_TIMESTAMP_4000},
                ],
            },
            'master_params': {
                'master.chromium.fyi': {
                    'drain_timeout_sec': 1337,
                },
            },
        }
        triggered, ignored = desired_state_parser.get_masters_for_host(
            desired_state, 'bananas/', 'impenetrablefortress.cool')
        # Only masters present in master_states are triggered.
        names = [entry['dirname'] for entry in triggered]
        self.assertEqual(names, ['master.chromium.fyi', 'master.supersecret'])
        self.assertEqual(ignored, set(['master.chromium', 'master.ultrasecret']))
        # Params are attached when present, and default to empty.
        self.assertEqual(triggered[0]['params'], {'drain_timeout_sec': 1337})
        self.assertEqual(triggered[1]['params'], {})
        self.assertEqual(sorted(ignored),
                         ['master.chromium', 'master.ultrasecret'])
        for entry in triggered:
            self.assertIn(entry['dirname'], desired_state['master_states'])
class TestWritingState(auto_stub.TestCase):
    """Tests for desired_state_parser.write_master_state."""

    def setUp(self):
        super(TestWritingState, self).setUp()
        # Freeze "now" at t=5000 so pruning behavior is deterministic.
        self.mock(timestamp, 'utcnow_ts', lambda: 5000)

    def testPruneOldEntries(self):
        """Stale entries are dropped before the state is written to disk."""
        with temporary_directory() as dirname:
            filename = os.path.join(dirname, 'desired_state.json')
            desired_state_parser.write_master_state({
                'master_states': {
                    'master.chromium.fyi': [
                        {'desired_state': 'running',
                         'transition_time_utc': UNIX_TIMESTAMP_0500},
                        {'desired_state': 'running',
                         'transition_time_utc': UNIX_TIMESTAMP_1000},
                        {'desired_state': 'running',
                         'transition_time_utc': UNIX_TIMESTAMP_4000},
                        {'desired_state': 'offline',
                         'transition_time_utc': UNIX_TIMESTAMP_6000},
                        {'desired_state': 'offline',
                         'transition_time_utc': UNIX_TIMESTAMP_7000},
                    ]},
                'master_params': {},
                'version': desired_state_parser.PREV_VERSION,
            }, filename)
            with open(filename) as f:
                parsed_data = json.load(f)
            # The oldest (t=0500) entry is expected to be pruned.  Note the
            # trailing `filename` argument is assertEqual's `msg`, shown on
            # failure.
            self.assertEqual(parsed_data, {
                'master_states': {
                    'master.chromium.fyi': [
                        {'desired_state': 'running',
                         'transition_time_utc': UNIX_TIMESTAMP_1000},
                        {'desired_state': 'running',
                         'transition_time_utc': UNIX_TIMESTAMP_4000},
                        {'desired_state': 'offline',
                         'transition_time_utc': UNIX_TIMESTAMP_6000},
                        {'desired_state': 'offline',
                         'transition_time_utc': UNIX_TIMESTAMP_7000},
                    ]},
                'master_params': {},
                'version': desired_state_parser.PREV_VERSION,
            }, filename)

    def testInvalidState(self):
        """A non-numeric transition time makes the whole write fail."""
        with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
            with temporary_directory() as dirname:
                filename = os.path.join(dirname, 'desired_state.json')
                desired_state_parser.write_master_state({
                    'master_states': {
                        'master.chromium.fyi': [
                            {'desired_state': 'running',
                             'transition_time_utc': 'toast'},
                            {'desired_state': 'running',
                             'transition_time_utc': UNIX_TIMESTAMP_4000},
                            {'desired_state': 'offline',
                             'transition_time_utc': UNIX_TIMESTAMP_6000},
                            {'desired_state': 'offline',
                             'transition_time_utc': UNIX_TIMESTAMP_7000},
                        ]},
                    'master_params': {},
                    'version': desired_state_parser.PREV_VERSION,
                }, filename)

    def testNothingInPast(self):
        """A state list whose entries are all in the future is invalid."""
        with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
            with temporary_directory() as dirname:
                filename = os.path.join(dirname, 'desired_state.json')
                desired_state_parser.write_master_state({
                    'master_states': {
                        'master.chromium.fyi': [
                            {'desired_state': 'offline',
                             'transition_time_utc': UNIX_TIMESTAMP_6000},
                            {'desired_state': 'offline',
                             'transition_time_utc': UNIX_TIMESTAMP_7000},
                        ]},
                    'master_params': {},
                    'version': desired_state_parser.PREV_VERSION,
                }, filename)

    def testNothing(self):
        """An empty input is normalized to a minimal valid state file."""
        with temporary_directory() as dirname:
            filename = os.path.join(dirname, 'desired_state.json')
            desired_state_parser.write_master_state({}, filename)
            with open(filename) as f:
                parsed_data = json.load(f)
            self.assertEqual(parsed_data, {
                'master_states': {},
                'master_params': {},
                'version': desired_state_parser.PREV_VERSION,
            })
| 2.078125 | 2 |
spacewalk14.py | Benniah/Space-Exploration-NLP | 0 | 12759723 | #!/usr/bin/python
""" DEBUGGING PATTERNS
Both patterns in this exercise contain mistakes and won’t match as expected.
Can you fix them? If you get stuck, try printing the tokens in the doc to see
how the text will be split and adjust the pattern so that each dictionary
represents one token.
"""
# Edit pattern1 so that it correctly matches all case-insensitive mentions
# of "Amazon" plus a title-cased proper noun.
# Edit pattern2 so that it correctly matches all case-insensitive mentions
# of "ad-free", plus the following noun.
import spacy
from spacy.matcher import Matcher
nlp = spacy.load("en_core_web_sm")
doc = nlp(
"Twitch Prime, the perks program for Amazon Prime members offering free "
"loot, games and other benefits, is ditching one of its best features: "
"ad-free viewing. According to an email sent out to Amazon Prime members "
"today, ad-free viewing will no longer be included as a part of Twitch "
"Prime for new members, beginning on September 14. However, members with "
"existing annual subscriptions will be able to continue to enjoy ad-free "
"viewing until their subscription comes up for renewal. Those with "
"monthly subscriptions will have access to ad-free viewing until October 15."
)
# Create the match patterns
pattern1 = [{"LOWER": "amazon"}, {"IS_TITLE": True, "POS": "PROPN"}]
pattern2 = [{"LOWER": "ad"}, {"TEXT": "-"}, {"LOWER": "free"}, {"POS": "NOUN"}]
# Initialize the Matcher and add the patterns
matcher = Matcher(nlp.vocab)
matcher.add("PATTERN1", None, pattern1)
matcher.add("PATTERN2", None, pattern2)
# Iterate over the matches
for match_id, start, end in matcher(doc):
# Print pattern string name and text of matched span
print(doc.vocab.strings[match_id], doc[start:end].text) | 3.40625 | 3 |
Multi-Agent/main.py | arunbalas/ATC-RL | 2 | 12759724 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 14:30:36 2020
@author: Arun
"""
#import Simurgh-multi-agent-main
from Simurgh_multi_agent_main import mddpg
import streamlit as st
################################
## ##
## <NAME> ##
## github.com/arunbalas ##
## ##
################################
if __name__ == "__main__":
st.write("Training Started")
scores = mddpg(n_episodes=1500, max_t=1000, print_every=10) | 2.078125 | 2 |
remind/migrations/0005_auto_20190320_0125.py | yejunzhou/quantserver | 0 | 12759725 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-19 17:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding stop-loss / take-profit fields."""

    dependencies = [
        ('remind', '0004_custom_cost'),
    ]

    operations = [
        migrations.AddField(
            model_name='custom',
            name='stop_loss',
            # help_text is Chinese for "stop-loss line".
            field=models.FloatField(default=0, help_text='止损线', verbose_name='percent_min'),
        ),
        migrations.AddField(
            model_name='custom',
            name='target_profit',
            # help_text is Chinese for "take-profit line".
            # NOTE(review): verbose_name 'percent_min' is identical to the one
            # on stop_loss and looks copy-pasted -- confirm against the model.
            field=models.FloatField(default=0, help_text='止盈线', verbose_name='percent_min'),
        ),
    ]
| 1.445313 | 1 |
apps/manax_theme_alpha/apps.py | manax-dojo/cashflow | 0 | 12759726 | <filename>apps/manax_theme_alpha/apps.py
from django.apps import AppConfig
class ManaxThemeAlphaConfig(AppConfig):
    """Django application configuration for the Manax "alpha" theme app."""
    name = 'manax_theme_alpha'
    verbose_name = "Manax Theme Alpha"
| 1.21875 | 1 |
python/pyqt/pyqt5/sql_connect_sqlite.py | jeremiedecock/snippets | 23 | 12759727 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME> (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# https://doc.qt.io/qtforpython/PySide2/QtSql/QSqlDatabase.html
# https://doc.qt.io/qt-5/sql-programming.html
# https://www.developpez.net/forums/d1590644/autres-langages/python/gui/pyqt/qtsql-probleme-setquery/
# TO MAKE THE TEST DATABASE
# -------------------------
# $ sqlite3 test.sqlite
# sqlite> CREATE TABLE t_pers (nom VARCHAR, age INTEGER);
# sqlite> INSERT INTO t_pers VALUES ("john", 30);
# sqlite> INSERT INTO t_pers VALUES ("billy", 25);
from PyQt5 import QtSql

# Register a SQLite connection as the application's default connection.
db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("test.sqlite")
if not db.open():
    db = None  # error opening the sql database

#query = QtSql.QSqlQuery("SELECT * FROM t_pers", db)
# With no arguments, QSqlQuery uses the default (just-added) connection.
query = QtSql.QSqlQuery()
query.exec("SELECT * FROM t_pers")

# Walk the result set row by row; value(i) returns column i of the row.
while query.next():
    name = query.value(0) # QString
    age = query.value(1)
    print(name, age)
| 1.632813 | 2 |
kmri/__init__.py | robianmcd/keras-mri | 12 | 12759728 | from kmri.visualizer import visualize_model
| 1.070313 | 1 |
src/pandas_helpers.py | wilsonify/oklahoma_city_thunder | 0 | 12759729 | <gh_stars>0
import re
import numpy as np
import pandas as pd
import logging
def clean_column_names(self):
    """Return a copy of *self* with normalized, snake_case column names.

    Each column name is lower-cased, every non-word character is replaced
    with an underscore, runs of underscores are collapsed, and leading /
    trailing underscores are stripped (e.g. ``"My Col (v2)"`` ->
    ``"my_col_v2"``).  The original frame is not modified.
    """
    logging.info("clean_column_names")
    new_column_names = {old: old.lower() for old in self.columns}
    # Apply each substitution to every (partially cleaned) name in turn.
    for pattern, replacement in ((r"\W", "_"), (r"_+", "_")):
        # Lazy %-style args so the message is only built when DEBUG is on.
        logging.debug("replacing %s with %s", pattern, replacement)
        new_column_names = {
            old: re.sub(pattern, replacement, name)
            for old, name in new_column_names.items()
        }
    new_column_names = {old: name.strip("_")
                        for old, name in new_column_names.items()}
    logging.debug("rename dict = %s", new_column_names)
    return self.rename(columns=new_column_names)


pd.DataFrame.clean_column_names = clean_column_names
def parse_date_columns(self):
    """Parse every column whose name contains "date" to datetime, in place.

    Note: this mutates the frame it is called on and returns it to allow
    chaining.
    """
    logging.info("parse_date_columns")
    for date_column in self.filter(regex="date").columns:
        # Lazy %-style args so the message is only built when DEBUG is on.
        logging.debug("parsing %s", date_column)
        self[date_column] = pd.to_datetime(self[date_column])
    return self


pd.DataFrame.parse_date_columns = parse_date_columns
def zero_to_null(self, subset):
    """Replace exact zeros with NaN in the given columns, in place.

    Parameters
    ----------
    subset : iterable
        Column labels to convert.

    Returns the (mutated) frame to allow chaining.
    """
    logging.info("zero_to_null")
    for column in subset:
        # Lazy %-style args so the message is only built when DEBUG is on.
        logging.debug("converting %s zero to null", column)
        # Vectorized equivalent of the former element-wise
        # ``apply(lambda x: x if x != 0 else np.nan)``.
        self[column] = self[column].mask(self[column] == 0, np.nan)
    return self


pd.DataFrame.zero_to_null = zero_to_null
def merge_multi(self, df, **kwargs):
    """Merge two frames that may carry (multi-)indexes.

    Both sides are flattened with ``reset_index`` before merging, and the
    left frame's index is restored on the result.  Extra keyword arguments
    are forwarded to ``DataFrame.merge``.
    """
    logging.info("multi index merge")

    def _flattened(frame):
        # A plain reset_index can raise ValueError (e.g. an index level name
        # collides with an existing column); fall back to discarding the
        # index entirely in that case.
        try:
            return frame.reset_index()
        except ValueError as ve:
            logging.debug("{}, try reset_index".format(ve))
            return frame.reset_index(drop=True)

    merged = _flattened(self).merge(_flattened(df), **kwargs)
    return merged.set_index(self.index.names)


pd.DataFrame.merge_multi = merge_multi
def deduplicate(self, key, numeric_column="max", non_numeric="first", override: dict = None):
    """Collapse duplicate rows by grouping on *key*.

    Numeric columns are aggregated with *numeric_column* (default "max");
    all other columns with *non_numeric* (default "first").  *override*
    maps column names to explicit aggregation functions and wins over the
    dtype-based defaults.
    """
    logging.info("deduplicate")
    if override is None:
        override = dict([])
    how_to_agg = {
        # Series.iteritems() was removed in pandas 2.0; .items() is the
        # long-standing equivalent.
        column: numeric_column if np.issubdtype(dtype, np.number) else non_numeric
        for (column, dtype) in self.dtypes.items()
    }
    how_to_agg.update(override)
    # Lazy %-style args so the message is only built when DEBUG is on.
    logging.debug("aggregating %s groups %s", key, how_to_agg)
    return self.groupby(key).agg(how_to_agg)


pd.DataFrame.deduplicate = deduplicate
def parse_api_columns(self):
    """Normalize API-number columns to 14-character numeric strings.

    Every column whose name contains "api" is cast to str, stripped of
    non-word characters, and right-padded with zeros to 14 characters.
    Mutates and returns *self*.
    """
    logging.info("parse_api_columns")
    for api_column in self.filter(regex="api").columns:
        # Lazy %-style args so the message is only built when DEBUG is on.
        logging.debug("formatting %s", api_column)
        # regex=True is required: since pandas 2.0 str.replace defaults to a
        # literal replacement, which would leave r"\W" untouched.
        self[api_column] = self[api_column] \
            .apply(str) \
            .str.replace(r"\W", "", regex=True) \
            .str.pad(14, side="right", fillchar="0")
    return self


pd.DataFrame.parse_api_columns = parse_api_columns
| 2.765625 | 3 |
Random-Programs/dev/games/misc/computarr.py | naumoff0/Archive | 0 | 12759730 | <filename>Random-Programs/dev/games/misc/computarr.py<gh_stars>0
import time
import sys
import os
from random import randint
#imports
def fail():
    """Play out a fake catastrophic system failure with dramatic pauses."""
    print("FATAL_ERROR:: {} strings unhandled".format(randint(1, 100)))
    for message in ("Attempting to handle strings",
                    "Could not decorrupt streams",
                    "Deleting system32 in:"):
        time.sleep(1)
        print(message)
    time.sleep(0.3)
    for count in ("3", "2", "1"):
        print(count)
        time.sleep(1)
    print("Done!")
def main():
    """Run the fake file-explorer shell until the user 'deletes system32'.

    Hidden command chain: enable admin mode, query the admin IP, query the
    password, then force a login.
    """
    direct = '/>'
    adminEnabled = 'no'
    adminIP = '14.754.01'
    adminPass = '<PASSWORD>'
    sysFold = 'system32'  # loop-invariant; hoisted out of the loop
    while True:
        print("This is a file explorer")
        print("dont enter \"./delete.bat -i @ {}\"".format(sysFold))
        print("if you do bad things will happen")
        doIT = input("C:{}".format(direct))
        if doIT == "./delete.bat -i @ system32":
            # YOU DID THE THING!!!!!!!!!!
            fail()
            break
        if doIT == "cd":
            print("Directory?")
            directChange = input('Directory:')
            direct = directChange + '/>'
        if doIT == './net user -m push admin':
            # BUG FIX: this line used to read "adminEnabled == 'yes'" (a
            # no-op comparison), so admin mode could never be enabled.
            adminEnabled = 'yes'
            print("admin enabled")
        if doIT == './net user -i info @ admin' and adminEnabled == 'yes':
            print("{}".format(adminIP))
        if doIT == './ipconfig push ufoNET @' + adminIP:
            print("{}".format(adminPass))
        if doIT == './force LOGIN admin @ pass:' + adminPass:
            print('access granted')
if __name__ == "__main__":
main() | 2.984375 | 3 |
tests/test_exceptions.py | iamdual/ua-generator | 0 | 12759731 | <gh_stars>0
"""
Random User-Agent
Copyright: 2022 <NAME> (github.com/iamdual)
License: Apache License 2.0
"""
import unittest
import src.ua_generator as ua_generator
from src.ua_generator import exceptions
def raised_call():
    """Trigger InvalidArgumentError via an unsupported platform name."""
    ua_generator.generate(device='desktop', platform='commodore_64')
def raised_call_2():
    """Trigger InvalidArgumentError via unsupported browser names."""
    ua_generator.generate(browser=('netscape', 'ie'))
class TestExceptions(unittest.TestCase):
    """Both invalid-argument entry points must raise InvalidArgumentError."""

    def test_value_error(self):
        with self.assertRaises(exceptions.InvalidArgumentError):
            raised_call()

    def test_value_error_2(self):
        with self.assertRaises(exceptions.InvalidArgumentError):
            raised_call_2()
if __name__ == '__main__':
unittest.main()
| 2.71875 | 3 |
site_scons/site_init.py | talih0/dps-for-iot | 57 | 12759732 | <gh_stars>10-100
def DPS(env):
    """Attach the custom 'SwigDox' builder to a SCons environment.

    `Builder` is resolved from the globals SCons injects when it executes
    site_init.py.
    """
    bld = Builder(action=build_function,
                  emitter=modify_targets)
    env.Append(BUILDERS = {'SwigDox' : bld})
import swig_doc
def build_function(target, source, env):
    """SCons action: generate SWIG documentation for each target.

    Targets whose path contains "py" get Python docs, "js" get JavaScript
    docs; anything else is skipped.  The first source is the doxygen XML.
    """
    doxygen_xml = source[0].srcnode().path
    for node in target:
        path = str(node)
        if "py" in path:
            swig_doc.generate("py", doxygen_xml, node.path)
        elif "js" in path:
            swig_doc.generate("js", doxygen_xml, node.path)
    return None
def modify_targets(target, source, env):
    """SCons emitter: keep only .xml sources as real dependencies."""
    xml_sources = [node for node in source if node.path.endswith(".xml")]
    return target, xml_sources
| 2.4375 | 2 |
sciQt/widgets/timing/dds_table.py | robertfasano/sciQt | 0 | 12759733 | <reponame>robertfasano/sciQt
''' The DDSTable is a child class of IOTable, providing a grid for defining
sequences of DDS frequency and attenuation updates. Each item in the grid is
a DDSButton which, when clicked, opens a dialog box allowing the user to set
a frequency and/or attenuation update for the step. These values remain
constant until another DDS event is reached. When a DDSButton is queried for
its state, it returns a dictionary like {'frequency': 1e6, 'attenuation': 3},
indicating a frequency update of 100 MHz and an attenuation update of 3 dB
(half of full power).
A timestep in a sequence might contain a DDS field like
'DDS': {'A0': {'frequency': 100e6, 'attenuation': 3}, 'A1': {'frequency': 1e6}},
indicating that channel A0 should be updated to 100 MHz and 3 dB attenuation,
while channel A1 should be updated to 1 MHz. All other channels are unaffected.
'''
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QDialogButtonBox
from PyQt5.QtCore import Qt
from sciQt.widgets import LabeledEdit, DictDialog
from sciQt.widgets.timing import IOTable, IOButton
from sciQt.tools import parse_units
class DDSButton(IOButton):
    ''' A widget which allows specification of a frequency and attenuation via a popup dialog. '''
    def __init__(self, channel):
        IOButton.__init__(self, channel, size=(75, 50))
        self.clicked.connect(self.create_event)
        # Pending update for this timestep, e.g.
        # {'frequency': 1e6, 'attenuation': 3}; empty dict means "no event".
        self.state = {}

    def get_state(self):
        ''' Returns a dictionary representing the event state with magnitude scaled
            to the base unit. For example, if the button reads '100 MHz', then this
            method will return {'frequency': 100e6}. '''
        if self.state != {}:
            if 'frequency' in self.state:
                # NOTE(review): here the *first* element of parse_units() is
                # stored as the frequency, while set_state() below stores the
                # *second* element -- confirm which ordering parse_units
                # actually returns.
                self.state['frequency'], freq_string = parse_units(self.state['frequency'], base_unit='Hz')
            return {self.channel: self.state}
        else:
            return {}

    def set_state(self, state):
        ''' Takes a state dictionary of the form {'frequency': 100e6, 'attenuation': 3}
            and displays a unitful string converted to the most compact representation -
            for example, 100e6 is converted to '100 MHz'. '''
        self.state = state
        string = ''
        if 'frequency' in state:
            magnitude, state['frequency'] = parse_units(state['frequency'], base_unit='Hz')
            string += f"{state['frequency']}"
        if 'attenuation' in state:
            # Separate the two values with a newline when both are shown.
            if 'frequency' in state:
                string += '\n'
            string += f"-{state['attenuation']} dB"
        self.setText(string)
        # Mark the button "active" (for stylesheet purposes) iff it shows
        # any event text, then force a style refresh.
        self.setProperty('active', string != '')
        self.setStyle(self.style())

    def create_event(self):
        ''' Open a dialog to allow user input of a new frequency and/or attenuation. '''
        state = {'frequency': '', 'attenuation': ''}
        state.update(self.state)
        state, updated = DictDialog(state).get_parameters()
        # Leave the button untouched when the dialog was cancelled.
        if not updated:
            return
        self.set_state(state)
class DDSTable(IOTable):
    ''' A table of buttons allowing specification of settings for each DDS
        in the passed "dds" list. '''
    def __init__(self, timing_table, dds):
        # Must be set before IOTable.__init__, which presumably instantiates
        # one button_widget per channel -- confirm in IOTable.
        self.button_widget = DDSButton
        IOTable.__init__(self, timing_table, dds, 'DDS')
        # Row height: 50 px button plus the table's vertical padding.
        self.verticalHeader().setDefaultSectionSize(50+self.vertical_margin)
| 2.765625 | 3 |
colosseumrl/BaseEnvironment.py | carletonz/colosseumrl | 8 | 12759734 | """ Abstract definition of a game environment. Whenever you wish to make a new environment, make sure to subclass
this to have all of the correct functions. """
import numpy as np
from abc import ABC, abstractmethod
from typing import Tuple, List, Union, Dict
class BaseEnvironment(ABC):
""" Base class for all environments that can be used by colosseum. """
def __init__(self, config: str = ""):
""" The base server environment that all implementations should follow.
Parameters
----------
config : str
Optional config string that will be passed into the constructor. You can use this however you like.
Load options from string, have it point to a file and read it, etc.
"""
self._config = config
@property
@abstractmethod
def min_players(self) -> int:
""" Property holding the number of players present required to play game.
Returns
-------
int
Minimum number of players for game to start.
"""
raise NotImplementedError
@property
@abstractmethod
def max_players(self) -> int:
""" Property holding the max number of players present for a game.
Currently, this should be the same as min_players. Future additions will allow for dynamic games.
Returns
-------
int
Maximum number of players allowed in game.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def observation_names() -> List[str]:
""" Static method for returning the names of the observation objects.
This needs to be static to setup spacetime dataframes.
Returns
-------
List[str]
The keys of the observation dictionary.
"""
raise NotImplementedError
@property
@abstractmethod
def observation_shape(self) -> Dict[str, tuple]:
""" Describe the fixed numpy shapes of each observation.
Returns
-------
Dict[str, Tuple[int]]
The shape, as a tuple, of each numpy array by their name.
"""
raise NotImplementedError
@abstractmethod
def new_state(self, num_players: int = None) -> Tuple[object, List[int]]:
""" Create a fresh state. This could return a fixed object or randomly initialized on, depending on the game.
Note that player numbers must be numbers in the set {0, 1, ..., n-1} for an n player game.
Parameters
----------
num_players : int
Total number of players in this game.
Returns
-------
new_state : object
The initial state for the game. This can be any python object you wish.
new_players: List[int]
List of players who's turn it is now.
"""
raise NotImplementedError
def add_player(self, state: object) -> object:
""" OPTIONAL Add a new player to an already existing game.
If your game cannot dynamically change, then you can leave these methods alone.
Notes
-----
Currently not used in any environment and support for dynamic games is still pending.
Parameters
----------
state : object
The current state of the game
Returns
-------
new_state : object
The state of the game after adding a new player.
"""
raise RuntimeError("Cannot add new players to an existing game.")
def remove_player(self, state: object, player: int) -> object:
""" OPTIONAL Remove a player from the current game if they disconnect somehow.
Notes
-----
Currently not used in any environment and support for dynamic games is still pending.
Parameters
----------
state : object
The current state of the game.
player : int
The player number of remove.
Returns
-------
new_state : object
The state of the game after remove the player.
"""
raise RuntimeError("Cannot remove players from an existing game.")
@abstractmethod
def next_state(self, state: object, players: [int], actions: [str]) \
-> Tuple[object, List[int], List[float], bool, Union[List[int], None]]:
"""
Compute a single step in the game.
Notes
-----
Player numbers must be numbers in the set {0, 1, ..., n-1} for an n player game.
Parameters
----------
state : object
The current state of the game.
players: [int]
The players which are taking the given actions.
actions : [str]
The actions of each player.
Returns
-------
new_state : object
The new state of the game.
new_players: List[int]
List of players who's turn it is in the new state now.
rewards : List[float]
The reward for each player that acted.
terminal : bool
Whether or not the game has ended.
winners: List[int]
If the game has ended, who are the winners.
"""
raise NotImplementedError
def compute_ranking(self, state: object, players: [int], winners: [int]) -> Dict[int, int]:
""" OPTIONAL
Compute the final ranking of all of the players in the game. The state object will be a terminal object.
By default, this will simply give a list of players that won with a ranking 0 and losers with ranking 1.
Parameters
----------
state: object
Terminal state of the game, right after the final move.
players: List[int]
A list of all players in the game
winners: List[int]
A list of final winners in the game.
Returns
-------
Dict[int, int]
A Dictionary mapping player number to of rankings for each player.
Lower rankings indicating better placement.
"""
winner_set = set(winners)
return {player: (0 if player in winner_set else 1) for player in players}
@abstractmethod
def valid_actions(self, state: object, player: int) -> [str]:
""" Valid actions for a specific state.
Parameters
----------
state : object
The current state of the game.
player : int
The player who is executing this action.
Returns
-------
List[str]
All possible actions for the game.
"""
raise NotImplementedError
@abstractmethod
def is_valid_action(self, state: object, player: int, action: str) -> bool:
""" Whether or not an action is valid for a specific state.
Parameters
----------
state : object
The current state of the game.
player : int
The player who is executing this action.
action : str
The action the player is executing.
Returns
-------
bool
Whether or not this is a valid action in the current state.
"""
raise NotImplementedError
    @abstractmethod
    def state_to_observation(self, state: object, player: int) -> Dict[str, np.ndarray]:
        """ Convert the raw game state to the observation for the agent.

        Maps each observation name (see `observation_names`) to a numpy
        array. Implementations may return different observations for
        different players (partial observability).

        Note: abstract -- there is no default implementation here;
        subclasses must override this.
        """
        raise NotImplementedError
# Serialization Methods
@staticmethod
def serializable() -> bool:
""" Some environments may allow for the full game state to be serializable and transferable to the agents.
Returns
-------
bool
Whether or not this class supports serialization of the state."""
return False
@staticmethod
def serialize_state(state: object) -> bytearray:
""" Serialize a game state and convert it to a bytearray to be saved or sent over a network.
Parameters
----------
state : object
The current game state.
Returns
-------
bytearray
Serialized byte-string for the state.
"""
raise NotImplementedError
@staticmethod
def deserialize_state(serialized_state: bytearray) -> object:
""" Convert a serialized bytearray back into a game state.
Parameters
----------
serialized_state : bytearray
Serialized byte-string for the state.
Returns
-------
object
The current game state.
"""
raise NotImplementedError
class SimpleConfigParser:
    def __init__(self, *types):
        """Helper class to build ';'-separated config parsers for environments.

        Each positional argument is either a bare type (a required option)
        or a (type, default_value) pair (an optional option).  All optional
        options must come after the required ones.
        """
        # Normalized as (converter, has_default, default_value) triples.
        self.types = []
        for entry in types:
            if isinstance(entry, (tuple, list)):
                self.types.append((entry[0], True, entry[1]))
            else:
                self.types.append((entry, False, None))

    def parse(self, config):
        """Parse a ';'-separated config string into typed option values."""
        fields = config.split(";") if config is not None else []
        options = []
        consumed = -1
        # Convert every provided field; the literal "None" maps to None.
        for consumed, (raw, (converter, _, _)) in enumerate(zip(fields, self.types)):
            options.append(converter(raw) if raw != "None" else None)
        if len(fields) == len(self.types):
            return options
        # Some options were not supplied; the next one must be optional.
        consumed += 1
        if not self.types[consumed][1]:
            raise ValueError("Required Argument not provided: Option {}".format(consumed))
        for (_, _, default_value) in self.types[consumed:]:
            options.append(default_value)
        return options

    def store(self, *args):
        """Serialize option values back into a ';'-separated config string."""
        options = [str(default_value) for _, _, default_value in self.types]
        index = 0
        for index, value in enumerate(args):
            options[index] = str(value)
            # Every unsupplied option after the last provided one must have
            # a default.
            if (len(args) < len(self.types)) and (not self.types[index + 1][1]):
                raise ValueError("Not enough required arguments provided.")
        return ";".join(options)
blog/posts.py | Lanseuo/lucas-blog | 1 | 12759735 | import re
from . import top_level_path
from .post import Post
class Posts:
    """Helpers for reading published posts off the filesystem."""

    @staticmethod
    def get_posts():
        """Return all published posts under <top_level>/posts, sorted."""
        posts = []
        for filename in (top_level_path / "posts").iterdir():
            # Strip the leading "YYYY-MM-DD-" date prefix and the ".md"
            # suffix to recover the permalink.
            permalink = re.sub(
                r"[-_\w/]*\d\d\d\d-\d\d-\d\d-([\w\d_-]*).md",
                lambda x: x.group(1),
                filename.name
            )
            post = Post(permalink)
            if post.is_published():
                posts.append(post)
        return sorted(posts)

    @staticmethod
    def get_posts_as_json():
        """Return all published posts serialized via Post.to_json()."""
        return [post.to_json() for post in Posts.get_posts()]
| 2.546875 | 3 |
servers/Radiation/plots/.py | arpitgogia/mars_city | 25 | 12759736 | <reponame>arpitgogia/mars_city
from pymongo import MongoClient
import numpy as np
import matplotlib.pyplot as plt
cli = MongoClient()
db = cli.ascrapper
pre = db.prediccs

x = []
y = []
# Take the first prediction document whose last data row is from 2016 and
# collect its time (column 1) and dose (column 5) values.
for doc in pre.find():
    if '2016.' in doc['data'][-1][1]:
        data = doc['data']
        # print() so the script runs under both Python 2 and Python 3
        # (the original used the Python-2-only "print i" statement).
        print(doc)
        for row in data:
            x.append(float(row[1]))
            y.append(float(row[5]))
        break

x = np.array(x)
y = np.array(y)
plt.plot(x, y, 'b', label="data")
'''
f_data = [2016.20901826,2016.38270548,2016.6409532,2016.64657534,2016.64694635,2016.64694863,2016.64697489,2016.64981735,2016.65111301,2016.68752854,2016.99810502,2016.99880137]
for i in f_data:
    plt.axvline(x=i,color='k', linestyle='--')
'''
plt.xlabel('Time(UTC)')
# Fixed label: the closing parenthesis was missing.
plt.ylabel('Dosage(cGy/day)')
plt.legend(loc='upper left')
plt.title('Dose Mars')
plt.show()
| 2.671875 | 3 |
src/Sudoku.py | kingkong135/Sudoku-solver | 0 | 12759737 | <filename>src/Sudoku.py<gh_stars>0
from fuzzywuzzy import process
import numpy as np
from tool import Singleton
import sudoku_solving
from Block import Block
@Singleton
class Sudoku:
    """Singleton holding the 9x9 grid of recognized Blocks, plus caches of
    previously solved puzzles used to smooth over per-frame OCR noise."""

    def __init__(self):
        size = (9, 9)
        # digit-string -> solution dict returned by sudoku_solving.solve
        self.already_solved = {}
        # digit-string -> positions of cells that already held a number
        self.already_solved_numbers = {}
        # puzzle strings known to be unsolvable (skip re-solving each frame)
        self.already_solved_false = []
        # np.object was removed in NumPy 1.24; the builtin `object` is the
        # supported spelling of the same dtype.
        self.puzzle = np.empty(size, dtype=object)
        for i in range(size[0]):
            for j in range(size[1]):
                self.puzzle[i, j] = Block()

    def update_block(self, img, block_pos, physical_pos):
        """Refresh one cell with a new image crop and screen position."""
        self.puzzle[block_pos].update(img, block_pos, physical_pos)

    def guess_sudoku(self, confidence_threshold=0):
        """Run digit recognition on every cell of the grid."""
        for i in range(9):
            for j in range(9):
                block = self.puzzle[i, j]
                block.guess_number(confidence_threshold=confidence_threshold)

    def write_solution(self, sudoku_image, solution, ignore=None):
        """Draw the solved digits onto the image.

        `solution` maps cell names 'A1'..'I9' to digits.  When `ignore` is
        given, it lists (row, col) cells that must not be overwritten;
        otherwise only originally-empty cells (number == 0) are written.
        """
        if solution is not False:
            cols = '123456789'
            rows = 'ABCDEFGHI'
            for i in range(9):
                for j in range(9):
                    number = solution[rows[i] + cols[j]]
                    block = self.puzzle[i, j]
                    if ignore is None:
                        if block.number == 0:
                            block.write(sudoku_image, number)
                    else:
                        if (i, j) not in ignore:
                            block.write(sudoku_image, number)

    def get_existing_numbers(self):
        """Return (row, col) positions of all cells recognized as non-empty."""
        existing_numbers = []
        for i in range(9):
            for j in range(9):
                block = self.puzzle[i, j]
                if block.number != 0:
                    existing_numbers.append((i, j))
        return existing_numbers

    def as_string(self):
        """Serialize the grid to an 81-character digit string (0 = empty)."""
        string = ''
        array = np.ravel(self.puzzle)
        for guy in array:
            string += str(guy.number)
        return string

    def solve_basic(self):
        """Solve the current grid, reusing a cached solution when available.

        NOTE(review): unlike solve_approximate(), this does not add newly
        found solutions to the cache -- confirm whether that is intended.
        """
        string = self.as_string()
        if string in self.already_solved.keys():
            return self.already_solved[string]
        else:
            solved = sudoku_solving.solve(string)
            return solved

    def solve_approximate(self, approximate=False):
        'If it finds a sudoku similar to one it has already done, uses its solution'
        string = self.as_string()
        if string in self.already_solved.keys():
            return self.already_solved[string], self.already_solved_numbers[string]
        else:
            # We save the attempts that we already did but were unsuccesful
            if string in self.already_solved_false:
                solved = False
            else:
                solved = sudoku_solving.solve(string)
                # Print answer
                sudoku_solving.result(string)
            # If the sudoku is unsolvable but very similar to one we already did
            # we assume it's the same one but we couldn't quite catch some numbers
            # Approximate is percent-based, 90 = 90%
            if solved is False:
                # Saves this sudoku as false so we don't have to try to solve it every frame
                self.already_solved_false.append(string)
                if self.already_solved.keys():
                    guesses = process.extract(string, self.already_solved.keys())
                    if guesses:
                        # Prioritizes length, then similarity to the guess
                        if approximate is False:
                            best = max(guesses, key=lambda x: (x[1], len(self.already_solved_numbers[x[0]])))[0]
                            return self.already_solved[best], self.already_solved_numbers[best]
                        else:
                            sorty = sorted(guesses, key=lambda x: (len(self.already_solved_numbers[x[0]]), x[1]),
                                           reverse=True)
                            for item in sorty:
                                if item[1] > approximate:
                                    return self.already_solved[item[0]], self.already_solved_numbers[item[0]]
                            # No candidate met the ratio threshold; fall back
                            # to the overall best guess.
                            best = max(guesses, key=lambda x: (x[1], len(self.already_solved_numbers[x[0]])))[0]
                            return self.already_solved[best], self.already_solved_numbers[best]
            # Only saves correct solutions
            if solved is not False:
                # also save the numbers that already exist in the array
                # (so we don't write over them if we can't see them)
                self.already_solved_numbers[string] = self.get_existing_numbers()
                self.already_solved[string] = solved
                return solved, self.already_solved_numbers[string]
            return False, False

    def solve(self, img_cropped_sudoku, approximate=False):
        """Solve (possibly approximately) and draw the solution in place."""
        solution, existing_numbers = self.solve_approximate(approximate)
        self.write_solution(img_cropped_sudoku, solution, ignore=existing_numbers)
py/heka/heka_log.py | mabotech/mabo.io | 0 | 12759738 | # -*- coding: utf-8 -*-
"""
log for heka Logfile input
"""
import logbook
from datetime import datetime  # NOTE(review): unused in this module
# Timestamp log records in the machine's local timezone.
logbook.set_datetime_format("local")
import socket  # NOTE(review): unused in this module
import gevent
# Application-wide logger; every record is written to test.log through the
# file handler pushed below.
logger = logbook.Logger('app')
log = logbook.FileHandler('test.log')
log.push_application()
def main():
    """Emit an 'info' log record every 3 seconds, forever.

    Serves as a heartbeat feed for the heka Logfile input (see module
    docstring).  Blocks the calling greenlet and never returns.
    """
    while True:
        logger.info("info")
        gevent.sleep(3)
if __name__ == '__main__':
    # Run the logging loop only when executed as a script.  (Removed the
    # dataset-export residue "| 2.609375 | 3" that had been fused onto this
    # line and made it a syntax error.)
    main()
#!/usr/bin/env python3
from subprocess import DEVNULL, call

# Speak a greeting at startup.  Argument-list form avoids shell=True (no shell
# quoting/injection pitfalls); stderr is discarded just like the original
# command's `2>/dev/null` redirection.
call(["espeak", "Welcome to granDome"], stderr=DEVNULL)
"""
User interface to control simultanous captures and leds
-- Using i2c from Raspberry and Arduino
@ mercurio
"""
from tkinter import *
from tkinter.ttk import Progressbar
from PIL import ImageTk, Image, ImageGrab
import os, shutil, subprocess, signal
import smbus, time, datetime
import json
from sh import gphoto2 as gp
import shutil
from glob import glob
import settings
import webbrowser, threading
from i2c_devices import i2c_checker
import RPi.GPIO as GPIO
###### metadata ---------------------------------
# focal = settings.focal_length()
# Session timestamps, captured ONCE at import time (not refreshed per capture).
today_time = datetime.datetime.now().strftime("%H:%M")
today_date = datetime.datetime.now().strftime("%d/%m/%Y")
# "7W" metadata dictionaries.  The UI mutates these module-level dicts in
# place (e.g. what['Appelation'] = ..., which.update(...)); they are also the
# default arguments of metadata() below, so in-place updates propagate.
who = {"Actor":"", "Company":""}
where = {"Place":""}
when = {"Date":today_date, "Time":today_time}
what = {"Appelation":"rti", "Description":""}
how = {"Modality":{"Technique":"RTI", "Protocol":{"Automation":"", "Detail":{"AcquisitionType":"", "LPFilename":"LP", "DomeDiameterinmm":750}}}}
which = {"Camera":{"Type":"DSRL", "Model":"", "Focal":"", "Iso":"", "Aperture":"", "Whitebalance":"", "Shutterspeed":""},
         "Light":{"SourceType":"LED", "Number":"", "Natural":"True"}}
why = {"Project":""}
def metadata(who=who, where=where, when=when, what=what, how=how, which=which, why=why):
    """Assemble the '7W' activity-metadata dict for JSON serialization.

    NOTE(review): the defaults are the module-level dicts captured at
    definition time (mutable default arguments).  The rest of the file relies
    on this aliasing — it mutates `what`/`how`/`which` in place and then calls
    metadata() without arguments — so do not convert these to None-sentinels
    without auditing every caller.
    """
    inside_data = {'WHO':who, 'WHERE':where, 'WHEN':when, 'WHAT':what, 'HOW':how, 'WHICH':which, 'WHY':why}
    # (local name intentionally shadows the function; it is only the return value)
    metadata = {'Activity':inside_data}
    return metadata
# Ensure the output directory for metadata files exists.
try:
    os.mkdir("./json")
except OSError:
    pass  # already exists (or not creatable; writing will then fail loudly)
#### Json file
# Compute the timestamped target name ONCE.  The original called
# datetime.now() twice, so the chmod target and the file actually written
# could have different names; it also chmod-ed the path before any file
# existed there.
json_file_name = "./json/"+str(datetime.datetime.now().strftime("%d%m%Y%H%M%S"))+".json"
subprocess.run(["sudo", "chmod", "777", json_file_name])  #### Get permission to make an empty json file
def json_file(metadata, path=None): ##### Save data
    """Serialize *metadata* to the module-level ``json_file_name``.

    Args:
        metadata: JSON-serializable dict (typically built by ``metadata()``).
        path: optional destination directory; when given, the written file is
            moved there after writing.
    """
    json_object = json.dumps(metadata, indent=4)
    # `fh` instead of the original `json_file`, which shadowed this function's
    # own name; also dropped the debug print of the closed file handle.
    with open(json_file_name, "w") as fh:
        fh.write(json_object)
    if path is not None:
        shutil.move(json_file_name, path)
######
####### ------------- Clavier
# On-screen keyboard layout: maps each character to its (row, column) grid
# position — start_captures() reads grid_value[0] as the row and
# grid_value[1] as the column when building the keypad.
cara = settings.clavier()
class user_interface:
    def __init__(self):
        """Build the fullscreen main menu.

        Corner frames hold exit / settings / shutdown buttons; the central
        frame holds the capture and project-browser buttons plus a
        free-memory readout.  All frames are stacked in grid cell (0, 0).
        """
        self.interface = Tk()
        #self.interface.geometry("800x480")
        self.interface.attributes("-fullscreen", True)
        self.interface.configure(bg="#212121")
        self.interface.title("Dome")
        self.w = self.interface.winfo_screenwidth()
        # One frame per screen zone.
        self.frame = Frame(self.interface, bg="#212121")
        self.frame_exit = Frame(self.interface, bg="#212121")
        self.frame_menu_reglages = Frame(self.interface, bg="#212121")
        self.frame_shutdown = Frame(self.interface, bg="#212121")
        self.frame_version = Frame(self.interface, bg="#212121")
        self.frame_bienvenue = Frame(self.interface, bg="#212121")
        self.label_bienvenue = Label(self.frame_bienvenue, text="DÔME Mercurio V1", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 35, "bold"))
        # Icons must be kept as instance attributes or Tk garbage-collects them.
        self.icon_exit = ImageTk.PhotoImage(Image.open(icons_path_+"IconeAnnuler.png").resize((70, 70)), Image.BILINEAR)
        self.icon_reglages = ImageTk.PhotoImage(Image.open(icons_path_+"IconeSettings.png").resize((70, 70)), Image.BILINEAR)
        self.icon_menu_capture = ImageTk.PhotoImage(Image.open(icons_path_+"menu_capture.png").resize((165, 165)), Image.BILINEAR)
        self.icon_menu_projects = ImageTk.PhotoImage(Image.open(icons_path_+"menu_projets.png").resize((165, 165)), Image.BILINEAR)
        self.icon_shutdown = ImageTk.PhotoImage(Image.open(icons_path_+"IconeEteindre.png").resize((70, 70)), Image.BILINEAR)
        self.icon_mercurio = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
        self.label_mercurio_icon = Label(self.interface, image=self.icon_mercurio, bg="#212121")
        self.info_label = Label(self.frame, bitmap='info', bg="#212121", fg="#FFF3AE")
        self.memory_label = Label(self.frame, text="Free Memory : "+str(settings.check_memory()[2])+" Go", fg="#FFF3AE", bg="#212121")
        self.button_exit = Button(self.frame_exit, text="Sortir", bg="#212121", fg="#212121", relief="flat"
                                  ,cursor="tcross", command=self.close_window)
        # NOTE(review): gridded again further down — appears redundant but harmless.
        self.button_exit.grid(row=0, column=0, sticky='news')
        self.button_reglages = Button(self.frame_menu_reglages, text="Reglages",relief=FLAT, bg="#212121", fg="#212121", activebackground = "#33B5E5", bd=0
                                      , cursor="tcross", command=self.menu_reglages)
        self.button_capture = Button(self.frame, text="Commencer",relief="flat", bg="#212121", fg="#FFF3AE"
                                     ,compound=TOP, cursor="tcross", font=("Roboto Mono", 18 * -1), command=self.start_captures)
        self.button_projects = Button(self.frame, text="Projets",relief="flat", bg="#212121", fg="#FFF3AE"
                                      ,compound=TOP, cursor="tcross", font=("Roboto Mono", 18 * -1), command=self.projects)
        self.button_shutdown = Button(self.frame_shutdown, text="Eteindre",relief="flat", bg="#212121", fg="#212121"
                                      ,compound=TOP, cursor="tcross", command=self.shutdown)
        self.button_exit['image'] = self.icon_exit
        self.button_reglages['image'] = self.icon_reglages
        self.button_capture['image'] = self.icon_menu_capture
        self.button_projects['image'] = self.icon_menu_projects
        self.button_shutdown['image'] = self.icon_shutdown
        # Layout: corners + centered main frame.
        self.interface.rowconfigure(0, weight=1)
        self.interface.columnconfigure(0, weight=1)
        self.frame_exit.grid(row=0, column=0, stick='nw')
        self.frame_menu_reglages.grid(row=0, column=0, stick='ne')
        self.frame_shutdown.grid(row=0, column=0, stick='se')
        self.frame_version.grid(row=0, column=0, stick='s')
        self.frame_bienvenue.grid(row=0, column=0, stick='n')
        self.label_bienvenue.grid(row=0, column=0, sticky='n')
        self.button_exit.grid(row=0, column=0, sticky='news')
        self.button_reglages.grid(row=0, column=0, sticky='news')
        self.button_capture.grid(row=4, column=2, padx=10, pady=30, sticky='news')
        self.button_projects.grid(row=4, column=3, padx=10, pady=30, sticky='news')
        self.info_label.grid(row=5, column=2, padx=10, pady=20, sticky='news')
        self.memory_label.grid(row=5, column=3, pady=20, sticky='news')
        self.button_shutdown.grid(row=0, column=0, sticky='news')
        self.label_mercurio_icon.place(x=-15, y=425)
        self.frame.grid(row=0, column=0, padx=10, pady=50, sticky='n')
        self.interface.update()
def close_window(self):
try:
bus = smbus.SMBus(1)
bus.write_byte(0x44, 1)
except:
pass
mario_sound(100)
self.interface.destroy()
### ---------------------------------------- Menu Reglages ---------------------------------------------------------------------
    def menu_reglages(self):
        """Open the fullscreen settings screen (metadata, dome tester,
        camera settings, about) as a Toplevel window."""
        self.reglage_interface = Toplevel()
        self.reglage_interface.attributes('-fullscreen', True)
        #self.reglage_interface.geometry("800x480")
        self.reglage_interface.configure(bg="#212121")
        self.reglage_frame = Frame(self.reglage_interface, bg="#212121", relief=FLAT)
        self.reglage_frame_retour = Frame(self.reglage_interface, bg="#212121", relief=FLAT)
        # Icons kept on self so Tk does not garbage-collect them.
        self.icon_apropos = ImageTk.PhotoImage(Image.open(icons_path_+"IconeFaq.png").resize((160, 160)), Image.BILINEAR)
        self.icon_metadata = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_metadata.png").resize((160, 160)), Image.BILINEAR)
        self.icon_dometester = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_dome_tester.png").resize((160, 160)), Image.BILINEAR)
        self.icon_cameratester = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_camera_tester.png").resize((160, 160)), Image.BILINEAR)
        self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
        self._icon_mercurio_ = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
        self.__label_mercurio_icon = Label(self.reglage_frame, image=self._icon_mercurio_, bg="#212121")
        self.button_retour = Button(self.reglage_frame_retour, text="Sortir", bg="#212121", fg="#212121",
                                    relief="flat", compound=TOP, cursor="tcross",
                                    command=self.reglage_interface.destroy)
        self.button_retour['image'] = self.icon_retour
        self.button_apropos = Button(self.reglage_frame, text="A Propos", bg="#212121", fg="#FFF3AE", cursor="tcross", relief="flat",
                                     font=("Roboto Mono", 13 * -1), compound=TOP, command=self.apropos)
        self.button_metadata = Button(self.reglage_frame, text='Meta Data', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
                                      compound=TOP, font=("Roboto Mono", 13 * -1), command=self._reglage_metadata_)
        self.button_dometester = Button(self.reglage_frame, text='Tester le Dome', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
                                        compound=TOP, font=("Roboto Mono", 13 * -1), command=self.reglage_dometester)
        self.button_cameratester = Button(self.reglage_frame, text='Réglages de la Camera', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
                                          compound=TOP, font=("Roboto Mono", 13 * -1), command=self.reglage_cameratester)
        self.button_apropos['image'] = self.icon_apropos
        self.button_metadata['image'] = self.icon_metadata
        self.button_dometester['image'] = self.icon_dometester
        self.button_cameratester['image'] = self.icon_cameratester
        # 2x2 button layout with a back button in the top-left corner.
        self.reglage_interface.rowconfigure(0, weight=1)
        self.reglage_interface.columnconfigure(0, weight=1)
        self.reglage_frame.grid(row=0, column=0, sticky='news')
        self.reglage_frame_retour.grid(row=0, column=0, stick='nw')
        self.button_retour.pack(anchor=NW)
        self.button_metadata.place(x=250, y=250)
        self.button_cameratester.place(x=250, y=50)
        self.button_dometester.place(x=450, y=50)
        self.button_apropos.place(x=450, y=250)
        self.__label_mercurio_icon.place(x=-15, y=425)
### ---------------------------------------- start Captures ---------------------------------------------------------------------
    def apropos(self):
        """Open the 'About' screen (delegates to the module-level others())."""
        others()
    def start_captures(self):
        """Open the capture screen: project-name entry with an on-screen
        keypad, fast/dense mode buttons, progress bar and status label."""
        self.capture_wind = Toplevel()
        self.capture_wind.attributes('-fullscreen', True)
        #self.capture_wind.geometry("800x480")
        self.capture_wind.configure(bg="#212121")
        self.capture_frame = Frame(self.capture_wind, bg="#212121")
        self.capture_frame_exit = Frame(self.capture_wind, bg="#212121")
        self.label_projectName = Label(self.capture_frame, text="Nom du Projet", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1), width=20)
        self.entry_projectName = Entry(self.capture_frame, width=50, bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 14 * -1, "bold"))
        # Pre-fill with the current appellation from the shared metadata dict.
        self.entry_projectName.insert(END, what["Appelation"]) ###
        self.icon_mercurio_cap = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
        self.label_mercurio_icone_ = Label(self.capture_frame, image=self.icon_mercurio_cap, bg="#212121")
        self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
        self.capture_button_exit = Button(self.capture_frame_exit, text="Sortir", bg="#212121", fg="#212121",
                                          relief="flat", cursor="tcross", command=self.capture_wind.destroy)
        self.capture_button_exit['image'] = self.icon_retour
        self.mode_aq_icon_dense = ImageTk.PhotoImage(Image.open(icons_path_+"allumeLed.png").resize((50, 35)), Image.BILINEAR)
        self.mode_aq_icon_rapide = ImageTk.PhotoImage(Image.open(icons_path_+"aq_rapide_icon.png").resize((50, 35)),Image.BILINEAR)
        self.button_mode_rapide = Button(self.capture_frame, width=15, text="MODE RAPIDE", font=("Roboto Mono", 16 * -1, "bold"),
                                         bg="#212121", fg="#FFF3AE", command=self._mode_rapide_)
        self.button_mode_lent = Button(self.capture_frame, width=15, text="MODE DENSE", font=("Roboto Mono", 16 * -1, "bold"),
                                       bg="#212121", fg="#FFF3AE", command=self._mode_lent_)
        # NOTE(review): despite the "COMMENCER" caption, this button is wired
        # to __stop__ (which closes the i2c bus and the window) — verify.
        self.button_AQ = Button(self.capture_frame, width=15, text="COMMENCER", font=("Roboto Mono", 16 * -1, "bold"),
                                bg="#212121", fg="#FFF3AE", command=self.__stop__)
        self.state_label = Label(self.capture_frame, relief="flat", bg="#212121")
        self.progress_bar = Progressbar(self.capture_frame, orient=HORIZONTAL, length=375)
        self.label_aq = Label(self.capture_frame, text="", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1))
        self.capture_wind.rowconfigure(0, weight=1)
        self.capture_wind.columnconfigure(0, weight=1)
        self.capture_frame_exit.grid(row=0, column=0, sticky='nw')
        self.capture_button_exit.grid(row=0, column=0, sticky='nw')
        ############### CLAVIER #########################################
        # Build the keypad from `cara`: char -> (row, column); rows 5-8 get
        # slightly different button styling.
        keypad_frame = Frame(self.capture_wind, bg='#212121', relief='groove')
        for car, grid_value in cara.items():
            if grid_value[0] == 5:
                button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', activebackground ='green', bd=0, font=("Roboto Mono", 15 * -1, "bold"), width=3,
                                borderwidth=0, relief='flat', command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=2, sticky='news')
            if grid_value[0] == 6:
                button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
                                borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
            if grid_value[0] == 7:
                button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE',bd=5, font=("Roboto Mono", 15 * -1, "bold"),
                                borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
            if grid_value[0] == 8:
                button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
                                borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
        button_del = Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', activebackground ='gray', font=('helvetica', 14, 'bold'),
                            borderwidth=0, command=self.delete_text_).grid(row=8, column=11, pady=2, sticky='news')
        keypad_frame.grid(row=0, column=0, sticky='s')
        #################################################################
        self.capture_frame.grid(row=0, column=0, sticky="news")
        self.label_projectName.place(x=125, y=20)
        self.entry_projectName.place(x=160, y=40)
        self.button_mode_rapide.place(x=175, y=150)
        self.button_mode_lent.place(x=425, y=150)
        self.state_label.place(x=370, y=147)
        self.progress_bar.place(x=200, y=250)
        self.button_AQ.place(x=300, y=200)
        self.label_aq.place(x=275, y=280)
        self.label_mercurio_icone_.place(x=-15, y=425)
    def _mode_rapide_(self):
        """'Fast' acquisition mode: 85 LED positions.  Persists the project
        metadata, gives audio/LED feedback, highlights the active mode in the
        UI and starts the capture loop."""
        self.project_data()
        print("Mode Rapide lancé!")
        mario_sound(100)
        led_1_ctrl(1)
        self.state_label.config(image=self.mode_aq_icon_rapide, bg='#212121')
        self.button_AQ['text'] = "ARRETER"
        self.button_mode_rapide['bg'] = '#424035'
        self.button_mode_lent['bg'] = '#212121'
        self._aquisition_(image_nb=85) ############
    def _mode_lent_(self):
        """'Dense' acquisition mode: 155 LED positions.  Persists the project
        metadata, gives audio/LED feedback, highlights the active mode in the
        UI and starts the capture loop."""
        self.project_data()
        print("Mode Dense lancé!")
        mario_sound(100)
        led_2_ctrl(1)
        self.state_label.config(image=self.mode_aq_icon_dense, bg='#212121')
        self.button_AQ['text'] = "ARRETER"
        self.button_mode_lent['bg'] = '#424035'
        self.button_mode_rapide['bg'] = '#212121'
        # NOTE(review): stray space in "self ._aquisition_" is valid Python
        # but worth cleaning up.
        self ._aquisition_(image_nb=155) #############
    def project_data(self):
        """Read the project name from the entry, store it in the shared
        `what` metadata dict, persist the metadata JSON, and return the name
        (may be an empty string)."""
        p_name = self.entry_projectName.get() ### p_name == Project name
        what['Appelation'] = p_name
        json_file(metadata(what=what))
        return p_name
def set_text_(self, text):
widget = self.capture_wind.focus_get()
self.entry_projectName.insert("insert", text)
    def delete_text_(self):
        """Keypad '<' callback: clear the whole project-name entry."""
        self.entry_projectName.delete(0, END)
### ---------------------------------------- See Projects ---------------------------------------------------------------------
    def projects(self):
        """Open the fullscreen project browser: list saved capture folders,
        preview an image, and offer delete / copy-to-USB actions.

        NOTE(review): this rebinds self.frame and self.icon_retour, clobbering
        the widgets created in __init__ — harmless while this window is on
        top, but verify before refactoring.
        """
        self.project_wind = Toplevel()
        self.project_wind.attributes('-fullscreen', True)
        #self.project_wind.geometry("800x480")
        self.project_wind.configure(bg="#212121")
        self.frame = Frame(self.project_wind, bg="#212121")
        self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
        self.button_exit_ = Button(self.frame, text="Sortir", bg="#212121", fg='#424035', command=self.project_wind.destroy)
        self.button_exit_['image'] = self.icon_retour
        self.icon_mercurio_pro = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
        self.label_mercurio_icone = Label(self.frame, image=self.icon_mercurio_pro, bg="#212121")
        # Delete/copy start disabled; enabled once a project is selected.
        self.button_delete_project = Button(self.frame, text="Supprimer", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), width=8,
                                            state=DISABLED, command=self.message_box)
        self.button_copy_project = Button(self.frame, text="Copier USB", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), width=8,
                                          state=DISABLED, command=self.copy_to_usb_)
        self.label_display = Label(self.frame, height=450, bg="#212121", fg="#424035", relief="ridge", font=("Roboto Mono", 15 * -1, "bold"))
        self.label_imageName = Label(self.frame, bg="#212121", font=("Roboto Mono", 10 * -1, "bold"))
        self.label_Nombre = Label(self.frame, bg="#212121", fg="#FFF3AE",font=("Roboto Mono", 10 * -1, "bold"))
        self.scrollbar = Scrollbar(self.frame, width=45, bg="#FFF3AE", troughcolor="#212121")
        # One list entry per directory under rti_path, sorted alphabetically.
        self.list_project = os.listdir(rti_path)
        self.list_project.sort()
        self.listeProjet = Listbox(self.frame, height=20, width=10, yscrollcommand=self.scrollbar.set, bg="#212121", fg='#FFF3AE', font=("Roboto Mono", 20 * -1, "bold"))
        for projet in self.list_project:
            self.listeProjet.insert(END, projet)
        self.listeProjet.bind("<<ListboxSelect>>", self.selection)
        self.project_wind.rowconfigure(0, weight=1)
        self.project_wind.columnconfigure(0, weight=1)
        self.frame.rowconfigure(0, weight=1)
        self.frame.columnconfigure(0, weight=1)
        self.frame.grid(row=0, column=0, sticky="news")
        self.button_exit_.place(x=0, y=0)
        self.button_delete_project.pack(anchor=SE)
        self.button_copy_project.pack(anchor=SE)
        self.scrollbar.place(x=25, y=200)
        self.label_display.place(x=100, y=0)
        self.listeProjet.place(x=75, y=0)
        self.label_mercurio_icone.place(x=-15, y=425)
        self.label_Nombre.place(x=300, y=455)
def selection(self, event):
self.button_delete_project['state'] = NORMAL
self.button_copy_project['state'] = NORMAL
projet_select = self.listeProjet.get(self.listeProjet.curselection())
print("----", projet_select)
self.list_project = os.listdir(rti_path+str(projet_select))
root, folder, file = next(os.walk(rti_path+str(projet_select)))
for i in file:
if i.endswith(".JPG"):
thumb_file = i
self.previewImg = Image.open(rti_path+str(projet_select)+"/"+thumb_file).resize((600, 520))
self.image__ = ImageTk.PhotoImage(self.previewImg, Image.BILINEAR)
self.label_display.configure(image=self.image__)
self.label_display.image = self.image__
def copy_to_usb_(self):
self.label_Nombre.config(text="Veuillez attendre -- cela peut prendre quelques temps !")
self.project_wind.update()
self.message_box_usb = Toplevel()
self.message_box_usb.attributes('-fullscreen', True)
#self.message_box_usb.geometry("800x480")
self.message_box_usb.configure(bg="#212121")
self.icon_retusb = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.icn_ret = ImageTk.PhotoImage(master=self.message_box_usb, image=self.icon_retusb)
self.button_quitusb = Button(self.message_box_usb, text="Sortir", bg="#212121", command=self.message_box_usb.destroy)
self.button_quitusb['image'] = self.icn_ret
self.button_quitusb.pack(anchor=NW)
self.label_usb = Label(self.message_box_usb, text="", bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1,'bold' ))
self.label_usb.place(x=100, y=100)
self.label_usb_mem = Label(self.message_box_usb, text="", bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1,'bold' ))
self.label_usb_mem.place(x=100, y=200)
projet_select = self.listeProjet.get(self.listeProjet.curselection())
print("----selected--2--USB", projet_select)
media_path = "/media/pi/"
folders_in_media = os.listdir(media_path)
if len(folders_in_media) == 0:
print("Inserérez une clé USB")
self.label.config(text="Insérez Une Clé USB SVP !")
else:
usb_path = media_path+folders_in_media[0]
total, used, free = shutil.disk_usage(usb_path)
### Make Zip
self.label_usb_mem.config(text="Disponible : "+str(round((free/2**30), 2))+"/"+str(round((total/2**30), 2))+" GO")
if round((free/2**30), 2) > 1.0 :
self.label_usb.config(text="Veuillez attendre -- cela peut prendre quelques temps !")
shutil.make_archive(usb_path+"/"+projet_select, 'zip', rti_path+str(projet_select))
print("Projet copié avec succès !")
self.label_usb.config(text="Le Projet "+projet_select+" est copié vers la clé USB")
else:
self.label_usb.config(text="Votre espace est insuffisant")
self.label_usb_mem.config(text="Disponible : "+str(round((free/2**30), 2))+"/"+str(round((total/2**30), 2))+" GO")
self.message_box_usb.rowconfigure(0, weight=1)
self.message_box_usb.columnconfigure(0, weight=1)
self._logo_mercurio_usb = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__usb = ImageTk.PhotoImage(master=self.message_box_usb, image=self._logo_mercurio_usb)
self.__label_logo__usb = Label(self.message_box_usb, image=self.__logo__usb, bg="#212121").place(x=-15, y=425)
    def message_box(self):
        """Fullscreen yes/no confirmation dialog before deleting the selected
        project.

        NOTE(review): `self.message_box = Toplevel()` shadows this bound
        method on the instance after the first call.  It still works because
        the delete button captured the bound method at creation time and
        remove_selected() relies on self.message_box being the Toplevel — but
        rename either one only together with the other.
        """
        self.message_box = Toplevel()
        self.message_box.attributes('-fullscreen', True)
        #self.message_box.geometry("800x480")
        self.message_box.configure(bg="#212121")
        projet_select = self.listeProjet.get(self.listeProjet.curselection())
        self.label_deleting = Label(self.message_box, text="Voulez-vous supprimer le Projet : "+str(projet_select), bg="#424035", fg='#FFF3AE',
                                    font=("Roboto Mono", 20 * -1, "bold"))
        self.button_yes = Button(self.message_box, text="OUI", width=10, height=5, bg="#420035", fg='#FFF3AE',
                                 font=("Roboto Mono", 22 * -1, "bold"), command=self.remove_selected)
        self.button_No = Button(self.message_box, text="NON", width=10, height=5, bg="#4240F0", fg='#FFF3AE',
                                font=("Roboto Mono", 22 * -1, "bold"), command=self.message_box.destroy)
        self.message_box.rowconfigure(0, weight=1)
        self.message_box.columnconfigure(0, weight=1)
        self.label_deleting.grid(row=0, column=0, pady=5, sticky='news')
        self.button_yes.grid(row=1, column=0, pady=5, sticky='news')
        self.button_No.grid(row=2, column=0, pady=5, sticky='news')
def remove_selected(self):
projet_select = self.listeProjet.get(self.listeProjet.curselection())
directotory_to_remove = rti_path+str(projet_select)
subprocess.run(["rm", "-rf", directotory_to_remove])
trois_colors(150)
self.message_box.destroy()
### ---------------------------------------- Eteindre ---------------------------------------------------------------------
    def shutdown(self):
        """Flash the LEDs, switch them off over i2c, then power off the Pi."""
        trois_colors(250)
        # NOTE(review): `bus` is a module-level SMBus handle opened elsewhere;
        # this raises NameError if shutdown is pressed before any code opened
        # the bus — verify.
        bus.write_byte(0x44, 0)
        os.system('sudo shutdown -h now')
###############################################################################################
########################### REGLAGES Nombre LEDs #####################################################
##############################################################################################
def project_exists(self):
print("Exists ! ")
self.window_overwrite = Toplevel()
self.window_overwrite.attributes("-fullscreen", True)
#self.window_overwrite.geometry("800x480")
over_write_label = Label(self.window_overwrite, text="Ecraser ?").grid(row=0, cloumn=0, sticky='n')
btn_OK = Button(self.window_overwrite, text="Oui", command=lambda projectName:self.return_and_rename(rti_path+str(projectName))).grid(row=1, cloumn=0, sticky='n')
btn_NON = Button(self.window_overwrite, text="Non", command=self.window_overwrite.destroy).grid(row=1, cloumn=1, sticky='n')
    def thumbnail(self, projectName):
        """Capture a single frame and save a downscaled thumbnail.JPG into
        the given project folder under rti_path."""
        ############ ------ ThumNail
        bus = smbus.SMBus(1)
        settings.killprocess()
        gp(clearCMD)
        # [2, 15] to the Arduino at 0x44 — presumably lights the thumbnail
        # LED pattern; confirm against the firmware.
        bus.write_block_data(0x44, 0, [2, 15])
        thumb_name = "thumbnail.JPG"
        print("Thumb Name created !")
        #subprocess.run(["gphoto2", "--trigger-capture", "--wait-event=FILEADDED"])
        os.system("gphoto2 --trigger-capture --wait-event=FILEADDED")
        settings.killprocess()
        print("---------------------------1")
        # Download the captured frame from the camera into thumb_name.
        os.system("gphoto2 --filename="+thumb_name+" --get-all-files")
        time.sleep(0.4)
        print("---------------------------2")
        settings.killprocess()
        # Byte 1 to 0x44 — presumably LEDs off (same command used elsewhere).
        bus.write_byte(0x44, 1)
        dest = shutil.move(thumb_name, rti_path+str(projectName)+"/")
        led_1_ctrl(1)
        time.sleep(0.2)
        led_1_ctrl(0)
        im = Image.open(rti_path+str(projectName)+"/thumbnail.JPG")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 (use
        # Image.LANCZOS); this line breaks on modern Pillow versions.
        im.thumbnail((600, 400), Image.ANTIALIAS)
        im.save(rti_path+str(projectName)+"/thumbnail.JPG")
        print("Thumb Created !")
    def __stop__(self):
        """
        Stop i2c transmission
        """
        # NOTE(review): `bus` is a module-level SMBus handle opened elsewhere
        # (e.g. in _aquisition_); byte 1 to 0x44 appears to switch the LEDs
        # off.  Bound to the "COMMENCER" button in start_captures, so it also
        # closes the capture window.
        bus.write_byte(0x44, 1)
        bus.close()
        self.capture_wind.destroy()
    def _aquisition_(self, image_nb):
        """Run a full RTI capture session of `image_nb` photos (85 or 155).

        For each LED position: command the Arduino over i2c (address 0x44) to
        light one LED, trigger the camera through gphoto2, then switch the
        LED off.  The images are downloaded from the camera in a background
        thread, renamed IMG_0000..IMG_nnnn, and moved into
        <project>_<image_nb>/rti/.  Does nothing useful when no camera or i2c
        device is detected.
        """
        i2c_state = i2c_checker() ### Check i2c ?
        leds = [0, 1, 4, 6, 8, 11, 12, 13, 14, 17, 19, 21, 23, 24, 26, 27, 30] ## 85 LEDs Mode !
        ### For 155 LEDs (Deleting small LEds !
        # 160 indices minus the last slot of each of the 5 banks of 32 -> 155.
        leds_a_allumer = [s for s in range(160)]
        for k in range(5):
            leds_a_allumer.remove(31+(32*k))
        # Fallback project name when the user left the entry empty.
        default_projectname = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
        camera_available = settings.camera_available()
        if camera_available == True and i2c_state != 0 :
            self.label_aq.config(text="Camera and i2c Device Detected")
            self.capture_wind.update_idletasks()
            self.capture_wind.update()
            ##########
            print("Camera is On ")
            # Record the acquisition parameters in the shared metadata dicts.
            which["Light"]["Number"]= image_nb
            if image_nb == 85 :
                how['Modality']['Protocol']['Detail']['AcquisitionType']="RTI LEGERE"
            elif image_nb == 155:
                how['Modality']['Protocol']['Detail']['AcquisitionType']="RTI DENSE"
            # how['Modality']['Protocol']['Detail']['LPFilename']="LP"+str(image_nb)
            json_file(metadata(which=which))
            json_file(metadata(how=how))
            # Wipe any leftovers on the camera's storage before the session.
            subprocess.run(["gphoto2", "--folder", camera_folder, "-R", "--delete-all-files"])
            # gp(clearCMD)
            ############### ------
            projectname = self.project_data()
            if len(projectname) == 0:
                try:
                    os.mkdir(rti_path+default_projectname+"_"+str(image_nb))
                except:
                    pass
                # NOTE(review): the directory was just created above, so this
                # exists-check is always true and project_exists() fires every
                # run — the check likely belongs BEFORE the mkdir.
                if os.path.exists(rti_path+default_projectname+"_"+str(image_nb)):
                    self.project_exists()
                self.thumbnail(default_projectname+"_"+str(image_nb))
            else:
                try:
                    os.mkdir(rti_path+projectname+"_"+str(image_nb))
                except:
                    pass
                self.thumbnail(projectname+"_"+str(image_nb))
            #####################################
            what["Appelation"]=projectname
            json_file(metadata(what=what))
            print("__json__")
            # gphoto2 filename template: captures land directly in rti_path
            # and are renamed/moved into the project folder further below.
            file_name = rti_path+projectname+"rti%Y%m%d%H%M%S%f.%C"
            #### Save Json File
            camera_data = save_camera_data()
            which.update(camera_data)
            lp_filename = how['Modality']['Protocol']['Detail']['LPFilename']
            # Write the final metadata JSON and the light-position (.lp) file
            # into the project folder.
            if len(projectname) == 0:
                json_file(metadata(what=what, how=how, who=who, where=where, when=when, which=which, why=why),
                          path=str(rti_path+default_projectname+"_"+str(image_nb)+"/"))
                shutil.copy(lp_path+"LP_"+str(image_nb)+".lp", str(rti_path+default_projectname+"_"+str(image_nb)+"/"))
                os.rename(rti_path+default_projectname+"_"+str(image_nb)+"/"+"LP_"+str(image_nb)+".lp",
                          str(rti_path+default_projectname+"_"+str(image_nb)+"/"+lp_filename+".lp"))
            else:
                json_file(metadata(what=what, how=how, who=who, where=where, when=when, which=which,why=why),
                          path=str(rti_path+projectname+"_"+str(image_nb)+"/"))
                shutil.copy(lp_path+"LP_"+str(image_nb)+".lp", str(rti_path+projectname+"_"+str(image_nb)+"/"))
                os.rename(rti_path+projectname+"_"+str(image_nb)+"/"+"LP_"+str(image_nb)+".lp",
                          rti_path+projectname+"_"+str(image_nb)+"/"+lp_filename+".lp")
            bus = smbus.SMBus(1)
            bus.write_byte(0x44, 1)
            # Capture loop: [3, led_index] lights one LED, byte 1 switches off.
            if image_nb == 85:
                for k in range(5):
                    print(str(k))
                    for s, i in enumerate(leds):
                        settings.killprocess()
                        print(str(s), i)
                        self.label_aq.config(text="En Cours de PDV "+str((17*k)+(s+1))+ "/85... Please Wait!")
                        self.progress_bar['value'] += 100/(len(leds)*5)
                        self.capture_wind.update_idletasks()
                        bus.write_block_data(0x44, 0, [3, 32*k+i])
                        subprocess.run(["gphoto2", "--trigger-capture"])
                        time.sleep(0.4)
                        self.capture_wind.update()
                        bus.write_byte(0x44, 1)
            elif image_nb == 155:
                for i, j in enumerate(leds_a_allumer):
                    settings.killprocess()
                    self.label_aq.config(text="En Cours de PDV "+str(i)+ "/"+str(len(leds_a_allumer))+" ... Please Wait!")
                    self.progress_bar['value'] += 100/len(leds_a_allumer)
                    self.capture_wind.update_idletasks()
                    bus.write_block_data(0x44, 0, [3, j])
                    subprocess.run(["gphoto2", "--trigger-capture"])
                    time.sleep(0.4)
                    self.capture_wind.update()
                    bus.write_byte(0x44, 1)
            self.label_aq['text'] = "Enregistrement des images..."
            self.progress_bar['value'] = 0
            self.capture_wind.update()
            try:
                if len(projectname) == 0:
                    os.mkdir(rti_path+default_projectname+"_"+str(image_nb)+"/rti")
                else:
                    os.mkdir(rti_path+projectname+"_"+str(image_nb)+"/rti")
            except:
                pass
            #settings.get_data_from_camera(file_name)
            # Download from the camera in the background while the UI polls
            # the destination directory below.
            data_getter = threading.Thread(target=settings.get_data_from_camera, args=(file_name,))
            data_getter.start()
            self.progress_bar['value'] = 0
            nombre_img = 0
            # Busy-wait (with UI refresh) until more than image_nb JPGs have
            # appeared in rti_path.
            while(nombre_img<=image_nb):
                nombre_img = len(glob(rti_path+"*.JPG"))
                self.progress_bar['value'] = ((nombre_img-1)/image_nb)*100
                self.label_aq.config(text=str(nombre_img)+"/"+str(image_nb))
                self.capture_wind.update_idletasks()
                self.capture_wind.update()
            self.progress_bar['value'] = 0
            self.capture_wind.update()
            jpg_files = glob(rti_path+'*.JPG')
            jpg_files.sort()
            # Zero-pad indices to 4 digits (IMG_0000 ...).  NOTE(review): the
            # elif chain leaves renamed_file unbound for i >= 1000.
            prefix_name = "0000"
            for i, img in enumerate(jpg_files):
                if len(str(i)) == 1:
                    renamed_file = rti_path+"IMG_"+prefix_name[:-1]+str(i)
                elif len(str(i)) == 2:
                    renamed_file = rti_path+"IMG_"+prefix_name[:-2]+str(i)
                elif len(str(i)) == 3:
                    renamed_file = rti_path+"IMG_"+prefix_name[:-3]+str(i)
                os.rename(img, renamed_file+".JPG")
                self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+ " Renamed ! ")
                if len(projectname) == 0:
                    dest = shutil.move(renamed_file+".JPG", rti_path+default_projectname+"_"+str(image_nb)+"/rti/")
                    self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+ " Moved To Folder ! ")
                    self.progress_bar['value'] += 100/image_nb
                    self.capture_wind.update()
                else :
                    dest = shutil.move(renamed_file+".JPG", rti_path+projectname+"_"+str(image_nb)+"/rti/")
                    self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+" Moved To Folder ! ")
                    self.progress_bar['value'] += 100/image_nb
                    self.capture_wind.update()
            self.capture_wind.destroy()
            # Clear the camera storage again now that everything is local.
            subprocess.run(["gphoto2", "--folder", camera_folder, "-R", "--delete-all-files"])
            #gp(clearCMD)
        if camera_available == False or i2c_state == 0:
            print("No Camera Detected Or No Device !")
            self.label_aq['text'] = " No Camera or i2c Device"
            camera_available = settings.camera_available()
            self.capture_wind.update()
    def choix_aq(self):
        """Confirm the acquisition choice: set the module-level `apply` flag,
        close the settings window, and return the flag."""
        global apply
        apply = True
        # NOTE(review): self.reglage_aq_win is not created anywhere in this
        # chunk — verify that dialog is opened before this command can fire.
        self.reglage_aq_win.destroy()
        print("Applied!", apply)
        return apply
## -----
    def _reglage_metadata_(self):
        """Open the fullscreen metadata menu.

        Shows four buttons (Utilisateur, Caméra info, Environement, Autres)
        that each open their own data-entry window, plus a back button that
        closes this menu.
        """
        self.reglage_metadata = Toplevel()
        self.reglage_metadata.attributes('-fullscreen', True)
        #self.reglage_metadata.geometry("800x480")
        self.reglage_metadata.configure(bg="#212121")
        # Two overlapping frames: one centred for the menu buttons, one
        # pinned top-left for the back button.
        self.reglage_frame = Frame(self.reglage_metadata, bg="#212121")
        self.reglage_frame_exit = Frame(self.reglage_metadata)
        # Icons are kept as attributes so Tk holds a live reference to them.
        # NOTE(review): Image.BILINEAR is passed to PhotoImage (where PIL
        # ignores it), not to resize() — presumably intended as resize()'s
        # resample argument; confirm.
        self.icon_user = ImageTk.PhotoImage(Image.open(icons_path_+"utilisateur.png").resize((160, 160)), Image.BILINEAR)
        self.icon_camera_info = ImageTk.PhotoImage(Image.open(icons_path_+"camera_info.png").resize((160, 160)), Image.BILINEAR)
        self.icon_environdata = ImageTk.PhotoImage(Image.open(icons_path_+"environement.png").resize((160, 160)), Image.BILINEAR)
        self.icon_other_data = ImageTk.PhotoImage(Image.open(icons_path_+"autres.png").resize((160, 160)), Image.BILINEAR)
        self.icon_retour_ = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
        self.__label_mercurio_icon__ = Label(self.reglage_metadata, image=self.icon_mercurio, bg="#212121")
        # Back button (top-left) and the four menu buttons.
        self.button_exit = Button(self.reglage_frame_exit, relief="flat", compound=TOP, bg="#212121",
                                  command=self.reglage_metadata.destroy)
        self.button_user = Button(self.reglage_frame, text="Utilisateur", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
                                  fg="#FFF3AE", command=self.user_data)
        self.button_camera_info = Button(self.reglage_frame, text="Caméra info", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
                                         fg="#FFF3AE", command=self.camera_info)
        self.button_environement_data = Button(self.reglage_frame, text="Environement", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
                                               fg="#FFF3AE", command=self.environement_data)
        self.button_other_data = Button(self.reglage_frame, text="Autres", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
                                        fg="#FFF3AE", command=self.other_data)
        self.button_exit['image'] = self.icon_retour_
        self.button_user['image'] = self.icon_user
        self.button_camera_info['image'] = self.icon_camera_info
        self.button_environement_data['image'] = self.icon_environdata
        self.button_other_data['image'] = self.icon_other_data
        # Layout: 2x2 button grid centred in the window.
        self.reglage_metadata.rowconfigure(0, weight=1)
        self.reglage_metadata.columnconfigure(0, weight=1)
        self.reglage_frame.grid(row=0, column=0, sticky='n')
        self.reglage_frame_exit.grid(row=0, column=0, sticky='nw')
        self.button_exit.grid(row=0, column=0, sticky='news')
        self.button_user.grid(row=2, column=2, padx=5, pady=20, sticky='news')
        self.button_camera_info.grid(row=2, column=3, padx=5, pady=20, sticky='news')
        self.button_environement_data.grid(row=3, column=2, padx=5, pady=20, sticky='news')
        self.button_other_data.grid(row=3, column=3, padx=5, pady=20, sticky='news')
        self.__label_mercurio_icon__.place(x=-15, y=425)
### -- --
    def user_data(self):
        """Open the photographer-data entry window (module-level ``photographer_data``)."""
        photographer_data()
    def camera_info(self):
        """Open the camera-information window.

        Inside this method, the bare name resolves to the module-level
        ``camera_info`` class, not this method.
        """
        camera_info()
    def environement_data(self):
        """Open the environment-metadata window (module-level ``environement_data`` class)."""
        environement_data()
    def other_data(self):
        """Open the camera-folder editor.

        NOTE(review): despite the "Autres" button label, this opens
        ``_camera_folder_``, not the ``others`` window — confirm intentional.
        """
        _camera_folder_()
## -----
    def reglage_dometester(self):
        """Open the fullscreen dome-LED tester window.

        Offers all-on / all-off / single-LED buttons plus two sliders
        (LED number 0-155 and intensity 0-150) whose callbacks talk to
        the I2C LED controller.
        """
        self.dome_wind = Toplevel()
        self.dome_wind.attributes('-fullscreen', True)
        #self.dome_wind.geometry("800x480")
        self.dome_wind.configure(bg="#212121")
        self.frame = Frame(self.dome_wind, bg="#212121")
        self.frame_scales = Frame(self.dome_wind, bg="#212121")
        # Icons kept on self so Tk retains a live reference.
        self.button_retour_icon = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((75, 75)), Image.BILINEAR)
        self.tout_allumer_icon = ImageTk.PhotoImage(Image.open(icons_path_+"toutAllumer.png").resize((120, 120)), Image.BILINEAR)
        self.tout_eteindre_icon = ImageTk.PhotoImage(Image.open(icons_path_+"toutEteindre.png").resize((120, 120)), Image.BILINEAR)
        self.allumer_ledX = ImageTk.PhotoImage(Image.open(icons_path_+"allumerledXon.png").resize((120, 120)), Image.BILINEAR)
        self.eteindre_ledX = ImageTk.PhotoImage(Image.open(icons_path_+"eteindreLed.png").resize((120, 120)), Image.BILINEAR)
        self._label_mercurio_icon_ = Label(self.dome_wind, image=self.icon_mercurio, bg="#212121")
        # destroy_ switches the LEDs off (best effort) before closing.
        self.button_exit = Button(self.frame, image=self.button_retour_icon, bg="#212121",
                                  compound=TOP, command=self.destroy_)
        self.button_tout_allumer = Button(self.frame, text="Tout Allumer", bg="#212121", relief='flat',
                                          compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._allOn_)
        self.button_tout_eteindre = Button(self.frame, text="Tout Eteindre", bg="#212121", relief='flat',
                                           compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._AllOff_)
        self.button_allumer_led_x = Button(self.frame, text="Allumer LED X", bg="#212121", relief='flat',
                                           compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._allummer_led_x_)
        self.button_tout_allumer['image'] = self.tout_allumer_icon
        self.button_tout_eteindre['image'] = self.tout_eteindre_icon
        self.button_allumer_led_x['image'] = self.eteindre_ledX
        ## --------------------- Slides --------------------------------------------------------------------
        # Both sliders start disabled; the mode buttons enable the relevant one.
        self.slider_allumer_LedNum = Scale(self.frame_scales, width=20, length=350, label="Allumer LED N° x/155", activebackground='white', from_=0, to=155,
                                           orient="horizontal", state=DISABLED, bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"),
                                           troughcolor="#424035", highlightbackground="#FFF3AE", command=self._on_scale_LedN)
        self.slider_intensity = Scale(self.frame_scales, width=20, length=350, label="Intensité", from_=0, to=150, orient="horizontal", state=DISABLED,
                                      troughcolor="#424035", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), bg="#212121",
                                      highlightbackground="#FFF3AE", command=self._on_scale_intensity)
        ## --------------------------- Set Positions -----------------------------------------------------
        self.dome_wind.rowconfigure(0, weight=1)
        self.dome_wind.columnconfigure(0, weight=1)
        self.frame_scales.grid(row=1, column=0, sticky='s')
        self.button_exit.pack(anchor=NW)
        self.button_tout_allumer.place(x=150, y=100)
        self.button_tout_eteindre.place(x=350, y=100)
        self.button_allumer_led_x.place(x=550, y=100)
        self.slider_allumer_LedNum.grid(row=5, column=0, pady=5, padx=5, sticky='s')
        self.slider_intensity.grid(row=7, column=0, pady=5, padx=5, sticky='s')
        self.frame.grid(row=0, column=0, sticky='news')
        self._label_mercurio_icon_.place(x=-15, y=425)
####---------------------------------------------------------------------------------------------------------
### -------------------------------- FUNCTIONS ---------------------------------------------------------------
def _on_scale_LedN(self, value):
print(value)
bus.write_byte(0x44, 1)
time.sleep(0.1)
bus.write_block_data(0x44, 0, [3, int(value)])
value=0
def _on_scale_intensity(self, value):
print(value)
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_intensity['troughcolor'] = 'green'
self.slider_intensity['state'] = 'active'
bus.write_block_data(0x44, 0, [2, int(value)])
def destroy_(self):
try:
bus.write_byte(0x44, 1)
except:
pass
self.dome_wind.destroy()
def _allOn_(self):
self._AllOff_()
bus.write_block_data(0x44, 0, [2, 15])
self.button_allumer_led_x['image'] = self.eteindre_ledX
self.slider_intensity['troughcolor'] = 'green'
self.slider_intensity['state'] = 'active'
self.slider_intensity.set(15)
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
def _AllOff_(self):
bus.write_byte(0x44, 1)
self.button_allumer_led_x['image'] = self.eteindre_ledX
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_intensity['state'] = 'disabled'
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
self.slider_intensity['troughcolor'] = '#a0a0a0'
def _allummer_led_x_(self):
bus.write_block_data(0x44, 0, [3, 0])
self.button_allumer_led_x['image'] = self.allumer_ledX
self.slider_allumer_LedNum['troughcolor'] = 'green'
self.slider_allumer_LedNum['state'] = 'active'
self.slider_intensity['state'] = 'disabled'
self.slider_intensity['troughcolor'] = '#a0a0a0'
## -----
    def reglage_cameratester(self):
        """Open the fullscreen camera-settings tester.

        With a camera connected, shows one scrollable Listbox per parameter
        (aperture, iso, whitebalance, shutterspeed); selecting an item
        pushes it to the camera via select_text().  Without a camera, shows
        a "no camera" message instead.
        """
        global camera
        camera = {}
        self.cam_wind = Toplevel()
        self.cam_wind.attributes('-fullscreen', True)
        #self.cam_wind.geometry("800x480")
        self.cam_wind.configure(bg="#212121")
        self.frame_exit = Frame(self.cam_wind, bg="#212121")
        self.frame = Frame(self.cam_wind, bg="#212121")
        self.camera_deconnctee_icon = ImageTk.PhotoImage(Image.open(icons_path_+"camera_deconnectee.png").resize((200, 200)), Image.BILINEAR)
        self._button_retour_icon_ = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((75, 75)), Image.BILINEAR)
        self.label_mercurio_icon_ = Label(self.cam_wind, image=self.icon_mercurio, bg="#212121")
        self.button_exit = Button(self.cam_wind, bg="#212121", command=self.cam_wind.destroy)
        self.button_exit['image'] = self._button_retour_icon_
        if settings.camera_available() == True :
            # NOTE(review): camera_infos is collected but never used below —
            # dead code?  Confirm before removing.
            camera_infos = []
            for line in settings.about_camera():
                line = str(line)[2:].split(':')
                camera_infos.append(line)
            # gphoto2-style values come back as "Label: value" strings.
            aperture = int(settings.image_data("aperture")['Current'].split(':')[-1])
            iso = int(settings.image_data("iso")['Current'].split(':')[-1])
            whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
            shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
            _parameters_ = {'aperture':aperture,
                            'iso':iso, 'whitebalance':whitebalance,
                            'shutterspeed':shutterspeed}
            display_list = list(_parameters_.keys())
            self.entry_param = []
            # One scrollable Listbox per parameter, filled with its choices.
            for i, param in enumerate(display_list):
                self.scrollbar = Scrollbar(self.frame, orient="vertical", width=35, bg="#FFF3AE", troughcolor="#212121")
                self.list_para = Listbox(self.frame, height=2, width=25, exportselection=0, font=("Roboto Mono", 20 * -1, "bold"), bg="#212121", fg="#FFF3AE",
                                         selectmode=SINGLE, yscrollcommand=self.scrollbar.set)
                para_list = settings.image_data(param)['Choices']
                # Each row is "<param> <choice-index> <label>" (parsed by select_text).
                for j in para_list:
                    self.list_para.insert(END, param+" "+j.split(" ")[-1]+" "+j.split(" ")[1])
                self.scrollbar.grid(row=i+1, column=2, padx=5, pady=20, sticky='news')
                self.list_para.grid(row=i+1, column=1, padx=15, pady=20, sticky='news')
                self.scrollbar.config(command=self.list_para.yview)
                self.list_para.bind('<<ListboxSelect>>', self.select_text)
            for i,d in enumerate(display_list):
                self.label = Label(self.frame, text=" "+d+" ", height=2, bd=2, width=20, relief="flat", font=("Roboto Mono", 15 * -1, "bold"), fg="#FFF3AE",
                                   bg="#212121").grid(row=i+1, column=0, padx=50, pady=20, sticky='news')
            # NOTE(review): camera is still the empty dict created above, so
            # this stores {} — confirm whether it should hold the read values.
            which["Camera"] = camera
        else :
            self.label = Label(self.frame, text=" Aucune caméra détectée, branchez la caméra SVP !", bg="#212121", width=50, font=("Roboto Mono", 16 * -1, "bold"),
                               fg="#FFF3AE").place(x=150, y=100)
            self.label_camera_deconnectee = Label(self.cam_wind, bg="#212121", image=self.camera_deconnctee_icon)
            self.label_camera_deconnectee.place(x=325, y=235)
            self.button_exit = Button(self.frame, bg="#212121", command=self.cam_wind.destroy)
            self.button_exit['image'] = self._button_retour_icon_
        self.cam_wind.rowconfigure(0, weight=1)
        self.cam_wind.columnconfigure(0, weight=1)
        self.frame_exit.grid(row=0, column=0, sticky='nw')
        self.frame.grid(row=0, column=0, sticky='news')
        self.button_exit.place(x=0, y=0)
        self.label_mercurio_icon_.place(x=-15, y=425)
def select_text(self, text):
self.selection = text.widget.curselection()
self.index = self.selection[0]
self.value = text.widget.get(self.index)
settings.set_camera_data(self.value.split(" ")[0], self.value.split(" ")[1])
print(self.value.split(" ")[0], self.value.split(" ")[-1])
    def mainloop(self):
        """Delegate the Tk event loop to ``self.interface``."""
        self.interface.mainloop()
class photographer_data(Tk):
    """Fullscreen form collecting photographer metadata (name, company, place).

    Saved values are written into the module-level ``who``/``where``/``when``
    dicts and serialised through ``json_file``/``metadata``.
    """
    def __init__(self):
        Tk.__init__(self)
        #self.geometry("800x480")
        self.attributes("-fullscreen", True)
        self.configure(bg="#212121")
        self.title("Dome")
        # Editable field labels and read-only date/time rows.
        self.data = ["PRENOM NOM", "SOCIETE", "LIEU de PDV"]
        self.date = ["DATE", "TIME"]
        keypad_frame = Frame(self, bg="#212121")
        self.label_frame = Frame(self, bg="#212121")
        for i, d in enumerate(self.data + self.date):
            Label(self.label_frame, text=" "+d+" ", height=2, bd=2, width=15, bg="#212121", fg="#FFF3AE",
                  font=("Roboto Mono", 12 * -1, "bold")).grid(row=i+1, column=0, padx=15, pady=5, sticky='news')
        # Current date and time are displayed but not editable.
        self.label_y = Label(self.label_frame, text=datetime.datetime.now().strftime("%d/%m/%Y"), height=1, bd=1, width=15, relief="flat",
                             bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 12 * -1, "bold"))
        self.label_y.grid(row=4, column=1, padx=15, pady=5, sticky='news')
        self.label_d = Label(self.label_frame, text=datetime.datetime.now().strftime("%H:%M"), height=1, bd=1, width=15, relief="flat",
                             bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 12 * -1, "bold"))
        self.label_d.grid(row=5, column=1, padx=15, pady=5, sticky='news')
        # One entry per editable field, gridded next to its label.
        self.entries = [Entry(self.label_frame, width=30, bd=3, bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1, "bold")) for _ in range(len(self.data))]
        self.entry_list = []
        for i, e in enumerate(self.entries):
            e.grid(row=i+1, column=1, padx=5, pady=5)
            self.entry_list.append(e)
        self.label_frame.place(x=100, y=50)
        # PhotoImage references live on self so Tk does not lose them to GC.
        self.image_de_retour = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
        self.icone_de_retour = ImageTk.PhotoImage(master=self, image=self.image_de_retour)
        self.btn_quit = Button(self, text='Sortir', bg="#212121", command=self.destroy)
        self.btn_quit['image'] = self.icone_de_retour
        self.btn_quit.place(x=0, y=0)
        # Save stays disabled until the user types something (see set_text).
        self.btn_save = Button(self, text='Enregistrer', bd=2, fg='#FFF3AE', bg='#212121', font=("Roboto Mono", 15 * -1, "bold"),
                               borderwidth=0, state=DISABLED, command=self.save_data)
        self.btn_save.pack(anchor=NE)
        # On-screen keyboard: rows 5-8 of the layout from settings.clavier().
        cara = settings.clavier()
        for car, grid_value in cara.items():
            row, col = grid_value[0], grid_value[1]
            if row not in (5, 6, 7, 8):
                continue
            btn_opts = dict(text=str(car), bg='#424035', fg='#FFF3AE', bd=5,
                            font=("Roboto Mono", 15 * -1, "bold"), borderwidth=0,
                            command=lambda x=car: self.set_text(x))
            grid_opts = dict(row=row, column=col, pady=2, sticky='news')
            if row == 5:  # top row gets a fixed width and horizontal padding
                btn_opts['width'] = 3
                grid_opts['padx'] = 1
            Button(keypad_frame, **btn_opts).grid(**grid_opts)
        Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
               borderwidth=0, command=self.delete_text).grid(row=8, column=11, pady=2, sticky='news')
        keypad_frame.place(x=135, y=325)
        self._logo_mercurio_ = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo_ = ImageTk.PhotoImage(master=self, image=self._logo_mercurio_)
        self.__label_logo_ = Label(self, image=self.__logo_, bg="#212121")
        self.__label_logo_.place(x=-15, y=425)
    def set_text(self, text):
        """Keyboard callback: append *text* to the focused entry and enable Save."""
        self.btn_save['state'] = NORMAL
        self.btn_save['bg'] = "#424035"
        widget = self.focus_get()
        if widget in self.entries:
            widget.insert("insert", text)
    def delete_text(self):
        """Clear the focused entry.

        Guarded so pressing "<" while focus is on a non-entry widget does
        not raise (Button has no ``delete``).
        """
        widget = self.focus_get()
        if widget in self.entries:
            widget.delete(0, END)
    def save_data(self):
        """Persist the entered values into who/where/when and show a confirmation."""
        data_dict = {}
        # Key each entry's value by its field label from self.data.
        for s, entry in enumerate(self.entry_list):
            data_dict[self.data[s]] = entry.get()
        print(data_dict)
        # BUG FIX: the key must match the label in self.data ("PRENOM NOM");
        # the previous placeholder key always raised KeyError.
        who["Actor"] = data_dict["PRENOM NOM"]
        who["Company"] = data_dict["SOCIETE"]
        where["Place"] = data_dict["LIEU de PDV"]
        when["Date"] = datetime.datetime.now().strftime("%d/%m/%Y")
        when["Time"] = datetime.datetime.now().strftime("%H:%M")
        json_file(metadata(who=who, where=where, when=when))
        # Status-LED acknowledgement (bytes 13 then 0 — presumably a colour
        # flash; verify against firmware).  Best effort when no I2C device.
        try:
            bus.write_byte(0x44, 13)
            time.sleep(0.1)
            bus.write_byte(0x44, 0)
        except Exception:  # narrowed from bare except
            pass
        # Fullscreen confirmation window.
        new_wind = Toplevel(self)
        #new_wind.geometry("800x480")
        new_wind.attributes("-fullscreen", True)
        new_wind.configure(bg="#212121")
        new_wind.title("info")
        Label(new_wind, text=" Données Enregistrées avec Succès !", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 18 * -1, "bold")).place(x=150, y=100)
        self.image_de_retour_ = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
        self.icone_de_retour_ = ImageTk.PhotoImage(master=new_wind, image=self.image_de_retour_)
        Button(new_wind, text="Sortir", bg="#212121", fg="#FFF3AE", image=self.icone_de_retour_, command=new_wind.destroy).pack(side=TOP, anchor=NW)
        self._logo_mercurio_s = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo__ = ImageTk.PhotoImage(master=new_wind, image=self._logo_mercurio_s)
        Label(new_wind, image=self.__logo__, bg="#212121").place(x=-15, y=425)
#################################################################################################
############################### Camera INFOs ##################################################
class camera_info(Tk):
    """Fullscreen read-only display of the connected camera's main settings.

    Also records those settings into the module-level ``which`` dict and
    serialises them via ``json_file``/``metadata``.
    """
    global camera_
    camera_ = {}
    def __init__(self):
        Tk.__init__(self)
        #self.geometry("800x480")
        self.attributes("-fullscreen", True)
        self.configure(bg="#212121")
        self.title("Dome")
        keypad_frame = Frame(self, bg="#212121")
        self.exit_frame = Frame(self, bg="#212121")
        self.label_frame = Frame(self, bg="#212121")
        if settings.camera_available() == True :
            # gphoto2-style values come back as "Label: value" strings.
            aperture = int(settings.image_data("aperture")['Current'].split(':')[-1])
            iso = int(settings.image_data("iso")['Current'].split(':')[-1])
            whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
            shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
            model = settings.image_data("cameramodel")['Current'].split(':')[-1]
            # BUG FIX: the focal length must be fetched *before* it is stored.
            # Previously ``focal`` was referenced before assignment inside a
            # try/except-pass, so which["Camera"]["Focal"] was silently
            # never written.
            try:
                focal = settings.focal_length()
            except Exception:
                focal = "nA"
            which["Camera"]["Model"] = model
            which["Camera"]["Focal"] = focal
            which["Camera"]["Iso"] = iso
            which["Camera"]["Aperture"] = aperture
            which["Camera"]["Whitebalance"] = whitebalance
            which["Camera"]["Shutterspeed"] = shutterspeed
            json_file(metadata(which=which))
            additional_parameters = {'Focal':focal, 'Aperture':aperture,
                                     'ISO':iso, 'Whitebalance':whitebalance,
                                     'Shutterspeed':shutterspeed, 'Model':model}
            # Left column: parameter names; right column: their values.
            for i, d in enumerate(additional_parameters):
                Label(self.label_frame, text=" "+d+" ", height=2, bd=2, width=15, relief="flat", bg='#424035', font=('helvetica', 12, 'bold'),
                      fg="#FFF3AE").grid(row=i+1, column=0, padx=40, pady=15, sticky='news')
            for i, d in enumerate(additional_parameters.values()):
                Label(self.label_frame, text=" "+str(d)+" ", height=2, bd=2, bg='#424035', fg="#FFF3AE", width=40, font=("Roboto Mono", 16 * -1, "bold")
                      ).grid(row=i+1, column=1, padx=40, pady=15, sticky='news')
        else :
            Label(self, text=" Aucune caméra détectée, branchez la caméra SVP !", bg="#212121", width=50, font=("Roboto Mono", 16 * -1, "bold"),
                  fg="#FFF3AE").place(x=150, y=100)
        # Back button and logo (image refs kept on self for Tk's GC).
        self.image_quitter_icon = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
        self._icon_quitter_ = ImageTk.PhotoImage(master=self.label_frame, image=self.image_quitter_icon)
        self.btn_quit = Button(self, text='Sortir', bg="#212121", command=self.destroy)
        self.btn_quit['image'] = self._icon_quitter_
        self.btn_quit.pack(side=TOP, anchor=NW)
        self._logo_mercurio_cam = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo__cam = ImageTk.PhotoImage(master=self, image=self._logo_mercurio_cam)
        Label(self, image=self.__logo__cam, bg="#212121").place(x=-15, y=425)
        self.label_frame.place(x=100, y=100)
def save_camera_data():
    """Read the camera's current settings into the module-level ``which`` dict.

    Returns ``which`` with its "Camera" sub-dict updated (model, focal,
    iso, aperture, whitebalance, shutterspeed).
    """
    aperture = int(settings.image_data("aperture")['Current'].split(':')[-1])
    iso = int(settings.image_data("iso")['Current'].split(':')[-1])
    whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
    shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
    model = settings.image_data("cameramodel")['Current'].split(':')[-1]
    # BUG FIX: ``focal`` was never defined in this function, so the
    # try/except-pass silently skipped recording it.  Fetch it like
    # camera_info does; still best-effort when the lens reports nothing.
    try:
        which["Camera"]["Focal"] = settings.focal_length()
    except Exception:
        pass
    which["Camera"]["Model"] = model
    which["Camera"]["Iso"] = iso
    which["Camera"]["Aperture"] = aperture
    which["Camera"]["Whitebalance"] = whitebalance
    which["Camera"]["Shutterspeed"] = shutterspeed
    return which
class environement_data:
    """Fullscreen form for acquisition-environment metadata.

    Editable fields (appellation, description, project, LP filename) are
    written into the module-level ``what``/``why``/``how`` dicts; the
    technique ("RTI") and dome diameter ("750") are fixed labels.
    """
    def __init__(self):
        self.envi_wind = Tk()
        #self.envi_wind.geometry("800x480")
        self.envi_wind.attributes("-fullscreen", True)
        self.envi_wind.title('environment')
        self.envi_wind.configure(bg="#212121")
        self.frame_exit = Frame(self.envi_wind, bg="#212121")
        self.frame = Frame(self.envi_wind, bg="#212121")
        keypad_frame = Frame(self.envi_wind, bg="#212121")
        self.environment_list = ["Technique", "Diamètre du Dôme mm"]
        self.environment_to_edit = ["Appelation", "Description", "Projet", "LP Filename"]
        self.sortir_icone = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
        self.___sortir_icn__ = ImageTk.PhotoImage(master=self.envi_wind, image=self.sortir_icone)
        self.button_exit = Button(self.envi_wind, text="Sortir", bg="#212121", command=self.envi_wind.destroy)
        self.button_exit['image'] = self.___sortir_icn__
        # Field labels: editable ones first, then the fixed ones.
        for i, data in enumerate(self.environment_to_edit + self.environment_list):
            label = Label(self.frame, text=data, width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"))
            label.grid(row=i, column=0, padx=10, pady=10, sticky='news')
        self.entries = [Entry(self.frame, width=30, bd=2, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")) for _ in range(len(self.environment_to_edit))]
        self.entry_list = list(self.entries)
        # Pre-fill fields that already hold stored metadata.
        self.entry_list[0].insert(END, what["Appelation"])
        self.entry_list[-1].insert(END, how["Modality"]["Protocol"]["Detail"]["LPFilename"])
        for i, e in enumerate(self.entry_list):
            e.grid(row=i, column=1, padx=5, pady=5)
        # Fixed values for the non-editable rows.  NOTE: the previous
        # 'stick=' spelling only worked via Tcl's option-prefix matching;
        # spelled out as 'sticky=' for clarity.
        Label(self.frame, text="RTI", width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")
              ).grid(row=4, column=1, padx=5, pady=5, sticky='news')
        Label(self.frame, text="750", width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")
              ).grid(row=5, column=1, padx=5, pady=5, sticky='news')
        # Save stays disabled until the user types something (see set_text).
        self.btn_save = Button(self.envi_wind, text='Enregistrer', bd=2, fg='#FFF3AE', bg='#212121', font=("Roboto Mono", 15 * -1, "bold"),
                               borderwidth=0, state=DISABLED, command=self.save_data)
        self.btn_save.pack(anchor=NE)
        # On-screen keyboard: rows 5-8 of the layout from settings.clavier().
        cara = settings.clavier()
        for car, grid_value in cara.items():
            row, col = grid_value[0], grid_value[1]
            if row not in (5, 6, 7, 8):
                continue
            btn_opts = dict(text=str(car), bg='#424035', fg='#FFF3AE', bd=5,
                            font=("Roboto Mono", 15 * -1, "bold"), borderwidth=0,
                            command=lambda x=car: self.set_text(x))
            grid_opts = dict(row=row, column=col, pady=2, sticky='news')
            if row == 5:  # top row gets a fixed width and horizontal padding
                btn_opts['width'] = 3
                grid_opts['padx'] = 1
            Button(keypad_frame, **btn_opts).grid(**grid_opts)
        Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
               borderwidth=0, command=self.delete_text).grid(row=8, column=11, pady=2, sticky='news')
        self.envi_wind.rowconfigure(0, weight=1)
        self.envi_wind.columnconfigure(0, weight=1)
        self.frame.place(x=75, y=50)
        keypad_frame.place(x=135, y=325)
        self.button_exit.place(x=0, y=0)
        self._logo_mercurio_env = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo__env = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_env)
        Label(self.envi_wind, image=self.__logo__env, bg="#212121").place(x=-15, y=425)
    def set_text(self, text):
        """Keyboard callback: append *text* to the focused entry and enable Save."""
        self.btn_save['state'] = NORMAL
        self.btn_save['bg'] = "#424035"
        widget = self.envi_wind.focus_get()
        if widget in self.entries:
            widget.insert("insert", text)
    def delete_text(self):
        """Clear the focused entry (guarded: no-op when focus is not on an entry)."""
        widget = self.envi_wind.focus_get()
        if widget in self.entries:
            widget.delete(0, END)
    def save_data(self):
        """Write the entered values into what/why/how and show a confirmation."""
        global data
        data_dict = {}
        # Key each entry's value by its field label.
        for s, entry in enumerate(self.entry_list):
            data = entry.get()
            data_dict[self.environment_to_edit[s]] = data
        what["Description"] = data_dict["Description"]
        what["Appelation"] = data_dict["Appelation"]
        why["Project"] = data_dict["Projet"]
        how["Modality"]["Protocol"]["Detail"]["LPFilename"] = data_dict["LP Filename"]
        json_file(metadata(what=what, why=why, how=how))
        # Status-LED acknowledgement (bytes 13 then 0); best effort.
        try:
            bus.write_byte(0x44, 13)
            time.sleep(0.1)
            bus.write_byte(0x44, 0)
        except Exception:  # narrowed from bare except
            pass
        # Fullscreen confirmation window.
        new_wind = Toplevel()
        new_wind.title("info")
        #new_wind.geometry("800x480")
        new_wind.attributes("-fullscreen", True)
        new_wind.configure(bg="#212121")
        Label(new_wind, text="Donneés enregistrées avec Succès !", bg="#212121", fg="#FFF3AE",
              font=("Roboto Mono", 16 * -1, "bold")).place(x=150, y=100)
        self.image_de_retour_ = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
        self.icone_de_retour_ = ImageTk.PhotoImage(master=new_wind, image=self.image_de_retour_)
        Button(new_wind, text="Sortir", bg="#212121", fg="#FFF3AE", image=self.icone_de_retour_, command=new_wind.destroy).pack(side=TOP, anchor=NW)
        self._logo_mercurio_s = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo__ = ImageTk.PhotoImage(master=new_wind, image=self._logo_mercurio_s)
        Label(new_wind, image=self.__logo__, bg="#212121").place(x=-15, y=425)
class others:
    """Read-only "About" window: version, contact, description, website link.

    NOTE(review): no caller of this class is visible in this part of the
    file (the "Autres" menu button opens ``_camera_folder_`` instead) —
    confirm it is still reachable.
    """
    def __init__(self):
        self.envi_wind = Tk()
        self.envi_wind.attributes('-fullscreen', True)
        self.envi_wind.title('Autres')
        self.envi_wind.configure(bg="#212121")
        #self.envi_wind.geometry("800x480")
        self.frame_exit = Frame(self.envi_wind, bg="#212121")
        self.frame = Frame(self.envi_wind, bg="#212121")
        keypad_frame = Frame(self.envi_wind, bg="#212121")
        # Row titles and their static values.
        # NOTE(review): "<EMAIL>" looks like a redacted placeholder —
        # restore the real contact address before release.
        self.environment_list = ["Version", "Contact", "A Propos"]
        self.autres_data = ["1.0.0", "<EMAIL>", "Imagerie d'expertise"]
        self.retour___icone = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
        self.retour____ = ImageTk.PhotoImage(master=self.envi_wind, image=self.retour___icone)
        self.button_exit = Button(self.frame_exit, text="Sortir", bg='#212121', command=self.envi_wind.destroy)
        self.button_exit['image'] = self.retour____
        # Left column: titles; right column: values.
        for i, data in enumerate(self.environment_list):
            label = Label(self.frame, text=data, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
            label.grid(row=i, column=0, padx=25, pady=35, sticky='news')
        for i, e in enumerate(self.autres_data):
            label = Label(self.frame, text=e, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 15 * -1, "bold"), width=30)
            label.grid(row=i, column=1, padx=5, pady=35, sticky='news')
        # Clickable website row opens the browser via callback().
        web_label = Label(self.frame, text="Notre Site Web", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
        web_label.grid(row=len(self.environment_list), column=0, padx=10, pady=35, sticky='news')
        web_label_ = Label(self.frame, text="mercurioimaging.com", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 15 * -1, "bold"), cursor="hand2", width=30)
        web_label_.grid(row=len(self.environment_list), column=1, padx=5, pady=35, sticky='news')
        web_label_.bind("<Button-1>", lambda e: self.callback("https://mercurioimaging.com/"))
        self.envi_wind.rowconfigure(0, weight=1)
        self.envi_wind.columnconfigure(0, weight=1)
        self.frame_exit.grid(row=0, column=0, sticky='nw')
        self.frame.grid(row=0, column=0, sticky='n')
        self.button_exit.grid(row=0, column=0, sticky='news')
        self._logo_mercurio_au = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
        self.__logo__au = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_au)
        self.__label_logo__au = Label(self.envi_wind, image=self.__logo__au, bg="#212121").place(x=-15, y=425)
    def callback(self, url):
        """Open *url* in the system web browser."""
        webbrowser.open_new(url)
class _camera_folder_:
global camera_folder
def __init__(self):
self.envi_wind = Tk()
#self.envi_wind.attributes('-fullscreen', True)
self.envi_wind.title('environment')
self.envi_wind.geometry('800x480')
self.envi_wind.configure(bg="#212121")
self.frame = Frame(self.envi_wind, bg="#212121")
keypad_frame = Frame(self.envi_wind, bg="#212121")
self.camera_folder_label = "Dossier des images"
self.camera_folder = "/store_00020001/DCIM/100CANON"
self.icone_deRetour = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.icone_Ret = ImageTk.PhotoImage(master=self.envi_wind, image=self.icone_deRetour)
self.button_exit = Button(self.envi_wind, text="Sortir", bg='#212121', command=self.envi_wind.destroy)
self.button_exit['image'] = self.icone_Ret
self.button_exit.pack(side=TOP, anchor=NW)
self.button_modifier = Button(self.envi_wind, text="Modifier", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"),
command=self.edit_camera_folder)
self.button_modifier.pack(anchor=NE)
self.label_camera_text = Label(self.frame, text=self.camera_folder_label, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
self.label_camera_text.grid(row=1, column=0, sticky='news')
self.label_camera_folder = Label(self.frame, text=self.camera_folder, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
self.label_camera_folder.grid(row=1, column=1, sticky='news')
self.envi_wind.rowconfigure(0, weight=1)
self.envi_wind.columnconfigure(0, weight=1)
self.frame.place(x=100, y=100)
self._logo_mercurio_a = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__a = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_a)
self.__label_logo__a = Label(self.envi_wind, image=self.__logo__a, bg="#212121").place(x=-15, y=425)
def edit_camera_folder(self):
self.camera_folder_editer = Entry(self.frame, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=4)
self.camera_folder_editer.grid(row=1, column=1, pady=10, sticky='news')
self.button_modifier['text'] = "Enregistrer"
self.button_modifier['command'] = self.save_data
keypad_frame = Frame(self.envi_wind, bg="#212121")
cara = settings.numerical_pad()
for car, grid_value in cara.items():
if grid_value[0] == 5:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 6:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 7:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 8:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
delete_button = Button(keypad_frame, text="<", bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), padx=1, width=4,
borderwidth=0, command=self.delete_text).grid(row=8, column=4, pady=2, sticky='news')
keypad_frame.place(x=300, y=300)
def set_text(self, text):
self.camera_folder_editer.insert("insert", text)
def delete_text(self):
self.camera_folder_editer.delete(0, END)
def save_data(self):
    """Commit the keypad-entered folder number and leave edit mode.

    Builds the gphoto2 camera path "/store_00020001/DCIM/<nnn>CANON" from
    the Entry contents, restores the label row, flips the button back to
    "Modifier"/edit_camera_folder, and briefly pulses the i2c peripheral
    at 0x44 to acknowledge the save.  Note the built path is only shown,
    not stored on self -- presumably the module-level camera_folder is
    what the capture commands actually use (TODO confirm).
    """
    camera_folder = "/store_00020001/DCIM/"+str(self.camera_folder_editer.get())+"CANON"
    print(camera_folder)
    self.label_camera_text.config(text="Dossier des images")
    self.label_camera_text.grid(row=2, column=0, pady=10, sticky='news')
    self.label_camera_folder.config(text=camera_folder)
    self.label_camera_folder.grid(row=2, column=1, pady=10, sticky='news')
    self.button_modifier['text'] = "Modifier"
    self.button_modifier['command'] = self.edit_camera_folder
    try:
        # Command 13 then 0 on the 0x44 peripheral -- presumably a short
        # LED blink as feedback (TODO confirm against the firmware).
        bus.write_byte(0x44, 13)
        time.sleep(0.1)
        bus.write_byte(0x44, 0)
    except:
        # Best effort: a missing i2c peripheral must not break the UI.
        pass
##### -- -- ---
def main():
    """Kill stale processes (settings.killprocess) and build the UI.

    Returns whatever user_interface() creates; the caller invokes
    .mainloop() on it, so presumably a Tk root/window (TODO confirm).
    """
    settings.killprocess()
    return user_interface()
def copy_to_usb(folder, project_name):
    """Zip *folder* onto the first USB stick mounted under /media/pi.

    Plays a buzzer tone, starts a background thread blinking the green
    LED, then writes "<project_name>.zip" at the root of the stick.
    If nothing is mounted, only a prompt is printed (the LED thread has
    already been started either way).
    """
    mario_sound(100)
    media_path = "/media/pi/"
    folders_in_media = os.listdir(media_path)
    blink = threading.Thread(target=flash_green)
    blink.start()
    if len(folders_in_media) == 0:
        # NOTE(review): typo in the user-facing string ("Inserérez");
        # left untouched because it is runtime output.
        print("Inserérez une clé USB")
    else:
        usb_path = media_path+folders_in_media[0]
        ### Make Zip
        shutil.make_archive(usb_path+"/"+project_name, 'zip', folder)
def mario_sound(frq):
    """Ask the i2c peripheral at 0x44 to play a tone (command 8, freq *frq*).

    i2c errors are swallowed so the app keeps working without the buzzer.
    """
    try:
        bus.write_block_data(0x44, 0, [8, frq])
    except:
        pass
def trois_colors(frq):
    """Cycle the status LED through its three colours (i2c commands 11/12/13).

    Despite the name, *frq* is the dwell time in seconds per colour.
    i2c errors are ignored (best effort).
    """
    try:
        bus.write_byte(0x44, 11)
        time.sleep(frq)
        bus.write_byte(0x44, 12)
        time.sleep(frq)
        bus.write_byte(0x44, 13)
        time.sleep(frq)
    except:
        pass
def trois_colors_250():
    """Cycle the status LED through its three colours with a 250 ms dwell.

    Behaviour-identical convenience wrapper: the body previously duplicated
    trois_colors() verbatim with hard-coded 0.25 s sleeps.
    """
    trois_colors(0.25)
def flash_green():
    """Send the 'green' i2c command (11) 20 times at 0.1 s intervals (~2 s).

    Run in a background thread by copy_to_usb.  The same command is
    re-sent rather than toggled, so presumably the peripheral handles the
    blinking itself (TODO confirm).  i2c errors abort silently.
    """
    try:
        for i in range(20):
            bus.write_byte(0x44, 11)
            time.sleep(0.1)
    except:
        pass
def mario_s():
    """Play the Super Mario opening jingle on the i2c buzzer.

    The melody is expressed as a table of (frequency, pause-in-seconds)
    pairs instead of the original copy-pasted call sequence; the note
    order and timing are unchanged.
    """
    melody = [
        (2640, 0.15),
        (2640, 0.3),
        (2640, 0.3),
        (2040, 0.1),
        (2640, 0.3),
        (3080, 0.55),
        (1520, 0.575),
    ]
    for frq, pause in melody:
        mario_sound(frq)
        time.sleep(pause)
def led_1_ctrl(state): ### state should be 0 or 1
    """Switch LED 1 via the i2c peripheral (command 10); *state* is 0 or 1.

    i2c errors are swallowed (best effort).
    """
    try:
        bus.write_block_data(0x44, 0, [10, state])
    except:
        pass
def led_2_ctrl(state):
    """Drive LED 2 directly from GPIO pin 4 (BCM numbering); *state* is 0 or 1.

    Any other value is silently ignored.  The GPIO mode is (re)configured
    on every call, so the function is safe to call first.
    """
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(4,GPIO.OUT)
    if state == 1:
        GPIO.output(4, GPIO.HIGH)
    elif state == 0:
        GPIO.output(4, GPIO.LOW)
if __name__ == '__main__':
    bus = smbus.SMBus(1) #### Enable i2c
    # Tell the camera to store captures on its own card instead of RAM.
    try:
        os.system("gphoto2 --set-config capturetarget=1")
    except:
        pass
    led_1_ctrl(0)
    led_2_ctrl(0)
    icons_path_ = "/home/pi/grandDome/ICONES/"
    ### ---- Create Folders ---------------
    # NOTE(review): one try around both mkdirs means the second is skipped
    # whenever the first already exists -- verify this is intended.
    try:
        os.mkdir("/home/pi/grandDome/data")
        os.mkdir("/home/pi/grandDome/images/rti")
    except:
        pass
    data_path = "/home/pi/grandDome/data/"
    image_path = "/home/pi/grandDome/images/"
    rti_path = "/home/pi/grandDome/images/rti/"
    lp_path = "/home/pi/grandDome/LPFiles/"
    ### Camera options
    camera_folder = "/store_00020001/DCIM/100CANON"
    try:
        subprocess.run(["gphoto2", "--set-config", "eosremoterelease=4"]) #### Release = Immediate 5 --- Release Full 4
    except:
        pass
    trigCMD = ["--trigger-capture"]
    download_allCMD = ["--get-all-files"] ## download files
    clearCMD = ["--folder", camera_folder, "-R", "--delete-all-files"] ### To Change if the camera is not Canon !!
    shot_date = datetime.datetime.now().strftime("%Y%m%d")
    shot_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
    picID = "RTI"
    rti_folder_name = picID + shot_time
    save_location = "./images/"
    # NOTE(review): trois_colors takes seconds, so 500 means >8 minutes per
    # colour -- looks like it was meant to be milliseconds; verify.
    trois_colors(500)
    mario_sound(3000)
    # Rebinding `main` to the window returned by main() shadows the
    # function; deliberate here since main() is only called once.
    main = main()
    led_1_ctrl(0)
    led_2_ctrl(0)
    main.mainloop()
| 2.171875 | 2 |
xml-json/json/json_req.py | sanikamal/awesome-python-examples | 1 | 12759740 | # using the requests library to access internet data
#import the requests library
import requests
import json
def main():
    """Fetch sample JSON from httpbin and demonstrate parsing it."""
    # Plain HTTP GET; requests decodes the JSON body for us.
    response = requests.get("http://httpbin.org/json")
    parsed = response.json()
    # Pretty-print the whole document, then poke at individual fields.
    print(json.dumps(parsed, indent=4))
    print(list(parsed.keys()))
    slideshow = parsed['slideshow']
    print(slideshow['title'])
    print("There are {0} slides".format(len(slideshow['slides'])))


if __name__ == "__main__":
    main()
| 3.6875 | 4 |
satgenpy/satgen/dynamic_state/algorithm_free_one_only_over_isls.py | KaushikChavali/hypatia | 70 | 12759741 | # The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .fstate_calculation import *
def algorithm_free_one_only_over_isls(
        output_dynamic_state_dir,
        time_since_epoch_ns,
        satellites,
        ground_stations,
        sat_net_graph_only_satellites_with_isls,
        ground_station_satellites_in_range,
        num_isls_per_sat,
        sat_neighbor_to_if,
        list_gsl_interfaces_info,
        prev_output,
        enable_verbose_logs
):
    """
    FREE-ONE ONLY OVER INTER-SATELLITE LINKS ALGORITHM

    "one"
    This algorithm assumes that every satellite and ground station has exactly 1 GSL interface.

    "free"
    This 1 interface is bound to a maximum outgoing bandwidth, but can send to any other
    GSL interface (well, satellite -> ground-station, and ground-station -> satellite) in
    range. ("free") There is no reciprocation of the bandwidth asserted.

    "only_over_isls"
    It calculates a forwarding state, which is essentially a single shortest path.
    It only considers paths which go over the inter-satellite network, and does not make use of ground
    stations relay. This means that every path looks like:
    (src gs) - (sat) - (sat) - ... - (sat) - (dst gs)

    Writes "gsl_if_bandwidth_<t>.txt" into output_dynamic_state_dir (full
    content only at t=0) and returns {"fstate": <forwarding state>} computed
    by calculate_fstate_shortest_path_without_gs_relaying (which writes only
    the delta versus prev_output["fstate"], when prev_output is given).
    """
    if enable_verbose_logs:
        print("\nALGORITHM: FREE ONE ONLY OVER ISLS")

    # Check the graph
    # Precondition: the ISL graph holds exactly the satellite nodes and no
    # satellite-to-ground edges.
    if sat_net_graph_only_satellites_with_isls.number_of_nodes() != len(satellites):
        raise ValueError("Number of nodes in the graph does not match the number of satellites")
    for sid in range(len(satellites)):
        for n in sat_net_graph_only_satellites_with_isls.neighbors(sid):
            if n >= len(satellites):
                raise ValueError("Graph cannot contain satellite-to-ground-station links")

    #################################
    # BANDWIDTH STATE
    #
    # There is only one GSL interface for each node (pre-condition), which as-such will get the entire bandwidth
    output_filename = output_dynamic_state_dir + "/gsl_if_bandwidth_" + str(time_since_epoch_ns) + ".txt"
    if enable_verbose_logs:
        print(" > Writing interface bandwidth state to: " + output_filename)
    with open(output_filename, "w+") as f_out:
        # Bandwidth never changes under this algorithm, so only the t=0 file
        # carries content; later files are created empty (no delta).
        if time_since_epoch_ns == 0:
            for node_id in range(len(satellites)):
                f_out.write("%d,%d,%f\n"
                            % (node_id, num_isls_per_sat[node_id],
                               list_gsl_interfaces_info[node_id]["aggregate_max_bandwidth"]))
            for node_id in range(len(satellites), len(satellites) + len(ground_stations)):
                # Ground stations have no ISLs, hence interface offset 0.
                f_out.write("%d,%d,%f\n"
                            % (node_id, 0, list_gsl_interfaces_info[node_id]["aggregate_max_bandwidth"]))

    #################################
    # FORWARDING STATE
    #
    # Previous forwarding state (to only write delta)
    prev_fstate = None
    if prev_output is not None:
        prev_fstate = prev_output["fstate"]

    # GID to satellite GSL interface index
    gid_to_sat_gsl_if_idx = [0] * len(ground_stations)  # (Only one GSL interface per satellite, so the first)

    # Forwarding state using shortest paths
    fstate = calculate_fstate_shortest_path_without_gs_relaying(
        output_dynamic_state_dir,
        time_since_epoch_ns,
        len(satellites),
        len(ground_stations),
        sat_net_graph_only_satellites_with_isls,
        num_isls_per_sat,
        gid_to_sat_gsl_if_idx,
        ground_station_satellites_in_range,
        sat_neighbor_to_if,
        prev_fstate,
        enable_verbose_logs
    )

    if enable_verbose_logs:
        print("")

    return {
        "fstate": fstate
    }
| 1.539063 | 2 |
spring2021/hw1/avgpool.py | siyao-7/11775-hws | 0 | 12759742 | <reponame>siyao-7/11775-hws
import os
import numpy as np

# Pool SoundNet layer-18 features over time for every clip that has them,
# writing one CSV per clip into ./avgpool18/.
files = os.listdir('./soundnet/')
layer = '/tf_fea18.npy'
for filename in files:
    if os.path.exists('./soundnet/'+filename+layer):
        raw_feat=np.load('./soundnet/'+filename+layer)
        # NOTE(review): despite the "avgpool" naming this is a MAX over
        # axis 0, not a mean -- confirm which pooling was intended.
        raw_feat1 = np.max(raw_feat,0).tolist()
        # Assumes ./avgpool18/ already exists; np.savetxt will not create it.
        np.savetxt('./avgpool18/'+filename+'.csv', raw_feat1)
| 2.28125 | 2 |
tests/django_app/models.py | orderbird/aserializer | 0 | 12759743 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.db import models
class SimpleDjangoModel(models.Model):
    """Stand-alone fixture model for the serializer test app."""
    # Required display name (blank disallowed in form validation).
    name = models.CharField(max_length=24, blank=False)
    # Short free-form code.
    code = models.CharField(max_length=4)
    number = models.IntegerField(blank=False)
class RelatedDjangoModel(models.Model):
    """Fixture model holding a FK onto SimpleDjangoModel (reverse accessor
    "relations").

    NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
    """
    name = models.CharField(max_length=24, blank=False)
    relation = models.ForeignKey(SimpleDjangoModel, related_name='relations')
| 2.34375 | 2 |
services/read_scp_write_npy_embeddings.py | ishine/self_supervised_AHC | 10 | 12759744 | <filename>services/read_scp_write_npy_embeddings.py
import os
import numpy as np
import glob
import kaldi_io
import sys

# Convert a Kaldi scp/ark of matrices or vectors (e.g. x-vectors) into one
# .npy file per utterance key inside the requested output folder.

#Usage
if len(sys.argv)!=4:
    print("Need 3 input arguments!")
    print("Usage :")
    print("python read_scp_write_npy_embeddings.py <mat/vec> <complete path of ark/scp file> <path of output folder to store numpy output>")
    print("<mat/vec> : mat if scp contains matrix , vec if scp contains vector e.g. x-vectors")
    # Bug fix: previously execution fell through and crashed on sys.argv[2].
    sys.exit(1)

arkscppath = sys.argv[2]
outputnpyfilepath = sys.argv[3]

if not os.path.isdir(outputnpyfilepath):
    print('Creating directory where npy scores will be saved : {}'.format(outputnpyfilepath))
    os.makedirs(outputnpyfilepath)
else:
    # Refuse to overwrite an existing output folder.
    print("xvectors numpy path exists !")
    exit()

# Bug fix: the extension must come from the ark/scp argument -- the original
# referenced an undefined name `file_name` and always raised NameError here.
ext = os.path.splitext(arkscppath)[1]

if sys.argv[1]=='mat':
    #for score files
    if ext==".scp":
        d = { key:mat for key,mat in kaldi_io.read_mat_scp(arkscppath) }
    else:
        print("File type not correct. scp required.")
        sys.exit(1)  # without a dict `d` the loop below would raise NameError
elif sys.argv[1]=='vec':
    #for embeddings
    if ext==".scp":
        d = { key:mat for key,mat in kaldi_io.read_vec_flt_scp(arkscppath) }
    elif ext == ".ark":
        d = { key:mat for key,mat in kaldi_io.read_vec_flt_ark(arkscppath) }
    else:
        print("File type not correct. scp/ark required.")
        sys.exit(1)
else:
    print("first argument should be mat/vec ")
    sys.exit(1)

# Dump every utterance as its own .npy file, reporting progress every 100.
for count,(i,j) in enumerate(d.items()):
    if count % 100 == 0:
        print("Done with {} files".format(count))
    np.save(outputnpyfilepath+'/'+i,j)
| 2.796875 | 3 |
123.py | ricky10116/Dos-cmd-for-LV-and-python-template | 0 | 12759745 | <gh_stars>0
from argparse import ArgumentParser
def build_parser():
    """Build the command-line parser exposing a single --input option."""
    cli = ArgumentParser()
    cli.add_argument('--input', dest='input')
    return cli
# Parse the command line once at import time; `options` is read by main().
parser = build_parser()
options = parser.parse_args()
def main():
    """Print the numeric value of --input plus ten."""
    raw_value = options.input
    shifted = float(raw_value) + 10
    print(shifted)


if __name__ == '__main__':
    main()
| 3.359375 | 3 |
accelerator/standard_methods/a_csvexport.py | drougge/accelerator | 0 | 12759746 | <filename>accelerator/standard_methods/a_csvexport.py
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# Modifications copyright (c) 2018-2020 <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import division
from __future__ import absolute_import
description = r'''Dataset (or chain) to CSV file.'''
from shutil import copyfileobj
from os import unlink
from os.path import exists
from contextlib import contextmanager
from json import JSONEncoder
from functools import partial
import gzip
from accelerator.compat import PY3, PY2, izip, imap
from accelerator import OptionString, status
# Job options; `filename` is mandatory (OptionString has no default).
options = dict(
    filename = OptionString, # .csv or .gz
    separator = ',',
    line_separator = '\n',
    labelsonfirstline = True,
    chain_source = False, # everything in source is replaced by datasetchain(self, stop=from previous)
    quote_fields = '', # can be any string, but use '"' or "'"
    labels = [], # empty means all labels in (first) dataset
    sliced = False, # one output file per slice, put %02d or similar in filename
    compression = 6, # gzip level
)

datasets = (['source'],) # normally just one, but you can specify several

jobs = ('previous',)
@contextmanager
def writer(fh):
    """Yield a callable that writes a string followed by the configured
    line separator to *fh*; *fh* is closed when the context exits."""
    with fh:
        raw_write = fh.write
        sep = options.line_separator
        def write_line(s):
            raw_write(s)
            raw_write(sep)
        yield write_line
# On Python 3 text goes through unchanged; on Python 2 unicode values must
# be UTF-8 encoded before hitting the byte-oriented file object.
if PY3:
    enc = str
else:
    enc = lambda s: s.encode('utf-8')
def nonefix_u(s):
    """Map None to the text u'None'; pass any other value through."""
    if s is None:
        return u'None'
    return s
def nonefix_b(s):
    """Map None to the bytes b'None'; pass any other value through."""
    if s is None:
        return b'None'
    return s
def csvexport(sliceno, filename, labelsonfirstline):
    """Write one slice of the source dataset(s) to *filename* as CSV.

    sliceno           -- dataset slice to iterate
    filename          -- output path; .gz => gzip, .csv => plain text
    labelsonfirstline -- whether to emit a header row

    Uses module globals: options (labels, separator, quoting, chaining,
    compression), datasets.source and jobs.previous.  Note it mutates
    options.labels and (when chaining) datasets.source in place.
    """
    d = datasets.source[0]
    if not options.labels:
        options.labels = sorted(d.columns)
    if options.chain_source:
        # Replace each source dataset with its chain, stopping where the
        # previous job's corresponding source left off.
        if jobs.previous:
            prev_source = jobs.previous.params.datasets.source
            assert len(datasets.source) == len(prev_source)
        else:
            prev_source = [None] * len(datasets.source)
        lst = []
        for src, stop in zip(datasets.source, prev_source):
            lst.extend(src.chain(stop_ds=stop))
        datasets.source = lst
    if filename.lower().endswith('.gz'):
        open_func = partial(gzip.open, compresslevel=options.compression)
    elif filename.lower().endswith('.csv'):
        open_func = open
    else:
        raise Exception("Filename should end with .gz for compressed or .csv for uncompressed")
    if PY2:
        open_func = partial(open_func, mode='wb')
    else:
        # 'x' mode: refuse to overwrite an existing output file.
        open_func = partial(open_func, mode='xt', encoding='utf-8')
    # Build one value iterator per label; rows are produced by zipping them.
    iters = []
    first = True
    dumps = JSONEncoder(
        sort_keys=True,
        ensure_ascii=True,
        check_circular=False,
    ).encode
    for label in options.labels:
        # Only the first column's iterator reports progress.
        it = d.iterate_list(sliceno, label, datasets.source, status_reporting=first)
        first = False
        t = d.columns[label].type
        if d.columns[label].none_support:
            # None must be spelled out in the column's own (bytes/text) kind.
            if t == 'bytes' or (PY2 and t == 'ascii'):
                it = imap(nonefix_b, it)
            elif t in ('ascii', 'unicode',):
                it = imap(nonefix_u, it)
        # Convert every value to the str type of the running Python.
        if t == 'unicode' and PY2:
            it = imap(enc, it)
        elif t == 'bytes' and PY3:
            it = imap(lambda s: s.decode('utf-8', errors='backslashreplace'), it)
        elif t in ('float32', 'float64',):
            # repr() round-trips floats exactly.
            it = imap(repr, it)
        elif t == 'number':
            if PY2:
                it = imap(lambda n: str(n) if isinstance(n, long) else repr(n), it)
            else:
                it = imap(repr, it)
        elif t == 'json':
            it = imap(dumps, it)
        elif t not in ('unicode', 'ascii', 'bytes'):
            it = imap(str, it)
        iters.append(it)
    it = izip(*iters)
    with writer(open_func(filename)) as write:
        q = options.quote_fields
        sep = options.separator
        if q:
            # Quote every field, doubling embedded quote characters.
            qq = q + q
            if labelsonfirstline:
                write(enc(sep.join(q + n.replace(q, qq) + q for n in options.labels)))
            for data in it:
                write(sep.join(q + n.replace(q, qq) + q for n in data))
        else:
            if labelsonfirstline:
                write(enc(sep.join(options.labels)))
            for data in it:
                write(sep.join(data))
def analysis(sliceno, job):
    """Per-slice entry point: export this slice's rows.

    In sliced mode each slice writes (and registers) its own final file,
    each with its own header.  Otherwise each slice writes a numbered
    part file -- header only in slice 0 -- for synthesis() to concatenate.
    """
    if options.sliced:
        csvexport(sliceno, options.filename % (sliceno,), options.labelsonfirstline)
        job.register_file(options.filename % (sliceno,))
    else:
        labelsonfirstline = (sliceno == 0 and options.labelsonfirstline)
        filename = '%d.gz' if options.filename.lower().endswith('.gz') else '%d.csv'
        csvexport(sliceno, filename % (sliceno,), labelsonfirstline)
def synthesis(job, slices):
    """Concatenate the per-slice part files into the final output
    (non-sliced mode only).

    Byte-wise concatenation is valid for .gz too, since concatenated gzip
    members form a valid gzip stream.  Part files are deleted as consumed.
    """
    if not options.sliced:
        filename = '%d.gz' if options.filename.lower().endswith('.gz') else '%d.csv'
        with job.open(options.filename, "wb") as outfh:
            for sliceno in range(slices):
                if exists(filename % sliceno):
                    with status("Assembling %s (%d/%d)" % (options.filename, sliceno, slices)):
                        with open(filename % sliceno, "rb") as infh:
                            copyfileobj(infh, outfh)
                        unlink(filename % sliceno)
| 1.859375 | 2 |
Curso-Em-Video-Python/2Exercicios/009_Tabuada.py | pedrohd21/Cursos-Feitos | 0 | 12759747 | x = int(input('Qual tabuada multiplicar: '))
print('-' * 15)
print('{} x {:2} = {}'.format(x, 1, (x*1)))
print('{} x {:2} = {}'.format(x, 2, (x*2)))
print('{} x {:2} = {}'.format(x, 3, (x*3)))
print('{} x {:2} = {}'.format(x, 4, (x*4)))
print('{} x {:2} = {}'.format(x, 5, (x*5)))
print('{} x {:2} = {}'.format(x, 6, (x*6)))
print('{} x {:2} = {}'.format(x, 7, (x*7)))
print('{} x {:2} = {}'.format(x, 8, (x*8)))
print('{} x {:2} = {}'.format(x, 9, (x*9)))
print('{} x {:2} = {}'.format(x, 10, (x*10)))
print('-' * 15) | 3.875 | 4 |
Algorithms/Searching & Sorting/Fibonacii Search/fibinacci_search.py | strangestroad/interview-techdev-guide | 320 | 12759748 | <reponame>strangestroad/interview-techdev-guide
def FibonacciSearch(arr, key):
    """Fibonacci search over the sorted sequence *arr*.

    Returns the index of *key*, or -1 when it is absent.  Works like
    binary search but splits at Fibonacci offsets, probing at
    min(offset + F(m-2), len(arr) - 1).
    """
    n = len(arr)
    # Grow to the smallest Fibonacci number fib >= n, tracking its two
    # predecessors: small = F(m-2), big = F(m-1), fib = F(m).
    small, big = 0, 1
    fib = small + big
    while fib < n:
        small, big = big, fib
        fib = small + big
    # `offset` marks the element just before the still-eligible range.
    offset = -1
    while fib > 1:
        probe = min(offset + small, n - 1)
        if arr[probe] < key:
            # Key lies above the probe: step the window down by one.
            fib, big, small = big, small, big - small
            offset = probe
        elif arr[probe] > key:
            # Key lies below the probe: step the window down by two.
            fib, big, small = small, big - small, 2 * small - big
        else:
            return probe
    # A single candidate may remain just past `offset`.
    if big and offset < (n - 1) and arr[offset + 1] == key:
        return offset + 1
    return -1
# Demo driver.
key= 15
arr = [5, 10, 15, 20, 25, 30, 35]
ans = FibonacciSearch(arr, key)
print(ans)
# Bug fix: the old truthiness test `if (ans):` reported "Not Found" for a
# match at index 0 and "Found at 0 position" for a -1 miss.
if ans != -1:
    print("Found at "+ str(ans+1) +" position")
else:
    print("Not Found")
| 3.796875 | 4 |
data_comprehention/list_comprehention.py | silentstep/useful-python-code | 0 | 12759749 | a_list = [1, 5, 'a', 2, 'b']
squared_ints = [e**2 for e in a_list if type(e) is int]
if __name__ == "__main__":
print squared_ints | 3.609375 | 4 |
scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py | jasonmccampbell/scipy-refactor | 8 | 12759750 | #!/usr/bin/env python
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal, \
assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \
assert_raises, verbose, assert_equal
from numpy import array, finfo, argsort, dot, round, conj, random
from scipy.sparse import csc_matrix, isspmatrix
from scipy.sparse.linalg import LinearOperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence
from scipy.linalg import svd
def assert_almost_equal_cc(actual,desired,decimal=7,err_msg='',verbose=True):
    # almost equal or complex conjugates almost equal
    #
    # First tries the plain comparison; only on an AssertionError is the
    # complex conjugate compared (so a real mismatch still raises, from the
    # conjugate attempt).  The previous bare `except:` also swallowed
    # unrelated exceptions such as KeyboardInterrupt.
    try:
        assert_almost_equal(actual,desired,decimal,err_msg,verbose)
    except AssertionError:
        assert_almost_equal(actual,conj(desired),decimal,err_msg,verbose)
def assert_array_almost_equal_cc(actual,desired,decimal=7,
                                 err_msg='',verbose=True):
    # almost equal or complex conjugates almost equal
    #
    # Same retry-with-conjugate scheme as assert_almost_equal_cc; the
    # previous bare `except:` caught every exception type, not just the
    # comparison failure.
    try:
        assert_array_almost_equal(actual,desired,decimal,err_msg,verbose)
    except AssertionError:
        assert_array_almost_equal(actual,conj(desired),decimal,err_msg,verbose)
# precision for tests
_ndigits = {'f':4, 'd':12, 'F':4, 'D':12}
class TestArpack(TestCase):
    """Shared fixtures: one small symmetric and one small nonsymmetric
    matrix, each with a fixed starting vector v0 (for reproducible ARPACK
    runs) and its known eigenvalues."""
    def setUp(self):
        self.symmetric=[]
        self.nonsymmetric=[]

        # 6x6 symmetric matrix with integer eigenvalues {0,1,2,2,5,6}.
        S1={}
        S1['mat']=\
        array([[ 2., 0., 0., -1., 0., -1.],
               [ 0., 2., 0., -1., 0., -1.],
               [ 0., 0., 2., -1., 0., -1.],
               [-1., -1., -1., 4., 0., -1.],
               [ 0., 0., 0., 0., 1., -1.],
               [-1., -1., -1., -1., -1., 5.]])

        S1['v0']= array([0.39574246391875789,
                         0.00086496039750016962,
                         -0.9227205789982591,
                         -0.9165671495278005,
                         0.1175963848841306,
                         -0.29962625203712179])

        S1['eval']=array([0,1,2,2,5,6])
        self.symmetric.append(S1)

        # 5x5 nonsymmetric matrix with one real and two complex-conjugate
        # pairs of eigenvalues.
        N1={}
        N1['mat']=\
        array([[-2., -8., 1., 2., -5.],
               [ 6., 6., 0., 2., 1.],
               [ 0., 4., -2., 11., 0.],
               [ 1., 6., 1., 0., -4.],
               [ 2., -6., 4., 9., -3]])

        N1['v0'] = array([0.39574246391875789,
                          0.00086496039750016962,
                          -0.9227205789982591,
                          -0.9165671495278005,
                          0.1175963848841306])

        N1['eval']=\
        array([ -5.4854094033782888+0.0j,
                -2.2169058544873783+8.5966096591588261j,
                -2.2169058544873783-8.5966096591588261j,
                4.4596105561765107+3.8007839204319454j,
                4.4596105561765107-3.8007839204319454j],'D')

        self.nonsymmetric.append(N1)
class TestEigenSymmetric(TestArpack):
    """eigsh() on the real symmetric fixture: eigenvalue selection modes,
    user-supplied starting vector, and the no-convergence error path."""

    def get_exact_eval(self,d,typ,k,which):
        # Pick the k reference eigenvalues matching ARPACK's `which` mode.
        eval=d['eval'].astype(typ)
        ind=argsort(eval)
        eval=eval[ind]
        if which=='LM':
            return eval[-k:]
        if which=='SM':
            return eval[:k]
        if which=='BE':
            # one ev from each end - if k is odd, extra ev on high end
            l=k//2
            h=k//2+k%2
            low=range(len(eval))[:l]
            high=range(len(eval))[-h:]
            return eval[low+high]

    def eval_evec(self,d,typ,k,which,v0=None):
        a=d['mat'].astype(typ)
        if v0 == None:
            v0 = d['v0']
        exact_eval=self.get_exact_eval(d,typ,k,which)
        eval,evec=eigsh(a,k,which=which,v0=v0)
        # check eigenvalues
        assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ])
        # check eigenvectors A*evec=eval*evec
        for i in range(k):
            assert_array_almost_equal(dot(a,evec[:,i]),
                                      eval[i]*evec[:,i],
                                      decimal=_ndigits[typ])

    def test_symmetric_modes(self):
        k=2
        for typ in 'fd':
            for which in ['LM','SM','BE']:
                self.eval_evec(self.symmetric[0],typ,k,which)

    def test_starting_vector(self):
        k=2
        for typ in 'fd':
            A=self.symmetric[0]['mat']
            n=A.shape[0]
            v0 = random.rand(n).astype(typ)
            self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0)

    def test_no_convergence(self):
        # maxiter=5 is deliberately too small: eigsh must raise
        # ArpackNoConvergence carrying the partially converged pairs.
        np.random.seed(1234)
        m = np.random.rand(30, 30)
        m = m + m.T
        try:
            w, v = eigsh(m, 4, which='LM', v0=m[:,0], maxiter=5)
            raise AssertionError("Spurious no-error exit")
        except ArpackNoConvergence, err:
            k = len(err.eigenvalues)
            if k <= 0:
                raise AssertionError("Spurious no-eigenvalues-found case")
            w, v = err.eigenvalues, err.eigenvectors
            for ww, vv in zip(w, v.T):
                assert_array_almost_equal(dot(m, vv), ww*vv,
                                          decimal=_ndigits['d'])
class TestEigenComplexSymmetric(TestArpack):
    """eigs() on the symmetric fixture cast to complex dtypes ('F'/'D')."""

    def sort_choose(self,eval,typ,k,which):
        # sort and choose the eigenvalues and eigenvectors
        # both for the exact answer and that returned from ARPACK
        reval=round(eval,decimals=_ndigits[typ])
        ind=argsort(reval)
        if which=='LM' or which=='LR':
            return ind[-k:]
        if which=='SM' or which=='SR':
            return ind[:k]

    def eval_evec(self,d,typ,k,which,v0=None):
        a=d['mat'].astype(typ)
        if v0 == None:
            v0 = d['v0']
        # get exact eigenvalues
        exact_eval=d['eval'].astype(typ)
        ind=self.sort_choose(exact_eval,typ,k,which)
        exact_eval=exact_eval[ind]
        # compute eigenvalues
        eval,evec=eigs(a,k,which=which,v0=v0)
        ind=self.sort_choose(eval,typ,k,which)
        eval=eval[ind]
        evec=evec[:,ind]
        # check eigenvalues
        assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ])
        # check eigenvectors A*evec=eval*evec
        for i in range(k):
            assert_array_almost_equal(dot(a,evec[:,i]),
                                      eval[i]*evec[:,i],
                                      decimal=_ndigits[typ])

    def test_complex_symmetric_modes(self):
        k=2
        for typ in 'FD':
            for which in ['LM','SM','LR','SR']:
                self.eval_evec(self.symmetric[0],typ,k,which)

    def test_no_convergence(self):
        # maxiter=30 is too small for this random complex matrix; the
        # partially converged pairs must still satisfy A*v = w*v.
        np.random.seed(1234)
        m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30)
        try:
            w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30)
            raise AssertionError("Spurious no-error exit")
        except ArpackNoConvergence, err:
            k = len(err.eigenvalues)
            if k <= 0:
                raise AssertionError("Spurious no-eigenvalues-found case")
            w, v = err.eigenvalues, err.eigenvectors
            for ww, vv in zip(w, v.T):
                assert_array_almost_equal(dot(m, vv), ww*vv,
                                          decimal=_ndigits['D'])
class TestEigenNonSymmetric(TestArpack):
    """eigs() on the real nonsymmetric fixture; eigenvalues may be complex,
    so comparisons accept complex conjugates (assert_*_cc helpers)."""

    def sort_choose(self,eval,typ,k,which):
        reval=round(eval,decimals=_ndigits[typ])
        if which in ['LR','SR']:
            ind=argsort(reval.real)
        elif which in ['LI','SI']:
            # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
            ind=argsort(abs(reval.imag))
        else:
            ind=argsort(abs(reval))

        if which in ['LR','LM','LI']:
            return ind[-k:]
        if which in ['SR','SM','SI']:
            return ind[:k]

    def eval_evec(self,d,typ,k,which,v0=None):
        a=d['mat'].astype(typ)
        if v0 == None:
            v0 = d['v0']
        # get exact eigenvalues
        exact_eval=d['eval'].astype(typ.upper())
        ind=self.sort_choose(exact_eval,typ,k,which)
        exact_eval=exact_eval[ind]
        # compute eigenvalues
        eval,evec=eigs(a,k,which=which,v0=v0)
        ind=self.sort_choose(eval,typ,k,which)
        eval=eval[ind]
        evec=evec[:,ind]
        # check eigenvalues
        # check eigenvectors A*evec=eval*evec
        for i in range(k):
            assert_almost_equal_cc(eval[i],exact_eval[i],decimal=_ndigits[typ])
            assert_array_almost_equal_cc(dot(a,evec[:,i]),
                                         eval[i]*evec[:,i],
                                         decimal=_ndigits[typ])

    def test_nonsymmetric_modes(self):
        k=2
        for typ in 'fd':
            for which in ['LI','LR','LM','SM','SR','SI']:
                for m in self.nonsymmetric:
                    self.eval_evec(m,typ,k,which)

    def test_starting_vector(self):
        k=2
        for typ in 'fd':
            A=self.symmetric[0]['mat']
            n=A.shape[0]
            v0 = random.rand(n).astype(typ)
            self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0)

    def test_no_convergence(self):
        # maxiter=30 forces an ArpackNoConvergence with partial results.
        np.random.seed(1234)
        m = np.random.rand(30, 30)
        try:
            w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30)
            raise AssertionError("Spurious no-error exit")
        except ArpackNoConvergence, err:
            k = len(err.eigenvalues)
            if k <= 0:
                raise AssertionError("Spurious no-eigenvalues-found case")
            w, v = err.eigenvalues, err.eigenvectors
            for ww, vv in zip(w, v.T):
                assert_array_almost_equal(dot(m, vv), ww*vv,
                                          decimal=_ndigits['d'])
class TestEigenComplexNonSymmetric(TestArpack):
    """eigs() on the nonsymmetric fixture cast to complex dtypes ('F'/'D')."""

    def sort_choose(self,eval,typ,k,which):
        eps=finfo(typ).eps
        reval=round(eval,decimals=_ndigits[typ])

        if which in ['LR','SR']:
            ind=argsort(reval)
        elif which in ['LI','SI']:
            ind=argsort(reval.imag)
        else:
            ind=argsort(abs(reval))

        if which in ['LR','LI','LM']:
            return ind[-k:]
        if which in ['SR','SI','SM']:
            return ind[:k]

    def eval_evec(self,d,typ,k,which,v0=None):
        a=d['mat'].astype(typ)
        if v0 == None:
            v0 = d['v0']
        # get exact eigenvalues
        exact_eval=d['eval'].astype(typ.upper())
        ind=self.sort_choose(exact_eval,typ,k,which)
        exact_eval=exact_eval[ind]
        if verbose >= 3:
            print "exact"
            print exact_eval

        # compute eigenvalues
        eval,evec=eigs(a,k,which=which,v0=v0)
        ind=self.sort_choose(eval,typ,k,which)
        eval=eval[ind]
        evec=evec[:,ind]
        if verbose >= 3:
            print eval
        # check eigenvalues
        # check eigenvectors A*evec=eval*evec
        for i in range(k):
            assert_almost_equal_cc(eval[i],exact_eval[i],decimal=_ndigits[typ])
            assert_array_almost_equal_cc(dot(a,evec[:,i]),
                                         eval[i]*evec[:,i],
                                         decimal=_ndigits[typ])

    def test_complex_nonsymmetric_modes(self):
        k=2
        for typ in 'FD':
            for which in ['LI','LR','LM','SI','SR','SM']:
                for m in self.nonsymmetric:
                    self.eval_evec(m,typ,k,which)

    def test_no_convergence(self):
        # maxiter=30 forces an ArpackNoConvergence with partial results.
        np.random.seed(1234)
        m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30)
        try:
            w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30)
            raise AssertionError("Spurious no-error exit")
        except ArpackNoConvergence, err:
            k = len(err.eigenvalues)
            if k <= 0:
                raise AssertionError("Spurious no-eigenvalues-found case")
            w, v = err.eigenvalues, err.eigenvectors
            for ww, vv in zip(w, v.T):
                assert_array_almost_equal(dot(m, vv), ww*vv,
                                          decimal=_ndigits['D'])
def test_eigen_bad_shapes():
    """eigs must reject a non-square matrix with ValueError."""
    # A is not square.
    A = csc_matrix(np.zeros((2,3)))
    assert_raises(ValueError, eigs, A)
def test_eigs_operator():
    # Check inferring LinearOperator dtype
    # (an FFT operator without an explicit dtype must yield complex
    # eigenvalues).
    fft_op = LinearOperator((6, 6), np.fft.fft)
    w, v = eigs(fft_op, k=3)
    assert_equal(w.dtype, np.complex_)
def sorted_svd(m, k):
    """Dense SVD of *m*, keeping only the k largest singular triplets.

    The selected singular values are returned in ascending order, together
    with the matching columns of u and rows of vh.  Sparse input is
    densified first.
    """
    dense = m.todense() if isspmatrix(m) else m
    u, s, vh = svd(dense)
    keep = np.argsort(s)[-k:]
    return u[:, keep], s[keep], vh[keep]
def svd_estimate(u, s, vh):
    """Reconstruct the (low-rank) matrix u * diag(s) * vh."""
    return np.dot(u, np.dot(np.diag(s), vh))
class TestSparseSvd(TestCase):
    """svds() must agree with the top-k triplets of a dense SVD, for real
    and complex matrices, dense or sparse, and their transposes."""

    def test_simple_real(self):
        x = np.array([[1, 2, 3],
                      [3, 4, 3],
                      [1, 0, 2],
                      [0, 0, 1]], np.float)
        y = np.array([[1, 2, 3, 8],
                      [3, 4, 3, 5],
                      [1, 0, 2, 3],
                      [0, 0, 1, 0]], np.float)
        z = csc_matrix(x)

        for m in [x.T, x, y, z, z.T]:
            for k in range(1, min(m.shape)):
                u, s, vh = sorted_svd(m, k)
                su, ss, svh = svds(m, k)

                m_hat = svd_estimate(u, s, vh)
                sm_hat = svd_estimate(su, ss, svh)

                # Compare the rank-k reconstructions rather than the factors
                # themselves (singular vectors are only unique up to sign).
                assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)

    def test_simple_complex(self):
        x = np.array([[1, 2, 3],
                      [3, 4, 3],
                      [1+1j, 0, 2],
                      [0, 0, 1]], np.complex)
        y = np.array([[1, 2, 3, 8+5j],
                      [3-2j, 4, 3, 5],
                      [1, 0, 2, 3],
                      [0, 0, 1, 0]], np.complex)
        z = csc_matrix(x)

        for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
            for k in range(1, min(m.shape)-1):
                u, s, vh = sorted_svd(m, k)
                su, ss, svh = svds(m, k)

                m_hat = svd_estimate(u, s, vh)
                sm_hat = svd_estimate(su, ss, svh)

                assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
if __name__ == "__main__":
run_module_suite()
| 2.203125 | 2 |