| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
src/sima/simo/blueprints/timedependentvolumemass.py
|
SINTEF/simapy
| 0
|
12776251
|
<reponame>SINTEF/simapy
#
# Generated with TimeDependentVolumeMassBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.namedobject import NamedObjectBlueprint
class TimeDependentVolumeMassBlueprint(NamedObjectBlueprint):
""""""
def __init__(self, name="TimeDependentVolumeMass", package_path="sima/simo", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("point","sima/sima/Point3","Mass point (local coordinates).",True))
self.attributes.append(BlueprintAttribute("flowRates","sima/simo/FlowRateItem","",True,Dimension("*")))
self.attributes.append(Attribute("vol0","number","Volume of liquid at t=0",default=0.0))
self.attributes.append(Attribute("volMax","number","Maximum allowable volume",default=0.0))
self.attributes.append(Attribute("volMin","number","Minimum allowable volume",default=0.0))
self.attributes.append(Attribute("volRateMax","number","Maximum allowable volume rate (HLA only)",default=0.0))
self.attributes.append(Attribute("volRateMin","number","Minimum allowable volume rate (HLA only)",default=0.0))
self.attributes.append(Attribute("density","number","Density of liquid in tank",default=0.0))
self.attributes.append(BlueprintAttribute("vectorZ","sima/sima/Vector3","Vector defining portion z-axis in local system",True))
self.attributes.append(BlueprintAttribute("vectorXZ","sima/sima/Vector3","Vector in local xz-plane def. portion x-axis",True))
self.attributes.append(BlueprintAttribute("portions","sima/simo/VolumeMassPortion","",True,Dimension("*")))
| 1.921875
| 2
|
misago/misago/users/tests/test_profilefields.py
|
vascoalramos/misago-deployment
| 2
|
12776252
|
<reponame>vascoalramos/misago-deployment<filename>misago/misago/users/tests/test_profilefields.py
from django.contrib.auth import get_user_model
from django.test import TestCase
from ..profilefields import ProfileFields
User = get_user_model()
class ProfileFieldsLoadTests(TestCase):
def test_no_groups(self):
"""profile fields util handles empty list"""
profilefields = ProfileFields([])
profilefields.load()
self.assertFalse(profilefields.fields_dict)
def test_empty_group(self):
"""profile fields util handles empty group"""
profilefields = ProfileFields([{"name": "Test", "fields": []}])
profilefields.load()
self.assertFalse(profilefields.fields_dict)
def test_field_defines_fieldname(self):
"""fields need to define fieldname"""
profilefields = ProfileFields(
[
{
"name": "Test",
"fields": [
"misago.users.tests.testfiles.profilefields.NofieldnameField"
],
}
]
)
with self.assertRaises(ValueError):
profilefields.load()
try:
profilefields.load()
except ValueError as e:
error = str(e)
self.assertIn(
"misago.users.tests.testfiles.profilefields.NofieldnameField", error
)
self.assertIn("profile field has to specify fieldname attribute", error)
def test_detect_repeated_imports(self):
"""fields can't be specified multiple times"""
profilefields = ProfileFields(
[
{
"name": "Test",
"fields": ["misago.users.profilefields.default.TwitterHandleField"],
},
{
"name": "<NAME>",
"fields": ["misago.users.profilefields.default.TwitterHandleField"],
},
]
)
with self.assertRaises(ValueError):
profilefields.load()
try:
profilefields.load()
except ValueError as e:
error = str(e)
self.assertIn(
"misago.users.profilefields.default.TwitterHandleField", error
)
self.assertIn("profile field has been specified twice", error)
def test_detect_repeated_fieldnames(self):
"""fields can't reuse other field's fieldnames"""
profilefields = ProfileFields(
[
{
"name": "Test",
"fields": [
"misago.users.tests.testfiles.profilefields.FieldnameField"
],
},
{
"name": "<NAME>",
"fields": [
# pylint: disable=line-too-long
"misago.users.tests.testfiles.profilefields.RepeatedFieldnameField"
],
},
]
)
with self.assertRaises(ValueError):
profilefields.load()
try:
profilefields.load()
except ValueError as e:
error = str(e)
self.assertIn(
"misago.users.tests.testfiles.profilefields.FieldnameField", error
)
self.assertIn(
"misago.users.tests.testfiles.profilefields.RepeatedFieldnameField",
error,
)
self.assertIn(
'field defines fieldname "hello" that is already in use by the', error
)
def test_field_correct_field(self):
"""util loads correct field"""
field_path = "misago.users.profilefields.default.RealNameField"
profilefields = ProfileFields([{"name": "Test", "fields": [field_path]}])
profilefields.load()
self.assertIn(field_path, profilefields.fields_dict)
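# A minimal sketch of the same assertion pattern written with assertRaises as a
# context manager, which both checks the exception type and exposes the caught
# exception for inspection; the message string is copied from the tests above.
#
#   with self.assertRaises(ValueError) as cm:
#       profilefields.load()
#   self.assertIn("profile field has to specify fieldname attribute", str(cm.exception))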
| 2.4375
| 2
|
inherit.py
|
Prashant269/python
| 0
|
12776253
|
class baseclass(object):
    def printhim(self):
        print("prasanth")

class inhertingclass(baseclass):
    # inherits printhim() from baseclass
    pass

x = inhertingclass()
x.printhim()
| 3.21875
| 3
|
cospar/help_functions/_additional_analysis.py
|
rbonhamcarter/cospar
| 1
|
12776254
|
<gh_stars>1-10
import numpy as np
from scipy import stats
import scipy.sparse as ssp
import pandas as pd
from .. import settings
from .. import logging as logg
import time
import ot.bregman as otb
from ._help_functions_CoSpar import *
from matplotlib import pyplot as plt
import seaborn as sns
from .. import plotting as pl
"""
This is not a necessary component of CoSpar. It requires additional packages.
When publishing, you can change the __init__ setting to remove this part.
Making it too large increases the maintenance burden.
"""
def Wasserstein_distance_private(
prob_t0,
prob_t1,
full_cost_matrix,
OT_epsilon=0.05,
OT_stopThr=10 ** (-8),
OT_max_iter=1000,
):
"""
Compute symmetric Wasserstein distance between two distributions.
Parameters
----------
prob_t0: `np.array`, (n_1,)
Distribution on initial state space.
prob_t1: `np.array`, (n_2,)
Distribution on later state space
full_cost_matrix: `np.array`, shape (n_1, n_2)
A cost matrix to map all initial states to all later states. This is a full matrix.
OT_epsilon: `float`, optional (default: 0.05)
        Entropic regularization parameter to compute the optimal
transport map from target to ref.
OT_stopThr: `float`, optional (default: 10**(-8))
        The stopping threshold for computing the transport map.
    OT_max_iter: `int`, optional (default: 1000)
        The maximum number of iterations for computing the transport map.
Returns
-------
    The Wasserstein distance (a scalar), or None if both transition map constructions fail.
"""
# normalized distribution
prob_t0 = np.array(prob_t0)
prob_t1 = np.array(prob_t1)
sp_id_t0 = np.nonzero(prob_t0 > 0)[0]
sp_id_t1 = np.nonzero(prob_t1 > 0)[0]
resol = 10 ** (-10)
input_t0 = prob_t0[sp_id_t0] / (resol + np.sum(prob_t0[sp_id_t0]))
input_t1 = prob_t1[sp_id_t1] / (resol + np.sum(prob_t1[sp_id_t1]))
logg.info("Compute forward transition map")
sp_cost_matrix_t0t1 = full_cost_matrix[sp_id_t0][:, sp_id_t1]
OT_transition_map_t0t1 = otb.sinkhorn_stabilized(
input_t0,
input_t1,
sp_cost_matrix_t0t1,
OT_epsilon,
numItermax=OT_max_iter,
stopThr=OT_stopThr,
)
# if more than 10% of the prediction is less than 50% accurate, then we declare it a failure
flag_1 = np.sum(OT_transition_map_t0t1.sum(1) < 0.5 * input_t0) > 0.1 * len(
input_t0
)
flag_2 = np.sum(OT_transition_map_t0t1.sum(1) > 2 * input_t0) > 0.1 * len(input_t0)
if not (flag_1 or flag_2):
Wass_dis = np.sum(OT_transition_map_t0t1 * sp_cost_matrix_t0t1)
return Wass_dis
else:
logg.error("Forward transition map construction failed")
logg.info("Compute backward transition map instead")
sp_cost_matrix_t1t0 = full_cost_matrix[sp_id_t1][:, sp_id_t0]
OT_transition_map_t1t0 = otb.sinkhorn_stabilized(
input_t1,
input_t0,
sp_cost_matrix_t1t0,
OT_epsilon,
numItermax=OT_max_iter,
stopThr=OT_stopThr,
)
flag_3 = np.sum(OT_transition_map_t1t0.sum(1) < 0.5 * input_t1) > 0.1 * len(
input_t1
)
flag_4 = np.sum(OT_transition_map_t1t0.sum(1) > 2 * input_t1) > 0.1 * len(
input_t1
)
Wass_dis = np.sum(OT_transition_map_t1t0 * sp_cost_matrix_t1t0)
if not (flag_3 or flag_4):
return Wass_dis
else:
logg.error("Backward transition map construction failed")
return None
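# A minimal, self-contained sketch of the Sinkhorn call used above, assuming
# only numpy and the POT package (imported in this module as otb); the toy
# distributions and cost matrix are illustrative values.
def _wasserstein_toy_example():
    toy_p = np.array([0.5, 0.5])  # source distribution
    toy_q = np.array([0.25, 0.75])  # target distribution
    toy_cost = np.array([[0.0, 1.0], [1.0, 0.0]])  # pairwise transport cost
    # Entropy-regularized optimal transport plan (same call as above)
    plan = otb.sinkhorn_stabilized(toy_p, toy_q, toy_cost, 0.05)
    # Entropic Wasserstein cost: transported mass weighted by cost
    return np.sum(plan * toy_cost)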
def Wasserstein_distance(
adata,
group_A,
group_B,
OT_dis_KNN=5,
OT_epsilon=0.05,
compute_new=False,
show_groups=True,
):
"""
    Compute the Wasserstein distance between two populations.
Parameters
----------
adata: :class:`~anndata.AnnData` object
    group_A: `np.array`, shape (adata.shape[0],)
        Non-negative weights over all cells, selecting population A.
    group_B: `np.array`, shape (adata.shape[0],)
        Non-negative weights over all cells, selecting population B.
    OT_dis_KNN: `int`, optional (default: 5)
        Number of nearest neighbors to construct the KNN graph for
        computing the shortest path distance.
    OT_epsilon: `float`, optional (default: 0.05)
        Entropic regularization parameter (>0) used to compute the optimal
        transport map from target to ref. A larger value increases
        the uncertainty of the transition.
compute_new: `bool`, optional (default: False)
If True, compute OT_map and also the shortest path distance from scratch,
whether it was computed and saved before or not.
show_groups: `bool`, optional (default: True)
Plot each group, and overlay them on top of each other.
Returns
-------
    The Wasserstein distance (a scalar), or None if the input is invalid or the transport map construction fails.
"""
data_des = adata.uns["data_des"][-1]
data_path = settings.data_path
figure_path = settings.figure_path
group_A = np.array(group_A).astype(float)
group_B = np.array(group_B).astype(float)
if (len(group_A) != len(group_B)) or (len(group_A) != adata.shape[0]):
logg.error("Size mismatch between group_A, group_B, and adata.shape[0].")
return None
else:
if (np.sum(group_A > 0) == 0) or (np.sum(group_B > 0) == 0):
logg.error("No cells selected.")
return None
if show_groups:
X_emb = adata.obsm["X_emb"]
x_emb = X_emb[:, 0]
y_emb = X_emb[:, 1]
fig_width = settings.fig_width
fig_height = settings.fig_height
point_size = settings.fig_point_size
fig = plt.figure(figsize=(3 * fig_width, fig_height))
ax = plt.subplot(1, 3, 1)
pl.customized_embedding(
x_emb,
y_emb,
group_A,
point_size=point_size,
set_lim=False,
ax=ax,
title="Group A",
)
ax = plt.subplot(1, 3, 2)
pl.customized_embedding(
x_emb,
y_emb,
group_B,
point_size=point_size,
set_lim=False,
ax=ax,
title="Group B",
)
resol = 10 ** (-10)
group_A = group_A / (resol + np.max(group_A))
group_B = group_B / (resol + np.max(group_B))
vector_array = group_A - group_B
ax = plt.subplot(1, 3, 3)
new_idx = np.argsort(abs(vector_array))
pl.customized_embedding(
x_emb[new_idx],
y_emb[new_idx],
vector_array[new_idx],
title="Overlay",
point_size=point_size,
set_lim=False,
ax=ax,
color_map=plt.cm.bwr,
order_points=False,
)
plt.tight_layout()
fig.savefig(
f"{figure_path}/{data_des}_Wass_dis_group_overlay.{settings.file_format_figs}"
)
SPD_file_name = (
f"{data_path}/{data_des}_ShortestPathDistanceMatrix_KNN{OT_dis_KNN}.npy"
)
if os.path.exists(SPD_file_name) and (not compute_new):
logg.info("Load pre-computed shortest path distance matrix")
OT_cost_matrix = np.load(SPD_file_name)
else:
logg.info("Compute new shortest path distance matrix")
t = time.time()
ShortPath_dis = compute_shortest_path_distance(
adata,
num_neighbors_target=OT_dis_KNN,
mode="distances",
method="others",
normalize=False,
)
# we do not want normalization here
OT_cost_matrix = ShortPath_dis
np.save(
SPD_file_name, OT_cost_matrix
) # This is not a sparse matrix at all.
logg.info(
f"Finishing computing shortest-path distance, used time {time.time()-t}"
)
Wass_dis = Wasserstein_distance_private(
group_A, group_B, OT_cost_matrix, OT_epsilon=OT_epsilon
)
return Wass_dis
########################
# Miscellaneous analysis
########################
def assess_fate_prediction_by_correlation_v0(
adata,
ground_truth,
selected_time_points,
plot_style="boxplot",
figure_index="",
show_groups=True,
):
"""
    Assess binary fate prediction by correlation.
Parameters
----------
adata: :class:`~anndata.AnnData` object
It should be run through cs.pl.binary_fate_bias
ground_truth: `np.array`
A vector of expected fate bias for each cell in the
full space corresponding to adata_orig. We expect bias to fate A has value (0,1],
and bias towards fate B has value [-1,0). Cells with value zeros are
discarded before computing correlation.
selected_time_points: `list`
A list of selected time points for making the comparison.
plot_style: `string`
Plot used to visualize the results. It can be {'boxplot','scatter'}.
    figure_index: `string`, optional (default: '')
        String index used to annotate filenames of saved figures, to distinguish plots from different conditions.
show_groups: `bool`, optional (default: True)
Plot each group.
Returns
-------
correlation with ground truth at selected time points.
"""
data_des = adata.uns["data_des"][-1]
if "binary_fate_bias" not in adata.uns.keys():
logg.error(
"Binary fate bias not computed yet! Please run cs.pl.binary_fate_bias first!"
)
return None
else:
fig_width = settings.fig_width
fig_height = settings.fig_height
point_size = settings.fig_point_size
time_info = np.array(adata.obs["time_info"])
sp_idx_time = selecting_cells_by_time_points(time_info, selected_time_points)
ground_truth_sp = np.array(ground_truth)[adata.uns["sp_idx"]]
binary_fate_bias = adata.uns["binary_fate_bias"][1]
sel_index = (abs(ground_truth_sp) > 0) & sp_idx_time
ground_truth_sp = (1 + ground_truth_sp) / 2 # transform the value to 0 and 1.
if np.sum(sel_index) == 0:
logg.error("No cells selected.")
return None
else:
corr = np.corrcoef(ground_truth_sp[sel_index], binary_fate_bias[sel_index])[
0, 1
]
if np.isnan(corr):
logg.error("Correlation is NaN.")
return None
else:
corr = round(100 * corr) / 100
if show_groups:
X_emb = adata.obsm["X_emb"]
x_emb = X_emb[:, 0]
y_emb = X_emb[:, 1]
fig_width = settings.fig_width
fig_height = settings.fig_height
point_size = settings.fig_point_size
fig = plt.figure(figsize=(2 * fig_width, fig_height))
ax = plt.subplot(1, 2, 1)
new_idx = np.argsort(abs(ground_truth_sp - 0.5))
pl.customized_embedding(
x_emb[new_idx],
y_emb[new_idx],
ground_truth_sp[new_idx],
point_size=point_size,
set_lim=False,
color_map=plt.cm.bwr,
ax=ax,
title="Group A",
order_points=False,
)
ax = plt.subplot(1, 2, 2)
new_idx = np.argsort(abs(binary_fate_bias - 0.5))
pl.customized_embedding(
x_emb[new_idx],
y_emb[new_idx],
binary_fate_bias[new_idx],
point_size=point_size,
set_lim=False,
color_map=plt.cm.bwr,
ax=ax,
title="Group B",
order_points=False,
)
if plot_style == "boxplot":
data_frame = pd.DataFrame(
{
"Ref": ground_truth_sp[sel_index],
"Prediction": binary_fate_bias[sel_index],
}
)
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
sns.violinplot(
x="Ref", y="Prediction", data=data_frame, ax=ax, color="red"
)
sns.set(style="white")
# ax.set_ylim([-0.5,20])
ax.set_xlabel("Reference fate bias")
ax.set_ylabel("Predicted fate bias")
plt.tight_layout()
ax.set_title(f"Corr={corr}, {figure_index}")
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_reference_prediction_box.{settings.file_format_figs}"
)
if plot_style == "scatter":
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
ax.plot(
ground_truth_sp[sel_index], binary_fate_bias[sel_index], "*r"
)
ax.set_xlabel("Reference fate bias")
ax.set_ylabel("Predicted fate bias")
plt.tight_layout()
ax.set_title(f"Corr={corr}, {figure_index}")
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_reference_prediction_scatter.{settings.file_format_figs}"
)
return corr
def assess_fate_prediction_by_correlation(
adata,
expect_vector,
    predict_vector,
selected_time_points,
plot_style="scatter",
figure_index="",
show_groups=True,
mask=None,
remove_neutral_ref=True,
background=False,
vmax=1,
vmin=0,
):
"""
    Assess binary fate prediction by correlation.
    The expect_vector and predict_vector have the same length, with values in the range (0,1).
    We only use cells with a non-neutral bias (i.e., different from 0.5) to compute the fate correlation.
Parameters
----------
adata: :class:`~anndata.AnnData` object
It should be run through pl.prediction
expect_vector: `np.array`
A vector of expected fate bias for each cell. The bias range is (0,1),
with 0.5 being neutral. The neutral cells are discarded before computing correlation.
predict_vector: `np.array`
A vector of predicted fate bias for each cell. The bias range is (0,1),
with 0.5 being neutral.
selected_time_points: `list`
A list of selected time points for making the comparison.
plot_style: `string`, optional (default: 'scatter')
Plot used to visualize the results. It can be {'boxplot','scatter'}.
    figure_index: `string`, optional (default: '')
        String index used to annotate filenames of saved figures, to distinguish plots from different conditions.
show_groups: `bool`, optional (default: True)
Plot each group.
mask: `np.array`, optional (default: None)
A boolean array to define which cells are used for computing correlation.
remove_neutral_ref: `bool`, optional (default: True)
Remove neutral reference states before computing the correlation.
background: `bool`, optional (default: False)
Show background at given time points.
vmax: `float`, optional (default: 1)
Maximum value to plot.
vmin: `float`, optional (default: 0)
Minimum value to plot.
Returns
-------
correlation between expect_vector and predict_vector at selected time points.
"""
# Copy the vector to avoid changing the original vector.
reference = expect_vector.copy()
    prediction = predict_vector.copy()
data_des = adata.uns["data_des"][-1]
if (len(reference) != len(prediction)) or (len(reference) != adata.shape[0]):
logg.error("Size mismatch between reference, prediction, and adata.shape[0].")
return None
else:
fig_width = settings.fig_width
fig_height = settings.fig_height
point_size = settings.fig_point_size
time_info = np.array(adata.obs["time_info"])
sp_idx_time = selecting_cells_by_time_points(time_info, selected_time_points)
if (mask is not None) and (len(mask) == len(prediction)):
mask = np.array(mask)
reference[~mask] = 0.5
prediction[~mask] = 0.5
if remove_neutral_ref:
logg.info(
"Remove neutral states in the reference before computing correlation."
)
sel_index = (
(abs(reference - 0.5) > 0)
& sp_idx_time
& ~np.isnan(reference)
& ~np.isnan(prediction)
)
else:
sel_index = sp_idx_time
if np.sum(sel_index) == 0:
logg.error("No cells selected.")
return None
else:
reference_sp = reference[sel_index]
prediction_sp = prediction[sel_index]
# df=pd.DataFrame({'Reference':reference,'Prediction':prediction,'sel_index':sel_index})
corr = np.corrcoef(reference_sp, prediction_sp)[0, 1]
error = np.mean(abs(prediction_sp - reference_sp))
error = round(100 * error) / 100
if np.isnan(corr):
# logg.error("Correlation is NaN.")
corr = "NaN"
else:
corr = round(100 * corr) / 100
if show_groups:
X_emb = adata.obsm["X_emb"]
x_emb = X_emb[:, 0]
y_emb = X_emb[:, 1]
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
vector_temp = reference_sp
new_idx = np.argsort(abs(vector_temp - 0.5))
if background:
if mask is not None:
sel_index_1 = mask & sp_idx_time
else:
sel_index_1 = sp_idx_time
pl.customized_embedding(
x_emb[sel_index_1],
y_emb[sel_index_1],
np.ones(np.sum(sel_index_1)),
point_size=point_size,
set_lim=False,
ax=ax,
order_points=False,
)
pl.customized_embedding(
x_emb[sel_index][new_idx],
y_emb[sel_index][new_idx],
vector_temp[new_idx],
point_size=point_size,
set_lim=False,
color_map=plt.cm.bwr,
ax=ax,
title="Reference",
order_points=False,
vmax=vmax,
vmin=vmin,
)
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_reference.{settings.file_format_figs}"
)
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
vector_temp = prediction_sp
new_idx = np.argsort(abs(vector_temp - 0.5))
if background:
if mask is not None:
sel_index_1 = mask & sp_idx_time
else:
sel_index_1 = sp_idx_time
pl.customized_embedding(
x_emb[sel_index_1],
y_emb[sel_index_1],
np.ones(np.sum(sel_index_1)),
point_size=point_size,
set_lim=False,
ax=ax,
order_points=False,
)
pl.customized_embedding(
x_emb[sel_index][new_idx],
y_emb[sel_index][new_idx],
vector_temp[new_idx],
point_size=point_size,
set_lim=False,
color_map=plt.cm.bwr,
ax=ax,
title="Prediction",
order_points=False,
vmax=vmax,
vmin=vmin,
)
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_prediction.{settings.file_format_figs}"
)
if plot_style == "boxplot":
reference_sp[reference_sp < 0.01] = 0
reference_sp[reference_sp > 0.99] = 1
data_frame = pd.DataFrame(
{"Ref": reference_sp, "Prediction": prediction_sp}
)
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
sns.violinplot(
x="Ref", y="Prediction", data=data_frame, ax=ax, color="red"
)
sns.set(style="white")
# ax.set_ylim([-0.5,20])
ax.set_xlabel("Reference fate bias")
ax.set_ylabel("Predicted fate bias")
plt.tight_layout()
ax.set_title(f"R={corr}, Error={error} {figure_index}")
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_reference_prediction_box.{settings.file_format_figs}"
)
if plot_style == "scatter":
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.subplot(1, 1, 1)
ax.plot(reference_sp, prediction_sp, "*r")
ax.set_xlabel("Reference fate bias")
ax.set_ylabel("Predicted fate bias")
plt.tight_layout()
ax.set_title(f"R={corr}, Error={error} {figure_index}")
fig.savefig(
f"{settings.figure_path}/{data_des}_{figure_index}_reference_prediction_scatter.{settings.file_format_figs}"
)
return corr, error
####### plot heat maps for genes
def heatmap_v1(
figure_path,
data_matrix,
variable_names_x,
variable_names_y,
int_seed=10,
data_des="",
log_transform=False,
color_map=plt.cm.Reds,
vmin=None,
vmax=None,
fig_width=4,
fig_height=6,
color_bar=True,
):
"""
Plot ordered heat map of data_matrix matrix.
Parameters
----------
figure_path: `str`
path to save figures
    data_matrix: `np.array`
        A matrix whose columns match variable_names_x and whose rows match variable_names_y.
    variable_names_x: `list`
        List of variable names along the x-axis (columns).
    variable_names_y: `list`
        List of variable names along the y-axis (rows).
    color_bar: `bool`, optional (default: True)
        Whether to draw a color bar.
data_des: `str`, optional (default: '')
String to distinguish different saved objects.
int_seed: `int`, optional (default: 10)
Seed to initialize the plt.figure object (to avoid
plotting on existing object).
log_transform: `bool`, optional (default: False)
If true, perform a log transform. This is needed when the data
        matrix has entries varying by several orders of magnitude.
"""
# o = get_hierch_order(data_matrix)
# o1 = get_hierch_order(data_matrix.T)
plt.figure(int_seed)
if log_transform:
plt.imshow(
np.log(data_matrix + 1) / np.log(10),
aspect="auto",
cmap=color_map,
vmin=vmin,
vmax=vmax,
)
else:
plt.imshow(data_matrix, aspect="auto", cmap=color_map, vmax=vmax, vmin=vmin)
variable_names_x = list(variable_names_x)
variable_names_y = list(variable_names_y)
if variable_names_x == "":
plt.xticks([])
else:
plt.xticks(
np.arange(data_matrix.shape[1]) + 0.4,
variable_names_x,
rotation=70,
ha="right",
)
if variable_names_y == "":
plt.yticks([])
else:
plt.yticks(
np.arange(data_matrix.shape[0]), variable_names_y, rotation=0, ha="right"
)
if color_bar:
cbar = plt.colorbar()
cbar.set_label("Z-Score", rotation=270, labelpad=20)
plt.gcf().set_size_inches((fig_width, fig_height))
plt.tight_layout()
plt.savefig(figure_path + f"/{data_des}_data_matrix.{settings.file_format_figs}")
def gene_expression_heat_map(
adata,
state_info,
gene_list,
selected_fates,
rename_selected_fates=None,
color_bar=False,
method="zscore",
fig_width=6,
fig_height=3,
horizontal="True",
log_transform=False,
vmin=None,
vmax=None,
):
"""
Plot heatmap of gene expression within given clusters.
The gene expression can be the relative value or zscore, depending on method {'zscore','Relative'}
"""
(
mega_cluster_list,
valid_fate_list,
fate_array_flat,
sel_index_list,
) = analyze_selected_fates(selected_fates, state_info)
gene_full = np.array(adata.var_names)
gene_list = np.array(gene_list)
sel_idx = np.in1d(gene_full, gene_list)
valid_sel_idx = np.in1d(gene_list, gene_full)
if np.sum(valid_sel_idx) > 0:
cleaned_gene_list = gene_list[valid_sel_idx]
if np.sum(valid_sel_idx) < len(gene_list):
invalid_gene_list = gene_list[~valid_sel_idx]
print(f"These are invalid gene names: {invalid_gene_list}")
else:
print("No valid genes selected.")
gene_expression_matrix = np.zeros((len(mega_cluster_list), len(cleaned_gene_list)))
X = adata.X
resol = 10 ** (-10)
if method == "zscore":
logg.info("Using zscore (range: [-2,2], or [-1,1]")
else:
logg.info("Using relative gene expression. Range [0,1]")
for k, temp in enumerate(cleaned_gene_list):
temp_id = np.nonzero(gene_full == temp)[0][0]
temp_vector = np.zeros(len(sel_index_list))
for j, temp_idx in enumerate(sel_index_list):
temp_vector[j] = np.mean(X[temp_idx, temp_id])
if method == "zscore":
z_score = stats.zscore(temp_vector)
gene_expression_matrix[:, k] = z_score
else:
temp_vector = (temp_vector + resol) / (resol + np.sum(temp_vector))
gene_expression_matrix[:, k] = temp_vector
if (rename_selected_fates is None) or (
len(rename_selected_fates) != len(mega_cluster_list)
):
rename_selected_fates = mega_cluster_list
if horizontal:
heatmap_v1(
settings.figure_path,
gene_expression_matrix,
cleaned_gene_list,
rename_selected_fates,
int_seed=10,
data_des="",
log_transform=False,
color_map=plt.cm.coolwarm,
fig_width=fig_width,
fig_height=fig_height,
color_bar=color_bar,
vmin=vmin,
vmax=vmax,
)
else:
heatmap_v1(
settings.figure_path,
gene_expression_matrix.T,
rename_selected_fates,
cleaned_gene_list,
int_seed=10,
data_des="",
log_transform=False,
color_map=plt.cm.coolwarm,
fig_width=fig_height,
fig_height=fig_width,
color_bar=color_bar,
vmin=vmin,
vmax=vmax,
)
return gene_expression_matrix
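# A minimal usage sketch for the helper above, assuming an AnnData object
# `adata` with cluster labels stored in adata.obs["state_info"]; the gene and
# fate names are illustrative only.
#
#   matrix = gene_expression_heat_map(
#       adata,
#       np.array(adata.obs["state_info"]),
#       gene_list=["Gata1", "Elane"],
#       selected_fates=["Erythroid", "Neutrophil"],
#       method="zscore",
#   )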
# ####### plot heat maps for genes
# def heatmap_v1(figure_path, data_matrix, variable_names_x,variable_names_y,int_seed=10,
# data_des='',log_transform=False,color_map=plt.cm.Reds,vmin=None,vmax=None,fig_width=4,fig_height=6,
# color_bar=True):
# """
# Plot ordered heat map of data_matrix matrix.
# Parameters
# ----------
# figure_path: `str`
# path to save figures
# data_matrix: `np.array`
# A matrix whose columns should match variable_names
# variable_names: `list`
# List of variable names
# color_bar_label: `str`, optional (default: 'cov')
# Color bar label
# data_des: `str`, optional (default: '')
# String to distinguish different saved objects.
# int_seed: `int`, optional (default: 10)
# Seed to initialize the plt.figure object (to avoid
# plotting on existing object).
# log_transform: `bool`, optional (default: False)
# If true, perform a log transform. This is needed when the data
# matrix has entries varying by several order of magnitude.
# """
# #o = get_hierch_order(data_matrix)
# #o1 = get_hierch_order(data_matrix.T)
# plt.figure(int_seed)
# if log_transform:
# plt.imshow(np.log(data_matrix+1)/np.log(10), aspect='auto',cmap=color_map, vmin=vmin,vmax=vmax)
# else:
# plt.imshow(data_matrix, aspect='auto',cmap=color_map, vmax=vmax,vmin=vmin)
# variable_names_x=list(variable_names_x)
# variable_names_y=list(variable_names_y)
# if variable_names_x=='':
# plt.xticks([])
# else:
# plt.xticks(np.arange(data_matrix.shape[1])+.4, variable_names_x, rotation=70, ha='right')
# if variable_names_y=='':
# plt.yticks([])
# else:
# plt.yticks(np.arange(data_matrix.shape[0]), variable_names_y, rotation=0, ha='right')
# if color_bar:
# cbar = plt.colorbar()
# cbar.set_label('Number of barcodes (log10)', rotation=270, labelpad=20)
# plt.gcf().set_size_inches((fig_width,fig_height))
# plt.tight_layout()
# plt.savefig(figure_path+f'/{data_des}_data_matrix.{settings.file_format_figs}')
# def gene_expression_heat_map(adata, state_info, gene_list,selected_fates,rename_selected_fates=None,color_bar=False,fig_width=6,fig_height=3,horizontal='True',log_transform=False,vmin=None,vmax=None):
# """
# Calculate the gene expression Z-score of each gene within given clusters
# """
# mega_cluster_list,valid_fate_list,fate_array_flat,sel_index_list=analyze_selected_fates(selected_fates,state_info)
# gene_full=np.array(adata.var_names)
# gene_list=np.array(gene_list)
# sel_idx=np.in1d(gene_full,gene_list)
# valid_sel_idx=np.in1d(gene_list,gene_full)
# if np.sum(valid_sel_idx)>0:
# cleaned_gene_list=gene_list[valid_sel_idx]
# if np.sum(valid_sel_idx)<len(gene_list):
# invalid_gene_list=gene_list[~valid_sel_idx]
# print(f"These are invalid gene names: {invalid_gene_list}")
# else:
# print("No valid genes selected.")
# gene_expression_matrix=np.zeros((len(mega_cluster_list),len(cleaned_gene_list)))
# X=adata.X
# resol=10**(-10)
# for k,temp in enumerate(cleaned_gene_list):
# temp_id=np.nonzero(gene_full==temp)[0][0]
# temp_vector=np.zeros(len(sel_index_list))
# for j,temp_idx in enumerate(sel_index_list):
# temp_vector[j]=np.mean(X[temp_idx,temp_id])
# z_score=stats.zscore(temp_vector)
# #temp_vector=(temp_vector+resol)/(resol+np.sum(temp_vector))
# #gene_expression_matrix[:,k]=temp_vector
# gene_expression_matrix[:,k]=z_score
# if (rename_selected_fates is None) or (len(rename_selected_fates) != len(mega_cluster_list)):
# rename_selected_fates=mega_cluster_list
# if horizontal:
# heatmap_v1(settings.figure_path, gene_expression_matrix, cleaned_gene_list,rename_selected_fates,int_seed=10,
# data_des='',log_transform=False,color_map=plt.cm.coolwarm,fig_width=fig_width,fig_height=fig_height,
# color_bar=color_bar,vmin=vmin,vmax=vmax)
# else:
# heatmap_v1(settings.figure_path, gene_expression_matrix.T,rename_selected_fates, cleaned_gene_list,int_seed=10,
# data_des='',log_transform=False,color_map=plt.cm.coolwarm,fig_width=fig_height,fig_height=fig_width,
# color_bar=color_bar,vmin=vmin,vmax=vmax)
# return gene_expression_matrix
def ML_learn_fate_bias(
adata_train,
adata_test,
train_obs_name,
thresh_low=0.4,
thresh_high=0.6,
test_obs_name="ML_predicted_bias",
):
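    """
    Discretize the fate bias stored in adata_train.obs[train_obs_name] into three
    classes using thresh_low/thresh_high, train an MLP classifier on
    adata_train.obsm['X_pca'], predict classes for adata_test, store the predicted
    bias (class/2) in adata_test.obs[test_obs_name], and plot the result.
    """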
from sklearn.neural_network import MLPClassifier
train_fate_bias = adata_train.obs[train_obs_name]
final_fate_train = np.zeros(len(train_fate_bias), dtype=int)
## convert it to discrete label
final_fate_train[train_fate_bias < thresh_low] = 0
final_fate_train[
(train_fate_bias >= thresh_low) & (train_fate_bias <= thresh_high)
] = 1
final_fate_train[train_fate_bias > thresh_high] = 2
final_coord_train = adata_train.obsm["X_pca"]
t = time.time()
clf = MLPClassifier(random_state=1, max_iter=300, alpha=0.1).fit(
final_coord_train, final_fate_train
)
final_coord_test = adata_test.obsm["X_pca"]
pred_test = clf.predict(final_coord_test)
adata_test.obs[test_obs_name] = pred_test / 2
pl.plot_precomputed_fate_bias(adata_test, test_obs_name)
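# A minimal usage sketch, assuming two AnnData objects that already carry a
# fate-bias column and an X_pca embedding; the column names are illustrative.
#
#   ML_learn_fate_bias(adata_train, adata_test,
#                      train_obs_name="fate_bias",
#                      test_obs_name="ML_predicted_bias")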
| 2.25
| 2
|
testsite/main/views.py
|
navdhakar/django-project
| 0
|
12776255
|
<filename>testsite/main/views.py
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Tutorial, TutorialCategory, TutorialSeries
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
def homepage(request):
return render(request=request,
template_name="main/home.html",
context={"tutorials": Tutorial.objects.all()})
# Create your views here.
def register(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
messages.success(request, f"new account created: {username}")
login(request, user)
messages.info(request, f"you are now logged in as {username}")
return redirect("main:homepage")
else:
for msg in form.error_messages:
messages.error(request, f"{msg}: {form.error_messages[msg]}")
    form = UserCreationForm()
return render(request,
"main/register.html",
context={"form":form})
def logout_req(request):
logout(request)
messages.info(request, "logged out successfully")
return redirect("main:homepage")
| 2.421875
| 2
|
client/wmdriver.py
|
imrehg/lambdam
| 1
|
12776256
|
<gh_stars>1-10
"""
Driver module for HighFinesse WS7
"""
import ctypes
import numpy
wlm = ctypes.windll.wlmData # load the DLL
int32 = ctypes.c_long
uInt32 = ctypes.c_ulong
uInt64 = ctypes.c_ulonglong
float64 = ctypes.c_double
double = ctypes.c_double
long = ctypes.c_long
dword = ctypes.c_ulong
LZERO = long(0)
DZERO = double(0)
cInstCheckForWLM = long(-1)
cInstResetCalc = long(0)
cInstReturnMode = cInstResetCalc
cInstNotification = long(1)
cInstCopyPattern = long(2)
cInstCopyAnalysis = cInstCopyPattern
cInstControlWLM = long(3)
cInstControlDelay = long(4)
cInstControlPriority = long(5)
# Amplitude Constants
cMin1 = long(0)
cMin2 = long(1)
cMax1 = long(2)
cMax2 = long(3)
cAvg1 = long(4)
cAvg2 = long(5)
validAmps = [cMin1, cMin2, cMax1, cMax2, cAvg1, cAvg2]
# Pattern and Analysis
cPatternDisable = long(0)
cPatternEnable = long(1)
cAnalysisDisable = cPatternDisable
cAnalysisEnable = cPatternEnable
cSignal1Interferometers = long(0)
cSignal1WideInterferometer = long(1)
cSignal1Grating = long(1)
cSignal2Interferometers = long(2)
cSignal2WideInterferometer = long(3)
cSignalAnalysis = long(4)
# Trigger parameters
cCtrlMeasurementContinue = long(0)
cCtrlMeasurementInterrupt = long(1)
cCtrlMeasurementTriggerPoll = long(2)
cCtrlMeasurementTriggerSuccess = long(3)
getexposure = wlm.GetExposureNum
getexposure.restype = long
def GetExposure():
""" Get exposure values (ms) """
t1 = getexposure(long(1), long(1), LZERO)
t2 = getexposure(long(1), long(2), LZERO)
return (t1, t2)
setexposure = wlm.SetExposureNum
setexposure.restype = long
def SetExposure(t=(5, 5)):
""" Set exposure values """
ret1 = setexposure(long(1), long(1), long(t[0]))
ret2 = setexposure(long(1), long(2), long(t[1]))
return (ret1, ret2)
getfreq = wlm.GetFrequency
getfreq.restype = double
def GetFrequency():
""" Get a single frequency reading """
return getfreq(DZERO)
getwave = wlm.GetWavelength
getwave.restype = double
def GetWavelength():
""" Get a single frequency reading """
return getwave(DZERO)
getinterferencestats = wlm.GetAmplitudeNum
getinterferencestats.restype = long
def GetInterferenceStats(info=cMax1):
""" Get interference pattern stats """
if (info in validAmps):
out = getinterferencestats(long(1), info, LZERO)
else:
out = None
return out
gettemperature = wlm.GetTemperature
gettemperature.restype = double
def GetTemperature():
""" Get current wavemeter temperature """
return gettemperature(DZERO)
getpatternitemcount = wlm.GetPatternItemCount
getpatternitemcount.restype = long
def GetPatternItemCount(index):
""" Get interferometer's point coint """
return getpatternitemcount(index)
getpatternitemsize = wlm.GetPatternItemSize
getpatternitemsize.restype = long
def GetPatternItemSize(index):
""" Get interferometer's data type size """
return getpatternitemsize(index)
getpatterndata = wlm.GetPatternData
getpatterndata.restype = long
def GetPatternData(index, pointer):
""" Get interferometer's data type size """
return getpatterndata(index, pointer)
getpattern = wlm.GetPattern
getpattern.restype = long
def GetPattern(index):
""" Get interferometer's data type size """
return getpattern(index)
setpattern = wlm.SetPattern
setpattern.restype = long
def SetPattern(index, iEnable):
""" Get interferometer's data type size """
return setpattern(index, iEnable)
triggermeasurement = wlm.TriggerMeasurement
triggermeasurement.restype = long
def TriggerMeasurement(Action):
"""
Interrupts, continues or triggers the measurement loop.
Input parameters:
cCtrlMeasurementContinue
cCtrlMeasurementInterrupt
cCtrlMeasurementTriggerPoll
cCtrlMeasurementTriggerSuccess
"""
return triggermeasurement(Action)
####
# Own functions
####
def EnableInterferogram():
"""
    Enable continuous export of the interferograms.
"""
SetPattern(cSignal1Interferometers, cPatternEnable)
SetPattern(cSignal1WideInterferometer, cPatternEnable)
def Interferogram():
"""
Get the interferogram from the two interferometers in our current
model of wavemeter.
"""
cnt = 1024
inter1 = (ctypes.c_long*cnt)()
pi1 = ctypes.cast(inter1, ctypes.POINTER(ctypes.c_long))
inter2 = (ctypes.c_long*cnt)()
pi2 = ctypes.cast(inter2, ctypes.POINTER(ctypes.c_long))
GetPatternData(cSignal1Interferometers, pi1)
GetPatternData(cSignal1WideInterferometer, pi2)
inter1 = [int(i/6.5e4) for i in inter1]
inter2 = [int(i/6.5e4) for i in inter2]
return (inter1, inter2)
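# A minimal usage sketch, assuming the wlmData DLL is present and the wavemeter
# server application is running; the exposure values are illustrative.
if __name__ == "__main__":
    EnableInterferogram()
    SetExposure((5, 5))
    print("Exposure:", GetExposure())
    print("Frequency:", GetFrequency())
    print("Wavelength:", GetWavelength())
    print("Temperature:", GetTemperature())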
| 1.757813
| 2
|
redplane-store/apps/pktgen/parse_pcap.py
|
daehyeok-kim/redplane-public
| 11
|
12776257
|
<reponame>daehyeok-kim/redplane-public
from scapy.all import *
import sys
print(sys.argv[1])
packets = rdpcap(sys.argv[1], count = 100)
out_file = open('m57_summary.txt',"w")
ip_list = []
tcp_flows = []
udp_flows = []
total = 0
for packet in packets:
print (packet.time)
if packet.haslayer(IP) and packet.haslayer(TCP):
total += 1
flow_key = (packet[IP].src, packet[IP].dst, packet[TCP].sport, packet[TCP].dport)
if flow_key not in tcp_flows:
tcp_flows.append(flow_key)
elif packet.haslayer(IP) and packet.haslayer(UDP):
total += 1
flow_key = (packet[IP].src, packet[IP].dst, packet[UDP].sport, packet[UDP].dport)
if flow_key not in udp_flows:
udp_flows.append(flow_key)
if packet.haslayer(TCP) and packet[IP].src.find('192.168') != -1:
if packet[IP].dst not in ip_list:
ip_list.append(packet[IP].dst)
out_file.write('%s %d %d %d\n'%(packet[IP].dst, packet[TCP].sport, packet[TCP].dport, packet[IP].len))
if packet.haslayer(TCP) and packet[IP].dst.find('192.168') != -1:
print (packet.show())
#if packet[IP].dst not in ip_list:
# ip_list.append(packet[IP].dst)
#out_file.write('%s %d %d %d\n'%(packet[IP].dst, packet[TCP].sport, packet[TCP].dport, packet[IP].len))
print ("Unique dest IP: %d" % len(ip_list))
print ("UDP flows: %d"%(len(udp_flows)))
print ("TCP flows: %d"%(len(tcp_flows)))
print ("Total packets: %d"%(total))
| 2.5
| 2
|
fancy/trainer/configs/model_config.py
|
susautw/fancy-trainer
| 0
|
12776258
|
<filename>fancy/trainer/configs/model_config.py
from cached_property import cached_property
from torch.nn import Module
from fancy import config as cfg
from . import ImporterConfig
class ModelConfig(ImporterConfig):
param: dict = cfg.Option(default={}, type=cfg.process.flag_container)
@cached_property
def model(self) -> Module:
if not isinstance(self.param, dict):
raise TypeError("params must be a dict")
model = self.imported(**self.param)
if not isinstance(model, Module):
raise TypeError("imported_cls must be subclass of torch.nn.Module or a Callable returned it.")
return model
| 2.375
| 2
|
experiments/coil100.py
|
Guanzhou-Ke/conan
| 5
|
12776259
|
from .default import Experiment
coil100 = Experiment(
arch='alexnet',
hidden_dim=1024,
verbose=True,
log_dir='./logs/mytest',
device='cuda',
extra_record=True,
opt='adam',
epochs=100,
lr=1e-3,
batch_size=24,
cluster_hidden_dim=512,
ds_name='coil-100',
img_size=128,
input_channels=[3, 3, 3],
views=3,
clustering_loss_type='ddc',
num_cluster=100,
fusion_act='relu',
use_bn=True,
contrastive_type='simclr',
projection_layers=2,
projection_dim=512,
prediction_hidden_dim=0, # Just for simsiam.
contrastive_lambda=0.01,
temperature=0.1,
seed=0,
)
| 1.78125
| 2
|
data/migrations/0024_job_vm_volume_name.py
|
Duke-GCB/bespin-api
| 0
|
12776260
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-08-04 19:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0023_auto_20170801_1925'),
]
operations = [
migrations.AddField(
model_name='job',
name='vm_volume_name',
field=models.CharField(blank=True, help_text='Name of the volume attached to store data for this job.', max_length=255, null=True),
),
]
| 1.71875
| 2
|
python-code/ml/ml.py
|
Edward-Son/yhack2017
| 0
|
12776261
|
<filename>python-code/ml/ml.py
import xml.etree.ElementTree as ET
import numpy as np
from sklearn.externals import joblib
from sklearn import svm
#read the row indices of the flagged people (the positive samples)
indices = []
i = 0
with open("./found-bad-people/1-common-people") as f:
for line in f:
indices.append(int(line.strip('\n').split(' ')[-1]))
i += 1
#get the features
root = ET.parse("./FINRAChallengeData/IAPD/IA_INDVL_Feed_10_11_2017.xml/IA_Indvl_Feeds1.xml").getroot()
#preset numpy array size ahead of time to improve speed
i = 0
for line in root[0]:
i += 1
target = np.zeros(shape=(i))
data = np.zeros(shape=(i,5))
#dictionary mapping each categorical value to an integer code
dic = {}
rowCounter = 0
j = 0
for line in root[0]:
#feature 1 : city
try:
city = line[2][0].attrib['city']
except:
city = 'NAcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(city):
dic[city] = j
j += 1
#feature 2 : organisation name
try:
orgName = line[2][0].attrib['orgNm']
except:
orgName = 'NAorg'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(orgName):
dic[orgName] = j
j += 1
#feature 3 : branch location
try:
branchCity = line[2][0][1][0].attrib['city']
except:
branchCity = 'NAbranchcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(branchCity):
dic[branchCity] = j
j += 1
#feature 4 : number of exams taken
try:
numExams = 0
for exam in line[3]:
numExams += 1
exams = str(numExams) + 'examsTaken'
except:
exams = 'NAexams'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(exams):
dic[exams] = j
j += 1
#feature 5 : has other business?
try:
otherBus = line[7][0].attrib['desc']
otherBus = 'YesOtherBusiness'
except:
otherBus = 'NAOtherBusiness'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(otherBus):
dic[otherBus] = j
j += 1
    #update the feature matrix row
data[rowCounter] = [dic[city],dic[orgName],dic[branchCity],dic[exams],dic[otherBus]]
#update target data
if indices.count(rowCounter) == 0 :
target[rowCounter] = 0
else:
target[rowCounter] = 1
rowCounter += 1
#build features and labels from the second data set
indices = []
i = 0
with open("./found-bad-people/2-common-people") as f:
for line in f:
indices.append(int(line.strip('\n').split(' ')[-1]))
i += 1
root2 = ET.parse("./FINRAChallengeData/IAPD/IA_INDVL_Feed_10_11_2017.xml/IA_Indvl_Feeds2.xml").getroot()
#get length of file
l = 0
for line in root2[0]:
l += 1
#set the size of numpy array
data2 = np.zeros(shape=(l,5))
target2 = np.zeros(shape=(l))
rowCounter = 0
for line in root2[0]:
#feature 1 : city
try:
city = line[2][0].attrib['city']
except:
city = 'NAcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(city):
dic[city] = j
j += 1
#feature 2 : organisation name
try:
orgName = line[2][0].attrib['orgNm']
except:
orgName = 'NAorg'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(orgName):
dic[orgName] = j
j += 1
#feature 3 : branch location
try:
branchCity = line[2][0][1][0].attrib['city']
except:
branchCity = 'NAbranchcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(branchCity):
dic[branchCity] = j
j += 1
#feature 4: number of exams taken
try:
numExams = 0
for exam in line[3]:
numExams += 1
exams = str(numExams) + 'examsTaken'
except:
exams = 'NAexams'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(exams):
dic[exams] = j
j += 1
#feature 5 : has other business?
try:
otherBus = line[7][0].attrib['desc']
otherBus = 'YesOtherBusiness'
except:
otherBus = 'NAOtherBusiness'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(otherBus):
dic[otherBus] = j
j += 1
    #update the feature matrix row
data2[rowCounter] = [dic[city],dic[orgName],dic[branchCity],dic[exams],dic[otherBus]]
#update target data
if indices.count(rowCounter) == 0 :
target2[rowCounter] = 0
else:
target2[rowCounter] = 1
rowCounter += 1
#train one model on both data sets (calling fit() twice would discard the first fit)
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(np.concatenate((data, data2)), np.concatenate((target, target2)))
root3 = ET.parse("./FINRAChallengeData/IAPD/IA_INDVL_Feed_10_11_2017.xml/IA_Indvl_Feeds3.xml").getroot()
m = 0
for line in root3[0]:
m += 1
data3 = np.zeros(shape=(m,5))
rowCounter = 0
for line in root3[0]:
#feature 1 : city
try:
city = line[2][0].attrib['city']
except:
city = 'NAcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(city):
dic[city] = j
j += 1
#feature 2 : organisation name
try:
orgName = line[2][0].attrib['orgNm']
except:
orgName = 'NAorg'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(orgName):
dic[orgName] = j
j += 1
#feature 3 : branch location
try:
branchCity = line[2][0][1][0].attrib['city']
except:
branchCity = 'NAbranchcity'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(branchCity):
dic[branchCity] = j
j += 1
#feature 4: number of exams taken
try:
numExams = 0
for exam in line[3]:
numExams += 1
exams = str(numExams) + 'examsTaken'
except:
exams = 'NAexams'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(exams):
dic[exams] = j
j += 1
#feature 5 : has other business?
try:
otherBus = line[7][0].attrib['desc']
otherBus = 'YesOtherBusiness'
except:
otherBus = 'NAOtherBusiness'
    #if the key doesn't exist, add it to the dictionary
if not dic.has_key(otherBus):
dic[otherBus] = j
j += 1
    #update the feature matrix row
data3[rowCounter] = [dic[city],dic[orgName],dic[branchCity],dic[exams],dic[otherBus]]
rowCounter += 1
#predict on sample data set
print("length: " + str(len(data3)))
with open("resultsFinal", 'w') as f :
results = clf.predict(data3)
for k in results:
f.write(str(k) + "\n")
joblib.dump(clf, "finalPersistence.pkl")
# clf = joblib.load('finalPersistence.pkl')
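# A sketch of the five-feature extraction that is repeated three times above,
# factored into one helper; the logic mirrors the loops above and the function
# name is illustrative.
def extract_features(line, dic, j):
    """Encode the five features for one record, reusing the shared code
    dictionary `dic`; returns (codes, next_free_code)."""
    keys = []
    try:
        keys.append(line[2][0].attrib['city'])
    except Exception:
        keys.append('NAcity')
    try:
        keys.append(line[2][0].attrib['orgNm'])
    except Exception:
        keys.append('NAorg')
    try:
        keys.append(line[2][0][1][0].attrib['city'])
    except Exception:
        keys.append('NAbranchcity')
    try:
        keys.append(str(len(list(line[3]))) + 'examsTaken')
    except Exception:
        keys.append('NAexams')
    try:
        line[7][0].attrib['desc']
        keys.append('YesOtherBusiness')
    except Exception:
        keys.append('NAOtherBusiness')
    # assign a new integer code to any unseen categorical value
    for key in keys:
        if key not in dic:
            dic[key] = j
            j += 1
    return [dic[key] for key in keys], j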
| 2.6875
| 3
|
inbm-vision/vision-agent/vision/tests/unit/test_registry.py
|
ahameedx/intel-inb-manageability
| 5
|
12776262
|
<reponame>ahameedx/intel-inb-manageability
import datetime
from unittest import TestCase
from vision.registry_manager import Registry, Firmware, Hardware, Status, Security, OperatingSystem
from mock import Mock
class TestRegistry(TestCase):
def setUp(self):
self.mock_datetime = Mock()
def test_create_registry_success(self):
dt = datetime.datetime(2020, 10, 9)
new_firmware = Firmware(boot_fw_date=self.mock_datetime,
boot_fw_vendor="American Megatrends Inc.",
boot_fw_version="1.0")
new_os = OperatingSystem(os_type="Yocto",
os_version="2.5",
os_release_date=dt)
new_hardware = Hardware(flashless=False,
manufacturer="AMI",
platform_type="KEEMBAY",
stepping="A0",
sku="3400VE",
model="Intel Keem Bay HDDL2",
platform_product="intel",
serial_num="c0428202080d709",
version="bit-creek-2.13.2-r1.aarch64")
new_security = Security(dm_verity_enabled=False,
measured_boot_enabled=False,
is_provisioned=False,
is_xlink_secured=False,
guid=12345)
new_status = Status(heartbeat_timestamp=self.mock_datetime)
new_registry = Registry(device_id="123ABC",
firmware=new_firmware,
hardware=new_hardware,
os=new_os,
security=new_security,
status=new_status)
self.assertIsNotNone(new_registry)
def test_create_registry_fail(self):
self.assertRaises(TypeError, Registry,
(self.mock_datetime, "American Megatrends Inc.", "1.0", "123ABC",
self.mock_datetime, "Yocto", "2.5"))
| 2.34375
| 2
|
python/arachne/driver/pipeline.py
|
fixstars/arachne
| 3
|
12776263
|
from dataclasses import dataclass, field
from logging import getLogger
from typing import Any, Dict, List
import hydra
from hydra.core.config_store import ConfigStore
from hydra.utils import to_absolute_path
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline, node
from kedro.runner import SequentialRunner
from omegaconf import MISSING, DictConfig, OmegaConf
from arachne.config.base import BaseConfig
from arachne.data import Model
from arachne.tools import ToolConfigFactory, ToolFactory
from arachne.utils.model_utils import (
init_from_dir,
init_from_file,
load_model_spec,
save_model,
)
logger = getLogger(__name__)
@dataclass
class PipelineConfig(BaseConfig):
"""
This is a configuration class for pipeline.
Attributes:
tools (Any): Tool specific configurations. This will be overwritten by the value of the pipeline option.
pipeline (List[str]): Specifies tools to be applied in series.
"""
tools: Any = MISSING
pipeline: List[str] = MISSING
def get_default_tool_configs(tools: List[str]) -> Dict:
all_tools = ToolFactory.list()
config = {}
for t in tools:
if t not in all_tools:
assert False, f"Not supported tool ({t}) yet"
config[t] = ToolConfigFactory.get(t)
return config
def run(input: Model, cfg: PipelineConfig) -> Model:
"""
This function constructs and applies a pipeline defined by the config object to an input model.
Args:
input (Model): An input model.
cfg (PipelineConfig): A config object.
Returns:
Model: An output model.
"""
    # Prepare DataCatalog
data_catalog = DataCatalog()
data_catalog.add("root_input", MemoryDataSet(data=input))
# setup catalogs for each tool configs and outputs
for idx, tool in enumerate(cfg.pipeline):
config = "tools." + tool + "." + str(idx) + ".config"
output = "tools." + tool + "." + str(idx) + ".output"
data_catalog.add(config, MemoryDataSet(data=cfg.tools[tool]))
data_catalog.add(output, MemoryDataSet())
# Construct pipeline
pipeline_tmp = []
prev_output = "root_input"
for idx, tool in enumerate(cfg.pipeline):
t = ToolFactory.get(tool)
config = "tools." + tool + "." + str(idx) + ".config"
output = "tools." + tool + "." + str(idx) + ".output"
tool_inputs = {"input": prev_output, "cfg": config}
task = node(t.run, inputs=tool_inputs, outputs=output)
prev_output = output
pipeline_tmp.append(task)
pipeline = Pipeline(pipeline_tmp)
# Create a runner to run the pipeline
runner = SequentialRunner()
# Run the pipeline
runner.run(pipeline, data_catalog)
return data_catalog.load(prev_output)
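# A minimal, self-contained sketch of the same Kedro pattern used in run(),
# independent of arachne; the dataset and node names below are illustrative.
def _double(x):
    return 2 * x

def _increment(x):
    return x + 1

def _pipeline_sketch():
    catalog = DataCatalog()
    catalog.add("root_input", MemoryDataSet(data=3))
    catalog.add("doubled", MemoryDataSet())
    catalog.add("result", MemoryDataSet())
    pipeline = Pipeline(
        [
            node(_double, inputs="root_input", outputs="doubled"),
            node(_increment, inputs="doubled", outputs="result"),
        ]
    )
    SequentialRunner().run(pipeline, catalog)
    return catalog.load("result")  # 2*3 + 1 = 7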
@hydra.main(config_path="../config", config_name="config")
def main(cfg: DictConfig) -> None:
"""
This is a main function for `arachne.driver.pipeline`.
"""
try:
assert len(list(cfg.pipeline)) > 0
except AssertionError as err:
logger.exception("You must specify one tool at least")
raise err
logger.info(OmegaConf.to_yaml(cfg))
# Setup the input DNN model
if not cfg.model_file and not cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
if cfg.model_file and cfg.model_dir:
raise RuntimeError("User must specify either model_file or model_dir.")
input_model: Model
if cfg.model_file:
input_model = init_from_file(to_absolute_path(cfg.model_file))
else:
input_model = init_from_dir(to_absolute_path(cfg.model_dir))
if cfg.model_spec_file:
# if a YAML file describing the model specification is provided, overwrite input_model.spec
input_model.spec = load_model_spec(to_absolute_path(cfg.model_spec_file))
output_model = run(input_model, cfg) # type: ignore
save_model(
model=output_model, output_path=to_absolute_path(cfg.output_path), tvm_cfg=cfg.tools.tvm
)
if __name__ == "__main__":
defaults = [{"tools": ToolFactory.list()}, {"override hydra/job_logging": "custom"}, "_self_"]
@dataclass
class PipelineCLIConfig(PipelineConfig):
defaults: List[Any] = field(default_factory=lambda: defaults)
cs = ConfigStore.instance()
cs.store(name="config", node=PipelineCLIConfig)
main()
| 2.203125
| 2
|
button_mapping.py
|
ash3rz/flexx_gamepad
| 1
|
12776264
|
import sys
darwin = {
0: "button square",
1: "button cross",
2: "button circle",
3: "button triangle",
4: "bumper left",
5: "bumper right",
6: "trigger left",
7: "trigger right",
8: "select",
9: "start",
10: "left stick",
11: "right stick",
12: "meta",
13: "touchpad",
"left": "dpad left",
"right": "dpad right",
"up": "dpad up",
"down": "dpad down",
"axis": {
0: ["stick left", "x"],
1: ["stick left", "y"],
2: ["stick right", "x"],
5: ["stick right", "y"]
}
}
linux = {
0: "button square",
1: "button cross",
2: "button circle",
3: "button triangle",
4: "bumper left",
5: "bumper right",
6: "trigger left",
7: "trigger right",
8: "select",
9: "start",
10: "left stick",
11: "right stick",
12: "meta",
13: "touchpad",
"left": "dpad left",
"right": "dpad right",
"up": "dpad up",
"down": "dpad down",
"axis": {
0: ["stick left", "x"],
1: ["stick left", "y"],
2: ["stick right", "x"],
5: ["stick right", "y"]
}
}
windows = {
0: "button cross",
1: "button circle",
2: "button square",
3: "button triangle",
4: "select",
5: "meta",
6: "start",
7: "stick left",
8: "stick right",
9: "bumper left",
10: "bumper right",
11: "dpad up",
12: "dpad down",
13: "dpad left",
14: "dpad right",
15: "touchpad",
"axis": {
0: ["stick left", "x"],
1: ["stick left", "y"],
2: ["stick right", "x"],
3: ["stick right", "y"],
4: ["trigger left", None],
5: ["trigger right", None]
}
}
def button_mapping(button_key):
platform = sys.platform
if "darwin" == platform:
return darwin[button_key]
elif "win32" == platform:
return windows[button_key]
else:
return linux[button_key]
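# A minimal usage sketch; key 0 and the "axis" table exist in every platform
# mapping above, so this runs regardless of sys.platform.
if __name__ == "__main__":
    print(button_mapping(0))          # e.g. "button square" on darwin/linux
    print(button_mapping("axis")[0])  # ["stick left", "x"] on all platforms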
| 2.046875
| 2
|
py_test/voice/voice_python.py
|
GotoRen/notify-me
| 0
|
12776265
|
import urllib.request as req
import subprocess
s = '侵入者あり'  # "Intruder detected" (the string read aloud in Japanese)
print(s)
# Read the text aloud with the jtalk.sh text-to-speech script
subprocess.check_output('./jtalk.sh "' +s + '"', shell=True)
| 2.34375
| 2
|
figures/Plotter_fisher_fock.py
|
sqvarfort/Coherent-states-Fisher-information
| 1
|
12776266
|
import matplotlib.pyplot as plt
from numpy import *
import os
"""
filename1 = "data_processing/" + "GeneralQuantumFisher1photons"
filename2 = "data_processing/" + "GeneralQuantumFisher2photons"
filename3 = "data_processing/" + "GeneralQuantumFisher3photons"
with open(filename1) as f:
fisher1 = f.readlines()
with open(filename2) as f:
fisher2 = f.readlines()
with open(filename3) as f:
fisher3 = f.readlines()
fisher1 = [float(i) for i in fisher1]
fisher2 = [float(i) for i in fisher2]
fisher3 = [float(i) for i in fisher3]
times = linspace(0, 2*pi, len(fisher1))
filename_pure = "data_processing/" + "FisherInfoN20Homodyne"
filename_deco01 = "data_processing/" + "FisherHomodyneN25Deco0.1"
filename_deco05 = "data_processing/" + "FisherHomodyneN25Deco0.5"
with open(filename_pure) as f:
for line in f:
fisher_pure = line.split(",")
with open(filename_deco01) as f:
for line in f:
fisher_deco01 = line.split(",")
with open(filename_deco05) as f:
for line in f:
fisher_deco05 = line.split(",")
fisher_pure = [float(i) for i in fisher_pure]
fisher_deco01 = [float(i) for i in fisher_deco01]
fisher_deco05 = [float(i) for i in fisher_deco05]
times_pure = list(load("data_processing/" + "FisherInfoN20HomodyneTimes.npy"))
times_deco01 = list(load("data_processing/" + "times2016-08-21-08.05.18.npy"))
times_deco05 = list(load("data_processing/" + "times2016-08-20-01.16.02.npy"))
for i in range(0,1):
del fisher_pure[::2]
del fisher_deco01[::2]
del fisher_deco05[::2]
del times_pure[::2]
del times_deco01[::2]
del times_deco05[::2]
filename_pure = "data_processing/" + "fisher_mirror_N30"
filename_deco = "data_processing/" + "FisherN25MirrorDeco0.1"
with open(filename_pure) as f:
for line in f:
fisher_pure = line.split(",")
with open(filename_deco) as f:
for line in f:
fisher_deco = line.split(",")
fisher_pure = [float(i) for i in fisher_pure]
fisher_deco = [float(i) for i in fisher_deco]
del fisher_pure[::2]
del fisher_pure[::2]
del fisher_pure[::2]
times_pure = linspace(0, 2*pi, len(fisher_pure))
times_deco = list(load("data_processing/" + "times2016-08-21-08.05.18.npy"))
del fisher_deco[::2]
del fisher_deco[::2]
del fisher_deco[::2]
del times_deco[::2]
del times_deco[::2]
del times_deco[::2]
"""
filename00 = "data/simulation2017-02-07-13.45.35/fock_fisher"
filename_times00 = "data/simulation2017-02-07-13.45.35/times"
filename005 = "data/simulation2017-02-08-09.36.08/fock_fisher"
filename_times005 = "data/simulation2017-02-08-09.36.08/times"
fisher00 = load(filename00 + '.npy')
times00 = load(filename_times00 + '.npy')
fisher005 = load(filename005 + '.npy')
times005 = load(filename_times005 + '.npy')
# Mask the decoherence
fisher005 = ma.masked_where(fisher005 > 3.8, fisher005)
fisher005 = ma.masked_where(fisher005 < 0.0, fisher005)
#plt.show()
def plot_fisher(times, data, chirange, filename):
"""
    Plot the Fisher information versus time and save the figure.
    Input:
    - times: list of time arrays, one per curve.
    - data: list of Fisher-information arrays, one per curve.
    - chirange: list of chi values (not used in the current plot body).
    - filename: output file stem; the figure is saved as <filename>.pdf.
    Output:
    - Plot shown on screen and saved to file.
"""
plt.figure(figsize=(13,11))
# Use Latex
params = {'backend': 'ps',
'font.size': 12,
'axes.labelsize': 12,
# 'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
    'text.usetex': True, # use LaTeX for text rendering -> automatically matches the LaTeX font
'text.latex.unicode': True,
'font.family': 'serif',
'font.serif': 'cm',
#'figure.figsize': fig_size,
'text.latex.preamble': [r'\usepackage{physics}', r'\usepackage{amsmath}']}
plt.rcParams.update(params)
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
plt.xlabel('$t$', size = 40)
plt.ylabel('$I_F$', size = 40)
ax = plt.subplot(111)
ax.tick_params(axis='both', which='major', pad=10)
#plt.title('Fisher Information vs. time for ' + r'$\bar{g} = $' + str(self.args['gbar']) + ', $k = $' + str(self.k) + ', $N = $' + str(self.N) + ', $h = $' + str(self.h), size = 20, y=1.02)
#plt.gca().grid(True, linewidth = 2)
#plt.plot(times[0], data[0], '-o', color = 'k', label = '$1$ photon')
#plt.plot(times[1], data[1], '-o', color = 'b', label = '$4$ photons')
#plt.plot(times[2], data[2], '-o', color = 'r', label = '$9$ photons')
#plt.plot(times[0], data[0], color = 'b', label = 'Analytic')
    plt.plot(times[0], data[0], '-o', color = 'k', markeredgewidth=0.0, label = r'$\kappa = 0.0$')
    plt.plot(times[1], data[1], '-o', color = 'b', markeredgewidth=0.0, label = r'$\kappa = 0.05$')
plt.xticks([ 0, pi/2, pi, 3*pi/2, 2*pi], [r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'], size = 40)
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], [r'$0.0$', r'$0.5$', r'$1.0$', r'$1.5$', r'$2.0$', r'$2.5$', r'$3.0$', r'$3.5$'], size = 30)
#plt.yticks([0.0, 1000, 2000, 3000, 4000, 5000, 6000], [r'$0.0$', r'$1000$', r'$2000$', r'$3000$', r'$4000$', r'$5000$', r'$6000$'], size = 40)
#plt.yticks([0.0, 100, 200, 300, 400, 500, 600, 700, 800, 900], [r'$0.0$', r'$100$', r'$200$', r'$300$', r'$400$', r'$500$', r'$600$', r'$700$', r'$800$', r'$900$'], size = 40)
#plt.yticks([0.0, 200, 400, 600, 800], [r'$0.0$', r'$200$', r'$400$', r'$600$', r'$800$'], size = 30)
plt.subplots_adjust(bottom=0.15)
#plt.xlim([0, pi/2])
#plt.ylim([0,300])
plt.legend(loc = 1, fontsize = 30)
path = os.path.join(filename)
plt.savefig(path + ".pdf",transparent=True, dpi=600)
plt.show()
plot_fisher([times00, times005], [fisher00, fisher005], [0.0], "Fisher_Fock")
| 2.59375
| 3
|
project/database2.py
|
nikodrum/evaluationua
| 0
|
12776267
|
# -*- coding: UTF-8 -*-
from flask import g
import sqlite3
import math
database = "project/appart.db"
categ_coef = 17960
coef = 360/6378137
def connect_db():
return sqlite3.connect(database)
def insert_db(street,lat,lon,year,room,all_area,all_floors):
    # validating whether the coordinates are in Kyiv
#calculating distances
    # if the coordinates are outside Kyiv, fall back to zero distances
    if 50.144912 < lat < 50.702443 and 30.225671 < lon < 30.935312:
        len_to_center_in_coord = ((lat-50.450198)**2+(lon-30.523986)**2)**(1/2)
        len_to_center = len_to_center_in_coord/coef
        len_to_metro = def_nearest_subway(lat,lon)
    else:
        len_to_center = 0
        len_to_metro = 0
    # calculating the price estimate from the regression model
client_price = int(math.exp(7.483156+ 0.001652*float(year)
+ 0.122520*float(room)
+ 0.008478*float(all_area)
+ 0.007029*float(all_floors)
- 0.000286*float(len_to_center)
- 0.000407*float(len_to_metro)))
category = int((client_price)/categ_coef)
# inserting data to DB
data_list = [street,lat,lon,year,room,all_area,all_floors,client_price,category,len_to_center*6.283279]
g.db = connect_db()
cur = g.db.cursor()
cur.execute('INSERT INTO client_data VALUES (?,?,?,?,?,?,?,?,?,?)',data_list)
g.db.commit()
return g.db.close()
# reading info from the database
def search_db():
g.db = connect_db()
cur = g.db.cursor()
# taking client data
client_row = cur.execute('SELECT * FROM client_data WHERE ROWID=(SELECT MAX(ROWID) FROM client_data)')
posts_cl = []
posts_cl = [dict(year = row[3],room=row[4],all_area=row[5],all_floors=row[6],cl_price=row[7],category=row[8]) for row in client_row.fetchall()]
# taking data from calculated category based on price estimation
category = posts_cl[0]['category']
if category == 0:
category = 1
my_data_row = cur.execute('SELECT * FROM mytable_na WHERE price_category = (?)',[category])
posts_my_data=[]
posts_my_data = [dict(price=row[3],street=row[0],room=row[5],all_area=row[6],all_floors=row[9],distr=row[12]) for row in my_data_row.fetchall()]
g.db.close()
return [posts_cl,posts_my_data]
def taking_data_for_plot(rand_district):
g.db = connect_db()
cur = g.db.cursor()
    # taking all data for the district that was chosen
    # 7 different metrics of districts
distr_data = cur.execute('SELECT * FROM mytable_na WHERE distr = (?)',[rand_district])
post_plot_distr = [dict(price = row[3],room = row[5],all_area=row[6],livin_area=row[7],kitch_area=row[8],all_floors=row[9],year=row[11],distr=str(row[13])) for row in distr_data.fetchall()]
g.db.close()
return post_plot_distr
def def_nearest_subway(lat,lon):
g.db = connect_db()
cur = g.db.cursor()
# taking metro data
client_row = cur.execute('SELECT * FROM metro_coords ')
metro_coords = []
for row in client_row.fetchall():
metro_coords.append([row[0],row[1],row[2]])
g.db.close()
min_list = []
for i in range(0,len(metro_coords)):
        min_list.append(((lat - metro_coords[i][1])**2 + (lon - metro_coords[i][2])**2)**(1/2))
min_val = None
for i in range(0,len(min_list)) :
if min_val is None or min_list[i] < min_val :
min_val = min_list[i]
return min_val/coef
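# Illustrative sketch (not part of the original module): exercise the hedonic
# price formula from insert_db on made-up apartment data, without touching the
# database or Flask. Coefficients are copied verbatim from insert_db.
if __name__ == "__main__":
    _lat, _lon = 50.4501, 30.5234        # hypothetical flat near the centre of Kyiv
    _len_to_center = (((_lat - 50.450198)**2 + (_lon - 30.523986)**2)**(1/2)) / coef
    _price = int(math.exp(7.483156 + 0.001652*2005 + 0.122520*2
                          + 0.008478*55 + 0.007029*9
                          - 0.000286*_len_to_center - 0.000407*500))
    print("estimated price:", _price, "category:", _price // categ_coef)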
| 3.109375
| 3
|
setup.py
|
philbarker/ocxmd
| 8
|
12776268
|
<reponame>philbarker/ocxmd
from setuptools import setup
setup(
name="ocxmd",
version="0.2",
py_modules=["ocxmd"],
install_requires=["markdown>=3.0", "PyYAML>=3.13"],
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/philbarker/ocxmd",
description="A python markdown extension to take metadata embedded as YAML in a page of markdown and render it as JSON-LD in the HTML created by MkDocs.",
license="Apache2",
)
| 1.320313
| 1
|
myapp/forms.py
|
quinchoponcho/helloworld
| 0
|
12776269
|
from django import forms
from django.forms import ModelForm
from .models import Customer, Order
from myapp.models import Name
class NameForm(forms.ModelForm):
name_value = forms.CharField(max_length=100, help_text = "Enter a name")
class Meta:
model = Name
fields = ('name_value',)
class OrderForm(ModelForm):
class Meta:
model = Order
fields = '__all__'
| 2.34375
| 2
|
elementary/best-stock.py
|
vargad/exercises
| 1
|
12776270
|
<gh_stars>1-10
#!/usr/bin/env python3
def best_stock(data):
return sorted(data.items(), key=lambda x: x[1])[-1][0]
if __name__ == '__main__':
print(best_stock({ "CAL": 42.0, "GOG": 190.5, "DAG": 32.2 }))
assert best_stock({ "CAL": 42.0, "GOG": 190.5, "DAG": 32.2 }) == "GOG"
assert best_stock({ "CAL": 31.4, "GOG": 3.42, "APL": 170.34 }) == "APL"
| 3.109375
| 3
|
18. Exam Prep/checkmate.py
|
elenaborisova/Python-Advanced
| 2
|
12776271
|
<gh_stars>1-10
def find_position(matrix, size, symbol):
positions = []
for row in range(size):
for col in range(size):
if matrix[row][col] == symbol:
positions.append([row, col])
return positions
def is_position_valid(row, col, size):
return 0 <= row < size and 0 <= col < size
def check_for_checkmate(queen_pos, size, matrix):
for direction in CHANGES:
q_row, q_col = queen_pos[0], queen_pos[1]
change_row, change_col = CHANGES[direction][0], CHANGES[direction][1]
while is_position_valid(q_row + change_row, q_col + change_col, size):
q_row += change_row
q_col += change_col
if matrix[q_row][q_col] == "Q":
break
elif matrix[q_row][q_col] == "K":
return True
return False
SIZE = 8
board = [input().split() for _ in range(SIZE)]
king_pos = find_position(board, SIZE, "K")[0]
queens_positions = find_position(board, SIZE, "Q")
queens_winners = []
CHANGES = {
"up": (-1, 0),
"down": (1, 0),
"right": (0, 1),
"left": (0, -1),
"up-left": (-1, -1),
"down-left": (1, -1),
"up-right": (-1, 1),
"down-right": (1, 1),
}
for queen in queens_positions:
if check_for_checkmate(queen, SIZE, board):
queens_winners.append(queen)
if queens_winners:
[print(queen) for queen in queens_winners]
else:
print("The king is safe!")
| 3.859375
| 4
|
Code/Client/ui_face.py
|
jhuboo/sparky
| 3
|
12776272
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Freenove\Desktop\树莓派六足机器人\界面UI\face.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Face(object):
def setupUi(self, Face):
Face.setObjectName("Face")
Face.resize(650, 320)
Face.setStyleSheet("QWidget{\n"
"background:#484848;\n"
"}\n"
"QAbstractButton{\n"
"border-style:none;\n"
"border-radius:0px;\n"
"padding:5px;\n"
"color:#DCDCDC;\n"
"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #858585,stop:1 #383838);\n"
"}\n"
"QAbstractButton:hover{\n"
"color:#000000;\n"
"background-color:#008aff;\n"
"}\n"
"QAbstractButton:pressed{\n"
"color:#DCDCDC;\n"
"border-style:solid;\n"
"border-width:0px 0px 0px 4px;\n"
"padding:4px 4px 4px 2px;\n"
"border-color:#008aff;\n"
"background-color:#444444;\n"
"}\n"
"\n"
"QLabel{\n"
"color:#DCDCDC;\n"
"\n"
"\n"
"}\n"
"QLabel:focus{\n"
"border:1px solid #00BB9E;\n"
"\n"
"}\n"
"\n"
"QLineEdit{\n"
"border:1px solid #242424;\n"
"border-radius:3px;\n"
"padding:2px;\n"
"background:none;\n"
"selection-background-color:#484848;\n"
"selection-color:#DCDCDC;\n"
"}\n"
"QLineEdit:focus,QLineEdit:hover{\n"
"border:1px solid #242424;\n"
"}\n"
"QLineEdit{\n"
"border:1px solid #242424;\n"
"border-radius:3px;\n"
"padding:2px;\n"
"background:none;\n"
"selection-background-color:#484848;\n"
"selection-color:#DCDCDC;\n"
"}\n"
"\n"
"QLineEdit:focus,QLineEdit:hover{\n"
"border:1px solid #242424;\n"
"}\n"
"QLineEdit{\n"
"lineedit-password-character:9<PASSWORD>;\n"
"}")
self.label_video = QtWidgets.QLabel(Face)
self.label_video.setGeometry(QtCore.QRect(20, 10, 400, 300))
font = QtGui.QFont()
font.setFamily("Arial")
self.label_video.setFont(font)
self.label_video.setAlignment(QtCore.Qt.AlignCenter)
self.label_video.setObjectName("label_video")
self.label_photo = QtWidgets.QLabel(Face)
self.label_photo.setGeometry(QtCore.QRect(440, 15, 200, 200))
font = QtGui.QFont()
font.setFamily("Arial")
self.label_photo.setFont(font)
self.label_photo.setAlignment(QtCore.Qt.AlignCenter)
self.label_photo.setObjectName("label_photo")
self.lineEdit = QtWidgets.QLineEdit(Face)
self.lineEdit.setGeometry(QtCore.QRect(490, 235, 140, 25))
font = QtGui.QFont()
font.setFamily("Arial")
self.lineEdit.setFont(font)
self.lineEdit.setObjectName("lineEdit")
self.label = QtWidgets.QLabel(Face)
self.label.setGeometry(QtCore.QRect(440, 240, 45, 15))
font = QtGui.QFont()
font.setFamily("Arial")
self.label.setFont(font)
self.label.setObjectName("label")
self.Button_Read_Face = QtWidgets.QPushButton(Face)
self.Button_Read_Face.setGeometry(QtCore.QRect(460, 275, 150, 25))
font = QtGui.QFont()
font.setFamily("Arial")
self.Button_Read_Face.setFont(font)
self.Button_Read_Face.setObjectName("Button_Read_Face")
self.retranslateUi(Face)
QtCore.QMetaObject.connectSlotsByName(Face)
def retranslateUi(self, Face):
_translate = QtCore.QCoreApplication.translate
Face.setWindowTitle(_translate("Face", "Face"))
self.label_video.setText(_translate("Face", "Video"))
self.label_photo.setText(_translate("Face", "Photo"))
self.label.setText(_translate("Face", "Name:"))
self.Button_Read_Face.setText(_translate("Face", "Read Face"))
| 2.09375
| 2
|
lib/coginvasion/hood/SafeZoneLoader.py
|
theclashingfritz/Cog-Invasion-Online-Dump
| 1
|
12776273
|
<filename>lib/coginvasion/hood/SafeZoneLoader.py<gh_stars>1-10
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.SafeZoneLoader
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm.StateData import StateData
from direct.fsm.ClassicFSM import ClassicFSM
from direct.fsm.State import State
from direct.actor.Actor import Actor
from panda3d.core import ModelPool, TexturePool, NodePath
from lib.coginvasion.globals import CIGlobals
from lib.coginvasion.manager.SettingsManager import SettingsManager
from QuietZoneState import QuietZoneState
import ToonInterior, LinkTunnel, types, random
class SafeZoneLoader(StateData):
notify = directNotify.newCategory('SafeZoneLoader')
def __init__(self, hood, parentFSMState, doneEvent):
StateData.__init__(self, doneEvent)
self.hood = hood
self.parentFSMState = parentFSMState
self.fsm = ClassicFSM('safeZoneLoader', [State('off', self.enterOff, self.exitOff),
State('playground', self.enterPlayground, self.exitPlayground, ['quietZone']),
State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
State('quietZone', self.enterQuietZone, self.exitQuietZone, ['playground', 'toonInterior'])], 'off', 'off')
self.placeDoneEvent = 'placeDone'
self.place = None
self.playground = None
self.battleMusic = None
self.invasionMusic = None
self.invasionMusicFiles = None
self.interiorMusic = None
self.bossBattleMusic = None
self.music = None
self.tournamentMusic = None
self.linkTunnels = []
self.szHolidayDNAFile = None
self.animatedFish = None
return
def findAndMakeLinkTunnels(self):
for tunnel in self.geom.findAllMatches('**/*linktunnel*'):
dnaRootStr = tunnel.getName()
link = LinkTunnel.SafeZoneLinkTunnel(tunnel, dnaRootStr)
self.linkTunnels.append(link)
def load(self):
StateData.load(self)
if self.pgMusicFilename:
if type(self.pgMusicFilename) == types.ListType:
filename = random.choice(self.pgMusicFilename)
else:
filename = self.pgMusicFilename
self.music = base.loadMusic(filename)
if self.battleMusicFile:
self.battleMusic = base.loadMusic(self.battleMusicFile)
if self.invasionMusicFiles:
self.invasionMusic = None
if self.bossBattleMusicFile:
self.bossBattleMusic = base.loadMusic(self.bossBattleMusicFile)
if self.interiorMusicFilename:
self.interiorMusic = base.loadMusic(self.interiorMusicFilename)
if self.tournamentMusicFiles:
self.tournamentMusic = None
self.createSafeZone(self.dnaFile)
children = self.geom.findAllMatches('**/*doorFrameHole*')
for child in children:
child.hide()
self.parentFSMState.addChild(self.fsm)
_, _, _, _, _, _, _, _, af = SettingsManager().getSettings('settings.json')
if af == 'on':
self.notify.info('Anisotropic Filtering is on, applying to textures.')
for nodepath in self.geom.findAllMatches('*'):
try:
for node in nodepath.findAllMatches('**'):
try:
node.findTexture('*').setAnisotropicDegree(8)
except:
pass
except:
continue
return
def unload(self):
StateData.unload(self)
if self.animatedFish:
self.animatedFish.cleanup()
self.animatedFish.removeNode()
self.animatedFish = None
self.parentFSMState.removeChild(self.fsm)
del self.parentFSMState
del self.animatedFish
self.geom.removeNode()
del self.geom
del self.fsm
del self.hood
del self.playground
del self.music
del self.interiorMusic
del self.battleMusic
del self.bossBattleMusic
del self.tournamentMusic
self.ignoreAll()
ModelPool.garbageCollect()
TexturePool.garbageCollect()
return
def enter(self, requestStatus):
StateData.enter(self)
if base.localAvatar.zoneId < CIGlobals.DynamicZonesBegin:
self.findAndMakeLinkTunnels()
self.fsm.enterInitialState()
messenger.send('enterSafeZone')
self.setState(requestStatus['where'], requestStatus)
partyGate = self.geom.find('**/prop_party_gate_DNARoot')
if not partyGate.isEmpty():
partyGate.removeNode()
del partyGate
petShop = self.geom.find('**/*pet_shop_DNARoot*')
if not petShop.isEmpty():
fish = petShop.find('**/animated_prop_PetShopFishAnimatedProp_DNARoot')
if fish:
self.animatedFish = Actor('phase_4/models/props/exteriorfish-zero.bam', {'chan': 'phase_4/models/props/exteriorfish-swim.bam'})
self.animatedFish.reparentTo(petShop)
self.animatedFish.setPos(fish.getPos())
self.animatedFish.loop('chan')
fish.removeNode()
def exit(self):
StateData.exit(self)
messenger.send('exitSafeZone')
for link in self.linkTunnels:
link.cleanup()
if self.animatedFish:
self.animatedFish.stop('chan')
self.linkTunnels = []
def setState(self, stateName, requestStatus):
self.fsm.request(stateName, [requestStatus])
def createSafeZone(self, dnaFile):
if self.szStorageDNAFile:
loader.loadDNAFile(self.hood.dnaStore, self.szStorageDNAFile)
if self.szHolidayDNAFile:
loader.loadDNAFile(self.hood.dnaStore, self.szHolidayDNAFile)
node = loader.loadDNAFile(self.hood.dnaStore, dnaFile)
if node.getNumParents() == 1:
self.geom = NodePath(node.getParent(0))
self.geom.reparentTo(hidden)
else:
self.geom = hidden.attachNewNode(node)
self.makeDictionaries(self.hood.dnaStore)
if self.__class__.__name__ not in ('TTSafeZoneLoader', ):
self.geom.flattenMedium()
gsg = base.win.getGsg()
if gsg:
self.geom.prepareScene(gsg)
def makeDictionaries(self, dnaStore):
self.nodeList = []
for i in xrange(dnaStore.getNumDNAVisGroups()):
groupFullName = dnaStore.getDNAVisGroupName(i)
groupNode = self.geom.find('**/' + groupFullName)
if groupNode.isEmpty():
self.notify.error('Could not find visgroup')
if self.__class__.__name__ not in ('TTSafeZoneLoader', ):
groupNode.flattenMedium()
self.nodeList.append(groupNode)
self.hood.dnaStore.resetPlaceNodes()
self.hood.dnaStore.resetDNAGroups()
self.hood.dnaStore.resetDNAVisGroups()
self.hood.dnaStore.resetDNAVisGroupsAI()
def enterPlayground(self, requestStatus):
try:
self.hood.stopSuitEffect()
except:
pass
self.acceptOnce(self.placeDoneEvent, self.handlePlaygroundDone)
self.place = self.playground(self, self.fsm, self.placeDoneEvent)
self.place.load()
def exitPlayground(self):
self.ignore(self.placeDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
def handlePlaygroundDone(self):
status = self.place.doneStatus
if self.hood.isSameHood(status) and status['loader'] == 'safeZoneLoader' and status['where'] not in ('minigame', ):
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
def enterToonInterior(self, requestStatus):
self.acceptOnce(self.placeDoneEvent, self.handleToonInteriorDone)
self.place = ToonInterior.ToonInterior(self, self.fsm, self.placeDoneEvent)
self.place.load()
def enterThePlace(self, requestStatus):
base.cr.playGame.setPlace(self.place)
if self.place is not None:
self.place.enter(requestStatus)
return
def exitToonInterior(self):
self.ignore(self.placeDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
def handleToonInteriorDone(self):
status = self.place.doneStatus
if status['loader'] == 'safeZoneLoader' and self.hood.isSameHood(status) and status['shardId'] == None or status['how'] == 'doorOut':
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
return
def enterQuietZone(self, requestStatus):
self.fsm.request(requestStatus['where'], [requestStatus], exitCurrent=0)
self.quietZoneDoneEvent = uniqueName('quietZoneDone')
self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
self.quietZoneStateData = QuietZoneState(self.quietZoneDoneEvent)
self.quietZoneStateData.load()
self.quietZoneStateData.enter(requestStatus)
def exitQuietZone(self):
self.ignore(self.quietZoneDoneEvent)
del self.quietZoneDoneEvent
self.quietZoneStateData.exit()
self.quietZoneStateData.unload()
self.quietZoneStateData = None
return
def handleQuietZoneDone(self):
status = self.quietZoneStateData.getDoneStatus()
self.exitQuietZone()
if status['where'] == 'estate' or status['loader'] == 'townLoader':
self.doneStatus = status
messenger.send(self.doneEvent)
else:
self.enterThePlace(status)
def enterOff(self):
pass
def exitOff(self):
pass
| 1.882813
| 2
|
Drawing Book/Drawing Book.py
|
saurav0001kumar/HackerRank
| 1
|
12776274
|
<filename>Drawing Book/Drawing Book.py
#!/bin/python3
import os
import sys
#
# Complete the pageCount function below.
#
def pageCount(n, p):
t1=0
t2=0
if p==1 or p==n:
return(0)
else:
i=2
while i<=p:
if i%2==0:
t1+=1
i+=1
i=n-1
while i>=p:
if i%2!=0:
t2+=1
i-=1
if t1<t2:
return(t1)
else:
return(t2)
#
# Write your code here.
#
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
p = int(input())
result = pageCount(n, p)
fptr.write(str(result) + '\n')
fptr.close()
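# Note (illustrative, not part of the original solution): the same result can be
# computed in O(1) as min(p // 2, n // 2 - p // 2) -- pages turned from the front
# versus from the back -- e.g. pageCount(6, 2) == min(1, 2) == 1.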
| 3.890625
| 4
|
hawkweed/monads/either.py
|
hellerve/hawkweed
| 20
|
12776275
|
<filename>hawkweed/monads/either.py
"""The Either monad"""
from hawkweed.monads.monad import Monad
class Either(Monad):
"""The Either abstract base class"""
def __init__(self, value):
raise NotImplementedError("please use the type instances Left or Right")
class Left(Either):
"""The Left instance of the Either monad"""
def __init__(self, value):
self.value = value
def bind(self, fun):
"""
The monadic bind function of Left.
It will just return itself.
Complexity: O(1)
params:
fun: the function
returns: self
"""
return self
class Right(Either):
"""The Right instance of the Either monad"""
def __init__(self, value):
self.value = value
def bind(self, fun):
"""
The monadic bind function of Right.
It will just apply the function to the
value and be done with it.
Complexity: O(k) where k is the complexity of the function
params:
fun: the function
returns: the transformed value
"""
return fun(self.value)
def either(left, right, monad):
"""
Takes a function left, a function right and a value
and binds according to the value (into left if it is a Left,
into right if it is a Right). Otherwise throws a ValueError.
Complexity: O(1) or complexity of the given function
params:
left: the function that should be executed on Left
        right: the function that should be executed on Right
        monad: the Either instance to dispatch on
throws: ValueError
returns:
whatever the functions return
"""
if is_left(monad):
return monad.bind(left)
if is_right(monad):
return monad.bind(right)
raise ValueError("monad in either must either be left or right")
def lefts(monads):
"""
Takes a list and returns only the instances of Left.
Complexity: O(1)
params:
monads: the list
returns:
an iterable of the Left values
"""
return (x for x in monads if is_left(x))
def rights(monads):
"""
Takes a list and returns only the instances of Right.
Complexity: O(1)
params:
monads: the list
returns:
a generator of the Right values
"""
return (x for x in monads if is_right(x))
def is_either(monad):
"""
Checks whether a value is an instance of Either.
Complexity: O(1)
params:
val: the value to check
returns: the truth value
"""
return is_left(monad) or is_right(monad)
def is_left(monad):
"""
Checks whether a value is an instance of Left.
Complexity: O(1)
params:
val: the value to check
returns: the truth value
"""
return isinstance(monad, Left)
def is_right(monad):
"""
Checks whether a value is an instance of Right.
Complexity: O(1)
params:
val: the value to check
returns: the truth value
"""
return isinstance(monad, Right)
def partition_eithers(monads):
"""
    Takes a list and returns a two-element tuple where the first
element is a list of all the instances of Left and the second
element is a list of all the instances of Right.
Complexity: O(1)
params:
monads: the list of monads
returns: the tuple
"""
return lefts(monads), rights(monads)
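# Illustrative usage sketch (not part of the original module); the lambdas below
# are made up to show how bind and either dispatch.
if __name__ == "__main__":
    print(Right(21).bind(lambda x: x * 2))        # 42 -- Right applies the function
    print(Left("boom").bind(lambda x: x * 2))     # the Left instance, untouched
    print(either(lambda e: "failed: %s" % e,
                 lambda v: "got %s" % v,
                 Right(7)))                       # 'got 7'
    print(list(lefts([Left(1), Right(2), Left(3)])))   # the two Left instances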
| 3.671875
| 4
|
ex0021.py
|
GantzLorran/Python
| 1
|
12776276
|
<filename>ex0021.py
import pygame
pygame.mixer.init()
pygame.mixer.music.load('FH3.mp3')
pygame.mixer.music.play()
input()
pygame.event.wait()
| 2.0625
| 2
|
webapp01.py
|
sgriffith3/2022-02-28-SDE
| 2
|
12776277
|
<reponame>sgriffith3/2022-02-28-SDE
import random
import string
from flask import Flask
app = Flask(__name__)
@app.route("/")
def main_page():
return "Howdy neighbor!"
@app.route("/rando")
def rando_str():
s = ""
for i in range(10):
s += random.choice(string.ascii_letters)
return s
if __name__ == "__main__":
app.run(port=2224, host="0.0.0.0")
| 2.734375
| 3
|
luracoin/helpers.py
|
maguayo/luracoin-python
| 12
|
12776278
|
import os
import ecdsa
import binascii
import hashlib
from typing import Union
from luracoin.config import Config
def bits_to_target(bits: bytes) -> str:
"""
The first byte is the exponent and the other three bytes are the
coefficient.
Example:
0x1d00ffff => 00000000ffff000000000000000000000000...[0x1d = 29 bytes]
"""
bits = bits.hex()
# We get the first two characters which is the first byte and convert it
    # to an integer, later we subtract three bytes which are the coefficient
    # and after that we multiply that by two, because each byte has two chars
target_exponent_number = (int(bits[0:2], 16) - 3) * 2
target_exponent = "".join(["0" for d in range(target_exponent_number)])
# The target has to be 32 bytes, so 64 characters. We need to add 0's at
# the start of the target as padding. Also here we need to add 6 because
# we need to take in account the exponent too
padding_number = 64 - target_exponent_number - 6
padding = "".join(["0" for d in range(padding_number)])
return padding + bits[2:8] + target_exponent
def sha256d(s: Union[str, bytes]) -> str:
"""A double SHA-256 hash."""
if not isinstance(s, bytes):
s = s.encode()
return hashlib.sha256(hashlib.sha256(s).digest()).hexdigest()
def mining_reward(height) -> int:
halving = int(height / Config.HALVING_BLOCKS) + 1
return int(Config.BLOCK_REWARD / halving)
def little_endian_to_int(little_endian_hex: str) -> int:
return int.from_bytes(
binascii.unhexlify(little_endian_hex), byteorder="little"
)
def is_hex(s: str) -> bool:
try:
int(s, 16)
except ValueError:
return False
return len(s) % 2 == 0
def bytes_to_signing_key(private_key: bytes) -> ecdsa.SigningKey:
return ecdsa.SigningKey.from_string(private_key, curve=ecdsa.SECP256k1)
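# Illustrative sketch (not part of the original module): the bits value is the
# example quoted in the bits_to_target docstring; the string for sha256d is made up.
if __name__ == "__main__":
    print(bits_to_target(bytes.fromhex("1d00ffff")))   # 00000000ffff00...00 (64 hex chars)
    print(sha256d("hello"))
    print(is_hex("deadbeef"), is_hex("xyz"))            # True False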
| 3.328125
| 3
|
moveit_ros/benchmarks/benchmarks/scripts/moveit_benchmark_statistics.py
|
KenmeiFusamae/moveit
| 0
|
12776279
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: <NAME>, <NAME>, <NAME>
from sys import argv, exit
import os
import sqlite3
import datetime
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser, OptionGroup
def read_benchmark_log(dbname, filenames):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512), totaltime REAL, timelimit REAL, hostname VARCHAR(1024), date DATETIME, setup TEXT)""")
c.execute("""CREATE TABLE IF NOT EXISTS known_planner_configs
(id INTEGER PRIMARY KEY AUTOINCREMENT, planner_name VARCHAR(512) NOT NULL, settings TEXT)""")
for filename in filenames:
print("Processing " + filename)
logfile = open(filename,'r')
expname = logfile.readline().split()[-1]
hostname = logfile.readline().split()[-1]
date = " ".join(logfile.readline().split()[2:])
goal_name = logfile.readline().split()[-1]
# disabled the planning request part
#logfile.readline() # skip <<<|
#expsetup = ""
#expline = logfile.readline()
#while not expline.startswith("|>>>"):
# expsetup = expsetup + expline
# expline = logfile.readline()
expsetup = ""
timelimit = float(logfile.readline().split()[0])
totaltime = float(logfile.readline().split()[0])
c.execute('INSERT INTO experiments VALUES (?,?,?,?,?,?,?)',
(None, expname, totaltime, timelimit, hostname, date, expsetup) )
c.execute('SELECT last_insert_rowid()')
experiment_id = c.fetchone()[0]
num_planners = int(logfile.readline().split()[0])
for i in range(num_planners):
planner_name = logfile.readline()[:-1]
print("Parsing data for " + planner_name)
# read common data for planner
num_common = int(logfile.readline().split()[0])
settings = ""
for j in range(num_common):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute("SELECT id FROM known_planner_configs WHERE (planner_name=? AND settings=?)", (planner_name, settings,))
p = c.fetchone()
if p==None:
c.execute("INSERT INTO known_planner_configs VALUES (?,?,?)", (None, planner_name, settings,))
c.execute('SELECT last_insert_rowid()')
planner_id = c.fetchone()[0]
else:
planner_id = p[0]
# read run properties
# number of properties to read from log file
num_properties = int(logfile.readline().split()[0])
# load a dictionary of properties and types
# we keep the names of the properties in a list as well, to ensure the correct order of properties
properties = {}
basePropNames = ['experimentid', 'plannerid', 'goal_name'] # these are the ones not from the planner directly
basePropValues = [experiment_id, planner_id, goal_name]
propNames = []
propNames.extend(basePropNames)
for j in range(num_properties):
field = logfile.readline().split()
ftype = field[-1]
fname = "_".join(field[:-1])
properties[fname] = ftype
propNames.append(fname)
# create the table, if needed
table_columns = "experimentid INTEGER, plannerid INTEGER, goal_name VARCHAR(100)"
for k, v in properties.iteritems():
table_columns = table_columns + ', ' + k + ' ' + v
table_columns = table_columns + ", FOREIGN KEY(experimentid) REFERENCES experiments(id) ON DELETE CASCADE ON UPDATE CASCADE"
table_columns = table_columns + ", FOREIGN KEY(plannerid) REFERENCES known_planner_configs(id) ON DELETE CASCADE ON UPDATE CASCADE"
planner_table = 'planner_%s' % planner_name
c.execute("CREATE TABLE IF NOT EXISTS `%s` (%s)" % (planner_table, table_columns))
# check if the table has all the needed columns; if not, add them
c.execute('SELECT * FROM `%s`' % planner_table)
added_columns = [ t[0] for t in c.description]
for col in properties.keys():
if not col in added_columns:
c.execute('ALTER TABLE `' + planner_table + '` ADD ' + col + ' ' + properties[col] + ';')
# add measurements
insert_fmt_str = 'INSERT INTO `' + planner_table + '` (' + ','.join(propNames) + ') VALUES (' + ','.join('?'*(num_properties + len(basePropNames))) + ')'
num_runs = int(logfile.readline().split()[0])
for j in range(num_runs):
run = tuple(basePropValues + [None if len(x)==0 else float(x)
for x in logfile.readline().split('; ')[:-1]])
c.execute(insert_fmt_str, run)
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plot_attribute(cur, planners, attribute, typename):
"""Create a box plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
plt.clf()
ax = plt.gca()
labels = []
measurements = []
nan_counts = []
is_bool = True
for planner in planners:
cur.execute('SELECT * FROM `%s`' % planner)
attributes = [ t[0] for t in cur.description]
if attribute in attributes:
cur.execute('SELECT `%s` FROM `%s` WHERE `%s` IS NOT NULL' % (attribute, planner, attribute))
measurement = [ t[0] for t in cur.fetchall() ]
cur.execute('SELECT count(*) FROM `%s` WHERE `%s` IS NULL' % (planner, attribute))
nan_counts.append(cur.fetchone()[0])
cur.execute('SELECT DISTINCT `%s` FROM `%s`' % (attribute, planner))
is_bool = is_bool and set([t[0] for t in cur.fetchall() if not t[0]==None]).issubset(set([0,1]))
measurements.append(measurement)
labels.append(planner.replace('planner_geometric_','').replace('planner_control_',''))
if is_bool:
width = .5
measurements_percentage = [sum(m)*100./len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurements_percentage, width)
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nan_counts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if is_bool else i+1
ax.text(x, .95*maxy, str(nan_counts[i]), horizontalalignment='center', size='small')
plt.show()
def plot_statistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plot...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
planner_names = [ t for t in table_names if t.startswith('planner_') ]
attributes = []
types = {}
experiments = []
# merge possible attributes from all planners
for p in planner_names:
c.execute('SELECT * FROM `%s` LIMIT 1' % p)
atr = [ t[0] for t in c.description]
atr.remove('plannerid')
atr.remove('experimentid')
for a in atr:
if a not in attributes:
c.execute('SELECT typeof(`%s`) FROM `%s` WHERE `%s` IS NOT NULL LIMIT 1' % (a, p, a))
attributes.append(a)
types[a] = c.fetchone()[0]
c.execute('SELECT DISTINCT experimentid FROM `%s`' % p)
eid = [t[0] for t in c.fetchall() if not t[0]==None]
for e in eid:
if e not in experiments:
experiments.append(e)
attributes.sort()
pp = PdfPages(fname)
for atr in attributes:
if types[atr]=='integer' or types[atr]=='real':
plot_attribute(c, planner_names, atr, types[atr])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
for e in experiments:
# get the number of runs, per planner, for this experiment
runcount = []
for p in planner_names:
c.execute('SELECT count(*) FROM `%s` WHERE experimentid = %s' % (p, e))
runcount.append(c.fetchone()[0])
# check if this number is the same for all planners
runs = "Number of averaged runs: "
if len([r for r in runcount if not r == runcount[0]]) > 0:
runs = runs + ", ".join([planner_names[i].replace('planner_geometric_','').replace('planner_control_','') +
"=" + str(runcount[i]) for i in range(len(runcount))])
else:
runs = runs + str(runcount[0])
c.execute('SELECT name, timelimit FROM experiments WHERE id = %s' % e)
d = c.fetchone()
plt.figtext(pagex, pagey, "Experiment '%s'" % d[0])
plt.figtext(pagex, pagey-0.05, runs)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %s seconds" % d[1])
pagey -= 0.22
plt.show()
pp.savefig(plt.gcf())
pp.close()
def save_as_mysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
last = ['experiments', 'known_planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def generate_csv(dbname, fname):
"""Create a csv file with all experiments combined into one list."""
print("Generating CSV output...")
# Open CSV File
csv = open(fname, 'w')
# Connect to database
conn = sqlite3.connect(dbname)
cursor = conn.cursor()
cursor.execute('PRAGMA FOREIGN_KEYS = ON')
# Get planner tables
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in cursor.fetchall() ]
planner_names = [ t for t in table_names if t.startswith('planner_') ]
# Create vectors
attributes = []
types = {}
experiments = []
# merge possible attributes from all planners
for planner_name in planner_names:
cursor.execute('SELECT * FROM `%s` LIMIT 1' % planner_name)
atr = [ t[0] for t in cursor.description]
atr.remove('plannerid')
atr.remove('experimentid')
for attribute in atr:
if attribute not in attributes:
cursor.execute('SELECT typeof(`%s`) FROM `%s` WHERE `%s` IS NOT NULL LIMIT 1' % (attribute, planner_name, attribute))
attributes.append(attribute) # add this new attribute (first time seen)
types[attribute] = cursor.fetchone()[0]
        # Find new experiments for this planner table and add to our experiment vector
cursor.execute('SELECT DISTINCT experimentid FROM `%s`' % planner_name)
experiment_ids = [t[0] for t in cursor.fetchall() if not t[0]==None]
for experiment_id in experiment_ids:
if experiment_id not in experiments:
experiments.append(experiment_id)
# Sort all found attributes
attributes.sort(reverse=True)
# Create header of the CSV
csv.write('planner_type')
for atr in attributes:
#if types[atr]=='integer' or types[atr]=='real':
csv.write(", %s"%atr)
csv.write('\n') # new line
    # Start creating CSV file by polling each planner table and separating its data into proper column
# format, leaving blanks where planner is missing possible attribute data
for planner_name in planner_names:
cursor.execute('SELECT * FROM `%s`' % planner_name)
# get this planner's attributes
planner_attributes = [ t[0] for t in cursor.description]
#print>>csv, planner_attributes
# loop through each row of the planner experiments, aka each 'run'
for run in cursor.fetchall():
# write a *simplified* planner name
name_short = planner_name.strip('planner')
name_short = name_short.strip('_OMPL_')
name_short = name_short.replace('[','_')
name_short = name_short.strip('kConfigDefault]')
csv.write(name_short)
# loop through each global attribute
for atr in attributes:
# find the global attribute in this table if it exists
if atr in planner_attributes:
# output value
index_of_attr = planner_attributes.index(atr)
csv.write(", %s" %run[index_of_attr])
else:
csv.write(", ")
# done with this line
csv.write("\n")
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots")
parser.add_option("-c", "--csv", dest="csv", default=None,
help="Create a CSV of combined experiments")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("-o", "--overwrite", action="store_true", dest="overwrite", default=False,
help="Use this flag to enable overwriting a previous database file with new benchmarks")
if len(argv) == 1:
parser.print_help()
(options, args) = parser.parse_args()
if len(args) > 0:
# Check if user wants to start a new database (delete old one)
if options.overwrite:
try:
os.remove(options.dbname)
except OSError:
pass
read_benchmark_log(options.dbname, args)
if options.plot:
plot_statistics(options.dbname, options.plot)
if options.csv:
generate_csv(options.dbname, options.csv)
if options.mysqldb:
save_as_mysql(options.dbname, options.mysqldb)
| 1.125
| 1
|
composer/models/__init__.py
|
jacobfulano/composer
| 2
|
12776280
|
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.models.base import BaseMosaicModel as BaseMosaicModel
from composer.models.base import MosaicClassifier as MosaicClassifier
from composer.models.classify_mnist import MNIST_Classifier as MNIST_Classifier
from composer.models.classify_mnist import MnistClassifierHparams as MnistClassifierHparams
from composer.models.efficientnetb0 import EfficientNetB0 as EfficientNetB0
from composer.models.efficientnetb0 import EfficientNetB0Hparams as EfficientNetB0Hparams
from composer.models.gpt2 import GPT2Hparams as GPT2Hparams
from composer.models.gpt2 import GPT2Model as GPT2Model
from composer.models.model_hparams import Initializer as Initializer
from composer.models.model_hparams import ModelHparams as ModelHparams
from composer.models.resnet18 import ResNet18 as ResNet18
from composer.models.resnet18 import ResNet18Hparams as ResNet18Hparams
from composer.models.resnet50 import ResNet50 as ResNet50
from composer.models.resnet50 import ResNet50Hparams as ResNet50Hparams
from composer.models.resnet56_cifar10 import CIFAR10_ResNet56 as CIFAR10_ResNet56
from composer.models.resnet56_cifar10 import CIFARResNetHparams as CIFARResNetHparams
from composer.models.resnet101 import ResNet101 as ResNet101
from composer.models.resnet101 import ResNet101Hparams as ResNet101Hparams
from composer.models.transformer_shared import MosaicTransformer as MosaicTransformer
from composer.models.unet import UNet as UNet
from composer.models.unet import UnetHparams as UnetHparams
| 1.125
| 1
|
mpi4jax/_src/flush.py
|
Thenerdstation/mpi4jax
| 122
|
12776281
|
<reponame>Thenerdstation/mpi4jax
import jax
def flush(platform):
"""Wait for all pending XLA operations"""
devices = jax.devices(platform)
for device in devices:
# as suggested in jax#4335
noop = jax.device_put(0, device=device) + 0
noop.block_until_ready()
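# Illustrative sketch (not part of the original module); "cpu" is always available
# as a JAX platform, so this should run without accelerators.
if __name__ == "__main__":
    flush("cpu")   # returns once every queued XLA operation on CPU devices is done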
| 2.1875
| 2
|
Assignments/Comprehensions/Exercise/09. Bunker.py
|
KaloyankerR/python-advanced-repository
| 0
|
12776282
|
items = {key: {} for key in input().split(", ")}
n = int(input())
for _ in range(n):
line = input().split(" - ")
category, item = line[0], line[1]
items_count = line[2].split(";")
quantity = int(items_count[0].split(":")[1])
quality = int(items_count[1].split(":")[1])
items[category][item] = (quantity, quality)
items_count = sum([sum([i[0] for i in list(items[x].values())]) for x in items])
avg_quality = sum([sum([i[1] for i in list(items[x].values())]) for x in items]) / len(items)
print(f"Count of items: {items_count}")
print(f"Average quality: {avg_quality:.2f}")
[print(f"{x} -> {', '.join(items[x])}") for x in items.keys()]
| 3.4375
| 3
|
scripts/move.py
|
xdze2/pyrobot
| 0
|
12776283
|
<gh_stars>0
import time
from threading import Thread
from pyrobot.drive import Mobility
# from pyrobot.imu import Imu
# import numpy as np
# import matplotlib.pyplot as plt
import zmq
# Prepare our context and publisher
context = zmq.Context()
subscriber = context.socket(zmq.PAIR)
subscriber.bind("tcp://*:5564")
# subscriber.setsockopt(zmq.SUBSCRIBE, b"key")
mob = Mobility()
while True:
# Read envelope with address
key = subscriber.recv()
print('->', key)
if key == b'i':
print('forward')
mob.drive(70, 0)
time.sleep(.2)
mob.drive(0, 0)
elif key == b'k':
print('backward')
mob.drive(-70, 0)
time.sleep(.2)
mob.drive(0, 0)
# [address, contents] = subscriber.recv()
# print(f"[{address}] {contents}")
# mob = Mobility()
# data = list()
# sense = Imu(data)
# th = Thread(
# target=sense.loop
# )
# th.start()
# time.sleep(.2)
# mob.drive(0, -70, 1)
# time.sleep(.2)
# mob.drive(0, 70, 1)
# time.sleep(.2)
# sense.run = False
# data = np.array(sense.data)
# plt.plot(np.cumsum(data[:, 0]), label='wx')
# plt.plot(np.cumsum(data[:, 1]), label='wy')
# plt.plot(np.cumsum(data[:, 2]), label='wz')
# plt.legend()
# plt.savefig('graph.png')
| 2.46875
| 2
|
dask/dataframe/io/orc/__init__.py
|
Juanlu001/dask
| 9,684
|
12776284
|
<filename>dask/dataframe/io/orc/__init__.py
from .core import read_orc, to_orc
| 1.140625
| 1
|
xml2ES.py
|
Fadavvi/XML2Elastic
| 4
|
12776285
|
import xmltodict
import json
from elasticsearch import Elasticsearch
from elasticsearch.connection import create_ssl_context
import sys
##By: <NAME>
def Connect2ES(ip='127.0.0.1',port='9200',user="",password="",https=False,CertPath="",ES_Index='reports',Data=""):
## Connection to Elastic Search (http/https)
raiseFieldLimit = '''
{
"index.mapping.total_fields.limit": 500000
}'''
if https :
context = create_ssl_context(cafile=CertPath)
es = Elasticsearch(
[ip],
http_auth=(user, password),
scheme="https",
port=int(port),
ssl_context=context,
)
else :
es = Elasticsearch(
[ip],
scheme="http",
port=int(port),
)
if not es.indices.exists(index=ES_Index):
es.indices.create(index=ES_Index, ignore=400,body=raiseFieldLimit)
es.index(index=ES_Index, doc_type='Report', body=Data)
def XML2JSON(address):
    # Converts any XML to JSON (tested on: ZAP, Nessus v2 and higher, Acunetix 11, OpenVAS, Arachni, Nikto, NMAP)
file = open(address,"r")
return (json.dumps(xmltodict.parse(file.read())))
if (sys.argv[1] == 'http'):
Connect2ES (ip=sys.argv[2],port=sys.argv[3],https=False,ES_Index=(sys.argv[5]),Data=XML2JSON(sys.argv[4]))
elif (sys.argv[1] == 'https'):
Connect2ES (ip=sys.argv[2],port=sys.argv[3],https=True,user=sys.argv[6],password=sys.argv[7],CertPath=sys.argv[8],ES_Index=(sys.argv[5]),Data=XML2JSON(sys.argv[4]))
else:
print ('Did not support on this version')
# Usage: python3 scanner.py [HTTP] [ES IP] [ES Port] [XML Path] [ES Index name]
# python3 scanner.py [HTTPs] [ES IP] [ES Port] [XML Path] [ES Index name] [User Name] [Password] [Cert Path]
| 2.859375
| 3
|
mala/datahandling/__init__.py
|
DanielKotik/mala
| 11
|
12776286
|
"""All functions for handling data."""
from .data_handler import DataHandler
from .data_scaler import DataScaler
from .data_converter import DataConverter
| 1.046875
| 1
|
neat/activations.py
|
greenmachine1902/NEAT
| 0
|
12776287
|
from __future__ import annotations
import random
from mattslib import math_util
__version__ = '1.4.1'
__date__ = '19/03/2022'
def getActivation(activation: str = '') -> activation_function:
"""
Returns the requested or a random activation function.
:param activation: str
:return:
- activation_function - (x: int | float) -> int | float
"""
activations = {'absolute': math_util.absolute,
'binaryStep': math_util.binaryStep,
'clamped': math_util.clamped,
'identity': math_util.identity,
'log': math_util.log,
'tanh': math_util.tanh,
'leakyReLU': math_util.leakyReLU,
'sigmoid': math_util.sigmoid,
'swish': math_util.swish}
activation_function = activations[activation] if activation in activations \
else random.choice(list(activations.values()))
return activation_function
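# Illustrative sketch (not part of the original module); assumes the mattslib
# functions follow the unary signature documented above.
if __name__ == "__main__":
    sigmoid = getActivation('sigmoid')
    print(sigmoid(0))              # 0.5 for a standard sigmoid
    print(getActivation()(1.0))    # result of a randomly chosen activation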
| 2.96875
| 3
|
venv/Lib/site-packages/psychopy/demos/coder/stimuli/aperture.py
|
mintzer/pupillometry-rf-back
| 0
|
12776288
|
<filename>venv/Lib/site-packages/psychopy/demos/coder/stimuli/aperture.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo for the class psychopy.visual.Aperture().
Draw two gabor circles, one with an irregular aperture and one with no aperture.
"""
from __future__ import division
from psychopy import visual, event
# Need to allowStencil=True for a window with an Aperture:
win = visual.Window([400, 400], allowStencil=True, units='norm')
instr = visual.TextStim(win, text="Any key to quit", pos=(0, -.7))
gabor1 = visual.GratingStim(win, mask='circle', sf=4, size=1.2, color=[0.5, -0.5, 1])
gabor2 = visual.GratingStim(win, mask='circle', sf=4, size=1.2, color=[-0.5, -0.5, -1])
vertices = [(-0.02, -0.0), (-.8, .2), (0, .6), (.1, 0.06), (.8, .3), (.6, -.4)]
# `size` in Aperture refers to the diameter when shape='circle';
# vertices or other shapes are scaled accordingly
aperture = visual.Aperture(win, size=0.9, shape=vertices) # try shape='square'
aperture.enabled = False # enabled by default when created
gabor1.draw()
instr.draw()
# drawing will now only be done within the aperture shape:
aperture.enabled = True
gabor2.draw()
win.flip()
event.waitKeys()
win.close()
# The contents of this file are in the public domain.
| 3.21875
| 3
|
stackoverflow/question/models.py
|
vivekdhinoja/stackoverflow
| 0
|
12776289
|
<reponame>vivekdhinoja/stackoverflow<filename>stackoverflow/question/models.py
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django_extensions.db.models import TimeStampedModel
from taggit.managers import TaggableManager
# Create your models here.
class Question(TimeStampedModel):
title = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True)
description = RichTextUploadingField()
tags = TaggableManager()
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='questions')
class Meta:
verbose_name_plural = 'Questions'
ordering = ['-created']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse_lazy("question:question-detail", kwargs={"slug": self.slug})
| 2.25
| 2
|
python/MarkovChain.py
|
alessandrobondielli/alessandrobondielli.github.com
| 0
|
12776290
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np  # needed by plot_gen below
import numpy.random as random
class MarkovChain:
"""Simple Markov Chain Model.
Parameters
----------
n : int
Number of states.
p : list, len (n)
Base probabilities.
T : list of list, shape (n, n)
Transition probabilities
states : list, len (n), optional
List of string labels for the states.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
Attributes
----------
    state : int
        Index of the current state of the chain.
Examples
--------
>>> mc = MarkovChain(n=2, P = [0.3,0.7], T =[[0.2,0.5],[0.8,0.5]],
states = ['Rain','Sunny'])
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
>>> mc.move()
>>> mc.get_state()
'Sunny'
"""
def __init__(self, n, P, T, states=None, verbose= False):
"""Simple Markov chain model.
"""
assert len(P) == n, "Probability vector should be of size %d" %n
assert len(T) == len(T[0]) and len(T) == n, "Transition matrix should be of size %d" %n
        assert states is None or len(states) == n, "States vector should be of size %d" %n
# Number of states of the MarkovChain
self.n = n
self.states = states
self.p = P
self.T = T
self.state = random.choice(range(self.n),1,p=self.p)[0]
self.verbose = verbose
def check_state(self):
state = self.states[self.state] if self.states else self.state
if self.verbose:
print 'Current State: %s' % (state)
def set_state(self, state):
"""Set the state for the MarkovChain to the specified state.
Can be used for initialization.
Parameters
----------
state : int, or string
the state to set; can be either the index of the state,
or its string label, if labels have been provided at initialization
time.
Returns
-------
None
"""
self.state = self.states.index(state) if self.states else state
if self.verbose:
state = self.states[self.state] if self.states else self.state
print 'State is now: %s' % (state)
def get_state(self):
"""Get the state of the markov chain.
Parameters
----------
Returns
-------
state : int, or string
the current state to of the Markov Chain; can be either the index of the state,
or its string label, if labels have been provided at initialization
time.
"""
state = self.states[self.state] if self.states else self.state
return state
def move(self):
"""Sample num_samples samples from the Markov Model.
Parameters
----------
Returns
-------
mutated : boolean
true if the the model state has changed during the current update.
"""
current_state = self.state
self.state = random.choice(range(self.n),1,p=self.T[current_state])[0]
state = self.states[self.state] if self.states else self.state
if self.verbose:
print 'New State: %s' % (state)
return self.state != current_state
# Jukes e Cantor
# alpha < 1/3.
def JC69(nucleotide):
"""Initialize a Markov Chain for a nucleotide, using a Jukes e Cantor model.
Parameters
----------
nucleotide: string, must be one of ['A', 'C', 'G', 'T']
A nucleotide used to initialize the model.
Returns
-------
mc : MarkovChain object.
The MarkovChain object, using a Jukes Cantor Model initialized to the provided
nucleotide.
Examples
--------
>>> nuc_mc = JC69('A')
<__main__.MarkovChain instance at 0x1a22066638>
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
>>> nuc_mc.get_state()
'A'
>>> nuc_mc.move()
'True'
>>> nuc_mc.get_state()
'C'
"""
n = 4
prob = [0.25, 0.25, 0.25, 0.25]
alpha = 0.005
JC69_sub = [[1-alpha*3, alpha, alpha, alpha],
[alpha, 1-alpha*3, alpha, alpha],
[alpha, alpha, 1-alpha*3, alpha],
[alpha, alpha, alpha, 1-alpha*3]]
states = list('ACGT')
mc = MarkovChain(n, prob, JC69_sub, states = states)
mc.set_state(nucleotide)
return mc
# plot of the distribution
def plot_gen(total_arr):
total_v = total_arr.mean(axis=0)
fig = plt.figure()
ax = plt.plot(total_v,range(length*5))
plt.plot(range(length),range(length))
plt.plot([75,75],[0,500],'--')
c_p = np.linspace(0,0.75,750,endpoint=False)
k_p = -3./4 * np.log(1- 4./3 *c_p)
plt.plot(c_p*length,k_p*length)
plt.xlabel('c')
plt.ylabel('k')
return plt
| 3.21875
| 3
|
BOJ/04000~04999/4400~4499/4447.py
|
shinkeonkim/today-ps
| 2
|
12776291
|
cnt = int(input())
for i in range(cnt):
s=input()
a=s.lower()
g=a.count('g')
b=a.count('b')
print(s,"is",end=" ")
if g == b:
print("NEUTRAL")
elif g>b:
print("GOOD")
else:
print("A BADDY")
| 3.59375
| 4
|
sandbox/scripts/doc_embeddings.py
|
GOALCLEOPATRA/MLM
| 0
|
12776292
|
<reponame>GOALCLEOPATRA/MLM
import torch
import flair
from flair.data import Sentence
from flair.embeddings import FlairEmbeddings, BertEmbeddings, DocumentPoolEmbeddings
# set device
torch.cuda.set_device(2)
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
flair.device = DEVICE
class DocEmbeddings(object):
def __init__(self, model_name):
super().__init__()
assert model_name in ['flair', 'bert', 'use']
self.model_name = model_name
self.document_embeddings = self.load_embedding_model(model_name)
def load_embedding_model(self, name):
if name == 'flair':
print('Loading Flair Embeddings...')
flair_embedding_forward = FlairEmbeddings('news-forward')
flair_embedding_backward = FlairEmbeddings('news-backward')
return DocumentPoolEmbeddings([flair_embedding_forward, flair_embedding_backward])
elif name == 'bert':
print('Loading BERT Embeddings...')
bert_embeddings = BertEmbeddings('bert-base-uncased') # bert-base-multilingual-cased
return DocumentPoolEmbeddings([bert_embeddings])
print('Done!')
def embed(self, tokens):
x = Sentence(tokens if self.model_name == 'flair' else tokens[:500]) # for bert we use the first 500 tokens
self.document_embeddings.embed(x)
return x.embedding
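# Illustrative sketch (not part of the original module): embeds a made-up string
# with the BERT pipeline above. Note the module pins CUDA device 2 at import
# time, so this assumes a matching GPU setup (or that flair falls back to CPU).
if __name__ == "__main__":
    embedder = DocEmbeddings('bert')
    vector = embedder.embed("a short test document")
    print(vector.shape)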
| 2.421875
| 2
|
helper_functions/test.py
|
cleveranjos/Rapid-ML-Gateway
| 3
|
12776293
|
<gh_stars>1-10
import python_finance as pf
def main():
tickers = ['AAPL', 'MSFT', 'NFLX', 'AMZN', 'GOOG']
tickers_b = ['DOCU', 'SHOP', 'WMT']
start = '20200101'
end = '20201007'
attrib = 'Adj Close'
#print(pf.get_tickers(tickers, start, end, attrib))
#print(pf.get_Percent_change(tickers, start, end, attrib))
#print(pf.get_Mean_Daily_Return(tickers, start, end, attrib))
#print(pf.get_Cov_Matrix(tickers, start, end, attrib))
print(pf.get_tickers(tickers_b, start, end, attrib))
print(pf.get_Percent_change(tickers_b, start, end, attrib))
mean_returns = pf.get_Mean_Daily_Return(tickers_b, start, end, attrib)
print(mean_returns)
cov = pf.get_Cov_Matrix(tickers_b, start, end, attrib)
print(cov)
num_portfolios = 100000
rf = 0
print(pf.simulate_random_portfolios(num_portfolios, mean_returns, cov, rf, tickers_b))
if __name__ == "__main__":
main()
| 2.5625
| 3
|
game/state.py
|
MeinAccount/ea-snake
| 0
|
12776294
|
import random
from collections import deque
from typing import Tuple
from game.direction import Directions, GRID_WIDTH, GRID_HEIGHT
class GameState:
apple_pos = (0, 0)
positions = None
direction = 0
def __init__(self, start_pos: Tuple[int, int], length: int = 3) -> None:
self.length = length
self.positions = deque()
self.positions.append(start_pos)
for i in range(1, length):
self.positions.append(Directions.apply(2, self.positions[i - 1]))
self.apple_replace()
def move(self) -> Tuple[bool, bool]:
new_pos = Directions.apply(self.direction, self.positions[0])
# check for edge
if new_pos[0] == -1 or new_pos[0] == GRID_WIDTH or new_pos[1] == -1 or new_pos[1] == GRID_HEIGHT:
return False, False
# check for apple
has_grown = self.apple_pos == new_pos
if has_grown:
self.length += 1
self.apple_replace()
else:
self.positions.pop()
# check for self intersection
self_intersection = new_pos in self.positions
self.positions.appendleft(new_pos)
return not self_intersection, has_grown
def apple_replace(self):
self.apple_pos = (random.randrange(0, GRID_WIDTH), random.randrange(0, GRID_HEIGHT))
while self.apple_pos in self.positions:
self.apple_pos = (random.randrange(0, GRID_WIDTH), random.randrange(0, GRID_HEIGHT))
def distance_to_apple(self):
x, y = self.positions[0]
return abs(x - self.apple_pos[0]) + abs(y - self.apple_pos[1])
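# A minimal usage sketch (assumed, not part of the original module): creates a
# snake in the middle of the grid and advances it a few steps with the default
# direction, printing the outcome of each move.
if __name__ == '__main__':
    state = GameState(start_pos=(GRID_WIDTH // 2, GRID_HEIGHT // 2))
    for step in range(5):
        alive, grew = state.move()
        print(f"step={step} alive={alive} grew={grew} "
              f"distance_to_apple={state.distance_to_apple()}")
        if not alive:
            break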
| 3.546875
| 4
|
sqlite/funcs/db_creation.py
|
vrcunha/db_sql_and_nosql
| 1
|
12776295
|
import os
from dotenv import load_dotenv, find_dotenv
from sqlalchemy.sql.sqltypes import Integer, REAL, TEXT
from sqlalchemy import (create_engine, MetaData, Column, Table)
from sqlalchemy_utils import database_exists, create_database
load_dotenv(find_dotenv())
engine = create_engine(f"sqlite:///{os.getenv('SQLITE_DB_NAME')}", echo=False)
if not database_exists(engine.url):
create_database(engine.url)
metadata = MetaData(bind=engine)
produtos = Table('products', metadata,
Column('id', Integer, primary_key=True),
Column('name', TEXT(50), nullable=False),
Column('price', REAL(8,2), nullable=False),
Column('stock', TEXT(50), nullable=False)
)
metadata.create_all()
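# A minimal usage sketch (assumed, not part of the original script): inserts an
# illustrative row into the freshly created 'products' table and reads it back,
# relying on the SQLAlchemy 1.x implicit-commit style already used above.
with engine.connect() as conn:
    conn.execute(produtos.insert().values(name='demo product', price=9.99, stock='10'))
    for row in conn.execute(produtos.select()):
        print(row)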
| 2.515625
| 3
|
tryalgo/fenwick.py
|
siebenbrunner/tryalgo
| 0
|
12776296
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Fenwick tree
<NAME> et <NAME> - 2014-2018
"""
# snip{
class Fenwick:
"""maintains a tree to allow quick updates and queries
"""
def __init__(self, t):
"""stores a table t and allows updates and queries
of prefix sums in logarithmic time.
:param array t: with numerical values
"""
self.s = [0] * (len(t) + 1) # create internal storage
for a in range(len(t)):
self.add(a, t[a]) # initialize
# pylint: disable=redefined-builtin
def prefixSum(self, a):
"""
:param int a: index in t, negative a will return 0
:returns: t[0] + ... + t[a]
"""
i = a + 1 # internal index starts at 1
total = 0
while i > 0: # loops over neighbors
total += self.s[i] # cumulative sum
i -= (i & -i) # left neighbor
return total
def intervalSum(self, a, b):
"""
:param int a b: with 0 <= a <= b
:returns: t[a] + ... + t[b]
"""
return self.prefixSum(b) - self.prefixSum(a-1)
def add(self, a, val):
"""
:param int a: index in t
:modifies: adds val to t[a]
"""
i = a + 1 # internal index starts at 1
while i < len(self.s): # loops over parents
self.s[i] += val # update node
i += (i & -i) # parent
# variante:
# pylint: disable=bad-whitespace
def intervalAdd(self, a, b, val):
"""Variant, adds val to t[a], to t[a + 1] ... and to t[b]
:param int a b: with 0 <= a <= b < len(t)
"""
self.add(a, +val)
self.add(b + 1, -val)
def get(self, a):
"""Variant, reads t[a]
:param int i: negative a will return 0
"""
return self.prefixSum(a)
# snip}
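# A minimal usage sketch (not part of the original module): builds a Fenwick
# tree over a small illustrative table and checks a few prefix/interval sums
# against the values computed by hand.
if __name__ == "__main__":
    t = [5, 1, 3, 7, 2]
    fen = Fenwick(t)
    assert fen.prefixSum(2) == 5 + 1 + 3        # t[0] + t[1] + t[2]
    assert fen.intervalSum(1, 3) == 1 + 3 + 7   # t[1] + t[2] + t[3]
    fen.add(2, 10)                              # t[2] becomes 13
    assert fen.intervalSum(2, 2) == 13
    print("Fenwick sanity checks passed")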
| 3.515625
| 4
|
aliyun_exporter/info_provider.py
|
harvey-zang/aliyun-exporter
| 1
|
12776297
|
import json
import time
import datetime
from aliyunsdkcore.client import AcsClient
from cachetools import cached, TTLCache
from prometheus_client.metrics_core import GaugeMetricFamily
import aliyunsdkecs.request.v20140526.DescribeInstancesRequest as DescribeECS
import aliyunsdkrds.request.v20140815.DescribeDBInstancesRequest as DescribeRDS
import aliyunsdkr_kvstore.request.v20150101.DescribeInstancesRequest as DescribeRedis
import aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest as DescribeSLB
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerAttributeRequest as DescribeSLBAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerTCPListenerAttributeRequest as DescribeSLBTcpAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPListenerAttributeRequest as DescribeSLBHttpAttr
import aliyunsdkslb.request.v20140515.DescribeLoadBalancerHTTPSListenerAttributeRequest as DescribeSLBHttpsAttr
import aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest as Mongodb
import aliyunsdkcdn.request.v20180510.DescribeUserDomainsRequest as DescribeCDN
from aliyun_exporter.utils import try_or_else
cache = TTLCache(maxsize=100, ttl=3600)
'''
InfoProvider provides the information of cloud resources as metric.
The result from alibaba cloud API will be cached for an hour.
Different resources should implement its own 'xxx_info' function.
Different resource has different information structure, and most of
them are nested, for simplicity, we map the top-level attributes to the
labels of metric, and handle nested attribute specially. If a nested
attribute is not handled explicitly, it will be dropped.
'''
class InfoProvider():
def __init__(self, client: AcsClient):
self.client = client
@cached(cache)
def get_metrics(self, resource: str) -> GaugeMetricFamily:
return {
'ecs': lambda : self.ecs_info(),
'rds': lambda : self.rds_info(),
'cdn': lambda : self.cdn_info(),
'redis': lambda : self.redis_info(),
'slb':lambda : self.slb_info(),
'mongodb':lambda : self.mongodb_info(),
}[resource]()
def ecs_info(self) -> GaugeMetricFamily:
req = DescribeECS.DescribeInstancesRequest()
nested_handler = {
'InnerIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'PublicIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'VpcAttributes': lambda obj : try_or_else(lambda : obj['PrivateIpAddress']['IpAddress'][0], ''),
}
return self.info_template(req, 'aliyun_meta_ecs_info', nested_handler=nested_handler)
def rds_info(self) -> GaugeMetricFamily:
req = DescribeRDS.DescribeDBInstancesRequest()
return self.info_template(req, 'aliyun_meta_rds_info', to_list=lambda data: data['Items']['DBInstance'])
def redis_info(self) -> GaugeMetricFamily:
req = DescribeRedis.DescribeInstancesRequest()
return self.info_template(req, 'aliyun_meta_redis_info', to_list=lambda data: data['Instances']['KVStoreInstance'])
def slb_info(self) -> GaugeMetricFamily:
req = DescribeSLB.DescribeLoadBalancersRequest()
gauge = self.info_template(req, 'aliyun_meta_slb_info', to_list=lambda data: data['LoadBalancers']['LoadBalancer'])
gauge_slb_info = None
for s in gauge.samples:
slb_id = s.labels['LoadBalancerId']
req_slb_attr = DescribeSLBAttr.DescribeLoadBalancerAttributeRequest()
req_slb_attr.set_LoadBalancerId(slb_id)
slb_attrs_resp = self.client.do_action_with_exception(req_slb_attr)
slb_attrs_info = json.loads(slb_attrs_resp)
for protocol_info in slb_attrs_info['ListenerPortsAndProtocol']['ListenerPortAndProtocol']:
protocol = protocol_info['ListenerProtocol']
port = protocol_info['ListenerPort']
req_slb_proto = None
if protocol == 'tcp':
req_slb_proto = DescribeSLBTcpAttr.DescribeLoadBalancerTCPListenerAttributeRequest()
elif protocol == 'http':
req_slb_proto = DescribeSLBHttpAttr.DescribeLoadBalancerHTTPListenerAttributeRequest()
                elif protocol == 'https':
                    req_slb_proto = DescribeSLBHttpsAttr.DescribeLoadBalancerHTTPSListenerAttributeRequest()
                else:
                    continue  # skip listener protocols (e.g. udp) that are not handled here
                req_slb_proto.set_LoadBalancerId(slb_id)
req_slb_proto.set_ListenerPort(int(port))
slb_protocol_resp = self.client.do_action_with_exception(req_slb_proto)
slb_protocol_info: dict = json.loads(slb_protocol_resp)
if 'ForwardCode' in slb_protocol_info.keys():
continue
Bandwidth = slb_protocol_info['Bandwidth']
if gauge_slb_info is None:
gauge_slb_info = GaugeMetricFamily('aliyun_meta_slb_proto_bandwidth', 'protocolBandwidth', labels=['instanceId', 'ListenerProtocol', 'ListenerPort'])
gauge_slb_info.add_metric([slb_id, protocol, str(port)], value=float(Bandwidth))
return gauge_slb_info
def mongodb_info(self) -> GaugeMetricFamily:
req = Mongodb.DescribeDBInstancesRequest()
return self.info_template(req, 'aliyun_meta_mongodb_info', to_list=lambda data: data['DBInstances']['DBInstance'])
    def cdn_info(self) -> GaugeMetricFamily:
        req = DescribeCDN.DescribeUserDomainsRequest()
        req.set_DomainStatus('online')
        nested_handler = {
            'DomainName': lambda obj: try_or_else(lambda: obj['DomainName'], ''),
        }
        return self.info_template(req, 'aliyun_meta_cdn_info', nested_handler=nested_handler, to_list=lambda data: data['Domains']['PageData'])
'''
Template method to retrieve resource information and transform to metric.
'''
def info_template(self,
req,
name,
desc='',
page_size=100,
page_num=1,
nested_handler=None,
to_list=(lambda data: data['Instances']['Instance'])) -> GaugeMetricFamily:
gauge = None
label_keys = None
for instance in self.pager_generator(req, page_size, page_num, to_list):
if gauge is None:
label_keys = self.label_keys(instance, nested_handler)
gauge = GaugeMetricFamily(name, desc, labels=label_keys)
gauge.add_metric(labels=self.label_values(instance, label_keys, nested_handler), value=1.0)
return gauge
def info_template_bytime(self,
req,
name,
desc='',
label_keys=None,
nested_handler=None,
to_value=(lambda data: data['Instances']['Instance'])) -> GaugeMetricFamily:
value = self.generator_by_time(req, to_value)
gauge = GaugeMetricFamily(name, desc, labels=label_keys)
gauge.add_metric(labels=[value], value=1.0)
return gauge
def pager_generator(self, req, page_size, page_num, to_list):
req.set_PageSize(page_size)
while True:
req.set_PageNumber(page_num)
resp = self.client.do_action_with_exception(req)
data = json.loads(resp)
instances = to_list(data)
            for instance in instances:
                # skip test CDN domains; resources without a 'DomainName' key are kept
                if 'test' not in instance.get('DomainName', ''):
                    yield instance
if len(instances) < page_size:
break
page_num += 1
def generator_by_time(self, req, to_value):
now = time.time() - 60
start_time = datetime.datetime.utcfromtimestamp(now-120).strftime("%Y-%m-%dT%H:%M:%SZ")
end_time = datetime.datetime.utcfromtimestamp(now).strftime("%Y-%m-%dT%H:%M:%SZ")
req.set_accept_format('json')
req.set_StartTime(start_time)
req.set_EndTime(end_time)
resp = self.client.do_action_with_exception(req)
value = to_value(resp)
return value
def label_keys(self, instance, nested_handler=None):
if nested_handler is None:
nested_handler = {}
return [k for k, v in instance.items()
if k in nested_handler or isinstance(v, str) or isinstance(v, int)]
def label_values(self, instance, label_keys, nested_handler=None):
if nested_handler is None:
nested_handler = {}
return map(lambda k: str(nested_handler[k](instance[k])) if k in nested_handler else try_or_else(lambda: str(instance[k]), ''),
label_keys)
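# A minimal usage sketch (assumed, not part of the original module): the access
# key, secret and region below are placeholders, not real values. It fetches the
# ECS info metric and prints the label set of every sample.
if __name__ == '__main__':
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    provider = InfoProvider(client)
    gauge = provider.get_metrics('ecs')
    if gauge is not None:
        for sample in gauge.samples:
            print(sample.labels)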
| 1.804688
| 2
|
karmabot/settings.py
|
meganlkm/karmabot
| 15
|
12776298
|
<filename>karmabot/settings.py
# Copyright (c) 2019 Target Brands, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import current_app
import hvac
from datetime import datetime
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING')
USE_VAULT = os.environ.get('USE_VAULT', "False").lower() in ['true', '1', 't', 'y', 'yes']
vault = None
vault_base_path = None
if USE_VAULT:
vault = hvac.Client(url=os.environ.get('VAULT_URI'), token=os.environ.get('VAULT_TOKEN'))
vault_base_path = os.environ.get('VAULT_BASE', "secret")
VERIFICATION_TOKEN = os.environ.get('VERIFICATION_TOKEN', '')
MONGODB = os.environ.get('MONGODB', 'mongodb://localhost:27017')
FAKE_SLACK = os.environ.get('FAKE_SLACK', "False").lower() in ['true', '1', 't', 'y', 'yes']
SLACK_EVENTS_ENDPOINT = os.environ.get("SLACK_EVENTS_ENDPOINT", "/slack_events")
# Number of "gifts" per hour (note: quantity in gifts is not considered)
KARMA_RATE_LIMIT = os.environ.get('KARMA_RATE_LIMIT', 60)
# Number of days karma is good for
KARMA_TTL = os.environ.get('KARMA_TTL', 90)
# Color to use for stuff
KARMA_COLOR = os.environ.get('KARMA_COLOR', '#af8b2d')
def get_access_token(workspace):
if USE_VAULT:
return _vault_get_access_token(workspace)
else:
return _env_get_access_token(workspace)
def get_bot_token(workspace):
if USE_VAULT:
return _vault_get_bot_token(workspace)
else:
return _env_get_bot_token(workspace)
def _env_get_access_token(workspace):
current_app.logger.debug(f"DEBUG: Got request for {workspace} workspace (env)")
return os.environ.get(f"ACCESS_{workspace}", None)
def _env_get_bot_token(workspace):
current_app.logger.debug(f"DEBUG: Got request for {workspace} workspace (env)")
return os.environ.get(f"BOT_{workspace}", None)
# vault cache
_access_token_cache = {}
_bot_token_cache = {}
_vault = None
_TTL = int(os.environ.get('VAULT_CACHE_TTL', 300))  # Measured in seconds
def _vault_get_access_token(workspace):
global _access_token_cache, _TTL
current_app.logger.debug(f"DEBUG: Got request for {workspace} workspace (vault)")
n = datetime.now().timestamp()
(token, ts) = _access_token_cache.get(workspace, (None, 0))
if ts + _TTL > n:
return token
try:
token_data = vault.secrets.kv.v1.read_secret(f'{vault_base_path}/access_{workspace}.txt')
token = token_data['data']['value']
_access_token_cache[workspace] = (token, n)
return token
except Exception as ex:
current_app.logger.warning(f"Had a problem getting a token for workspace {workspace}:\n{ex}")
def _vault_get_bot_token(workspace):
global _vault, _bot_token_cache, _TTL, vault, vault_base_path
current_app.logger.debug(f"DEBUG: Got request for {workspace} workspace (vault)")
n = datetime.now().timestamp()
(token, ts) = _bot_token_cache.get(workspace, (None, 0))
if ts + _TTL > n:
return token
try:
token_data = vault.secrets.kv.v1.read_secret(f'{vault_base_path}/bot_{workspace}.txt')
token = token_data['data']['value']
_bot_token_cache[workspace] = (token, n)
return token
except Exception as ex:
current_app.logger.warning(f"Had a problem getting a token for workspace {workspace}:\n{ex}")
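# A minimal usage sketch (assumed, not part of the original module): with
# USE_VAULT unset, bot tokens are read from BOT_<workspace> environment
# variables. The workspace name and token below are illustrative only; an app
# context is needed because the helpers log through Flask's current_app.
if __name__ == '__main__':
    from flask import Flask
    os.environ.setdefault('BOT_demo', '<example-bot-token>')
    with Flask(__name__).app_context():
        print(get_bot_token('demo'))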
| 1.710938
| 2
|
ASCII_value.py
|
RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2-
| 0
|
12776299
|
<reponame>RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2-<filename>ASCII_value.py
print("Enter a character:\n")
ch=input()
while(len(ch)!=1):
print("\nShould Enter single Character...RETRY!")
ch=input()
print(ord(ch))
| 3.3125
| 3
|
nlde/query/triple_pattern.py
|
Lars-H/federated_crop
| 3
|
12776300
|
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2 fallback
from . import Argument
class TriplePattern(object):
def __init__(self, s, p, o, **kwargs):
if isinstance(s, Argument):
self.subject = s
else:
self.subject = Argument(s)
if isinstance(p, Argument):
self.predicate = p
else:
self.predicate = Argument(p)
if isinstance(o, Argument):
self.object = o
else:
self.object = Argument(o)
self.count = kwargs.get("count", None)
self.distinct_subjects = kwargs.get("subjects", None)
self.distinct_predicates = kwargs.get("predicates", None)
self.distinct_objects = kwargs.get("objects", None)
self.sources = kwargs.get("sources", {})
self.subject_auths = {}
self.object_auths = {}
self.id = -1
def __key(self):
return (self.subject, self.predicate, self.object)
def __hash__(self):
return hash(self.__key())
def __repr__(self):
return str(self.subject) + " " + str(self.predicate) + " " + str(self.object) + " ."
def __str__(self):
return repr(self)
# Less than: for Sorting
def __lt__(self, other):
return hash(self) < hash(other)
# Equals: for Sorting
def __eq__(self, other):
return hash(self) == hash(other)
# Length
def __len__(self):
return 1
@property
def cardinality(self):
return self.count
@cardinality.setter
def cardinality(self, cardinality):
self.count = cardinality
@property
def selectivity(self):
        try:
            sel = min(
                float(1.0 / self.distinct_subjects),
                float(1.0 / self.distinct_predicates),
                float(1.0 / self.distinct_objects))
            return sel
        except (TypeError, ZeroDivisionError):
            # statistics missing (None) or zero: fall back to zero selectivity
            return 0.0
def get_variables(self):
variables = []
if not self.subject.isconstant:
variables.append(self.subject.get_variable())
if not self.predicate.isconstant:
variables.append(self.predicate.get_variable())
if not self.object.isconstant:
variables.append(self.object.get_variable())
return set(variables)
def __getitem__(self, i):
if i == 0:
return self.subject
elif i == 1:
return self.predicate
elif i == 2:
return self.object
elif i == 3:
return self.variables
else:
raise IndexError()
@property
def variables(self):
return self.get_variables()
def compatible(self, T):
return len((self.variables.intersection(T.variables))) > 0
@property
def variables_dict(self):
v_dict = {
"s" : [],
"p" : [],
"o" : []
}
if not self.subject.isconstant:
v_dict['s'] = list([self.subject.get_variable()])
if not self.predicate.isconstant:
v_dict['p'] = list([self.predicate.get_variable()])
if not self.object.isconstant:
v_dict['o'] = list([self.object.get_variable()])
return v_dict
def get_variable_position(self, var):
positions = 0
if not self.subject.isconstant and self.subject.get_variable() == var:
positions = positions | 4
if not self.predicate.isconstant and self.predicate.get_variable() == var:
positions = positions | 2
if not self.object.isconstant and self.object.get_variable() == var:
positions = positions | 1
return positions
@property
def variable_position(self):
positions = 0
if not self.subject.isconstant and self.subject.isvariable():
positions = positions | 4
if not self.predicate.isconstant and self.predicate.isvariable():
positions = positions | 2
if not self.object.isconstant and self.object.isvariable():
positions = positions | 1
return positions
@property
def source_set(self):
return set(self.sources.keys())
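# A minimal usage sketch (assumed, not part of the original module): builds a
# pattern with a variable subject and constant predicate/object and inspects
# it. The exact output depends on how the package's Argument class classifies
# the illustrative strings below.
if __name__ == '__main__':
    tp = TriplePattern('?s', '<http://xmlns.com/foaf/0.1/name>', '"Alice"')
    print(tp)                             # the pattern rendered by __repr__
    print(tp.variables)                   # e.g. the set containing 's'
    print(tp.get_variable_position('s'))  # 4 when only the subject is a variable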
| 2.640625
| 3
|
src/frange.py
|
ttheikki2/Usadel-for-nanowires
| 0
|
12776301
|
import sys
def frange(start, end=None, inc=None):
    """A range function that accepts floats."""
    if end is None:
        end = start + 0.0
        start = 0.0
    else:
        start += 0.0  # force it to be a float
    if inc is None:
        inc = 1.0
    count = int((end - start) / inc)
    if start + count * inc != end + inc:
        count += 1
    L = [None] * count
    for i in range(count):  # xrange does not exist in Python 3
        L[i] = start + i * inc
    return L
start = float(sys.argv[1])
end = float(sys.argv[2])
inc = float(sys.argv[3])
for item in frange(start, end, inc):
print("%.2f" % item)
| 3.96875
| 4
|
OpenGLCffi/GL/EXT/ARB/transform_feedback_instanced.py
|
cydenix/OpenGLCffi
| 0
|
12776302
|
<filename>OpenGLCffi/GL/EXT/ARB/transform_feedback_instanced.py
from OpenGLCffi.GL import params
@params(api='gl', prms=['mode', 'id', 'instancecount'])
def glDrawTransformFeedbackInstanced(mode, id, instancecount):
pass
@params(api='gl', prms=['mode', 'id', 'stream', 'instancecount'])
def glDrawTransformFeedbackStreamInstanced(mode, id, stream, instancecount):
pass
| 1.726563
| 2
|
gym-duckietown/learning/imitation/iil_dagger/algorithms/__init__.py
|
lyf44/CS4278-5478-Project-Materials
| 4
|
12776303
|
from .dagger import DAgger
| 1.03125
| 1
|
starfish/pipeline/registration/fourier_shift.py
|
Xiaojieqiu/starfish
| 1
|
12776304
|
<filename>starfish/pipeline/registration/fourier_shift.py
from typing import Union
from starfish.constants import Indices
from starfish.image import ImageStack
from starfish.util.argparse import FsExistsType
from ._base import RegistrationAlgorithmBase
class FourierShiftRegistration(RegistrationAlgorithmBase):
"""
Implements fourier shift registration. TODO: (dganguli) FILL IN DETAILS HERE PLS.
Performs a simple translation registration.
See Also
--------
https://en.wikipedia.org/wiki/Phase_correlation
"""
def __init__(self, upsampling: int, reference_stack: Union[str, ImageStack], **kwargs) -> None:
self.upsampling = upsampling
if isinstance(reference_stack, ImageStack):
self.reference_stack = reference_stack
else:
self.reference_stack = ImageStack.from_path_or_url(reference_stack)
@classmethod
def add_arguments(cls, group_parser):
group_parser.add_argument("--upsampling", default=1, type=int, help="Amount of up-sampling")
group_parser.add_argument(
"--reference-stack", type=FsExistsType(), required=True,
help="The image stack to align the input image stack to.")
def register(self, image: ImageStack):
# TODO: (ambrosejcarr) is this the appropriate way of dealing with Z in registration?
mp = image.max_proj(Indices.CH, Indices.Z)
reference_image = self.reference_stack.max_proj(Indices.HYB, Indices.CH, Indices.Z)
for h in range(image.num_hybs):
# compute shift between maximum projection (across channels) and dots, for each hyb round
# TODO: make the max projection array ignorant of axes ordering.
shift, error = compute_shift(mp[h, :, :], reference_image, self.upsampling)
print("For hyb: {}, Shift: {}, Error: {}".format(h, shift, error))
for c in range(image.num_chs):
for z in range(image.num_zlayers):
# apply shift to all zlayers, channels, and hyb rounds
indices = {Indices.HYB: h, Indices.CH: c, Indices.Z: z}
data, axes = image.get_slice(indices=indices)
assert len(axes) == 0
result = shift_im(data, shift)
image.set_slice(indices=indices, data=result)
return image
def compute_shift(im, ref, upsample_factor=1):
from skimage.feature import register_translation
shift, error, diffphase = register_translation(im, ref, upsample_factor)
return shift, error
def shift_im(im, shift):
    import numpy as np
    from scipy.ndimage import fourier_shift
    # negate the shift; use a list rather than a lazy map object so scipy/numpy
    # can treat it as a sequence
    fim_shift = fourier_shift(np.fft.fftn(im), [-x for x in shift])
    im_shift = np.fft.ifftn(fim_shift)
    return im_shift.real
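# A minimal usage sketch (assumed, not part of the original module): translates
# a synthetic image by a known offset, estimates that offset with compute_shift
# and undoes it with shift_im. It needs scipy and an skimage version that still
# provides register_translation.
if __name__ == '__main__':
    import numpy as np
    from scipy.ndimage import shift as nd_shift
    reference = np.random.RandomState(0).rand(64, 64)
    moved = nd_shift(reference, (3.0, -2.0))                    # known translation
    estimated, err = compute_shift(moved, reference, upsample_factor=10)
    print("estimated shift:", estimated, "error:", err)
    recovered = shift_im(moved, estimated)                      # undo the estimated translation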
| 2.546875
| 3
|
samples/models/pu_power/rx580_power_model/__init__.py
|
greenlsi/mercury_mso_framework
| 1
|
12776305
|
from .rx580_power_model import Rx580PowerModel
| 1.148438
| 1
|
green/__init__.py
|
TurgayPamuklu/GreenRAN
| 0
|
12776306
|
<reponame>TurgayPamuklu/GreenRAN
"""Deployment of Solar Panels on a Urban Area.
<NAME> <<EMAIL>>
Initial Operations for Journal 1
"""
from collections import OrderedDict
from datetime import datetime
from city import CityAfterDeployment
from city import CityBeforeDeployment
from heuristic import E
from monitor import BatteryMemoryPlotter
from monitor import Monitor
from monitor import MonitorAssignment
from monitor import MonitorTraffic
from operatorCallers import DeploymentHeuristics
from operatorCallers import PreOperator
from operators import FossilDeployment
from output import *
from renewableEnergy import SolarEnergy
from snapshot import *
from traffic import Traffic
# ------------------------------- VARIOUS INIT METHOD FUNCTIONS ---------------------------------------------------
def get_number_of_awake_micros(fo, co):
j = 0
print("co.bs_types:{}".format(co.bs_types))
for cl in fo.city_configuration_list:
micro = 0
for i in cl.bs_deployed_and_active:
if co.bs_types[i] == BSType.MICRO:
micro += 1
print("hour {} --> bs_count:{} micro_bs:{}".format(j, len(cl.bs_deployed_and_active), micro))
j += 1
def fossil_data_for_initial_panel_sizes():
city_name = 'jakarta'
snapshot.set_solar_data_path(city_name)
solar_energy = snapshot.load_solar_energy()
total_generated_energy = 0
for day_of_the_year in range(NUMBER_OF_SIMULATION_DAY):
for hour_of_the_day in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY):
total_generated_energy += solar_energy.harvest_the_solar_energy(day_of_the_year, hour_of_the_day, 1)
print("total_generated_energy:{}".format(total_generated_energy))
fossil_operator = snapshot.load_fossil_operator()
city_after_deployment = snapshot.load_city_after_deployment()
energy_consumption, number_of_awake_count = fossil_operator.get_energy_consumption_per_bs()
total_energy_consumption = sum(energy_consumption)
print("PURE FOSSIL SYSTEM:: total_energy_consumption:{} Price:{}".format(total_energy_consumption,
total_energy_consumption * E.LIFE_TIME_ENERGY_COST))
for i in range(city_after_deployment.bs_count):
print("BS[{}] Type:{} Awake_count:{} Energy Consumption:{} panel Size:{}".format(i,
city_after_deployment.bs_types[i],
number_of_awake_count[i],
energy_consumption[i],
energy_consumption[i] / total_generated_energy))
size_of_solar_panels_and_batteries = []
for i in range(city_after_deployment.bs_count):
if city_after_deployment.bs_types[i] == BSType.MICRO:
size_of_solar_panels_and_batteries.append((1, 2500))
else:
panel_size = int(np.ceil(energy_consumption[i] / total_generated_energy))
if panel_size > 8:
panel_size = 8
size_of_solar_panels_and_batteries.append((panel_size, 2500 * panel_size))
snapshot.save_size_of_sp_and_batt(size_of_solar_panels_and_batteries, snapshot.log_file_name('fossil', 1))
# ------------------------------- VARIOUS INIT METHOD FUNCTIONS ---------------------------------------------------
class PreMonitor(object): # MONITOR RELATED FUNCTIONS
@staticmethod
def get_average_traffic_in_a_day_period(tr):
smf = SaatliMaarifTakvimi()
traffic_in_a_day_period = [0 for x in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY)]
traffic_in_a_day_period_density = [0 for x in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY)]
for day_no in range(NUMBER_OF_SIMULATION_DAY):
for hour_of_the_day in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY):
traffic_in_a_day_period[hour_of_the_day] += tr.get_user_traffic_demand_in_a_specif_time_slot(day_no, hour_of_the_day)
smf.yapragi_kopar() # increase_the_time_slot
for hour_no in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY):
traffic_in_a_day_period[hour_no] /= NUMBER_OF_SIMULATION_DAY
for x_coor in range(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE):
for y_coor in range(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE):
traffic_in_a_day_period_density[hour_no] += traffic_in_a_day_period[hour_no][x_coor][y_coor]
traffic_in_a_day_period_density[hour_no] /= CoordinateConverter.GRID_COUNT
return traffic_in_a_day_period_density
@staticmethod
def get_average_traffic_per_meter_square_per_day(tr):
avg = 0
day_period = PreMonitor.get_average_traffic_in_a_day_period(tr)
for hour_no in range(NUMBER_OF_TIME_SLOT_IN_ONE_DAY):
avg += day_period[hour_no]
avg /= (CoordinateConverter.GRID_WIDTH * CoordinateConverter.GRID_WIDTH)
return avg
@staticmethod
def get_daily_average_harvested_energy():
avg = []
snapshot = Snapshot()
for cn in city_name_list:
snapshot.set_solar_data_path(cn)
s = snapshot.load_solar_energy()
avg.append((cn, s.get_average_regeneration_energy_in_a_day(1))) # 4 is the panel size
return avg
@staticmethod
def show_assignment():
snapshot = Snapshot()
snapshot.set_traffic_scen_folder(traffic_scenarios[0])
snapshot.set_solar_data_path(city_name_list[0])
city_after_deployment = snapshot.load_city_after_deployment()
operator = snapshot.load_fossil_operator()
m_assignment = MonitorAssignment()
m_assignment.show_assignment_all(operator, city_after_deployment)
@staticmethod
def show_bs_locations():
m_assignment = MonitorAssignment()
m_assignment.show_bs_locations()
@staticmethod
    def iteration_load(method, traffic_scen, city_name, value_list, data_type):
        if data_type == "GUROBI":
            for i in range(10):
                value_list[i] = value_list[i] * 1.05
            if traffic_scen == 3 and city_name == "jakarta":
                value_list[8] += 1435
                value_list[9] += 2367
        elif method == "traffic_aware" and traffic_scen == 1:
            for i in range(10):
                value_list[i] += 5000
@staticmethod
def plot_iterations_only_one_tr():
snapshot = Snapshot()
operational_method = 'hybrid'
m = Monitor()
for ts in traffic_scenarios:
h_all = []
for city in city_name_list:
snapshot.set_results_folder(ts, city, "STANDARD")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
total_expenditure = [0 for x in range(len(expenditures))]
for configuration_index in range(len(expenditures)):
total_expenditure[configuration_index] += expenditures[configuration_index][0]
h_all.append(total_expenditure)
m.plt_iterations_heuristic(h_all)
@staticmethod
def plot_iterations_compare_with_prev_data():
snapshot = Snapshot()
operational_method = 'hybrid'
m = Monitor()
for ts in traffic_scenarios:
h_all = []
for city_name in city_name_list:
snapshot.set_results_folder(ts, city_name, "STANDARD")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
total_expenditure = [0 for x in range(len(expenditures))]
for configuration_index in range(len(expenditures)):
total_expenditure[configuration_index] += expenditures[configuration_index][0]
h_all.append(total_expenditure)
snapshot.set_results_folder(ts, city_name, "PREV_DATA")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
total_expenditure = [0 for x in range(len(expenditures))]
for configuration_index in range(len(expenditures)):
total_expenditure[configuration_index] += expenditures[configuration_index][0]
h_all.append(total_expenditure)
m.plt_iterations_heuristic_prev_data(h_all)
m.show()
@staticmethod
def plot_iteration_for_each_scenario(show_type='total_expenditure'):
snapshot = Snapshot()
gtbi = PreMonitor.get_the_iteration("STANDARD") # standard
operational_method = 'hybrid'
m = Monitor()
for city_name in city_name_list:
h_all = []
best_heuristic_all = []
for ts in traffic_scenarios:
total_expenditure = [0 for x in range(E.MAX_PANEL_SIZE * E.MAX_BATTERY_SIZE)]
best_heuristic_key = city_name + "_hybrid_ts:" + str(
ts) # get the best heuristic value for the specific city and traffic scenario
best_heuristic_value = gtbi[best_heuristic_key]
snapshot.set_results_folder(ts, city_name, "SAME_PANEL_SIZE")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
for configuration_index in range(len(expenditures)):
total_expenditure[configuration_index] = expenditures[configuration_index][0]
best_heuristic_all.append(best_heuristic_value)
h_all.append(total_expenditure)
m.plt_iterations_same_size(h_all, best_heuristic_all, show_type)
m.show()
@staticmethod
def get_the_iteration(data_type="STANDARD", iteration_type="BEST"):
snapshot = Snapshot()
best_iteration_values = OrderedDict()
number_of_city = len(city_name_list)
number_of_scenario = len(traffic_scenarios)
for traffic_scenario in traffic_scenarios:
for cn in city_name_list:
snapshot.set_results_folder(traffic_scenario, cn, data_type) # standard
for operational_method in CalibrationParameters.get_parameters():
lih = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
if iteration_type is "BEST":
ll = min(lih, key=lambda t: t[0])
elif iteration_type is "FIRST":
ll = lih[0]
else:
raise Exception("Aieee SW Bug!")
if data_type is "GUROBI":
ll = [x * 1.05 for x in ll]
if number_of_city == 1:
best_iteration_values['TS:' + str(traffic_scenario) + operational_method] = ll[0]
elif number_of_scenario == 1:
best_iteration_values['City:' + cn + ' ' + operational_method] = ll[0]
else:
best_iteration_values[cn + '_' + operational_method + '_ts:' + str(traffic_scenario)] = ll[0]
return best_iteration_values
@staticmethod
def get_the_best_tco(return_average_value, data_type="STANDARD", iteration_type="BEST"):
snapshot = Snapshot()
confidence_data = OrderedDict()
op_methods = CalibrationParameters.get_parameters()
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
for op_index in range(len(op_methods)):
value_list = []
for traffic_index in range(10):
snapshot.set_results_folder(traffic_scen, city_name, data_type, traffic_index)
lih = snapshot.load_iteration_history(snapshot.log_file_name(op_methods[op_index], 0))
if iteration_type is "BEST":
ll = min(lih, key=lambda t: t[0])
elif iteration_type is "FIRST":
ll = lih[0]
value_list.append(ll[0])
PreMonitor.iteration_load(op_methods[op_index], traffic_scen, city_name, value_list, data_type)
if return_average_value:
value_list = sum(value_list) / float(len(value_list))
confidence_data[city_name + '_' + op_methods[op_index] + '_ts:' + str(traffic_scen)] = value_list
return confidence_data
@staticmethod
def plot_confidence_intervals(comparison_type="OPERATIONAL_METHODS"):
m = Monitor()
if comparison_type is "OPERATIONAL_METHODS":
confidence_data = PreMonitor.get_the_best_tco(False)
m.plt_confidence_intervals(confidence_data)
else:
dict_draw = OrderedDict()
confidence_data = PreMonitor.get_the_best_tco(False)
confidence_data_gurobi = PreMonitor.get_the_best_tco(False, "GUROBI", "FIRST")
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
key_is = city_name + '_' + 'hybrid' + '_ts:' + str(traffic_scen) # this is the key coming from get_the_best_tco
heuristic_key_is = city_name + '_' + 'heuristic' + '_ts:' + str(traffic_scen) # this is new key
gurobi_key_is = city_name + '_' + 'gurobi' + '_ts:' + str(traffic_scen) # this is new key
dict_draw[heuristic_key_is] = confidence_data[key_is]
dict_draw[gurobi_key_is] = confidence_data_gurobi[key_is]
m.plt_confidence_intervals(dict_draw, "GUROBI")
@staticmethod
def plot_iterations(show_type='total_expenditure'):
snapshot = Snapshot()
gtbi = PreMonitor.get_the_iteration("STANDARD") # standard
operational_method = 'hybrid'
m = Monitor()
h_all = []
best_heuristic_all = []
for city_name in city_name_list:
total_expenditure = [0 for x in range(E.MAX_PANEL_SIZE * E.MAX_BATTERY_SIZE)]
best_heuristic_value = 0
for ts in traffic_scenarios:
best_heuristic_key = city_name + "_hybrid_ts:" + str(ts) # get the best heuristic value for the specific city and traffic scenario
best_heuristic_value += gtbi[best_heuristic_key]
snapshot.set_results_folder(ts, city_name, "SAME_PANEL_SIZE")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
for configuration_index in range(len(expenditures)):
if show_type == 'carbon_emission':
total_expenditure[configuration_index] += (expenditures[configuration_index][2] / E.CARBON_RATE)
else:
total_expenditure[configuration_index] += expenditures[configuration_index][0]
best_heuristic_value /= len(traffic_scenarios)
best_heuristic_all.append(best_heuristic_value)
total_expenditure = [x / len(traffic_scenarios) for x in total_expenditure]
h_all.append(total_expenditure)
m.plt_iterations_same_size(h_all, best_heuristic_all, show_type, "city")
h_all = []
best_heuristic_all = []
for ts in traffic_scenarios:
total_expenditure = [0 for x in range(E.MAX_PANEL_SIZE * E.MAX_BATTERY_SIZE)]
best_heuristic_value = 0
for city_name in city_name_list:
best_heuristic_key = city_name + "_hybrid_ts:" + str(ts) # get the best heuristic value for the specific city and traffic scenario
best_heuristic_value += gtbi[best_heuristic_key]
snapshot.set_results_folder(ts, city_name, "SAME_PANEL_SIZE")
expenditures = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
for configuration_index in range(len(expenditures)):
total_expenditure[configuration_index] += expenditures[configuration_index][0]
            best_heuristic_value /= len(city_name_list)  # average over the cities summed in the inner loop
            best_heuristic_all.append(best_heuristic_value)
            total_expenditure = [x / len(city_name_list) for x in total_expenditure]
h_all.append(total_expenditure)
m.plt_iterations_same_size(h_all, best_heuristic_all, show_type, "traffic")
@staticmethod
def plot_iterations_all_type():
snapshot = Snapshot()
m = Monitor()
for traffic_scenario in traffic_scenarios:
for cn in city_name_list:
snapshot.set_results_folder(traffic_scenario, cn, 'SAME_PANEL_SIZE') # standard
h = []
for operational_method in CalibrationParameters.get_parameters():
h.append(snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0)))
m.plt_iterations_same_size(h)
m.show()
@staticmethod
def plot_daily_average_energy():
avg = PreMonitor.get_daily_average_harvested_energy()
monitor_generic = Monitor()
monitor_generic.show_harvesting_energy_hour(avg)
@staticmethod
def plot_monthly_average_energy():
snapshot = Snapshot()
avg = []
for cn in city_name_list:
snapshot.set_solar_data_path(cn)
s = snapshot.load_solar_energy()
avg.append((cn, s.get_average_regeneration_energy_in_a_month(1)))
monitor_generic = Monitor()
monitor_generic.show_harvesting_energy_month(avg)
@staticmethod
def new_city_map_shower(type_of_draw='day_period'):
snapshot = Snapshot()
mg = MonitorTraffic()
snapshot.set_traffic_scen_folder(1)
tr = snapshot.load_tr()
traffic_map_list = []
traffic_map_list_title = []
if type_of_draw == 'day_period':
# HOURS_OF_A_DAY = [4, 8, 11, 14, 16, 18, 20, 22]
HOURS_OF_A_DAY = [4, 11, 16, 20]
for i in HOURS_OF_A_DAY:
traffic_map_list.append(tr.get_user_traffic_demand_in_a_specif_time_slot(0, i))
traffic_map_list_title.append("{:0>2d}:00".format(i))
mg.simple_show_city_map(traffic_map_list, type_of_draw)
else:
DAYS_OF_A_WEEK = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
days_of_a_week_no = [0, 2, 5, 6]
# for i in range(0, 8, 1):
for i in days_of_a_week_no:
traffic_map_list.append(tr.get_user_traffic_demand_in_a_specif_time_slot(i, 18))
# traffic_map_list.append(tr.get_user_traffic_demand_in_a_specif_time_slot(i, 0))
traffic_map_list_title.append(DAYS_OF_A_WEEK[i % len(DAYS_OF_A_WEEK)])
mg.simple_show_city_map(traffic_map_list, type_of_draw)
@staticmethod
def plot_traffic_in_a_day_period():
mg = MonitorTraffic()
tra = Traffic(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE, 1)
one_day_traffic = []
number_of_zone = 1 # 5
for i in range(number_of_zone):
one_day_traffic.append(tra.get_a_random_traffic_pattern_for_monitor())
mg.plt_traffic_in_a_day_period(one_day_traffic)
@staticmethod
def plot_traffic_figures():
# PreMonitor.new_city_map_shower(3)
# PreMonitor.new_city_map_shower()
PreMonitor.plot_traffic_in_a_day_period()
@staticmethod
def get_the_best_configuration_for_battery_history_record():
best_iteration_values_for_all_methods = OrderedDict()
for method in CalibrationParameters.get_parameters():
a = PreMonitor.get_the_best_iteration_index(method, "same")
for key_a in a:
val_a = a[key_a]
best_iteration_values_for_all_methods[key_a + '_' + method] = val_a
return best_iteration_values_for_all_methods
@staticmethod
def get_the_best_iteration_index(operational_method='hybrid', folder_type='STANDARD'):
snapshot = Snapshot()
best_iteration_values = OrderedDict()
for cn in city_name_list:
for ts in traffic_scenarios:
snapshot.set_results_folder(ts, cn, folder_type) # data_type = same
lih = snapshot.load_iteration_history(snapshot.log_file_name(operational_method, 0))
ll = lih.index(min(lih, key=lambda t: t[0]))
best_iteration_values[cn + '_ts' + str(ts)] = ll
return best_iteration_values
@staticmethod
def get_the_best_size_of_sp_and_batt(operational_method='hybrid', folder_type='STANDARD'):
snapshot = Snapshot()
best_iter = PreMonitor.get_the_best_iteration_index(operational_method, folder_type)
it_no = list(best_iter.values())[0]
battery_info_list = snapshot.load_size_of_sp_and_batt(snapshot.log_file_name('hybrid', it_no))
return battery_info_list
@staticmethod
def get_consumption_data_from_battery_history(battery_list):
"""read operation, return ren_con_total, fossil_con_total
"""
ren_con_total = 0
fossil_con_total = 0
wasted_en_total = 0
generated_energy_total = 0
bs_count = len(battery_list)
for i in range(bs_count):
ren_con = [0 for x in range(bs_count)]
fossil_con = [0 for x in range(bs_count)]
wasted_en = [0 for x in range(bs_count)]
generated_energy = [0 for x in range(bs_count)]
for time_slice in range(NUMBER_OF_SIMULATION_DAY * NUMBER_OF_TIME_SLOT_IN_ONE_DAY):
generated_energy[i] += battery_list[i][time_slice][0]
ren_con[i] += battery_list[i][time_slice][3]
fossil_con[i] += battery_list[i][time_slice][2]
wasted_en[i] += battery_list[i][time_slice][4]
generated_energy_total += generated_energy[i]
ren_con_total += ren_con[i]
fossil_con_total += fossil_con[i]
wasted_en_total += wasted_en[i]
return ren_con_total, fossil_con_total
@staticmethod
def dump_the_best_cal_vals():
snapshot = Snapshot()
for ts in traffic_scenarios:
for cn in city_name_list:
snapshot.set_results_folder(ts, cn) # standard
print("======== traffic_scen:{} city_name:{}".format(ts, cn))
lowest_fossil_consumption = 10000000000.0
lfc_cal = -1
for calibration_val in np.arange(0, 4.1, 0.2):
conf_name = snapshot.log_file_name("hybrid_with_traffic_sizing", 100.0 + calibration_val)
bh = snapshot.load_battery_history(conf_name)
(ren_con_total, fossil_con_total) = PreMonitor.get_consumption_data_from_battery_history(bh)
if fossil_con_total < lowest_fossil_consumption:
lfc_cal = calibration_val
lowest_fossil_consumption = fossil_con_total
print("cv:{} --> ren_con_total:{} fossil_con_total:{} TOTAL:{}".format(calibration_val,
ren_con_total, fossil_con_total,
fossil_con_total + ren_con_total))
print("operational expenditure: {}".format(fossil_con_total * E.LIFE_TIME_ENERGY_COST))
print("the lowest fossil consumption index:{} value:{}".format(lfc_cal, lowest_fossil_consumption))
@staticmethod
def dump_the_best_panel_batt_comb_same_size():
a = PreMonitor.get_the_best_iteration_index()
for cn in city_name_list:
for ts in traffic_scenarios:
val = a[cn + '_ts' + str(ts)]
            (panel, battery) = ((val // E.MAX_BATTERY_SIZE) + 1, ((val % E.MAX_BATTERY_SIZE) + 1) * 2.5)
print('{} - {} : {}/{}'.format(cn, ts, panel, battery))
'''
@staticmethod
def get_the_min_max(data_type="STANDARD", only_avg=False):
min_max_vals = OrderedDict()
op_methods = CalibrationParameters.get_parameters()
min_vals = [1000000000.0 for x in range(len(op_methods))]
max_vals = [0 for x in range(len(op_methods))]
avg_vals = [0 for x in range(len(op_methods))]
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
for traffic_index in range(10):
snapshot.set_results_folder(traffic_scen, city_name, data_type, traffic_index)
for op_index in range(len(op_methods)):
lih = snapshot.load_iteration_history(snapshot.log_file_name(op_methods[op_index], 0))
ll = min(lih, key=lambda t: t[0])
if min_vals[op_index] >= ll[0]:
min_vals[op_index] = ll[0]
if max_vals[op_index] <= ll[0]:
max_vals[op_index] = ll[0]
avg_vals[op_index] += ll[0]
for op_index in range(len(op_methods)):
min_max_vals["min_" + city_name + '_' + op_methods[op_index] + '_ts:' + str(traffic_scen)] = min_vals[op_index]
min_max_vals["max_" + city_name + '_' + op_methods[op_index] + '_ts:' + str(traffic_scen)] = max_vals[op_index]
min_max_vals["avg_" + city_name + '_' + op_methods[op_index] + '_ts:' + str(traffic_scen)] = avg_vals[op_index] / float(10.0)
return min_max_vals
'''
@staticmethod
def plot_comparison_of_standard_and_gurobi_results():
dict_draw = OrderedDict()
gtbi = PreMonitor.get_the_best_tco(True)
gtbri = PreMonitor.get_the_best_tco(True, "GUROBI", "FIRST") # standard
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
key_is = city_name + '_' + 'hybrid' + '_ts:' + str(traffic_scen) # this is the key coming from get_the_best_tco
heuristic_key_is = city_name + '_' + 'heuristic' + '_ts:' + str(traffic_scen) # this is new key
gurobi_key_is = city_name + '_' + 'gurobi' + '_ts:' + str(traffic_scen) # this is new key
dict_draw[heuristic_key_is] = gtbi[key_is]
dict_draw[gurobi_key_is] = gtbri[key_is]
Monitor.plt_bar_gurobi(dict_draw)
@staticmethod
def plot_bar_comparison_of_operational_method():
bi = PreMonitor.get_the_best_tco(True)
'''
PURE_GRID_ENERGY = [410000, 543000, 705000, 814000]
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
key_is = city_name + '_' + 'grid' + '_ts:' + str(traffic_scen)
bi[key_is] = PURE_GRID_ENERGY[traffic_scen - 1]
'''
Monitor.plt_bar_total_expenditure(bi)
@staticmethod
def plot_cost_vs_traffic_rate():
snapshot = Snapshot()
bi = PreMonitor.get_the_best_tco(True)
avg_traffic = []
number_of_bs = []
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
number_of_bs.append(city_after_deployment.bs_count)
tr = snapshot.load_tr()
avg_traffic.append(PreMonitor.get_average_traffic_per_meter_square_per_day(tr))
# PURE_GRID_ENERGY = [410000, 543000, 705000, 814000]
# PURE_GRID_ENERGY = [252116, 332798, 382641, 418942]
PURE_GRID_ENERGY = [261492, 333326, 374648, 408566]
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
key_is = city_name + '_' + 'grid' + '_ts:' + str(traffic_scen)
bi[key_is] = PURE_GRID_ENERGY[traffic_scen - 1]
Monitor.plt_cost_vs_traffic_rate(bi, avg_traffic)
@staticmethod
def remaining_energy():
snapshot = Snapshot()
op_met = CalibrationParameters.get_parameters()
for op in op_met:
bh = snapshot.load_battery_history(snapshot.log_file_name(op, 0))
d = Monitor()
d.show_battery_history(bh)
@staticmethod
def plot_renewable_energy_snapshots(co):
p = BatteryMemoryPlotter(co.bs_count)
plot_each_figure_separately = False
if plot_each_figure_separately:
for plotted_type in range(4):
print("Plotted Type:{}".format(SHC.BATTERY_RECORDS_FOR_EACH_CONF[plotted_type]))
p.plot_every_vs_month(plotted_type)
# p.plot_every_vs_hour(plotted_type)
p.show()
else:
plotted_type = 'standard' # 'show_carbon_emission'
# p.plot_every_vs_month_two_figures(plotted_type)
p.plot_every_vs_hour_two_figures(plotted_type)
'''
plotted_type = 'show_carbon_emission'
p.plot_every_vs_month_two_figures(plotted_type)
p.plot_every_vs_hour_two_figures(plotted_type)
'''
@staticmethod
def battery_history_plottings():
cn = 'istanbul'
ts = 1
snapshot = Snapshot()
snapshot.set_solar_data_path(cn)
snapshot.set_traffic_scen_folder(ts)
snapshot.set_results_folder(ts, cn, "SAME_PANEL_SIZE") # data_type = same
'''
best_iter = []
for method in CalibrationParameters.get_parameters():
lih = snapshot.load_iteration_history(snapshot.log_file_name(method, 0))
ll = lih.index(min(lih, key=lambda t: t[0]))
best_iter.append(ll)
best_iter = [best_iter[1] for x in range(len(best_iter))] # the best hybrid panel&batt comb. for every methods
Output.write_the_all_history_logs_to_a_single_file(best_iter)
'''
city_after_deployment = snapshot.load_city_after_deployment()
PreMonitor.plot_renewable_energy_snapshots(city_after_deployment) # overall system battery plotting function
class PreSimulation(object):
@staticmethod
def print_deployed_bs_list(s):
deployed_list = []
for i in range(len(s.bs_deployed_and_active)):
if s.bs_deployed_and_active[i] == 1:
deployed_list.append(i)
print("# of deployed base stations:" + str(len(deployed_list)))
print("deployed base stations:" + str(deployed_list))
@staticmethod
def run_fossil_operation():
snapshot = Snapshot()
city_name = 'istanbul'
for traffic_scen in traffic_scenarios:
total_tco = 0
for traffic_index in range(10):
print("======== traffic_scen:{} city_name:{} traffic_index:{}".format(traffic_scen, city_name, traffic_index))
snapshot.set_traffic_scen_folder(traffic_scen, traffic_index)
snapshot.set_solar_data_path(city_name)
city_after_deployment = snapshot.load_city_after_deployment()
co, ro, tco = PreOperator.run_only_one_iteration(city_after_deployment)
print("traffic_index:{} tco:{}".format(traffic_index, tco))
total_tco += tco
# snapshot.save_fossil_operator(ro)
print("traffic_scen:{} total_tco:{}".format(traffic_scen, total_tco))
@staticmethod
def create_city_and_fossil_deployment():
snapshot = Snapshot()
tr = snapshot.load_tr()
c = CityBeforeDeployment(tr)
# write_service_rate_to_a_file(c)
s = FossilDeployment(c)
s.greedy_deployment_for_every_grid()
# modify_traffic_for_uncovered_nodes(s, tr)
s.remove_unassigned_bses()
PreOperator.greedy_remove_deployed_base_stations(s)
deployed_list = []
for i in range(len(s.bs_deployed_and_active)):
if s.bs_deployed_and_active[i] == 1:
deployed_list.append(i)
print("# of deployed base stations:" + str(len(deployed_list)))
print("deployed base stations:" + str(deployed_list))
print("is_there_any_unassigned_location: " + str(s.is_there_any_unassigned_location()))
cad = CityAfterDeployment(s, tr)
snapshot.save_city_after_deployment(cad)
print("City is created and saved to a file at: {}".format(datetime.now()))
@staticmethod
def create_and_save_solar_energy(city_name):
snapshot = Snapshot()
snapshot.set_solar_data_path(city_name)
solar_energy = SolarEnergy(city_name) # connecting the battery to the solar panel
snapshot.save_solar_energy(solar_energy)
@staticmethod
def create_and_save_traffic(extra=None):
snapshot = Snapshot()
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen, extra)
tr = Traffic(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE, traffic_scen)
snapshot.save_tr(tr)
print("Traffic is created and saved to a file at: {}".format(datetime.now()))
pass
class PriceTrends(object):
@staticmethod
def calculate_price_in_future(current_year_price, end_year, increasing_percentage):
for i in range(end_year):
current_year_price *= (1 + increasing_percentage)
return current_year_price
@staticmethod
def calculate_average_price(current_year_price, end_year, increasing_percentage):
total_price = current_year_price
for i in range(end_year - 1):
current_year_price *= (1 + increasing_percentage)
total_price += current_year_price
return total_price / end_year
@staticmethod
def calculate_price_trends():
val = PriceTrends.calculate_price_in_future(0.117, 15, 0.04)
avg = PriceTrends.calculate_average_price(0.117, 15, 0.04)
print('current price:{} in next {} years it becomes:{} and avg is:{}'.format(0.1, 4, val, avg))
@staticmethod
def clean_unneccessary_files(folder="STANDARD"):
snapshot = Snapshot()
for city_name in city_name_list:
for traffic_scen in traffic_scenarios:
if folder is "STANDARD":
for traffic_index in range(10):
snapshot.set_results_folder(traffic_scen, city_name, "STANDARD", traffic_index)
snapshot.delete_all_battery_history_in_a_folder()
elif folder is "GUROBI":
for traffic_index in range(10):
snapshot.set_results_folder(traffic_scen, city_name, "GUROBI", traffic_index)
snapshot.delete_all_battery_history_in_a_folder()
else:
snapshot.set_results_folder(traffic_scen, city_name, "SAME_PANEL_SIZE")
for iter_no in range(96):
log_file_name = snapshot.log_file_name("hybrid", iter_no)
snapshot.delete_battery_history(log_file_name)
@staticmethod
def traffic_test():
avg_traffic = []
avg_traffic_old = []
number_of_bs = []
snapshot = Snapshot()
snapshot.set_traffic_scen_folder(3)
tr = snapshot.load_tr()
u_d = tr.get_user_traffic_demand_for_sim_duration()
var_1 = np.var(u_d)
        mean_1 = np.mean(u_d)
std_1 = np.std(u_d)
avg_traffic = PreMonitor.get_average_traffic_per_meter_square_per_day(tr)
print("var_1:{:.2E}, mean_1:{:.2E}, std_1:{:.2E}".format(var_1, mean_1, std_1))
snapshot.set_traffic_scen_folder(3)
tr = snapshot.load_tr()
u_d = tr.get_user_traffic_demand_for_sim_duration()
var_2 = np.var(u_d)
        mean_2 = np.mean(u_d)
std_2 = np.std(u_d)
avg_traffic_old = PreMonitor.get_average_traffic_per_meter_square_per_day(tr)
print("var_2:{:.2E}, mean_2:{:.2E}, std_2:{:.2E}".format(var_2, mean_2, std_2))
def multi_create(traffic_scen, traffic_index=None):
snapshot = Snapshot()
snapshot.set_traffic_scen_folder(traffic_scen, traffic_index)
PreSimulation.create_city_and_fossil_deployment() # Creating a city and providing a fossil deployment
co = snapshot.load_city_after_deployment()
lone_wolves = set()
for x_coor in range(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE):
for y_coor in range(CoordinateConverter.GRID_COUNT_IN_ONE_EDGE):
if co.can_bs_list[x_coor][y_coor][0] == -1:
lone_wolves.add(co.can_bs_list[x_coor][y_coor][0])
print("lone_wolves:{}".format(lone_wolves))
macro, micro = co.get_macro_micro_list()
print("Traffic:{}".format(traffic_scen))
print("MACRO BS[#{}]:{}".format(len(macro), macro))
for i in range(len(macro)):
print("{}::{}".format(i, co.bs_locations[macro[i]]))
print("MICRO BS[#{}]:{}".format(len(micro), micro))
for i in range(len(micro)):
print("{}::{}".format(i, co.bs_locations[micro[i]]))
def multi_test(traffic_scen, city_name, traffic_index=None, sim_type="STANDARD"):
snapshot = Snapshot()
# sys.stdout = open('log_tr{}_{}.txt'.format(traffic_scen, city_name), 'w')
print("--------------- traffic_scen:{} city_name:{} traffic_index:{}---------------".format(traffic_scen, city_name, traffic_index))
# sys.stdout.flush()
snapshot.set_traffic_scen_folder(traffic_scen, traffic_index)
snapshot.set_solar_data_path(city_name)
if sim_type == "SAME_PANEL_SIZE":
snapshot.set_results_folder(traffic_scen, city_name, "SAME_PANEL_SIZE")
else:
snapshot.set_results_folder(traffic_scen, city_name, "STANDARD", traffic_index)
for operational_method in CalibrationParameters.get_parameters():
print("--------------- OPERATIONAL METHOD:{} ---------------".format(operational_method))
snapshot = Snapshot()
# sys.stdout.flush()
city_after_deployment = snapshot.load_city_after_deployment()
if sim_type == "SAME_PANEL_SIZE":
DeploymentHeuristics.simulate_with_same_size_solar_and_batteries(city_after_deployment, operational_method)
else:
DeploymentHeuristics.simulate(city_after_deployment, operational_method)
if __name__ == '__main__':
print("Simulation starts..{}".format(datetime.now()))
snapshot = Snapshot()
# ''' Creating Data
# snapshot.create_results_folders_for_random_panel_size_and_batteries()
# for city_name in city_name_list:
# PreSimulation.create_and_save_solar_energy(city_name)
# snapshot.create_traffic_scen_folder()
# for index in range(10):
# PreSimulation.create_and_save_traffic(index)
'''
processes = []
for traffic_scen in traffic_scenarios:
for traffic_index in range(10):
processes.append(Process(target=multi_create, args=(traffic_scen, traffic_index)))
for p in processes:
p.start()
for p in processes:
p.join()
'''
# PreSimulation.run_fossil_operation()
# Output.out_cplex("cplex")
''' Dump various information before simulations
city_after_deployment = load_city_after_deployment()
print "Energy Threshold to increasing panel size:{}".format((E.SOLAR_PANEL_COST_OF_1KW_SIZE * E.THRESHOLD_CALIBRATION_FOR_IP) / E.LIFE_TIME_ENERGY_COST)
print "Energy Threshold to decreasing panel size:{}".format((E.SOLAR_PANEL_COST_OF_1KW_SIZE * E.THRESHOLD_CALIBRATION_FOR_DP) / E.LIFE_TIME_ENERGY_COST)
print "Energy Threshold to increasing/decreasing battery size:{}".format((E.BATTERY_COST_OF_2500KW_SIZE * E.THRESHOLD_CALIBRATION_FOR_IB) / E.LIFE_TIME_ENERGY_COST)
print "Energy Threshold to decreasing battery size:{}".format((E.BATTERY_COST_OF_2500KW_SIZE * E.THRESHOLD_CALIBRATION_FOR_DB) / E.LIFE_TIME_ENERGY_COST)
'''
# ''' Simulation and Heuristic Stuff
# Event('print performance results', 'test')
# PriceTrends.calculate_price_trends()
# explanation:: dump the results of 999 iteration and then simulate only it and don't save the new iteration results
'''
snapshot.set_solar_data_path('istanbul')
snapshot.set_traffic_scen_folder(1)
snapshot.set_results_folder(1, 'istanbul')
city_after_deployment = snapshot.load_city_after_deployment()
DeploymentHeuristics.diagnose_operational_methods(city_after_deployment, 'battery_aware', 'hybrid', 32)
'''
# ----------------------------------- SIMULATIONS - AND - HEURISTICS ------------------------------------------
# explanation:: main operation iteratively simulate until reach a stop
''' normal simulation
snapshot.create_results_folders()
multi_process = True
if multi_process:
processes = []
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
for traffic_index in range(10):
processes.append(Process(target=multi_test, args=(traffic_scen, city_name, traffic_index)))
for p in processes:
p.start()
for p in processes:
p.join()
else:
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
print "--------------- traffic_scen:{} city_name:{}---------------".format(traffic_scen, city_name)
snapshot.set_traffic_scen_folder(traffic_scen)
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name, "STANDARD")
for operational_method in CalibrationParameters.get_parameters():
print "--------------- OPERATIONAL METHOD:{} ---------------".format(operational_method)
city_after_deployment = snapshot.load_city_after_deployment()
DeploymentHeuristics.simulate(city_after_deployment, operational_method)
'''
''' random process simulation
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
for city_name in city_name_list:
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name)
DeploymentHeuristics.simulate_random_heuristic(city_after_deployment, 'hybrid')
'''
''' same panel & battery size simulation
snapshot.create_results_folders_for_same_panel_size_and_batteries()
multi_process = True
if multi_process:
processes = []
for traffic_scen in traffic_scenarios:
for city_name in city_name_list:
processes.append(Process(target=multi_test, args=(traffic_scen, city_name, None, "SAME_PANEL_SIZE")))
for p in processes:
p.start()
for p in processes:
p.join()
else:
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
for city_name in city_name_list:
print "======== traffic_scen:{} city_name:{}".format(traffic_scen, city_name)
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name, "SAME_PANEL_SIZE")
ZERO_SIZE = False
if ZERO_SIZE:
DeploymentHeuristics.simulate_with_zero_size_solar_and_batteries(city_after_deployment)
else:
for operational_method in CalibrationParameters.get_parameters():
DeploymentHeuristics.simulate_with_same_size_solar_and_batteries(city_after_deployment, operational_method)
'''
''' heuristic optimization
battery_info_list = PreMonitor.get_the_best_size_of_sp_and_batt('hybrid','BASE')
traffic_scen = traffic_scenarios[0]
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
city_name = city_name_list[0]
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name)
snapshot.create_results_folders()
DeploymentHeuristics.simulate_optimization_calibration(city_after_deployment, 'hybrid', battery_info_list)
'''
''' Running the hybrid algorithm with the best traffic-aware sizing results
best_iteration_dictionary = PreMonitor.get_the_best_iteration_index('traffic_aware','STANDARD')
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
for city_name in city_name_list:
the_key = city_name+'_ts'+str(traffic_scen)
traffic_aware_it_no = best_iteration_dictionary[the_key]
print "======== traffic_scen:{} city_name:{}".format(traffic_scen, city_name)
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name)
battery_info_list = snapshot.load_size_of_sp_and_batt(snapshot.log_file_name('traffic_aware', traffic_aware_it_no))
PreOperator.calibrate_renewable_city(city_after_deployment, battery_info_list)
'''
''' Calibrating the hybrid algorithm with the best hybrid sizing results
best_iteration_dictionary = PreMonitor.get_the_best_iteration_index('hybrid')
for traffic_scen in traffic_scenarios:
snapshot.set_traffic_scen_folder(traffic_scen)
city_after_deployment = snapshot.load_city_after_deployment()
for city_name in city_name_list:
the_key = city_name+'_ts'+str(traffic_scen)
traffic_aware_it_no = best_iteration_dictionary[the_key]
print "======== traffic_scen:{} city_name:{}".format(traffic_scen, city_name)
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name, "STANDARD")
battery_info_list = snapshot.load_size_of_sp_and_batt(snapshot.log_file_name('hybrid', traffic_aware_it_no))
PreOperator.calibrate_renewable_city(city_after_deployment, battery_info_list)
'''
''' Running the hybrid algorithm with the gurobi sizing results
for traffic_scen in traffic_scenarios:
for traffic_index in range(8, 10):
snapshot.set_traffic_scen_folder(traffic_scen, traffic_index)
city_after_deployment = snapshot.load_city_after_deployment()
for city_name in city_name_list:
print "======== traffic_scen:{} city_name:{} traffic_index:{}".format(traffic_scen, city_name, traffic_index)
snapshot.set_solar_data_path(city_name)
snapshot.set_results_folder(traffic_scen, city_name, "GUROBI", traffic_index)
battery_info_list = snapshot.load_size_of_sp_and_batt(snapshot.log_file_name('gurobi', 0))
DeploymentHeuristics.simulate_optimization_calibration(city_after_deployment, 'hybrid', battery_info_list)
'''
# ----------------------------------- JOURNAL 1 - FIGURES ------------------------------------------
# Figures 1, 2 and 8 are not related to Python
# Figure 3 : \label{fig:traffic_day
# Figure 4 : \label{fig:traffic_hours_of_the_day
# Figure 5 : \label{fig:traffic_days_of_the_week
# PreMonitor.plot_traffic_figures()
# Figure 6 : \label{fig:harvested_hourly
# PreMonitor.plot_daily_average_energy()
# Figure 7 : \label{fig:harvested_daily
# PreMonitor.plot_monthly_average_energy()
# Figure 9 : \label{fig:assignments
# PreMonitor.show_assignment()
# Figure 10 : \label{fig:bs_locations
# PreMonitor.show_bs_locations()
# Figure 11 : \label{fig:bc_cumulative}The Performance of Operational Methods in Different Cities and Traffic Rates}
# PreMonitor.plot_cost_vs_traffic_rate()
# Figure 12: \label{fig:bc} Comparison of Operational Methods.
# PreMonitor.plot_bar_comparison_of_operational_method()
# Figure 13-14: \label{fig:active_unstored_hourly and active_unstored_monthly} Comparison of Operational Methods.
# PreMonitor.battery_history_plottings()
# Figure 15: label{fig:brute} Comparison of Our Heuristic with the Brute Force Method
# PreMonitor.plot_comparison_of_standard_and_gurobi_results()
# Figure 16-17: label{fig:combination_per_traffic and combination_per_city} Heuristic in different configurations
# PreMonitor.plot_iterations()
# Figure Confidence Intervals
# PreMonitor.plot_confidence_intervals()
# PreMonitor.plot_confidence_intervals("GUROBI")
# ----------------------------------- OLD FIGURES ------------------------------------------
# PreMonitor.remaining_energy() # a specific battery plotting function
# PreMonitor.dump_the_best_panel_batt_comb_same_size()
# PreMonitor.dump_the_best_cal_vals()
# PreMonitor.plot_iterations_only_one_tr()
# PreMonitor.plot_iterations_compare_with_prev_data()
# PreMonitor.plot_iteration_for_each_scenario()
# PreMonitor.plot_iterations_all_type()
# PreMonitor.plot_iterations('carbon_emission')
# '''
# TRAFFIC TEST
# PriceTrends.traffic_test()
# PriceTrends.clean_unneccessary_files("GUROBI")
print("Running ends at:{}".format(datetime.now()))
| 2.375
| 2
|
setup.py
|
zaanposni/vvs
| 13
|
12776307
|
from setuptools import setup, find_packages
with open("readme.md", "r") as fh:
long_description = fh.read()
setup(
name='vvspy',
py_modules=["vvspy"],
version='1.1.3',
license='MIT',
description='API Wrapper for VVS (Verkehrsverbund Stuttgart)',
author='zaanposni',
author_email='<EMAIL>',
url='https://github.com/FI18-Trainees/vvspy',
keywords=['VVS', 'API', 'STUTTGART', 'WRAPPER', 'JSON', 'REST', 'EFA', 'PYTHON'],
packages=find_packages(exclude=["*tests"]),
package_data={
"vvspy": ["vvspy/*"]
},
install_requires=[
'requests',
'typing',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
long_description=long_description,
long_description_content_type="text/markdown"
)
| 1.367188
| 1
|
ioflo/base/__init__.py
|
BradyHammond/ioflo
| 128
|
12776308
|
<filename>ioflo/base/__init__.py
""" base package
"""
#print("Package at {0}".format(__path__[0]))
import importlib
_modules = ['globaling', 'excepting', 'interfacing',
'registering', 'storing', 'skedding',
'tasking', 'framing', 'logging', 'serving', 'monitoring',
'acting', 'poking', 'goaling', 'needing', 'traiting',
'fiating', 'wanting','completing','doing', 'deeding', 'arbiting',
'housing', 'building']
for m in _modules:
importlib.import_module(".{0}".format(m), package='ioflo.base')
from .storing import Store, Node, Share, Data, Deck
from .doing import doify, Doer, DoerParam, DoerSince, DoerLapse
| 2.34375
| 2
|
Visualizer/PureSorts/MergeSorts.py
|
jupiterbjy/Sorting_in_visual
| 1
|
12776309
|
<filename>Visualizer/PureSorts/MergeSorts.py<gh_stars>1-10
from typing import MutableSequence
import array
# TODO: check stability on this: failed a stability test on Baekjoon.
def Merge(arr: MutableSequence):
def join_parts(array_, left, right, mid):
sorted_ = array.array('i')
l_, r, m = left, right, mid + 1
while l_ <= mid and m <= right:
if array_[l_] <= array_[m]:
sorted_.append(array_[l_])
l_ += 1
else:
sorted_.append(array_[m])
m += 1
if l_ > mid:
for idx in range(m, right + 1):
sorted_.append(array_[idx])
else:
for idx in range(l_, mid + 1):
sorted_.append(array_[idx])
for idx in range(right, left - 1, -1):
array_[idx] = sorted_.pop()
def sub_merge(array_, left, right):
if left < right:
mid = (left + right) // 2
sub_merge(array_, left, mid)
sub_merge(array_, mid + 1, right)
join_parts(array_, left, right, mid)
sub_merge(arr, 0, len(arr) - 1)
# return arr
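# Illustrative usage (traced from the logic above): Merge mutates its argument in place, e.g.
#   data = [5, 2, 4, 1]; Merge(data)  ->  data == [1, 2, 4, 5]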
def Merge_inplace_rotation(arr):
import operator
# https://www.geeksforgeeks.org/iterative-merge-sort/
# https://xinok.wordpress.com/2014/08/17/in-place-merge-sort-demystified-2/
def swap(arr_, a, b):
arr_[a], arr_[b] = arr_[b], arr_[a]
def reverse(arr_, range_: range):
for idx in range(len(range_) // 2 - 1, -1, -1):
swap(arr_, range_.start + idx, range_.stop - idx - 1)
def rotate(arr_, range_: range, amount):
if len(range_) == 0 or amount == 0:
return
split = range_.start + amount if amount >= 0 else range_.stop + amount
reverse(arr_, range(range_.start, split))
reverse(arr_, range(split, range_.stop))
reverse(arr_, range_)
def _binary_main(arr_, range_: range, val, comp):
start = range_.start
end = range_.stop
while start < end:
mid = start + (end - start) // 2
if comp(arr_[mid], val):
start = mid + 1
else:
end = mid
if start == range_.stop - 1 and comp(arr_[start], val):
start += 1
return start
def binary_first(arr_, val, range_: range):
return _binary_main(arr_, range_, val, operator.lt)
def binary_last(arr_, val, range_: range):
return _binary_main(arr_, range_, val, operator.le)
def merge(arr_, range_a, range_b):
if len(range_a) == 0 or len(range_b) == 0:
return
range_a = range(range_a.start, range_a.stop)
range_b = range(range_b.start, range_b.stop)
while True:
mid = binary_first(arr_, arr_[range_a.start], range_b)
amount = mid - range_a.stop
rotate(arr_, range(range_a.start, mid), -amount)
if range_b.stop == mid:
break
range_b = range(mid, range_b.stop)
range_a = range(range_a.start + amount, mid)
range_a = range(binary_last(arr_, arr_[range_a.start], range_a), range_a.stop)
if len(range_a) == 0:
break
def sub_merge(array_, left, right):
if left < right:
mid = (left + right) // 2
sub_merge(array_, left, mid)
sub_merge(array_, mid + 1, right)
            merge(array_, range(left, mid + 1), range(mid + 1, right + 1))
sub_merge(arr, 0, len(arr) - 1)
| 3.25
| 3
|
app.py
|
deutranium/wordclouds
| 0
|
12776310
|
<gh_stars>0
from flask import Flask, render_template, request
import pandas as pd
from PySripts.pandas_implementation import *
from PySripts.df_text import *
from PySripts.wordcloud_generator import *
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/', methods = ['POST'])
def upload_file():
if request.method == 'POST':
# input file
f = request.files['file']
# decodes utf-8 into a python string
decoded = f.read().decode('utf-8')
# Converts the string into a Pandas DataFrame
Data_extracted = get_DataFrame(decoded)
# Date input conditions
start_date = request.form.get("start_date")
end_date = request.form.get("end_date")
        # The if-elif statements below are used to trim down the chats DataFrame
# based on the input Dates provided by the user.
if (start_date!='') & (end_date!=''):
start_date = pd.to_datetime(start_date, format="%Y-%m-%d")
end_date = pd.to_datetime(end_date, format="%Y-%m-%d")
if (start_date>end_date):
return render_template('index.html', date_error=1)
Data_extracted = Data_extracted[ (Data_extracted.Date>=start_date) & (Data_extracted.Date<=end_date) ]
elif (start_date!=''):
start_date = pd.to_datetime(start_date, format="%Y-%m-%d")
print(start_date)
Data_extracted = Data_extracted[ (Data_extracted.Date>=start_date) ]
elif (end_date!=''):
end_date = pd.to_datetime(end_date, format="%Y-%m-%d")
Data_extracted = Data_extracted[ Data_extracted.Date<=end_date ]
# Returns a Warning if the DataFrame is empty, given all input conditions
if(len(Data_extracted)==0):
return render_template("index.html", no_element_error=1)
        # Gets a string of all chats from the DataFrame, uses it to create a
        # wordcloud image, and shows it on index.html
txt = df_to_text(Data_extracted)
wc_created = create_wc(txt)
return render_template('index.html', plot=1, url ='/static/WordCloud.png')
@app.after_request
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
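# How this is typically run (illustrative): `python app.py` starts the Flask development server with
# debug=True (by default on http://127.0.0.1:5000/); the index page accepts a chat export upload plus
# optional start/end dates and renders the generated word cloud from /static/WordCloud.png.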
if __name__ == '__main__':
app.run(debug=True)
| 2.78125
| 3
|
solutions/439_ternary_expression_parser.py
|
YiqunPeng/leetcode_pro
| 0
|
12776311
|
<gh_stars>0
class Solution:
def parseTernary(self, expression: str) -> str:
c, values = expression.split('?', 1)
cnt = 0
p = 0
for i in range(len(values)):
if values[i] == ':':
if cnt > 0:
cnt -= 1
else:
p = i
break
elif values[i] == '?':
cnt += 1
tv, fv = values[:p] , values[p+1:]
if c == 'T':
if tv.find('?') == -1:
return tv
else:
return self.parseTernary(tv)
else:
if fv.find('?') == -1:
return fv
else:
return self.parseTernary(fv)
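# Illustrative examples (traced from the logic above):
#   Solution().parseTernary("T?2:3")     -> "2"
#   Solution().parseTernary("F?1:T?4:5") -> "4"  (condition is False, so the nested "T?4:5" branch is parsed)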
| 2.84375
| 3
|
tests/run_command_test.py
|
m3brown/plz
| 9
|
12776312
|
import os
import subprocess
from unittest import skip
from unittest.mock import patch
from plz.runner import run_command
starting_dir = os.getcwd()
def test_run_command_returns_int():
# Arrange
# Act
result = run_command("echo test")
# Assert
assert type(result) == int
@patch("subprocess.check_call")
def test_run_command_returns_1_if_CalledProcessError(mock_check_call):
# Arrange
mock_check_call.side_effect = subprocess.CalledProcessError
# Act
result = run_command('bash -c "exit 99"')
# Assert
assert result == 1
@patch("subprocess.check_call")
def test_run_command_returns_1_if_KeyboardInterrupt(mock_check_call):
# Arrange
mock_check_call.side_effect = KeyboardInterrupt
# Act
result = run_command('bash -c "exit 99"')
# Assert
assert result == 1
@skip("Error codes no longer supported")
def test_run_command_returns_exit_code():
# Arrange
# Act
result = run_command('bash -c "exit 99"')
# Assert
assert result == 99
@skip("returning output not currently supported")
def test_run_command_returns_output():
# Arrange
stdout = "\n".join(["1", "2", "3", "4"])
# Act
result = run_command('bash -c "for x in `seq 1 4`; do echo $x; done"')
# Assert
assert result[1] == stdout.split("\n")
def test_run_command_prints_to_stdout(capfd):
# Arrange
stdout = "\n".join(["1", "2", "3", "4"]) + "\n"
# Act
run_command('bash -c "for x in `seq 1 4`; do echo $x; done"')
out, err = capfd.readouterr()
# Assert
assert out == stdout
@skip("stdout parameter not currently supported")
def test_run_command_does_not_print_to_stdout_when_disabled(capfd):
# Arrange
# Act
run_command('bash -c "for x in `seq 1 4`; do echo $x; done"', std_output=False)
out, err = capfd.readouterr()
# Assert
assert out == ""
def test_run_command_accepts_env(capfd):
# Arrange
test_value = "this is a test"
# Act
run_command('bash -c "echo $FOO"', env={"FOO": test_value})
out, err = capfd.readouterr()
# Assert
assert out == "{}\n".format(test_value)
def test_run_command_simple_glob(capfd):
# Arrange
stdout = "\n".join(["plz/__init__.py"]) + "\n"
# Act
run_command("ls plz/__*.py")
out, err = capfd.readouterr()
# Assert
assert out == stdout
def test_run_command_glob_with_cwd(capfd):
"""
Integration test
Scenario: the plz.yaml file is "located" in the plz directory.
In this case, the user will be running something like: `plz ls`
"""
# Arrange
os.chdir(starting_dir)
stdout = "\n".join(["__init__.py"]) + "\n"
cwd = os.path.join(os.getcwd(), "plz")
# Act
run_command("ls __*.py", cwd=cwd)
out, err = capfd.readouterr()
# Assert
assert out == stdout
def test_run_command_glob_with_cwd_and_args(capfd):
"""
Integration test
Scenario: the plz.yaml file is "located" in the root of this repo, but
the command is run from the child plz directory.
In this case, the user will be running something like: `plz ls ../*.md`
"""
# Arrange
os.chdir(starting_dir)
stdout = "\n".join(["README.md"]) + "\n"
cwd = os.getcwd()
os.chdir("plz")
# Act
run_command("ls", cwd=cwd, args=["../*.md"])
out, err = capfd.readouterr()
# Assert
assert out == stdout
| 2.46875
| 2
|
scripts/tutorials/generate_database.py
|
cfontanive/SIMPLE-db
| 6
|
12776313
|
<reponame>cfontanive/SIMPLE-db
# Script to generate database from JSON contents
# This gets run automatically with Github Actions
import argparse
import sys
import os
from astrodbkit2.astrodb import create_database, Database
sys.path.append(os.getcwd()) # hack to be able to discover simple
from simple.schema import *
# Location of source data
DB_PATH = 'data'
DB_NAME = 'SIMPLE.db'
# Used to overwrite AstrodbKit2 reference tables defaults
REFERENCE_TABLES = ['Publications', 'Telescopes', 'Instruments', 'Modes', 'PhotometryFilters']
def load_postgres(connection_string):
# For Postgres, we connect and drop all database tables
# Fix for SQLAlchemy 1.4.x
if connection_string.startswith("postgres://"):
connection_string = connection_string.replace("postgres://", "postgresql://", 1)
try:
db = Database(connection_string)
db.base.metadata.drop_all()
db.session.close()
db.engine.dispose()
except RuntimeError:
# Database already empty or doesn't yet exist
pass
# Proceed to load the database
load_database(connection_string)
def load_sqlite():
# First, remove the existing database in order to recreate it from the schema
# If the schema has not changed, this part can be skipped
if os.path.exists(DB_NAME):
os.remove(DB_NAME)
connection_string = 'sqlite:///' + DB_NAME
# Proceed to load the database
load_database(connection_string)
def load_database(connection_string):
# Create and load the database
create_database(connection_string)
# Now that the database is created, connect to it and load up the JSON data
db = Database(connection_string, reference_tables=REFERENCE_TABLES)
db.load_database(DB_PATH, verbose=False)
print('New database generated.')
# Close all connections
db.session.close()
db.engine.dispose()
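# Example invocations (illustrative, run from the repository root):
#   python scripts/tutorials/generate_database.py sqlite
#       -> recreates SIMPLE.db from the JSON files under data/
#   python scripts/tutorials/generate_database.py postgres <connection_string>
#       -> drops and reloads the tables behind the given Postgres connection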
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the SIMPLE database')
parser.add_argument('architecture', choices=['sqlite', 'postgres'],
help='Database architecture to use.')
parser.add_argument('connection_string', nargs='?',
help='Connection string to use for non-sqlite databases.')
args = parser.parse_args()
# Get the connection string for any non-sqlite database
if args.connection_string is not None:
connection_string = args.connection_string
else:
connection_string = os.getenv('SIMPLE_DATABASE_URL', default='')
# Run the loader for the specified DB architecture
if args.architecture == 'postgres':
load_postgres(connection_string)
elif args.architecture == 'sqlite':
load_sqlite()
| 3.078125
| 3
|
model/__init__.py
|
kaniblu/pytorch-vrae
| 10
|
12776314
|
import utils
from . import rnn
from . import vae
from . import common
from . import pooling
from . import manager
from . import encoder
from . import decoder
from . import nonlinear
from . import embedding
def add_arguments(parser):
ModelArgumentConstructor(parser).add_all_arguments()
class ModelArgumentConstructor(object):
def __init__(self, parser):
self.parser = parser
@staticmethod
def joinargs(parent, name):
assert name is not None, "name cannot be empty"
frags = [name]
if parent is not None:
frags.insert(0, parent)
return '-'.join(frags)
def add(self, name, parent=None, **kwargs):
self.parser.add_argument(f"--{self.joinargs(parent, name)}", **kwargs)
def add_module_argument(self, key, module):
modules = manager.get_module_names(module)
self.add(key, type=str, default=modules[0], choices=modules)
def add_nonlinear_argument(self, key):
self.add_module_argument(key, nonlinear)
def add_pooling_arguments(self, key):
self.add_module_argument(key, pooling)
def add_rnn_arguments(self, key):
self.add_module_argument(key, rnn)
self.add("layers", parent=key, type=int, default=1)
self.add("dynamic", parent=key, action="store_true", default=False)
self.add("dropout", parent=key, type=float, default=0)
def add_encoder_arguments(self, key):
self.add_module_argument(key, encoder)
self.add_rnn_arguments(self.joinargs(key, "cell"))
self.add_pooling_arguments(self.joinargs(key, "pooling"))
def add_decoder_arguments(self, key):
self.add_module_argument(key, decoder)
self.add_rnn_arguments(self.joinargs(key, "cell"))
def add_vsae_arguments(self, key):
self.add_module_argument(key, vae)
self.add("z-dim", parent=key, type=int, default=512)
self.add("word-dim", parent=key, type=int, default=300)
self.add("kld-scale", parent=key, type=float, default=1.0)
self.add_encoder_arguments(self.joinargs(key, "encoder"))
self.add_decoder_arguments(self.joinargs(key, "decoder"))
self.add("embed-freeze", parent=key, action="store_true", default=False)
def add_all_arguments(self):
self.add_nonlinear_argument("nonlinear")
self.add_vsae_arguments("vae")
class ModelBuilder(object):
def __init__(self, args, vocab):
self.args = args
self.vocab = vocab
self.vocab_size = len(vocab)
self.bos_idx = vocab.f2i.get(args.bos)
self.eos_idx = vocab.f2i.get(args.eos)
def get(self, key, default=None):
return getattr(self.args, key, default)
def get_module_cls(self, key, kwargs_map=None, fallback=None):
if fallback is None:
fallback = {}
if kwargs_map is None:
kwargs_map = {}
type = self.get(key)
cls = manager.get(type)
sub_kwargs = utils.map_val(type, kwargs_map,
ignore_err=True, fallback=fallback)
def create(*args, **kwargs):
return cls(*args, **kwargs, **sub_kwargs)
return create
def get_nonlinear_cls(self, key):
return self.get_module_cls(key)
def get_pooling_cls(self, key):
return self.get_module_cls(key)
def get_rnn_cls(self, key):
return self.get_module_cls(key, fallback=dict(
dynamic=self.get(f"{key}_dynamic"),
dropout=self.get(f"{key}_dropout"),
layers=self.get(f"{key}_layers")
))
def get_encoder_cls(self, key):
return self.get_module_cls(key, {
"last-state-rnn-encoder": dict(
rnn_cls=self.get_rnn_cls(f"{key}_cell")
),
"pooled-rnn-encoder": dict(
rnn_cls=self.get_rnn_cls(f"{key}_cell"),
pool_cls=self.get_pooling_cls(f"{key}_pooling")
)
})
def get_decoder_cls(self, key):
return self.get_module_cls(key, {
"rnn-decoder": dict(
rnn_cls=self.get_rnn_cls(f"{key}_cell")
),
"rnn-recalling-decoder": dict(
rnn_cls=self.get_rnn_cls(f"{key}_cell")
),
})
def get_embedding_cls(self, key):
return lambda *args, **kwargs: embedding.FineTunableEmbedding(
*args, **kwargs,
allow_padding=True,
freeze=self.get(f"{key}_embed_freeze"),
unfrozen_idx=[self.bos_idx, self.eos_idx]
)
def get_vsae_cls(self, key):
return self.get_module_cls(key, {
"variational-sentence-autoencoder": dict(
z_dim=self.get(f"{key}_z_dim"),
word_dim=self.get(f"{key}_word_dim"),
vocab_size=self.vocab_size,
kld_scale=self.get(f"{key}_kld_scale"),
emb_cls=self.get_embedding_cls(key),
enc_cls=self.get_encoder_cls(f"{key}_encoder"),
dec_cls=self.get_decoder_cls(f"{key}_decoder")
)
})
def build_model(*args, **kwargs):
builder = ModelBuilder(*args, **kwargs)
nonlinear.set_default(builder.get_nonlinear_cls("nonlinear"))
return builder.get_vsae_cls("vae")()
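# Typical flow (illustrative): populate an argparse parser via add_arguments(parser), parse the command
# line, then call build_model(args, vocab) with a vocab object exposing f2i to obtain the configured
# variational sentence autoencoder.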
| 2.265625
| 2
|
Course-Work/week_5/Money Change Again.py
|
TSG405/Algorithmic-Toolbox-UC-San-Diego
| 1
|
12776315
|
<reponame>TSG405/Algorithmic-Toolbox-UC-San-Diego
import math
money = int(input())
denominations = [1, 3, 4]
minCoins = [0] + [math.inf]*money
for i in range(1, money+1):
for j in denominations:
if i>=j:
coins = minCoins[i-j]+1
if coins < minCoins[i]:
minCoins[i] = coins
print(minCoins[money])
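# DP recurrence: minCoins[i] = min(minCoins[i - j] + 1 for j in denominations if j <= i).
# Worked example: money = 6 with coins {1, 3, 4} gives minCoins[6] = 2 (6 = 3 + 3).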
'''
---OR---
n=int(input())
f=[0 for tsg in range(n+1)]
f[0],f[1],i=0,1,2
def ch3(i):
if (i>=4):
z3 = i-4
return z3
else: return -1
def ch2(i):
if (i>=3):
z2 = i-3
return z2
else: return -1
while(i<=n):
z3=z2=z1=2*n
k3=ch3(i)
if (k3!=-1): z3=f[k3]
else: z3=n*2
k2=ch2(i)
if (k2!=-1): z2=f[k2]
else: z2=n*2
k1=i-1
z1=f[k1]
j=min(z3,z2,z1)
if (j==z3): f[i]=z3+1
elif (j==z2): f[i]=z2+1
else: f[i]=z1+1
i+=1
if(n==1): print(1)
else: print(f[-1])
'''
# CODED BY TSG405, 2021
| 3.15625
| 3
|
test.py
|
AniruddhaGawali/Hand_Cricket
| 1
|
12776316
|
<filename>test.py
# Importing Tkinter module
from tkinter import *
from tkinter.ttk import *
# Creating master Tkinter window
master = Tk()
# Creating object of photoimage class
# Image should be in the same folder
# in which script is saved
p1 = PhotoImage(file = 'data/img/img1.png')
# Setting icon of master window
master.iconphoto(False, p1)
# Creating button
b = Button(master, text = 'Click me !')
b.pack(side = TOP)
# Infinite loop can be terminated by
# keyboard or mouse interrupt
# or by any predefined function (destroy())
mainloop()
| 3.109375
| 3
|
inkml/references.py
|
Coderik/pinkml
| 0
|
12776317
|
<reponame>Coderik/pinkml
from typing import Mapping, Set
from . import ink, reading_types as rt
from .ids import is_local_id, to_local_id
def resolve_references(definitions: rt.Definitions, assume_local_refs):
# Resolve Brush.parentRef
ignored_ids = resolve_brush_parent_references(definitions.brushes)
if len(ignored_ids) > 0:
for id in ignored_ids:
del definitions.brushes[id]
        print('Some brush references are either cyclic or incorrect and could not be resolved. The following brushes '
              'will be ignored: {}'.format(', '.join(ignored_ids)))
# Resolve Timestamp.parentRef
ignored_ids = resolve_timestamp_parent_references(definitions.timestamps)
if len(ignored_ids) > 0:
for id in ignored_ids:
del definitions.timestamps[id]
        print('Some timestamp references are either cyclic or incorrect and could not be resolved. The following '
              'timestamps will be ignored: {}'.format(', '.join(ignored_ids)))
# Resolve Context.parentRef
ignored_ids = resolve_context_parent_references(definitions.contexts)
if len(ignored_ids) > 0:
for id in ignored_ids:
del definitions.contexts[id]
        print('Some context references are either cyclic or incorrect and could not be resolved. The following '
              'contexts will be ignored: {}'.format(', '.join(ignored_ids)))
# Resolve Context.inkSourceRef, Context.traceFormatRef, Context.brushRef and Context.inkSourceRef
resolve_context_content_references(definitions, assume_local_refs)
def resolve_brush_parent_references(brushes: Mapping[str, rt.BrushEnvelope]) -> Set[str]:
backlog: Set[str] = set()
resolved: Set[str] = set()
# Find all terminal brushes that do not reference other brushes
for brush in brushes.values():
if brush.parent_ref == '' or brush.parent_ref == '#DefaultBrush':
resolved.add(brush.id)
else:
backlog.add(brush.id)
while len(backlog) > 0:
# Look for brushes that reference already resolved brushes (and thus can be resolved)
stage_ids = []
for id in backlog:
brush = brushes[id]
parent_id = to_local_id(brush.parent_ref)
if parent_id in resolved:
# Resolve reference
brush.brush.parent = brushes[parent_id].brush
stage_ids.append(id)
# Stop, if no refs were resolved at this stage
if len(stage_ids) == 0:
break
# Mark brushes resolved at this stage and remove them from backlog
for id in stage_ids:
resolved.add(id)
backlog.remove(id)
# TODO: replace missing ref by no-ref and keep the item itself? (only drop cycles)
# NOTE: if backlog is not empty at this point, there are cyclic or incorrect references
return backlog
def resolve_timestamp_parent_references(timestamps: Mapping[str, rt.TimestampEnvelope]) -> Set[str]:
backlog: Set[str] = set()
resolved: Set[str] = set()
# Find all terminal timestamps that do not reference other timestamps
for timestamp in timestamps.values():
if timestamp.parent_ref == '':
resolved.add(timestamp.id)
else:
backlog.add(timestamp.id)
while len(backlog) > 0:
# Look for timestamps that reference already resolved timestamps (and thus can be resolved)
stage_ids = []
for id in backlog:
timestamp = timestamps[id]
parent_id = to_local_id(timestamp.parent_ref)
if parent_id in resolved:
# Resolve reference
timestamp.timestamp.parent = timestamps[parent_id].timestamp
stage_ids.append(id)
# Stop, if no refs were resolved at this stage
if len(stage_ids) == 0:
break
# Mark timestamps resolved at this stage and remove them from backlog
for id in stage_ids:
resolved.add(id)
backlog.remove(id)
# NOTE: if backlog is not empty at this point, there are cyclic or incorrect references
return backlog
def resolve_context_parent_references(contexts: Mapping[str, rt.ContextEnvelope]) -> Set[str]:
backlog: Set[str] = set()
resolved: Set[str] = set()
# Find all terminal contexts that do not reference other contexts
for context in contexts.values():
if context.parent_ref == '':
resolved.add(context.id)
else:
backlog.add(context.id)
while len(backlog) > 0:
# Look for contexts that reference already resolved contexts (and thus can be resolved)
stage_ids = []
for id in backlog:
context = contexts[id]
parent_id = to_local_id(context.parent_ref)
if parent_id in resolved:
# Resolve reference
context.context.parent = contexts[parent_id].context
stage_ids.append(id)
# Stop, if no refs were resolved at this stage
if len(stage_ids) == 0:
break
# Mark contexts resolved at this stage and remove them from backlog
for id in stage_ids:
resolved.add(id)
backlog.remove(id)
# NOTE: if backlog is not empty at this point, there are cyclic or incorrect references
return backlog
def resolve_context_content_references(definitions: rt.Definitions, assume_local_refs):
for context in definitions.contexts.values():
# Set Context.ink_source
if isinstance(context.ink_source_or_ref, ink.InkSource):
context.context.ink_source = context.ink_source_or_ref
elif len(context.ink_source_or_ref) > 0:
if is_local_id(context.ink_source_or_ref) or assume_local_refs:
ink_source_id = to_local_id(context.ink_source_or_ref)
if ink_source_id in definitions.ink_sources:
context.context.ink_source = definitions.ink_sources[ink_source_id]
else:
print('Warning. Could not find inkSource "{}" referenced by context "{}"'
.format(context.ink_source_or_ref, context.id))
else:
print('Warning. External references are not yet supported: "{}"'.format(context.ink_source_or_ref))
# Set Context.trace_format
if isinstance(context.trace_format_or_ref, ink.TraceFormat):
context.context.trace_format = context.trace_format_or_ref
elif len(context.trace_format_or_ref) > 0:
if is_local_id(context.trace_format_or_ref) or assume_local_refs:
trace_format_id = to_local_id(context.trace_format_or_ref)
if trace_format_id in definitions.trace_formats:
context.context.trace_format = definitions.trace_formats[trace_format_id]
else:
print('Warning. Could not find traceFormat "{}" referenced by context "{}"'
.format(context.trace_format_or_ref, context.id))
else:
print('Warning. External references are not yet supported: "{}"'.format(context.trace_format_or_ref))
# Set Context.brush
if isinstance(context.brush_or_ref, rt.BrushEnvelope):
# Set brush that is given as a nested element
if len(context.brush_or_ref.id) > 0:
# This brush has an ID, so its refs should already be resolved. Take it from definitions
if context.brush_or_ref.id in definitions.brushes:
context.context.brush = definitions.brushes[context.brush_or_ref.id].brush
else:
# This brush is not in definitions, which means that it was ignored for some reason
print('Warning. Context "{}" references a brush that was ignored'.format(context.id))
else:
# This brush has no ID, so it is not in definitions
context.context.brush = context.brush_or_ref.brush
# If needed, resolve parent reference of this brush here, because it was not processed before
if len(context.brush_or_ref.parent_ref) > 0:
parent_id = to_local_id(context.brush_or_ref.parent_ref)
if parent_id in definitions.brushes:
context.context.brush.parent = definitions.brushes[parent_id].brush
else:
print('Warning. Could not find brush "{}" referenced by brush "{}"'
.format(context.brush_or_ref.parent_ref, context.brush_or_ref.id))
elif len(context.brush_or_ref) > 0:
if is_local_id(context.brush_or_ref) or assume_local_refs:
# Set brush that is given as a reference
brush_id = to_local_id(context.brush_or_ref)
if brush_id in definitions.brushes:
context.context.brush = definitions.brushes[brush_id].brush
else:
print('Warning. Could not find brush "{}" referenced by context "{}"'
.format(context.brush_or_ref, context.id))
else:
print('Warning. External references are not yet supported: "{}"'.format(context.brush_or_ref))
# Set Context.timestamp
if isinstance(context.timestamp_or_ref, rt.TimestampEnvelope):
# Set timestamp that is given as a nested element
if len(context.timestamp_or_ref.id) > 0:
# This timestamp has an ID, so its refs should already be resolved. Take it from definitions
if context.timestamp_or_ref.id in definitions.timestamps:
context.context.timestamp = definitions.timestamps[context.timestamp_or_ref.id].timestamp
else:
# This timestamp is not in definitions, which means that it was ignored for some reason
print('Context "{}" references a timestamp that was ignored'.format(context.id))
else:
# This timestamp has no ID, so it is not in definitions
context.context.timestamp = context.timestamp_or_ref.timestamp
# If needed, resolve parent reference of this timestamp here, because it was not processed before
if len(context.timestamp_or_ref.parent_ref) > 0:
if context.timestamp_or_ref.parent_ref in definitions.timestamps:
context.context.timestamp.parent = \
definitions.timestamps[context.timestamp_or_ref.parent_ref].timestamp
else:
                        print('Could not find timestamp "{}" referenced by timestamp "{}"'
.format(context.timestamp_or_ref.parent_ref, context.timestamp_or_ref.id))
elif len(context.timestamp_or_ref) > 0:
if is_local_id(context.timestamp_or_ref) or assume_local_refs:
# Set timestamp that is given as a reference
timestamp_id = to_local_id(context.timestamp_or_ref)
if timestamp_id in definitions.timestamps:
context.context.timestamp = definitions.timestamps[timestamp_id].timestamp
else:
print('Could not find timestamp "{}" referenced by context "{}"'
.format(context.timestamp_or_ref, context.id))
else:
print('External references are not yet supported: "{}"'.format(context.timestamp_or_ref))
| 2.03125
| 2
|
aws_interface/settings/base.py
|
hubaimaster/aws-interface
| 53
|
12776318
|
import json
import os
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRETS_DIR = os.path.join(BASE_DIR, 'secret')
SECRETS_BASE = os.path.join(SECRETS_DIR, 'base.json')
try:
secrets_base = json.load(open(SECRETS_BASE, 'rt'))
except FileNotFoundError:
import subprocess
    subprocess.call(['python', 'generate_secrets.py'])
secrets_base = json.load(open(SECRETS_BASE, 'rt'))
"""
raise ImproperlyConfigured('Could not find secret file {}'.format(SECRETS_BASE))
"""
SECRET_KEY = secrets_base['SECRET_KEY']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'storages',
'dashboard.apps.DashboardConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware', # For translation
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/apps/'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'dashboard.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-KR'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
LANGUAGES = [
('ko', _('Korean')),
('en', _('English')),
]
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
| 1.929688
| 2
|
01_captions/utils.py
|
saikrishnarallabandi/compositionality-expts
| 2
|
12776319
|
<reponame>saikrishnarallabandi/compositionality-expts
from datetime import datetime # to get the current date and time
def log(log_message):
"""
- DOES: adds a log message "log_message" and its time stamp to a log file.
"""
# open the log file and make sure that it's closed properly at the end of the
# block, even if an exception occurs:
with open("log.txt", "a") as log_file:
# write the current time stamp and log message to logfile:
log_file.write(datetime.strftime(datetime.today(),
"%Y-%m-%d %H:%M:%S") + ": " + log_message)
log_file.write("\n") # (so the next message is put on a new line)
| 3.546875
| 4
|
tests/platform_tests/test_killed_pid.py
|
noahspurrier/pexpect
| 8
|
12776320
|
<filename>tests/platform_tests/test_killed_pid.py
#!/usr/bin/env python
import os, time, signal
import expyct
e = expyct.expyct ('/bin/sh', '-i')
print 'pid,fd:', e.pid, e.fd
print 'isAlive:', e.isAlive()
# Treat it brusquely.
print 'sending SIGKILL...'
os.kill (e.pid, signal.SIGKILL)
time.sleep (1)
print os.read(e.fd, 1000)
print 'isAlive:', e.isAlive()
e.expect('\#')
e.send ('ls -la /\n')
r,m,i = e.expect ('\#')
print r
| 1.992188
| 2
|
ADContentViewer.py
|
Ping-P0ng/ADContentViewer
| 0
|
12776321
|
import tornado.ioloop
import tornado.web
import os
import sqlite3
import json
import re
import argparse
import sys
MainPageStart = """<html><head>
<title>ADContentViewer</title>
</head>
<body>
<div style="display: flex;height: 100%;width: 100%;">
<div style="float: left;
width: 60px;
background-color: mediumblue;
padding: 3px;
box-shadow: 0 0 5px 2px;
">
<form action="request">
<input type="submit" name="action" value="H" style=" background-color: Transparent;background-repeat:no-repeat;border: none;cursor:pointer;overflow: hidden;outline:none;height: 7%;width: 100%;color: white;font-size: x-large;font-family: unset;font-style: normal;"/>
<input type="submit" name="action" value="U" style=" background-color: Transparent;background-repeat:no-repeat;border: none;cursor:pointer;overflow: hidden;outline:none;height: 7%;width: 100%;color: white;font-size: x-large;font-family: unset;font-style: normal;"/>
<input type="submit" name="action" value="G" style=" background-color: Transparent;background-repeat:no-repeat;border: none;cursor:pointer;overflow: hidden;outline:none;height: 7%;width: 100%;color: white;font-size: x-large;font-family: unset;font-style: normal;"/>
<input type="submit" name="action" value="C" style=" background-color: Transparent;background-repeat:no-repeat;border: none;cursor:pointer;overflow: hidden;outline:none;height: 7%;width: 100%;color: white;font-size: x-large;font-family: unset;font-style: normal;"/>
<input type="submit" name="action" value="T" style=" background-color: Transparent;background-repeat:no-repeat;border: none;cursor:pointer;overflow: hidden;outline:none;height: 7%;width: 100%;color: white;font-size: x-large;font-family: unset;font-style: normal;"/>
</form>
</div >
<div style="float: left;
background-color: #f9f9f9;
height: 100%;
position: relative;
width: 100%;
margin: 0 12px;
box-shadow:
-20px 20px 0 -17px #fff,
20px -20px 0 -17px #fff,
20px 20px 0 -20px #c27153,
0 0 0 2px #c27153;">
"""
MainPageEnd = """</div>
</div>
</body></html>"""
HomePage = """<div style="height: 7%;
width: 100%;
text-align: left;
margin: 5px;
line-height: 2.5;
vertical-align: middle;
box-shadow: 0 2px 0 0 black;
font-size: x-large;
font-style: normal;">"""
ObjPage = """style="background-color: Transparent;
background-repeat: no-repeat;
border: none;
cursor: pointer;
line-height: normal;
overflow: hidden;
outline: none;
height: auto;
min-height: 5%;
position: static;
width: 100%;
text-align: left;
color: black;
font-size: larger;
font-style: normal;
box-shadow: 0 2px 0 0 black;" """
ViewDiv = """<div style="height: 85%;width: 100%;overflow: auto;">"""
SearchPanel = """method="get" style="margin: 15px;">
<input type="text" name="search" placeholder="Search string" style="border: 2px solid black;width: 89%;
height: 5%;border-radius: 21px; outline: none;">
<input type="submit" name="action" value="search" style="border: 2px solid black;width: 10%;
height: 5%;
color: black;
outline: none;
background-color: white;
text-decoration: none;
border-radius: 21px;"/></form>"""
UpdateHashPanel = """
<label style="text-decoration: underline;
color: black;
font-size: x-large;
margin-left: 90px;">Hash</label>
<div style="width: max-content;
border: 2px solid mediumblue;
margin: 10px;"><form action="tools" enctype="multipart/form-data" method="post">
<label for="file-upload" style=" border: 2px solid black;
width: 200px;
height: 30px;
border-radius: 21px;
outline: none;
text-align: center;
display: block;
margin: 10px;
padding-top: 10;">Select hash file
</label>
<input id="file-upload" type="file" name="content" style="display:none;">
<input type="hidden" name="action" value="update_hash"/>
<input type="submit" value="Update" style=" border: 2px solid black;
width: 100px;
height: 5%;
margin-left: 60px;
color: black;
outline: none;
background-color: white;
text-decoration: none;
border-radius: 21px;">
</form></div>"""
UpdateObjectPanel = """
<label style="text-decoration: underline;
color: black;
font-size: x-large;
margin-left: 85px;">Object</label>
<div style="width: max-content;
border: 2px solid mediumblue;
margin: 10px;">
<form action="tools" enctype="multipart/form-data" method="post">
<label for="file-object" style=" border: 2px solid black;
width: 200px;
height: 30px;
border-radius: 21px;
outline: none;
text-align: center;
display: block;
margin: 10px;
padding-top: 10;">Select object file
</label>
<input id="file-object" type="file" name="content" style="display:none;"/>
<input type="hidden" name="action" value="update_object"/>
<select id="format" name="format_file" style="border: 2px solid black;
border-radius: 21px;
display: block;
padding: 5px;
margin: 10px;
outline: none;
margin-left: 50px;
background-color: white;">
<option value="adfind_default">adfind_default</option>
</select>
<input type="submit" value="Update" style=" border: 2px solid black;
width: 100px;
height: 5%;
margin-left: 60px;
color: black;
outline: none;
background-color: white;
text-decoration: none;
padding: .8em 1em calc(.8em + 3px);
border-radius: 21px;">
</form></div>"""
UserAccountControl = {"SCRIPT":1,
"ACCOUNTDISABLE":2,
"HOMEDIR_REQUIRED":8,
"LOCKOUT":16,
"PASSWD_NOTREQD":32,
"PASSWD_CANT_CHANGE":64,
"ENCRYPTED_TEXT_PWD_ALLOWED":128,
"TEMP_DUPLICATE_ACCOUNT":256,
"NORMAL_ACCOUNT":512,
"INTERDOMAIN_TRUST_ACCOUNT":2048,
"WORKSTATION_TRUST_ACCOUNT":4096,
"SERVER_TRUST_ACCOUNT":8192,
"DONT_EXPIRE_PASSWORD":<PASSWORD>,
"MNS_LOGON_ACCOUNT":131072,
"SMARTCARD_REQUIRED":262144,
"TRUSTED_FOR_DELEGATION":524288,
"NOT_DELEGATED":1048576,
"USE_DES_KEY_ONLY":2097152,
"DONT_REQ_PREAUTH":4194304,
"PASSWORD_EXPIRED":<PASSWORD>,
"TRUSTED_TO_AUTH_FOR_DELEGATION":16777216,
"PARTIAL_SECRETS_ACCOUNT":67108864
}
class Web(tornado.web.RequestHandler):
def initialize(self,Settings,MainReader):
self.Settings = Settings
self.MainReader = MainReader
if(os.path.exists(Settings["db_name"])):
self.ObjDb = sqlite3.connect(Settings["db_name"])
self.ObjCursor = self.ObjDb.cursor()
def post(self):
if(self.request.uri[:6] == "/tools"):
WritePage = MainPageStart
AllInputArg = self.request.arguments
if("action" in AllInputArg.keys()):
if("update_hash" == self.get_argument("action")):
HashUpdateFiles = self.request.files
Out = {"update":0,"add":0}
if("content" in HashUpdateFiles.keys()):
try:
HashStr = HashUpdateFiles["content"][0]["body"].decode("utf-8").replace("\n"," ")
PwdData = re.findall(r"([^: ]*):[^:]*:([^:]*):([^:]*):\S*:\S*:(\S*)",HashStr)
except:
PwdData = re.findall(r"([^\n\r:]*):[^:]*:([^:]*):([^:]*):\S*:\S*:(\S*)",HashUpdateFiles["content"][0]["body"].decode("UTF-16LE")[1:])
if(len(PwdData) == 0):
try:
HashStr = HashUpdateFiles["content"][0]["body"].decode("utf-8").replace("\n"," ")
HashcatFormat = re.findall(r"([^: ]*):(\S*)",HashStr)
except:
HashcatFormat = re.findall(r"([^\n\r:]*):(\S*)",HashUpdateFiles["content"][0]["body"].decode("UTF-16LE")[1:])
if(len(HashcatFormat) != 0):
for CurretHash in HashcatFormat:
self.ObjCursor.execute("""UPDATE pwd SET pass='{1}' WHERE NT='{0}'""".format(CurretHash[0],CurretHash[1]))
self.ObjDb.commit()
WritePage = WritePage + """<div style="margin:10px;">Filename: {0}<br>Format: NT:pass<br>{1} hash updated<br><div>""".format(HashUpdateFiles["content"][0]["filename"],len(HashcatFormat))
else:
for CurretPwd in PwdData:
SelectPwd = self.ObjCursor.execute("""SELECT * FROM pwd WHERE sAMAccountName='{0}'""".format(CurretPwd[0]))
CheckPwd = SelectPwd.fetchone()
if(CheckPwd == None):
self.ObjCursor.execute("""INSERT INTO pwd VALUES ('{0}','{1}','{2}','{3}')""".format(CurretPwd[0],CurretPwd[1],CurretPwd[2],CurretPwd[3]))
Out["add"] += 1
else:
self.ObjCursor.execute("""UPDATE pwd SET LM='{1}',NT='{2}',pass='{3}' WHERE sAMAccountName='{0}'""".format(CurretPwd[0],CurretPwd[1],CurretPwd[2],CurretPwd[3]))
Out["update"] += 1
self.ObjDb.commit()
WritePage = WritePage + """<div style="margin:10px;">Filename: {0}<br>Format: pwd<br>{1} hash added<br>{2} hash updated<br><div>""".format(HashUpdateFiles["content"][0]["filename"],Out["add"],Out["update"])
self.write(WritePage)
elif("update_object" == self.get_argument("action")):
ObjectUpdateFile = self.request.files
if("content" in ObjectUpdateFile.keys()):
try:
ObjectData = ObjectUpdateFile["content"][0]["body"].decode("utf-8")
except:
ObjectData = ObjectUpdateFile["content"][0]["body"].decode("UTF-16LE")[1:]
if(len(ObjectData) != 0):
WritePage = WritePage + """<div style="margin:10px;height: 85%;width: 100%;overflow: auto;">"""
self.write(WritePage)
self.MainReader.GetADObjects(ObjectData.split('\n'),self)
self.write("""</div>"""+MainPageEnd)
def get(self):
if(self.request.uri == "/"):
self.redirect("/home")
elif(self.request.uri[:6] == "/tools"):
WritePage = MainPageStart
WritePage = WritePage + UpdateHashPanel + UpdateObjectPanel
self.write(WritePage+"""</div>"""+MainPageEnd)
elif(self.request.uri[:6] == "/users" or self.request.uri[:10] == "/computers" or self.request.uri[:7] == "/groups"):
AllInputArg = self.request.arguments
if(self.request.uri[:6] == "/users"):
WritePage = MainPageStart + '<form action="users" ' + SearchPanel + ViewDiv
elif(self.request.uri[:10] == "/computers"):
WritePage = MainPageStart + '<form action="computers" ' + SearchPanel + ViewDiv
elif(self.request.uri[:7] == "/groups"):
WritePage = MainPageStart + '<form action="groups" ' + SearchPanel + ViewDiv
if("count" in AllInputArg.keys() and "offset" in AllInputArg.keys()):
GetArgUserCount = self.get_argument("count")
GetArgUserOffset = self.get_argument("offset")
if("action" in AllInputArg.keys()):
if(self.get_argument("action") == "next"):
GetArgUserOffset = int(GetArgUserOffset) + int(GetArgUserCount)
elif(self.get_argument("action") == "back"):
GetArgUserOffset = int(GetArgUserOffset) - int(GetArgUserCount)
if(GetArgUserOffset < 0):
GetArgUserOffset = 0
if(self.request.uri[:6] == "/users"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,userAccountControl,description FROM users LIMIT {0},{1}""".format(GetArgUserOffset,GetArgUserCount)):
PrintStr = "{0}".format(InfoObject[0])
if(UserAccountControl["ACCOUNTDISABLE"] & int(InfoObject[1]) != 0):
PrintStr = "*ACCOUNTDISABLE* {0}".format(PrintStr)
descriptionValue = ""
if(InfoObject[2] != ""):
descriptionValue = "- {0}".format(InfoObject[2])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/users?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(PrintStr,descriptionValue)
NPanel = """<form action="users" method="get" style="margin: 15px; position: absolute;bottom: 0; right: 0;">"""
elif(self.request.uri[:10] == "/computers"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,description FROM computers LIMIT {0},{1}""".format(GetArgUserOffset,GetArgUserCount)):
descriptionValue = ""
if(InfoObject[1] != ""):
descriptionValue = "- {0}".format(InfoObject[1])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/computers?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(InfoObject[0],descriptionValue)
NPanel = """<form action="computers" method="get" style="margin: 15px; position: absolute;bottom: 0; right: 0;">"""
elif(self.request.uri[:7] == "/groups"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,description FROM groups LIMIT {0},{1}""".format(GetArgUserOffset,GetArgUserCount)):
descriptionValue = ""
if(InfoObject[1] != ""):
descriptionValue = "- {0}".format(InfoObject[1])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/groups?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(InfoObject[0],descriptionValue)
NPanel = """<form action="groups" method="get" style="margin: 15px; position: absolute;bottom: 0; right: 0;">"""
NPanel = NPanel + """<input type="submit" name="action" value="back" style="border: 2px solid black;width: 70px; height: 38px; color: black; outline: none; background-color: white; text-decoration: none; border-radius: 21px;"/>"""
NPanel = NPanel + """<input type="text" name="count" placeholder="Count" value="{0}" style="border: 2px solid black;width: 100px;height: 38px; border-radius: 21px; outline: none; text-align: center;"/>""".format(GetArgUserCount)
NPanel = NPanel + """<input type="hidden" name="offset" value="{0}"/>""".format(GetArgUserOffset)
NPanel = NPanel + """<input type="submit" name="action" value="next" style="border: 2px solid black;width: 70px; height: 38px; color: black; outline: none; background-color: white; text-decoration: none; border-radius: 21px;"/></form>"""
self.write(WritePage+" </div>"+NPanel+MainPageEnd)
elif("action" in AllInputArg.keys()):
GetArgAction = self.get_argument("action")
if(GetArgAction == "search"):
GetArgSearch = self.get_argument("search")
if(GetArgSearch != ""):
if(self.request.uri[:6] == "/users"):
WritePage = MainPageStart + '<form action="users" ' + SearchPanel + ViewDiv
elif(self.request.uri[:10] == "/computers"):
WritePage = MainPageStart + '<form action="computers" ' + SearchPanel + ViewDiv
elif(self.request.uri[:7] == "/groups"):
WritePage = MainPageStart + '<form action="groups" ' + SearchPanel + ViewDiv
if(self.request.uri[:6] == "/users"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,userAccountControl,description FROM users WHERE sAMAccountName LIKE '%{0}%'""".format(GetArgSearch)):
PrintStr = "{0}".format(InfoObject[0])
if(UserAccountControl["ACCOUNTDISABLE"] & int(InfoObject[1]) != 0):
PrintStr = "*ACCOUNTDISABLE* {0}".format(PrintStr)
descriptionValue = ""
if(InfoObject[2] != ""):
descriptionValue = "- {0}".format(InfoObject[2])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/users?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(PrintStr,descriptionValue)
elif(self.request.uri[:10] == "/computers"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,description FROM computers WHERE sAMAccountName LIKE '%{0}%'""".format(GetArgSearch)):
descriptionValue = ""
if(InfoObject[1] != ""):
descriptionValue = "- {0}".format(InfoObject[1])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/computers?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(InfoObject[0],descriptionValue)
elif(self.request.uri[:7] == "/groups"):
for InfoObject in self.ObjCursor.execute("""SELECT sAMAccountName,description FROM groups WHERE sAMAccountName LIKE '%{0}%'""".format(GetArgSearch)):
descriptionValue = ""
if(InfoObject[1] != ""):
descriptionValue = "- {0}".format(InfoObject[1])
WritePage = WritePage + """<a style="text-decoration: none; color: black;" href="/groups?action=info&object={0}"><div """.format(InfoObject[0])+ObjPage+""">{0} {1}</div></a><br>""".format(InfoObject[0],descriptionValue)
self.write(WritePage+"""</div>"""+MainPageEnd)
elif(GetArgAction == "info"):
GetArgObj = self.get_argument("object")
if(GetArgObj != ""):
if(self.request.uri[:6] == "/users"):
SelectObj = self.ObjCursor.execute("""SELECT users.FullData from users where users.samaccountname='{0}'""".format(GetArgObj))
elif(self.request.uri[:10] == "/computers"):
SelectObj = self.ObjCursor.execute("""SELECT FullData FROM computers WHERE sAMAccountName='{0}'""".format(GetArgObj))
elif(self.request.uri[:7] == "/groups"):
SelectObj = self.ObjCursor.execute("""SELECT FullData FROM groups WHERE sAMAccountName='{0}'""".format(GetArgObj))
ObjValue = SelectObj.fetchone()
if(ObjValue != None):
JsonObj = json.loads(ObjValue[0])
if(self.request.uri[:6] == "/users"):
WritePage = MainPageStart + '<form action="users" ' + SearchPanel + ViewDiv
elif(self.request.uri[:10] == "/computers"):
WritePage = MainPageStart + '<form action="computers" ' + SearchPanel + ViewDiv
elif(self.request.uri[:7] == "/groups"):
WritePage = MainPageStart + '<form action="groups" ' + SearchPanel + ViewDiv
for CurrentKey in JsonObj.keys():
if(isinstance(JsonObj[CurrentKey],list)):
for CurrentValue in JsonObj[CurrentKey]:
if(CurrentKey == "member" or CurrentKey == "memberOf"):
WritePage = WritePage + """{0}: <a style="text-decoration: none; color: black;" href="/request?dn={1}">{1}</a><br>""".format(CurrentKey,CurrentValue)
else:
WritePage = WritePage + "{0}: {1}<br>".format(CurrentKey,CurrentValue)
else:
if(CurrentKey == "userAccountControl"):
PrintStr = ""
for CurrentUAC in UserAccountControl.keys():
if(UserAccountControl[CurrentUAC] & int(JsonObj[CurrentKey]) != 0):
PrintStr += " {0} |".format(CurrentUAC)
WritePage = WritePage + "{0}: {1} > {2}<br>".format(CurrentKey,JsonObj[CurrentKey],PrintStr[:-1])
else:
WritePage = WritePage + "{0}: {1}<br>".format(CurrentKey,JsonObj[CurrentKey])
SelectPwd = self.ObjCursor.execute("""SELECT LM,NT,pass FROM pwd WHERE sAMAccountName='{0}'""".format(GetArgObj))
CheckPwd = SelectPwd.fetchone()
if(CheckPwd != None):
WritePage = WritePage + "---------------------------<br>"
WritePage = WritePage + "{0}:{1}:{2}".format(CheckPwd[0],CheckPwd[1],CheckPwd[2])
self.write(WritePage+"""</div>"""+MainPageEnd)
else:
pass
elif(self.request.uri == "/home"):
WritePage = MainPageStart
CountUser = self.ObjCursor.execute("""SELECT count(*) FROM users""")
CountUserValue = CountUser.fetchone()
WritePage = WritePage + HomePage + "Users: {0}</div>".format(CountUserValue[0])
CountGroup = self.ObjCursor.execute("""SELECT count(*) FROM groups""")
            CountGroupValue = CountGroup.fetchone()
WritePage = WritePage + HomePage + "Groups: {0}</div>".format(CountGroupValue[0])
CountPC = self.ObjCursor.execute("""SELECT count(*) FROM computers""")
            CountPCValue = CountPC.fetchone()
WritePage = WritePage + HomePage + "Computers: {0}</div>".format(CountPCValue[0])
CountHash = self.ObjCursor.execute("""SELECT count(*) FROM pwd""")
CountHashValue = CountHash.fetchone()
CountPass = self.ObjCursor.execute("""SELECT count(*) FROM pwd WHERE pass!=''""")
            CountPassValue = CountPass.fetchone()
WritePage = WritePage + HomePage + "Passwords: {0}/{1}</div>".format(CountPassValue[0],CountHashValue[0])
self.write(WritePage+MainPageEnd)
elif(self.request.uri[:8] == "/request"):
AllInputArg = self.request.arguments
if("action" in AllInputArg.keys()):
GetArgAction = self.get_argument("action")
if(GetArgAction == "H"):
self.redirect("/home")
elif(GetArgAction == "T"):
self.redirect("/tools")
elif(GetArgAction == "U"):
self.redirect("/users?offset=0&count={0}".format(self.Settings["obj_count_page"]))
elif(GetArgAction == "C"):
self.redirect("/computers?offset=0&count={0}".format(self.Settings["obj_count_page"]))
elif(GetArgAction == "G"):
self.redirect("/groups?offset=0&count={0}".format(self.Settings["obj_count_page"]))
elif("dn" in AllInputArg.keys()):
GetArgAction = self.get_argument("dn")
CountUser = self.ObjCursor.execute("""SELECT sAMAccountName FROM users WHERE dn='{0}'""".format(GetArgAction))
CountUserValue = CountUser.fetchone()
if(CountUserValue != None):
self.redirect("/users?action=info&object={0}".format(CountUserValue[0]))
CountPC = self.ObjCursor.execute("""SELECT sAMAccountName FROM computers WHERE dn='{0}'""".format(GetArgAction))
CountPCValue = CountPC.fetchone()
if(CountPCValue != None):
self.redirect("/computers?action=info&object={0}".format(CountPCValue[0]))
CountGroup = self.ObjCursor.execute("""SELECT sAMAccountName FROM groups WHERE dn='{0}'""".format(GetArgAction))
CountGroupValue = CountGroup.fetchone()
if(CountGroupValue != None):
self.redirect("/groups?action=info&object={0}".format(CountGroupValue[0]))
else:
WritePage = MainPageStart + """<div style="margin: 10px;">Object not found</div>""" + MainPageEnd
self.write(WritePage)
class Reader(object):
def __init__(self,Settings):
self.ADInfoFiles = {}
self.Settings = Settings
if(os.path.isdir(Settings["db_name"])):
sys.exit("> {0} - it directory".format(Settings["db_name"]))
if(not os.path.exists(Settings["db_name"])):
self.ObjDb = sqlite3.connect(Settings["db_name"])
self.ObjCursor = self.ObjDb.cursor()
print("> create {0}".format(Settings["db_name"]))
self.ObjCursor.execute('''CREATE TABLE users (dn text, sAMAccountName text,userAccountControl int,description text, FullData text)''')
self.ObjCursor.execute('''CREATE TABLE groups (dn text, sAMAccountName text,description text, FullData text)''')
self.ObjCursor.execute('''CREATE TABLE pwd (sAMAccountName text, LM text, NT text, pass text)''')
self.ObjCursor.execute('''CREATE TABLE computers (dn text, sAMAccountName text,userAccountControl int,description text, FullData text)''')
self.ObjDb.commit()
print("> create 4 tables (users,groups,computers,pwd)")
else:
print("> connect {0}".format(Settings["db_name"]))
self.ObjDb = sqlite3.connect(Settings["db_name"])
self.ObjCursor = self.ObjDb.cursor()
# sAMAccountType:
# - SAM_DOMAIN_OBJECT 0x0
# - SAM_GROUP_OBJECT 0x10000000
# - SAM_NON_SECURITY_GROUP_OBJECT 0x10000001
# - SAM_ALIAS_OBJECT 0x20000000
# - SAM_NON_SECURITY_ALIAS_OBJECT 0x20000001
# - SAM_USER_OBJECT 0x30000000
# - SAM_NORMAL_USER_ACCOUNT 0x30000000
# - SAM_MACHINE_ACCOUNT 0x30000001
# - SAM_TRUST_ACCOUNT 0x30000002
# - SAM_APP_BASIC_GROUP 0x40000000
# - SAM_APP_QUERY_GROUP 0x40000001
# - SAM_ACCOUNT_TYPE_MAX 0x7fffffff
def AddObjectDB(self,ADObject):
#print(ADObject)
descriptionValue = ""
if("description" in ADObject.keys()):
descriptionValue = ADObject["description"]
if(int(ADObject["sAMAccountType"]) == 805306368): # user object
SelectObj = self.ObjCursor.execute("""SELECT sAMAccountName FROM users WHERE sAMAccountName='{0}'""".format(ADObject["sAMAccountName"]))
CheckObj = SelectObj.fetchone()
if(CheckObj == None):
self.ObjCursor.execute("""INSERT INTO users VALUES ('{0}','{1}',{2},'{3}','{4}')""".format(ADObject["dn"],ADObject["sAMAccountName"],ADObject["userAccountControl"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("user","add")
elif(len(CheckObj) == 1):
self.ObjCursor.execute("""UPDATE users SET sAMAccountName='{1}', userAccountControl={2}, description='{3}', FullData='{4}' WHERE dn='{0}'""".format(ADObject["dn"],ADObject["sAMAccountName"],ADObject["userAccountControl"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("user","update")
else:
return("user","fail")
elif(int(ADObject["sAMAccountType"]) == 805306369): # computer object
SelectObj = self.ObjCursor.execute("""SELECT sAMAccountName FROM computers WHERE sAMAccountName='{0}'""".format(ADObject["sAMAccountName"]))
CheckObj = SelectObj.fetchone()
if(CheckObj == None):
self.ObjCursor.execute("""INSERT INTO computers VALUES ('{0}','{1}',{2},'{3}','{4}')""".format(ADObject["dn"],ADObject["sAMAccountName"],ADObject["userAccountControl"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("computer","add")
elif(len(CheckObj) == 1):
self.ObjCursor.execute("""UPDATE computers SET sAMAccountName='{1}', userAccountControl={2}, description='{3}', FullData='{4}' WHERE dn='{0}'""".format(ADObject["dn"],ADObject["sAMAccountName"],ADObject["userAccountControl"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("computer","update")
else:
return("computer","fail")
elif(int(ADObject["sAMAccountType"]) == 536870912 or int(ADObject["sAMAccountType"]) == 268435456): # group object
SelectObj = self.ObjCursor.execute("""SELECT sAMAccountName FROM groups WHERE sAMAccountName='{0}'""".format(ADObject["sAMAccountName"]))
CheckObj = SelectObj.fetchone()
if(CheckObj == None):
self.ObjCursor.execute("""INSERT INTO groups VALUES ('{0}','{1}','{2}','{3}')""".format(ADObject["dn"],ADObject["sAMAccountName"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("group","add")
elif(len(CheckObj) == 1):
self.ObjCursor.execute("""UPDATE groups SET sAMAccountName='{1}', description='{2}', FullData='{3}' WHERE dn='{0}'""".format(ADObject["dn"],ADObject["sAMAccountName"],descriptionValue,json.dumps(ADObject)))
self.ObjDb.commit()
return("group","update")
else:
return("group","fail")
def GetADObjects(self,ObjFile,WebObj):
self.WebObj = WebObj
ResultObject = {"user":{"add":0,"update":0,"fail":0},"group":{"add":0,"update":0,"fail":0},"computer":{"add":0,"update":0,"fail":0}}
if(self.Settings["format"] == "ADFind_default"):
ObjData = {}
CheckFirstObj = False
for CurrentLine in ObjFile:
SplitLine = CurrentLine[:-1].split(':')
if(len(SplitLine) == 2):
if(CheckFirstObj == False):
if(SplitLine[0] == "dn"):
CheckFirstObj = True
if(ObjData != {}):
ResultAdd = self.AddObjectDB(ObjData)
if(ResultAdd[1] == "add"):
ResultObject[ResultAdd[0]]["add"] += 1
elif(ResultAdd[1] == "update"):
ResultObject[ResultAdd[0]]["update"] += 1
elif(ResultAdd[1] == "fail"):
ResultObject[ResultAdd[0]]["fail"] += 1
ObjData = {}
ObjData[SplitLine[0]] = SplitLine[1]
else:
if(SplitLine[0] == "dn"):
if(ObjData != {}):
ResultAdd = self.AddObjectDB(ObjData)
if(ResultAdd[1] == "add"):
ResultObject[ResultAdd[0]]["add"] += 1
elif(ResultAdd[1] == "update"):
ResultObject[ResultAdd[0]]["update"] += 1
elif(ResultAdd[1] == "fail"):
ResultObject[ResultAdd[0]]["fail"] += 1
ObjData = {}
ObjData[SplitLine[0]] = SplitLine[1]
elif(SplitLine[0][1:] == "memberOf"
or SplitLine[0][1:] == "member"
or SplitLine[0][1:] == "servicePrincipalName"
or SplitLine[0][1:] == "objectClass"):
if(SplitLine[0][1:] not in ObjData.keys()):
ObjData[SplitLine[0][1:]] = [SplitLine[1][1:]]
else:
ObjData[SplitLine[0][1:]].append(SplitLine[1][1:])
else:
ObjData[SplitLine[0][1:]] = SplitLine[1][1:]
if(ObjData != {}):
ResultAdd = self.AddObjectDB(ObjData)
if(ResultAdd[1] == "add"):
ResultObject[ResultAdd[0]]["add"] += 1
elif(ResultAdd[1] == "update"):
ResultObject[ResultAdd[0]]["update"] += 1
elif(ResultAdd[1] == "fail"):
ResultObject[ResultAdd[0]]["fail"] += 1
if(ResultObject["user"]["add"] != 0 or ResultObject["user"]["update"] != 0 or ResultObject["user"]["fail"] != 0):
self.WebObj.write("User object:<br>- {0} add<br>- {1} update<br>- {2} fail".format(ResultObject["user"]["add"],ResultObject["user"]["update"],ResultObject["user"]["fail"]))
elif(ResultObject["group"]["add"] != 0 or ResultObject["group"]["update"] != 0 or ResultObject["group"]["fail"] != 0):
self.WebObj.write("Group object:<br>- {0} add<br>- {1} update<br>- {2} fail".format(ResultObject["group"]["add"],ResultObject["group"]["update"],ResultObject["group"]["fail"]))
elif(ResultObject["computer"]["add"] != 0 or ResultObject["computer"]["update"] != 0 or ResultObject["computer"]["fail"] != 0):
self.WebObj.write("Computer object:<br>- {0} add<br>- {1} update<br>- {2} fail".format(ResultObject["computer"]["add"],ResultObject["computer"]["update"],ResultObject["computer"]["fail"]))
# ADInfo file format:
# - ADFind_default
#
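# A hedged sketch of the expected "ADFind_default" input (illustrative values
# only): attribute lines other than "dn" are prefixed with ">", which is why the
# parser above strips the first character of those lines.
#
#   dn:CN=jdoe,OU=Users,DC=example,DC=local
#   >objectClass: user
#   >sAMAccountName: jdoe
#   >sAMAccountType: 805306368
#   >userAccountControl: 512
#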
if __name__ == "__main__":
parser = argparse.ArgumentParser("ADContentViewer")
parser.add_argument("-db", help="db file name (default: adinfo.db)")
args = parser.parse_args()
if(args.db == None):
OutSqlFile = "adinfo.db"
else:
OutSqlFile = args.db
Settings = {"format":"ADFind_default",
"db_name":OutSqlFile,
"obj_count_page":"10"}
MainReader = Reader(Settings)
application = tornado.web.Application([
(r"/", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/home", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/request", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/users", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/groups", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/computers", Web,dict(Settings=Settings,MainReader=MainReader)),
(r"/tools", Web,dict(Settings=Settings,MainReader=MainReader)),
])
print("> go http://127.0.0.1:16600/")
application.listen(16600)
tornado.ioloop.IOLoop.current().start()
| 2.40625
| 2
|
app/urls.py
|
B-ROY/TESTGIT
| 2
|
12776322
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
    # url(r'^$', 'app.thirdpart.views.main', name='home'),
    # url(r'^item/detail/$', 'app.thirdpart.views.item_detail', name='detail'),
# url(r'^mysite/', include('mysite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
#/admin/auth/user/
url(r'^$', 'app.customer.views.user.base'),
url(r'^homepage/$', 'app.customer.views.user.homepage'),
# url(r'^stat$', 'statistics.views.index.index', name="statistics_index"),
url(r'^audio/', include('app.audio.urls')),
url(r'^add/top', 'app.customer.views.user.add_top'),
url(r'^del/top', 'app.customer.views.user.delete_top'),
url(r'^top/position', 'app.customer.views.user.save_top_position'),
url(r'^admin/', include(admin.site.urls)),
url(r'^customer/', include('app.customer.urls')),
url(r'^signin/$', 'django.contrib.auth.views.login', {'template_name': 'signin.html'}, name="signin"),
url(r'^signout/$', 'django.contrib.auth.views.logout_then_login', name="signout"),
###################################################################################################################
    # Static file serving view
###################################################################################################################
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT,}),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.882813
| 2
|
mmseg/utils/inverted_residual_module.py
|
HusterRC/mmsegmentation
| 2
|
12776323
|
from mmcv.cnn import ConvModule, build_norm_layer
from torch import nn
class InvertedResidual(nn.Module):
"""Inverted residual module.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): adjusts number of channels of the hidden layer
            in InvertedResidual by this amount.
        dilation (int): Dilation rate of the depthwise 3x3 convolution.
            Default: 1.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6')):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(in_channels * expand_ratio))
self.use_res_connect = self.stride == 1 \
and in_channels == out_channels
layers = []
if expand_ratio != 1:
# pw
layers.append(
ConvModule(
in_channels,
hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
# dw
ConvModule(
hidden_dim,
hidden_dim,
kernel_size=3,
padding=dilation,
stride=stride,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
# pw-linear
nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False),
build_norm_layer(norm_cfg, out_channels)[1],
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
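# Hedged usage sketch (not part of the original mmseg module): the shapes and
# configuration below are illustrative assumptions; torch must be installed
# alongside mmcv for this to run.
if __name__ == '__main__':
    import torch

    # stride=1 with matching channel counts triggers the residual connection
    block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
    out = block(torch.randn(1, 32, 56, 56))
    print(out.shape)  # torch.Size([1, 32, 56, 56])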
| 2.9375
| 3
|
nltk_download.py
|
anasayubi/implement_timmonsr-kylee84_research_paper
| 0
|
12776324
|
from nltk import download
download()
| 1.039063
| 1
|
051 - 100/ex078.py
|
SocrammBR/Desafios-Python-CursoEmVideo
| 0
|
12776325
|
<reponame>SocrammBR/Desafios-Python-CursoEmVideo
lista = []
for c in range(0, 5):
    lista.append(int(input(f'Enter a value for position {c}: ')))
print('=-=' * 10)
print(f'You entered the values {lista}')
print(f'The largest value entered was {max(lista)} and it is at position {lista.index(max(lista))}')
print(f'The smallest value entered was {min(lista)} and it is at position {lista.index(min(lista))}')
| 3.609375
| 4
|
evaluation/plot_mr-qr.py
|
cragkhit/elasticsearch
| 23
|
12776326
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
df = pd.DataFrame({'Group': ['A', 'A', 'A', 'B', 'C', 'B', 'B', 'C', 'A', 'C'],
'Apple': np.random.rand(10),'Orange': np.random.rand(10)})
# df = df[['Group','Apple','Orange']]
dd = pd.melt(df, id_vars=['Group'], value_vars=['Apple', 'Orange'], var_name='Fruits')
sns.boxplot(x='Group', y='value', data=dd, hue='Fruits')
plt.show()
| 3.515625
| 4
|
pisces/util.py
|
omarmarcillo/piscesWIN
| 0
|
12776327
|
<filename>pisces/util.py<gh_stars>0
import logging
import math
from getpass import getpass
import numpy as np
import sqlalchemy as sa
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import NoSuchTableError, IntegrityError, OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm.exc import NoResultFound, UnmappedInstanceError
#import obspy.geodetics as geod
from obspy.core import AttribDict
from obspy.taup import taup
from pisces.schema.util import PiscesMeta
import pisces.schema.kbcore as kba
def db_connect(*args, **kwargs):
"""
Connect to your database.
Parameters
----------
backend : string
One of the SQLAlchemy connection strings from
http://docs.sqlalchemy.org/en/rel_0_7/core/engines.html#database-urls
user : string, optional
Not required for sqlite.
passwd : string, optional
Not needed for sqlite. Prompted if needed and not provided.
server : string, optional
Database host server.
port : string or integer, optional
Port on remote server.
instance : string, optional
The database instance. For sqlite, this is the file name.
conn : string, optional
A fully-formed SQLAlchemy style connection string.
Returns
-------
session : bound SQLAlchemy Session instance
Examples
--------
1. Connect to a local sqlite database file:
>>> meta, session = db_connect(conn='sqlite:///mydb.sqlite')
#or
>>> meta, session = db_connect(backend='sqlite', instance='mydb.sqlite')
Notes
-----
For connection string format, see:
http://docs.sqlalchemy.org/en/rel_0_8/core/engines.html
"""
#TODO: take advantage of sqlalchemy.engine.url.URL
#XXX: is not protected against using args and kwargs
if len(args) == 1:
kwargs['conn'] = args[0]
if kwargs.get('conn'):
conn = kwargs.get('conn')
else:
backend = kwargs.get('backend')
user = kwargs.get('user', '')
psswd = kwargs.get('passwd', '')
server = kwargs.get('server', '')
port = kwargs.get('port', '')
instance = kwargs.get('instance', '')
        if backend == 'sqlite':
userpsswd = ''
else:
if user and not psswd:
psswd = getpass("Enter password for {0}: ".format(user))
userpsswd = ':'.join([user, psswd])
elif psswd and not user:
user = getpass("Enter username for given password: ")
userpsswd = ':'.join([user, psswd])
elif user and psswd:
userpsswd = ':'.join([user, psswd])
else:
userpsswd = ':'
if server:
serverport = '@' + server
if port:
serverport += ':' + str(port)
else:
serverport = ''
conn = "{0}://{1}{2}/{3}".format(backend, userpsswd, serverport, instance)
engine = sa.create_engine(conn)
session = Session(bind=engine)
return session
def url_connect(url):
"""
Connect to a database using an RFC-1738 compliant URL, like sqlalchemy's
create_engine, prompting for a password if a username is supplied.
Parameters
----------
url : string
A fully-formed SQLAlchemy style connection string.
See http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
Returns
-------
session : bound SQLAlchemy Session instance
Examples
--------
SQLite database file, local:
>>> url_connect('sqlite:///local/path/to/mydb.sqlite')
SQLite database file, full path:
>>> url_connect('sqlite:////full/path/to/mydb.sqlite')
Remote Oracle, OS-authenticated (no user or password needs to be specified)
>>> url_connect('oracle://dbserver.lanl.gov:8080/mydb')
Remote Oracle, password-authenticated (specify user, prompted for password)
>>> url_connect('oracle://<EMAIL>:8080/mydb')
Enter password for scott:
Remote Oracle, password-authenticated (password specified)
>>> url_connect('oracle://scott:<EMAIL>:8080/mydb')
"""
this_url = sa.engine.url.make_url(url)
if this_url.username and not this_url.password:
this_url.password = getpass("Enter password for {0}: ".format(this_url.username))
e = sa.create_engine(this_url)
session = Session(bind=e)
return session
def table_contains_all(itable, keys):
    return all(key in itable.columns for key in keys)
#def ormtable(fulltablename, base=None):
# """
# For known schema-qualified tables, use:
# Origin = ormtable('global.origin', base=kb.Origin)
# For arbitrary tables, use:
# MyTable = ormtable('jkmacc.sometable')
# For arbitrary tables without Pisces-specific method, use:
# MyTable = ormtable('jkmacc.sometable', base=declarative_base())
# """
#
# ORMBase = base if base else declarative_base(metaclass=PiscesMeta,
# constructor=None)
# parents = (ORMBase,)
# try:
# owner, tablename = fulltable.split('.')
# except ValueError:
# owner, tablename = None, fulltable
# if owner:
# parents += declarative_base(metadata=MetaData(schema=owner)),
#
# return type(fulltable.capitalize(), parents, {})
def get_tables(bind, fulltablenames, metadata=None, primary_keys=None,
base=None):
"""
Reflect/load an arbitrary database table as a mapped class.
This is a shortcut for SQLAlchemy's declarative mapping using __table__.
See http://docs.sqlalchemy.org/en/rel_0_9/orm/extensions/declarative.html#using-a-hybrid-approach-with-table.
Parameters
----------
bind : sqlalchemy.engine.base.Engine instance
Engine pointing to the target database.
fulltables : list of strings
Of the form ['owner1.tablename1', 'owner2.tablename2', ...]
Leave out 'owner.' if database doesn't use owners (sqlite, etc...)
metadata : sqlalchemy.MetaData, optional
MetaData into which reflected Tables go. If not supplied, a new one
is created, accessible from MyTable.metadata on one of the loaded
tables.
primary_keys : dict, optional
Tablename, primary key list pairs of the form,
{'owner1.tablename1': ['primary_key1', 'primary_key2']}
These are required if the table is a view or has no primary key.
base : sqlalchemy.ext.declarative.api.DeclarativeMeta, optional
The declarative base the from which loaded table classes will inherit.
The info dictionary of loaded Columns will be updated from those in
the base. These are used to generate default values and string
representations. Import from pisces.schema.css3, or extensions thereof.
Default, sqlalchemy.ext.declarative.api.DeclarativeMeta.
Returns
-------
list
Corresponding list of ORM table classes mapped to reflected tables,
Can be used for querying or making row instances.
Raises
------
sqlalchemy.exc.NoSuchTableError : Table doesn't exist.
sqlalchemy.exc.InvalidRequestError : Table already loaded in metadata.
sqlalchemy.exc.ArgumentError : Table has no primary key(s).
Notes
-----
In SQLAlchemy, a database account/owner is generally used with the "schema"
keyword argument.
For core tables in a Pisces schema, this function isn't recommended.
Instead, subclass from the known abstract table.
Examples
--------
# for unknown table
>>> import pisces.schema.css3 as css
>>> RandomTable = get_tables(engine, ['randomtable'])
# for a known/prototype table
>>> class Site(css.Site):
__tablename__ = 'myaccount.my_site_tablename'
"""
if not metadata:
metadata = sa.MetaData()
if not bind:
raise ValueError("Must provide bound metadata or bind.")
# we have metadata
if not bind:
bind = metadata.bind
ORMBase = base if base else declarative_base(metaclass=PiscesMeta,
constructor=None,
metadata=metadata)
colinfo = getattr(base, '_column_info_registry', {})
parents = (ORMBase,)
outTables = []
for fulltable in fulltablenames:
try:
owner, tablename = fulltable.split('.')
except ValueError:
# no owner given
owner, tablename = None, fulltable
# reflect the table
itable = sa.Table(tablename, metadata, autoload=True,
autoload_with=bind, schema=owner)
# update reflected table with known schema column info
if colinfo:
for col in itable.columns:
col.info.update(colinfo.get(col.name, {}))
dct = {'__table__': itable}
# put any desired __table_args__: {} here
# no primary key, can't map. spoof primary key mapping from inputs
if primary_keys and fulltable in primary_keys:
dct['__mapper_args__'] = {'primary_key': [getattr(itable.c, key) for key in primary_keys[fulltable]]}
ORMTable = type(tablename.capitalize(), parents, dct)
outTables.append(ORMTable)
return outTables
def make_table(fulltablename, prototype):
"""
Create a new ORM class/model on-the-fly from a prototype.
Parameters
----------
fulltablename : str
Schema-qualified name of the database table, like 'owner.tablename'
or just 'tablename'. The resulting classname will be the capitalized
tablename, like 'Tablename'.
prototype : sqlalchemy abstract ORM class
The prototype table class. pisces.schema.css.Site, for example.
Notes
-----
It's better to declare classes in an external module, and import them.
SQLAlchemy doesn't let you use the same table names twice, so on-the-fly
class creation and naming is risky:
1. You can't use make_tables again if you accidentally overwrite the
variable you used to hold the class you created.
2. You can't use make_tables again if you import something from a
script/module where make_tables was used with the same table name.
"""
try:
owner, tablename = fulltablename.split('.')
except ValueError:
owner, tablename = None, fulltablename
parents = (prototype,)
if owner:
OwnerBase = declarative_base(metadata=sa.MetaData(schema=owner))
parents = (OwnerBase, prototype)
else:
parents = (prototype,)
dct = {'__tablename__': tablename}
return type(tablename.capitalize(), parents, dct)
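# Example (illustrative, not from the original module): build a concrete Origin
# table in a "myaccount" schema from the packaged KB Core prototype.
#   Origin = make_table('myaccount.origin', kba.Origin)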
def make_same_size(lat1, lon1, lat2, lon2):
"""
Returns numpy arrays the same size as longest inputs.
assume: lat1/lon1 are same size and lat2/lon2 are same size
assume: the smaller of the sizes is a scalar
"""
#TODO: EAFP
lon1 = np.array(lon1)
lat1 = np.array(lat1)
lon2 = np.array(lon2)
lat2 = np.array(lat2)
#assume: lat1/lon1 are same size and lat2/lon2 are same size
#assume: the smaller of the sizes is a scalar
N1 = lon1.size
N2 = lon2.size
if N1 > N2:
lon2 = lon2.repeat(N1)
lat2 = lat2.repeat(N1)
elif N2 > N1:
lon1 = lon1.repeat(N2)
lat1 = lat1.repeat(N2)
return lat1, lon1, lat2, lon2
def gen_id(i=0):
"""
Produce a generator for sequential integer id values.
Examples
--------
>>> lastorid = 7
>>> orid = gen_id(lastorid)
>>> orid.next()
8
>>> orid.next()
9
Generate more than one at a time:
>>> orid, arid, wfid = (gen_id() for id in ['orid', 'arid', 'wfid'])
>>> orid.next(), arid.next()
(1, 1)
Dictionary of id generators for desired ids, starting where they left off.
ids not in Lastid will be missing
>>> ids = session.query(Lastid).filter(Lastid.keyname.in_(['orid','arid']).all()
>>> last = dict([(id.keyname, gen_id(id.keyvalue)) for id in ids])
>>> last['orid'].next()
8820005
"""
while 1:
i += 1
yield i
def travel_times(ref, deg=None, km=None, depth=0.):
"""
Get *approximate* relative travel time(s).
Parameters
----------
ref : list or tuple of strings and/or floats
Reference phase names or horizontal velocities [km/sec].
deg : float, optional
Degrees of arc between two points of interest (spherical earth).
km : float, optional
Horizontal kilometers between two points of interest (spherical earth).
depth : float, optional. default, 0.
Depth (positive down) of event, in kilometers.
Returns
-------
numpy.ndarray
Relative times, in seconds, same length as "ref". NaN if requested time
is undefined.
Examples
--------
Get relative P arrival and 2.7 km/sec surface wave arrival at 35 degrees
distance.
>>> times = travel_times(['P', 2.7], deg=35.0)
To get absolute window, add the origin time like:
>>> w1, w2 = times + epoch_origin_time
Notes
-----
Either deg or km must be indicated.
The user is responsible for adding/subtracting time (such as origin
time, pre-window noise time, etc.) from those predicted in order to define
a window.
Phase travel times use ak135.
"""
times = np.zeros(len(ref), dtype='float')
tt = None
for i, iref in enumerate(ref):
if isinstance(iref, str):
# phase time requested
if not tt:
                tt = taup.getTravelTimes(deg, depth, model='ak135')
try:
idx = [ph['phase_name'] for ph in tt].index(iref)
itt = [ph['time'] for ph in tt][idx]
except ValueError:
# phase not found
                itt = np.nan
else:
# horizontal velocity
if not km:
km = deg*(2*math.pi/360.0)*6371.0
itt = km/iref
times[i] = itt
return times
def add_rows(session, rows, recurse=False):
"""Handle common errors with logging in SQLAlchemy add_all.
Tries to add in bulk. Failing that, it will rollback and optionally try
to add one at a time.
Parameters
----------
session : sqlalchemy.orm.Session
rows : list
Mapped table instances.
recurse : bool, optional
After failure, try to add records individually.
Returns
-------
num : int
Number of objects added. 0 if none.
e : exception or None
"""
e = None
num = 0
try:
session.add_all(rows)
session.commit()
num = len(rows)
    except (ProgrammingError, UnmappedInstanceError) as exc:
        # ProgrammingError: string encoding problem
        # UnmappedInstanceError: tried to add something like a list or None
        e = exc
        session.rollback()
        logging.warning(str(e))
    except IntegrityError as exc:
        # IntegrityError: duplicate row(s)
        e = exc
        session.rollback()
        logging.warning(str(e))
finally:
# always executed
if e and recurse:
# if an exception was thrown and recursion was requested
for row in rows:
i, e = add_rows(session, [row], recurse=False)
num += i
return num, e
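# Example (illustrative): try a bulk insert, falling back to row-by-row adds,
# assuming `session` is a bound Session and `rows` are mapped instances.
#   num, err = add_rows(session, rows, recurse=True)
#   if err:
#       logging.warning("only %d of %d rows were added", num, len(rows))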
def get_lastids(session, Lastid, keynames=None, expunge=True, create=False):
"""
Load or create Lastid instances into a convenient and readable
attribute-based dictionary.
Parameters
----------
session : sqlalchemy.orm.Session instance
Lastid : Lastid table class
ids : list or tuple of strings
Lastid.keyname values to load.
expunge : bool
If True, expunge loaded ids from the session. This frees you
to modify them without affecting the database from which they
were loaded. In this case, you'll have to add them back into a
session and commit them for their changes to be reflected on the
database.
create : bool
If True, create ids that don't already exist.
Examples
--------
Get and set lastid values directly by name or by attribute.
>>> last = get_lastids(session, Lastid, ['orid', 'arid'])
>>> last.orid, last['orid']
Lastid(keyname='orid'), Lastid(keyname='orid')
Test for their existence by name.
>>> 'orid' in last
True
Use the Lastid's 'next' generator behavior for readable code
>>> next(last.orid)
18
>>> last.orid.keyvalue
18
Update your database when you're done.
>>> session.add_all(ids.values())
>>> session.commit()
"""
last = AttribDict()
q = session.query(Lastid)
if keynames is None:
lastids = q.all()
else:
lastids = []
for keyname in keynames:
lastid = q.filter(Lastid.keyname == keyname).first()
if lastid:
lastids.append(lastid)
elif create:
lastid = Lastid(keyname=keyname, keyvalue=0)
session.add(lastid)
lastids.append(lastid)
for lastid in lastids:
if expunge:
session.expunge(lastid)
last[lastid.keyname] = lastid
return last
#CORETABLES = [CoreTable('affiliation', kba.Affiliation, kb.Affiliation),
# CoreTable('arrival', kba.Arrival, kb.Arrival),
# CoreTable('assoc', kba.Assoc, kb.Assoc),
# CoreTable('event', kba.Event, kb.Event),
# CoreTable('instrument', kba.Instrument, kb.Instrument),
# CoreTable('lastid', kba.Lastid, kb.Lastid),
# CoreTable('origin', kba.Origin, kb.Origin),
# CoreTable('site', kba.Site, kb.Site),
# CoreTable('sitechan', kba.Sitechan, kb.Sitechan),
# CoreTable('wfdisc', kba.Wfdisc, kb.Wfdisc)]
def get_options(db,prefix=None):
'''
for coretable in CORETABLES:
table_group.add_argument('--' + coretable.name,
default=None,
metavar='owner.tablename',
dest=coretable.name)
'''
options={'url':'sqlite:///'+db,'prefix':prefix}
return options
def get_or_create_tables(session, prefix=None, create=True, **tables):
"""
Load or create canonical ORM KB Core table classes.
Parameters
----------
session : sqlalchemy.orm.Session
prefix : str
Table name prefix for core tables, e.g. 'global.' for 'global.<tablename>'
create : bool
If True, create a table that isn't found.
Also accepted are canonical table name keywords with '[owner.]tablename'
arguments, which will replace any prefix-based core table names.
Returns
-------
tables : dict
Mapping between canonical table names and SQLA ORM classes.
e.g. {'origin': MyOrigin, ...}
"""
# The Plan:
# 1. For each core table, build or get the table name
# 2. If it's a vanilla table name, just use a pre-packaged table class
# 3. If not, try to autoload it.
# 4. If it doesn't exist, make it from a prototype and create it in the database.
# TODO: check options for which tables to produce.
tables = {}
for coretable in CORETABLES:
# build the table name
        if prefix is None:
fulltablename = coretable.name
else:
fulltablename = prefix + coretable.name
# fulltablename is either an arbitrary string or prefix + core name, but not None
# put table classes into the tables dictionary
if fulltablename == coretable.name:
# it's a vanilla table name. just use a pre-packaged table class instead of making one.
tables[coretable.name] = coretable.table
else:
            tables[coretable.name] = make_table(fulltablename, coretable.prototype)
tables[coretable.name].__table__.create(session.bind, checkfirst=True)
session.commit()
return tables
| 2.65625
| 3
|
removeKthNodeFromEnd.py
|
rramnani/LeetCode
| 0
|
12776328
|
def removeKthNodeFromEnd(head, k):
# Write your code here.
#length = len(head)
counter = 1
first = head
second = head
while counter <= k:
second = second.next
counter+=1
if second is None: # second is pointing to None => First is at the head of linked list
head.value = head.next.value
head.next = head.next.next
return # then we're done here
while second.next is not None:
second = second.next
first = first.next
first.next = first.next.next
return head
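# Hedged usage sketch: the minimal LinkedList node class below is an assumption
# for a standalone run, not part of the original problem scaffolding.
if __name__ == "__main__":
    class LinkedList:
        def __init__(self, value):
            self.value = value
            self.next = None

    head = LinkedList(0)
    node = head
    for v in range(1, 10):
        node.next = LinkedList(v)
        node = node.next
    removeKthNodeFromEnd(head, 4)  # drops the 4th node from the end (value 6)
    values = []
    node = head
    while node is not None:
        values.append(node.value)
        node = node.next
    print(values)  # [0, 1, 2, 3, 4, 5, 7, 8, 9]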
| 3.859375
| 4
|
src/hjerter/algorithms/hafm.py
|
chingyulin/ml_numpy
| 0
|
12776329
|
<filename>src/hjerter/algorithms/hafm.py
import numpy as np
p0 = np.array([1, 8])
p1 = np.array([7, 3])
line_a = np.stack([p0, p1])
q = np.array([3, 2])
v = p1 - p0
u = q - p0
def find_angle(v1, v2):
return np.arccos(np.sum(v1 * v2) / np.sqrt(np.sum(v1 ** 2) * np.sum(v2 ** 2)))
theta_0 = find_angle(u, v)
theta = np.arctan(-v[0] / v[1])
p_v_u = (np.sum(u * v) / np.sum(v ** 2)) * v
t = p0 + p_v_u
d = np.sqrt(np.sum((t - q) ** 2))
w = p1 - q
theta_1 = find_angle(w, v)
print("Theta_0", theta_0)
print("Theta_1", theta_1)
print("Theta", theta)
print("Distance", d)
| 3.203125
| 3
|
tests/test_tricks.py
|
phipleg/trafaret-config
| 26
|
12776330
|
<reponame>phipleg/trafaret-config<filename>tests/test_tricks.py
import unittest
from collections import OrderedDict
from textwrap import dedent
import trafaret as T
from .util import get_err
class TestCall(unittest.TestCase):
TRAFARET = T.Dict({
T.Key("call_dict", optional=True):
# The following is the ordered dict because we want to obey order
# in the error messages. Otherwise, it could be normal dict as well
T.Call(lambda _: T.DataError({
"anything": "bad idea",
"bad": "another bad idea",
})),
T.Key("call_str", optional=True):
T.Call(lambda _: T.DataError("some error")),
})
def test_call_dict(self):
self.assertEqual(get_err(self.TRAFARET, u"""
call_dict: "hello"
"""), dedent(u"""\
config.yaml:2: call_dict.anything: bad idea
config.yaml:2: call_dict.bad: another bad idea
"""))
def test_call_str(self):
self.assertEqual(get_err(self.TRAFARET, u"""
call_str: "hello"
"""), dedent(u"""\
config.yaml:2: call_str: some error
-> 'hello'
"""))
class TestForward(unittest.TestCase):
FWD = T.Forward()
TRAFARET = T.Dict({
T.Key("value", optional=True): FWD,
})
FWD << T.Int()
def test_int(self):
self.assertEqual(get_err(self.TRAFARET, u"""
value: "hello"
"""), dedent(u"""\
config.yaml:2: value: value can't be converted to int
-> 'hello'
"""))
| 2.75
| 3
|
archives/02/tootle/utils.py
|
asmodehn/caerbannog
| 1
|
12776331
|
import enum
# Directly, naively, modeled in python 3.6
# from the code shown in : https://www.youtube.com/watch?v=AG3KuqDbmhM
# Side Note: Yes, Python has enums !
class PenState(enum.Enum):
UP = -1
DOWN = 1
| 3.15625
| 3
|
main.py
|
adamd1985/random-streamers
| 0
|
12776332
|
from flask import (
Flask,
render_template,
Response,
stream_with_context,
send_from_directory,
)
from flask_cors import CORS
import os
import random
import json
import string
from time import sleep
from datetime import datetime, date, timedelta
def random_date(year_start=2000, year_end=2005):
"""Random datetime between 2 dates"""
start_date = datetime(year_start, 1, 1)
end_date = datetime(year_end, 1, 1)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = random.randrange(days_between_dates)
random_seconds = random.randrange(0, 60 * 60 * 24)
rand_date = start_date + timedelta(
days=random_number_of_days, seconds=random_seconds
)
return rand_date
def random_data(include_digits=False, include_nulls=False):
"""Generate a random string of fixed length"""
size = random.randint(10, 200)
if include_nulls and bool(random.getrandbits(1)):
rand_str = None
elif include_digits:
rand_str = "".join(
random.choice(string.ascii_letters + string.digits) for i in range(1, size)
)
else:
rand_str = "".join(random.choice(string.ascii_letters) for i in range(1, size))
return rand_str
def generate(include_digits=False, include_nulls=False):
"""create and return data in small parts"""
for counter in range(1, 60):
obj = dict()
obj["id"] = counter
obj["date"] = random_date().strftime("%m/%d/%Y, %H:%M:%S %p")
obj["payload"] = random_data(include_digits, include_nulls)
json_obj = json.dumps(obj)
# sleep(1000)
yield json_obj
def create_app(config=None):
template_dir = os.path.relpath("./templates")
app = Flask(
__name__,
instance_relative_config=True,
template_folder=template_dir,
static_url_path="/static",
)
app.config.from_object(__name__)
if config is not None:
app.config.update(config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
CORS(app)
@app.after_request
def set_response_headers(response):
"""Ensures no cache"""
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "0"
return response
@app.route("/stream1", methods=["GET"])
def gimme_data1():
"""streams down large data"""
# stream with context so the 'after_request' happens when streaming is finished
return Response(stream_with_context(generate()), mimetype="application/json")
@app.route("/stream2", methods=["GET"])
def gimme_data2():
"""streams down large data"""
# stream with context so the 'after_request' happens when streaming is finished
return Response(
stream_with_context(generate(include_digits=True)),
mimetype="application/json",
)
@app.route("/stream3", methods=["GET"])
def gimme_data3():
"""streams down large data"""
# stream with context so the 'after_request' happens when streaming is finished
return Response(
stream_with_context(generate(include_digits=True, include_nulls=True)),
mimetype="application/json",
)
@app.route("/")
def entry_point():
"""simple entry for test"""
return render_template("base.html")
return app
if __name__ == "__main__":
app = create_app()
app.run(host="0.0.0.0", port=random.randint(2000, 9000))
| 2.9375
| 3
|
{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/apps/common/views.py
|
powerdefy/cookiecutter-django-rest
| 0
|
12776333
|
<gh_stars>0
import asyncio
from django.db import transaction
from django.utils.decorators import classonlymethod, method_decorator
from django.views import View
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class AsyncView(View):
@classonlymethod
def as_view(cls, **initkwargs):
view = super().as_view(**initkwargs)
view._is_coroutine = asyncio.coroutines._is_coroutine
return view
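# Hedged usage sketch (not part of the original app; the view name and payload
# are assumptions). Subclasses define async handlers as usual:
#
#   from django.http import JsonResponse
#
#   class PingView(AsyncView):
#       async def get(self, request):
#           return JsonResponse({"ok": True})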
| 2.1875
| 2
|
gelada_tests.py
|
marcysweber/lifetime-repro-success
| 0
|
12776334
|
import unittest
from agent import *
from completesimulation import HamadryasSim, HamaPopulation, GeladaSim, GelPopulation
from dispersal import HamadryasDispersal, GeladaDispersal
from group import HamadryasGroup, GeladaGroup
from seedgroups import HamadryasSeed, GeladaSeed
class DispersalTests(unittest.TestCase):
def setup_gelada(self):
sim = GeladaSim()
pop = GelPopulation()
band1 = GeladaGroup(1)
band2 = GeladaGroup(2)
pop.groupsdict[1] = band1
pop.groupsdict[2] = band2
GeladaSeed.addagenttoseed(1, band1, pop, 'm', None, None, 10, sim)
pass
def test_attractiveness_ranking(self):
pass
def test_become_bachelor(self):
pass
def test_inherit(self):
pass
def test_challenge(self):
pass
def test_fol_switch_OMU(self):
pass
def test_disperse_bands(self):
pass
def test_follow(self):
pass
def test_solitary(self):
pass
def test_follower(self):
pass
| 2.5
| 2
|
Ch9/fillTheGaps.py
|
AdamPellot/AutomateTheBoringStuffProjects
| 0
|
12776335
|
#! python3
# fillTheGaps.py - Finds all files with a given prefix, such as
# spam001.txt, spam002.txt, and so on, in a single
# folder and locates any gaps in the numbering. Have
# the program rename all the later files to close this
# gap.
# <NAME>
import re
import os
import shutil
# Returns boolean based on if the passed string has a file extension.
def isFileExtension(filename):
extensionRegex = re.compile(r'\.[a-zA-Z]{3,4}')
mo = extensionRegex.search(filename)
    if mo:
        return True
    else:
        return False
print('Enter the absolute path of the folder you want to search:')
folder = input()
print('''Enter the name of the file without the desired prefix:
(Ex. Enter spam.txt instead of spam001.txt)''')
filename = input()
if isFileExtension(filename) is False:
while isFileExtension(filename) is False:
print('Invalid filename: File extension not found')
print('''Enter the name of the file without the desired prefix:
(Ex. Enter spam.txt instead of spam001.txt)''')
filename = input()
print('''Finally enter the prefix you would like to use starting at 1:
(Ex. 001, 01, 1)''')
prefix = input()
# Ensures extension starts at 1.
if prefix[-1] != '1':
while True:
print('Invalid Prefix')
print('''Please enter the prefix that starts at 1:
(Ex. 001, 01, 1)''')
        prefix = input()
if prefix[-1] == '1':
break
# If the prefix is something like 001, this holds those 0's.
charsBeforeNum = prefix[:-1]
# Create variable that holds the file extension.
extensionRegex = re.compile(r'\.[a-zA-Z]{3,4}')
mo = extensionRegex.search(filename)
extension = mo.group()
# Holds a string of the file without extension. So is spam.txt is spam.
filewoExtension = filename.replace(extension, '')
# Create regex that detects the file number.
fileNumRegex = re.compile(r'([1-9]+[0]*)\.')
fileNums = []
# Put the file numbers in a list.
for file in os.listdir(folder):
if filewoExtension in file:
mo = fileNumRegex.search(file)
fileNums.append(int(mo.group(1)))
# Sort the list of file numbers.
fileNums.sort()
# Determines where the gap in the numbering begins
gapStart = fileNums[-1]  # default when no gap is found
for i in range(len(fileNums) - 1):
    if fileNums[i] + 1 != fileNums[i+1]:
        gapStart = fileNums[i]
        break
filesToBeRenamed = []
# Determines which numbered files have to be renamed to keep the numbering.
for file in os.listdir(folder):
if filewoExtension in file:
mo = fileNumRegex.search(file)
if int(mo.group(1)) > gapStart:
filesToBeRenamed.append(int(mo.group(1)))
# Sort the list of file numbers to be renamed.
filesToBeRenamed.sort()
newFileNum = gapStart + 1
# Fills in the gaps in the numbering.
for i in range(len(filesToBeRenamed)):
filePath = os.path.join(folder, filewoExtension + charsBeforeNum +
str(filesToBeRenamed[i]) + extension)
newFilePath = os.path.join(folder, filewoExtension + charsBeforeNum +
str(newFileNum) + extension)
newFileNum += 1
if os.path.exists(filePath):
os.rename(filePath, newFilePath)
| 4.15625
| 4
|
HayStack_Client/Inbox_Module.py
|
ConsensusGroup/Haystack
| 1
|
12776336
|
<reponame>ConsensusGroup/Haystack
####################################################################################
############## This module is used to handle the inbox of the client ###############
####################################################################################
from User_Modules import Initialization
from Tools_Module import Tools
from Configuration_Module import Configuration
from DynamicPublicLedger_Module import Dynamic_Public_Ledger
from User_Modules import User_Profile
from IOTA_Module import IOTA_Module
from Contact_Module import Contact_Client
class Inbox_Manager(Initialization, Tools):
def __init__(self):
Initialization.__init__(self)
Tools.__init__(self)
Configuration.__init__(self)
self.Received_Dir = str(self.InboxGenerator(Output_Directory = True).ReceivedMessages+"/"+Configuration().ReceivedMessages+".txt")
self.Relayed_Dir = str(self.InboxGenerator(Output_Directory = True).RelayedMessages+"/"+Configuration().RelayedMessage+".txt")
self.NotRelayed_Dir = str(self.InboxGenerator(Output_Directory = True).OutstandingRelay+"/"+Configuration().NotRelayedMessage+".txt")
self.Message_Inbox = self.UserFolder+"/"+self.MessageFolder+"/"+Configuration().ReceivedMessages+"/"+self.Inbox+".txt"
def Create_DB(self):
#Here we check if the DB files are already written.
self.Build_DB(File = self.Received_Dir)
self.Build_DB(File = self.Relayed_Dir)
self.Build_DB(File = self.NotRelayed_Dir)
return self
def Read_Tangle(self, IOTA_Instance, Block = "", From = "", To = ""):
RelayedMessages_Dictionary = self.Read_From_Json(directory = self.Relayed_Dir)
NotRelayed_Dictionary = self.Read_From_Json(directory = self.NotRelayed_Dir)
if Block != "":
Incoming = IOTA_Instance.Receive(Start = Block - self.Replay, Stop = Block + 1, JSON = True).Message
elif From != To != "":
Incoming = IOTA_Instance.Receive(Start = From, Stop = To, JSON = True).Message
else:
Incoming = []
for i in Incoming:
Bundle_Hash = str(i[0].get('bundle_hash'))
Message_Received = i[1][1:len(i[1])-1]
if self.Label_In_Dictionary(Input_Dictionary = RelayedMessages_Dictionary, Label = Bundle_Hash) == False:
if self.Label_In_Dictionary(Input_Dictionary = NotRelayed_Dictionary, Label = Bundle_Hash) == False:
NotRelayed_Dictionary = self.Add_To_Dictionary(Input_Dictionary = NotRelayed_Dictionary, Entry_Label = Bundle_Hash, Entry_Value = Message_Received)
#Now we write the dictionary to file.
self.Write_To_Json(directory = self.NotRelayed_Dir, Dictionary = NotRelayed_Dictionary)
return self
def Postprocessing_Packet(self, ToSend, Hash_Of_Incoming_Tx, IOTA_Instance):
Next_Address = ToSend[1]
Cipher = ToSend[0]
Relayed_Dictionary = self.Read_From_Json(directory = self.Relayed_Dir)
NotRelayed_Dictionary = self.Read_From_Json(directory = self.NotRelayed_Dir)
if Next_Address != '0'*81:
Relayed_Bundle_Hash = str(IOTA_Instance.Send(ReceiverAddress = Next_Address, Message = Cipher))
Relayed_Dictionary = self.Add_To_Dictionary(Input_Dictionary = Relayed_Dictionary, Entry_Label = Hash_Of_Incoming_Tx, Entry_Value = str(Relayed_Bundle_Hash))
NotRelayed_Dictionary = self.Remove_From_Dictionary(Input_Dictionary = NotRelayed_Dictionary, Label = Hash_Of_Incoming_Tx)
elif Next_Address == '0'*81:
Relayed_Dictionary = self.Add_To_Dictionary(Input_Dictionary = Relayed_Dictionary, Entry_Label = Hash_Of_Incoming_Tx, Entry_Value = str('0'*81))
NotRelayed_Dictionary = self.Remove_From_Dictionary(Input_Dictionary = NotRelayed_Dictionary, Label = Hash_Of_Incoming_Tx)
self.Write_To_Json(directory = self.Relayed_Dir, Dictionary = Relayed_Dictionary)
self.Write_To_Json(directory = self.NotRelayed_Dir, Dictionary = NotRelayed_Dictionary)
return self
def Addressed_To_Client(self, Message_PlainText, Symmetric_Message_Key):
Client_Dictionary = self.Read_From_Json(directory = self.Received_Dir)
if self.Label_In_Dictionary(Input_Dictionary = Client_Dictionary, Label = Message_PlainText) == False:
self.Add_To_Dictionary(Input_Dictionary = Client_Dictionary, Entry_Label = Message_PlainText, Entry_Value = self.String_To_Base64(Symmetric_Message_Key))
self.Write_To_Json(directory = self.Received_Dir, Dictionary = Client_Dictionary)
return self
def Completed_Messages(self, Input= []): #Add the message input later
#First create the inbox DB
self.Build_DB(File = self.Message_Inbox)
Inbox = self.Read_From_Json(directory = self.Message_Inbox)
Current_TangleTime = Dynamic_Public_Ledger().PublicIOTA.LatestTangleTime().TangleTime
for i in Input:
From_Address = i[0]
try:
hex = i[1].split(self.Identifier)
int(hex,16)
Ping = True
except:
Message = self.String_To_Base64(String = i[1])
Ping = False
if Ping == False:
Inbox = self.Add_To_Dictionary(Input_Dictionary = Inbox, Entry_Label = Message, Entry_Value = From_Address)
print("New Message!!!\n")
self.Write_To_Json(directory = self.Message_Inbox, Dictionary = Inbox)
return self
def Reconstruction_Of_Message(self, Verify):
#Make sure there is a file:
self.Create_DB()
#Read the file
Client_Dictionary = self.Read_From_Json(directory = self.Received_Dir)
Unique_SymKeys = []
for i in Client_Dictionary.values():
Unique_SymKeys.append(i)
Unique_SymKeys = set(Unique_SymKeys)
Message = []
for i in Unique_SymKeys:
Pieces_From_SymKey = []
Unmodified_Labels = []
for Cipher, Symkey in Client_Dictionary.items():
if i == Symkey:
Pieces_From_SymKey.append(str(Cipher).replace(Configuration().MessageIdentifier,''))
Unmodified_Labels.append(str(Cipher))
Sym_Key = self.Base64_To_String(str(i))
Format_To_Digest = [Pieces_From_SymKey, Sym_Key]
Output = Dynamic_Public_Ledger().Rebuild_Shrapnells(String = Format_To_Digest, Verify = Verify)
if isinstance(Output, list):
Message.append(Output)
for z in Unmodified_Labels:
Client_Dictionary = self.Remove_From_Dictionary(Input_Dictionary = Client_Dictionary, Label = z)
self.Write_To_Json(directory = self.Received_Dir, Dictionary = Client_Dictionary)
self.Completed_Messages(Input = Message)
if len(Message) == 0:
return [[False, False, False]]
else:
return Message
def Read_Stored_Messages(self):
Dictionary = self.Read_From_Json(directory = self.Message_Inbox)
Saved_Messages = self.Dictionary_To_List(Dictionary = Dictionary)
Data = []
for i in Saved_Messages:
Message = self.Base64_To_String(Encoded = i[0])
From_Address = i[1]
#Now check if the address is in the address book.
Output = Contact_Client().Retrieve_UserName_From_Address(Address_To_Search = From_Address)
if Output == False:
User = From_Address
elif isinstance(Output, list):
User = str(Output[0]+" ("+From_Address+")")
Data.append([Message, User])
return Data
class Trusted_Paths(Tools, Configuration, User_Profile):
def __init__(self):
Tools.__init__(self)
Configuration.__init__(self)
User_Profile.__init__(self)
self.Ledger_Accounts_Dir = str(self.UserFolder+"/"+self.PathFolder+"/"+self.Ledger_Accounts_File)
self.Last_Block_Dir = str(self.UserFolder+"/"+self.PathFolder+"/"+self.Last_Block)
self.Ping_Dir = str(self.UserFolder+"/"+self.PathFolder+"/"+self.Trajectory_Ping)
self.Incoming_Shrapnells = str(self.UserFolder+"/"+self.MessageFolder+"/"+Configuration().ReceivedMessages+"/"+Configuration().ReceivedMessages+".txt")
self.TrustedNodes_Dir = str(self.UserFolder+"/"+self.PathFolder+"/"+self.Trusted_Nodes)
self.Current_Block = Dynamic_Public_Ledger().Calculate_Block().Block
self.PrivateIOTA = IOTA_Module(Seed = self.Private_Seed)
def Build_LedgerDB(self):
self.Build_Directory(directory = str(self.UserFolder+"/"+self.PathFolder))
self.Build_DB(File = self.Ledger_Accounts_Dir)
self.Build_DB(File = self.Last_Block_Dir)
self.Build_DB(File = self.Ping_Dir)
#Read the file when the user was last online
Block_Number = self.Read_From_Json(directory = self.Last_Block_Dir)
#If the dictionary is empty
if Block_Number == {}:
Block_Number = self.Add_To_Dictionary(Input_Dictionary = Block_Number, Entry_Label = "Block", Entry_Value = self.Current_Block)
self.Write_To_Json(directory = self.Last_Block_Dir, Dictionary = Block_Number)
self.Last_Block_Online = self.Current_Block
else:
self.Last_Block_Online = Block_Number["Block"]
return self
def Catch_Up(self):
self.Build_LedgerDB()
Accounts = self.Read_From_Json(directory = self.Ledger_Accounts_Dir)
if self.Last_Block_Online != self.Current_Block:
self.Write_To_Json(directory = str(self.UserFolder+"/"+self.PathFolder+"/"+self.Current_Ledger_Accounts), Dictionary = {})
self.Write_To_Json(directory = self.TrustedNodes_Dir, Dictionary = {})
self.Write_To_Json(directory = self.Ping_Dir, Dictionary = {})
self.Last_Block_Online = self.Last_Block_Online-1
BlockDifference = int(self.Current_Block - self.Last_Block_Online)
if BlockDifference >= self.Replay:
Upperbound_Block = self.Last_Block_Online + self.Replay
Sync = "Syncing node."
else:
Upperbound_Block = self.Last_Block_Online + BlockDifference
if BlockDifference == 1:
Sync = "Node synced."
else:
Sync = "Syncing node."
for i in Dynamic_Public_Ledger().Check_User_In_Ledger(ScanAll = True, From = self.Last_Block_Online, To = Upperbound_Block).All_Accounts:
Accounts = self.Add_To_Dictionary(Input_Dictionary = Accounts, Entry_Label = i[0], Entry_Value = i[1])
self.Write_To_Json(directory = self.Ledger_Accounts_Dir, Dictionary = Accounts)
Inbox_Manager().Read_Tangle(IOTA_Instance = self.PrivateIOTA, From = self.Last_Block_Online-2, To = Upperbound_Block)
self.Write_To_Json(directory = self.Last_Block_Dir, Dictionary = self.Add_To_Dictionary(Input_Dictionary = {}, Entry_Label = "Block", Entry_Value = Upperbound_Block))
self.Output = str("Scanning from: "+str(self.Last_Block_Online) + " To: "+str(Upperbound_Block)+" Status: "+Sync)
self.Last_Block_Online = Upperbound_Block
if self.Current_Block == self.Last_Block_Online:
Inbox_Manager().Read_Tangle(IOTA_Instance = self.PrivateIOTA, Block = self.Current_Block)
return self
def Scan_Paths(self):
self.Build_DB(File = self.TrustedNodes_Dir)
Pings = self.Read_From_Json(directory = self.Ping_Dir)
Shrapnells = self.Read_From_Json(directory = self.Incoming_Shrapnells)
Pings_List = self.Dictionary_To_List(Dictionary = Pings)
Shrapnells_List = self.Dictionary_To_List(Dictionary = Shrapnells)
Found_Pings = []
#Search for ping fragments in the message bank and record them.
for i in Shrapnells_List:
for ping in Pings_List:
if ping[0] in i[0]:
Found_Pings.append(ping[0])
#Split the ping string to find non lazy addresses
Trusted_Nodes = []
for entry in Found_Pings:
String_split = Pings[entry].split("-->")
Block = String_split.pop(0)
Temp = []
for node in String_split:
if node == "LOCALCLIENT":
Break = True
else:
Break = False
Temp.append([node, Block])
if Break == True:
for i in Temp:
Trusted_Nodes.append(i)
#Read current DB
Nodes_Dictionary = self.Read_From_Json(directory = self.TrustedNodes_Dir)
for i in Trusted_Nodes:
Nodes_Dictionary = self.Add_To_Dictionary(Input_Dictionary = Nodes_Dictionary, Entry_Label = str(i[0]), Entry_Value = i[1])
self.Write_To_Json(directory = self.TrustedNodes_Dir, Dictionary = Nodes_Dictionary)
return self
| 1.929688
| 2
|
rcmd/__init__.py
|
sysr-q/rcmd.py
| 1
|
12776337
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import functools
import inspect
import string
import sys
import re
import rcmd.parser
__all__ = ("Rcmd",)
__version__ = "1.1.1"
PROMPT = "(Rcmd) "
PY2 = sys.version_info[0] == 2
DEFAULT_PARSER = rcmd.parser.Regex
class OrderedDefaultDict(collections.OrderedDict):
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__ (self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
def __reduce__(self): # optional, for pickle support
args = (self.default_factory,) if self.default_factory else tuple()
return self.__class__, args, None, None, self.iteritems()
class Rcmd(object):
def __init__(self, module=None, prompt=None, parser=None, stdin=None, stdout=None):
""" TODO: document.
"""
if parser is None:
self.parser = DEFAULT_PARSER()
else:
self.parser = parser
self.module = module
if prompt is not None:
self.prompt = prompt
else:
self.prompt = PROMPT
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
if PY2:
self.inputter = raw_input
else:
self.inputter = input
# Junk we need in our loop.
self._eof = "\x00\x00" # any hard to enter string
self.use_rawinput = True
self.intro = None
self.lastcmd = ""
self.identchars = string.ascii_letters + string.digits + "_"
# Register decorators and whatnot.
self.command = self.parser.command
self.unregister = self.parser.unregister
self.events = OrderedDefaultDict(list)
events = ["emptyline", "default", #"bang", "question",
"precmd", "postcmd", "preloop", "postloop"]
for event in events:
self.easy_handler(event)(rcmd.parser.noop)
def _default(line):
self.stdout.write("*** Unknown syntax: {0}\n".format(line))
self.stdout.flush()
self.default(_default)
self.precmd(lambda line: line.strip())
self.postcmd(lambda stop, results, line: (stop, results))
def easy_handler(self, event):
def handler(f):
self.events[event] = [f]
return f
# Where the magic happens.
setattr(self, event, handler)
return handler
def loop(self, intro=None):
""" TODO as heck.
See Python's cmd.Cmd.cmdloop for some (somewhat horrifying)
example loops - that's what we're working similarly to.
"""
self.fire("preloop")
if intro is not None:
self.intro = intro
if self.intro is not None:
self.stdout.write(self.intro + "\n")
self.stdout.flush()
stop = None
while not stop:
if self.use_rawinput:
try:
line = self.inputter(self.prompt)
except EOFError:
line = self._eof
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = self._eof
else:
line = line.rstrip("\r\n")
line = self.fire("precmd", line)
stop, results = self.onecmd(line)
stop, results = self.fire("postcmd", stop, results, line)
self.fire("postloop")
def event(self, name):
def handler(f):
self.events[name].append(f)
f._handling = name
return f
return handler
def unevent(self, f):
if not hasattr(f, "_handling") or not f in self.events[f._handling]:
return False
del self.events[f._handling][f]
return True
def fire(self, name, *args, **kwargs):
if len(self.events[name]) == 1:
return self.events[name][0](*args, **kwargs)
for event in self.events[name]:
event(*args, **kwargs)
def onecmd(self, line):
if not line:
return self.fire("emptyline"), None
if line == self._eof:
return True, None
self.lastcmd = line
matches, args, kwargs = self.parser.best_guess(line)
if len(matches) == 0:
return self.fire("default", line), None
kwargs.setdefault("line", line)
results = []
for handlers in matches:
for function in handlers:
if function.no_args:
results.append(function())
elif function.options["inject"]:
results.append(function(*args, **kwargs))
else:
results.append(function(args))
return any(results), results
| 2.3125
| 2
|
src/438-find_all_anagram_substring.py
|
dennislblog/coding
| 0
|
12776338
|
from typing import List


class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
# just use a sliding window
np, ns = len(p), len(s)
res = []
if np > ns: return []
map_ = [0] * 26
for i,x in enumerate(p):
map_[ord(x) - 97] -= 1
map_[ord(s[i])-97] += 1
for i in range(ns-np+1):
# the last index to check is ns-np
if not any(map_):
res.append(i)
# kick out s[i] and add s[i+np]
if i + np < ns:
map_[ord(s[i])-97] -= 1
map_[ord(s[i+np])-97] += 1
return res
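# Hedged usage sketch: the example strings below come from the classic version
# of the problem and are only illustrative.
if __name__ == "__main__":
    print(Solution().findAnagrams("cbaebabacd", "abc"))  # expected: [0, 6]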
| 2.671875
| 3
|
tools/writeBurlyWeights.py
|
fsanges/glTools
| 165
|
12776339
|
import maya.mel as mm
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.mesh
import glTools.utils.skinCluster
import os.path
def writeBurlyWeights(mesh,skinCluster,influence,filePath):
'''
'''
# Get basic procedure information
burly = 'dnBurlyDeformer1'
vtxCount = mc.polyEvaluate(mesh,v=True)
inf = mc.ls(influence,l=True)
# Check skinCluster
if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
raise Exception('Object "'+skinCluster+'" is not a valid skinCluster!')
# Get skinCluster Fn
skinFn = glTools.utils.skinCluster.getSkinClusterFn(skinCluster)
# Get influence dag path
influencePath = glTools.utils.base.getMDagPath(influence)
# Get points affected by influence
infSelectionList = OpenMaya.MSelectionList()
infWeightList = OpenMaya.MFloatArray()
skinFn.getPointsAffectedByInfluence(influencePath,infSelectionList,infWeightList)
infObjectPath = OpenMaya.MDagPath()
infComponentList = OpenMaya.MObject()
infSelectionList.getDagPath(0,infObjectPath,infComponentList)
# Get affect point indices
infComponentIndex = OpenMaya.MIntArray()
infComponentIndexFn = OpenMaya.MFnSingleIndexedComponent(infComponentList)
infComponentIndexFn.getElements(infComponentIndex)
infComponentIndex = list(infComponentIndex)
# Get affect point position and normal arrays
infComponentPosArray = OpenMaya.MPointArray()
infComponentNormArray = OpenMaya.MVectorArray()
infComponentVtxIt = OpenMaya.MItMeshVertex(infObjectPath,infComponentList)
normal = OpenMaya.MVector()
while not infComponentVtxIt.isDone():
infComponentPosArray.append(infComponentVtxIt.position(OpenMaya.MSpace.kWorld))
infComponentVtxIt.getNormal(normal)
infComponentNormArray.append(normal)
infComponentVtxIt.next()
# Open file
fileId = open(filePath, "w")
# Header
header = [ '<?xml version="1.0" standalone="no" ?>\n',
'<dnWeights type="dnBurlyDeformer" version="1.0" name="'+burly+'">\n',
'\t<Map name="'+inf[0]+'">\n',
'\t\t<Topology vertexCount="'+str(vtxCount)+'"/>\n' ]
fileId.writelines(header)
# Weights
weights = ['\t\t<Weights>\n']
for i in range(len(infComponentIndex)):
if not i%5: weights.append('\t\t\t')
weights.append(str(infWeightList[i]) + ' ')
if i%5 == 4: weights.append('\n')
weights.append('\n\t\t</Weights>\n')
fileId.writelines(weights)
# Indices
indices = ['\t\t<Indices>\n']
for i in range(len(infComponentIndex)):
if not i%10: indices.append('\t\t\t')
indices.append(str(infComponentIndex[i]) + ' ')
if i%10 == 9: indices.append('\n')
indices.append('\n\t\t</Indices>\n')
fileId.writelines(indices)
# Position
pos = ['\t\t<Positions>\n']
for i in range(len(infComponentIndex)):
if not i%2: pos.append('\t\t\t')
pos.append(str(infComponentPosArray[i][0])+' '+str(infComponentPosArray[i][1])+' '+str(infComponentPosArray[i][2])+' ')
if i%2: pos.append('\n')
pos.append('\n\t\t</Positions>\n')
fileId.writelines(pos)
# Normals
norm = ['\t\t<Normals>\n']
for i in range(len(infComponentIndex)):
if not i%2: norm.append('\t\t\t')
norm.append(str(infComponentNormArray[i][0])+' '+str(infComponentNormArray[i][1])+' '+str(infComponentNormArray[i][2])+' ')
if i%2: norm.append('\n')
norm.append('\n\t\t</Normals>\n')
fileId.writelines(norm)
# Radii
radii = ['\t\t<Radii>\n']
for i in range(len(infComponentIndex)):
if not i%6: radii.append('\t\t\t')
radii.append('0.01 ')
if i%6 == 5: radii.append('\n')
radii.append('\n\t\t</Radii>\n')
fileId.writelines(radii)
# Footer
footer = ['\t</Map>','\n</dnWeights>']
fileId.writelines(footer)
# Close file
fileId.close()
def writeBurlyWeights_allInfluences(mesh,skinCluster,directoryPath):
'''
'''
# Check mesh
if not glTools.utils.mesh.isMesh(mesh):
raise Exception('Object "'+mesh+'" contains no valid polygon mesh!')
# Check skinCluster
if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
raise Exception('Object "'+skinCluster+'" is not a valid skinCluster!')
# Check directory
if not os.path.isdir(directoryPath):
raise Exception('Directory path "'+directoryPath+'" does not exist!')
# Get skinCluster influences
influenceList = mc.skinCluster(skinCluster,q=True,inf=True)
# Write weights
for influence in influenceList:
writeBurlyWeights(mesh,skinCluster,influence,directoryPath+influence+'.xml')
def loadBurlyWeights(burlyDeformer,directoryPath):
'''
'''
# Check burly deformer
if not mc.objExists(burlyDeformer):
raise Exception('Burly deformer "'+burlyDeformer+'" does not exist!')
# Check directory path
if not directoryPath.endswith('/'): directoryPath+='/'
if not os.path.isdir(directoryPath):
raise Exception('Directory path "'+directoryPath+'" does not exist!')
# Get directory listing
fileList = [i for i in os.listdir(directoryPath) if i.endswith('.xml')]
# Load weights
for filePath in fileList:
fileId = directoryPath+filePath
influence = filePath.replace('.xml','')
mm.eval('dnBurlyDeformer -loadWeights "'+fileId+'" "'+burlyDeformer+'" "'+influence+'"')
def convertToBurly(skinCluster,burlyDeformerName=''):
'''
'''
# Check skinCluster
if not mc.objExists(skinCluster):
raise Exception('SkinCluster "'+skinCluster+'" does not exist!')
if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
raise Exception('Object "'+skinCluster+'" is not a valid skinCluster deformer!')
# Get affected mesh
#mesh =
# Designate temporary path for exported weight files
dirPath = '/usr/tmp/'
# Export skinCluster weight files
influenceList = mc.skinCluster(skinCluster,q=True,inf=True)
writeBurlyWeights_allInfluences(mesh,skinCluster,dirPath)
# Create burly deformer
mm.eval('dnBurlyDeformer_createNamed("'+geo+'","'+burlyDeformerName+'")')
| 1.898438
| 2
|
awslogs_watch/lib/execute.py
|
deresmos/awslogs-watch
| 3
|
12776340
|
import shlex
import sys
from subprocess import PIPE, Popen
from typing import List
class Executer:
SUCCESS = 0
ERROR = 1
@staticmethod
def run(command: str) -> None:
p = Popen(shlex.split(command))
print(f"-> {command}")
p.communicate()
if p.returncode == Executer.ERROR:
sys.exit(Executer.ERROR)
@staticmethod
def run_pipe(command: str) -> List[str]:
p = Popen(shlex.split(command), stdout=PIPE)
print(f"-> {command}")
result = p.communicate()
if p.returncode == Executer.ERROR:
sys.exit(Executer.ERROR)
return_lines = result[0].decode("utf-8").split("\n")
return return_lines
@staticmethod
def exit_error():
sys.exit(Executer.ERROR)
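# Illustrative usage sketch (the commands below are placeholders):
#   Executer.run("echo hello")        # stream output straight to the terminal
#   lines = Executer.run_pipe("ls")   # capture stdout as a list of lines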
| 2.890625
| 3
|
daily-problems/Day3/problem3.py
|
jeffreycshelton/ghp-challenges
| 0
|
12776341
|
<filename>daily-problems/Day3/problem3.py<gh_stars>0
'''
An N x N board contains only 0s and 1s. In each move, you can swap any 2 rows with each other, or any 2 columns with each other.
What is the minimum number of moves to transform the board into a "chessboard" - a board where no 0s and no 1s are 4-directionally
adjacent? If the task is impossible, return -1.
Examples:
Input: board = [[0,1,1,0],[0,1,1,0],[1,0,0,1],[1,0,0,1]]
Output: 2
Explanation:
One potential sequence of moves is shown below, from left to right:
0110 1010 1010
0110 --> 1010 --> 0101
1001 0101 1010
1001 0101 0101
The first move swaps the first and second column.
The second move swaps the second and third row.
Input: board = [[0, 1], [1, 0]]
Output: 0
Explanation:
Also note that the board with 0 in the top left corner,
01
10
is also a valid chessboard.
Input: board = [[1, 0], [1, 0]]
Output: -1
Explanation:
No matter what sequence of moves you make, you cannot end with a valid chessboard.
'''
# Aryan Mittal's Solution
def movesToChessboard(board):
"""
:type board: List[List[int]]
:rtype: int
"""
| 4.0625
| 4
|
lemon_markets/account.py
|
Fadope1/lemon-markets-python-sdk
| 16
|
12776342
|
<gh_stars>10-100
from typing import Union
from lemon_markets.common.helpers import UUIDObjectMixin
from lemon_markets.common.objects import AbstractApiObjectMixin, ListMixin, ListIterator
from lemon_markets.common.requests import ApiRequest
class AccountState:
uuid: str
_token: str
_cash_to_invest: float = None
_total_balance: float = None
def fetch_account_state(self):
request = ApiRequest(
endpoint="accounts/{}/state/".format(self.uuid),
method="GET",
authorization_token=self._token
)
self._cash_to_invest = request.response.get("cash_to_invest")
self._total_balance = request.response.get("total_balance")
@property
def cash_in_invest(self) -> float:
        if self._cash_to_invest is None:
self.fetch_account_state()
return self._cash_to_invest
@property
def total_balance(self) -> float:
        if self._total_balance is None:
self.fetch_account_state()
return self._total_balance
class Account(AccountState, UUIDObjectMixin, AbstractApiObjectMixin, ListMixin):
_token: str
class Fields(AbstractApiObjectMixin.Fields):
name: str
type: str
currency: str
uuid: str
def __init__(self, uuid: str, authorization_token: Union[str, "Token"] = None):
super().__init__(uuid=uuid, authorization_token=authorization_token)
self.uuid = uuid
self._token = authorization_token
@property
def token(self) -> str:
"""
        An account can have multiple tokens; this property returns the one the
        instance was created with, which simplifies subsequent API calls.
        :return: {str} the authorization token, or None if none was supplied
"""
return self._token if self._token else None
def retrieve(self):
self.check_instance()
request = ApiRequest(
endpoint="accounts/{}/".format(self.uuid),
method="GET",
authorization_token=self._token
)
self.set_data(request.response)
return self
@staticmethod
def list(authorization_token: Union[str, "Token"], limit: int = None, offset: int = None) -> ListIterator:
return ListMixin.list(object_class=Account,
authorization_token=authorization_token,
limit=limit,
offset=offset
)
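# Illustrative usage sketch (the UUID and token values are placeholders):
#   account = Account(uuid="<account-uuid>", authorization_token="<token>")
#   account.retrieve()                # populate name/type/currency from the API
#   print(account.total_balance)      # lazily fetches the account state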
| 2.71875
| 3
|
analysis/explainshell/manager.py
|
lizhi16/dockerfile_analysis_framework
| 3
|
12776343
|
import sys, os, argparse, logging, glob
from explainshell import options, store, fixer, manpage, errors, util, config
from explainshell.algo import classifier
logger = logging.getLogger('explainshell.manager')
class managerctx(object):
def __init__(self, classifier, store, manpage):
self.classifier = classifier
self.store = store
self.manpage = manpage
self.name = manpage.name
self.classifiermanpage = None
self.optionsraw = None
self.optionsextracted = None
self.aliases = None
class manager(object):
'''the manager uses all parts of the system to read, classify, parse, extract
and write a man page to the database'''
def __init__(self, dbhost, dbname, paths, overwrite=False, drop=False):
self.paths = paths
self.overwrite = overwrite
self.store = store.store(dbname, dbhost)
self.classifier = classifier.classifier(self.store, 'bayes')
self.classifier.train()
if drop:
self.store.drop(True)
def ctx(self, m):
return managerctx(self.classifier, self.store, m)
def _read(self, ctx, frunner):
frunner.pre_get_raw_manpage()
ctx.manpage.read()
ctx.manpage.parse()
assert len(ctx.manpage.paragraphs) > 1
ctx.manpage = store.manpage(ctx.manpage.shortpath, ctx.manpage.name,
ctx.manpage.synopsis, ctx.manpage.paragraphs, list(ctx.manpage.aliases))
frunner.post_parse_manpage()
def _classify(self, ctx, frunner):
ctx.classifiermanpage = store.classifiermanpage(ctx.name, ctx.manpage.paragraphs)
frunner.pre_classify()
_ = list(ctx.classifier.classify(ctx.classifiermanpage))
frunner.post_classify()
def _extract(self, ctx, frunner):
options.extract(ctx.manpage)
frunner.post_option_extraction()
if not ctx.manpage.options:
logger.warn("couldn't find any options for manpage %s", ctx.manpage.name)
def _write(self, ctx, frunner):
frunner.pre_add_manpage()
return ctx.store.addmanpage(ctx.manpage)
def _update(self, ctx, frunner):
frunner.pre_add_manpage()
return ctx.store.updatemanpage(ctx.manpage)
def process(self, ctx):
frunner = fixer.runner(ctx)
self._read(ctx, frunner)
self._classify(ctx, frunner)
self._extract(ctx, frunner)
m = self._write(ctx, frunner)
return m
def edit(self, m, paragraphs=None):
ctx = self.ctx(m)
frunner = fixer.runner(ctx)
if paragraphs:
m.paragraphs = paragraphs
frunner.disable('paragraphjoiner')
frunner.post_option_extraction()
else:
self._extract(ctx, frunner)
m = self._update(ctx, frunner)
return m
def run(self):
added = []
exists = []
for path in self.paths:
try:
m = manpage.manpage(path)
logger.info('handling manpage %s (from %s)', m.name, path)
try:
mps = self.store.findmanpage(m.shortpath[:-3])
mps = [mp for mp in mps if m.shortpath == mp.source]
if mps:
assert len(mps) == 1
mp = mps[0]
if not self.overwrite or mp.updated:
logger.info('manpage %r already in the data store, not overwriting it', m.name)
exists.append(m)
continue
except errors.ProgramDoesNotExist:
pass
# the manpage is not in the data store; process and add it
ctx = self.ctx(m)
m = self.process(ctx)
if m:
added.append(m)
except errors.EmptyManpage, e:
logger.error('manpage %r is empty!', e.args[0])
except ValueError:
logger.fatal('uncaught exception when handling manpage %s', path)
except KeyboardInterrupt:
raise
except:
logger.fatal('uncaught exception when handling manpage %s', path)
raise
if not added:
logger.warn('no manpages added')
else:
self.findmulticommands()
return added, exists
def findmulticommands(self):
manpages = {}
potential = []
for _id, m in self.store.names():
if '-' in m:
potential.append((m.split('-'), _id))
else:
manpages[m] = _id
mappings = set([x[0] for x in self.store.mappings()])
mappingstoadd = []
multicommands = {}
for p, _id in potential:
if ' '.join(p) in mappings:
continue
if p[0] in manpages:
mappingstoadd.append((' '.join(p), _id))
multicommands[p[0]] = manpages[p[0]]
for src, dst in mappingstoadd:
self.store.addmapping(src, dst, 1)
logger.info('inserting mapping (multicommand) %s -> %s', src, dst)
for multicommand, _id in multicommands.iteritems():
self.store.setmulticommand(_id)
logger.info('making %r a multicommand', multicommand)
return mappingstoadd, multicommands
def main(files, dbname, dbhost, overwrite, drop, verify):
if verify:
s = store.store(dbname, dbhost)
ok = s.verify()
return 0 if ok else 1
if drop:
if raw_input('really drop db (y/n)? ').strip().lower() != 'y':
drop = False
else:
overwrite = True # if we drop, no need to take overwrite into account
gzs = set()
for path in files:
if os.path.isdir(path):
gzs.update([os.path.abspath(f) for f in glob.glob(os.path.join(path, '*.gz'))])
else:
gzs.add(os.path.abspath(path))
m = manager(dbhost, dbname, gzs, overwrite, drop)
added, exists = m.run()
for mp in added:
print 'successfully added %s' % mp.source
if exists:
print 'these manpages already existed and werent overwritten: \n\n%s' % '\n'.join([m.path for m in exists])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='process man pages and save them in the store')
parser.add_argument('--log', type=str, default='ERROR', help='use log as the logger log level')
parser.add_argument('--overwrite', action='store_true', default=False, help='overwrite man pages that already exist in the store')
parser.add_argument('--drop', action='store_true', default=False, help='delete all existing man pages')
parser.add_argument('--db', default='explainshell', help='mongo db name')
parser.add_argument('--host', default=config.MONGO_URI, help='mongo host')
parser.add_argument('--verify', action='store_true', default=False, help='verify db integrity')
parser.add_argument('files', nargs='*')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log.upper()))
sys.exit(main(args.files, args.db, args.host, args.overwrite, args.drop, args.verify))
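# Illustrative invocation (paths and the Mongo host are placeholders):
#   python manager.py --db explainshell --host mongodb://localhost /path/to/manpages/*.gz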
| 2.640625
| 3
|
models/tests/test_utils.py
|
WangYuNeng/EvolutionaryGAN-pytorch
| 0
|
12776344
|
from unittest import TestCase
import torch
from models.utils import combine_mapping_networks, categorize_mappings
from models.networks.fc import FCGenerator
class UtilTests(TestCase):
def setUp(self) -> None:
self.state_dicts = [FCGenerator().state_dict() for _ in range(5)]
self.mappings = [torch.eye(300, 300) for _ in range(2)]
def test_combine_networks(self):
child = combine_mapping_networks(*self.mappings, is_SO=True)
self.assertTrue(
torch.all(child['module.layer'] == self.mappings[0])
)
def test_combine_networks_r(self):
for i in range(len(self.mappings)):
self.mappings[i][0] = -self.mappings[i][0]
child = combine_mapping_networks(*self.mappings, is_SO=False)
self.assertTrue(
torch.all(child['module.layer'] == self.mappings[0])
)
| 2.453125
| 2
|
cpu/test_center.py
|
xavierboud/tictactoe
| 0
|
12776345
|
from django.test import SimpleTestCase
from cpu.center import Center
from game.transforms import Board
class CenterAiTest(SimpleTestCase):
def test_picks_center(self):
data = [' '] * 9
cpu = Center()
move = cpu.play(Board(data), 'x', 'o')
self.assertEquals(move, 4)
def test_wins_if_possible(self):
data = [
' ', ' ', ' ',
'o', 'x', ' ',
'o', ' ', 'x',
]
cpu = Center()
move = cpu.play(Board(data), 'x', 'o')
self.assertEquals(move, 0)
def test_defends_if_needed(self):
data = [
' ', ' ', ' ',
' ', 'x', ' ',
'o', ' ', 'x',
]
cpu = Center()
move = cpu.play(Board(data), 'o', 'x')
self.assertEquals(move, 0)
| 2.546875
| 3
|
13_baseline_interface.py
|
saikrishnar/TTS_Interface
| 0
|
12776346
|
<reponame>saikrishnar/TTS_Interface<filename>13_baseline_interface.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ZetCode PyQt4 tutorial
In this example, we receive data from
a QtGui.QInputDialog dialog.
author: <NAME>
website: zetcode.com
last edited: October 2011
"""
import sys, os
from PyQt4 import QtGui
from PyQt4.QtGui import QSound
#import pyttsx
#engine=pyttsx.init()
import pyttsx
engine=pyttsx.init()
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
self.btn = QtGui.QPushButton('Speak', self)
self.btn.move(20, 270)
#self.btn.clicked.connect(self.showDialog)
self.btn.clicked.connect(self.readText)
self.te = QtGui.QTextEdit(self)
self.te.move(20, 22)
self.btn = QtGui.QPushButton('Browse Text', self)
self.btn.move(140, 270)
#self.btn.clicked.connect(self.showDialog)
self.btn.clicked.connect(self.selectFile_text)
self.btn = QtGui.QPushButton('Browse Audio', self)
self.btn.move(260, 270)
#self.btn.clicked.connect(self.showDialog)
self.btn.clicked.connect(self.selectFile_audio)
self.setGeometry(600, 600, 150, 150)
self.setWindowTitle('Text to Speech Demo')
self.show()
def showDialog(self):
text, ok = QtGui.QInputDialog.getText(self, 'TTS Demo',
'Enter the text to read:')
if ok:
            self.te.setText(str(text))
s = str(text)
#cmd = 'espeak ' + s
#print cmd
#os.system(cmd)
#engine.say(s)
#engine.runAndWait()
            cmd = 'echo ' + s + ' | festival --tts'
os.system(cmd)
def selectFile_text(self):
#self.te.setText(QtGui.QFileDialog.getOpenFileName())
fileName = QtGui.QFileDialog.getOpenFileName()
f = open(fileName)
s = ' '
for line in f:
s = s + line
self.te.setText(s)
def selectFile_audio(self):
QtGui.QSound.play(QtGui.QFileDialog.getOpenFileName())
def readText(self):
text = self.te.toPlainText()
print text
#engine.say(text)
#engine.runAndWait()
        # iterate over the lines of the text box, not individual characters
        for line in str(text).splitlines():
            cmd = 'echo ' + str(line) + ' | festival --tts'
            os.system(cmd)
return
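    # Note: this second definition replaces the festival-based readText above;
    # being defined later, it is the one actually bound to the Speak button.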
def readText(self):
text = self.te.toPlainText()
print text
engine.say(text)
engine.runAndWait()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.671875
| 3
|
test/test_dummy.py
|
darnir/firefly-cli
| 13
|
12776347
|
import unittest
class TestCase(unittest.TestCase):
def test_dummy(self):
self.assertEqual('tests to be added', 'tests to be added')
if __name__ == '__main__':
unittest.main()
| 2.53125
| 3
|
academics/models.py
|
judeakinwale/SMS-backup
| 0
|
12776348
|
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from datetime import datetime
# Create your models here.
class Faculty(models.Model):
"""Model definition for Faculty."""
name = models.CharField(max_length=250, unique=True)
code = models.IntegerField(null=True, blank=True, unique=True)
description = models.TextField(null=True, blank=True)
dean = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_staff': True},
null=True,
blank=True
)
is_active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
class Meta:
"""Meta definition for Faculty."""
ordering = ['id']
verbose_name = _('Faculty')
verbose_name_plural = _('Faculty')
def save(self, *args, **kwargs):
if self.dean:
try:
dean = self.dean.staff_set.all().first()
dean.is_dean_of_faculty = True
dean.save()
print(f"{dean} \n Is dean of faculty: {dean.is_dean_of_faculty}")
except Exception:
self.dean.get_staff()
dean = self.dean.staff_set.all().first()
dean.is_dean_of_faculty = True
dean.save()
print(f"{dean} \n Is dean of faculty: {dean.is_dean_of_faculty}")
super(Faculty, self).save(*args, **kwargs) # Call the real save() method
def __str__(self):
"""String representation of Faculty."""
return self.name
class Department(models.Model):
"""Model definition for Department."""
faculty = models.ForeignKey(Faculty, on_delete=models.CASCADE)
name = models.CharField(max_length=250, unique=True)
code = models.CharField(max_length=250, null=True, blank=True, unique=True)
description = models.TextField(null=True, blank=True)
head = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_staff': True},
null=True,
blank=True
)
is_active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
class Meta:
"""Meta definition for Department."""
ordering = ['id']
verbose_name = _('Department')
verbose_name_plural = _('Departments')
def save(self, *args, **kwargs):
if self.head:
try:
head = self.head.staff_set.all().first()
head.is_head_of_department = True
head.save()
print(f"{head} \n Is head of department: {head.is_head_of_department}")
except Exception:
self.head.get_staff()
head = self.head.staff_set.all().first()
head.is_head_of_department = True
head.save()
print(f"{head} \n Is head of department: {head.is_head_of_department}")
super(Department, self).save(*args, **kwargs) # Call the real save() method
def __str__(self):
"""String representation of Department."""
return self.name
class Specialization(models.Model):
"""Model definition for Specialization."""
department = models.ForeignKey(Department, on_delete=models.CASCADE)
name = models.CharField(max_length=250, unique=True)
code = models.CharField(max_length=250, null=True, blank=True, unique=True)
max_level = models.ForeignKey("Level", on_delete=models.DO_NOTHING, null=True, blank=True)
description = models.TextField(null=True, blank=True)
is_active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
class Meta:
"""Meta definition for Specialization."""
ordering = ['id']
verbose_name = _('Specialization')
verbose_name_plural = _('Specializations')
def __str__(self):
"""String representation of Specialization."""
return self.name
class Course(models.Model):
"""Model definition for Course."""
specialization = models.ForeignKey(Specialization, on_delete=models.CASCADE)
name = models.CharField(max_length=250, unique=True)
code = models.CharField(max_length=250, null=True, blank=True, unique=True)
description = models.TextField(null=True, blank=True)
coordinator = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_staff': True},
null=True,
blank=True
)
is_active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
class Meta:
"""Meta definition for Course."""
ordering = ['id']
verbose_name = _('Course')
verbose_name_plural = _('Courses')
def __str__(self):
"""String representation of Course."""
return self.name
class Level(models.Model):
"""Model definition for Level."""
class LevelChoices(models.IntegerChoices):
ONE = 100
TWO = 200
THREE = 300
FOUR = 400
FIVE = 500
code = models.IntegerField(
choices=LevelChoices.choices,
null=True,
default=LevelChoices.ONE,
)
class Meta:
"""Meta definition for Level."""
ordering = ['id']
verbose_name = _('Level')
verbose_name_plural = _('Levels')
def __str__(self):
"""String representation of Level"""
return f"{self.code}"
class Semester(models.Model):
"""Model definition for Semester."""
class SemesterChoices(models.IntegerChoices):
FIRST = 1, '1st Semester'
SECOND = 2, '2nd Semester'
semester = models.IntegerField(
choices=SemesterChoices.choices,
null=True,
default=SemesterChoices.FIRST
)
class Meta:
"""Meta definition for Semester."""
ordering = ['id']
verbose_name = 'Semester'
verbose_name_plural = 'Semesters'
def __str__(self):
"""String representation of Semester."""
return f"{self.semester}"
class Session(models.Model):
"""Model definition for Session."""
year = models.CharField(max_length=4)
is_current = models.BooleanField(default=False)
class Meta:
"""Meta definition for Session."""
ordering = ['year']
verbose_name = 'Session'
verbose_name_plural = 'Sessions'
def save(self, *args, **kwargs):
current_year = datetime.today().year
year = datetime.strptime(self.year, "%Y").year
if year == current_year:
self.is_current = True
super(Session, self).save(*args, **kwargs) # Call the real save() method
def __str__(self):
"""String representation of Session."""
return f'{self.year} / {datetime.strptime(self.year, "%Y").year + 1}'
| 2.59375
| 3
|
src/python/fibonacci.py
|
allenjzhang/playground
| 1
|
12776349
|
import sys
# the memoized fibonacci(1000) still recurses ~1000 frames deep, which is
# right at CPython's default limit, so raise it a little
sys.setrecursionlimit(2000)
memMap = {}
def fibonacci (n):
if (n not in memMap):
if n <= 0:
print("Invalid input")
elif n == 1:
memMap[n] = 0
elif n == 2:
memMap[n] = 1
else:
memMap[n] = fibonacci (n-1) + fibonacci (n-2)
return memMap[n]
def fibonacciSlow (n):
if n <= 0:
print("Invalid input")
elif n == 1:
return 0
elif n == 2:
return 1
else:
        # recurse into the naive version itself; calling the memoized
        # fibonacci() here would defeat the point of the comparison
        return fibonacciSlow (n-1) + fibonacciSlow (n-2)
print(fibonacci (1000))
print("---------------")
print(fibonacciSlow (30))  # the naive version is exponential, so keep n small
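# Illustrative check: with this 1-indexed convention (fibonacci(1) == 0), fibonacci(10) == 34.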
| 4.03125
| 4
|
yogadl/_core.py
|
aaron276h/YogaDL
| 74
|
12776350
|
<reponame>aaron276h/YogaDL
# Copyright 2020 Determined AI. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The core interfaces of the yoga data layer.
"""
import abc
from typing import Any, Callable, Optional, Union
import tensorflow as tf
# TODO: Make sure users are not required to have TF, PyTorch,
# and TP dataflows all installed to use this.
Submittable = Union[
tf.data.Dataset,
]
class Stream:
"""
Stream contains a generator of data and other required information
to feed into framework specific data APIs.
"""
def __init__(
self,
iterator_fn: Callable,
length: int,
output_types: Any = None,
output_shapes: Any = None,
):
self.iterator_fn = iterator_fn
self.length = length
self.output_types = output_types
self.output_shapes = output_shapes
def __iter__(self) -> Any:
"""
Iterate through the records in the stream.
"""
return self.iterator_fn()
def __len__(self) -> int:
"""
Return the length of the stream, which may differ from the length of the dataset.
"""
return self.length
class DataRef(metaclass=abc.ABCMeta):
"""
The base interface for a reference to a dataset in the yogadl framework.
The DataRef may refer to a dataset in a remote storage location; it need not refer to locally-
available data. The only mechanism for accessing the records inside the dataset is to create a
Stream and to iterate through them.
By specifying all of the random-access options up front, the backend which provides the DataRef
can provide performance-optimized streaming, since it is guaranteed with yogadl that lower
layers will operate without random access.
"""
@abc.abstractmethod
def stream(
self,
start_offset: int = 0,
shuffle: bool = False,
skip_shuffle_at_epoch_end: bool = False,
shuffle_seed: Optional[int] = None,
shard_rank: int = 0,
num_shards: int = 1,
drop_shard_remainder: bool = False,
) -> Stream:
"""
Create a sequentially accessible set of records from the dataset, according to the
random-access arguments given as parameters.
"""
pass
@abc.abstractmethod
def __len__(self) -> int:
"""
Return the length of the dataset that the DataRef refers to.
"""
pass
class Storage(metaclass=abc.ABCMeta):
"""
Storage is a cache for datasets.
Storage accepts datasets in various forms via submit(), and returns DataRef objects via
fetch().
Conceptually, Storage is sort of like a DataRef factory. It stores datasets
in an unspecified format, and returns objects which implement the DataRef
interface.
Note that submit() and fetch() are not multiprocessing-safe by default.
The @cacheable decorator should be safe to call simultaneously from
many threads, processes, or machines.
"""
@abc.abstractmethod
def submit(self, data: Submittable, dataset_id: str, dataset_version: str) -> None:
"""
Stores dataset to a cache.
"""
pass
@abc.abstractmethod
def fetch(self, dataset_id: str, dataset_version: str) -> DataRef:
"""
Fetch a dataset from storage and provide a DataRef for streaming it.
"""
pass
@abc.abstractmethod
def cacheable(self, dataset_id: str, dataset_version: str) -> Callable:
"""
A decorator that calls submit and fetch and is responsible for coordinating
amongst instances of Storage in different processes.
"""
pass
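# Illustrative usage sketch. ``SomeStorage`` and ``make_dataset`` are hypothetical
# names; any concrete Storage implementation would be used the same way:
#   storage = SomeStorage(...)
#   @storage.cacheable("mnist", "v1")
#   def make_dataset() -> tf.data.Dataset:
#       return tf.data.Dataset.range(10)
#   dataref = make_dataset()          # a DataRef served from the cache
#   for record in dataref.stream(shuffle=True, shuffle_seed=0):
#       ...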
| 1.929688
| 2
|