hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e7351729964b02b462566b09dcddbc2209fa1c8 | 3,561 | py | Python | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | 2 | 2020-01-04T23:44:10.000Z | 2020-07-12T17:10:09.000Z | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | null | null | null | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | 1 | 2022-03-16T05:43:33.000Z | 2022-03-16T05:43:33.000Z | # level design by Michael Abel
# Color schemes cycled through (with wrap-around) by the "a" switch in func_gamma.
# The scheme names are injected by the game engine before this script runs.
schemes=[test_scheme, tron_scheme,candy_scheme, default_scheme,
                 green_scheme, yellow_scheme, blue_scheme, red_scheme, metal_scheme, bronze_scheme]
# .................................................................................................................
def func_gamma():
    """Build the "gamma" level.

    Switch "a" cycles the level's color scheme; switches b-e each add or
    remove one count, and the "exit" gate is activated exactly when 4 of
    them are on.
    NOTE(review): `world`, `KikiSwitch`, `KikiPos`, `KikiWall`, `KikiMutant`,
    `continuous`, `applyColorScheme` and `kikiObjectToGate` are presumably
    injected by the game engine before this script is evaluated — confirm.
    """
    s = world.getSize()
    world.switch_countera = 0  # index into `schemes` for the color-cycling switch
    world.switch_counter = 0   # number of currently active b-e switches
    def aswitched ():
        # Apply the current scheme, then advance the index with wrap-around.
        applyColorScheme (schemes[world.switch_countera])
        if world.switch_countera==len(schemes)-1 :
            world.switch_countera=0
        else:
            world.switch_countera+=1
    def switched (switch):
        # +1 when the switch turned on, -1 when it turned off
        # (old-style `and/or` ternary idiom).
        world.switch_counter += switch.isActive() and 1 or -1
        # NOTE: `exit` shadows the builtin, but only inside this closure.
        exit = kikiObjectToGate(world.getObjectWithName("exit"))
        exit.setActive(world.switch_counter == 4)
    aswitch = KikiSwitch()
    bswitch = KikiSwitch()
    cswitch = KikiSwitch()
    dswitch = KikiSwitch()
    eswitch = KikiSwitch()
    # The default argument (s=...) binds each switch at definition time,
    # avoiding the late-binding closure pitfall.
    aswitch.getEventWithName("switched").addAction ( continuous ( aswitched ))
    bswitch.getEventWithName("switched").addAction ( continuous (lambda s= bswitch : switched(s) ))
    cswitch.getEventWithName("switched").addAction ( continuous (lambda s= cswitch : switched(s) ))
    dswitch.getEventWithName("switched").addAction ( continuous (lambda s= dswitch : switched(s) ))
    eswitch.getEventWithName("switched").addAction ( continuous (lambda s= eswitch : switched(s) ))
    # Place the two floor-level switches and a mutant between them.
    world.addObjectAtPos (aswitch , KikiPos (s.x-1,0,0))
    world.addObjectAtPos (bswitch , KikiPos (0,0,0))
    world.addObjectAtPos (KikiMutant() , KikiPos (s.x/2,0,0))
    # Wall segments forming the climbing path; commented lines below are
    # earlier switch placements kept for reference.
    world.addObjectLine(KikiWall, KikiPos(0,0,1), KikiPos(s.x,0,1))
    world.addObjectLine(KikiWall, KikiPos(0,1,0), KikiPos(s.x,1,0))
    world.addObjectLine(KikiWall, KikiPos(0,2,2), KikiPos(s.x-3,2,2))
    # world.addObjectAtPos (KikiSwitch() , KikiPos (s.x-3,2,2))
    world.addObjectLine(KikiWall, KikiPos(2,2,2), KikiPos(2,2,s.z-3))
    # world.addObjectAtPos (KikiSwitch() , KikiPos (2,2,s.z-3))
    world.addObjectLine(KikiWall, KikiPos(2,2,4), KikiPos(2,s.y-3,4))
    #exit world.addObjectAtPos (KikiSwitch() , KikiPos (2,s.y-3,4))
    world.addObjectLine(KikiWall, KikiPos(2,4,4), KikiPos(s.x-4,4,4))
    world.addObjectAtPos (cswitch , KikiPos (s.x-3,4,4))
    world.addObjectLine(KikiWall, KikiPos(4,4,4), KikiPos(4,4,s.z-4))
    world.addObjectAtPos (dswitch , KikiPos (4,4,s.z-3))
    world.addObjectLine(KikiWall, KikiPos(4,4,6), KikiPos(4,s.y-4,6))
    world.addObjectAtPos (eswitch , KikiPos (4,s.y-3,6))
# Registry entry consumed by the level loader; "create" is invoked to build
# the level geometry when it is loaded.
level_dict["gamma"] = {
    "scheme": "tron_scheme",  # initial color scheme (cycled at runtime by switch "a")
    "size": (10,10,10),
    "intro": "gamma",
    "help": (
        "",
        "",
        ""
    ),
    "player": { "coordinates": (0,5,0),
                "nostatus": 0,
              },
    "exits": [
        {
            "name": "exit",
            "active": 0,  # opened at runtime once 4 switches are active
            #"position": (0,0,0),
            "coordinates": (2,7,4), #absolute coord
        },
    ],
    "create": func_gamma,
}
# .................................................................................................................
| 42.392857 | 115 | 0.511092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.191238 |
0e7360046447f24e5c5fb81df9b707bff42c305e | 13,241 | py | Python | feature_importance/feature_attribution.py | UMCUGenetics/cancer_type_classification_from_sparse_SNV_data | 03a768d0f999b9d3ab544909c1ca534c85d7cd17 | [
"Apache-2.0"
] | null | null | null | feature_importance/feature_attribution.py | UMCUGenetics/cancer_type_classification_from_sparse_SNV_data | 03a768d0f999b9d3ab544909c1ca534c85d7cd17 | [
"Apache-2.0"
] | null | null | null | feature_importance/feature_attribution.py | UMCUGenetics/cancer_type_classification_from_sparse_SNV_data | 03a768d0f999b9d3ab544909c1ca534c85d7cd17 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.
    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
    or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    copied from: https://stackoverflow.com/questions/45466020/how-to-export-keras-h5-to-tensorflow-pb
    """
    graph = session.graph
    with graph.as_default():
        # Variables to convert to constants = all globals minus the ones kept.
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # Also listing all variables as outputs keeps them from being pruned.
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # Strip device pins (e.g. "/gpu:0") so the graph loads anywhere.
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
def load_pb_model(model_path):
    """Load a frozen Protocol Buffers model; return (interactive session, graph)."""
    restored_graph = tf.Graph()
    session_config = tf.ConfigProto(gpu_options={'allow_growth':True})
    # The interactive session installs `restored_graph` as the default graph,
    # so the import below lands in it.
    session = tf.InteractiveSession(graph=restored_graph, config=session_config)
    # Read and parse the serialized GraphDef from disk.
    with tf.gfile.Open(model_path, "rb") as pb_file:
        serialized = pb_file.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(serialized)
    tf.import_graph_def(graph_def)
    return session, restored_graph
def T(graph, layer):
    """Thin helper: return the tensor registered under the name *layer* in *graph*."""
    lookup = graph.get_tensor_by_name
    return lookup(layer)
def supplement_graph(graph):
    """Supplement the cancer type classification graph with a gradients operator.

    Adds a placeholder for the target label index and the gradient of that
    label's softmax prediction with respect to the input.

    Returns:
        (input tensor, label-index placeholder, softmax output tensor,
         gradient-of-prediction-w.r.t.-input tensor)
    """
    with graph.as_default():
        label_index = tf.placeholder(tf.int32, [])
        # Fix: removed a leftover debug loop that printed every operation name
        # in the graph on each call, flooding stdout.
        inp = T(graph, 'import/input_1:0')
        label_prediction = T(graph, 'import/dense_1/Softmax:0')[:, label_index]
        return inp, label_index, T(graph, 'import/dense_1/Softmax:0'), tf.gradients(label_prediction, inp)[0]
def make_predictions_and_gradients(sess, graph):
    """Returns a function that can be used to obtain the predictions and gradients
    from the cancer type classification network for a set of inputs.
    The returned function is meant to be provided as an argument to the integrated_gradients
    method.
    """
    inp, label_index, predictions, grads = supplement_graph(graph)
    # make_callable caches the fetch/feed setup, avoiding per-call
    # session.run() overhead across the many IG interpolation steps.
    run_graph = sess.make_callable([predictions, grads], feed_list=[inp, label_index])
    def f(samples_to_test, target_label_index):
        # f(inputs, label) -> (predictions, gradients)
        return run_graph(samples_to_test, target_label_index)
    return f
def top_label_id_and_score(sample_, preds_and_grads_fn):
    """Return (label_id, score) for the class receiving the highest softmax value.

    Args:
        sample_: a single input sample (wrapped into a batch of one).
        preds_and_grads_fn: callable(inputs, label_index) -> (predictions, gradients),
            as produced by make_predictions_and_gradients.
    """
    # Evaluate the SOFTMAX output layer for the sample; the label index passed
    # here (0) only affects the gradients, which are discarded.
    preds, _ = preds_and_grads_fn([sample_], 0)
    # Fix: renamed local `id` -> `top_id`; `id` shadowed the builtin.
    top_id = np.argmax(preds[0])
    return top_id, preds[0][top_id]
def integrated_gradients(
        inp,
        target_label_index,
        predictions_and_gradients,
        baseline,
        steps=50):
    """Computes integrated gradients for a given network and prediction label.

    Integrated gradients is a technique for attributing a deep network's
    prediction to its input features (https://arxiv.org/abs/1703.01365).
    It only applies to classification networks, i.e., networks that predict a
    probability distribution across two or more class labels.

    Args:
        inp: The specific input for which integrated gradients must be computed.
        target_label_index: Index of the target class for which integrated
            gradients must be computed.
        predictions_and_gradients: callable(inputs, target_label_index) ->
            (predictions, gradients), where `inputs` is a batch shaped like
            `inp` with a leading batch dimension, `predictions` has shape
            <batch, num_classes>, and `gradients` has the same shape as
            `inputs`.
        baseline: The baseline input used in the integrated gradients
            computation, or None to use the all-zero tensor of `inp`'s shape.
        steps: Number of interpolation steps between the baseline and the
            input; controls the integral approximation error.

    Returns:
        (integrated_gradients, predictions, grads) where
        `integrated_gradients` has the same shape as `inp`,
        `predictions` is the prediction trend over the scaled inputs
        (shape <steps+1, num_classes>), and `grads` are the trapezoid-averaged
        gradients (useful for sanity-checking the computation).

    Raises:
        ValueError: if the provided baseline's shape differs from `inp`'s.
    """
    if baseline is None:
        baseline = 0 * inp
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, and a mismatch would otherwise surface later as a
    # confusing broadcast error.
    if baseline.shape != inp.shape:
        raise ValueError(
            "baseline shape {} does not match input shape {}".format(
                baseline.shape, inp.shape))

    # Scale input and compute gradients along the straight-line path.
    scaled_inputs = [baseline + (float(i) / steps) * (inp - baseline) for i in range(0, steps + 1)]
    predictions, grads = predictions_and_gradients(scaled_inputs,
                                                   target_label_index)  # shapes: <steps+1>, <steps+1, inp.shape>

    # Use trapezoidal rule to approximate the integral.
    # See Section 4 of the following paper for an accuracy comparison between
    # left, right, and trapezoidal IG approximations:
    # "Computing Linear Restrictions of Neural Networks", Matthew Sotoudeh, Aditya V. Thakur
    # https://arxiv.org/abs/1908.06214
    grads = (grads[:-1] + grads[1:]) / 2.0
    avg_grads = np.average(grads, axis=0)
    integrated_gradients = (inp - baseline) * avg_grads  # shape: <inp.shape>
    return integrated_gradients, predictions, grads
if __name__ == '__main__':
    # CLI: <fold> <keras .h5 model> <bin-data csv> <driver-data csv> <motif-data csv>
    fold = int(sys.argv[1])
    model_filepath = str(sys.argv[2])
    data_file = str(sys.argv[3])
    driver_data_file = str(sys.argv[4])
    motif_data_file = str(sys.argv[5])
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    # Force Tensorflow to use a single thread
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
    # Load the trained Keras model and freeze it into a constant graph.
    model = load_model(model_filepath)
    frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs])
    ### Save the frozen tf graph into a Protocol Buffers file
    model_folder, model_filename = os.path.split(model_filepath)
    model_basename, file_extension = os.path.splitext(model_filepath)
    # Basename without extension, taken after the last "/".
    model_filename = model_basename.split("/")[len(model_basename.split("/"))-1]
    print(model_filename)
    print(model.summary())
    pb_model_filename = str(model_filename + ".pb")
    tf.train.write_graph(frozen_graph, model_folder, pb_model_filename, as_text=False)
    # Re-load the frozen graph as a plain TF session (Keras no longer needed).
    sess, graph = load_pb_model(os.path.join(model_folder,pb_model_filename))
    # Load the cancer labels
    # format: cancer type names in order, 1 cancer type per line,
    # order should follow the LabelEncoder() class order used for training
    labels = np.array(open("./label_vocab_for_integrated_gradients_FINAL.txt").read().split('\n'))
    # Make the predictions_and_gradients function
    cancermodel_predictions_and_gradients = make_predictions_and_gradients(sess, graph)
    # Load data
    data = pd.read_csv(data_file, index_col=[0])
    driver_data = pd.read_csv(driver_data_file, index_col=[0])
    motif_data = pd.read_csv(motif_data_file, index_col=[0])
    # Keep only the samples belonging to the requested cross-validation fold.
    test_samples = pd.read_csv('./test_stratified_idx_pcawg.csv', index_col=[0])
    test_samples.columns = ['guid', 'split']
    test_samples = test_samples[test_samples.split == fold]
    # Sort all three feature tables by guid so their rows stay aligned
    # when concatenated below.
    test_data = data[data['guid'].isin(test_samples.guid)]
    test_data = test_data.sort_values(by=['guid'])
    test_guid = list(test_data['guid'])
    test_data_driver = driver_data[driver_data['guid'].isin(test_samples.guid)]
    test_data_driver = test_data_driver.sort_values(by=['guid'])
    test_data_motif = motif_data[motif_data['guid'].isin(test_samples.guid)]
    test_data_motif = test_data_motif.sort_values(by=['guid'])
    test_data = test_data.drop(['guid'], axis=1)
    test_data_driver = test_data_driver.drop(['guid'], axis=1)
    test_data_motif = test_data_motif.drop(['guid'], axis=1)
    x_test_bin = test_data.values
    x_test_driver = test_data_driver.values
    x_test_motif = test_data_motif.values
    x_test = np.concatenate((x_test_bin, x_test_driver, x_test_motif), axis=1)
    # NOTE(review): the DataFrame index is used as the cancer-type label of
    # each sample below — confirm against the data files.
    y_test = test_data.index
    # Get feature list
    features_ = list(test_data.columns)
    features_.extend(list(test_data_driver.columns))
    features_.extend(list(test_data_motif.columns))
    # Get baseline
    # All-zero baseline vector, one entry per concatenated feature.
    averages_and_zeros = [0.0 for column_ in list(test_data.columns.values)]
    averages_and_zeros.extend([0.0 for column_ in list(test_data_driver.columns.values)])
    averages_and_zeros.extend([0.0 for column_ in list(test_data_motif.columns.values)])
    bline = np.reshape(np.asarray(averages_and_zeros), (x_test.shape[1],))
    sample_attributions = list()
    index_list = list()
    for cnc_ in labels:
        print(cnc_)
        for sdx, s_label in enumerate(y_test):
            if cnc_ != s_label:
                continue
            actual_sample = x_test[sdx]
            index_list.append(test_guid[sdx])
            # Determine top label and score.
            top_label_id, score = top_label_id_and_score(actual_sample, cancermodel_predictions_and_gradients)
            if labels[top_label_id] != s_label:
                # Attributions are still computed for the *predicted* label.
                print("Prediction mismatch: {}".format(test_guid[sdx]))
                print("correct label: {}".format(s_label))
                print("predicted label: {}".format(labels[top_label_id]))
            # Compute attributions based on the integrated gradients method
            attributions, predictions, prediction_trend = integrated_gradients(actual_sample,
                                                                               top_label_id,
                                                                               cancermodel_predictions_and_gradients,
                                                                               baseline=bline,
                                                                               steps=200)
            sample_attributions.append(attributions)
    ### Save attributions into file
    attribution_data = pd.DataFrame(sample_attributions, columns=features_, index=index_list)
    attribution_data.to_csv("./feature_attributions/{}_fold{}_0_baseline.txt".format(model_filename,fold), index=True)
0e736be5fe9338ed998f699968b1944d73ad1c08 | 2,405 | py | Python | rolldecayestimators/tests/test_polynom_estimator.py | martinlarsalbert/rolldecay-estimators | 4d70da6058720ecbcecba3ed94c40f287a115e05 | [
"BSD-3-Clause"
] | 1 | 2021-05-21T06:05:20.000Z | 2021-05-21T06:05:20.000Z | rolldecayestimators/tests/test_polynom_estimator.py | martinlarsalbert/rolldecay-estimators | 4d70da6058720ecbcecba3ed94c40f287a115e05 | [
"BSD-3-Clause"
] | null | null | null | rolldecayestimators/tests/test_polynom_estimator.py | martinlarsalbert/rolldecay-estimators | 4d70da6058720ecbcecba3ed94c40f287a115e05 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import pandas as pd
import os.path
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import VarianceThreshold
import sympy as sp
from rolldecayestimators.polynom_estimator import Polynom
import matplotlib.pyplot as plt
@pytest.fixture
def data():
    """Random single-feature regression dataset (X, y) shared by the tests below."""
    # Generate some random regression data:
    # NOTE(review): n_informative=10 exceeds n_features=1 — confirm intended.
    X, y = make_regression(n_samples=1000, n_features=1, n_informative=10, noise=10, random_state=1)
    yield X, y
@pytest.fixture
def model(data):
    """Fitted sklearn pipeline: degree-1 polynomial -> variance filter -> k-best -> linear."""
    X=data[0]
    y=data[1]
    polynomial_features = PolynomialFeatures(degree=1)
    variance_treshold = VarianceThreshold()
    linear_regression = LinearRegression()
    select_k_best = SelectKBest(k=1, score_func=f_regression)
    steps = [
        ('polynomial_feature', polynomial_features),
        # ('standard_scaler', standard_scaler),
        ('variance_treshold', variance_treshold),
        ('select_k_best', select_k_best),
        ('linear_regression', linear_regression)
    ]
    model = Pipeline(steps=steps)
    model.fit(X=X, y=y)
    yield model
def test_fit(data, model):
    """Fit a Polynom from the pipeline and visually compare its predictions.

    NOTE(review): `plt.show()` blocks until the window is closed, so this
    test needs an interactive matplotlib backend / manual interaction.
    """
    X=data[0]
    x=X[:,0]
    y=data[1]
    y_symbol = sp.symbols('y')
    polynom = Polynom(model=model, columns=['B_e_hat'], y_symbol=y_symbol)
    polynom.fit(X=X, y=y)
    # Plot raw data, pipeline predictions and the fitted polynom together.
    fig,ax=plt.subplots()
    ax.plot(x,y, '.')
    ax.plot(x, model.predict(X=X), label='model')
    ax.plot(x, polynom.predict(X=X), '--', label='polynom')
    ax.legend()
    plt.show()
@pytest.fixture
def polynom(data, model):
    """Polynom instance fitted against the shared regression data and pipeline."""
    X = data[0]
    y = data[1]
    y_symbol = sp.symbols('y')
    polynom = Polynom(model=model, columns=['B_e_hat'], y_symbol=y_symbol)
    polynom.fit(X=X, y=y)
    yield polynom
def test_predict_dict(polynom):
    """Smoke test: predict accepts a plain dict keyed by feature name."""
    sample = {'B_e_hat': 1}
    polynom.predict(X=sample)
def test_predict_series(polynom):
    """Smoke test: predict accepts a pandas Series keyed by feature name."""
    sample = pd.Series({'B_e_hat': 1})
    polynom.predict(X=sample)
def test_save_load(tmpdir, polynom):
    """Round-trip test: a saved polynom can be reloaded and still predict."""
    file_path = os.path.join(str(tmpdir), 'test.sym')
    polynom.save(file_path=file_path)
    reloaded = Polynom.load(file_path=file_path)
    sample = {'B_e_hat': 1}
    reloaded.predict(X=sample)
| 23.125 | 100 | 0.686071 | 0 | 0 | 968 | 0.402495 | 1,016 | 0.422453 | 0 | 0 | 235 | 0.097713 |
0e75465f62c387ef9aa64e8ae316dc9423908eba | 2,151 | py | Python | jzl/utils/wrappers.py | elijahc/jzlsdk | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | null | null | null | jzl/utils/wrappers.py | elijahc/jzlsdk | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | 7 | 2019-12-16T20:48:16.000Z | 2022-02-09T23:31:19.000Z | jzl/utils/wrappers.py | elijahc/jzl | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | null | null | null | import scipy.io as sio
import numpy as np
class MatWrapper(object):
    """Base wrapper pairing a MATLAB .mat file path with its (lazily loaded) contents."""

    def __init__(self, mat_file):
        # Contents are not read here; subclasses load them on first access.
        self.data = None
        self.mat_fp = mat_file
class NeuroSurgMat(MatWrapper):
    """Wrapper for a NeuroSurg .mat recording with lazily loaded channel arrays.

    The .mat file is read on first property access and cached on `self.data`;
    each derived array is additionally memoized on its own `_` attribute.
    """
    def __init__(self, mat_file):
        # Note: does not call MatWrapper.__init__; sets the same fields itself.
        self.mat_fp = mat_file
        self.data = None
        self._clfp = None        # cache for CLFP
        self._cmacro_lfp = None  # cache for CMacro_LFP
        self._metadata = None    # cache for metadata
    @property
    def CLFP(self):
        """3 x n array stacking the CLFP_01..CLFP_03 channels."""
        # Lazy load CLFP files
        if self.data is None:
            self.data = sio.loadmat(self.mat_fp)
        if self._clfp is None:
            clfp = np.empty((3,self.data['CLFP_01'].shape[1]))
            for i in np.arange(3):
                clfp[i,:] = np.squeeze(self.data['CLFP_0'+str(i+1)])
            self._clfp = clfp
        return self._clfp
    @property
    def CMacro_LFP(self):
        """3 x n array stacking the CMacro_LFP_01..03 channels."""
        if self.data is None:
            self.data = sio.loadmat(self.mat_fp)
        if self._cmacro_lfp is None:
            cmacro_lfp = np.empty((3,self.data['CMacro_LFP_01'].shape[1]))
            for i in np.arange(3):
                cmacro_lfp[i,:] = np.squeeze(self.data['CMacro_LFP_0'+str(i+1)])
            self._cmacro_lfp = cmacro_lfp
        return self._cmacro_lfp
    @property
    def metadata(self):
        """Per-recording dict: {'lfp'|'mer'|'eeg': {sampFreqHz, timeStart, timeEnd}}.

        NOTE(review): the nested [0][0][k] indexing assumes a specific MATLAB
        struct layout produced by scipy.io.loadmat — confirm against the files.
        """
        if self.data is None:
            self.data = sio.loadmat(self.mat_fp)
        if self._metadata is None:
            self._metadata = {
                'lfp':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
                'mer':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
                'eeg':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
            }
            for rec in list(self._metadata.keys()):
                self._metadata[rec]['sampFreqHz']=self.data[rec][0][0][0][0][0]
                self._metadata[rec]['timeStart']=np.squeeze(self.data[rec][0][0][1]).item()
                self._metadata[rec]['timeEnd']=np.squeeze(self.data[rec][0][0][2]).item()
        return self._metadata
class NeuroSurgDataset(object):
    """Collection of NeuroSurg recordings rooted at a single directory."""
    def __init__(self, data_dir):
        # Root directory expected to contain the recording files.
        self.data_dir = data_dir

        # TODO Check if manifest file exists, if not create empty one
0e75958c82fc2714586320be787064c237d82eab | 123 | py | Python | condition/leap year or not using conditional operator.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | condition/leap year or not using conditional operator.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | condition/leap year or not using conditional operator.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | a = int(input("Enter the year:"))
print(a,"is leap year") if a%4 == 0 and a%400 == 0 else print(a,"is not a leap year")
| 41 | 87 | 0.601626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.414634 |
0e762537c53ad43de3d76c0baf22e9a8eda7f44a | 245 | py | Python | Aula_8/Aula8.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | Aula_8/Aula8.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | Aula_8/Aula8.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | #Tuplas
# Demo of Python's three basic collections: list, dict and tuple.
numeros = [1, 2, 4, 5, 6, 7, 8, 9]                # list: mutable sequence
usuario = {'Nome': 'Mateus', 'senha': 123456789}  # dict: key/value mapping
pessoa = ('Mateus', 'Alves', 16, 14, 90)          # tuple: immutable record

# Print each collection (same three lines, same order as before).
for colecao in (numeros, usuario, pessoa):
    print(colecao)

# Lists and dicts can be updated in place; tuples cannot.
numeros[1] = 8
usuario['senha'] = 4545343
0e771285f72212fd64fb27aaa4e22a0fd1363127 | 7,242 | py | Python | image-generation/variational-auto-encoder/vq-vae/models/vq_vae.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 228 | 2017-11-20T06:05:56.000Z | 2022-03-23T12:40:05.000Z | image-generation/variational-auto-encoder/vq-vae/models/vq_vae.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 36 | 2018-01-11T23:26:20.000Z | 2022-03-12T00:53:38.000Z | image-generation/variational-auto-encoder/vq-vae/models/vq_vae.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 76 | 2017-11-22T22:00:00.000Z | 2022-03-28T05:58:57.000Z | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
import numpy as np
np.random.seed(1)
class ResidualStack(object):
    """Stack of residual blocks (relu -> conv3x3 -> BN -> relu -> conv1x1 -> BN)."""

    def __init__(self, in_channels, num_hidden, num_res_layers, rng=313):
        self.in_channels = in_channels        # input channel count (kept for reference)
        self.num_hidden = num_hidden          # channels inside each residual block
        self.num_res_layers = num_res_layers  # number of stacked blocks
        self.rng = rng                        # rng forwarded to parameter initializers

    def __call__(self, x, test):
        """Apply all residual blocks; `test` selects inference-mode batch norm."""
        out = x
        for i in range(self.num_res_layers):
            # Fix: `test` was previously not forwarded, so batch normalization
            # always used batch statistics (batch_stat=True) even at inference.
            out = self.res_block(out, scope_name='res_block_'+str(i), test=test)
        return F.relu(out)

    def res_block(self, x, scope_name='res_block', test=False):
        """One residual block; returns x + f(x)."""
        with nn.parameter_scope(scope_name):
            out = F.relu(x)
            out = PF.convolution(out, self.num_hidden, (3, 3),
                                 stride=(1, 1), pad=(1, 1), with_bias=False, name='conv_1', rng=self.rng)
            out = PF.batch_normalization(out, name='bn_1', batch_stat=not test)
            out = F.relu(out)
            out = PF.convolution(out, self.num_hidden, (1, 1),
                                 stride=(1, 1), with_bias=False, name='conv_2', rng=self.rng)
            out = PF.batch_normalization(out, name='bn_2', batch_stat=not test)
        return x + out
class VectorQuantizer(object):
    """VQ-VAE codebook: nearest-embedding quantization with commitment loss.

    Holds a learnable codebook `W` of shape (num_embedding, embedding_dim)
    and maps encoder outputs to their nearest codebook vectors.
    """
    def __init__(self, embedding_dim, num_embedding, commitment_cost, rng,
                 scope_name='vector_quantizer'):
        self.embedding_dim = embedding_dim        # size of each codebook vector
        self.num_embedding = num_embedding        # number of codebook entries
        self.commitment_cost = commitment_cost    # weight of the encoder commitment term
        self.rng = rng
        self.scope_name = scope_name
        with nn.parameter_scope(scope_name):
            # Codebook initialized uniformly in [-1/K, 1/K].
            self.embedding_weight = nn.parameter.get_parameter_or_create('W', shape=(self.num_embedding, self.embedding_dim),
                                                                         initializer=I.UniformInitializer((-1./self.num_embedding, 1./self.num_embedding), rng=self.rng), need_grad=True)
    def __call__(self, x, return_encoding_indices=False):
        # NCHW -> NHWC so the channel axis (embedding_dim) is last.
        x = F.transpose(x, (0, 2, 3, 1))
        x_flat = x.reshape((-1, self.embedding_dim))
        # Squared distances via ||a||^2 + ||b||^2 - 2*a.b (no explicit pairwise loop).
        x_flat_squared = F.broadcast(
            F.sum(x_flat**2, axis=1, keepdims=True), (x_flat.shape[0], self.num_embedding))
        emb_wt_squared = F.transpose(
            F.sum(self.embedding_weight**2, axis=1, keepdims=True), (1, 0))
        distances = x_flat_squared + emb_wt_squared - 2 * \
            F.affine(x_flat, F.transpose(self.embedding_weight, (1, 0)))
        # Index of the nearest codebook vector per position (argmin over distances).
        encoding_indices = F.min(
            distances, only_index=True, axis=1, keepdims=True)
        encoding_indices.need_grad = False
        quantized = F.embed(encoding_indices.reshape(
            encoding_indices.shape[:-1]), self.embedding_weight).reshape(x.shape)
        if return_encoding_indices:
            return encoding_indices, F.transpose(quantized, (0, 3, 1, 2))
        encodings = F.one_hot(encoding_indices, (self.num_embedding,))
        # Commitment loss: pull encoder outputs toward the (fixed) codebook vectors.
        e_latent_loss = F.mean(F.squared_error(
            quantized.get_unlinked_variable(need_grad=False), x))
        # Codebook loss: pull codebook vectors toward the (fixed) encoder outputs.
        q_latent_loss = F.mean(F.squared_error(
            quantized, x.get_unlinked_variable(need_grad=False)))
        loss = q_latent_loss + self.commitment_cost*e_latent_loss
        # Straight-through estimator: forward value is `quantized`, but the
        # gradient flows to `x` because the difference is unlinked.
        quantized = x + (quantized - x).get_unlinked_variable(need_grad=False)
        # Perplexity of codebook usage (exp of the entropy of the usage distribution).
        avg_probs = F.mean(encodings, axis=0)
        perplexity = F.exp(-F.sum(avg_probs*F.log(avg_probs+1.0e-10)))
        # NHWC -> NCHW on the way out.
        return loss, F.transpose(quantized, (0, 3, 1, 2)), perplexity, encodings
class VQVAE(object):
    """VQ-VAE model: conv encoder -> vector quantizer -> deconv decoder.

    All hyper-parameters come from config['model'].
    """
    def __init__(self, config, training=True):
        self.in_channels = config['model']['in_channels']
        self.num_hidden = config['model']['num_hidden']
        self.num_res_layers = config['model']['num_res_layers']
        self.rng = np.random.RandomState(config['model']['rng'])
        self.encoder_res_stack = ResidualStack(in_channels=self.num_hidden,
                                               num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
                                               rng=self.rng)
        self.decoder_res_stack = ResidualStack(in_channels=self.num_hidden,
                                               num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
                                               rng=self.rng)
        self.num_embedding = config['model']['num_embeddings']
        self.embedding_dim = config['model']['embedding_dim']
        self.commitment_cost = config['model']['commitment_cost']
        # NOTE(review): `decay` is stored but not used in this class — presumably
        # intended for an EMA codebook update; confirm.
        self.decay = config['model']['decay']
        self.training = training
        self.vq = VectorQuantizer(
            self.embedding_dim, self.num_embedding, self.commitment_cost, self.rng)
    def encoder(self, x, test):
        """Downsample x by 4 (two stride-2 convs) and apply the residual stack."""
        with nn.parameter_scope('encoder'):
            out = PF.convolution(x, self.num_hidden, (4, 4), stride=(2, 2),
                                 pad=(1, 1), name='conv_1', rng=self.rng)
            out = PF.batch_normalization(out, batch_stat=not test)
            out = F.relu(out)
            out = PF.convolution(out, self.num_hidden, (4, 4), stride=(2, 2),
                                 pad=(1, 1), name='conv_2', rng=self.rng)
            out = self.encoder_res_stack(out, test=test)
        return out
    def decoder(self, x, test):
        """Mirror of the encoder: residual stack then two stride-2 deconvs; tanh output."""
        with nn.parameter_scope('decoder'):
            out = self.decoder_res_stack(x, test=test)
            out = F.relu(out)
            out = PF.deconvolution(out, self.num_hidden, (4, 4), stride=(2, 2),
                                   pad=(1, 1), name='deconv_1', rng=self.rng)
            out = PF.batch_normalization(out, batch_stat=not test)
            out = F.relu(out)
            out = PF.deconvolution(out, self.in_channels, (4, 4), stride=(2, 2),
                                   pad=(1, 1), name='deconv_2', rng=self.rng)
            out = F.tanh(out)
        return out
    def __call__(self, img, return_encoding_indices=False, quantized_as_input=False, test=False):
        """Full forward pass.

        Returns (loss, reconstruction, perplexity) by default;
        (indices, quantized) when return_encoding_indices is set;
        just the decoded image when `img` is already a quantized latent
        (quantized_as_input=True).
        """
        with nn.parameter_scope('vq_vae'):
            if quantized_as_input:
                return self.decoder(img, test)
            z = self.encoder(img, test)
            # Project encoder features to the codebook dimensionality.
            z = PF.convolution(z, self.embedding_dim, (1, 1), stride=(1, 1))
            if return_encoding_indices:
                return self.vq(z, return_encoding_indices=True)
            loss, quantized, perplexity, encodings = self.vq(z)
            img_recon = self.decoder(quantized, test)
            return loss, img_recon, perplexity
0e78b39fa7d281be1d08a36cc35b24cc2b0c544b | 3,464 | py | Python | src/pyherc/test/builders/level.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 25 | 2015-07-21T12:40:42.000Z | 2021-09-23T09:00:45.000Z | src/pyherc/test/builders/level.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 65 | 2015-02-15T19:42:19.000Z | 2018-01-03T10:22:35.000Z | src/pyherc/test/builders/level.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 3 | 2017-06-15T13:07:49.000Z | 2019-04-15T02:18:39.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for level builder
"""
from pyherc.data import new_level, Model, wall_tile, floor_tile, add_character
class LevelBuilder():
    """
    Fluent builder for assembling test levels piece by piece
    """
    def __init__(self):
        """
        Default constructor
        """
        super().__init__()
        self.characters = []
        self.level_size = (10, 10)
        self.floor_tile = 1
        self.wall_tile = None
        self.empty_wall_tile = None
        self.solid_wall_tile = 11
        self.walls = []
        self.model = Model()

    def with_model(self, model):
        """Use the given model for the built level"""
        self.model = model
        return self

    def with_floor_tile(self, tile):
        """Set the tile used for floors"""
        self.floor_tile = tile
        return self

    def with_wall_tile(self, tile):
        """Set the tile used for regular walls"""
        self.wall_tile = tile
        return self

    def with_solid_wall_tile(self, tile):
        """Set the tile used for solid walls"""
        self.solid_wall_tile = tile
        return self

    def with_character(self, character, location=None):
        """
        Place given character to level

        :param character: character, or a builder exposing build()
        :param location: optional location to assign to the character
        """
        # accept either a ready character or a character builder
        resolved = character.build() if hasattr(character, 'build') else character
        if location is not None:
            resolved.location = location
        self.characters.append(resolved)
        return self

    def with_wall_at(self, location):
        """
        Place wall to given location

        :param location: location of the wall
        :type location: (int, int)
        """
        self.walls.append(location)
        return self

    def with_size(self, size):
        """Configure level size as (width, height)"""
        self.level_size = size
        return self

    def build(self):
        """
        Build level

        :returns: the configured level
        """
        level = new_level(self.model)
        width, height = self.level_size
        for x_loc in range(width):
            for y_loc in range(height):
                location = (x_loc, y_loc)
                wall_tile(level, location, self.wall_tile)
                floor_tile(level, location, self.floor_tile)
        for wall_location in self.walls:
            wall_tile(level, wall_location, self.solid_wall_tile)
        for creature in self.characters:
            add_character(level, creature.location, creature)
        return level
| 29.109244 | 79 | 0.634815 | 2,223 | 0.641744 | 0 | 0 | 0 | 0 | 0 | 0 | 1,614 | 0.465935 |
0e7a071ddbe4675462eb93a7095f9190e337de5c | 22,595 | py | Python | magma/backend/coreir/coreir_transformer.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 167 | 2017-10-08T00:59:22.000Z | 2022-02-08T00:14:39.000Z | magma/backend/coreir/coreir_transformer.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 719 | 2017-08-29T17:58:28.000Z | 2022-03-31T23:39:18.000Z | magma/backend/coreir/coreir_transformer.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 14 | 2017-09-01T03:25:16.000Z | 2021-11-05T13:30:24.000Z | from abc import ABC, abstractmethod
from copy import copy
import json
import logging
import os
import coreir as pycoreir
from magma.digital import Digital
from magma.array import Array
from magma.bits import Bits
from magma.backend.check_wiring_context import check_wiring_context
from magma.backend.coreir.coreir_utils import (
attach_debug_info, check_magma_interface, constant_to_value, get_inst_args,
get_module_of_inst, magma_interface_to_coreir_module_type,
magma_port_to_coreir_port, make_cparams, map_genarg,
magma_name_to_coreir_select, Slice)
from magma.compile_exception import UnconnectedPortException
from magma.interface import InterfaceKind
from magma.is_definition import isdefinition
from magma.linking import (
get_linked_modules, has_default_linked_module, get_default_linked_module)
from magma.logging import root_logger
from magma.passes import dependencies
from magma.tuple import Tuple
from magma.backend.util import get_codegen_debug_info
from magma.clock import is_clock_or_nested_clock
from magma.passes.clock import (
drive_all_undriven_clocks_in_value, get_all_output_clocks_in_defn)
from magma.config import get_debug_mode
from magma.protocol_type import MagmaProtocol, MagmaProtocolMeta
from magma.ref import PortViewRef, ArrayRef
from magma.symbol_table import SYMBOL_TABLE_EMPTY
# NOTE(rsetaluri): We do not need to set the level of this logger since it has
# already been done in backend/coreir/coreir_backend.py.
_logger = root_logger().getChild("coreir_backend")
# Maps a magma generator (defn_or_decl) to a callback that receives the
# compiled CoreIR instance.  Populated by GeneratorTransformer._generate_symbols
# and consumed (popped) in DefinitionTransformer.get_coreir_defn.
_generator_callbacks = {}
def _is_generator(ckt_or_inst):
return ckt_or_inst.coreir_genargs is not None
def _coreir_longname(magma_defn_or_decl, coreir_module_or_generator):
    # NOTE(rsetaluri): Stand-in for a pycoreir/coreir-c API that would expose a
    # module's longname directly; this logic must mirror CoreIR's.  Generators
    # are not elaborated at the magma level, so a generator's longname has to
    # be reconstructed here from its genargs.
    namespace = coreir_module_or_generator.namespace.name
    longname = coreir_module_or_generator.name
    if namespace != "global":
        longname = f"{namespace}_{longname}"
    if isinstance(coreir_module_or_generator, pycoreir.Module):
        return longname
    assert isinstance(coreir_module_or_generator, pycoreir.Generator)
    genargs = magma_defn_or_decl.coreir_genargs
    suffix = "".join(
        f"__{key}{genargs[key]}"
        for key in coreir_module_or_generator.params.keys()
    )
    return longname + suffix
def _collect_drivers(value):
"""
Iterate over value to collect the child drivers, packing slices together
"""
drivers = []
start_idx = 0
for i in range(1, len(value)):
# If the next value item is not a reference to an array of bits where
# the array matches the previous item and the index is incremented by
# one, append the current slice to drivers (may introduce slices of
# length 1)
if not (
isinstance(value[i].name, ArrayRef) and
issubclass(value[i].name.array.T, Digital) and
isinstance(value[i - 1].name, ArrayRef) and
value[i].name.array is value[i - 1].name.array and
value[i].name.index == value[i - 1].name.index + 1
):
drivers.append(value[start_idx:i])
start_idx = i
drivers.append(value[start_idx:])
return drivers
def _unwrap(x):
    """Normalize MagmaProtocol wrappers down to the underlying magma object."""
    if isinstance(x, MagmaProtocol):
        # Protocol *instance*: unwrap to the wrapped magma value.
        x = x._get_magma_value_()
    elif isinstance(x, MagmaProtocolMeta):
        # Protocol *type*: unwrap to the underlying magma type.
        x = x._to_magma_()
    return x
class TransformerBase(ABC):
    """Base class for one-shot compilation passes.

    run() executes all child transformers first (post-order), then this
    transformer's own work, and may only be invoked once per instance.
    """

    _UNSET = object()  # sentinel: distinguishes "no default given" from None

    def __init__(self, backend, opts):
        self.backend = backend
        self.opts = opts
        self.ran = False
        self._children = None

    def run(self):
        """Run children, then run_self(); a second call is an error."""
        if self.ran:
            raise RuntimeError("Can only run transformer once")
        self._children = self.children()
        for child in self._children:
            child.run()
        self.run_self()
        self.ran = True

    @abstractmethod
    def children(self):
        """Return the list of child transformers to run before run_self()."""
        raise NotImplementedError()

    def run_self(self):
        """Hook for subclass-specific work; default is a no-op."""
        pass

    def get_opt(self, key, default=_UNSET):
        """Look up an option; without a default, a missing key raises KeyError."""
        if default is TransformerBase._UNSET:
            return self.opts[key]
        return self.opts.get(key, default)
class LeafTransformer(TransformerBase):
    """Transformer with no child passes; all work happens in run_self()."""

    def children(self):
        # Leaf nodes of the transformer tree never spawn sub-transformers.
        return list()
class DefnOrDeclTransformer(TransformerBase):
    """Dispatching transformer for a magma definition or declaration.

    children() selects exactly one child transformer (generator,
    declaration, wrapped, or full definition) -- or none when the module
    is already cached on the backend -- and run_self() records the
    resulting CoreIR module plus symbol/link metadata.
    """

    def __init__(self, backend, opts, defn_or_decl):
        super().__init__(backend, opts)
        self.defn_or_decl = defn_or_decl
        self.coreir_module = None  # filled in by children() or run_self()

    def children(self):
        """Select the single child transformer for this defn/decl."""
        if _is_generator(self.defn_or_decl):
            return [GeneratorTransformer(
                self.backend, self.opts, self.defn_or_decl)]
        try:
            # Already compiled in this backend: reuse the cached module.
            coreir_module = self.backend.get_module(self.defn_or_decl)
            _logger.debug(f"{self.defn_or_decl} already compiled, skipping")
            self.coreir_module = coreir_module
            return []
        except KeyError:
            pass
        if not isdefinition(self.defn_or_decl):
            return [DeclarationTransformer(self.backend,
                                           self.opts,
                                           self.defn_or_decl)]
        # A previously wrapped module is only reusable when it belongs to
        # the same CoreIR context as this backend.
        wrapped = getattr(self.defn_or_decl, "wrappedModule", None)
        if wrapped and wrapped.context is self.backend.context:
            return [WrappedTransformer(self.backend,
                                       self.opts,
                                       self.defn_or_decl)]
        return [DefnitionTransformer(self.backend,
                                      self.opts,
                                      self.defn_or_decl)]

    def run_self(self):
        self._run_self_impl()
        self._generate_symbols()
        self._link_default_module()
        self._link_modules()

    def _link_default_module(self):
        """Attach the default linked module, if one was declared."""
        if not has_default_linked_module(self.defn_or_decl):
            return
        target = get_default_linked_module(self.defn_or_decl)
        target = self.backend.get_module(target)
        self.coreir_module.link_default_module(target)

    def _link_modules(self):
        """Attach all keyed linked modules declared on this defn/decl."""
        targets = get_linked_modules(self.defn_or_decl)
        for key, target in targets.items():
            target = self.backend.get_module(target)
            self.coreir_module.link_module(key, target)

    def _generate_symbols(self):
        """Record the magma-name -> CoreIR-longname mapping (opt-in)."""
        if not self.get_opt("generate_symbols", False):
            return
        out_module_name = _coreir_longname(
            self.defn_or_decl, self.coreir_module)
        self.opts.get("symbol_table").set_module_name(
            self.defn_or_decl.name, out_module_name)

    def _run_self_impl(self):
        if self.coreir_module:
            # children() found a cached module; nothing more to do here.
            return
        self.coreir_module = self._children[0].coreir_module
        self.backend.add_module(self.defn_or_decl, self.coreir_module)
        if isdefinition(self.defn_or_decl):
            # Cache the compiled module on the magma definition so later
            # compiles can take the WrappedTransformer fast path.
            self.defn_or_decl.wrappedModule = self.coreir_module
            libs = self.backend.included_libs()
            self.defn_or_decl.coreir_wrapped_modules_libs_used = libs
class GeneratorTransformer(TransformerBase):
    """Transformer for magma circuits backed by a CoreIR generator.

    Generators are not elaborated at the magma level, so this pass only
    produces a declaration and (optionally) registers a deferred callback
    that emits symbol-table port entries once an instance exists.
    """

    def __init__(self, backend, opts, defn_or_decl):
        super().__init__(backend, opts)
        self.defn_or_decl = defn_or_decl
        self.coreir_module = None

    def children(self):
        try:
            # Already compiled: reuse the cached module, no child needed.
            coreir_module = self.backend.get_module(self.defn_or_decl)
            _logger.debug(f"{self.defn_or_decl} already compiled, skipping")
            self.coreir_module = coreir_module
            return []
        except KeyError:
            pass
        # Generators are always declarations at this level.
        assert not isdefinition(self.defn_or_decl)
        return [DeclarationTransformer(self.backend,
                                       self.opts,
                                       self.defn_or_decl)]

    def run_self(self):
        self._generate_symbols()
        if self.coreir_module is not None:
            return
        self.coreir_module = self._children[0].coreir_module

    def _generate_symbols(self):
        """Register a deferred symbol-table callback for this generator."""
        if not self.get_opt("generate_symbols", False):
            return
        global _generator_callbacks

        def _callback(coreir_inst):
            # Pair magma port names with CoreIR port names positionally;
            # the two interfaces are assumed to list ports in the same order.
            magma_names = list(self.defn_or_decl.interface.ports.keys())
            coreir_names = list(k for k, _ in coreir_inst.module.type.items())
            assert len(magma_names) == len(coreir_names)
            for magma_name, coreir_name in zip(magma_names, coreir_names):
                self.opts.get("symbol_table").set_port_name(
                    self.defn_or_decl.name, magma_name, coreir_name)

        # Only one pending callback per generator is expected.
        assert self.defn_or_decl not in _generator_callbacks
        _generator_callbacks[self.defn_or_decl] = _callback
class InstanceTransformer(LeafTransformer):
    """Transformer that prepares a CoreIR instance for a magma instance.

    run_self() does not create the instance directly: it produces a
    closure (coreir_inst_gen) that, given a CoreIR module definition,
    adds the instance to it.  DefinitionTransformer invokes the closures
    once the enclosing definition exists.
    """

    def __init__(self, backend, opts, inst, defn):
        super().__init__(backend, opts)
        self.inst = inst
        self.defn = defn
        self.coreir_inst_gen = None  # closure set by run_self()

    def run_self(self):
        self.coreir_inst_gen = self.run_self_impl()

    def run_self_impl(self):
        """Return a closure that adds this instance to a CoreIR definition."""
        _logger.debug(
            f"Compiling instance {(self.inst.name, type(self.inst).name)}"
        )
        defn = type(self.inst)
        # Resolve the CoreIR library/namespace holding the instance's module
        # or generator; an explicit `namespace` attribute wins, and "global"
        # may be overridden by the user_namespace option.
        if hasattr(self.inst, "namespace"):
            lib = self.backend.get_lib(self.inst.namespace)
        else:
            lib = self.backend.get_lib(self.inst.coreir_lib)
            if self.inst.coreir_lib == "global":
                lib = self.get_opt("user_namespace", lib)
        if not _is_generator(self.inst):
            module = get_module_of_inst(self.backend.context, self.inst, lib)
            args = get_inst_args(self.inst)
            args = self.backend.context.new_values(args)
            return lambda m: m.add_module_instance(self.inst.name, module, args)
        # Generator case: split the arguments into runtime config args and
        # generator (elaboration-time) args.
        generator = lib.generators[defn.coreir_name]
        config_args = {k: v for k, v in self.inst.coreir_configargs.items()}
        config_args = self.backend.context.new_values(config_args)
        gen_args = {k: map_genarg(self.backend.context, v)
                    for k, v in defn.coreir_genargs.items()}
        gen_args = self.backend.context.new_values(gen_args)
        return lambda m: m.add_generator_instance(
            self.inst.name, generator, gen_args, config_args)
class WrappedTransformer(LeafTransformer):
    """Transformer for a definition compiled earlier in the same context.

    Reuses the cached CoreIR module ("wrappedModule") and re-includes the
    libraries that were in use when the module was originally compiled.
    """

    def __init__(self, backend, opts, defn):
        super().__init__(backend, opts)
        self.defn = defn
        self.coreir_module = self.defn.wrappedModule
        self.backend.include_lib_or_libs(
            self.defn.coreir_wrapped_modules_libs_used)
class DefinitionTransformer(TransformerBase):
    """Transformer that compiles a magma definition into a CoreIR definition.

    Children are the declaration pass, one InstanceTransformer per child
    instance, and (unless skipped) the transitive dependency graph.  The
    heavy lifting -- creating instances and wiring ports -- happens in
    get_coreir_defn()/connect().
    """

    def __init__(self, backend, opts, defn):
        super().__init__(backend, opts)
        self.defn = defn
        self.coreir_module = None
        self.decl_tx = DeclarationTransformer(self.backend,
                                              self.opts,
                                              self.defn)
        self.inst_txs = {
            inst: InstanceTransformer(self.backend, self.opts, inst, self.defn)
            for inst in self.defn.instances
        }
        # Output clocks available for auto-wiring undriven clock ports.
        self.clocks = get_all_output_clocks_in_defn(defn)
        # Cache of constant driver instances keyed by (value, width).
        self._constant_cache = {}

    def children(self):
        children = []
        if not self.get_opt("skip_instance_graph", False):
            deps = dependencies(self.defn, include_self=False)
            opts = self.opts.copy()
            # Dependencies compile their own subtrees; avoid re-walking the
            # instance graph recursively from each of them.
            opts.update({"skip_instance_graph": True})
            children += [DefnOrDeclTransformer(self.backend, opts, dep)
                         for dep in deps]
        children += [self.decl_tx]
        children += self.inst_txs.values()
        return children

    def run_self(self):
        _logger.debug(f"Compiling definition {self.defn}")
        self.coreir_module = self.decl_tx.coreir_module
        if self.defn.inline_verilog_strs:
            # Concatenate all inline verilog snippets and record the magma
            # values they reference so CoreIR can resolve the connections.
            inline_verilog = "\n\n".join(x[0] for x in
                                         self.defn.inline_verilog_strs)
            connect_references = {}
            for _, inline_value_map in self.defn.inline_verilog_strs:
                for key, value in inline_value_map.items():
                    connect_references[key] = magma_port_to_coreir_port(value)
            self.coreir_module.add_metadata("inline_verilog", json.dumps(
                {"str": inline_verilog,
                 "connect_references": connect_references}
            ))
        for name, module in self.defn.compiled_bind_modules.items():
            self.backend.bind_module(name, module)
        self.coreir_module.definition = self.get_coreir_defn()

    def _generate_symbols(self, coreir_insts):
        """Record instance name/type symbol-table entries (opt-in)."""
        if not self.get_opt("generate_symbols", False):
            return
        for inst, coreir_inst in coreir_insts.items():
            self.get_opt("symbol_table").set_instance_name(
                self.defn.name, inst.name,
                (SYMBOL_TABLE_EMPTY, coreir_inst.name))
            self.get_opt("symbol_table").set_instance_type(
                self.defn.name, inst.name, type(inst).name)

    def get_coreir_defn(self):
        """Create the CoreIR definition: instances, metadata, then wiring."""
        coreir_defn = self.coreir_module.new_definition()
        # Invoke the instance-creation closures prepared by the
        # InstanceTransformer children.
        coreir_insts = {inst: self.inst_txs[inst].coreir_inst_gen(coreir_defn)
                        for inst in self.defn.instances}
        # Call generator callback if necessary.
        global _generator_callbacks
        for inst, coreir_inst in coreir_insts.items():
            try:
                callback = _generator_callbacks.pop(type(inst))
            except KeyError:
                continue
            callback(coreir_inst)
        self._generate_symbols(coreir_insts)
        # If this module was imported from verilog, do not go through the
        # general module construction flow. Instead just attach the verilog
        # source as metadata and return the module.
        if hasattr(self.defn, "verilogFile") and self.defn.verilogFile:
            metadata = json.dumps({"verilog_string": self.defn.verilogFile})
            self.coreir_module.add_metadata("verilog", metadata)
            return coreir_defn
        if hasattr(self.defn, "verilog") and self.defn.verilog:
            metadata = json.dumps({"verilog_body": self.defn.verilog})
            self.coreir_module.add_metadata("verilog", metadata)
            return coreir_defn
        if self.defn.coreir_lib is not None:
            self.backend.include_lib_or_libs(self.defn.coreir_lib)
        for name, port in self.defn.interface.ports.items():
            _logger.debug(f"{name}, {port}, {port.is_output()}")
        # Attach per-instance debug info and metadata.
        for inst, coreir_inst in coreir_insts.items():
            if get_codegen_debug_info() and getattr(inst, "debug_info", False):
                attach_debug_info(coreir_inst, inst.debug_info)
            if getattr(inst, "coreir_metadata"):
                for k, v in inst.coreir_metadata.items():
                    coreir_inst.add_metadata(k, json.dumps(v))
        # Wire every non-output port of every instance, then the
        # definition's own interface ports.
        for inst in coreir_insts:
            for name, port in inst.interface.ports.items():
                self.connect_non_outputs(coreir_defn, port)
        for port in self.defn.interface.ports.values():
            self.connect_non_outputs(coreir_defn, port)
        return coreir_defn

    def connect_non_outputs(self, module_defn, port):
        """Connect `port` (and children) unless it is a pure output."""
        # Recurse into non input types that may contain inout children.
        if isinstance(port, Tuple) and not port.is_input() or \
                isinstance(port, Array) and not port.T.is_input():
            for elem in port:
                self.connect_non_outputs(module_defn, elem)
        elif not port.is_output():
            self.connect(module_defn, port, port.trace())

    def get_source(self, port, value, module_defn):
        """Resolve `value` to a CoreIR wireable source for `port`.

        Returns None when the value was decomposed and its children were
        connected directly (anonymous arrays/tuples).
        """
        port = _unwrap(port)
        value = _unwrap(value)
        if isinstance(value, pycoreir.Wireable):
            return value
        if isinstance(value, Slice):
            return module_defn.select(value.get_coreir_select())
        if isinstance(value, Bits) and value.const():
            return self._const_instance(value, len(value), module_defn)
        if value.anon() and isinstance(value, Array):
            # Anonymous array: wire each maximal contiguous run separately.
            drivers = _collect_drivers(value)
            offset = 0
            for d in drivers:
                d = _unwrap(d)
                if len(d) == 1:
                    # _collect_drivers will introduce a slice of length 1 for
                    # non-slices, so we index them here with 0 to unpack the
                    # extra array dimension
                    self.connect(module_defn, port[offset], d[0])
                else:
                    self.connect(module_defn,
                                 Slice(port, offset, offset + len(d)),
                                 Slice(d[0].name.array, d[0].name.index,
                                       d[-1].name.index + 1))
                offset += len(d)
            return None
        if isinstance(value, Tuple) and value.anon():
            # Anonymous tuple: wire each field pairwise.
            for p, v in zip(port, value):
                self.connect(module_defn, p, v)
            return None
        if value.const():
            return self._const_instance(value, None, module_defn)
        if isinstance(value.name, PortViewRef):
            return module_defn.select(
                magma_name_to_coreir_select(value.name))
        return module_defn.select(magma_port_to_coreir_port(value))

    def connect(self, module_defn, port, value):
        """Wire `value` to `port` in the CoreIR definition.

        Undriven clock ports are auto-wired to a default clock when one is
        available; other undriven ports raise UnconnectedPortException
        unless the definition opted out via _ignore_undriven_.
        """
        if value is None and is_clock_or_nested_clock(type(port)):
            with self.defn.open():
                if not drive_all_undriven_clocks_in_value(port, self.clocks):
                    # No default clock
                    raise UnconnectedPortException(port)
            value = port.trace()
        if value is None:
            if port.is_inout():
                return  # skip inouts because they might be conn. as an input.
            if getattr(self.defn, "_ignore_undriven_", False):
                return
            raise UnconnectedPortException(port)
        check_wiring_context(port, value)
        source = self.get_source(port, value, module_defn)
        if not source:
            # Value was decomposed and connected piecewise in get_source.
            return
        sink = module_defn.select(magma_port_to_coreir_port(port))
        module_defn.connect(source, sink)
        if get_codegen_debug_info() and getattr(port, "debug_info", False):
            attach_debug_info(module_defn, port.debug_info, source, sink)

    def _const_instance(self, constant, num_bits, module_defn):
        """Return (creating if needed) a constant driver for `constant`.

        num_bits is None for a single bit (corebit.const); otherwise a
        coreir.const generator of that width is instantiated.  Results are
        cached per (value, width).
        """
        value = constant_to_value(constant)
        key = (value, num_bits)
        try:
            return self._constant_cache[key]
        except KeyError:
            pass
        if num_bits is None:
            config = self.backend.context.new_values({"value": bool(value)})
            name = f"bit_const_{value}_{num_bits}"
            mod = self.backend.get_lib("corebit").modules["const"]
            module_defn.add_module_instance(name, mod, config)
        else:
            config = self.backend.context.new_values({"value": value})
            name = f"const_{value}_{num_bits}"
            gen = self.backend.get_lib("coreir").generators["const"]
            gen_args = self.backend.context.new_values({"width": num_bits})
            module_defn.add_generator_instance(name, gen, gen_args, config)
        out = module_defn.select(f"{name}.out")
        return self._constant_cache.setdefault(key, out)
class DeclarationTransformer(LeafTransformer):
    """Transformer that produces the CoreIR declaration for a magma circuit.

    Resolves built-in library modules/generators directly, reuses cached
    modules, and otherwise declares a new module with the translated
    interface type (plus optional config params, namespace, debug info and
    metadata).
    """

    def __init__(self, backend, opts, decl):
        super().__init__(backend, opts)
        self.decl = decl
        self.coreir_module = None  # set by run_self()

    def run_self(self):
        self.coreir_module = self._run_self_impl()
        self._generate_symbols()

    def _generate_symbols(self):
        """Record magma-port -> CoreIR-port name mappings (opt-in)."""
        if not self.get_opt("generate_symbols", False):
            return
        if _is_generator(self.decl):
            # Generators defer symbol emission to an instance-time callback.
            return
        magma_names = list(self.decl.interface.ports.keys())
        # Port order is assumed to match between the magma interface and
        # the CoreIR module type.
        coreir_names = [k for k, _ in self.coreir_module.type.items()]
        assert len(magma_names) == len(coreir_names)
        for magma_name, coreir_name in zip(magma_names, coreir_names):
            self.opts.get("symbol_table").set_port_name(
                self.decl.name, magma_name, coreir_name)

    def _run_self_impl(self):
        """Return the CoreIR module/generator declaration for self.decl."""
        # NOTE: removed dead no-op `self.decl = self.decl` present previously.
        _logger.debug(f"Compiling declaration {self.decl}")
        if self.decl.coreir_lib is not None:
            self.backend.include_lib_or_libs(self.decl.coreir_lib)
        # These libraries are already available by default in coreir, so we
        # don't need declarations.
        if self.decl.coreir_lib in ["coreir", "corebit", "commonlib",
                                    "memory"]:
            lib = self.backend.get_lib(self.decl.coreir_lib)
            if not _is_generator(self.decl):
                return lib.modules[self.decl.coreir_name]
            return lib.generators[self.decl.coreir_name]
        try:
            coreir_module = self.backend.get_module(self.decl)
            _logger.debug(f"{self.decl} already compiled, skipping")
            return coreir_module
        except KeyError:
            pass
        if get_debug_mode():
            check_magma_interface(self.decl.interface)
        module_type = magma_interface_to_coreir_module_type(
            self.backend.context, self.decl.interface)
        if isinstance(self.decl.interface, InterfaceKind):
            module_type = self.backend.context.Flip(module_type)
        kwargs = {}
        if hasattr(self.decl, "coreir_config_param_types"):
            param_types = self.decl.coreir_config_param_types
            kwargs["cparams"] = make_cparams(self.backend.context, param_types)
        if hasattr(self.decl, "namespace"):
            # Allows users to choose namespace explicitly with
            #     class MyCircuit(m.Circuit):
            #         namespace = "foo"
            # overrides user_namespace setting
            namespace = self.backend.get_lib(self.decl.namespace)
        else:
            namespace = self.get_opt("user_namespace",
                                     self.backend.context.global_namespace)
        coreir_module = namespace.new_module(
            self.decl.coreir_name, module_type, **kwargs)
        if get_codegen_debug_info() and self.decl.debug_info:
            attach_debug_info(coreir_module, self.decl.debug_info)
        for key, value in self.decl.coreir_metadata.items():
            coreir_module.add_metadata(key, json.dumps(value))
        return coreir_module
| 41.534926 | 80 | 0.633105 | 18,961 | 0.839168 | 0 | 0 | 75 | 0.003319 | 0 | 0 | 2,466 | 0.109139 |
0e7cf013eb13eaa544422d0897234c6664f7cce5 | 5,371 | py | Python | src/encoded/tests/test_static_page.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 11 | 2016-11-23T02:33:13.000Z | 2021-06-18T14:21:20.000Z | src/encoded/tests/test_static_page.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 1,159 | 2016-11-21T15:40:24.000Z | 2022-03-29T03:18:38.000Z | src/encoded/tests/test_static_page.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 5 | 2017-01-27T16:36:15.000Z | 2019-06-14T14:39:54.000Z | import pytest
import webtest
from dcicutils.qa_utils import notice_pytest_fixtures
from .workbook_fixtures import app_settings, app # are these needed? -kmp 12-Mar-2021
notice_pytest_fixtures(app_settings, app)  # mark fixture imports as used for linters
# Every test in this module requires indexing and is expected to pass.
pytestmark = [pytest.mark.indexing, pytest.mark.working]
@pytest.fixture(scope='module')
def help_page_section_json():
    """JSON body for the static section backing the REST-API help page."""
    section = {
        "title": "",
        "name": "help.user-guide.rest-api.rest_api_submission",
        "file": "/docs/source/rest_api_submission.rst",
        "uuid": "442c8aa0-dc6c-43d7-814a-854af460b020",
    }
    return section
@pytest.fixture(scope='module')
def help_page_json():
    """JSON body for the public REST-API help page (TOC enabled)."""
    toc = {
        "enabled": True,
        "header-depth": 4,
        "list-styles": ["decimal", "lower-alpha", "lower-roman"],
    }
    return {
        "name": "help/user-guide/rest-api",
        "title": "The REST-API",
        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540d",
        "table-of-contents": toc,
    }
@pytest.fixture(scope='module')
def help_page_json_draft():
    """JSON body for a draft (non-public) variant of the help page."""
    toc = {
        "enabled": True,
        "header-depth": 4,
        "list-styles": ["decimal", "lower-alpha", "lower-roman"],
    }
    return {
        "name": "help/user-guide/rest-api-draft",
        "title": "The REST-API",
        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540c",
        "table-of-contents": toc,
        "status": "draft",
    }
@pytest.fixture(scope='module')
def help_page_json_deleted():
    """JSON body for a deleted variant of the help page."""
    toc = {
        "enabled": True,
        "header-depth": 4,
        "list-styles": ["decimal", "lower-alpha", "lower-roman"],
    }
    return {
        "name": "help/user-guide/rest-api-deleted",
        "title": "The REST-API",
        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540a",
        "table-of-contents": toc,
        "status": "deleted",
    }
@pytest.fixture(scope='module')
def posted_help_page_section(testapp, help_page_section_json):
    """Post the help-page static section, or fetch it if already present.

    The except branch handles re-runs against the same DB: a duplicate
    POST raises webtest.AppError, so we instead resolve the item via its
    uuid (301 redirect to the canonical path) and follow the redirect.
    """
    try:
        res = testapp.post_json('/static-sections/', help_page_section_json, status=201)
        val = res.json['@graph'][0]
    except webtest.AppError:
        res = testapp.get('/' + help_page_section_json['uuid'], status=301).follow()
        val = res.json
    return val
@pytest.fixture(scope='module')
def help_page(testapp, posted_help_page_section, help_page_json):
    """Create (or fetch, on re-run) the public help page used by GET tests."""
    try:
        res = testapp.post_json('/pages/', help_page_json, status=201)
        val = res.json['@graph'][0]
    except webtest.AppError:
        # Page already exists: resolve it via its uuid redirect instead.
        res = testapp.get('/' + help_page_json['uuid'], status=301).follow()
        val = res.json
    return val
@pytest.fixture(scope='module')
def help_page_deleted(testapp, posted_help_page_section, help_page_json_draft):
    """Create (or fetch) the help page with status "draft".

    NOTE(review): despite the name, this fixture posts the *draft* JSON,
    while help_page_restricted posts the "deleted" one -- consider
    renaming both for clarity.
    """
    try:
        res = testapp.post_json('/pages/', help_page_json_draft, status=201)
        val = res.json['@graph'][0]
    except webtest.AppError:
        # Page already exists: resolve it via its uuid redirect instead.
        res = testapp.get('/' + help_page_json_draft['uuid'], status=301).follow()
        val = res.json
    return val
@pytest.fixture(scope='module')
def help_page_restricted(testapp, posted_help_page_section, help_page_json_deleted):
    """Create (or fetch) the help page with status "deleted".

    NOTE(review): name/payload mismatch -- this uses the *deleted* JSON
    even though it is called "restricted"; see help_page_deleted.
    """
    try:
        res = testapp.post_json('/pages/', help_page_json_deleted, status=201)
        val = res.json['@graph'][0]
    except webtest.AppError:
        # Page already exists: resolve it via its uuid redirect instead.
        res = testapp.get('/' + help_page_json_deleted['uuid'], status=301).follow()
        val = res.json
    return val
def test_get_help_page(testapp, help_page):
    """An authenticated GET of the help page returns its metadata and TOC."""
    help_page_url = "/" + help_page['name']
    res = testapp.get(help_page_url, status=200)
    assert res.json['@id'] == help_page_url
    assert res.json['@context'] == help_page_url
    assert 'HelpPage' in res.json['@type']
    assert 'StaticPage' in res.json['@type']
    #assert res.json['content'] == help_page['content'] # No longer works latter is set to an @id of static_section
    assert 'Accession and uuid are automatically assigned during initial posting' in res.json['content'][0]['content'] # Instead lets check what we have embedded on GET request is inside our doc file (rest_api_submission.md).
    assert res.json['toc'] == help_page['table-of-contents']
def test_get_help_page_deleted(anonhtmltestapp, help_page_deleted):
    """Anonymous users get a 403 for a draft (non-public) help page."""
    page_path = "/" + help_page_deleted['name']
    anonhtmltestapp.get(page_path, status=403)
def test_get_help_page_no_access(anonhtmltestapp, testapp, help_page_restricted):
    """Restricted pages: anonymous access is forbidden, admin access works."""
    page_path = "/" + help_page_restricted['name']
    anonhtmltestapp.get(page_path, status=403)
    testapp.get(page_path, status=200)
def test_page_unique_name(testapp, help_page, help_page_deleted):
    """Page names must be unique, enforced on both POST and PATCH (422)."""
    # POST again with same name and expect validation error
    new_page = {'name': help_page['name']}
    res = testapp.post_json('/page', new_page, status=422)
    expected_val_err = "%s already exists with name '%s'" % (help_page['uuid'], new_page['name'])
    actual_error_description = res.json['errors'][0]['description']
    print("expected:", expected_val_err)
    print("actual:", actual_error_description)
    assert expected_val_err in actual_error_description
    # also test PATCH of an existing page with another name
    res = testapp.patch_json(help_page_deleted['@id'], {'name': new_page['name']}, status=422)
    assert expected_val_err in res.json['errors'][0]['description']
| 36.537415 | 225 | 0.661329 | 0 | 0 | 0 | 0 | 3,184 | 0.592813 | 0 | 0 | 1,772 | 0.32992 |
0e7d4d108567f466b35fc7926c86a921b3e00477 | 1,374 | py | Python | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | null | null | null | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | null | null | null | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | 1 | 2018-09-06T18:52:33.000Z | 2018-09-06T18:52:33.000Z | from django.http import Http404
from django.shortcuts import render
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.models import Review
from api.serializers import ReviewSerializer
class ReviewListView(APIView):
    """List the authenticated user's reviews and create new ones."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        """Return all reviews belonging to the requesting user."""
        reviews = request.user.reviews.all()
        return Response(ReviewSerializer(reviews, many=True).data)

    def post(self, request, *args, **kwargs):
        """Create a review for the requesting user, stamping their IP."""
        payload = request.data
        payload['ip_address'] = request.META.get('REMOTE_ADDR')
        serializer = ReviewSerializer(data=payload)
        if not serializer.is_valid():
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
        serializer.save(user=request.user)
        return Response(serializer.data, status.HTTP_201_CREATED)
class ReviewDetailView(APIView):
    """Retrieve a single review, visible only to its owner.

    NOTE(review): unlike ReviewListView, no permission_classes are set
    here; anonymous users fall through to the ownership check (403).
    """

    def get(self, request, *args, **kwargs):
        try:
            review = Review.objects.get(pk=kwargs['pk'])
        except Review.DoesNotExist:
            raise Http404
        if request.user != review.user:
            return Response({}, status.HTTP_403_FORBIDDEN)
        return Response(ReviewSerializer(review).data)
| 35.230769 | 76 | 0.703057 | 1,049 | 0.763464 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.021106 |
0e7dc78c76ad3448f8b2889d760c4e349ce77e58 | 1,429 | py | Python | test/connector/exchange/altmarkets/test_altmarkets_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 542 | 2021-12-17T22:34:31.000Z | 2022-03-31T14:36:23.000Z | test/connector/exchange/altmarkets/test_altmarkets_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 291 | 2021-12-17T20:07:53.000Z | 2022-03-31T11:07:23.000Z | test/connector/exchange/altmarkets/test_altmarkets_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 220 | 2021-12-17T12:41:23.000Z | 2022-03-31T23:03:22.000Z | #!/usr/bin/env python
import sys
import asyncio
import logging
import unittest
import conf
from os.path import join, realpath
from hummingbot.connector.exchange.altmarkets.altmarkets_user_stream_tracker import AltmarketsUserStreamTracker
from hummingbot.connector.exchange.altmarkets.altmarkets_auth import AltmarketsAuth
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
sys.path.insert(0, realpath(join(__file__, "../../../../../")))
logging.basicConfig(level=METRICS_LOG_LEVEL)
class AltmarketsUserStreamTrackerUnitTest(unittest.TestCase):
    """Live integration check of the Altmarkets user stream tracker.

    Requires valid credentials in the local `conf` module and network
    access; the test just listens for 30 seconds and prints whatever the
    tracker accumulated.
    """

    # Credentials are read at import time from the local conf module.
    api_key = conf.altmarkets_api_key
    api_secret = conf.altmarkets_secret_key

    @classmethod
    def setUpClass(cls):
        cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        cls.trading_pairs = ["BTC-USD"]
        cls.user_stream_tracker: AltmarketsUserStreamTracker = AltmarketsUserStreamTracker(
            altmarkets_auth=AltmarketsAuth(cls.api_key, cls.api_secret),
            trading_pairs=cls.trading_pairs)
        # Start the tracker on the loop; it keeps running while the test sleeps.
        cls.user_stream_tracker_task: asyncio.Task = safe_ensure_future(cls.user_stream_tracker.start())

    def test_user_stream(self):
        # Wait process some msgs.
        print("\nSleeping for 30s to gather some user stream messages.")
        self.ev_loop.run_until_complete(asyncio.sleep(30.0))
        print(self.user_stream_tracker.user_stream)
| 37.605263 | 111 | 0.773268 | 864 | 0.604619 | 0 | 0 | 462 | 0.323303 | 0 | 0 | 129 | 0.090273 |
0e7f4389c015b9220c6189155e2e9c20712acc78 | 209 | py | Python | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | 8 | 2022-03-22T10:12:03.000Z | 2022-03-28T17:44:08.000Z | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | null | null | null | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | null | null | null | import time
def log():
    """Emit a dummy log line to stdout once per second, forever."""
    counter = 0
    while True:
        print(f'time: {counter} \t log sent.')
        counter += 1
        time.sleep(1)
if __name__ == '__main__': log() | 17.416667 | 40 | 0.492823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.363636 |
0e80028221b4b905d8118772396dbaa3bc538fda | 1,568 | py | Python | appzoo/utils/streamlit_utils.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | 5 | 2020-11-05T12:13:45.000Z | 2021-11-19T12:26:49.000Z | appzoo/utils/streamlit_utils.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | null | null | null | appzoo/utils/streamlit_utils.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | 3 | 2020-11-23T23:06:34.000Z | 2021-04-18T02:12:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : StreamlitApp.
# @File : utils
# @Time : 2020/11/3 12:17 下午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description : https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py
import pandas as pd
import streamlit as st
from io import StringIO
def sidebar(st):
    """Render a demo radio widget in the Streamlit sidebar."""
    options = [1, 2]
    st.sidebar.radio('R:', options)
def file_uploader(st):
    """Render a file-upload widget; persist any upload to ./uploaded_file.

    Returns the local filename ("uploaded_file") when a file was
    uploaded; otherwise returns None (implicitly).
    """
    uploaded = st.file_uploader('File uploader')
    if uploaded is not None:
        with open("uploaded_file", "wb") as out:
            out.write(uploaded.read())
        return "uploaded_file"
def image_display(st, header2image=None, **kwargs):
    """Render images side by side, one per column, each under a subheader.

    Args:
        st: the streamlit module.
        header2image: sequence of (header, image) pairs; defaults to a
            single placeholder pair.
        **kwargs: forwarded to ``st.image`` (e.g. width, caption).
    """
    # Fixed: the original used a mutable list as the default argument,
    # which is shared across calls; use the None-sentinel idiom instead.
    if header2image is None:
        header2image = [('header', 'image')]
    image_num = len(header2image)
    cols = st.beta_columns(image_num)
    for col, (header, image) in zip(cols, header2image):
        with col:
            # st.header
            st.subheader(header)
            st.image(image, **kwargs)
| 30.153846 | 96 | 0.612883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 825 | 0.524809 |
0e810e0c87499969e85aa82e852af21b94d75db6 | 1,918 | py | Python | examples/no_ui/huobitest.py | tienjunhsu/vnpy | be18953fbe74c408682a3ffd48e5fa93e2386284 | [
"MIT"
] | null | null | null | examples/no_ui/huobitest.py | tienjunhsu/vnpy | be18953fbe74c408682a3ffd48e5fa93e2386284 | [
"MIT"
] | null | null | null | examples/no_ui/huobitest.py | tienjunhsu/vnpy | be18953fbe74c408682a3ffd48e5fa93e2386284 | [
"MIT"
] | null | null | null | import multiprocessing
from time import sleep
from datetime import datetime, time
from logging import INFO
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.gateway.hbdm import HbdmGateway
from vnpy.gateway.hbsdm import HbsdmGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Product,
Status,
OrderType,
Interval
)
from vnpy.trader.object import (
OrderRequest,
)
# Enable vnpy logging to the console at INFO level (file logging is
# switched on later inside main()).
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True
# CTP gateway credentials: username, password, broker id, trade server,
# market-data server, product name, auth code, product info.
# NOTE(review): ctp_setting is never used below — left over from the vnpy
# CTP template this script was adapted from; confirm before removing.
ctp_setting = {
    "用户名": "",
    "密码": "",
    "经纪商代码": "",
    "交易服务器": "",
    "行情服务器": "",
    "产品名称": "",
    "授权编码": "",
    "产品信息": ""
}
# Huobi gateway settings: API key/secret, session count, proxy host/port.
hb_setting = {
    "API Key": "",
    "Secret Key": "",
    "会话数": 3,
    "代理地址": "",
    "代理端口": ""
}
def main():
    """Bootstrap vnpy, connect the Huobi futures/swap gateways (HBDM and
    HBSDM) and start every configured CTA strategy.

    Blocks with fixed sleeps while gateways log in and strategies
    initialize; strategies then keep running on the engine threads.
    """
    SETTINGS["log.file"] = True

    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)
    # The gateways register themselves with the main engine; the returned
    # handles were unused, so they are no longer bound to locals.
    main_engine.add_gateway(HbdmGateway)
    main_engine.add_gateway(HbsdmGateway)
    cta_engine = main_engine.add_app(CtaStrategyApp)
    main_engine.write_log("主引擎创建成功")

    log_engine = main_engine.get_engine("log")
    event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
    main_engine.write_log("注册日志事件监听")

    main_engine.connect(hb_setting, "HBDM")
    main_engine.connect(hb_setting, "HBSDM")
    # Fixed: the original message said "连接CTP接口" (connecting the CTP
    # interface) although the Huobi gateways are connected here.
    main_engine.write_log("连接火币接口")
    sleep(10)  # give the gateways time to log in and query contracts

    cta_engine.init_engine()
    main_engine.write_log("CTA策略初始化完成")

    cta_engine.init_all_strategies()
    sleep(60)  # Leave enough time to complete strategy initialization
    main_engine.write_log("CTA策略全部初始化")

    cta_engine.start_all_strategies()
    main_engine.write_log("CTA策略全部启动")


if __name__ == "__main__":
    main()
| 22.045977 | 71 | 0.683003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.223823 |
0e83097f53c2e6c4632f27c5c21128930923b98d | 2,928 | py | Python | scripts/has_prebuilt.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | scripts/has_prebuilt.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | 2 | 2021-03-26T00:29:32.000Z | 2021-04-30T21:29:33.000Z | scripts/has_prebuilt.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to check if the package(s) have prebuilts.
The script must be run inside the chroot. The output is a json dict mapping the
package atoms to a boolean for whether a prebuilt exists.
"""
from __future__ import print_function
import json
import os
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib.parser import package_info
if cros_build_lib.IsInsideChroot():
from chromite.lib import depgraph
def GetParser():
  """Build and return the argument parser for this script."""
  parser = commandline.ArgumentParser(description=__doc__)
  # The board whose sysroot/binhost configuration should be consulted.
  parser.add_argument('-b', '--build-target', dest='build_target_name',
                      help='The build target that is being checked.')
  # Destination for the {atom: has_prebuilt} json result.
  parser.add_argument('--output', type='path', required=True,
                      help='The file path where the result json should be stored.')
  parser.add_argument('packages', nargs='+',
                      help='The package atoms that are being checked.')
  return parser
def _ParseArguments(argv):
  """Parse |argv|, validate it, and return the frozen options."""
  parser = GetParser()
  opts = parser.parse_args(argv)

  output_dir = os.path.dirname(opts.output)
  if not os.path.exists(output_dir):
    parser.error('Path containing the output file does not exist.')

  # Replace the raw package strings with parsed CPV objects, rejecting
  # any that do not yield a valid atom.
  parsed = []
  for raw_pkg in opts.packages:
    pkg = package_info.parse(raw_pkg)
    if not pkg.atom:
      parser.error('Invalid package atom: %s' % raw_pkg)
    parsed.append(pkg)
  opts.packages = parsed

  opts.Freeze()
  return opts
def main(argv):
  """Script entry point: report which packages have usable prebuilts.

  Resolves each requested atom to its best visible version, runs an
  emerge pretend pass through DepGraphGenerator, and writes a json dict
  mapping atom -> bool to the --output path.
  """
  opts = _ParseArguments(argv)
  cros_build_lib.AssertInsideChroot()
  board = opts.build_target_name
  # Map each atom to its best visible CPV so we can query emerge for the
  # exact version a build would install.
  bests = {}
  for cpv in opts.packages:
    bests[cpv.atom] = portage_util.PortageqBestVisible(cpv.atom, board=board)
  # Emerge args:
  # g: use binpkgs (needed to find if we have one)
  # u: update packages to latest version (want updates to invalidate binpkgs)
  # D: deep -- consider full tree rather that just immediate deps
  #    (changes in dependencies and transitive deps can invalidate a binpkg)
  # N: Packages with changed use flags should be considered
  #    (changes in dependencies and transitive deps can invalidate a binpkg)
  # q: quiet (simplifies output)
  # p: pretend (don't actually install it)
  args = ['-guDNqp', '--with-bdeps=y', '--color=n']
  if board:
    args.append('--board=%s' % board)
  # Pin each package to the exact best-visible version found above.
  args.extend('=%s' % best.cpf for best in bests.values())
  generator = depgraph.DepGraphGenerator()
  generator.Initialize(args)
  results = {}
  for atom, best in bests.items():
    results[atom] = generator.HasPrebuilt(best.cpf)
  osutils.WriteFile(opts.output, json.dumps(results))
| 28.705882 | 79 | 0.705601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,309 | 0.447063 |
0e83218e9a9ab8e19a0dc8d1f227f7ad4339d871 | 23,057 | py | Python | horizon/operational_mgmt/inventory/views.py | open-power-ref-design-toolkit/opsmgr | ce9d5dc8a4038f22302a168288a6a4f6683dcd45 | [
"Apache-2.0"
] | 5 | 2017-05-10T00:32:37.000Z | 2019-08-21T09:32:01.000Z | horizon/operational_mgmt/inventory/views.py | open-power-ref-design-toolkit/opsmgr | ce9d5dc8a4038f22302a168288a6a4f6683dcd45 | [
"Apache-2.0"
] | null | null | null | horizon/operational_mgmt/inventory/views.py | open-power-ref-design-toolkit/opsmgr | ce9d5dc8a4038f22302a168288a6a4f6683dcd45 | [
"Apache-2.0"
] | 3 | 2017-05-10T00:32:40.000Z | 2018-10-16T19:18:18.000Z | # Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
import logging
from operational_mgmt import resource
from operational_mgmt.inventory import forms as project_forms
from operational_mgmt.inventory import tables as project_tables
from operational_mgmt.inventory import tabs as inventoryRacks_tabs
import opsmgr.inventory.rack_mgr as rack_mgr
import opsmgr.inventory.resource_mgr as resource_mgr
class IndexView(tabs.TabbedTableView):
    """Top-level inventory page: tabbed table view of racks/resources."""
    tab_group_class = inventoryRacks_tabs.InventoryRacksTabs
    table_class = project_tables.ResourcesTable
    template_name = 'op_mgmt/inventory/index.html'
    page_title = _("Inventory")
class DetailView(tabs.TabView):
    """Detail page for a single resource, identified by kwarg 'resource_id'."""
    tab_group_class = inventoryRacks_tabs.ResourceDetailTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ resource.name|default:resource.id }}"

    def get_context_data(self, **kwargs):
        """Add the resource, return URL and row actions to the context."""
        context = super(DetailView, self).get_context_data(**kwargs)
        resource = self.get_data()
        table = project_tables.ResourcesTable(self.request)
        context["resource"] = resource
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(resource)
        return context

    @memoized.memoized_method
    def get_data(self):
        """Look up the resource via resource_mgr; None (implicitly) on error.

        Errors are surfaced to the user with horizon messages/exceptions
        and logged; callers must tolerate a None result.
        """
        try:
            __method__ = 'views.DetailView.get_data'
            failure_message = str("Unable to retrieve detailed data for the resource")
            if "resource_id" in self.kwargs:
                try:
                    (rc, result_dict) = resource_mgr.list_resources(
                        None, False, None, [self.kwargs['resource_id']])
                except Exception as e:
                    logging.error("%s: Exception received trying to retrieve"
                                  " resource information. Exception is: %s",
                                  __method__, e)
                    exceptions.handle(self.request, failure_message)
                    return
            else:
                # This is unexpected. Resource details called with no context
                # of what to display. Need to display an error message because the
                # view will not be primed with required data
                logging.error("%s: DetailView called with no resource id"
                              " context.", __method__)
                messages.error(self.request, failure_message)
                return
            if rc != 0:
                messages.error(self.request, failure_message)
                return
            else:
                # We should have at least one resource in the results...just
                # return the first value
                if len(result_dict['resources']) > 0:
                    return resource.Resource(result_dict['resources'][0])
                else:
                    logging.error("%s: list_resources returned no information for"
                                  " resource with resource id %s",
                                  __method__, self.kwargs["resource_id"])
                    messages.error(self.request, failure_message)
                    return
        except Exception:
            # Catch-all: redirect back to the index with a generic error.
            redirect = self.get_redirect_url()
            exceptions.handle(self.request,
                              _('Unable to retrieve resource details.'),
                              redirect=redirect)

    def get_redirect_url(self):
        """Return the inventory index URL used as the fallback target."""
        return reverse('horizon:op_mgmt:inventory:index')

    def get_tabs(self, request, *args, **kwargs):
        """Build the tab group primed with the resolved resource."""
        resource = self.get_data()
        return self.tab_group_class(request, resource=resource, **kwargs)
class AddResourceView(forms.ModalFormView):
    """Modal dialog to add a resource to the rack given by kwarg 'rack_id'."""
    template_name = 'op_mgmt/inventory/addResource.html'
    modal_header = _("Add Resource")
    form_id = "add_resource_form"
    form_class = project_forms.AddResourceForm
    submit_label = _("Add Resource")
    submit_url = "horizon:op_mgmt:inventory:addResource"
    success_url = reverse_lazy('horizon:op_mgmt:inventory:index')
    page_title = _("Add Resource")

    def get_initial(self):
        """Seed the form with the target rack's id and label."""
        # Need the rack for the active tab
        rack = self.get_object()
        # Prime the rack information on our AddResourceForm
        if rack:
            return {'rackid': rack['rackid'],
                    'rack_label': rack['label']}
        else:
            return

    @memoized.memoized_method
    def get_object(self):
        """Look up the rack via rack_mgr; None (implicitly) on any error."""
        __method__ = 'views.AddResourceView.get_object'
        # NOTE(review): the concatenation below yields a double space in
        # the user-visible message ("information  for").
        failure_message = str("Unable to retrieve rack information " +
                              " for the resource being added.")
        if "rack_id" in self.kwargs:
            try:
                (rc, result_dict) = rack_mgr.list_racks(
                    None, False, [self.kwargs["rack_id"]])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve rack"
                              " information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. AddResourceView called with no context of
            # what rack the resource is being added. Need to display an error
            # message because the dialog will not be primed with required data
            logging.error("%s: AddResourceView called with no rack id"
                          " context.", __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one rack in the results...just return
            # the first value
            if len(result_dict['racks']) > 0:
                return result_dict['racks'][0]
            else:
                logging.error("%s: list_rack returned no information for"
                              " rack with rack id %s",
                              __method__, self.kwargs["rack_id"])
                messages.error(self.request, failure_message)
                return

    def get_context_data(self, **kwargs):
        """Build the rack-specific submit URL into the template context."""
        # place the rack id onto the submit url
        context = super(AddResourceView, self).get_context_data(**kwargs)
        args = (self.get_object()['rackid'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context
class EditResourceView(forms.ModalFormView):
    """Modal dialog to edit the resource given by kwarg 'resource_id'."""
    template_name = 'op_mgmt/inventory/editResource.html'
    modal_header = _("Edit Resource")
    form_id = "edit_resource_form"
    form_class = project_forms.EditResourceForm
    submit_label = _("Edit Resource")
    submit_url = "horizon:op_mgmt:inventory:editResource"
    success_url = reverse_lazy('horizon:op_mgmt:inventory:index')
    page_title = _("Edit Resource")

    def get_initial(self):
        """Seed the form with the current values of the resource."""
        # Need the resource being edited
        selected_resource = self.get_object()
        if selected_resource:
            return {'label': selected_resource['label'],
                    'auth_method': selected_resource['auth_method'],
                    'rackid': selected_resource['rackid'],
                    'eiaLocation': selected_resource['rack-eia-location'],
                    'ip_address': selected_resource['ip-address'],
                    'userID': selected_resource['userid'],
                    'resourceId': selected_resource['resourceid']}
        else:
            return

    @memoized.memoized_method
    def get_object(self):
        """Look up the resource via resource_mgr; None (implicitly) on error."""
        __method__ = 'views.EditResourceView.get_object'
        failure_message = str("Unable to retrieve resource information" +
                              " for the resource being edited.")
        if "resource_id" in self.kwargs:
            try:
                (rc, result_dict) = resource_mgr.list_resources(
                    None, False, None, [self.kwargs['resource_id']])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve"
                              " resource information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. EditResourceView called with no context
            # of what to edit. Need to display an error message because the
            # dialog will not be primed with required data
            logging.error("%s: EditResourceView called with no resource id"
                          " context.", __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one resource in the results...just
            # return the first value
            if len(result_dict['resources']) > 0:
                return result_dict['resources'][0]
            else:
                logging.error("%s: list_resources returned no information for"
                              " resource with resource id %s",
                              __method__, self.kwargs["resource_id"])
                messages.error(self.request, failure_message)
                return

    def get_context_data(self, **kwargs):
        """Build the resource-specific submit URL into the template context."""
        # place the resource id on to the submit url
        context = super(EditResourceView, self).get_context_data(**kwargs)
        args = (self.get_object()['resourceid'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context
class ChangePasswordView(forms.ModalFormView):
    """Modal dialog to change the stored password for a resource."""
    template_name = 'op_mgmt/inventory/changePassword.html'
    modal_header = _("Change Password")
    form_id = "change_password_form"
    form_class = project_forms.ChangePasswordForm
    submit_label = _("Change Password")
    submit_url = "horizon:op_mgmt:inventory:changePassword"
    success_url = reverse_lazy('horizon:op_mgmt:inventory:index')
    page_title = _("Change Password")

    # Keep password fields out of Django error reports / tracebacks.
    @method_decorator(sensitive_post_parameters('password',
                                                'confirm_password'))
    def dispatch(self, *args, **kwargs):
        return super(ChangePasswordView, self).dispatch(*args, **kwargs)

    def get_initial(self):
        """Seed the form with the resource's label, user id and id."""
        # Need the resource and user information to prime the dialog
        selected_resource = self.get_object()
        if selected_resource:
            return {'label': selected_resource['label'],
                    'userID': selected_resource['userid'],
                    'resourceid': selected_resource['resourceid']}
        else:
            return

    @memoized.memoized_method
    def get_object(self):
        """Look up the resource via resource_mgr; None (implicitly) on error."""
        __method__ = 'views.ChangePasswordView.get_object'
        failure_message = str("Unable to retrieve resource information for" +
                              " the resource having password changed.")
        if "resource_id" in self.kwargs:
            try:
                (rc, result_dict) = resource_mgr.list_resources(
                    None, False, None, [self.kwargs['resource_id']])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve"
                              " resource information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. ChangePasswordView called with no context
            # of what to edit. Need to display an error message because the
            # dialog will not be primed with required data
            logging.error("%s: ChangePasswordView called with no context.",
                          __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one resource in the results...just
            # return the first value
            if len(result_dict['resources']) > 0:
                return result_dict['resources'][0]
            else:
                logging.error("%s: list_resources returned no information for"
                              " resource with resource id %s",
                              __method__, self.kwargs["resource_id"])
                messages.error(self.request, failure_message)
                return

    def get_context_data(self, **kwargs):
        """Build the resource-specific submit URL into the template context."""
        # place the resource id on the submit url
        context = super(ChangePasswordView, self).get_context_data(**kwargs)
        args = (self.get_object()['resourceid'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context
class EditRackView(forms.ModalFormView):
    """Modal dialog to edit the rack given by kwarg 'rack_id'."""
    template_name = 'op_mgmt/inventory/editRack.html'
    modal_header = _("Edit Rack Details")
    form_id = "edit_rack"
    form_class = project_forms.EditRackForm
    submit_label = _("Edit Rack")
    submit_url = "horizon:op_mgmt:inventory:editRack"
    success_url = reverse_lazy("horizon:op_mgmt:inventory:index")

    def get_initial(self):
        """Seed the form with the current values of the rack."""
        # Need the rack being edited
        rack = self.get_object()
        if rack:
            return {'rack_id': rack['rackid'],
                    'label': rack['label'],
                    'data_center': rack['data-center'],
                    'room': rack['room'],
                    'row': rack['row'],
                    'notes': rack['notes']}
        else:
            return

    @memoized.memoized_method
    def get_object(self):
        """Look up the rack via rack_mgr; None (implicitly) on any error."""
        __method__ = 'views.EditRackView.get_object'
        failure_message = str("Unable to retrieve rack information" +
                              " for the rack being edited.")
        if "rack_id" in self.kwargs:
            try:
                (rc, result_dict) = rack_mgr.list_racks(
                    None, False, [self.kwargs["rack_id"]])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve"
                              " rack information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. EditRackView called with no context
            # of what to edit. Need to display an error message because
            # the dialog will not be primed with required data
            logging.error("%s: EditRackView called with no context.",
                          __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one resource in the results...just
            # return the first value
            if len(result_dict['racks']) > 0:
                return result_dict['racks'][0]
            else:
                logging.error("%s: list_racks returned no information for"
                              " rack with rack id %s",
                              __method__, self.kwargs["rack_id"])
                messages.error(self.request, failure_message)
                return

    def get_context_data(self, **kwargs):
        """Build the rack-specific submit URL into the template context."""
        # place the rack id on to the submit url
        context = super(EditRackView, self).get_context_data(**kwargs)
        args = (self.get_object()['rackid'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context
class RemoveRackView(forms.ModalFormView):
    """Confirmation modal for removing the rack given by kwarg 'rack_id'."""
    template_name = 'op_mgmt/inventory/removeRack.html'
    modal_header = _("Confirm Remove Rack")
    form_id = "remove_rack_form"
    form_class = project_forms.RemoveRackForm
    submit_label = _("Remove Rack")
    submit_url = "horizon:op_mgmt:inventory:removeRack"
    success_url = reverse_lazy("horizon:op_mgmt:inventory:index")
    page_title = _("Confirm Remove Rack")

    def get_initial(self):
        """Seed the form with the rack's id and label."""
        # Need the rack being edited
        rack = self.get_object()
        if rack:
            return {'rack_id': rack['rackid'],
                    'label': rack['label']}
        else:
            return

    @memoized.memoized_method
    def get_object(self):
        """Look up the rack via rack_mgr; None (implicitly) on any error."""
        __method__ = 'views.RemoveRackView.get_object'
        failure_message = str("Unable to retrieve rack information" +
                              " for the rack being removed.")
        if "rack_id" in self.kwargs:
            try:
                (rc, result_dict) = rack_mgr.list_racks(
                    None, False, [self.kwargs["rack_id"]])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve"
                              " rack information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. RemoveRackView called with no context
            # of what to edit. Need to display an error message because
            # the dialog will not be primed with required data
            logging.error("%s: RemoveRackView called with no context.",
                          __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one resource in the results...just
            # return the first value
            if len(result_dict['racks']) > 0:
                return result_dict['racks'][0]
            else:
                logging.error("%s: list_racks returned no information for"
                              " rack with rack id %s",
                              __method__, self.kwargs["rack_id"])
                messages.error(self.request, failure_message)
                return

    def get_context_data(self, **kwargs):
        """Build the rack-specific submit URL into the template context."""
        # place the rack id on to the submit url
        context = super(RemoveRackView, self).get_context_data(**kwargs)
        args = (self.get_object()['rackid'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context
class RemoveResourcesView(tables.DataTableView, forms.ModalFormView):
    """Modal listing the resources of a rack so the user can remove them."""
    table_class = project_tables.RemoveResourcesTable
    modal_header = _("Remove Resources")
    modal_id = "remove_resources_modal"
    template_name = 'op_mgmt/inventory/removeResources.html'
    submit_url = reverse_lazy('horizon:op_mgmt:inventory:index')
    submit_label = _("Close")
    page_title = _("Remove Resources")

    @memoized.memoized_method
    def get_object(self):
        """Look up the rack via rack_mgr; None (implicitly) on any error."""
        __method__ = 'views.RemoveResourcesView.get_object'
        failure_message = str("Unable to retrieve rack information" +
                              " for the resources being removed.")
        if "rack_id" in self.kwargs:
            try:
                (rc, result_dict) = rack_mgr.list_racks(
                    None, False, [self.kwargs["rack_id"]])
            except Exception as e:
                logging.error("%s: Exception received trying to retrieve"
                              " resource information. Exception is: %s",
                              __method__, e)
                exceptions.handle(self.request, failure_message)
                return
        else:
            # This is unexpected. RemoveResourcesView called with no context
            # of what to edit. Need to display an error message because
            # the dialog will not be primed with required data
            logging.error("%s: RemoveResourcesView called with no context.",
                          __method__)
            messages.error(self.request, failure_message)
            return
        if rc != 0:
            messages.error(self.request, failure_message)
            return
        else:
            # We should have at least one rack in the results...just
            # return the first value
            if len(result_dict['racks']) > 0:
                return result_dict['racks'][0]
            else:
                logging.error("%s: list_racks returned no information for"
                              " rack with rack id %s",
                              __method__, self.kwargs["rack_id"])
                messages.error(self.request, failure_message)
                return

    # Used to populate the table of resources to remove (for the given rack)
    def get_data(self):
        """Return the rack's resources as Resource objects ([] on error)."""
        __method__ = 'views.RemoveResourcesView.get_data'
        resources = []
        rack_id = int(self.kwargs["rack_id"])
        logging.debug("%s: before retrieving resources for rack: %s",
                      __method__, rack_id)
        # retrieve resources for the rack id passed in (rack_id may be -1 on
        # the initial pass)
        (rc, result_dict) = resource_mgr.list_resources(None, False, None,
                                                        None, False, False,
                                                        [rack_id])
        if rc != 0:
            messages.error(self.request, _('Unable to retrieve Operational'
                                           ' Management inventory information'
                                           ' for resources.'))
            logging.error('%s: Unable to retrieve Operational Management'
                          'inventory information. A Non-0 return code returned'
                          ' from resource_mgr.list_resources. The return code'
                          ' is: %s', __method__, rc)
        else:
            all_resources = result_dict['resources']
            for raw_resource in all_resources:
                resources.append(resource.Resource(raw_resource))
        logging.debug("%s: Found %s resources",
                      __method__, len(resources))
        return resources
| 42.856877 | 86 | 0.585202 | 21,645 | 0.93876 | 0 | 0 | 13,068 | 0.566769 | 0 | 0 | 7,558 | 0.327796 |
0e84e4b1430330a3daa5f05da337c132509ce937 | 15,547 | py | Python | umbra/controller.py | galgeek/umbra | 1a9a39606856b30f17885518dba8436c8909e0a2 | [
"Apache-2.0"
] | 48 | 2015-01-15T15:49:07.000Z | 2021-07-03T17:10:33.000Z | umbra/controller.py | galgeek/umbra | 1a9a39606856b30f17885518dba8436c8909e0a2 | [
"Apache-2.0"
] | 21 | 2015-07-30T23:03:08.000Z | 2020-05-24T17:17:54.000Z | umbra/controller.py | galgeek/umbra | 1a9a39606856b30f17885518dba8436c8909e0a2 | [
"Apache-2.0"
] | 23 | 2015-01-16T15:22:57.000Z | 2021-07-14T13:38:47.000Z | #!/usr/bin/env python
# vim: set sw=4 et:
import logging
import json
import time
import threading
import kombu
import socket
from brozzler.browser import BrowserPool, BrowsingException
import brozzler
import urlcanon
class AmqpBrowserController:
"""
Consumes amqp messages representing requests to browse urls, from the
specified amqp queue (default: "urls") on the specified amqp exchange
(default: "umbra"). Incoming amqp message is a json object with 3
attributes:
{
"clientId": "umbra.client.123",
"url": "http://example.com/my_fancy_page",
"behaviorParameters": {"some":"parameter","another":"thing"},
"metadata": {"arbitrary":"fields", "etc":4}
}
"url" is the url to browse.
"clientId" uniquely identifies the client of umbra. Umbra uses the clientId
as the amqp routing key, to direct information via amqp back to the client.
It sends this information on the same specified amqp exchange (default:
"umbra").
"behaviorParameters" are used to populate the javascript behavior template.
Each url requested in the browser is published to amqp this way. The
outgoing amqp message is a json object:
{
"url": "http://example.com/images/embedded_thing.jpg",
"method": "GET",
"headers": {"User-Agent": "...", "Accept": "...", ...},
"parentUrl": "http://example.com/my_fancy_page",
"parentUrlMetadata": {"arbitrary":"fields", "etc":4, ...}
}
POST requests have an additional field, postData.
"""
logger = logging.getLogger(__module__ + "." + __qualname__)
    def __init__(self, amqp_url='amqp://guest:guest@localhost:5672/%2f',
            chrome_exe='chromium-browser', max_active_browsers=1,
            queue_name='urls', exchange_name='umbra', routing_key='urls'):
        """Store connection/queue settings and create the browser pool.

        max_active_browsers bounds both the pool size and the AMQP
        prefetch count set later in _consume_amqp().
        """
        self.amqp_url = amqp_url
        self.queue_name = queue_name
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.max_active_browsers = max_active_browsers
        # Certificate errors are ignored so archival browsing is not
        # blocked by sites with broken TLS.
        self._browser_pool = BrowserPool(
                size=max_active_browsers, chrome_exe=chrome_exe,
                ignore_cert_errors=True)
    def start(self):
        """Initialize shared state and launch the AMQP consumer thread."""
        # Tracks the per-page browsing threads spawned by the consumer.
        self._browsing_threads = set()
        self._browsing_threads_lock = threading.Lock()
        self._exchange = kombu.Exchange(name=self.exchange_name, type='direct',
                durable=True)
        self._reconnect_requested = False
        # Producer (publishing requests back to clients) is shared between
        # browsing threads, hence the lock around its use.
        self._producer = None
        self._producer_lock = threading.Lock()
        with self._producer_lock:
            self._producer_conn = kombu.Connection(self.amqp_url)
            self._producer = self._producer_conn.Producer(serializer='json')
        self._consumer_thread = threading.Thread(target=self._consume_amqp, name='AmqpConsumerThread')
        self._consumer_stop = threading.Event()
        self._consumer_thread.start()
    def shutdown(self):
        """Graceful shutdown: signal the consumer to stop, then wait for it.

        In-flight browsing is allowed to finish (contrast shutdown_now).
        """
        self.logger.info("shutting down amqp consumer {}".format(self.amqp_url))
        self._consumer_stop.set()
        self._consumer_thread.join()
    def shutdown_now(self):
        """Abrupt shutdown: interrupt active browsing threads, then wait.

        Raises ShutdownRequested asynchronously inside each live browsing
        thread so browse_page() aborts promptly.
        """
        self.logger.info("shutting down amqp consumer %s", self.amqp_url)
        self._consumer_stop.set()
        with self._browsing_threads_lock:
            for th in self._browsing_threads:
                if th.is_alive():
                    brozzler.thread_raise(th, brozzler.ShutdownRequested)
        # self._browser_pool.shutdown_now()
        self._consumer_thread.join()
    def reconnect(self, *args, **kwargs):
        """Request an AMQP reconnect and tear down the browser pool.

        The consumer loop notices _reconnect_requested and reopens the
        connection; extra args are accepted so this can be a signal handler.
        """
        self._reconnect_requested = True
        self._browser_pool.shutdown_now()
    def _wait_for_and_browse_urls(self, conn, consumer, timeout):
        """Main consume loop: pair each incoming AMQP message with a browser.

        Acquires a browser from the pool, installs a one-shot callback on
        the consumer, and drains events until a message arrives or until
        stop/timeout/reconnect is requested. Runs for at most `timeout`
        seconds (the caller reconnects afterwards).
        """
        start = time.time()
        browser = None
        while not self._consumer_stop.is_set() and time.time() - start < timeout and not self._reconnect_requested:
            try:
                # raises NoBrowsersAvailable if the pool is exhausted
                browser = self._browser_pool.acquire()

                def callback(body, message):
                    """Decode one browse request and hand it to a new thread."""
                    try:
                        client_id = body.get('clientId')
                        url = body['url']
                        metadata = body.get('metadata')
                        behavior_parameters = body.get('behaviorParameters')
                        username = body.get('username')
                        password = body.get('password')
                    except:
                        # Bad message: reject it and return the browser so it
                        # is not leaked from the pool.
                        self.logger.error("unable to decipher message %s",
                                          message, exc_info=True)
                        self.logger.error("discarding bad message")
                        message.reject()
                        browser.stop()
                        self._browser_pool.release(browser)
                        return
                    self._start_browsing_page(
                            browser, message, client_id, url, metadata,
                            behavior_parameters, username, password)

                consumer.callbacks = [callback]

                while True:
                    try:
                        conn.drain_events(timeout=0.5)
                        break # out of "while True" to acquire another browser
                    except socket.timeout:
                        pass
                    except socket.error:
                        self.logger.error("problem consuming messages from AMQP, will try reconnecting after active browsing finishes", exc_info=True)
                        self._reconnect_requested = True

                    if self._consumer_stop.is_set() or time.time() - start >= timeout or self._reconnect_requested:
                        # Give back the unused browser before bailing out.
                        browser.stop()
                        self._browser_pool.release(browser)
                        break
            except brozzler.browser.NoBrowsersAvailable:
                # no browsers available
                time.sleep(0.5)
            except:
                self.logger.critical("problem with browser initialization", exc_info=True)
                time.sleep(0.5)
            finally:
                # Detach the callback so a stale closure over `browser` is
                # never invoked for the next message.
                consumer.callbacks = None
    def _wait_for_active_browsers(self):
        """Block (polling every 0.5s) until all browsing threads finish."""
        self.logger.info("waiting for browsing threads to finish")
        while True:
            with self._browsing_threads_lock:
                if len(self._browsing_threads) == 0:
                    break
            time.sleep(0.5)
        self.logger.info("active browsing threads finished")
    def _consume_amqp(self):
        """Consumer-thread body: (re)connect to AMQP and consume until stopped.

        Each connection is used for at most RECONNECT_AFTER_SECONDS, then
        closed and reopened (see the XXX note below). Any exception causes
        a short sleep and a reconnect attempt.
        """
        # XXX https://webarchive.jira.com/browse/ARI-3811
        # After running for some amount of time (3 weeks in the latest case),
        # consumer looks normal but doesn't consume any messages. Not clear if
        # it's hanging in drain_events() or not. As a temporary measure for
        # mitigation (if it works) or debugging (if it doesn't work), close and
        # reopen the connection every 2.5 hours
        RECONNECT_AFTER_SECONDS = 150 * 60

        url_queue = kombu.Queue(self.queue_name, exchange=self._exchange, routing_key=self.routing_key)

        while not self._consumer_stop.is_set():
            try:
                self.logger.info("connecting to amqp exchange={} at {}".format(self._exchange.name, self.amqp_url))
                self._reconnect_requested = False
                with kombu.Connection(self.amqp_url) as conn:
                    # Limit unacked deliveries to the number of browsers so we
                    # never take more work than we can run concurrently.
                    conn.default_channel.basic_qos(
                            prefetch_count=self.max_active_browsers,
                            prefetch_size=0, a_global=False)
                    with conn.Consumer(url_queue) as consumer:
                        self._wait_for_and_browse_urls(
                                conn, consumer, timeout=RECONNECT_AFTER_SECONDS)

                    # need to wait for browsers to finish here, before closing
                    # the amqp connection, because they use it to do
                    # message.ack() after they finish browsing a page
                    self._wait_for_active_browsers()
            except BaseException as e:
                self.logger.error("caught exception {}".format(e), exc_info=True)
                time.sleep(0.5)
                self.logger.error("attempting to reopen amqp connection")
    def _start_browsing_page(
            self, browser, message, client_id, url, parent_url_metadata,
            behavior_parameters=None, username=None, password=None):
        """Browse `url` with `browser` on a dedicated thread.

        A payload is published to the AMQP exchange (routing key
        `client_id`) for each resource the browser fetches, and `message`
        is acked/rejected/requeued/republished depending on how the browse
        ends. The thread is tracked in self._browsing_threads until done.
        """
        def on_response(chrome_msg):
            # Publish one AMQP payload per fetched resource. Skip data: URLs
            # and responses served from the browser's disk cache.
            if (chrome_msg['params']['response']['url'].lower().startswith('data:')
                    or chrome_msg['params']['response']['fromDiskCache']):
                return
            request_headers = chrome_msg['params']['response'].get('requestHeaders', {})
            payload = {
                'url': chrome_msg['params']['response']['url'],
                'headers': request_headers,
                'parentUrl': url,
                'parentUrlMetadata': parent_url_metadata,
            }
            # Determine the request method from whichever form Chrome provided.
            if ':method' in request_headers:
                # happens when http transaction is http 2.0
                payload['method'] = request_headers[':method']
            elif 'requestHeadersText' in chrome_msg['params']['response']:
                # HTTP/1.x: the method is the first token of the raw request line.
                req = chrome_msg['params']['response']['requestHeadersText']
                payload['method'] = req[:req.index(' ')]
            else:
                self.logger.warn('unable to identify http method (assuming GET) chrome_msg=%s',
                        chrome_msg)
                payload['method'] = 'GET'
            self.logger.debug(
                'sending to amqp exchange=%s routing_key=%s payload=%s',
                self.exchange_name, client_id, payload)
            # The producer connection is shared between threads; serialize use.
            with self._producer_lock:
                publish = self._producer_conn.ensure(self._producer,
                        self._producer.publish)
                publish(payload, exchange=self._exchange, routing_key=client_id)
        def post_outlinks(outlinks=None):
            # Publish each discovered outlink to the exchange. Currently only
            # referenced from commented-out code below (see AITFIVE-1295).
            def prune_outlinks(dirty_links, block_list=None):
                '''
                Filter for valid schemes, remove URL fragments, and drop any other designated URLs from the list.
                '''
                links = set()
                dirty_links = set(dirty_links)
                self.logger.info('Pruning links...')
                for link in dirty_links:
                    link = urlcanon.parse_url(link)
                    if link.scheme in (b'http', b'https', b'ftp'):
                        urlcanon.canon.remove_fragment(link)
                        link = str(link).strip()
                        links.add(link)
                self.logger.info('Pruning complete.')
                # Need to remove after link fragments have been removed to prevent duplication.
                self.logger.info('Removing Links: %s', ', '.join(block_list))
                links = links.difference(block_list)
                return links
            # Drop the page's own URL so it is not re-enqueued.
            outlinks = prune_outlinks(outlinks, {url})
            self.logger.info('Posting Outlinks:\n\t%s', '\n\t'.join(sorted(outlinks)))
            for link in outlinks:
                # Each of these payload fields are required by AMQPUrlReceiver.java
                #+ in Heritrix.
                payload = {
                    'url': link,
                    'headers': {},
                    'parentUrl': url,
                    'parentUrlMetadata': parent_url_metadata,
                    'method': 'GET',
                }
                self.logger.debug(
                    'sending outlink to amqp exchange=%s routing_key=%s payload=%s',
                    self.exchange_name, client_id, payload)
                with self._producer_lock:
                    publish = self._producer_conn.ensure(self._producer,
                            self._producer.publish)
                    publish(payload, exchange=self._exchange, routing_key=client_id)
        def browse_page_sync():
            # Run the browse synchronously and map its outcome onto the AMQP
            # message lifecycle (ack / reject / requeue / republish).
            self.logger.info(
                'browser=%s client_id=%s url=%s behavior_parameters=%s',
                browser, client_id, url, behavior_parameters)
            try:
                browser.start()
                final_page_url, outlinks = browser.browse_page(
                    url, on_response=on_response,
                    behavior_parameters=behavior_parameters,
                    username=username, password=password)
                # Temporarily commenting out for https://webarchive.jira.com/browse/AITFIVE-1295
                #post_outlinks(outlinks)
                message.ack()
            except brozzler.PageInterstitialShown as e:
                # Likely unsupported HTTP auth; drop the message.
                self.logger.info("page interstitial shown, likely unsupported http auth, for url {} - {}".format(url, e))
                message.reject()
            except brozzler.ShutdownRequested as e:
                self.logger.info("browsing did not complete normally, requeuing url {} - {}".format(url, e))
                message.requeue() # republish?
            except BrowsingException as e:
                self.logger.warn("browsing did not complete normally, republishing url {} - {}".format(url, e))
                republish_amqp(self, message)
            except:
                # Unknown failure; the browser process may be gone.
                self.logger.critical("problem browsing page, republishing url {}, may have lost browser process".format(url), exc_info=True)
                republish_amqp(self, message)
            finally:
                # Always return the browser to the pool, even on failure.
                browser.stop()
                self._browser_pool.release(browser)
        def republish_amqp(self, message):
            # republish on exception, not requeue!
            # Re-publishes the message body with a bumped retry counter and
            # gives up silently after max_retries (message is acked either way).
            # NOTE(review): payloads without a 'metadata' key are republished
            # with no retry cap -- confirm that is intended.
            payload = json.loads(to_str(message.body))
            message.ack()
            max_retries = 5
            if 'metadata' in payload:
                if not 'retries' in payload['metadata']:
                    payload['metadata']['retries'] = 1
                else:
                    if payload['metadata']['retries'] >= max_retries:
                        return
                    payload['metadata']['retries'] += 1
            self.logger.debug(
                're-publishing url to amqp exchange=%s routing_key=%s payload=%s',
                self.exchange_name, self.routing_key, payload)
            with self._producer_lock:
                publish = self._producer_conn.ensure(self._producer,
                        self._producer.publish)
                publish(payload, exchange=self._exchange, routing_key=self.routing_key)
        def to_str(bytes_or_str):
            # Decode AMQP message bodies that arrive as bytes.
            if isinstance(bytes_or_str, bytes):
                value = bytes_or_str.decode() # uses 'utf-8' for encoding
            else:
                value = bytes_or_str
            return value # Instance of str
        def browse_thread_run_then_cleanup():
            browse_page_sync()
            # De-register this thread once browsing is done.
            self.logger.info(
                'removing thread %s from self._browsing_threads',
                threading.current_thread())
            with self._browsing_threads_lock:
                self._browsing_threads.remove(threading.current_thread())
        thread_name = "BrowsingThread:%s" % browser.chrome.port
        th = threading.Thread(target=browse_thread_run_then_cleanup, name=thread_name)
        self.logger.info('adding thread %s to self._browsing_threads', th)
        with self._browsing_threads_lock:
            self._browsing_threads.add(th)
        th.start()
| 43.918079 | 150 | 0.566154 | 15,326 | 0.985785 | 0 | 0 | 0 | 0 | 0 | 0 | 4,368 | 0.280955 |
0e85f028f93bb5f31397176f386cc2e9751681f4 | 465 | py | Python | Alura/MLClassificacao/A2V1dados.py | EduardoMoraesRitter/Alura | c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6 | [
"MIT"
] | null | null | null | Alura/MLClassificacao/A2V1dados.py | EduardoMoraesRitter/Alura | c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6 | [
"MIT"
] | null | null | null | Alura/MLClassificacao/A2V1dados.py | EduardoMoraesRitter/Alura | c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6 | [
"MIT"
] | null | null | null | import csv
def carregar_acessos(caminho='acesso.csv'):
    """Load the access dataset from a CSV file.

    Each row holds three access-flag columns (home page, "how it works",
    contact page) followed by the label column (bought or not).

    Parameters
    ----------
    caminho : str
        Path to the CSV file. Defaults to 'acesso.csv' so existing
        callers keep working unchanged.

    Returns
    -------
    tuple(list, list)
        (features, labels): feature rows and their classification labels.
    """
    dados = []      # feature rows (right-hand side)
    marcacoes = []  # classification labels (left-hand side)
    # Context manager guarantees the file handle is closed
    # (the original opened it and never closed it).
    with open(caminho, 'r') as arquivo:
        leitor = csv.reader(arquivo)
        # Read each row: three access flags plus the purchase label.
        for acessou_home, acessou_como_funciona, acessou_contato, comprou in leitor:
            dados.append([acessou_home, acessou_como_funciona, acessou_contato])
            marcacoes.append(comprou)
    return dados, marcacoes
| 23.25 | 77 | 0.703226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.227468 |
0e86920fef555c6bd8efe1dbece15e67133d5a86 | 2,087 | py | Python | Program/DPDP/dpdp/utils/profile_utils.py | italogs/HGS-CVRP | 24b3e7b8525f4b2b2de69fd162bd10bedb39e51f | [
"MIT"
] | null | null | null | Program/DPDP/dpdp/utils/profile_utils.py | italogs/HGS-CVRP | 24b3e7b8525f4b2b2de69fd162bd10bedb39e51f | [
"MIT"
] | null | null | null | Program/DPDP/dpdp/utils/profile_utils.py | italogs/HGS-CVRP | 24b3e7b8525f4b2b2de69fd162bd10bedb39e51f | [
"MIT"
] | 1 | 2021-06-09T12:55:19.000Z | 2021-06-09T12:55:19.000Z | import time
import numpy as np
import torch
class Profiler:
    """Lightweight wall-clock event profiler.

    Records named timestamps via :meth:`log`; :meth:`print_profile_summary`
    reports durations between consecutive events. When ``dummy`` is true,
    :meth:`log` is a no-op. If ``device`` is a CUDA device, the GPU is
    synchronized before each timestamp so queued kernels are included.
    """

    def __init__(self, dummy=False, device=None):
        self.events = []
        self.dummy = dummy
        # A CPU device needs no synchronization, so treat it as "no device".
        self.device = None if device == torch.device('cpu') else device
        self.log('start')

    def log(self, name):
        """Record event ``name`` with the current wall-clock time."""
        if not self.dummy:
            if self.device is not None:
                # Flush pending CUDA work so the timestamp is meaningful.
                torch.cuda.synchronize(self.device)
            self.events.append((name, time.time()))

    def print_profile_summary(self, step, detailed=False):
        """Print timing totals for ``step``; per-event lines when ``detailed``."""
        names, times = zip(*self.events)
        total = times[-1] - times[0]
        print(
            "-------------- Step {} total duration: {:.3f} ms -------------------".format(step, total * 1000))
        durations = np.diff(times)
        names = names[1:]
        gen_time = sum(dur for name, dur in zip(names, durations) if "expansion" in name)
        topk_time = sum(dur for name, dur in zip(names, durations) if "reduced" in name)
        print("Total generate expansions time {:.3f} ms".format(gen_time * 1000))
        print("Total topk selection time {:.3f} ms".format(topk_time * 1000))
        print(
            "Total rest time {:.3f} ms".format((total - gen_time - topk_time) * 1000))
        width = max(len(name) for name in names)
        if detailed:
            for idx in np.argsort(-durations):  # longest events first
                print(("{:" + str(width) + "s} {:.3f} ms").format(names[idx], durations[idx] * 1000))
def debug_memory(device=None):
    """Print every tensor currently tracked by the garbage collector.

    Parameters
    ----------
    device : torch.device, optional
        When given, a CUDA memory summary for that device is printed first.

    Intended for hunting leaked tensors. Errors raised while inspecting
    individual gc-tracked objects are deliberately swallowed (best effort).
    """
    print('*' * 20, "Memory Dump", '*' * 20)
    if device is not None:
        print(torch.cuda.memory_summary(device))
    import gc
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                print(type(obj), obj.device, obj.dtype, obj.size())
        except:
            # Some gc-tracked objects raise on attribute access; skip them.
            pass
0e873042296e41b0fec86717d2fc6d5d5d3721f0 | 3,818 | py | Python | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | 1 | 2020-02-11T07:09:14.000Z | 2020-02-11T07:09:14.000Z | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | import logging
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from djblets.siteconfig.views import site_settings as djblets_site_settings
from reviewboard.admin.checks import check_updates_required
from reviewboard.admin.cache_stats import get_cache_stats, get_has_cache_stats
from reviewboard.admin.forms import SSHSettingsForm
from reviewboard.reviews.models import Group, DefaultReviewer
from reviewboard.scmtools.models import Repository
from reviewboard.scmtools import sshutils
@staff_member_required
def dashboard(request, template_name="admin/dashboard.html"):
    """Render the administration dashboard with entity counts and links."""
    context = {
        'user_count': User.objects.count(),
        'reviewgroup_count': Group.objects.count(),
        'defaultreviewer_count': DefaultReviewer.objects.count(),
        'repository_count': Repository.objects.accessible(request.user).count(),
        'has_cache_stats': get_has_cache_stats(),
        'title': _("Dashboard"),
        'root_path': settings.SITE_ROOT + "admin/db/",
    }
    return render_to_response(template_name, RequestContext(request, context))
@staff_member_required
def cache_stats(request, template_name="admin/cache_stats.html"):
    """Render server cache statistics (memory use, misses, uptime)."""
    context = {
        'cache_hosts': get_cache_stats(),
        'cache_backend': cache.__module__,
        'title': _("Server Cache"),
        'root_path': settings.SITE_ROOT + "admin/db/",
    }
    return render_to_response(template_name, RequestContext(request, context))
@staff_member_required
def site_settings(request, form_class,
                  template_name="siteconfig/settings.html"):
    """Delegate to djblets' site settings view with the admin DB root path."""
    extra_context = {'root_path': settings.SITE_ROOT + "admin/db/"}
    return djblets_site_settings(request, form_class, template_name,
                                 extra_context)
@staff_member_required
def ssh_settings(request, template_name='admin/ssh_settings.html'):
key = sshutils.get_user_key()
if request.method == 'POST':
form = SSHSettingsForm(request.POST, request.FILES)
if form.is_valid():
try:
form.create(request.FILES)
return HttpResponseRedirect('.')
except Exception, e:
# Fall through. It will be reported inline and in the log.
logging.error('Uploading SSH key failed: %s' % e)
else:
form = SSHSettingsForm()
public_key = ''
if key:
fingerprint = sshutils.humanize_key(key)
else:
fingerprint = None
return render_to_response(template_name, RequestContext(request, {
'title': _('SSH Settings'),
'key': key,
'fingerprint': fingerprint,
'public_key': sshutils.get_public_key(key),
'form': form,
}))
def manual_updates_required(request,
                            template_name="admin/manual_updates_required.html"):
    """Render instructions for any manual updates this install still needs."""
    updates = check_updates_required()
    # Loop variable deliberately named differently from the `template_name`
    # argument: Python 2 list comprehensions leak their loop variable.
    rendered = [
        render_to_string(update_template,
                         RequestContext(request, extra_context))
        for (update_template, extra_context) in updates
    ]
    return render_to_response(template_name, RequestContext(request, {
        'updates': rendered,
    }))
| 35.027523 | 80 | 0.701676 | 0 | 0 | 0 | 0 | 2,370 | 0.620744 | 0 | 0 | 895 | 0.234416 |
0e882c9b948e6d3c33c4a811397e4287f0b75a40 | 2,603 | py | Python | src/wls_filter.py | ray075hl/singleLDR2HDR | 28cff4fcd5aa9a4c37931cce96562a2bdd5375fe | [
"MIT"
] | 35 | 2018-06-08T00:28:55.000Z | 2022-03-30T13:40:10.000Z | src/wls_filter.py | ray075hl/singleLDR2HDR | 28cff4fcd5aa9a4c37931cce96562a2bdd5375fe | [
"MIT"
] | null | null | null | src/wls_filter.py | ray075hl/singleLDR2HDR | 28cff4fcd5aa9a4c37931cce96562a2bdd5375fe | [
"MIT"
] | 16 | 2018-11-02T15:08:39.000Z | 2022-02-14T09:58:57.000Z | """
WLS filter: Edge-preserving smoothing based on the weighted least squares
optimization framework, as described in Farbman, Fattal, Lischinski, and
Szeliski, "Edge-Preserving Decompositions for Multi-Scale Tone and Detail
Manipulation", ACM Transactions on Graphics, 27(3), August 2008.
Given an input image IN, we seek a new image OUT, which, on the one hand,
is as close as possible to IN, and, at the same time, is as smooth as
possible everywhere, except across significant gradients in L.
"""
import cv2
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, lsqr
def wlsFilter(IN, Lambda=1.0, Alpha=1.2):
    """Edge-preserving smoothing via weighted least squares (Farbman et al. 2008).

    IN     : 2D grayscale float image.
    Lambda : data/smoothness trade-off; larger values smooth more (default 1.0).
    Alpha  : gradient sensitivity; larger values preserve sharper edges
             (default 1.2).

    Returns the smoothed image with the same shape as IN.
    """
    log_lum = np.log(IN + 1e-22)  # affinities come from gradients of log(IN)
    eps = 1e-6
    height, width = IN.shape
    k = height * width
    # Affinities between vertically adjacent pixels.
    grad_y = np.diff(log_lum, n=1, axis=0)
    grad_y = -Lambda / (np.abs(grad_y) ** Alpha + eps)
    grad_y = np.pad(grad_y, ((0, 1), (0, 0)), 'constant')  # pad back to full height
    grad_y = grad_y.flatten(order='F')
    # Affinities between horizontally adjacent pixels.
    grad_x = np.diff(log_lum, n=1, axis=1)
    grad_x = -Lambda / (np.abs(grad_x) ** Alpha + eps)
    grad_x = np.pad(grad_x, ((0, 0), (0, 1)), 'constant')  # pad back to full width
    grad_x = grad_x.flatten(order='F')
    # Assemble the five-point spatially inhomogeneous Laplacian.
    off_diags = np.concatenate([[grad_x], [grad_y]], axis=0)
    offsets = np.array([-height, -1])
    lap = spdiags(off_diags, offsets, k, k)
    east = grad_x
    west = np.pad(grad_x, (height, 0), 'constant')[0:-height]
    south = grad_y
    north = np.pad(grad_y, (1, 0), 'constant')[0:-1]
    diag = 1.0 - (east + west + south + north)
    lap = lap + lap.transpose() + spdiags(diag, 0, k, k)
    # Solve the sparse linear system for the smoothed image.
    smoothed = spsolve(lap, IN.flatten(order='F'))
    return np.reshape(smoothed, (height, width), order='F')
# Unit test
if __name__ == '__main__':
    # Manual smoke test: requires OpenCV and a local file '1.png'.
    image = cv2.imread('1.png')
    if image.shape[2] == 4: # Format RGBA
        image = image[:,:, 0:3] # Discard alpha channel
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Normalize to [0, 1] before filtering.
    # NOTE(review): an all-black input makes np.max(image) zero -> divide by zero.
    image1 = 1.0*image / np.max(image)
    result = wlsFilter(image1)
    cv2.imshow('1', result)
    cv2.waitKey(0)
| 34.25 | 81 | 0.62159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,322 | 0.507876 |
0e88cbb20b73b185549520afd3dddebdd1c5d349 | 881 | py | Python | tests/Bug1161780.py | grangier/python-soappy | 41158e4afabe3af2ff414b1c4be35907bbf5dc81 | [
"BSD-3-Clause"
] | 1 | 2015-01-19T02:11:57.000Z | 2015-01-19T02:11:57.000Z | tests/Bug1161780.py | grangier/python-soappy | 41158e4afabe3af2ff414b1c4be35907bbf5dc81 | [
"BSD-3-Clause"
] | 2 | 2017-02-03T20:11:57.000Z | 2019-09-09T19:10:49.000Z | tests/Bug1161780.py | grangier/python-soappy | 41158e4afabe3af2ff414b1c4be35907bbf5dc81 | [
"BSD-3-Clause"
] | 3 | 2016-04-22T17:38:29.000Z | 2019-08-13T14:38:37.000Z | #!/usr/bin/env python
import sys
sys.path.insert(1, "..")
from SOAPpy.Errors import Error
from SOAPpy.Parser import parseSOAPRPC
original = """<?xml version="1.0"?>
<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<doSingleRecord SOAP-ENC:root="1">
</doSingleRecord>
</SOAP-ENV:Body>
<ErrorString>The CustomerID tag could not be found or the number contained in the tag was invalid</ErrorString></SOAP-ENV:Envelope>
"""
# The parse must fail: the envelope above has a stray <ErrorString> element
# after the Body, which parseSOAPRPC should reject. (Python 2 syntax.)
try:
    parseSOAPRPC(original, attrs = 1)
except Error, e:
    # Expected path: verify the parser reported the offending element.
    if e.msg != "expected nothing, got `ErrorString'":
        raise AssertionError, "Incorrect error message generated: " + e.msg
else:
    # No exception means the invalid envelope was silently accepted.
    raise AssertionError, "Incorrect error message generated"
print "Success"
| 30.37931 | 132 | 0.725312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.68672 |
0e8961f357932c6f57b68ab9acb081d4fe8c71fd | 4,864 | py | Python | catalyst/core/misc.py | tadejsv/catalyst | 2553ce8fd7cecc025ad88819aea73faf8abb229b | [
"Apache-2.0"
] | 206 | 2018-10-05T19:16:47.000Z | 2019-01-19T21:10:41.000Z | catalyst/core/misc.py | tadejsv/catalyst | 2553ce8fd7cecc025ad88819aea73faf8abb229b | [
"Apache-2.0"
] | 20 | 2018-10-07T06:30:49.000Z | 2019-01-17T17:26:15.000Z | catalyst/core/misc.py | tadejsv/catalyst | 2553ce8fd7cecc025ad88819aea73faf8abb229b | [
"Apache-2.0"
] | 22 | 2018-10-06T12:34:08.000Z | 2019-01-10T16:00:48.000Z | from typing import Dict, List, Tuple, Union
from collections import OrderedDict
from functools import lru_cache
import warnings
from torch.utils.data import BatchSampler, DataLoader
from catalyst.core.callback import (
Callback,
CallbackWrapper,
IBackwardCallback,
ICriterionCallback,
IOptimizerCallback,
ISchedulerCallback,
)
from catalyst.typing import RunnerCriterion, RunnerOptimizer, RunnerScheduler
def get_original_callback(callback: Callback) -> Callback:
    """Unwrap nested ``CallbackWrapper`` layers and return the real callback."""
    unwrapped = callback
    while isinstance(unwrapped, CallbackWrapper):
        unwrapped = unwrapped.callback
    return unwrapped
def callback_isinstance(callback: Callback, class_or_tuple) -> bool:
    """Check if callback is the same type as required ``class_or_tuple``.

    The callback is first unwrapped (``CallbackWrapper`` layers removed)
    before the isinstance check.

    Args:
        callback: callback to check
        class_or_tuple: class or tuple of classes to compare against

    Returns:
        bool: true if the unwrapped callback has the required type
    """
    return isinstance(get_original_callback(callback), class_or_tuple)
def sort_callbacks_by_order(
    callbacks: Union[List, Dict, OrderedDict]
) -> "OrderedDict[str, Callback]":
    """Build an ordered mapping of callbacks sorted by their ``order`` field.

    Args:
        callbacks: list of callbacks, or a (possibly ordered) dict of them

    Returns:
        ``OrderedDict`` of callbacks sorted by ``callback.order``

    Raises:
        TypeError: if ``callbacks`` is not ``None``, dict/OrderedDict or list
    """
    if callbacks is None:
        return OrderedDict()
    if isinstance(callbacks, (dict, OrderedDict)):
        # Keep the original keys; reorder the items by callback order.
        ranked = sorted(callbacks.items(), key=lambda item: item[1].order)
        return OrderedDict(ranked)
    if isinstance(callbacks, list):
        # Lists get integer keys assigned after sorting.
        ranked = sorted(callbacks, key=lambda cb: cb.order)
        return OrderedDict(enumerate(ranked))
    raise TypeError(
        f"Callbacks must be either Dict/OrderedDict or list, "
        f"got {type(callbacks)}"
    )
@lru_cache(maxsize=42)
def is_str_intersections(origin_string: str, strings: Tuple):
    """Return True when any element of ``strings`` occurs in ``origin_string``.

    Results are memoized with ``lru_cache``, so ``strings`` must be a
    hashable tuple.
    """
    for candidate in strings:
        if candidate in origin_string:
            return True
    return False
def get_loader_batch_size(loader: DataLoader):
    """Return the loader's batch size, falling back to its batch sampler.

    Raises:
        NotImplementedError: if neither ``loader.batch_size`` nor
            ``loader.batch_sampler.batch_size`` is set
    """
    batch_size = loader.batch_size
    if batch_size is None:
        # Batch-sampler-driven loaders expose the size on the sampler instead.
        batch_size = loader.batch_sampler.batch_size
    if batch_size is None:
        raise NotImplementedError(
            "No `batch_size` found,"
            "please specify it with `loader.batch_size`,"
            "or `loader.batch_sampler.batch_size`"
        )
    return batch_size
def get_loader_num_samples(loader: DataLoader):
    """Return the number of samples the loader will actually yield.

    Item-based loaders (default ``BatchSampler``) are measured from the
    dataset length, honoring ``drop_last``; batch-based samplers are
    measured as ``len(loader) * batch_size``.
    """
    batch_size = get_loader_batch_size(loader)
    if not isinstance(loader.batch_sampler, BatchSampler):
        # pytorch batch-based samplers
        return len(loader) * batch_size
    # pytorch default item-based samplers
    if loader.drop_last:
        # The incomplete trailing batch is discarded.
        return (len(loader.dataset) // batch_size) * batch_size
    return len(loader.dataset)
def check_callbacks(
    callbacks: OrderedDict,
    criterion: RunnerCriterion = None,
    optimizer: RunnerOptimizer = None,
    scheduler: RunnerScheduler = None,
):
    """Warn when runner components lack their matching callbacks.

    For each provided component (criterion/optimizer/scheduler) a warning
    is emitted if no callback of the corresponding interface is registered.
    """
    def _has(callback_cls) -> bool:
        # True if any registered callback (unwrapped) matches the given type.
        return any(
            callback_isinstance(cb, callback_cls) for cb in callbacks.values()
        )

    if criterion is not None and not _has(ICriterionCallback):
        warnings.warn(
            "No ``ICriterionCallback/CriterionCallback`` were found "
            "while runner.criterion is not None."
            "Do you compute the loss during ``runner.handle_batch``?"
        )
    if (criterion is not None or optimizer is not None) and not _has(
        IBackwardCallback
    ):
        warnings.warn(
            "No ``IBackwardCallback/BackwardCallback`` were found "
            "while runner.criterion/optimizer is not None."
            "Do you backward the loss during ``runner.handle_batch``?"
        )
    if optimizer is not None and not _has(IOptimizerCallback):
        warnings.warn(
            "No ``IOptimizerCallback/OptimizerCallback`` were found "
            "while runner.optimizer is not None."
            "Do run optimisation step pass during ``runner.handle_batch``?"
        )
    if scheduler is not None and not _has(ISchedulerCallback):
        warnings.warn(
            "No ``ISchedulerCallback/SchedulerCallback`` were found "
            "while runner.scheduler is not None."
            "Do you make scheduler step during ``runner.handle_batch``?"
        )
# Public API of this module.
__all__ = [
    "get_original_callback",
    "callback_isinstance",
    "check_callbacks",
    "is_str_intersections",
    "get_loader_batch_size",
    "get_loader_num_samples",
    "sort_callbacks_by_order",
]
| 31.380645 | 81 | 0.668791 | 0 | 0 | 0 | 0 | 152 | 0.03125 | 0 | 0 | 1,658 | 0.340872 |
0e897b9b6ee11290f8bbe871a90dc748ef78c7fe | 2,013 | py | Python | expandimage.py | LuisLinan/helicity_fluxes | 9b6f4049c819d758bca6e524a80c6d7a09e7b179 | [
"MIT"
] | null | null | null | expandimage.py | LuisLinan/helicity_fluxes | 9b6f4049c819d758bca6e524a80c6d7a09e7b179 | [
"MIT"
] | null | null | null | expandimage.py | LuisLinan/helicity_fluxes | 9b6f4049c819d758bca6e524a80c6d7a09e7b179 | [
"MIT"
] | null | null | null | import numpy as np
def place_mirror(im, x1, x2, y1, y2, mr):
    """ Place an image mr in specified locations of an image im. The edge locations in im where mr is to be placed are
    (x1,y1) and (x2,y2). The pasted region is clipped to the smaller of the
    target rectangle and mr itself; im is modified in place and returned.

    Programmer
    ---------
    Manolis K. Georgoulis (JHU/APL, 10/12/05)
    """
    # Cast coordinates to int: callers such as expand_image pass np.fix()
    # floats, and modern NumPy rejects non-integer slice indices.
    x1, x2, y1, y2 = int(x1), int(x2), int(y1), int(y2)
    target_shape = im[x1:x2 + 1, y1:y2 + 1].shape
    # Clip to the smaller of the target window and the mirror patch.
    # (The original computed these via float arrays, producing float slice
    # indices that raise IndexError on current NumPy.)
    nx = min(target_shape[0], mr.shape[0])
    ny = min(target_shape[1], mr.shape[1])
    im[x1:x1 + nx, y1:y1 + ny] = mr[0:nx, 0:ny]
    return im
def expand_image(im, ext_x, ext_y, mirror=0):
    """Enlarge the linear dimensions of an image by (ext_x, ext_y) and put the
    initial image at the center. Returns ``(mim, stx, sty)`` where ``mim`` is
    the enlarged image and ``(stx, sty)`` is the offset of the original image
    inside it. If ``mirror`` is non-zero, the added border is filled with
    mirror reflections of the image instead of zeros.

    Programmer
    ----------
    Manolis K. Georgoulis (JHU/APL, 09/30/05)
    """
    id1, id2 = im.shape[0], im.shape[1]
    mim = np.zeros((int(id1 + ext_x), int(id2 + ext_y)))
    # Offsets of the original image inside mim, as ints. The original used
    # np.float (removed in NumPy >= 1.24) and kept float offsets that were
    # later used as indices.
    stx = int(np.fix(float(ext_x) / 2. + 0.5))
    sty = int(np.fix(float(ext_y) / 2. + 0.5))
    mim[stx:stx + id1, sty:sty + id2] = im
    if mirror != 0:
        # Vertical borders: reflect top/bottom strips of the original image.
        if stx <= id1:
            xmr = stx
        else:
            xmr = id1
        mr1 = np.flip(im[0:xmr, :], axis=0)
        mr2 = np.flip(im[id1 - xmr:id1, :], axis=0)
        mim = place_mirror(mim, 0, stx - 1, sty, sty + id2 - 1, mr1)
        mim = place_mirror(mim, stx + id1, id1 + ext_x - 1, sty, sty + id2 - 1, mr2)
        # Horizontal borders: reflect left/right strips of the partially
        # filled enlarged image so the corners mirror as well.
        if sty <= id2:
            ymr = sty
        else:
            ymr = id2
        mr1 = np.flip(mim[:, ymr:2 * ymr], axis=1)
        mr2 = np.flip(mim[:, id2:ymr + id2], axis=1)
        mim = place_mirror(mim, 0, id1 + ext_x - 1, 0, sty - 1, mr1)
        mim = place_mirror(mim, 0, id1 + ext_x - 1, sty + id2, id2 + ext_y - 1, mr2)
    return mim, stx, sty
| 25.807692 | 118 | 0.524093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.261798 |
0e8a4f5e382a07daf6ff75e3e2955a20dbb893fe | 4,898 | py | Python | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/team/actions/get_teams.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protobufs.services.common import containers_pb2 as protobufs_dot_services_dot_common_dot_containers__pb2
from protobufs.services.team import containers_pb2 as protobufs_dot_services_dot_team_dot_containers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/team/actions/get_teams.proto',
package='services.team.actions.get_teams',
syntax='proto3',
serialized_pb=b'\n/protobufs/services/team/actions/get_teams.proto\x12\x1fservices.team.actions.get_teams\x1a*protobufs/services/common/containers.proto\x1a(protobufs/services/team/containers.proto\"\x8c\x01\n\tRequestV1\x12<\n\ninflations\x18\x01 \x01(\x0b\x32(.services.common.containers.InflationsV1\x12\x34\n\x06\x66ields\x18\x02 \x01(\x0b\x32$.services.common.containers.FieldsV1\x12\x0b\n\x03ids\x18\x03 \x03(\t\"=\n\nResponseV1\x12/\n\x05teams\x18\x01 \x03(\x0b\x32 .services.team.containers.TeamV1b\x06proto3'
,
dependencies=[protobufs_dot_services_dot_common_dot_containers__pb2.DESCRIPTOR,protobufs_dot_services_dot_team_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.team.actions.get_teams.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inflations', full_name='services.team.actions.get_teams.RequestV1.inflations', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fields', full_name='services.team.actions.get_teams.RequestV1.fields', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ids', full_name='services.team.actions.get_teams.RequestV1.ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=171,
serialized_end=311,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.team.actions.get_teams.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='teams', full_name='services.team.actions.get_teams.ResponseV1.teams', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=374,
)
# --- Generated by protoc; DO NOT EDIT by hand. ---
# Resolve message-typed fields to descriptors from the imported containers protos.
_REQUESTV1.fields_by_name['inflations'].message_type = protobufs_dot_services_dot_common_dot_containers__pb2._INFLATIONSV1
_REQUESTV1.fields_by_name['fields'].message_type = protobufs_dot_services_dot_common_dot_containers__pb2._FIELDSV1
_RESPONSEV1.fields_by_name['teams'].message_type = protobufs_dot_services_dot_team_dot_containers__pb2._TEAMV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
# Build the concrete message classes from their descriptors and register
# them with the default symbol database.
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
  DESCRIPTOR = _REQUESTV1,
  __module__ = 'protobufs.services.team.actions.get_teams_pb2'
  # @@protoc_insertion_point(class_scope:services.team.actions.get_teams.RequestV1)
  ))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
  DESCRIPTOR = _RESPONSEV1,
  __module__ = 'protobufs.services.team.actions.get_teams_pb2'
  # @@protoc_insertion_point(class_scope:services.team.actions.get_teams.ResponseV1)
  ))
_sym_db.RegisterMessage(ResponseV1)
# @@protoc_insertion_point(module_scope)
| 38.566929 | 519 | 0.780523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,472 | 0.300531 |
0e8cf0d37b01be1452f5feb60a1354161f4516a1 | 795 | py | Python | albumy/blueprints/user.py | puppetgc/albumy | 8dce6a2f7828defa69fdb942ac38551cb81b079d | [
"MIT"
] | null | null | null | albumy/blueprints/user.py | puppetgc/albumy | 8dce6a2f7828defa69fdb942ac38551cb81b079d | [
"MIT"
] | null | null | null | albumy/blueprints/user.py | puppetgc/albumy | 8dce6a2f7828defa69fdb942ac38551cb81b079d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:author: TianMing Xu (徐天明)
:url: http://greyli.com
:copyright: © 2021 TianMing Xu <78703671@qq.com>
:license: MIT, see LICENSE for more details.
"""
from flask import render_template, current_app, request, Blueprint
from albumy.models import User, Photo
# Blueprint grouping the user-profile routes below.
user_bp = Blueprint('user', __name__)
@user_bp.route('/<username>')
def index(username):
    """Show a user's profile page with a paginated list of their photos."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['ALBUMY_PHOTO_PER_PAGE']
    photo_query = Photo.query.with_parent(user).order_by(Photo.timestamp.desc())
    pagination = photo_query.paginate(page, per_page)
    return render_template('user/index.html', user=user,
                           pagination=pagination, photos=pagination.items)
| 34.565217 | 104 | 0.715723 | 0 | 0 | 0 | 0 | 454 | 0.566085 | 0 | 0 | 263 | 0.32793 |
0e8d1d299b98c3c9fde54963bdd473b0d3eb4f2b | 187 | py | Python | Tests/Aula_20.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
] | 4 | 2021-04-23T18:07:58.000Z | 2021-05-12T11:38:14.000Z | Tests/Aula_20.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
] | null | null | null | Tests/Aula_20.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
def lin():
    """Print a 35-character horizontal rule."""
    print('-' * 35)
# Principal program
# Print each banner title framed by horizontal rules (same output as the
# original unrolled sequence of lin()/print() calls).
for titulo in ('    IAN STIGLIANO SILVA    ',
               '    CURSO EM VÍDEO    ',
               '    GUSTAVO GUANABARA    '):
    lin()
    print(titulo)
    lin()
| 12.466667 | 34 | 0.572193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.515957 |
0e8d916a04b0ea3e2a7473717726afba28ca4c92 | 5,304 | py | Python | transparentai/datasets/variable/variable.py | Nathanlauga/transparentai | 9c21c60fe68170f46ba4455173ce80493e3b3bb5 | [
"MIT"
] | 7 | 2020-01-20T09:59:51.000Z | 2021-11-06T07:10:27.000Z | transparentai/datasets/variable/variable.py | Nathanlauga/transparentai | 9c21c60fe68170f46ba4455173ce80493e3b3bb5 | [
"MIT"
] | 8 | 2020-03-24T14:28:50.000Z | 2020-07-31T14:55:06.000Z | transparentai/datasets/variable/variable.py | Nathanlauga/transparentai | 9c21c60fe68170f46ba4455173ce80493e3b3bb5 | [
"MIT"
# Public helpers exported by this module.
__all__ = [
    'describe_number',
    'describe_datetime',
    'describe_object',
    'describe'
]
import pandas as pd
import numpy as np
from scipy import stats
from transparentai import utils
def describe_common(arr):
    """Common descriptive statistics about an array.

    Returned statistics:

    - Count of valid values
    - Count of missing values

    Parameters
    ----------
    arr: array like
        Array of values to get descriptive statistics from

    Raises
    ------
    TypeError:
        arr is not an array like
    """
    if not utils.is_array_like(arr):
        raise TypeError('arr is not an array like')
    if type(arr) in [list, np.ndarray]:
        arr = pd.Series(arr)
    # Missing entries are NaN/None; everything else counts as valid.
    n_missing = arr.isna().sum()
    return {
        'valid values': len(arr) - n_missing,
        'missing values': n_missing,
    }
def describe_number(arr):
    """Descriptive statistics about a number array.

    Returned statistics:

    - Count of valid values
    - Count of missing values
    - Mean
    - Mode
    - Min
    - Quantile 25%
    - Median
    - Quantile 75%
    - Max

    Parameters
    ----------
    arr: array like
        Array of values to get descriptive statistics from

    Raises
    ------
    TypeError:
        arr is not an array like
    TypeError:
        arr is not a number array
    """
    if not utils.is_array_like(arr):
        raise TypeError('arr is not an array like')
    if utils.find_dtype(arr) != 'number':
        raise TypeError('arr is not a number array')
    desc = describe_common(arr)
    # Location statistics.
    desc['mean'] = np.round(np.mean(arr), 4)
    desc['mode'] = stats.mode(arr)[0][0]
    # Five-number summary.
    desc['min'] = np.min(arr)
    desc['quantile 25%'] = np.quantile(arr, 0.25)
    desc['quantile 50%'] = np.median(arr)
    desc['quantile 75%'] = np.quantile(arr, 0.75)
    desc['max'] = np.max(arr)
    return desc
def describe_datetime(arr, format='%Y-%m-%d'):
    """Descriptive statistics about a datetime array.

    Returned statistics:

    - Count of valid values
    - Count of missing values
    - Count of unique values
    - Most common value
    - Min
    - Mean
    - Max

    Parameters
    ----------
    arr: array like
        Array of values to get descriptive statistics from
    format: str
        String format for datetime value

    Returns
    -------
    dict
        Dictionary with the descriptive statistics (dates formatted with
        ``format``)

    Raises
    ------
    TypeError:
        arr is not an array like
    TypeError:
        arr is not a datetime array
    """
    if not utils.is_array_like(arr):
        raise TypeError('arr is not an array like')
    if utils.find_dtype(arr) != 'datetime':
        raise TypeError('arr is not a datetime array')

    # Normalize to a Series and coerce invalid entries to NaT so they are
    # counted as missing by describe_common().
    series = pd.Series(arr) if type(arr) in [list, np.ndarray] else arr
    series = pd.to_datetime(series, errors='coerce')

    desc = describe_common(series)
    desc['unique values'] = series.nunique()
    for stat_name, stat_value in [
        ('most common', series.mode()[0]),
        ('min', series.min()),
        ('mean', series.mean()),
        ('max', series.max()),
    ]:
        desc[stat_name] = stat_value.strftime(format)
    return desc
def describe_object(arr):
    """Descriptive statistics about an object array.

    Returned statistics:

    - Count of valid values
    - Count of missing values
    - Count of unique values
    - Most common value

    Parameters
    ----------
    arr: array like
        Array of values to get descriptive statistics from

    Returns
    -------
    dict
        Dictionary with the descriptive statistics

    Raises
    ------
    TypeError:
        arr is not an array like
    TypeError:
        arr is not an object array
    """
    if not utils.is_array_like(arr):
        raise TypeError('arr is not an array like')
    if utils.find_dtype(arr) != 'object':
        raise TypeError('arr is not an object array')

    # Fix: use np.ndarray directly for the type test, consistent with
    # describe_common() and describe_datetime() (was type(np.array([]))).
    if type(arr) in [list, np.ndarray]:
        arr = pd.Series(arr)

    desc = describe_common(arr)
    desc['unique values'] = arr.nunique()
    desc['most common'] = arr.mode()[0]
    return desc
def describe(arr):
    """Descriptive statistics about an array.

    Depending on the detected dtype (number, date, object)
    it returns specific stats.

    Common statistics for all dtype (using describe_common):

    - Count of valid values
    - Count of missing values

    Number statistics (using describe_number):

    - Mean
    - Mode
    - Min
    - Quantile 25%
    - Median
    - Quantile 75%
    - Max

    Datetime statistics (using describe_datetime):

    - Count of unique values
    - Most common value
    - Min
    - Mean
    - Max

    Object statistics (using describe_object):

    - Count of unique values
    - Most common value

    Parameters
    ----------
    arr: array like
        Array of values to get descriptive statistics from

    Returns
    -------
    dict
        Dictionary with descriptive statistics, or None for an
        unrecognized dtype

    Raises
    ------
    TypeError:
        arr is not an array like
    """
    if not utils.is_array_like(arr):
        raise TypeError('arr is not an array like')

    # Normalize the input to a numpy array before dtype detection.
    if type(arr) == list:
        arr = np.array(arr)
    if type(arr) in [pd.Series, pd.DataFrame]:
        arr = arr.to_numpy()

    # Dispatch to the dtype-specific describe function.
    describe_fn_by_dtype = {
        'number': describe_number,
        'object': describe_object,
        'datetime': describe_datetime,
    }
    describe_fn = describe_fn_by_dtype.get(utils.find_dtype(arr))
    return describe_fn(arr) if describe_fn is not None else None
| 21.737705 | 60 | 0.607278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,136 | 0.591252 |
0e8f0fd36260f6f9e8bd5e30e3b45277ea628c6a | 1,472 | py | Python | obniz/obniz/libs/measurements/measure.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 11 | 2019-03-22T12:02:11.000Z | 2021-01-21T04:57:18.000Z | obniz/obniz/libs/measurements/measure.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 5 | 2019-03-02T08:28:25.000Z | 2021-02-02T22:06:37.000Z | obniz/obniz/libs/measurements/measure.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 3 | 2019-07-20T06:55:09.000Z | 2019-12-04T05:05:00.000Z | from ..utils.util import ObnizUtil
class ObnizMeasure:
    """Driver for the obniz "measure" peripheral (echo/pulse measurement)."""

    def __init__(self, obniz):
        # Keep a handle on the parent obniz device; callbacks start empty.
        self.obniz = obniz
        self._reset()

    def _reset(self):
        # Pending result callbacks, oldest first.
        self.observers = []

    def echo(self, params):
        """Start an echo measurement described by ``params``.

        Required keys: io_pulse, pulse, pulse_width, io_echo, measure_edges.
        Optional keys: timeout (int), callback (called from notified()).
        """
        required = ["io_pulse", "pulse", "pulse_width", "io_echo", "measure_edges"]

        missing = ObnizUtil._required_keys(params, required)
        if missing:
            raise Exception(
                f"Measure start param '{missing}' required, but not found "
            )

        self.params = ObnizUtil._key_filter(
            params, required + ["timeout", "callback"]
        )

        # Build the command payload from the required keys, then the
        # optional integer timeout.
        echo = {key: self.params[key] for key in required}
        if type(self.params.get("timeout")) is int:
            echo["timeout"] = self.params["timeout"]

        self.obniz.send({"measure": {"echo": echo}})

        if "callback" in self.params:
            self.observers.append(self.params["callback"])

    def notified(self, obj):
        """Dispatch a measurement result to the oldest waiting callback."""
        if self.observers:
            next_callback = self.observers.pop(0)
            next_callback(obj["echo"])
| 28.307692 | 84 | 0.506114 | 1,434 | 0.974185 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.240489 |
0e8f1b9e020cbcff46ef73a5b53081ba0b48bcd4 | 7,869 | py | Python | media_grab-container/src/test/unit_test/controllers_test/test_CompletedDownloadsController.py | tomconnolly94/media_grab | 6c1da232fefdcbcb9ae2d79926f31e65970dbfa8 | [
"MIT"
] | null | null | null | media_grab-container/src/test/unit_test/controllers_test/test_CompletedDownloadsController.py | tomconnolly94/media_grab | 6c1da232fefdcbcb9ae2d79926f31e65970dbfa8 | [
"MIT"
] | null | null | null | media_grab-container/src/test/unit_test/controllers_test/test_CompletedDownloadsController.py | tomconnolly94/media_grab | 6c1da232fefdcbcb9ae2d79926f31e65970dbfa8 | [
"MIT"
] | null | null | null | # external dependencies
import unittest
import mock
import os
from unittest.mock import call
import shutil
from mock import MagicMock
from datetime import datetime, timedelta
# internal dependencies
from src.controllers import CompletedDownloadsController
from src.dataTypes.ProgramMode import PROGRAM_MODE
from src.test.unit_test.testUtilities import FakeFileSystemItem, cleanUpDirs, getEnvMockFunc, fakeRecycleBinDir, fakeTargetTvDir, fakeDumpCompleteDir
class TestCompletedDownloadsController(unittest.TestCase):
    """Unit tests for CompletedDownloadsController.

    External dependencies (filesystem helpers, qBittorrent, logging and
    os.getenv) are patched with mock; test_auditFilesWithFileSystem is the
    exception and builds a real file tree under the fake dummy directories.
    Note: mock.patch decorators apply bottom-up, so the mock arguments are
    listed in reverse decorator order.
    """

    @mock.patch("src.controllers.CompletedDownloadsController.permanentlyDeleteExpiredItems")
    @mock.patch("src.strategies.AuditSeasonStrategy.AuditSeasonStrategy.audit")
    @mock.patch("src.strategies.AuditEpisodeStrategy.AuditEpisodeStrategy.audit")
    @mock.patch("src.interfaces.FolderInterface.getDirContents")
    @mock.patch('os.getenv')
    @mock.patch("logging.info")
    def test_auditDumpCompleteDir(self, loggingInfoMock, getEnvMock, getDirContentsMock, AuditEpisodeStrategyAuditMock, AuditSeasonStrategyAuditMock, permanentlyDeleteExpiredItemsMock):
        """auditDumpCompleteDir scans dump_complete, audits items, then purges expired items."""
        # config fake data
        fakeDirName = "fakeDirName1"
        fakeFileSystemItems = [
            FakeFileSystemItem(fakeDirName, "fakeName1")
        ]
        os.environ["TV_TARGET_DIR"] = fakeTargetTvDir
        # config mocks
        getDirContentsMock.return_value = fakeFileSystemItems
        getEnvMock.side_effect = getEnvMockFunc
        AuditEpisodeStrategyAuditMock.return_value = True
        AuditSeasonStrategyAuditMock.return_value = True
        CompletedDownloadsController.auditDumpCompleteDir()
        # asserts
        loggingInfoCalls = [
            call("File auditing started."),
            call(f"Items in dump_complete directory: {[fakeDirName]}")
        ]
        loggingInfoMock.assert_has_calls(loggingInfoCalls)
        getDirContentsMock.assert_called_with(fakeDumpCompleteDir)
        permanentlyDeleteExpiredItemsMock.assert_called()

    @mock.patch("src.controllers.CompletedDownloadsController.permanentlyDeleteExpiredItems")
    @mock.patch("src.interfaces.QBittorrentInterface.getInstance")
    @mock.patch("logging.info")
    @mock.patch("src.utilities.AuditUtilities.reportItemAlreadyExists")
    @mock.patch('os.getenv')
    def test_auditFilesWithFileSystem(self, getEnvMock, reportItemAlreadyExistsMock, loggingInfoMock, qBittorrentInterfaceGetInstanceMock, permanentlyDeleteExpiredItemsMock):
        """End-to-end audit against a real (dummy) directory tree.

        Creates the fake downloaded files on disk, runs the audit, and
        verifies episodes are filed under "<show>/Season 1" while
        non-parsable leftovers land in the recycle bin.
        """
        # init items
        downloadingItems = [
            "fake tv show name--s01e01/fake tv show name.s01e01.mp4", # mediaGrab initiated download - no sub folder
            "fake tv show name--s01e02/fake-tv-show-name.s01.e02/fake-tv-show-name.s01.e02.mp4", # mediaGrab initiated download - with sub folder
            "fake tv show name s01e03.mp4", # manually initiated download - no sub folder
            "fake-tv-show-name.s01.e04/fake-tv-show-name.s01.e04.mp4", # manually initiated download - with sub folder
            "non-parsable item",
            "fake tv show name--s02/fake-tv-show-name.s02/fake-tv-show-name.s02.e01.mp4",
            "fake tv show name--s02/fake-tv-show-name.s02/fake-tv-show-name.s02.e02.mp4",
            "fake tv show name--s02/fake-tv-show-name.s02/fake-tv-show-name.s02.e03.mp4",
            "fake tv show name--s02/fake-tv-show-name.s02/fake-tv-show-name.s02.e04.mp4",
            "fake tv show name--s02/fake-tv-show-name.s02/fake-tv-show-name.s02.e05.mp4"
        ] # representation of what is in the dump_complete folder
        # config fake data #
        mode = PROGRAM_MODE.TV
        expectedTvShowName = "Fake tv show name"
        directoriesToCleanUp = [ fakeTargetTvDir, fakeDumpCompleteDir, fakeRecycleBinDir ]
        # create mock for instance
        qBittorrentInterfaceInstanceMock = MagicMock()
        # assign mocked instance to return_value for mocked getInstance()
        qBittorrentInterfaceGetInstanceMock.return_value = qBittorrentInterfaceInstanceMock
        qBittorrentInterfaceInstanceMock.qBittorrentInterfaceInstanceMock.pauseTorrent.return_value = True
        os.environ["TV_TARGET_DIR"] = fakeTargetTvDir
        os.environ["RECYCLE_BIN_DIR"] = fakeRecycleBinDir
        # config mocks
        getEnvMock.side_effect = getEnvMockFunc
        # setup fake files
        for path in downloadingItems:
            pathParts = path.split("/")
            directories = pathParts[:-1]
            newDirPath = fakeDumpCompleteDir
            # create directories
            for directory in directories:
                newDirPath = os.path.join(newDirPath, directory)
                if not os.path.isdir(newDirPath):
                    os.mkdir(newDirPath)
            newFilePath = os.path.join(fakeDumpCompleteDir, path)
            if not os.path.isfile(newFilePath):
                # os.mknod(newFilePath)
                # NOTE(review): files are written just over 50 MB —
                # presumably to pass a minimum-size filter in the audit;
                # confirm against the controller before shrinking.
                size = 50000001
                with open('%s' % newFilePath, 'wb') as fout:
                    fout.write(os.urandom(size)) # 1
        try:
            # assert state is as expected before audit method is called
            self.assertEqual(0, len(list(os.scandir(fakeTargetTvDir))))
            self.assertEqual(len(downloadingItems), 10)
            self.assertEqual(0, len(list(os.scandir(fakeRecycleBinDir))))
            # run auditDumpCompleteDir
            CompletedDownloadsController.auditDumpCompleteDir()
            # assert that the contents of downloadingItems has been moved from the `dummy_directories/dump_complete` directory to the `dummy_directories/tv` directory
            self.assertEqual(4, len(list(os.scandir(os.path.join(
                fakeTargetTvDir, expectedTvShowName, "Season 1")))))
            self.assertEqual(1, len(list(os.scandir(fakeDumpCompleteDir))))
            self.assertEqual(3, len(list(os.scandir(fakeRecycleBinDir))))
            loggingInfoMock.assert_called()
            permanentlyDeleteExpiredItemsMock.assert_called()
            reportItemAlreadyExistsMock.assert_not_called()
        finally:
            # clean up moved files
            cleanUpDirs(directoriesToCleanUp, downloadingItems)
            pass

    @mock.patch('src.interfaces.FolderInterface.deleteFile')
    @mock.patch('src.interfaces.FolderInterface.deleteDir')
    @mock.patch('os.path.getctime')
    @mock.patch('src.interfaces.FolderInterface.getDirContents')
    @mock.patch('os.getenv')
    def test_permanentlyDeleteExpiredItems(self, getEnvMock, getDirContentsMock, getctimeMock, deleteDirMock, deleteFileMock):
        """Only items older than the retention window are deleted.

        getctime side effects are consumed in call order: the first two
        timestamps belong to the recycle-bin entries, the last two to the
        log entries — only the expired first entry of each pair is deleted.
        """
        logsDir = "logs"
        def fakeGetDirContents(directory):
            assert(directory in [fakeRecycleBinDir, logsDir])
            return [
                FakeFileSystemItem("fakeDirName1", os.path.join(directory, "fakePath1")),
                FakeFileSystemItem("fakeDirName2", os.path.join(directory, "fakePath2"))
            ]
        # config mocks
        getDirContentsMock.side_effect = fakeGetDirContents
        getctimeMock.side_effect = [
            (datetime.now() - timedelta(weeks=5)).timestamp(), # 5 weeks old
            (datetime.now() - timedelta(weeks=3)).timestamp(), # 3 weeks old
            (datetime.now() - timedelta(days=8)).timestamp(), # 8 days old
            (datetime.now() - timedelta(days=6)).timestamp(), # 6 days old
        ]
        getEnvMock.side_effect = getEnvMockFunc
        # run testable function
        CompletedDownloadsController.permanentlyDeleteExpiredItems()
        # mock asserts
        deleteDirMock.assert_called_with(
            os.path.join(fakeRecycleBinDir, "fakePath1"))
        deleteFileMock.assert_called_with(os.path.join(logsDir, "fakePath1"))
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
# N.B. test can't be debugged from the VS Code side bar; to debug a test, add a breakpoint and hit F5. This should be fixed soon.
| 45.75 | 185 | 0.686618 | 7,233 | 0.919177 | 0 | 0 | 7,133 | 0.906468 | 0 | 0 | 2,554 | 0.324565 |
0e8ff2bd4049ece744d225d9c8cbf9ccd1e11cba | 14,313 | py | Python | framework/generated/vulkan_generators/vulkan_referenced_resource_consumer_body_generator.py | tomped01/gfxreconstruct | ca0eacd7b69fa01a0a9cb2b1259242f341919a43 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | framework/generated/vulkan_generators/vulkan_referenced_resource_consumer_body_generator.py | tomped01/gfxreconstruct | ca0eacd7b69fa01a0a9cb2b1259242f341919a43 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | framework/generated/vulkan_generators/vulkan_referenced_resource_consumer_body_generator.py | tomped01/gfxreconstruct | ca0eacd7b69fa01a0a9cb2b1259242f341919a43 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2020 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, re, sys
from base_generator import *
class VulkanReferencedResourceBodyGeneratorOptions(BaseGeneratorOptions):
    """Options controlling generation of the C++ class that detects
    unreferenced resource handles in a capture file."""

    def __init__(
        self,
        blacklists=None,  # Path to JSON file listing apicalls and structs to ignore.
        platformTypes=None,  # Path to JSON file listing platform (WIN32, X11, etc.) defined types.
        filename=None,
        directory='.',
        prefixText='',
        protectFile=False,
        protectFeature=True
    ):
        # This subclass adds no state of its own; forward everything to the
        # base options type.
        super().__init__(
            blacklists,
            platformTypes,
            filename,
            directory,
            prefixText,
            protectFile,
            protectFeature
        )
# VulkanReferencedResourceBodyGenerator - subclass of BaseGenerator.
# Generates C++ member definitions for the VulkanReferencedResource class responsible for
# determining which resource handles are used or unused in a capture file.
class VulkanReferencedResourceBodyGenerator(BaseGenerator):
    """Generate a C++ class for detecting unreferenced resource handles in a capture file"""
    # All resource and resource associated handle types to be processed.
    RESOURCE_HANDLE_TYPES = [
        'VkBuffer', 'VkImage', 'VkBufferView', 'VkImageView', 'VkFramebuffer',
        'VkDescriptorSet', 'VkCommandBuffer'
    ]
    # Handle types that contain resource and child resource handle types.
    CONTAINER_HANDLE_TYPES = ['VkDescriptorSet']
    # Handle types that use resource and child resource handle types.
    USER_HANDLE_TYPES = ['VkCommandBuffer']

    def __init__(
        self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout
    ):
        """Configure the base generator to process both commands and structs,
        with no per-feature break."""
        BaseGenerator.__init__(
            self,
            processCmds=True,
            processStructs=True,
            featureBreak=False,
            errFile=errFile,
            warnFile=warnFile,
            diagFile=diagFile
        )
        # Map of Vulkan structs containing handles to a list values for handle members or struct members
        # that contain handles (eg. VkGraphicsPipelineCreateInfo contains a VkPipelineShaderStageCreateInfo
        # member that contains handles).
        self.structsWithHandles = dict()
        self.pNextStructs = dict(
        )  # Map of Vulkan structure types to sType value for structs that can be part of a pNext chain.
        self.commandInfo = dict()  # Map of Vulkan commands to parameter info
        self.restrictHandles = True  # Determines if the 'isHandle' override limits the handle test to only the values conained by RESOURCE_HANDLE_TYPES.

    # Method override
    # yapf: disable
    def beginFile(self, genOpts):
        """Emit the file preamble: includes and opening namespace macros."""
        BaseGenerator.beginFile(self, genOpts)
        write('#include "generated/generated_vulkan_referenced_resource_consumer.h"', file=self.outFile)
        self.newline()
        write('#include <cassert>', file=self.outFile)
        self.newline()
        write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
        write('GFXRECON_BEGIN_NAMESPACE(decode)', file=self.outFile)
    # yapf: enable

    # Method override
    # yapf: disable
    def endFile(self):
        """Emit one Process_<cmd> member function per vkCmd* command whose
        parameters reference resource handles, then close the namespaces.

        Only commands whose first parameter is a VkCommandBuffer are
        considered; all commands were accumulated in self.commandInfo by
        generateFeature().
        """
        for cmd, info in self.commandInfo.items():
            returnType = info[0]
            params = info[2]
            if params and params[0].baseType == 'VkCommandBuffer':
                # Check for parameters with resource handle types.
                handles = self.getParamListHandles(params[1:])
                if (handles):
                    # Generate a function to add handles to the command buffer's referenced handle list.
                    cmddef = '\n'
                    # Temporarily remove resource only matching restriction from isHandle() when generating the function signature.
                    self.restrictHandles = False
                    cmddef += self.makeConsumerFuncDecl(returnType, 'VulkanReferencedResourceConsumer::Process_' + cmd, params) + '\n'
                    self.restrictHandles = True
                    cmddef += '{\n'
                    indent = self.INDENT_SIZE * ' '
                    # Add unreferenced parameter macros.
                    unrefCount = 0
                    for param in params[1:]:
                        if not param in handles:
                            cmddef += indent + 'GFXRECON_UNREFERENCED_PARAMETER({});\n'.format(param.name)
                            unrefCount += 1
                    if unrefCount > 0:
                        cmddef += '\n'
                    for index, handle in enumerate(handles):
                        cmddef += self.trackCommandHandle(index, params[0].name, handle, indent=indent)
                    cmddef += '}'
                    write(cmddef, file=self.outFile)
        self.newline()
        write('GFXRECON_END_NAMESPACE(decode)', file=self.outFile)
        write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)

        # Finish processing in superclass
        BaseGenerator.endFile(self)
    # yapf: enable

    #
    # Method override
    def genStruct(self, typeinfo, typename, alias):
        """Record handle-bearing struct members and, for structs that extend
        others, the sType needed to recognize them in a pNext chain."""
        BaseGenerator.genStruct(self, typeinfo, typename, alias)
        if not alias:
            self.checkStructMemberHandles(typename, self.structsWithHandles)
            # Track this struct if it can be present in a pNext chain.
            parentStructs = typeinfo.elem.get('structextends')
            if parentStructs:
                sType = self.makeStructureTypeEnum(typeinfo, typename)
                if sType:
                    self.pNextStructs[typename] = sType

    #
    # Indicates that the current feature has C++ code to generate.
    def needFeatureGeneration(self):
        if self.featureCmdParams:
            return True
        return False

    #
    # Performs C++ code generation for the feature.
    def generateFeature(self):
        # Accumulate command info; actual emission happens in endFile().
        for cmd in self.getFilteredCmdNames():
            self.commandInfo[cmd] = self.featureCmdParams[cmd]

    #
    # Override method to check for handle type, only matching resource handle types.
    def isHandle(self, baseType):
        # When self.restrictHandles is True, only the types listed in
        # RESOURCE_HANDLE_TYPES count as handles; otherwise defer to the
        # base class (used while emitting full function signatures).
        if self.restrictHandles:
            if baseType in self.RESOURCE_HANDLE_TYPES:
                return True
            return False
        else:
            return BaseGenerator.isHandle(self, baseType)

    #
    # Create list of parameters that have handle types or are structs that contain handles.
    def getParamListHandles(self, values):
        handles = []
        for value in values:
            if self.isHandle(value.baseType):
                handles.append(value)
            elif self.isStruct(
                value.baseType
            ) and (value.baseType in self.structsWithHandles):
                handles.append(value)
        return handles

    #
    # Recursively build the C++ statements that register one handle-bearing
    # parameter (or struct member) with the reference table.
    # yapf: disable
    def trackCommandHandle(self, index, commandParamName, value, valuePrefix='', indent=''):
        """Return C++ source text that adds the handles reachable from
        ``value`` to ``commandParamName``'s referenced-handle list.

        ``index`` controls blank-line separation between generated sections;
        ``valuePrefix`` carries the accessor chain for nested struct
        members; ``indent`` is the current C++ indentation string.  The
        function recurses into struct members and pNext extension structs.
        Note: ``indent`` and ``tail`` are mutated in lock-step to open and
        close C++ blocks, so statement order here is load-bearing.
        """
        body = ''
        tail = ''
        indexName = None
        countName = None
        valueName = valuePrefix + value.name
        isHandle = self.isHandle(value.baseType)
        if (value.isPointer or value.isArray) and value.name != 'pnext_value':
            if index > 0:
                body += '\n'
            accessOperator = '->'
            if not valuePrefix:
                # If there is no prefix, this is the pointer parameter received by the function, which should never be null.
                body += indent + 'assert({} != nullptr);\n'.format(value.name)
                body += '\n'
            else:
                # If there is a prefix, this is a struct member. We need to determine the type of access operator to use
                # for the member of a 'decoded' struct type, where handle member types will be HandlePointerDecoder, but
                # struct member types will be unique_ptr<StructPointerDecoder>.
                if isHandle:
                    accessOperator = '.'
                # Add IsNull and HasData checks for the pointer decoder, before accessing its data.
                # Note that this does not handle the decoded struct member cases for static arrays, which would need to use '.' instead of '->'.
                body += indent + 'if (!{prefix}{name}{op}IsNull() && ({prefix}{name}{op}HasData()))\n'.format(prefix=valuePrefix, name=value.name, op=accessOperator)
                body += indent + '{\n'
                tail = indent + '}\n' + tail
                indent += ' ' * self.INDENT_SIZE
            # Get the pointer from the pointer decoder object.
            valueName = '{}_ptr'.format(value.name)
            if isHandle:
                body += indent + 'auto {} = {}{}{}GetPointer();\n'.format(valueName, valuePrefix, value.name, accessOperator)
            else:
                body += indent + 'auto {} = {}{}{}GetMetaStructPointer();\n'.format(valueName, valuePrefix, value.name, accessOperator)
            # Add a for loop for an array of values.
            if value.isArray:
                indexName = '{}_index'.format(value.name)
                countName = '{}_count'.format(value.name)
                body += indent + 'size_t {} = {}{}{}GetLength();\n'.format(countName, valuePrefix, value.name, accessOperator)
                body += indent + 'for (size_t {i} = 0; {i} < {}; ++{i})\n'.format(countName, i=indexName)
                body += indent + '{\n'
                tail = indent + '}\n' + tail
                indent += ' ' * self.INDENT_SIZE
        # Insert commands to add handles to a container, or to process struct members that contain handles.
        if isHandle:
            if value.isArray:
                valueName = '{}[{}]'.format(valueName, indexName)
            elif value.isPointer:
                valueName = '(*{})'.format(valueName)
            if value.baseType in self.CONTAINER_HANDLE_TYPES:
                body += indent + 'GetTable().AddContainerToUser({}, {});\n'.format(commandParamName, valueName)
            elif value.baseType in self.USER_HANDLE_TYPES:
                body += indent + 'GetTable().AddUserToUser({}, {});\n'.format(commandParamName, valueName)
            else:
                body += indent + 'GetTable().AddResourceToUser({}, {});\n'.format(commandParamName, valueName)
        elif self.isStruct(value.baseType) and (value.baseType in self.structsWithHandles):
            if value.isArray:
                accessOperator = '[{}].'.format(indexName)
            else:
                accessOperator = '->'
            # NOTE: the loop variable below shadows the 'index' parameter;
            # the recursive calls receive the member index, which preserves
            # the blank-line separation behavior.
            for index, entry in enumerate(self.structsWithHandles[value.baseType]):
                if entry.name == 'pNext':
                    # Emit a switch over sType that dispatches each known
                    # extension struct in the pNext chain.
                    extStructsWithHandles = [extStruct for extStruct in self.registry.validextensionstructs[value.baseType] if extStruct in self.structsWithHandles]
                    if extStructsWithHandles:
                        body += indent + 'const VkBaseInStructure* pnext_header = nullptr;\n'
                        body += indent + 'if ({name}->pNext != nullptr)\n'.format(name=valueName)
                        body += indent + '{\n'
                        indent += ' ' * self.INDENT_SIZE
                        body += indent + 'pnext_header = reinterpret_cast<const VkBaseInStructure*>({}->pNext->GetPointer());\n'.format(valueName)
                        indent = indent[:-self.INDENT_SIZE]
                        body += indent + '}\n'
                        body += indent + 'while (pnext_header)\n'
                        body += indent + '{\n'
                        indent += ' ' * self.INDENT_SIZE
                        body += indent + 'switch (pnext_header->sType)\n'
                        body += indent + '{\n'
                        indent += ' ' * self.INDENT_SIZE
                        body += indent + 'default:\n'
                        indent += ' ' * self.INDENT_SIZE
                        body += indent + 'break;\n'
                        indent = indent[:-self.INDENT_SIZE]
                        for extStruct in extStructsWithHandles:
                            body += indent + 'case {}:\n'.format(self.pNextStructs[extStruct])
                            body += indent + '{\n'
                            indent += ' ' * self.INDENT_SIZE
                            body += indent + 'auto pnext_value = reinterpret_cast<const Decoded_{}*>({}->pNext->GetPointer());\n'.format(extStruct, valueName)
                            body += self.trackCommandHandle(index, commandParamName, ValueInfo('pnext_value', extStruct, 'const {} *'.format(extStruct), 1), '', indent=indent)
                            body += indent + 'break;\n'
                            indent = indent[:-self.INDENT_SIZE]
                            body += indent + '}\n'
                        indent = indent[:-self.INDENT_SIZE]
                        body += indent + '}\n'
                        body += indent + 'pnext_header = pnext_header->pNext;\n'
                        indent = indent[:-self.INDENT_SIZE]
                        body += indent + '}\n'
                else:
                    body += self.trackCommandHandle(index, commandParamName, entry, valueName + accessOperator, indent)
        return body + tail
    # yapf: enable
| 47.082237 | 175 | 0.591979 | 12,909 | 0.901907 | 0 | 0 | 0 | 0 | 0 | 0 | 5,362 | 0.374624 |
0e90f2a19fcb16a87a416f29066cb56fdda1a489 | 8,658 | py | Python | python_modules/libraries/dagster-k8s/dagster_k8s/container_context.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-k8s/dagster_k8s/container_context.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-k8s/dagster_k8s/container_context.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, cast
import kubernetes
import dagster._check as check
from dagster.config.validate import process_config
from dagster.core.errors import DagsterInvalidConfigError
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.utils import parse_env_var
from dagster.utils import make_readonly_value, merge_dicts
if TYPE_CHECKING:
from . import K8sRunLauncher
from .job import DagsterK8sJobConfig
from .models import k8s_snake_case_dict
def _dedupe_list(values):
    """Return *values* with duplicates removed.

    Each entry is first converted with ``make_readonly_value`` so that
    otherwise-unhashable items (lists/dicts) can be placed in a set.
    NOTE(review): set-based dedupe does not preserve input order — the
    result order is unspecified, which callers appear to tolerate.
    """
    # Set comprehension instead of set([...]) — avoids building a throwaway list.
    return list({make_readonly_value(value) for value in values})
class K8sContainerContext(
    NamedTuple(
        "_K8sContainerContext",
        [
            ("image_pull_policy", Optional[str]),
            ("image_pull_secrets", List[Dict[str, str]]),
            ("service_account_name", Optional[str]),
            ("env_config_maps", List[str]),
            ("env_secrets", List[str]),
            ("env_vars", List[str]),
            ("volume_mounts", List[Dict[str, Any]]),
            ("volumes", List[Dict[str, Any]]),
            ("labels", Dict[str, str]),
            ("namespace", Optional[str]),
            ("resources", Dict[str, Any]),
        ],
    )
):
    """Encapsulates configuration that can be applied to a K8s job running Dagster code.
    Can be persisted on a PipelineRun at run submission time based on metadata from the
    code location and then included in the job's configuration at run launch time or step
    launch time."""

    def __new__(
        cls,
        image_pull_policy: Optional[str] = None,
        image_pull_secrets: Optional[List[Dict[str, str]]] = None,
        service_account_name: Optional[str] = None,
        env_config_maps: Optional[List[str]] = None,
        env_secrets: Optional[List[str]] = None,
        env_vars: Optional[List[str]] = None,
        volume_mounts: Optional[List[Dict[str, Any]]] = None,
        volumes: Optional[List[Dict[str, Any]]] = None,
        labels: Optional[Dict[str, str]] = None,
        namespace: Optional[str] = None,
        resources: Optional[Dict[str, Any]] = None,
    ):
        # Validate every field and normalize None to empty containers;
        # volume specs are converted to the snake_case form expected by the
        # kubernetes client models.
        return super(K8sContainerContext, cls).__new__(
            cls,
            image_pull_policy=check.opt_str_param(image_pull_policy, "image_pull_policy"),
            image_pull_secrets=check.opt_list_param(image_pull_secrets, "image_pull_secrets"),
            service_account_name=check.opt_str_param(service_account_name, "service_account_name"),
            env_config_maps=check.opt_list_param(env_config_maps, "env_config_maps"),
            env_secrets=check.opt_list_param(env_secrets, "env_secrets"),
            env_vars=check.opt_list_param(env_vars, "env_vars"),
            volume_mounts=[
                k8s_snake_case_dict(kubernetes.client.V1VolumeMount, mount)
                for mount in check.opt_list_param(volume_mounts, "volume_mounts")
            ],
            volumes=[
                k8s_snake_case_dict(kubernetes.client.V1Volume, volume)
                for volume in check.opt_list_param(volumes, "volumes")
            ],
            labels=check.opt_dict_param(labels, "labels"),
            namespace=check.opt_str_param(namespace, "namespace"),
            resources=check.opt_dict_param(resources, "resources"),
        )

    def merge(self, other: "K8sContainerContext") -> "K8sContainerContext":
        """Combine two contexts: list fields are deduped unions, dict fields
        are merged, and scalar fields prefer ``other`` when set."""
        # Lists of attributes that can be combined are combined, scalar values are replaced
        # prefering the passed in container context
        return K8sContainerContext(
            image_pull_policy=(
                other.image_pull_policy if other.image_pull_policy else self.image_pull_policy
            ),
            image_pull_secrets=_dedupe_list(other.image_pull_secrets + self.image_pull_secrets),
            service_account_name=(
                other.service_account_name
                if other.service_account_name
                else self.service_account_name
            ),
            env_config_maps=_dedupe_list(other.env_config_maps + self.env_config_maps),
            env_secrets=_dedupe_list(other.env_secrets + self.env_secrets),
            env_vars=_dedupe_list(other.env_vars + self.env_vars),
            volume_mounts=_dedupe_list(other.volume_mounts + self.volume_mounts),
            volumes=_dedupe_list(other.volumes + self.volumes),
            # merge_dicts: later dict wins, so self's labels take precedence
            # on key conflicts here.
            labels=merge_dicts(other.labels, self.labels),
            namespace=other.namespace if other.namespace else self.namespace,
            resources=other.resources if other.resources else self.resources,
        )

    def get_environment_dict(self) -> Dict[str, str]:
        """Parse the 'KEY=value' strings in env_vars into a dict."""
        parsed_env_var_tuples = [parse_env_var(env_var) for env_var in self.env_vars]
        return {env_var_tuple[0]: env_var_tuple[1] for env_var_tuple in parsed_env_var_tuples}

    @staticmethod
    def create_for_run(
        pipeline_run: PipelineRun, run_launcher: Optional["K8sRunLauncher"]
    ) -> "K8sContainerContext":
        """Build the effective context for a run: run launcher settings,
        overlaid by the code location's container_context (if any)."""
        context = K8sContainerContext()
        if run_launcher:
            context = context.merge(
                K8sContainerContext(
                    image_pull_policy=run_launcher.image_pull_policy,
                    image_pull_secrets=run_launcher.image_pull_secrets,
                    service_account_name=run_launcher.service_account_name,
                    env_config_maps=run_launcher.env_config_maps,
                    env_secrets=run_launcher.env_secrets,
                    env_vars=run_launcher.env_vars,
                    volume_mounts=run_launcher.volume_mounts,
                    volumes=run_launcher.volumes,
                    labels=run_launcher.labels,
                    namespace=run_launcher.job_namespace,
                    resources=run_launcher.resources,
                )
            )
        run_container_context = (
            pipeline_run.pipeline_code_origin.repository_origin.container_context
            if pipeline_run.pipeline_code_origin
            else None
        )
        if not run_container_context:
            return context
        return context.merge(K8sContainerContext.create_from_config(run_container_context))

    @staticmethod
    def create_from_config(run_container_context) -> "K8sContainerContext":
        """Validate the 'k8s' section of a container_context dict against the
        job config schema and build a context from it.

        Raises
        ------
        DagsterInvalidConfigError
            If the 'k8s' section fails schema validation.
        """
        run_k8s_container_context = (
            run_container_context.get("k8s", {}) if run_container_context else {}
        )
        if not run_k8s_container_context:
            return K8sContainerContext()
        processed_container_context = process_config(
            DagsterK8sJobConfig.config_type_container_context(), run_k8s_container_context
        )
        if not processed_container_context.success:
            raise DagsterInvalidConfigError(
                "Errors while parsing k8s container context",
                processed_container_context.errors,
                run_k8s_container_context,
            )
        processed_context_value = cast(Dict, processed_container_context.value)
        return K8sContainerContext(
            image_pull_policy=processed_context_value.get("image_pull_policy"),
            image_pull_secrets=processed_context_value.get("image_pull_secrets"),
            service_account_name=processed_context_value.get("service_account_name"),
            env_config_maps=processed_context_value.get("env_config_maps"),
            env_secrets=processed_context_value.get("env_secrets"),
            env_vars=processed_context_value.get("env_vars"),
            volume_mounts=processed_context_value.get("volume_mounts"),
            volumes=processed_context_value.get("volumes"),
            labels=processed_context_value.get("labels"),
            namespace=processed_context_value.get("namespace"),
            resources=processed_context_value.get("resources"),
        )

    def get_k8s_job_config(self, job_image, run_launcher) -> DagsterK8sJobConfig:
        """Produce a DagsterK8sJobConfig combining this context with the run
        launcher's instance-level settings; ``job_image`` overrides the
        launcher's image when provided."""
        return DagsterK8sJobConfig(
            job_image=job_image if job_image else run_launcher.job_image,
            dagster_home=run_launcher.dagster_home,
            image_pull_policy=self.image_pull_policy,
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            instance_config_map=run_launcher.instance_config_map,
            postgres_password_secret=run_launcher.postgres_password_secret,
            env_config_maps=self.env_config_maps,
            env_secrets=self.env_secrets,
            env_vars=self.env_vars,
            volume_mounts=self.volume_mounts,
            volumes=self.volumes,
            labels=self.labels,
            resources=self.resources,
        )
| 44.4 | 99 | 0.664703 | 8,026 | 0.927004 | 0 | 0 | 2,968 | 0.342804 | 0 | 0 | 1,044 | 0.120582 |
0e92fd11a801e6b4a8ab702830466beac2af8d01 | 8,809 | py | Python | fudge-domain.py | jordantrc/domain-fudgery | 21e86adca50ec73662159c0c2c5efd5a93f49cd2 | [
"MIT"
] | null | null | null | fudge-domain.py | jordantrc/domain-fudgery | 21e86adca50ec73662159c0c2c5efd5a93f49cd2 | [
"MIT"
] | null | null | null | fudge-domain.py | jordantrc/domain-fudgery | 21e86adca50ec73662159c0c2c5efd5a93f49cd2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# fudge-domain.py
#
# Finds potentially useful domains
# which are visually similar to the
# target domain and ascertains whether
# these domains are currently available
# (not registered). Also checks if any TLDs
# are not registered for the domain.
#
# Usage:
# domain-fudgery.py [options] [domain]
# Only works with second-level domain names,
# e.g. google.com, amazon.co.uk
#
# OPTIONS:
#
# TLD Options:
# --country-code-tlds check country code TLDs
# --original-tlds check original TLDs (.com, .net, .org)
# --custom-tlds check additional list of TLDS, comma separated
#
# General Options:
# --file load domains from the given filename
# one domain per line
# --no-active use only passive checks
# --no-whois do not perform whois checks
#
import argparse
import itertools
import os
import sys
from dns import name
from dns import message
CHARACTER_LOOK_ALIKES = {
'a': ['d'],
'A': ['4'],
'b': ['1o', 'lo'],
'B': ['8'],
'd': ['ol', 'o1'],
'E': ['3'],
'i': ['1', 'l'],
'I': ['1', 'l'],
'l': ['1', 'i'],
'm': ['rn'],
'o': ['0'],
'O': ['0'],
'Q': ['O'],
's': ['5'],
'S': ['5'],
'T': ['7'],
'w': ['vv'],
'W': ['VV'],
'z': ['2'],
'Z': ['2'],
'0': ['O'],
'1': ['l'],
'2': ['Z'],
'4': ['A'],
'5': ['S'],
'7': ['T'],
'8': ['B']
}
# original TLDs, does not include restricted-use
# TLDs .edu, .gov, .mil, .int
TLDS_ORIGINAL = ['.com', '.net', '.org']
# country code TLDs
TLDS_COUNTRY_CODE = [
'.ac','.ad','.ae','.af','.ag','.ai','.al','.am','.ao','.aq','.ar','.as','.at','.au','.aw','.ax',
'.az','.ba','.bb','.bd','.be','.bf','.bg','.bh','.bi','.bj','.bm','.bn','.bo','.bq','.br','.bs',
'.bt','.bw','.by','.bz','.ca','.cc','.cd','.cf','.cg','.ch','.ci','.ck','.cl','.cm','.cn','.co',
'.cr','.cu','.cv','.cw','.cx','.cy','.cz','.de','.dj','.dk','.dm','.do','.dz','.ec','.ee','.eg',
'.eh','.er','.es','.et','.eu','.fi','.fj','.fk','.fm','.fo','.fr','.ga','.gd','.ge','.gf','.gg',
'.gh','.gi','.gl','.gm','.gn','.gp','.gq','.gr','.gs','.gt','.gu','.gw','.gy','.hk','.hm','.hn',
'.hr','.ht','.hu','.id','.ie','.il','.im','.in','.io','.iq','.ir','.is','.it','.je','.jm','.jo',
'.jp','.ke','.kg','.kh','.ki','.km','.kn','.kp','.kr','.kw','.ky','.kz','.la','.lb','.lc','.li',
'.lk','.lr','.ls','.lt','.lu','.lv','.ly','.ma','.mc','.md','.me','.mg','.mh','.mk','.ml','.mm',
'.mn','.mo','.mp','.mq','.mr','.ms','.mt','.mu','.mv','.mw','.mx','.my','.mz','.na','.nc','.ne',
'.nf','.ng','.ni','.nl','.no','.np','.nr','.nu','.nz','.om','.pa','.pe','.pf','.pg','.ph','.pk',
'.pl','.pm','.pn','.pr','.ps','.pt','.pw','.py','.qa','.re','.ro','.rs','.ru','.rw','.sa','.sb',
'.sc','.sd','.se','.sg','.sh','.si','.sk','.sl','.sm','.sn','.so','.sr','.ss','.st','.su','.sv',
'.sx','.sy','.sz','.tc','.td','.tf','.tg','.th','.tj','.tk','.tl','.tm','.tn','.to','.tr','.tt',
'.tv','.tw','.tz','.ua','.ug','.uk','.us','.uy','.uz','.va','.vc','.ve','.vg','.vi','.vn','.vu',
'.wf','.ws','.ye','.yt','.za','.zm'
]
# country codes with restricted second level domains (individuals or companies can
# only register third level domains)
TLDS_COUNTRY_CODE_RESTRICTED_LVL2 = [
'.au','.bn','.bt','.cy','.et','.fk','.gh','.gn','.gu','.jm','.ke','.kh','.kp','.kw','.lb','.lr',
'.ls','.mm','.mq','.mt','.mz','.ni','.np','.pa','.pg','.py','.qa','.sb','.sv','.sz','.th','.tz',
'.ve','.ye'
]
# the second level domains for those domains above that can be used
# for third level domains
TLDS_COUNTRY_CODE_UNRESTRICTED_LVL2 = [
'.com.au','.net.au','.org.au','.asn.au','.id.au','.com.bn','.edu.bn','.net.bn','.org.bn','.bt',
'.com.bt','.edu.bt','.net.bt','.org.bt','.ac.cy','.net.cy','.org.cy','.pro.cy','.name.cy',
'.ekloges.cy','.tm.cy','.ltd.cy','.biz.cy','.press.cy','.parliament.cy','.com.cy',
'.centralbank.cy','.com.et','.org.et','.edu.et','.net.et','.name.et','.co.fk','.org.fk',
'.ac.fk','.nom.fk','.net.fk','.com.gh','.edu.gh','.com.gn','.ac.gn','.org.gn','.net.gn',
'.com.gu','.net.gu','.org.gu','.edu.gu','.com.jm','.net.jm','.org.jm','.edu.jm','.co.ke',
'.or.ke','.ne.ke','.go.ke','.ac.ke','.sc.ke','.me.ke','.mobi.ke','.info.ke','.per.kh','.com.kh',
'.edu.kh','.net.kh','.org.kh','.aca.kp','.com.kp','.edu.kp','.law.kp','.org.kp','.rep.kp',
'.net.kp','.sca.kp','.com.kw','.ind.kw','.net.kw','.org.kw','.emb.kw','.edu.kw','.com.lb',
'.edu.lb','.net.lb','.org.lb','.com.lr','.edu.lr','.org.lr','.net.lr','.ac.ls','.co.ls',
'.net.ls','.nul.ls','.org.ls','.sc.ls','.net.mm','.com.mm','.edu.mm','.org.mm','.edu.mt',
'.com.mt','.net.mt','.org.mt','.co.mz','.net.mz','.org.mz','.ac.mz','.edu.mz','.gob.ni',
'.co.ni','.com.ni','.ac.ni','.edu.ni','.org.ni','.nom.ni','.net.ni','.edu.np','.com.np',
'.org.np','.net.np','.aero.np','.asia.np','.biz.np','.coop.np','.info.np','.jobs.np','.mobi.np',
'.museum.np','.name.np','.pro.np','.services.np','.travel.np','.net.pa','.com.pa','.ac.pa',
'.sld.pa','.edu.pa','.org.pa','.abo.pa','.ing.pa','.med.pa','.nom.pa','.com.pg','.net.pg',
'.ac.pg','.org.pg','.com.py','.coop.py','.edu.py','.org.py','.net.py','.una.py','.com.qa',
'.edu.qa','.sch.qa','.net.qa','.org.qa','.com.sb','.net.sb','.edu.sv','.com.sv','.org.sv',
'.red.sv','.co.sz','.ac.sz','.org.sz','.ac.th','.co.th','.or.th','.net.th','.in.th','.co.tz',
'.ac.tz','.or.tz','.ne.tz','.hotel.tz','.mobi.tz','.tv.tz','.info.tz','.me.tz','.arts.ve',
'.co.ve','.com.ve','.info.ve','.net.ve','.org.ve','.radio.ve','.web.ve','.com.ye','.co.ye',
'.ltd.ye','.me.ye','.net.ye','.org.ye','.plc.ye'
]
def replacement_combinations(indices):
"""returns a list of all possible replacement combinations for count
instances of a character in a string"""
result = []
for i in range(1, len(indices) + 1):
for c in itertools.combinations(indices, i):
result.append(c)
return result
def permutate_domain(domain, character, replacements):
"""returns all permutations of character replacements"""
new_domains = []
indices = [ i for i, ltr in enumerate(domain) if ltr == character ]
combinations = replacement_combinations(indices)
for c in combinations:
new_domain = domain
for i in c:
for r in replacements:
new_domain = new_domain[:i] + r + new_domain[i + 1:]
new_domains.append(new_domain)
return new_domains
def domain_permutations(domain, orig_tld, country_code_tlds=False, original_tlds=False, custom_tlds=[]):
"""returns a list of domains to check"""
result = []
domains = [domain, domain.upper()]
# character replacement
for c in CHARACTER_LOOK_ALIKES.keys():
for d in domains:
count = d.count(c)
if count > 0:
permutated_domains = permutate_domain(d, c, CHARACTER_LOOK_ALIKES[c])
for p in permutated_domains:
print(p + orig_tld)
def main():
"""Main function."""
parser = argparse.ArgumentParser(description="Finds fudged domains.")
parser.add_argument("--country-code-tlds", action='store_true', dest="country_code_tld", help="look for unregistered country code TLDs")
parser.add_argument("--original-tlds", action='store_true', dest="original_tld", help="look for unregistered original TLDs")
parser.add_argument("--custom-tlds", dest="custom_tld", help="look for custom list of TLDs")
parser.add_argument("--no-whois", action='store_true', dest="no_whois", help="disable whois queries")
parser.add_argument("--file", dest="file", help="file containing DNS names to load")
parser.add_argument("--no-active", action='store_true', dest="no_active", help="disable active checks")
parser.add_argument("domain", nargs='*', help="domain to fudge")
args = parser.parse_args()
# ensure at least one domain was provided
if not args.file and not args.domain:
print("[-] must provide a domain as argument or a file containing domains")
sys.exit(1)
domains = []
if args.file:
if os.path.isfile(args.file):
with open(args.file, "r") as fd:
domains = fd.readlines()
else:
print("[-] file not found or permission denied")
sys.exit(1)
if args.domain is not None:
domains.append(args.domain[0])
# for each domain, determine TLDs for domain
for d in domains:
domain_parts = d.split(".")
domain = domain_parts[0]
tld = "." + ".".join(domain_parts[1:])
domain_permutations(domain, tld)
if __name__ == "__main__":
main() | 43.181373 | 140 | 0.517993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,419 | 0.615166 |
0e968767fd95ce20a6bb33677442fc2d6eaecce8 | 1,680 | py | Python | crudbuilder/tables.py | rbuchli/django-crudbuilder | c43001f19c6dc64b0bd8ac389a95067da735073d | [
"Apache-2.0"
] | null | null | null | crudbuilder/tables.py | rbuchli/django-crudbuilder | c43001f19c6dc64b0bd8ac389a95067da735073d | [
"Apache-2.0"
] | null | null | null | crudbuilder/tables.py | rbuchli/django-crudbuilder | c43001f19c6dc64b0bd8ac389a95067da735073d | [
"Apache-2.0"
] | null | null | null | import django_tables2 as tables
from django_tables2.utils import A
from .abstract import BaseBuilder
from .helpers import model_class_form, plural, custom_postfix_url
class TableBuilder(BaseBuilder):
"""
Table builder which returns django_tables2 instance
app : app name
model : model name for which table will be generated
table_fields : display fields for tables2 class
css_table : css class for generated tables2 class
"""
def generate_table(self):
model_class = self.get_model_class()
detail_url_name = '{}-{}-detail'.format(
self.app, custom_postfix_url(self.crud(), self.model)
)
main_attrs = dict(
pk=tables.LinkColumn(detail_url_name, args=[A('pk')])
)
# separator = ', '
meta_attrs = dict(
model=self.get_model_class,
# There may be only one field. That's why we have to check first and create a tuple later
# changed by Reto Buchli, 04.06.2021, removed, wrong column order
# fields=('pk',) + tuple(self.tables2_fields) if isinstance(self.tables2_fields, str) else (
# self.tables2_fields) if self.tables2_fields else ('pk',),
fields=('pk',) + self.tables2_fields if self.tables2_fields else ('pk',),
attrs={
"class": self.tables2_css_class,
"empty_text": "No {} exist".format(plural(self.model, ''))
})
main_attrs['Meta'] = type('Meta', (), meta_attrs)
klass = type(
model_class_form(self.model + 'Table'),
(tables.Table,),
main_attrs
)
return klass
| 34.285714 | 104 | 0.610714 | 1,509 | 0.898214 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.389286 |
0e96eee1110feda2c2d2b2c769427a9d03407ff4 | 4,769 | py | Python | sales/tests/test_views.py | MatsLanGoH/greengrocer | bf22bd15e359f1e6d1827992db5abde9f8db53d5 | [
"MIT"
] | null | null | null | sales/tests/test_views.py | MatsLanGoH/greengrocer | bf22bd15e359f1e6d1827992db5abde9f8db53d5 | [
"MIT"
] | null | null | null | sales/tests/test_views.py | MatsLanGoH/greengrocer | bf22bd15e359f1e6d1827992db5abde9f8db53d5 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from sales.models import Fruit, Transaction
from datetime import timedelta
# Create your tests here.
class FruitListViewTest(TestCase):
def setUp(self):
# Create a user
test_user1 = User.objects.create_user(username='testuser1')
test_user1.set_password('12345')
test_user1.save()
@classmethod
def setUpTestData(cls):
# Create 30 types of fruit for pagination tests
number_of_fruits = 30
for fruit_num in range(number_of_fruits):
Fruit.objects.create(name='Fruit %s' % fruit_num, price=100)
def test_view_redirects_to_top_when_not_logged_in(self):
resp = self.client.get(reverse('fruits'))
self.assertEqual(resp.status_code, 302)
def test_view_url_exists_at_desired_location(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('fruits'))
self.assertEqual(resp.status_code, 200)
def test_view_uses_correct_template(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('fruits'))
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sales/fruit_list.html')
def test_pagination_is_twenty(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('fruits'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('is_paginated' in resp.context)
self.assertTrue(resp.context['is_paginated'] is True)
self.assertTrue(len(resp.context['fruit_list']) == 20)
def test_pagination_lists_all_items(self):
# Get second page and confirm it has exactly 10 remaining items
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('fruits') + '?page=2')
self.assertEqual(resp.status_code, 200)
self.assertTrue('is_paginated' in resp.context)
self.assertTrue(resp.context['is_paginated'] is True)
self.assertTrue(len(resp.context['fruit_list']) == 10)
class TransactionListViewTest(TestCase):
def setUp(self):
# Create a user
test_user1 = User.objects.create_user(username='testuser1')
test_user1.set_password('12345')
test_user1.save()
@classmethod
def setUpTestData(cls):
# Create 3 types of fruit
number_of_fruits = 30
for fruit_num in range(number_of_fruits):
Fruit.objects.create(name='Fruit %s' % fruit_num, price=100)
# Create 30 types of Transaction for pagination tests
number_of_transactions = 30
for transaction_num in range(number_of_transactions):
fruit = Fruit.objects.get(id=transaction_num + 1)
Transaction.objects.create(fruit=fruit, num_items=transaction_num, amount=transaction_num * 100,
created_at=timezone.now() - timedelta(
days=number_of_transactions - transaction_num))
def test_view_redirects_to_top_when_not_logged_in(self):
resp = self.client.get(reverse('transactions'))
self.assertEqual(resp.status_code, 302)
def test_view_url_exists_at_desired_location(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('transactions'))
self.assertEqual(resp.status_code, 200)
def test_view_uses_correct_template(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('transactions'))
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sales/transaction_list.html')
def test_pagination_is_twenty(self):
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('transactions'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('is_paginated' in resp.context)
self.assertTrue(resp.context['is_paginated'] is True)
self.assertTrue(len(resp.context['transaction_list']) == 20)
def test_pagination_lists_all_items(self):
# Get second page and confirm it has exactly 10 remaining items
self.client.login(username='testuser1', password='12345')
resp = self.client.get(reverse('transactions') + '?page=2')
self.assertEqual(resp.status_code, 200)
self.assertTrue('is_paginated' in resp.context)
self.assertTrue(resp.context['is_paginated'] is True)
self.assertTrue(len(resp.context['transaction_list']) == 10)
| 40.415254 | 108 | 0.682952 | 4,513 | 0.94632 | 0 | 0 | 977 | 0.204865 | 0 | 0 | 858 | 0.179912 |
0e9715b939023c2f60d9e725e63e63323e77455b | 2,591 | py | Python | src/solution/112_path_sum.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | 1 | 2021-03-01T07:33:45.000Z | 2021-03-01T07:33:45.000Z | src/solution/112_path_sum.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | null | null | null | src/solution/112_path_sum.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | null | null | null | import random
from src.datastruct.bin_treenode import TreeNode
import unittest
class Solution:
def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:
num = random.randint(0, 1)
d = {
0: self.dfs,
1: self.postorder,
2: self.bfs,
}
return d[num](root, targetSum)
def bfs(self, root: TreeNode, targetSum: int) -> bool:
if root is None:
return False
stack = [(root, root.val)]
while len(stack) > 0:
node, path_sum = stack.pop()
if node.left is None and node.right is None and path_sum == targetSum:
return True
if node.right is not None:
stack.append((node.right, path_sum + node.right.val))
if node.left is not None:
stack.append((node.left, path_sum + node.left.val))
return False
def postorder(self, root: TreeNode, targetSum: int) -> bool:
node = root
stack = []
pre_node = None
path_sum = 0
while True:
while node is not None:
path_sum += node.val
stack.append(node)
node = node.left
if len(stack) <= 0:
break
if stack[-1].right != pre_node:
node = stack[-1].right
pre_node = None
else:
pre_node = stack.pop()
if pre_node.left is None and pre_node.right is None and path_sum == targetSum:
return True
path_sum -= pre_node.val
return False
def dfs(self, root: TreeNode, targetSum: int) -> bool:
def _dfs(node: TreeNode, path_sum: int) -> bool:
if node is None:
return False
if node.left is None and node.right is None:
return path_sum + node.val == targetSum
path_sum += node.val
return _dfs(node.left, path_sum) or _dfs(node.right, path_sum)
return _dfs(root, 0)
class TestSolution(unittest.TestCase):
def setUp(self):
self.test_case = [
([5, 4, 8, 11, None, 13, 4, 7, 2, None, None, None, 1], 22, True),
([1, 2, 3], 5, False),
([], 1, False)
]
self.s = Solution()
def test_solution(self):
for nums, target, answer in self.test_case:
root = TreeNode.create(nums)
ans = self.s.hasPathSum(root, target)
self.assertEqual(answer, ans)
if __name__ == '__main__':
unittest.main()
| 30.845238 | 94 | 0.521034 | 2,457 | 0.948283 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.00386 |
0e97700b68b96eb221fafae3d8068522c2cf22b7 | 728 | py | Python | setup.py | aloosley/python-highcharts-df | e9c644cf74dae17cbe52dc9912c56b0a465f9d95 | [
"MIT"
] | null | null | null | setup.py | aloosley/python-highcharts-df | e9c644cf74dae17cbe52dc9912c56b0a465f9d95 | [
"MIT"
] | null | null | null | setup.py | aloosley/python-highcharts-df | e9c644cf74dae17cbe52dc9912c56b0a465f9d95 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Alex Loosley <a.loosley@reply.de>"
from setuptools import setup, find_packages
setup(
name='python-highcharts_df',
version='0.1.0',
description='python-highcharts_df wrapper for customizable pretty plotting quickly from pandas dataframes',
author="Alex Loosley",
author_email='a.loosley@reply.de',
license='GNU',
packages=find_packages(),
zip_safe=False,
install_requires=[
"colour>=0.1.2",
"python-highcharts>=0.3.0"
],
dependency_links=[],
# include_package_data=True, # should work, but doesn't, I think pip does not recognize git automatically
package_data={
'data': ['*/*'],
}
)
| 26.962963 | 111 | 0.656593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.548077 |
0e97a5a6084ace1e9c1382face6d45b053e07a8c | 4,013 | py | Python | main.py | XianwuLin/debian-package-dependencies-terminator | 46ff43b08404737e9dc95c9550f6711ecd2dc9ec | [
"MIT"
] | null | null | null | main.py | XianwuLin/debian-package-dependencies-terminator | 46ff43b08404737e9dc95c9550f6711ecd2dc9ec | [
"MIT"
] | null | null | null | main.py | XianwuLin/debian-package-dependencies-terminator | 46ff43b08404737e9dc95c9550f6711ecd2dc9ec | [
"MIT"
] | null | null | null | # /usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import logging
import os
import shutil
import tarfile
import tempfile
import docker
from flask import Flask, jsonify, request, send_file, abort
app = Flask(__name__)
port = 8765
download_folder = "./debs"
if not os.path.exists(download_folder):
os.mkdir(download_folder)
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w") as tar:
for fpath in glob.glob(os.path.join(source_dir, "*.deb")):
tar.add(fpath, arcname=os.path.basename(fpath))
Docker_image_dict = {
"ubuntu 16.04": "ubuntu1604:0.1.0"
}
@app.route('/api/dpdt/create', methods=['POST'])
def create():
req = request.json
system_version = req["system_version"]
command = req["command"]
tmp_folder = tempfile.mkdtemp()
os.mkdir(os.path.join(tmp_folder, 'apt'))
with open(os.path.join(tmp_folder, 'main.sh'), "w") as f:
f.write(command.replace("\r\n", "\n"))
client = docker.from_env()
container = client.containers.run(Docker_image_dict[system_version],
volumes={os.path.join(tmp_folder, 'apt/'): {'bind': '/var/cache/apt/archives/',
'mode': 'rw'},
os.path.join(tmp_folder, 'main.sh'): {'bind': '/root/main.sh',
'mode': 'ro'}},
detach=True)
return jsonify({"status": 2000, "result": {"id": container.id}})
@app.route('/api/dpdt/status', methods=['GET'])
def status():
container_id = request.args.get('id')
client = docker.from_env()
try:
container = client.containers.get(container_id)
if container.status == "exited":
local_debs_path = ""
for item in container.attrs["Mounts"]:
if item["Destination"] == "/var/cache/apt/archives":
local_debs_path = item["Source"]
if container.attrs["State"]["ExitCode"] != 0:
# if return code is not zero, there must be some mistakes in command.
result = {"status": 4001, "message": container.logs()}
shutil.rmtree(os.path.dirname(local_debs_path))
container.remove()
return jsonify(result)
else:
# create finish, and create tar file
if not local_debs_path:
return jsonify({"status": 4002, "message": "find debs folder error"})
deb_tar_file_path = os.path.join(download_folder, container_id + ".tar")
make_tarfile(deb_tar_file_path, local_debs_path)
# clean container and tmp folder
shutil.rmtree(os.path.dirname(local_debs_path))
container.remove()
return jsonify({"status": 2000, "message": "finish",
"result": {"path": '/api/dpdt/download?id=' + container_id}})
elif container.status == "dead":
# container dead
return jsonify({"status": 4001, "message": container.logs()})
else:
# container is running and return logs
return jsonify({"status": 2001, "message": "running", "result": {"logs": container.logs()}})
except Exception as e:
logging.exception(e)
return jsonify({"status": 4000, "message": "id is unvalid"})
@app.route('/api/dpdt/download', methods=['GET'])
def download():
container_id = request.args.get('id')
deb_tar_file_path = os.path.join(download_folder, container_id + ".tar")
if os.path.exists(deb_tar_file_path):
return send_file(deb_tar_file_path, as_attachment=True)
else:
abort(404)
@app.route('/', methods=['GET'])
def index():
return send_file('./index.html')
if __name__ == '__main__':
app.run(port=port, host='0.0.0.0')
| 37.157407 | 117 | 0.56616 | 0 | 0 | 0 | 0 | 3,306 | 0.823823 | 0 | 0 | 875 | 0.218041 |
0e97bd1c773c1a5622d071293cbf5def12d114a3 | 3,101 | py | Python | api/models.py | WalkingMachine/wonderland | 44e27ccdd981c6e6d2a8e7944156a8bc9e730931 | [
"Apache-2.0"
] | 3 | 2017-06-10T15:49:47.000Z | 2019-03-15T10:04:31.000Z | api/models.py | WalkingMachine/wonderland | 44e27ccdd981c6e6d2a8e7944156a8bc9e730931 | [
"Apache-2.0"
] | 11 | 2017-06-05T20:19:32.000Z | 2018-06-16T21:03:37.000Z | api/models.py | WalkingMachine/wonderland | 44e27ccdd981c6e6d2a8e7944156a8bc9e730931 | [
"Apache-2.0"
] | 2 | 2017-07-17T18:03:45.000Z | 2021-11-12T03:36:58.000Z | from django.db import models
# Description of an object in the arena
class Entity(models.Model):
entityId = models.AutoField(primary_key=True)
entityClass = models.CharField(max_length=30)
entityName = models.CharField(max_length=30, null=True, blank=True)
entityCategory = models.CharField(max_length=30, null=True, blank=True)
entityColor = models.CharField(max_length=30, null=True, blank=True)
entityWeight = models.FloatField(default=None, null=True, blank=True)
entitySize = models.FloatField(default=None, null=True, blank=True)
entityIsRoom = models.BooleanField(default=False, blank=True)
entityIsWaypoint = models.BooleanField(default=False, blank=True)
entityIsContainer = models.BooleanField(default=False, blank=True)
entityGotPosition = models.BooleanField(default=False, blank=True)
# The position of the object in space if available
entityPosX = models.FloatField(default=None, null=True, blank=True)
entityPosY = models.FloatField(default=None, null=True, blank=True)
entityPosZ = models.FloatField(default=None, null=True, blank=True)
entityPosYaw = models.FloatField(default=None, null=True, blank=True)
entityPosPitch = models.FloatField(default=None, null=True, blank=True)
entityPosRoll = models.FloatField(default=None, null=True, blank=True)
# The position to reach to be able to catch the object
entityWaypointX = models.FloatField(default=None, null=True, blank=True)
entityWaypointY = models.FloatField(default=None, null=True, blank=True)
entityWaypointYaw = models.FloatField(default=None, null=True, blank=True)
# Just for serializer
depth_waypoint = models.IntegerField(null=True, blank=True)
depth_position = models.IntegerField(null=True, blank=True)
entityContainer = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return self.entityClass + " - " + str(self.entityId)
# Description of an object in the arena
class People(models.Model):
peopleId = models.AutoField(primary_key=True)
peopleRecognitionId = models.IntegerField(null=True, blank=True, unique=True)
peopleName = models.CharField(max_length=30, null=True, blank=True)
peopleAge = models.IntegerField(null=True, blank=True)
peopleColor = models.CharField(max_length=30, null=True, blank=True)
peoplePose = models.CharField(max_length=30, null=True, blank=True)
peoplePoseAccuracy = models.FloatField(default=None, null=True, blank=True)
peopleEmotion = models.CharField(max_length=30, null=True, blank=True)
peopleEmotionAccuracy = models.FloatField(default=None, null=True, blank=True)
peopleGender = models.CharField(max_length=10, null=True, blank=True)
peopleGenderAccuracy = models.FloatField(default=None, null=True, blank=True)
peopleIsOperator = models.BooleanField(default=False)
def __str__(self):
return str(self.peopleId) + "(" + str(
self.peopleRecognitionId) + ") - " + self.peopleGender + " - " + self.peopleColor + " - " + self.peoplePose
| 46.984848 | 119 | 0.741696 | 2,986 | 0.962915 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.075137 |
0e97d08d8c72216b971d686bd4bb0a40920e98b4 | 1,134 | py | Python | Problemset/reorder-list/reorder-list.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 1 | 2021-01-30T01:52:46.000Z | 2021-01-30T01:52:46.000Z | Problemset/reorder-list/reorder-list.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 1 | 2021-12-15T14:54:06.000Z | 2021-12-15T14:54:06.000Z | Problemset/reorder-list/reorder-list.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 2 | 2021-04-19T03:32:18.000Z | 2021-06-22T07:06:01.000Z |
# @Title: 重排链表 (Reorder List)
# @Author: 18015528893
# @Date: 2021-02-12 16:05:36
# @Runtime: 100 ms
# @Memory: 23.9 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
if head is None or head.next is None:
return
s = head
f = head.next
while f and f.next:
f = f.next.next
s = s.next
l2 = s.next
def reverse(head):
pre = None
cur = head
while cur:
tmp = cur.next
cur.next = pre
pre = cur
cur = tmp
return pre
l2 = reverse(l2)
s.next = None
l1 = head
while l2:
tmp = l1.next
l1.next = l2
l2 = l2.next
l1 = l1.next
l1.next = tmp
l1 = l1.next
| 22.68 | 61 | 0.444444 | 859 | 0.752189 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.302977 |
0e9b3e893453a362db8cc46d3b47dec9fab0247b | 662 | py | Python | lib/JumpScale/baselib/changetracker/ChangeTrackerFactory.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | null | null | null | lib/JumpScale/baselib/changetracker/ChangeTrackerFactory.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | lib/JumpScale/baselib/changetracker/ChangeTrackerFactory.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | from JumpScale import j
from .ChangeTrackerClient import ChangeTrackerClient
class ChangeTrackerFactory:
def __init__(self):
self.logenable=True
self.loglevel=5
self._cache={}
def get(self, gitlabName="incubaid"):
name="%s_%s"%(blobclientName,gitlabName)
if gitlabName in self._cache:
return self._cache[gitlabName]
self._cache[gitlabName]= ChangeTrackerClient(gitlabName)
return self._cache[gitlabName]
def _log(self,msg,category="",level=5):
if level<self.loglevel+1 and self.logenable:
j.logger.log(msg,category="changetracker.%s"%category,level=level)
| 28.782609 | 78 | 0.678248 | 580 | 0.876133 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.055891 |
0e9c0c6abc17a0bcd1785e2bfd3e4fe09ec9ea9b | 322 | py | Python | test_db.py | landynS8990/collabora8e | 40c0fed070f0ec2b51913fc87a9ef9b88feca083 | [
"Apache-2.0"
] | null | null | null | test_db.py | landynS8990/collabora8e | 40c0fed070f0ec2b51913fc87a9ef9b88feca083 | [
"Apache-2.0"
] | null | null | null | test_db.py | landynS8990/collabora8e | 40c0fed070f0ec2b51913fc87a9ef9b88feca083 | [
"Apache-2.0"
] | null | null | null | import os
import psycopg2
DATABASE_URL = os.environ.get('DATABASE_URL')
def test_db():
conn = psycopg2.connect(DATABASE_URL)
cur = conn.cursor()
cur.execute("SELECT * FROM country;")
for country in cur:
print(country)
cur.close()
conn.close()
if __name__ == '__main__':
test_db()
| 16.1 | 45 | 0.645963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.149068 |
0e9c65c00eed8b90cbe776879ddc8f8c8c52fe94 | 1,108 | py | Python | VanillaGift.py | ytcrackers/Vanilla-Card-Balance-Checkers | 1a86301892b2d503a95974e291963474264b24b1 | [
"MIT"
] | 2 | 2021-04-21T04:50:44.000Z | 2022-03-23T14:23:16.000Z | VanillaGift.py | ytcrackers/Vanilla-Card-Balance-Checkers | 1a86301892b2d503a95974e291963474264b24b1 | [
"MIT"
] | 1 | 2020-04-13T22:43:01.000Z | 2020-04-13T22:43:01.000Z | VanillaGift.py | rip/Vanilla-Card-Balance-Checkers | 1a86301892b2d503a95974e291963474264b24b1 | [
"MIT"
] | null | null | null | from requests_html import HTMLSession
from sys import argv
if len(argv) != 2:
print("Usage: python3 VanillaGift.py VanillaGift.txt")
else: # VanillaGift card balance checker
for card in reversed(list(open(argv[1]))):
cardNumber, expMonth, expYear, cvv = card.rstrip().split(':')
c = cardNumber + ' ' + expMonth + ' ' + expYear + ' ' + cvv
ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit" + \
"/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
with HTMLSession() as s:
s.get('https://www.vanillagift.com/',
headers={'User-Agent': ua},
timeout=15) # get incapsula cookie, bypasses rate limiting waf?
x = s.post("https://www.vanillagift.com/loginCard",
headers={'User-Agent': ua},
data={
'cardNumber': cardNumber,
'expMonth': expMonth,
'expYear': expYear,
'cvv': cvv,
'origin': 'homeLogin' # this one isn't required...
}, # ...but may help to look more nonchalant...
timeout=15)
try:
b = x.html.find('div.SSaccountAmount', first=True).text
print(b, c)
except:
print("$.err", c) | 25.181818 | 68 | 0.626354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.461191 |
0e9c8f54c4bfa1885450e92bd3ef2811ccdb169b | 30,069 | py | Python | acme/tests/test_pmap.py | esi-neuroscience/acme | 0bd8b6332a98f637c0638b5319afe81bf5bd4977 | [
"BSD-3-Clause"
] | 1 | 2021-07-01T13:34:59.000Z | 2021-07-01T13:34:59.000Z | acme/tests/test_pmap.py | esi-neuroscience/acme | 0bd8b6332a98f637c0638b5319afe81bf5bd4977 | [
"BSD-3-Clause"
] | 17 | 2020-12-29T15:04:09.000Z | 2022-03-01T10:21:54.000Z | acme/tests/test_pmap.py | esi-neuroscience/acme | 0bd8b6332a98f637c0638b5319afe81bf5bd4977 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Testing module for ACME's `ParallelMap` interface
#
# Builtin/3rd party package imports
from multiprocessing import Value
import os
import sys
import pickle
import shutil
import inspect
import subprocess
import getpass
import time
import itertools
import logging
from typing import Type
import h5py
import pytest
import signal as sys_signal
import numpy as np
import dask.distributed as dd
from glob import glob
from scipy import signal
# Import main actors here
from acme import ParallelMap, cluster_cleanup, esi_cluster_setup
from acme.shared import is_slurm_node
# Construct decorators for skipping certain tests
# (gates tests that rely on POSIX-only tools such as `stdbuf`, `os.killpg`
# and Unix signals, which do not exist on Windows)
skip_in_win32 = pytest.mark.skipif(sys.platform == "win32", reason="Not running in Windows")
# Functions that act as stand-ins for user-funcs
def simple_func(x, y, z=3):
    """Stand-in user function: return the sum of *x* and *y*, scaled by *z*."""
    total = x + y
    return total * z
def medium_func(x, y, z=3, w=np.ones((3, 3))):
    """Stand-in user function: return (sum(x) + y) * z * max(w)."""
    scaled = (sum(x) + y) * z
    return scaled * w.max()
def hard_func(x, y, z=3, w=np.zeros((3, 1)), **kwargs):
    """Stand-in user function returning a 2-tuple (sum(x) + y, z * w).

    Any extra keyword arguments are accepted and ignored.
    """
    scalar_part = sum(x) + y
    array_part = z * w
    return scalar_part, array_part
def lowpass_simple(h5name, channel_no):
    """Low-pass filter channel *channel_no* of the dataset stored in *h5name*.

    The HDF5 file is expected to hold a 2D dataset named ``data`` whose
    attributes ``b`` and ``a`` carry the IIR filter coefficients.
    """
    with h5py.File(h5name, "r") as h5file:
        dset = h5file["data"]
        trace = dset[:, channel_no]
        coeff_b = dset.attrs["b"]
        coeff_a = dset.attrs["a"]
    # Zero-phase filtering; padlen matches what the synthetic test data needs
    return signal.filtfilt(coeff_b, coeff_a, trace, padlen=200)
def lowpass_hard(arr_like, b, a, res_dir, res_base="lowpass_hard_", dset_name="custom_dset_name", padlen=200, taskID=None):
    """Filter channel *taskID* of *arr_like* and save the result to *res_dir*.

    Simulates a user-defined saving scheme: one HDF5 file per task, named
    ``<res_base><taskID>.h5`` with the result under *dset_name*. Returns None.
    """
    filtered = signal.filtfilt(b, a, arr_like[:, taskID], padlen=padlen)
    out_file = os.path.join(res_dir, "{}{}.h5".format(res_base, taskID))
    with h5py.File(out_file, "w") as h5file:
        h5file.create_dataset(dset_name, data=filtered)
def pickle_func(arr, b, a, channel_no, sabotage_hdf5=False):
    """Filter one channel of *arr*; optionally return an HDF5-incompatible dict.

    With ``sabotage_hdf5=True``, even-numbered channels return a dict (which
    cannot be written to HDF5), forcing ACME's emergency pickling path.
    """
    if sabotage_hdf5 and channel_no % 2 == 0:
        return {"b": b}
    return signal.filtfilt(b, a, arr[:, channel_no], padlen=200)
# Perform SLURM-specific tests only on cluster nodes
# (``useSLURM`` gates every assertion below that inspects SLURM job headers,
# partitions or per-worker memory limits)
useSLURM = is_slurm_node()
# Main testing class
class TestParallelMap():
    """End-to-end tests for ACME's `ParallelMap` context manager.

    Class-level attributes build a synthetic multi-channel, multi-trial
    signal (low-frequency component plus high-frequency noise) that the
    test methods low-pass filter in parallel and compare against the known
    clean component `orig`.
    """
    # Construct linear combination of low- and high-frequency sine waves
    # and use an IIR filter to reconstruct the low-frequency component
    nChannels = 32
    nTrials = 8
    fData = 2
    fNoise = 64
    fs = 1000
    t = np.linspace(-1, 1, fs)
    orig = np.sin(2 * np.pi * fData * t)
    sig = orig + np.sin(2 * np.pi * fNoise * t)
    cutoff = 50
    b, a = signal.butter(8, 2 * cutoff / fs)
    # Blow up the signal to have "channels" and "trials": even/odd channels have
    # opposing periodicity; do the same to the low-freq component
    sig = np.repeat(sig.reshape(-1, 1), axis=1, repeats=nChannels)
    sig[:, ::2] *= -1
    sig = np.tile(sig, (nTrials, 1))
    orig = np.repeat(orig.reshape(-1, 1), axis=1, repeats=nChannels)
    orig[:, ::2] *= -1
    orig = np.tile(orig, (nTrials, 1))
    # Error tolerance for low-pass filtered results
    tol = 1e-3
    # Test setup of `ParallelMap` w/different functions args/kwargs
    def test_init(self):
        """Exercise `ParallelMap` argument distribution and its error paths."""
        # Collected auto-generated output directories in list for later cleanup
        outDirs = []
        # Basic functionality w/simplest conceivable user-func
        pmap = ParallelMap(simple_func, [2, 4, 6, 8], 4, setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(simple_func, [2, 4, 6, 8], y=4, setup_interactive=False) # pos arg referenced via kwarg, cfg #2
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(simple_func, 0, 4, z=[3, 4, 5, 6], setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=2, setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        # User func has `np.ndarray` as keyword
        pmap = ParallelMap(medium_func, [2, 4, 6, 8], y=[2, 2], n_inputs=2, setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(medium_func, None, None, w=[np.ones((3, 3)), 2 * np.ones((3,3))], setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(medium_func, None, None, z=np.zeros((3,)), setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(medium_func, None, None, z=np.zeros((3, 1)), setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        # Lots of ways for this to go wrong...
        pmap = ParallelMap(hard_func, [2, 4, 6, 8], 2, w=np.ones((3,)), setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(hard_func, [2, 4, 6, 8], y=22, w=np.ones((7, 1)), setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(hard_func, np.ones((3,)), 1, w=np.ones((7, 1)), setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], z=np.array([1, 2]), w=np.ones((8, 1)), n_inputs=2, setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap = ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], w=np.ones((8, 1)), n_inputs=4, setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        # Ensure erroneous/ambiguous setups trigger the appropriate errors:
        # not enough positional args
        with pytest.raises(ValueError) as valerr:
            ParallelMap(simple_func, 4, setup_interactive=False)
        assert "simple_func expects 2 positional arguments ('x', 'y'), found 1" in str(valerr.value)
        # invalid kwargs
        with pytest.raises(ValueError) as valerr:
            ParallelMap(simple_func, 4, 4, z=3, w=4, setup_interactive=False)
        assert "simple_func accepts at maximum 1 keyword arguments ('z'), found 2" in str(valerr.value)
        # ill-posed parallelization: two candidate lists for input distribution
        with pytest.raises(ValueError) as valerr:
            ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], setup_interactive=False)
        assert "automatic input distribution failed: found 2 objects containing 2 to 4 elements" in str(valerr.value)
        # ill-posed parallelization: two candidate lists for input distribution (`x` and `w`)
        with pytest.raises(ValueError) as valerr:
            ParallelMap(medium_func, [1, 2, 3], None, w=[np.ones((3,3)), 2 * np.ones((3,3))], setup_interactive=False)
        assert "automatic input distribution failed: found 2 objects containing 2 to 3 elements." in str(valerr.value)
        # invalid input spec
        with pytest.raises(ValueError) as valerr:
            ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=3, setup_interactive=False)
        assert "No object has required length of 3 matching `n_inputs`" in str(valerr.value)
        # invalid input spec: `w` expects a NumPy array, thus it is not considered for input distribution
        with pytest.raises(ValueError) as valerr:
            ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], w=np.ones((8, 1)), n_inputs=8, setup_interactive=False)
        assert "No object has required length of 8 matching `n_inputs`" in str(valerr.value)
        # Clean up testing folder and any running clients
        cluster_cleanup()
        for folder in outDirs:
            shutil.rmtree(folder, ignore_errors=True)
    # Functionality tests: perform channel-concurrent low-pass filtering
    def test_filter_example(self):
        """Run channel-parallel filtering; vet on-disk/in-memory results,
        log-file handling and SLURM job/memory settings."""
        # If called by `test_existing_cluster` use pre-allocated client for all computations
        try:
            dd.get_client()
            existingClient = True
        except ValueError:
            existingClient = False
        # Create tmp directory and create data-containers
        tempDir = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp")
        if useSLURM:
            tempDir = "/cs/home/{}/acme_tmp".format(getpass.getuser())
        os.makedirs(tempDir, exist_ok=True)
        sigName = os.path.join(tempDir, "sigdata.h5")
        origName = os.path.join(tempDir, "origdata.h5")
        with h5py.File(sigName, "w") as sigFile:
            dset = sigFile.create_dataset("data", data=self.sig)
            dset.attrs["b"] = self.b
            dset.attrs["a"] = self.a
        with h5py.File(origName, "w") as origFile:
            origFile.create_dataset("data", data=self.orig)
        # Collected auto-generated output directories in list for later cleanup
        outDirs = []
        # Parallelize across channels, write results to disk
        with ParallelMap(lowpass_simple, sigName, range(self.nChannels), setup_interactive=False) as pmap:
            resOnDisk = pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        assert len(pmap.kwargv["outFile"]) == pmap.n_calls
        resFiles = [os.path.join(pmap.kwargv["outDir"][0], outFile) for outFile in pmap.kwargv["outFile"]]
        assert resOnDisk == resFiles
        assert all(os.path.isfile(fle) for fle in resOnDisk)
        # Compare computed single-channel results to expected low-freq signal
        for chNo, h5name in enumerate(resOnDisk):
            with h5py.File(h5name, "r") as h5f:
                assert np.mean(np.abs(h5f["result_0"][()] - self.orig[:, chNo])) < self.tol
        # Same, but collect results in memory: ensure nothing freaky happens
        with ParallelMap(lowpass_simple,
                         sigName,
                         range(self.nChannels),
                         write_worker_results=False,
                         setup_interactive=False) as pmap:
            resInMem = pmap.compute()
        for chNo in range(self.nChannels):
            assert np.mean(np.abs(resInMem[chNo] - self.orig[:, chNo])) < self.tol
        # Be double-paranoid: ensure on-disk and in-memory results match up
        for chNo, h5name in enumerate(resOnDisk):
            with h5py.File(h5name, "r") as h5f:
                assert np.array_equal(h5f["result_0"][()], resInMem[chNo])
        # Simulate user-defined results-directory
        tempDir2 = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp_lowpass_hard")
        if useSLURM:
            tempDir2 = "/cs/home/{}/acme_tmp_lowpass_hard".format(getpass.getuser())
        shutil.rmtree(tempDir2, ignore_errors=True)
        os.makedirs(tempDir2, exist_ok=True)
        # Same task, different function: simulate user-defined saving scheme and "weird" inputs
        sigData = h5py.File(sigName, "r")["data"]
        res_base = "lowpass_hard_"
        dset_name = "custom_dset_name"
        with ParallelMap(lowpass_hard,
                         sigData,
                         self.b,
                         self.a,
                         res_dir=tempDir2,
                         res_base=res_base,
                         dset_name=dset_name,
                         padlen=[200] * self.nChannels,
                         n_inputs=self.nChannels,
                         write_worker_results=False,
                         setup_interactive=False) as pmap:
            pmap.compute()
        resFiles = glob(os.path.join(tempDir2, res_base + "*"))
        assert len(resFiles) == pmap.n_calls
        # Compare computed single-channel results to expected low-freq signal
        for chNo in range(self.nChannels):
            h5name = res_base + "{}.h5".format(chNo)
            with h5py.File(os.path.join(tempDir2, h5name), "r") as h5f:
                assert np.mean(np.abs(h5f[dset_name][()] - self.orig[:, chNo])) < self.tol
        # Ensure log-file generation produces a non-empty log-file at the expected location
        # Bonus: leave computing client alive and vet default SLURM settings
        if not existingClient:
            cluster_cleanup(pmap.client)
        for handler in pmap.log.handlers:
            if isinstance(handler, logging.FileHandler):
                pmap.log.handlers.remove(handler)
        with ParallelMap(lowpass_simple,
                         sigName,
                         range(self.nChannels),
                         logfile=True,
                         stop_client=False,
                         setup_interactive=False) as pmap:
            pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        logFileList = [handler.baseFilename for handler in pmap.log.handlers if isinstance(handler, logging.FileHandler)]
        assert len(logFileList) == 1
        logFile = logFileList[0]
        assert os.path.dirname(os.path.realpath(__file__)) in logFile
        with open(logFile, "r") as fl:
            assert len(fl.readlines()) > 1
        # Ensure client has not been killed; perform post-hoc check of default SLURM settings
        assert dd.get_client()
        client = dd.get_client()
        if useSLURM and not existingClient:
            assert pmap.n_calls == pmap.n_jobs
            assert len(client.cluster.workers) == pmap.n_jobs
            partition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
            assert "8GB" in partition
            memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
            assert memory.size == 1
            assert round(memory[0] / 1000**3) == [int(s) for s in partition if s.isdigit()][0]
        # Same, but use custom log-file
        for handler in pmap.log.handlers:
            if isinstance(handler, logging.FileHandler):
                pmap.log.handlers.remove(handler)
        customLog = os.path.join(tempDir, "acme_log.txt")
        with ParallelMap(lowpass_simple,
                         sigName,
                         range(self.nChannels),
                         logfile=customLog,
                         verbose=True,
                         stop_client=True,
                         setup_interactive=False) as pmap:
            pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        assert os.path.isfile(customLog)
        with open(customLog, "r") as fl:
            assert len(fl.readlines()) > 1
        # Ensure client has been stopped
        with pytest.raises(ValueError):
            dd.get_client()
        # Underbook SLURM (more calls than jobs)
        partition = "8GBXS"
        n_jobs = int(self.nChannels / 2)
        mem_per_job = "2GB"
        with ParallelMap(lowpass_simple,
                         sigName,
                         range(self.nChannels),
                         partition=partition,
                         n_jobs=n_jobs,
                         mem_per_job=mem_per_job,
                         stop_client=False,
                         setup_interactive=False) as pmap:
            pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        # Post-hoc check of client to ensure custom settings were respected
        client = pmap.client
        assert pmap.n_calls == self.nChannels
        if useSLURM:
            assert pmap.n_jobs == n_jobs
            assert len(client.cluster.workers) == pmap.n_jobs
            actualPartition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
            assert actualPartition == partition
            memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
            assert memory.size == 1
            assert round(memory[0] / 1000**3) == int(mem_per_job.replace("GB", ""))
        # Let `cluster_cleanup` murder the custom setup and ensure it did its job
        if not existingClient:
            cluster_cleanup(pmap.client)
            with pytest.raises(ValueError):
                dd.get_client()
        # Overbook SLURM (more jobs than calls)
        partition = "8GBXS"
        n_jobs = self.nChannels + 2
        mem_per_job = "3000MB"
        with ParallelMap(lowpass_simple,
                         sigName,
                         range(self.nChannels),
                         partition=partition,
                         n_jobs=n_jobs,
                         mem_per_job=mem_per_job,
                         stop_client=False,
                         setup_interactive=False) as pmap:
            pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        # Post-hoc check of client to ensure custom settings were respected
        client = pmap.client
        assert pmap.n_calls == self.nChannels
        if useSLURM:
            assert pmap.n_jobs == n_jobs
            assert len(client.cluster.workers) == pmap.n_jobs
            actualPartition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
            assert actualPartition == partition
            memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
            assert memory.size == 1
            assert round(memory[0] / 1000**3) * 1000 == int(mem_per_job.replace("MB", ""))
        if not existingClient:
            cluster_cleanup(pmap.client)
        # Close any open HDF5 files to not trigger any `OSError`s, close running clusters
        # and clean up tmp dirs and created directories/log-files
        sigData.file.close()
        try:
            os.unlink(logFile)
        except PermissionError:
            pass
        shutil.rmtree(tempDir, ignore_errors=True)
        shutil.rmtree(tempDir2, ignore_errors=True)
        for folder in outDirs:
            shutil.rmtree(folder, ignore_errors=True)
        # Wait a second (literally) so that no new parallel jobs started by
        # `test_existing_cluster` erroneously use existing HDF files
        time.sleep(1.0)
    # Test if pickling/emergency pickling and I/O in general works as intended
    def test_pickling(self):
        """Verify HDF5 vs. pickle result saving, emergency pickling of
        HDF5-incompatible returns, and failure on unwritable output dirs."""
        # Collected auto-generated output directories in list for later cleanup
        outDirs = []
        # Execute `pickle_func` w/regular HDF5 saving
        with ParallelMap(pickle_func,
                         self.sig,
                         self.b,
                         self.a,
                         range(self.nChannels),
                         sabotage_hdf5=False,
                         n_inputs=self.nChannels,
                         setup_interactive=False) as pmap:
            hdfResults = pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        # Execute `pickle_func` w/pickling
        with ParallelMap(pickle_func,
                         self.sig,
                         self.b,
                         self.a,
                         range(self.nChannels),
                         n_inputs=self.nChannels,
                         write_pickle=True,
                         setup_interactive=False) as pmap:
            pklResults = pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        # Ensure HDF5 and pickle match up
        for chNo, h5name in enumerate(hdfResults):
            with open(pklResults[chNo], "rb") as pkf:
                pklRes = pickle.load(pkf)
            with h5py.File(h5name, "r") as h5f:
                assert np.array_equal(pklRes, h5f["result_0"][()])
        # Test emergency pickling
        with ParallelMap(pickle_func,
                         self.sig,
                         self.b,
                         self.a,
                         range(self.nChannels),
                         sabotage_hdf5=True,
                         n_inputs=self.nChannels,
                         setup_interactive=False) as pmap:
            mixedResults = pmap.compute()
        outDirs.append(pmap.kwargv["outDir"][0])
        # Ensure non-compliant dicts were pickled, rest is in HDF5
        for chNo, fname in enumerate(mixedResults):
            if chNo % 2 == 0:
                assert fname.endswith(".pickle")
                with open(fname, "rb") as pkf:
                    assert np.array_equal(self.b, pickle.load(pkf)["b"])
            else:
                assert fname.endswith(".h5")
                with h5py.File(fname, "r") as h5f:
                    with h5py.File(hdfResults[chNo], "r") as h5ref:
                        assert np.array_equal(h5f["result_0"][()], h5ref["result_0"][()])
        # Test write breakdown (both for HDF5 saving and pickling)
        pmap = ParallelMap(pickle_func,
                           self.sig,
                           self.b,
                           self.a,
                           range(self.nChannels),
                           sabotage_hdf5=True,
                           n_inputs=self.nChannels,
                           setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap.kwargv["outDir"][0] = "/path/to/nowhere"
        with pytest.raises(RuntimeError) as runerr:
            pmap.compute()
        assert "<ACMEdaemon> Parallel computation failed" in str(runerr.value)
        pmap = ParallelMap(pickle_func,
                           self.sig,
                           self.b,
                           self.a,
                           range(self.nChannels),
                           sabotage_hdf5=True,
                           n_inputs=self.nChannels,
                           write_pickle=True,
                           setup_interactive=False)
        outDirs.append(pmap.kwargv["outDir"][0])
        pmap.kwargv["outDir"][0] = "/path/to/nowhere"
        with pytest.raises(RuntimeError) as runerr:
            pmap.compute()
        assert "<ACMEdaemon> Parallel computation failed" in str(runerr.value)
        # Clean up testing folder
        for folder in outDirs:
            shutil.rmtree(folder, ignore_errors=True)
    # test if KeyboardInterrupts are handled correctly
    @skip_in_win32
    def test_cancel(self):
        """Simulate CTRL+C (SIGINT) and a stray SIGFPE in subprocesses and
        check that ACME shuts down clients gracefully."""
        # Setup temp-directory layout for subprocess-scripts and prepare interpreters
        tempDir = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp")
        os.makedirs(tempDir, exist_ok=True)
        pshells = [os.path.join(os.path.split(sys.executable)[0], pyExec) for pyExec in ["python", "ipython"]]
        # Prepare ad-hoc script for execution in new process
        scriptName = os.path.join(tempDir, "dummy.py")
        scriptContents = \
            "from acme import ParallelMap\n" +\
            "import time\n" +\
            "def long_running(dummy):\n" +\
            "    time.sleep(10)\n" +\
            "    return\n" +\
            "if __name__ == '__main__':\n" +\
            "    with ParallelMap(long_running, [None]*2, setup_interactive=False, write_worker_results=False) as pmap: \n" +\
            "        pmap.compute()\n" +\
            "    print('ALL DONE')\n"
        with open(scriptName, "w") as f:
            f.write(scriptContents)
        # Execute the above script both in Python and iPython to ensure global functionality
        for pshell in pshells:
            # Launch new process in background (`stdbuf` prevents buffering of stdout)
            proc = subprocess.Popen("stdbuf -o0 " + pshell + " " + scriptName,
                                    shell=True, start_new_session=True,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
            # Wait for ACME to start up (as soon as logging info is shown, `pmap.compute()` is running)
            # However: don't wait indefinitely - if `pmap.compute` is not started within 30s, abort
            logStr = "<ParallelMap> INFO: Log information available at"
            buffer = bytearray()
            timeout = 30
            t0 = time.time()
            for line in itertools.takewhile(lambda x: time.time() - t0 < timeout, iter(proc.stdout.readline, b"")):
                buffer.extend(line)
                if logStr in line.decode("utf8"):
                    break
            assert logStr in buffer.decode("utf8")
            # Wait a bit, then simulate CTRL+C in sub-process; make sure the above
            # impromptu script did not run to completion *but* the created client was
            # shut down with CTRL + C
            time.sleep(2)
            os.killpg(proc.pid, sys_signal.SIGINT)
            time.sleep(1)
            out = proc.stdout.read().decode()
            assert "ALL DONE" not in out
            assert "INFO: <cluster_cleanup> Successfully shut down" in out
        # Almost identical script, this time use an externally started client
        scriptName = os.path.join(tempDir, "dummy2.py")
        scriptContents = \
            "from acme import ParallelMap, esi_cluster_setup\n" +\
            "import time\n" +\
            "def long_running(dummy):\n" +\
            "    time.sleep(10)\n" +\
            "    return\n" +\
            "if __name__ == '__main__':\n" +\
            "    client = esi_cluster_setup(partition='8GBDEV',n_jobs=1, interactive=False)\n" +\
            "    with ParallelMap(long_running, [None]*2, setup_interactive=False, write_worker_results=False) as pmap: \n" +\
            "        pmap.compute()\n" +\
            "    print('ALL DONE')\n"
        with open(scriptName, "w") as f:
            f.write(scriptContents)
        # Test script functionality in both Python and iPython
        for pshell in pshells:
            proc = subprocess.Popen("stdbuf -o0 " + sys.executable + " " + scriptName,
                                    shell=True, start_new_session=True,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
            logStr = "<ParallelMap> INFO: Log information available at"
            buffer = bytearray()
            timeout = 30
            t0 = time.time()
            for line in itertools.takewhile(lambda x: time.time() - t0 < timeout, iter(proc.stdout.readline, b"")):
                buffer.extend(line)
                if logStr in line.decode("utf8"):
                    break
            assert logStr in buffer.decode("utf8")
            time.sleep(2)
            os.killpg(proc.pid, sys_signal.SIGINT)
            time.sleep(2)
            out = proc.stdout.read().decode()
            assert "ALL DONE" not in out
            assert "<ParallelMap> INFO: <ACME> CTRL + C acknowledged, client and workers successfully killed" in out
        # Ensure random exception does not immediately kill an active client
        scriptName = os.path.join(tempDir, "dummy3.py")
        scriptContents = \
            "from acme import esi_cluster_setup\n" +\
            "import time\n" +\
            "if __name__ == '__main__':\n" +\
            "    esi_cluster_setup(partition='8GBDEV',n_jobs=1, interactive=False)\n" +\
            "    time.sleep(60)\n"
        with open(scriptName, "w") as f:
            f.write(scriptContents)
        proc = subprocess.Popen("stdbuf -o0 " + sys.executable + " " + scriptName,
                                shell=True, start_new_session=True,
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
        # Give the client time to start up, then send a floating-point exception
        # (equivalent to a `ZeroDivsionError` to the child process)
        time.sleep(5)
        assert proc.poll() is None
        proc.send_signal(sys_signal.SIGFPE)
        # Ensure the `ZeroDivsionError` did not kill the process. Then terminate it
        # and confirm that the floating-exception was propagated correctly
        assert proc.poll() is None
        proc.terminate()
        proc.wait()
        assert proc.returncode in [-sys_signal.SIGFPE.value, -sys_signal.SIGTERM.value]
        # Clean up tmp folder
        shutil.rmtree(tempDir, ignore_errors=True)
    # test esi-cluster-setup called separately before pmap
    def test_existing_cluster(self):
        """Start a client via `esi_cluster_setup`, vet its validation paths,
        then re-run the other test methods against the pre-allocated client."""
        # Test custom SLURM cluster setup
        if useSLURM:
            # Ensure invalid partition/memory specifications are caught
            with pytest.raises(ValueError):
                esi_cluster_setup(partition="invalid", interactive=False)
            cluster_cleanup()
            with pytest.raises(ValueError):
                esi_cluster_setup(mem_per_job="invalidGB", interactive=False)
            cluster_cleanup()
            with pytest.raises(ValueError):
                esi_cluster_setup(mem_per_job="-20MB", interactive=False)
            cluster_cleanup()
            # Over-allocation of memory should default to partition max
            client = esi_cluster_setup(partition="8GBDEV", n_jobs=1, mem_per_job="9000MB", interactive=False)
            memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
            assert memory.size == 1
            assert np.round(memory / 1000**3)[0] == 8
            cluster_cleanup(client)
            # Test if invalid extra args are caught
            slurmOut = "/cs/home/{}/acme_out".format(getpass.getuser())
            with pytest.raises(TypeError):
                esi_cluster_setup(job_extra="--output={}".format(slurmOut), interactive=False)
            cluster_cleanup()
            with pytest.raises(ValueError):
                esi_cluster_setup(job_extra=["output={}".format(slurmOut)], interactive=False)
            cluster_cleanup()
            with pytest.raises(ValueError):
                esi_cluster_setup(job_extra=["--output=/path/to/nowhere"], interactive=False)
            cluster_cleanup()
            # Supply extra args to start client for actual tests
            client = esi_cluster_setup(partition="8GBXS", job_extra=["--output={}".format(slurmOut)], interactive=False)
            assert "--output={}".format(slurmOut) in client.cluster.job_header
        else:
            client = esi_cluster_setup(n_jobs=6, interactive=False)
        # Re-run tests with pre-allocated client (except for `test_cancel`)
        skipTests = ["test_existing_cluster", "test_cancel"]
        all_tests = [attr for attr in self.__dir__()
                     if (inspect.ismethod(getattr(self, attr)) and attr not in skipTests)]
        for test in all_tests:
            getattr(self, test)()
        client.close()
        client.cluster.close()
        if useSLURM:
            shutil.rmtree(slurmOut, ignore_errors=True)
| 45.697568 | 135 | 0.587083 | 28,079 | 0.933819 | 0 | 0 | 5,891 | 0.195916 | 0 | 0 | 7,680 | 0.255413 |
0e9e025b01eb969d408f82347a8fc498cac9ab24 | 766 | py | Python | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | 1 | 2019-09-26T08:16:30.000Z | 2019-09-26T08:16:30.000Z | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the analysis front-end object."""
import unittest
from plaso.frontend import analysis_frontend
from plaso.storage import zip_file as storage_zip_file
from tests.frontend import test_lib
class AnalysisFrontendTests(test_lib.FrontendTestCase):
  """Test case covering the analysis front-end object."""

  def testOpenStorage(self):
    """Verifies that OpenStorage yields a usable storage file object."""
    front_end = analysis_frontend.AnalysisFrontend()
    test_file_path = self._GetTestFilePath([u'psort_test.proto.plaso'])
    opened_storage = front_end.OpenStorage(test_file_path)
    self.assertIsInstance(opened_storage, storage_zip_file.StorageFile)
    opened_storage.Close()
# Run the test suite when this module is executed directly
if __name__ == '__main__':
  unittest.main()
| 25.533333 | 74 | 0.763708 | 472 | 0.616188 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.266319 |
0ea2730c361b568ac507b349f4219e90dc8e1b4e | 388 | py | Python | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | null | null | null | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | 1 | 2021-10-03T10:23:17.000Z | 2021-10-03T10:23:17.000Z | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | null | null | null | """Stores constants used as numbers for readability that are used across all apps"""
class AdminRoles:
    """Numeric identifiers for the administrative roles used across all apps."""
    JCRTREASURER = 1
    SENIORTREASURER = 2
    BURSARY = 3
    ASSISTANTBURSAR = 4
    # (value, human-readable label) pairs -- matches the (value, label)
    # convention of Django model/form `choices`; confirm against usage sites
    CHOICES = (
        (JCRTREASURER, 'JCR Treasurer'),
        (SENIORTREASURER, 'Senior Treasurer'),
        (BURSARY, 'Bursary'),
        (ASSISTANTBURSAR, 'Assistant Bursar')
    )
| 24.25 | 84 | 0.615979 | 301 | 0.775773 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.389175 |
0ea30d061c0f6731c0b56ded5ff02ccd54a258dd | 647 | py | Python | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | 1 | 2021-06-08T06:53:50.000Z | 2021-06-08T06:53:50.000Z | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | import numpy as np
from numpy import log
# Integrand for the logarithmic integral Li(x)
def f(x):
    """Return 1 / ln(x), the integrand of the logarithmic integral."""
    log_x = log(x)
    return 1 / log_x
# Composite Simpson's rule implementation
def simpson (f, a, b ,n):
    """Approximate the definite integral of *f* over [a, b].

    Uses the composite Simpson rule with *n* subintervals:
    h/3 * (f(a) + f(b) + 2*sum(even interior nodes) + 4*sum(odd nodes)).

    Parameters:
        f -- callable integrand
        a -- lower integration limit
        b -- upper integration limit
        n -- number of subintervals; must be a positive even integer

    Raises:
        ValueError -- if *n* is not a positive even integer. (Previously an
        odd *n* silently produced a wrong result; Simpson's rule is only
        defined for an even number of subintervals.)
    """
    if n <= 0 or n % 2 != 0:
        raise ValueError("n must be a positive even integer, got {}".format(n))
    h = (b - a) / n
    g = f(a) + f(b)
    # Interior even-indexed nodes (x_2, x_4, ..., x_{n-2}) carry weight 2
    for i in range (1, n // 2):
        g = g + 2 * f(a + 2 * i * h)
    # Odd-indexed nodes (x_1, x_3, ..., x_{n-1}) carry weight 4
    for i in range (0, n // 2):
        g = g + 4 * f(a + (2 * i + 1) * h)
    return h * g / 3
def main():
    """Print a 16-interval Simpson approximation of the logarithmic integral Li(3)."""
    approximation = simpson(f, 2, 3, 16)
    print("Li(3): ", approximation)


main()
| 22.310345 | 46 | 0.525502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.378049 |
0ea3b1c8782a586b2613aff8826b4b27c668ede7 | 1,517 | py | Python | setup.py | cloudify-cosmo/cloudify-cluster-manager | c453f65e6c6aa8622751805df2a2549db4fb3685 | [
"Apache-2.0"
] | 2 | 2020-11-28T11:48:36.000Z | 2020-11-28T11:48:38.000Z | setup.py | cloudify-cosmo/cloudify-cluster-manager | c453f65e6c6aa8622751805df2a2549db4fb3685 | [
"Apache-2.0"
] | 5 | 2020-10-12T16:48:09.000Z | 2021-09-13T15:17:28.000Z | setup.py | cloudify-cosmo/cloudify-cluster-manager | c453f65e6c6aa8622751805df2a2549db4fb3685 | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from os import path
from setuptools import setup
def get_readme_contents():
    """Read and return the text of README.md located next to this setup script."""
    readme_path = path.join(path.abspath(path.dirname(__file__)), 'README.md')
    with open(readme_path) as readme_file:
        return readme_file.read()
# Package metadata and build configuration for the cluster-manager CLI
setup(
    name='cloudify-cluster-manager',
    # README.md rendered as the PyPI long description
    long_description=get_readme_contents(),
    long_description_content_type='text/markdown',
    version='1.0.15',
    author='Cloudify',
    author_email='cosmo-admin@cloudify.co',
    packages=['cfy_cluster_manager'],
    include_package_data=True,
    license='LICENSE',
    description="Install a Cloudify cluster",
    # Expose the `cfy_cluster_manager` console command
    entry_points={
        'console_scripts': [
            'cfy_cluster_manager = cfy_cluster_manager.main:main'
        ]
    },
    # Pinned runtime dependencies
    install_requires=[
        'pyyaml>=5.3.0,<5.4.0',
        'jinja2>=2.11.0,<2.12.0',
        'fabric>=2.5.0,<2.6.0',
        'cryptography==3.3.2'
    ]
)
| 30.34 | 79 | 0.678972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.616348 |
0ea7201b51048203ec576eca4a186c7f31e54813 | 169 | py | Python | practice/bitwise_operations/ex2.py | recursivelycurious/wordnik-repl | 9d9e96a8ebc79b95f135d5bc871602b65d2d2b79 | [
"MIT"
] | 346 | 2016-02-22T20:21:10.000Z | 2022-01-27T20:55:53.000Z | Language Skills/Python/Unit 10/2-Introduction to Bitwise Operators/Binary representation/2-Lesson I0_ The Base 2 Number System_.py | vpstudios/Codecademy-Exercise-Answers | ebd0ee8197a8001465636f52c69592ea6745aa0c | [
"MIT"
] | 55 | 2016-04-07T13:58:44.000Z | 2020-06-25T12:20:24.000Z | Language Skills/Python/Unit 10/2-Introduction to Bitwise Operators/Binary representation/2-Lesson I0_ The Base 2 Number System_.py | vpstudios/Codecademy-Exercise-Answers | ebd0ee8197a8001465636f52c69592ea6745aa0c | [
"MIT"
] | 477 | 2016-02-21T06:17:02.000Z | 2021-12-22T10:08:01.000Z | print 0b1, #1
# Binary (base-2) integer literals use the 0b prefix; values print in decimal.
# The trailing comma is Python 2 print-statement syntax: it suppresses the
# newline so these values appear on a single line separated by spaces.
print 0b10, #2
print 0b11, #3
print 0b100, #4
print 0b101, #5
print 0b110, #6
print 0b111 #7
print "******"
# Arithmetic on binary literals behaves like on decimal ints: 1 + 3 = 4, 3 * 3 = 9
print 0b1 + 0b11
print 0b11 * 0b11
| 15.363636 | 17 | 0.609467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.130178 |
0ea75c30072eee31077e1649933e02a8c3a47e21 | 2,617 | py | Python | meiduo_mall/scripts/regenerate_detail_html.py | 1103928458/meiduo_drf | 49595755f264b09ea748b4deb8a88bba5eb8557b | [
"MIT"
] | null | null | null | meiduo_mall/scripts/regenerate_detail_html.py | 1103928458/meiduo_drf | 49595755f264b09ea748b4deb8a88bba5eb8557b | [
"MIT"
] | null | null | null | meiduo_mall/scripts/regenerate_detail_html.py | 1103928458/meiduo_drf | 49595755f264b09ea748b4deb8a88bba5eb8557b | [
"MIT"
] | 1 | 2020-11-10T07:22:42.000Z | 2020-11-10T07:22:42.000Z | # from django.shortcuts import render
# import os
# from django.conf import settings
# from goods.models import SKU
# from contents.utils import get_categories
# from goods.utils import get_breadcrumb
#
# def generate_static_sku_detail_html(sku_id):
#
# sku = SKU.objects.get(id=sku_id)
#
# category = sku.category # 三级类别数据
# spu = sku.spu
#
# """1.准备当前商品的规格选项列表 [8, 11]"""
# # 获取出当前正显示的sku商品的规格选项id列表
# current_sku_spec_qs = sku.specs.order_by('spec_id')
# current_sku_option_ids = [] # [8, 11]
# for current_sku_spec in current_sku_spec_qs:
# current_sku_option_ids.append(current_sku_spec.option_id)
#
# """2.构造规格选择仓库
# {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}
# """
# # 构造规格选择仓库
# temp_sku_qs = spu.sku_set.all() # 获取当前spu下的所有sku
# # 选项仓库大字典
# spec_sku_map = {} # {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}
# for temp_sku in temp_sku_qs:
# # 查询每一个sku的规格数据
# temp_spec_qs = temp_sku.specs.order_by('spec_id')
# temp_sku_option_ids = [] # 用来包装每个sku的选项值
# for temp_spec in temp_spec_qs:
# temp_sku_option_ids.append(temp_spec.option_id)
# spec_sku_map[tuple(temp_sku_option_ids)] = temp_sku.id
#
# """3.组合 并找到sku_id 绑定"""
# spu_spec_qs = spu.specs.order_by('id') # 获取当前spu中的所有规格
#
# for index, spec in enumerate(spu_spec_qs): # 遍历当前所有的规格
# spec_option_qs = spec.options.all() # 获取当前规格中的所有选项
# temp_option_ids = current_sku_option_ids[:] # 复制一个新的当前显示商品的规格选项列表
# for option in spec_option_qs: # 遍历当前规格下的所有选项
# temp_option_ids[index] = option.id # [8, 12]
# option.sku_id = spec_sku_map.get(tuple(temp_option_ids)) # 给每个选项对象绑定下他sku_id属性
#
# # spec.spec_options = spec_option_qs # 把规格下的所有选项绑定到规格对象的spec_options属性上
#
# context = {
# 'categories': get_categories(), # 商品分类
# 'breadcrumb': get_breadcrumb(category), # 面包屑导航
# 'sku': sku, # 当前要显示的sku模型对象
# 'category': category, # 当前的显示sku所属的三级类别
# 'spu': spu, # sku所属的spu
# 'spec_qs': spu_spec_qs, # 当前商品的所有规格数据
# }
#
# response = render(None, 'detail.html', context)
# html_text = response.content.decode()
# file_path = os.path.join(settings.STATICFILES_DIRS[0], 'detail/' + str(sku_id) + '.html')
# with open(file_path, 'w') as f:
# f.write(html_text)
#
#
# if __name__ == '__main__':
# skus = SKU.objects.all()
# for sku in skus:
# print(sku.id)
# generate_static_sku_detail_html(sku.id) | 37.927536 | 101 | 0.620558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,019 | 0.977972 |
0ea886e8f085826ecb3e920ac582aec32c17c562 | 1,990 | py | Python | chatbot/algorithm/entity_recognizer.py | ningxie1991/Movie-Chatbot | d73eb26c0b682e0426b0b2b8aa7b49059f44a5a1 | [
"MIT"
] | null | null | null | chatbot/algorithm/entity_recognizer.py | ningxie1991/Movie-Chatbot | d73eb26c0b682e0426b0b2b8aa7b49059f44a5a1 | [
"MIT"
] | null | null | null | chatbot/algorithm/entity_recognizer.py | ningxie1991/Movie-Chatbot | d73eb26c0b682e0426b0b2b8aa7b49059f44a5a1 | [
"MIT"
] | null | null | null | import os
import joblib
import numpy as np
import pandas as pd
import sklearn_crfsuite
from sklearn_crfsuite import metrics
from chatbot.algorithm.question_answering.utils.ner import collate, sent2features, sent2labels
class MoviesNER:
    """CRF-based named-entity recognizer trained on the MIT movies corpus."""

    def __init__(self):
        """Load the train/test CSVs and build feature/label matrices.

        Cased variants of the corpus live alongside the files used here:
        engtrain_cased.csv / engtest_cased.csv.
        """
        base_dir = os.path.dirname(__file__)
        self.df_train = pd.read_csv(os.path.join(base_dir, '../../data/mit_movies_corpus/engtrain.csv'))
        self.df_test = pd.read_csv(os.path.join(base_dir, '../../data/mit_movies_corpus/engtest.csv'))
        # l2sgd: Stochastic Gradient Descent with an L2 regularization term.
        self.crf = sklearn_crfsuite.CRF(
            algorithm='l2sgd',
            max_iterations=1000,  # cap on training iterations
        )
        self.classes = np.unique(self.df_train.Tag.values)
        sentences_train = collate(self.df_train)
        sentences_test = collate(self.df_test)
        self.X_train = [sent2features(s) for s in sentences_train]
        self.y_train = [sent2labels(s) for s in sentences_train]
        self.X_test = [sent2features(s) for s in sentences_test]
        self.y_test = [sent2labels(s) for s in sentences_test]

    def fit(self):
        """Train the CRF on the training split."""
        self.crf.fit(self.X_train, self.y_train)

    def evaluate(self):
        """Predict on the test split, print a per-tag report, return predictions."""
        predictions = self.crf.predict(self.X_test)
        print("--- performance of the CRF model")
        print(metrics.flat_classification_report(self.y_test, predictions, labels=self.classes))
        return predictions

    def save_model(self, relative_file_path):
        """Serialize the trained CRF to *relative_file_path* (relative to this module)."""
        target_path = os.path.join(os.path.dirname(__file__), relative_file_path)
        joblib.dump(self.crf, target_path)
if __name__ == "__main__":
    # Train a fresh CRF on the MIT movies corpus and persist it for the
    # chatbot to load at runtime.
    ner = MoviesNER()
    ner.fit()
    ner.save_model('./output/ner_best.sav')
0ea910932de6945668274ae7ffdc58e15331f4f5 | 393 | py | Python | env/lib/python3.6/site-packages/torch/jit/passes/inplace.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | [
"Apache-2.0"
] | null | null | null | env/lib/python3.6/site-packages/torch/jit/passes/inplace.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | [
"Apache-2.0"
] | null | null | null | env/lib/python3.6/site-packages/torch/jit/passes/inplace.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | [
"Apache-2.0"
] | 1 | 2020-07-24T17:53:25.000Z | 2020-07-24T17:53:25.000Z |
def _check_inplace(trace):
"""Checks that all PythonOps that were not translated into JIT format are out of place.
Should be run after the ONNX pass.
"""
graph = trace.graph()
for node in graph.nodes():
if node.kind() == 'PythonOp':
if node.i('inplace'):
raise RuntimeError("inplace {} not supported in the JIT".format(node.pyname()))
| 32.75 | 95 | 0.620865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.486005 |
0ea94d6989972b8a0054db5ba41efd5546eaa245 | 219 | py | Python | tests/test_logging.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 31 | 2018-07-16T15:03:14.000Z | 2022-03-10T08:36:09.000Z | tests/test_logging.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 216 | 2018-07-18T20:00:34.000Z | 2021-10-05T17:40:47.000Z | tests/test_logging.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 10 | 2019-02-17T00:56:58.000Z | 2021-11-05T13:31:37.000Z | import fault.logging
def test_logging_smoke():
    """Smoke-test: every fault.logging level accepts a plain message."""
    for emit, message in (
        (fault.logging.info, "some info msg"),
        (fault.logging.debug, "some debug msg"),
        (fault.logging.warning, "some warning msg"),
        (fault.logging.error, "some error msg"),
    ):
        emit(message)
| 24.333333 | 45 | 0.716895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.296804 |
0eaa19e87c71dad721ec740eabbb2e6449cdf569 | 5,600 | py | Python | rdftools/__init__.py | johnstonskj/rdftools | 9f027520dec4cb35422a78a8d9a97bc5210db8f9 | [
"MIT"
] | 1 | 2022-02-03T18:41:49.000Z | 2022-02-03T18:41:49.000Z | rdftools/__init__.py | johnstonskj/rdftools | 9f027520dec4cb35422a78a8d9a97bc5210db8f9 | [
"MIT"
] | 6 | 2018-03-01T23:21:24.000Z | 2018-03-07T18:35:52.000Z | rdftools/__init__.py | johnstonskj/rdftools | 9f027520dec4cb35422a78a8d9a97bc5210db8f9 | [
"MIT"
] | null | null | null | import argparse
import i18n
import logging
import os
import rdflib
import sys
from termcolor import colored
from timeit import default_timer as timer
__VERSION__ = '0.2.0'
__LOG__ = None
FORMATS = ['nt', 'n3', 'turtle', 'rdfa', 'xml', 'pretty-xml']
HEADER_SEP = '='
COLUMN_SEP = '|'
EMPTY_LINE = ''
COLUMN_SPEC = '{:%d}'
USE_COLOR = False
def startup(description_key, add_args, read_files=True, argv=None):
    """Bootstrap a command-line tool: translations, argparse and logging.

    *description_key* is the i18n key for the tool description; *add_args*,
    if callable, may extend the shared parser.  Returns (logger, parsed args).
    """
    global __LOG__, USE_COLOR
    configure_translation()
    description = i18n.t(description_key)
    parser = configure_argparse(description, read_files)
    if callable(add_args):
        parser = add_args(parser)
    command = parser.parse_args() if argv is None else parser.parse_args(argv)
    USE_COLOR = command.use_color
    tool_name = parser.prog
    __LOG__ = configure_logging(tool_name, command.verbose)
    __LOG__.info(i18n.t('rdftools.started', tool=tool_name, name=description))
    __LOG__.info(argv)
    return __LOG__, command
def configure_translation(force_locale=None):
    """Register the bundled message catalog with i18n, with 'en' fallback."""
    messages_dir = os.path.join(os.path.dirname(__file__), 'messages')
    i18n.load_path.append(messages_dir)
    if force_locale is not None:
        i18n.set('locale', force_locale)
    i18n.set('fallback', 'en')
def configure_argparse(description, read_files=True):
    """Build the ArgumentParser shared by all rdftools commands.

    When *read_files* is true, adds the -i/--input and -r/--read options.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('-b', '--base', action='store')
    if read_files:
        parser.add_argument('-i', '--input', nargs='*',
                            type=argparse.FileType('r'))
        parser.add_argument('-r', '--read', action='store', choices=FORMATS)
    parser.add_argument('-c', '--use-color', action='store_true')
    return parser
def configure_logging(name, level):
    """Create and globally register the tool logger.

    Parameters
    ----------
    name: String
        Logger name (usually the program name).
    level: Integer
        Verbosity count from repeated -v flags; higher means more verbose.

    Returns
    -------
    logger: logging.Logger
        The configured logger; also stored in the module-level __LOG__.
    """
    global __LOG__
    logging.basicConfig(format='%(asctime)-15s %(module)s.%(funcName)s:' +
                        '%(lineno)d [%(levelname)s] %(message)s')
    logger = logging.getLogger(name)
    # BUG FIX: map verbosity monotonically onto log levels
    # (-v WARN, -vv INFO, -vvv DEBUG).  Previously -vv produced DEBUG while
    # the *more* verbose -vvv produced the less detailed INFO level.
    if level > 2:
        logger.setLevel(logging.DEBUG)
    elif level > 1:
        logger.setLevel(logging.INFO)
    elif level > 0:
        logger.setLevel(logging.WARN)
    else:
        logger.setLevel(logging.ERROR)
    logger.info(i18n.t('rdftools.logging', level=logger.getEffectiveLevel()))
    __LOG__ = logger
    return logger
def read_into(input, format, graph, base=None):
    """Parse RDF from *input* (stdin when None) into *graph* and return it.

    When *format* is None it is inferred: the default format for stdin,
    otherwise guessed from the file name.
    """
    if format is None:
        format = FORMATS[0] if input is None else rdflib.util.guess_format(input.name)
    if input is None:
        __LOG__.info(i18n.t('rdftools.read_stdin', format=format))
        start = timer()
        graph.parse(source=sys.stdin.buffer, format=format, publicID=base)
        end = timer()
    else:
        __LOG__.info(i18n.t('rdftools.read_file',
                            name=input.name, format=format))
        start = timer()
        graph.parse(source=input.name, format=format, publicID=base)
        end = timer()
    __LOG__.info(i18n.t('rdftools.read_complete',
                        len=len(graph), time=end - start))
    return graph
def read(input, format, base=None):
    """Parse a single input into a brand-new graph and return it."""
    return read_into(input, format, rdflib.Graph(), base)
def read_all(inputs, format, base=None):
    """Accumulate every input into one shared graph and return it."""
    graph = rdflib.Graph()
    for source in inputs:
        # read_into mutates and returns the same graph object.
        read_into(source, format, graph, base)
    return graph
def write(graph, output, format, base=None):
    """Serialize *graph* to *output* (stdout when None).

    When *format* is None it is inferred the same way as in read_into.
    """
    __LOG__.debug(i18n.t('rdftools.write', graph=graph, len=len(graph)))
    if format is None:
        format = FORMATS[0] if output is None else rdflib.util.guess_format(output.name)
    if output is None:
        __LOG__.info(i18n.t('rdftools.write_stdout', format=format))
        start = timer()
        data = graph.serialize(format=format, base=base)
        end = timer()
        try:
            # This fails on Travis ONLY for Python 3.4
            sys.stdout.buffer.write(data)
        except AttributeError:
            sys.stdout.write(data.decode('utf-8'))
    else:
        __LOG__.info(i18n.t('rdftools.write_file',
                            name=output.name, format=format))
        start = timer()
        graph.serialize(destination=output.name, format=format, base=base)
        end = timer()
    __LOG__.debug(i18n.t('rdftools.write_complete', time=(end - start)))
def get_terminal_width(default=80):
    """Return the terminal width in columns, falling back to *default*."""
    import shutil
    return shutil.get_terminal_size((default, 20)).columns
def header(str):
    """Render *str* as a table header (reverse video when color is on)."""
    if USE_COLOR:
        return colored(str, attrs=['reverse'])
    return str


def line(str):
    """Render *str* as a rule/separator (dimmed when color is on)."""
    if USE_COLOR:
        return colored(str, attrs=['dark'])
    return str


def comment(str):
    """Render *str* as commentary text (dimmed when color is on)."""
    if USE_COLOR:
        return colored(str, attrs=['dark'])
    return str
def report(columns, rows, timer=0):
    """Print *rows* as an ASCII table of *columns*, then a summary line."""
    # TODO: Should also take this as a parameter? so "rdf query -c 80 -q ..."
    width = get_terminal_width()
    # Divide the usable width (minus one separator per column) evenly.
    col_width = int((width - len(columns)) / len(columns))
    col_string = COLUMN_SPEC % col_width
    # Header row.
    for column in columns:
        print(header(col_string.format(column)), end=line(COLUMN_SEP))
    print(EMPTY_LINE)
    # Divider row.
    for column in columns:
        print(line(HEADER_SEP * col_width), end=line(COLUMN_SEP))
    print(EMPTY_LINE)
    # Data rows, one printed line per row.
    for row in rows:
        for col in columns:
            print(col_string.format(row[col]), end=line(COLUMN_SEP))
        print(EMPTY_LINE)
    if timer != 0:
        print(comment(i18n.t('rdftools.report_timed',
                             len=len(rows), time=timer)))
    else:
        # NOTE(review): this branch reuses the 'report_timed' key but omits
        # its time value; presumably an untimed key was intended — confirm
        # against the rdftools message catalog.
        print(comment(i18n.t('rdftools.report_timed',
                             len=len(rows))))
| 30.601093 | 78 | 0.637321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.117321 |
0eaded91ac94008a42cad76b8f191ad6073a2d0e | 681 | py | Python | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | null | null | null | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | 7 | 2020-12-07T09:11:01.000Z | 2022-03-02T18:15:01.000Z | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | null | null | null | from loguru import logger
from scrapy.crawler import CrawlerProcess
from scrapy.utils.log import DEFAULT_LOGGING
from scrapy.utils.project import get_project_settings
from scrape_magic.spiders.gatherer_spider import GathererSpider
from scrape_magic.spiders.starcity_spider import StarcitySpider
# Shared Scrapy runtime configuration, built once at import time.
settings = get_project_settings()
# Quieten Scrapy's own logging and silence Twisted except for errors.
DEFAULT_LOGGING["loggers"] = dict(scrapy={"level": "INFO"}, twisted={"level": "ERROR"})
# Single crawler process reused across update_items() calls; root log
# handler installation is left to the host application.
process = CrawlerProcess(settings, install_root_handler=False)
def update_items():
    """Schedule both spiders on the shared crawler process and start it.

    process.start() raises RuntimeError when the Twisted reactor is already
    running; that case is treated as non-fatal and merely logged.
    """
    for spider in (StarcitySpider, GathererSpider):
        process.crawl(spider)
    try:
        process.start(stop_after_crawl=False)
    except RuntimeError as e:
        logger.error(e)
| 32.428571 | 87 | 0.790015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.052863 |
0eae926a9f1f0cb7eec42a2b4785dbee7f9eaa01 | 1,842 | py | Python | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | 1 | 2022-01-16T15:03:48.000Z | 2022-01-16T15:03:48.000Z | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | null | null | null | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | 1 | 2021-11-30T08:08:02.000Z | 2021-11-30T08:08:02.000Z | from flask import (
Blueprint, request, jsonify, render_template, session, redirect, url_for
)
from app.models import User
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/login', methods=['GET','POST'])
def login():
if request.method == 'GET':
return render_template("login.html")
email_address = request.form['email_address']
password = request.form['password']
if request.method == 'POST':
email_address = request.form['email_address']
password = request.form['password']
"""
ADD POST LOGIN LOGIC HERE
"""
else:
error = "Incorrect login details"
return render_template("login.html", error=error)
@bp.route('/logout', methods=['GET'])
def logout():
session.pop('user')
return redirect(url_for('auth.login'))
@bp.route('/update_password', methods=['GET', 'POST'])
def update_password():
if request.method == 'GET':
token = request.args.get('token')
user_id = request.args.get('user_id')
#Check if token is valid
# if
return render_template('error_page.html', error=error)
User.update()
elif request.method == 'POST':
"""
ADD PASSWORD RESET LOGIC HERE
"""
return render_template('login.html', message=message)
| 29.709677 | 480 | 0.445711 | 0 | 0 | 0 | 0 | 1,171 | 0.635722 | 0 | 0 | 391 | 0.212269 |
0eafba60413846c138910dc0d814f1ad191425ea | 1,841 | py | Python | run_extraction_and_generation.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | 1 | 2020-10-01T01:07:11.000Z | 2020-10-01T01:07:11.000Z | run_extraction_and_generation.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | run_extraction_and_generation.py | aychen99/Excavating-Occaneechi-Town | 6e864ca69ff1881554eb4c88aebed236bafbeaf4 | [
"MIT"
] | null | null | null | import json
import pathlib
from src.extract_old_site.extract import run_extraction
from src.generate_new_site.generate import generate_site
if __name__ == "__main__":
    # Drive the two-phase pipeline: extract data from the old site, then
    # generate the new static site, both controlled by config.json.
    script_root_dir = pathlib.Path(__file__).parent
    config = None
    with open((script_root_dir / "config.json")) as f:
        config = json.load(f)
    # Resolve any default config values
    if config["extractionOutputDirPath"] == "Default":
        config["extractionOutputDirPath"] = str(script_root_dir / "jsons")
    if config["generationOutputDirPath"] == "Default":
        config["generationOutputDirPath"] = str(script_root_dir / "newdig")
        # NOTE(review): indentation was lost in transit — this mkdir is
        # reconstructed inside the "Default" branch since it creates that
        # default directory; confirm against the original source.
        (script_root_dir / "newdig").mkdir(parents=True, exist_ok=True)
    # Set up for generating the site
    dig_dir = str((pathlib.Path(config["digParentDirPath"]) / "dig").as_posix())
    input_dir = config["extractionOutputDirPath"]
    output_dir = config["generationOutputDirPath"]
    overwrite_out = config["overwriteExistingGeneratedFiles"]
    copy_images = config["copyImages"]
    copy_videos = config["copyVideos"]
    copy_data = config["copyData"]
    # Run extraction and site generation
    if config['runExtraction']:
        print("\n-----------------------------------\n"
              "Extracting old site data.\n")
        run_extraction(config)
    else:
        print("\n-----------------------------------\n"
              "SKIPPING extracting old site data.\n")
    if config['runGeneration']:
        print("\n-----------------------------------\n"
              "Generating new site files.\n")
        generate_site(dig_dir, input_dir, output_dir, overwrite_out, copy_images, copy_videos, copy_data)
    else:
        print("\n-----------------------------------\n"
              "SKIPPING generating new site files.\n")
    # if config['runDigPro']:
    #     TODO
    #     pass
| 38.354167 | 105 | 0.612167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 774 | 0.420424 |
0eaff1b214599d29b4c8677703247eacd71b69b2 | 671 | py | Python | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 128 | 2017-03-20T12:42:38.000Z | 2022-03-12T07:06:55.000Z | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 4 | 2017-03-18T04:37:36.000Z | 2017-07-09T14:11:18.000Z | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 77 | 2017-03-19T06:49:39.000Z | 2022-03-12T07:06:56.000Z | from sklearn.neural_network import MLPClassifier
from commons import variables
from commons import tools
from scipy.stats import mode
def learn(x, y, test_x):
    """Train an MLP on a negatively-subsampled (x, y) and predict *test_x*.

    Returns (predicted labels, class probabilities) for *test_x*.
    Hyperparameters come from the project-wide ``variables`` module.
    """
    sampled_x, sampled_y = tools.simple_negative_sample(
        x, y, variables.select_rate_nn)
    classifier = MLPClassifier(
        hidden_layer_sizes=(variables.unit_num_nn,),
        random_state=2017,
        max_iter=2000,
        alpha=variables.alpha_nn,
        learning_rate_init=variables.learning_rate_init_nn,
        solver="adam",
        activation="relu",
    )
    classifier.fit(sampled_x, sampled_y)
    labels = classifier.predict(test_x)
    probabilities = classifier.predict_proba(test_x)
    return labels, probabilities
| 39.470588 | 127 | 0.746647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.017884 |
0eb0a8f9ad766e5dfaab13e68e0f57e269d67f78 | 981 | py | Python | alpacka/envs/__init__.py | shoot-tree-search/sts | 2d9f19a40c7fb1c637dd3bd230942c01f14927e1 | [
"MIT"
] | 2 | 2021-01-03T04:21:56.000Z | 2021-02-12T12:54:58.000Z | alpacka/envs/__init__.py | shoot-tree-search/sts | 2d9f19a40c7fb1c637dd3bd230942c01f14927e1 | [
"MIT"
] | null | null | null | alpacka/envs/__init__.py | shoot-tree-search/sts | 2d9f19a40c7fb1c637dd3bd230942c01f14927e1 | [
"MIT"
] | null | null | null | """Environments."""
import gin
from alpacka.envs import bin_packing
from alpacka.envs import cartpole
from alpacka.envs import gfootball
from alpacka.envs import octomaze
from alpacka.envs import sokoban
from alpacka.envs.base import *
from alpacka.envs.wrappers import *
# Configure envs in this module to ensure they're accessible via the
# alpacka.envs.* namespace.
def configure_env(env_class):
    """Register *env_class* with gin so configs can reference it as
    ``alpacka.envs.<ClassName>``."""
    return gin.external_configurable(env_class, module='alpacka.envs')
# Gin-configurable aliases for each environment class; the alias names
# mirror the class names, hence the pylint suppressions.
ActionNoiseSokoban = configure_env(sokoban.ActionNoiseSokoban)  # pylint: disable=invalid-name
BinPacking = configure_env(bin_packing.BinPacking)  # pylint: disable=invalid-name
CartPole = configure_env(cartpole.CartPole)  # pylint: disable=invalid-name
GoogleFootball = configure_env(gfootball.GoogleFootball)  # pylint: disable=invalid-name
Octomaze = configure_env(octomaze.Octomaze)  # pylint: disable=invalid-name
Sokoban = configure_env(sokoban.Sokoban)  # pylint: disable=invalid-name
| 35.035714 | 93 | 0.796126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.313965 |
0eb0ceb7a9d1916d4e8a0ee0147a33ecff45bec7 | 7,294 | py | Python | pymbolic/interop/ast.py | alexfikl/pymbolic | 74bf37c6cde8e92303d30dcf722c5c317d00d6b3 | [
"MIT"
] | null | null | null | pymbolic/interop/ast.py | alexfikl/pymbolic | 74bf37c6cde8e92303d30dcf722c5c317d00d6b3 | [
"MIT"
] | null | null | null | pymbolic/interop/ast.py | alexfikl/pymbolic | 74bf37c6cde8e92303d30dcf722c5c317d00d6b3 | [
"MIT"
] | null | null | null | from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2015 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import ast
import pymbolic.primitives as p
__doc__ = r'''
An example::
src = """
def f():
xx = 3*y + z * (12 if x < 13 else 13)
yy = f(x, y=y)
"""
import ast
mod = ast.parse(src.replace("\n ", "\n"))
print(ast.dump(mod))
from pymbolic.interop.ast import ASTToPymbolic
ast2p = ASTToPymbolic()
for f in mod.body:
if not isinstance(f, ast.FunctionDef):
continue
for stmt in f.body:
if not isinstance(stmt, ast.Assign):
continue
lhs, = stmt.targets
lhs = ast2p(lhs)
rhs = ast2p(stmt.value)
print(lhs, rhs)
.. autoclass:: ASTToPymbolic
'''
class ASTMapper(object):
    """Dispatch expressions to ``map_<TypeName>`` handlers via the MRO."""

    def __call__(self, expr, *args, **kwargs):
        return self.rec(expr, *args, **kwargs)

    def rec(self, expr, *args, **kwargs):
        """Apply the most specific ``map_*`` handler available for *expr*."""
        dispatch_class = kwargs.pop("dispatch_class", type(self))
        for klass in type(expr).__mro__:
            method = getattr(dispatch_class, "map_" + klass.__name__, None)
            if method is not None:
                return method(self, expr, *args, **kwargs)
        return self.not_supported(expr)

    def not_supported(self, expr):
        """Report that no handler exists for *expr*'s type."""
        raise NotImplementedError(
            "%s does not know how to map type '%s'"
            % (type(self).__name__, type(expr).__name__))
# {{{ mapper
def _add(x, y):
    """Build a pymbolic Sum node for ``x + y``."""
    return p.Sum((x, y))


def _sub(x, y):
    """Build ``x - y`` as a Sum whose second operand is negated."""
    return p.Sum((x, p.Product(((-1), y))))


def _mult(x, y):
    """Build a pymbolic Product node for ``x * y``."""
    return p.Product((x, y))


def _neg(x):
    """Negate a pymbolic expression."""
    return -x
class ASTToPymbolic(ASTMapper):
    """Convert a Python :mod:`ast` expression tree into pymbolic expressions.

    Each ``map_<NodeType>`` method handles one AST node type; dispatch
    happens through :class:`ASTMapper` based on the node's MRO.
    """

    bin_op_map = {
            ast.Add: _add,
            ast.Sub: _sub,
            ast.Mult: _mult,
            # MatMult
            ast.Div: p.Quotient,
            ast.FloorDiv: p.FloorDiv,
            ast.Mod: p.Remainder,
            ast.Pow: p.Power,
            ast.LShift: p.LeftShift,
            ast.RShift: p.RightShift,
            ast.BitOr: p.BitwiseOr,
            ast.BitXor: p.BitwiseXor,
            ast.BitAnd: p.BitwiseAnd,
            }

    def map_BinOp(self, expr):  # noqa
        # (expr left, operator op, expr right)
        try:
            op_constructor = self.bin_op_map[type(expr.op)]
        except KeyError:
            raise NotImplementedError(
                    "%s does not know how to map operator '%s'"
                    % (type(self).__name__,
                        type(expr.op).__name__))

        return op_constructor(self.rec(expr.left), self.rec(expr.right))

    unary_op_map = {
            # NOTE(review): ast.Invert (~x) is mapped to arithmetic
            # negation, which differs from bitwise-not for integers —
            # confirm whether a bitwise-not primitive was intended.
            ast.Invert: _neg,
            ast.Not: p.LogicalNot,
            # ast.UAdd:
            ast.USub: _neg,
            }

    def map_UnaryOp(self, expr):  # noqa
        # (unaryop op, expr operand)
        try:
            op_constructor = self.unary_op_map[type(expr.op)]
        except KeyError:
            raise NotImplementedError(
                    "%s does not know how to map operator '%s'"
                    % (type(self).__name__,
                        type(expr.op).__name__))

        return op_constructor(self.rec(expr.operand))

    def map_IfExp(self, expr):  # noqa
        # (expr test, expr body, expr orelse)
        return p.If(self.rec(expr.test), self.rec(expr.body), self.rec(expr.orelse))

    comparison_op_map = {
            ast.Eq: "==",
            ast.NotEq: "!=",
            ast.Lt: "<",
            ast.LtE: "<=",
            ast.Gt: ">",
            ast.GtE: ">=",
            # Is
            # IsNot
            # In
            # NotIn
            }

    def map_Compare(self, expr):  # noqa
        # (expr left, cmpop* ops, expr* comparators)
        op, = expr.ops
        try:
            comp = self.comparison_op_map[type(op)]
        except KeyError:
            raise NotImplementedError(
                    "%s does not know how to map operator '%s'"
                    % (type(self).__name__,
                        type(op).__name__))

        # FIXME: Support strung-together comparisons
        right, = expr.comparators

        return p.Comparison(self.rec(expr.left), comp, self.rec(right))

    def map_Call(self, expr):  # noqa
        # (expr func, expr* args, keyword* keywords)
        func = self.rec(expr.func)
        args = tuple(self.rec(arg) for arg in expr.args)
        if expr.keywords:
            return p.CallWithKwargs(func, args,
                    dict(
                        (kw.arg, self.rec(kw.value))
                        for kw in expr.keywords))
        else:
            return p.Call(func, args)

    def map_Num(self, expr):  # noqa
        # (object n) -- a number as a PyObject.  (Pre-3.8 ASTs.)
        return expr.n

    def map_Str(self, expr):  # noqa
        # (string s)  (Pre-3.8 ASTs.)
        return expr.s

    def map_Bytes(self, expr):  # noqa
        # (bytes s)  (Pre-3.8 ASTs.)
        return expr.s

    # 3.8 and up
    def map_Constant(self, expr):  # noqa
        # (singleton value)
        return expr.value

    def map_NameConstant(self, expr):  # noqa
        # (singleton value)  (Pre-3.8 ASTs.)
        return expr.value

    def map_Attribute(self, expr):  # noqa
        # (expr value, identifier attr, expr_context ctx)
        return p.Lookup(self.rec(expr.value), expr.attr)

    def map_Subscript(self, expr):  # noqa
        # (expr value, slice slice, expr_context ctx)
        def none_or_rec(x):
            if x is None:
                return x
            else:
                return self.rec(x)

        # BUG FIX: slice expressions from ast.parse arrive as ast.Slice
        # nodes whose fields are lower/upper/step.  The old
        # ``isinstance(expr.slice, slice)`` test (builtin slice, read via
        # .start/.stop/.step) was unreachable for real ASTs; that branch is
        # kept only for callers that substitute a builtin slice manually.
        if isinstance(expr.slice, ast.Slice):
            index = slice(
                    none_or_rec(expr.slice.lower),
                    none_or_rec(expr.slice.upper),
                    none_or_rec(expr.slice.step))
        elif isinstance(expr.slice, slice):
            index = slice(
                    none_or_rec(expr.slice.start),
                    none_or_rec(expr.slice.stop),
                    none_or_rec(expr.slice.step))
        else:
            index = none_or_rec(expr.slice)

        return p.Subscript(
                self.rec(expr.value),
                index)

    # def map_Starred(self, expr):

    def map_Name(self, expr):  # noqa
        # (identifier id, expr_context ctx)
        return p.Variable(expr.id)

    def map_Tuple(self, expr):  # noqa
        # (expr* elts, expr_context ctx)
        return tuple(self.rec(ti) for ti in expr.elts)
# }}}
# vim: foldmethod=marker
| 28.271318 | 84 | 0.559364 | 5,186 | 0.710995 | 0 | 0 | 0 | 0 | 0 | 0 | 2,546 | 0.349054 |
0eb181bcfa8fca128622c8a736645617de6b33ea | 1,398 | py | Python | testing.py | mfamilia/minimize_energy_consumption | d2925a305e6f04bef642b62c7f03b53ae9437a7b | [
"MIT"
] | 1 | 2019-03-25T11:56:05.000Z | 2019-03-25T11:56:05.000Z | testing.py | mfamilia/minimize_energy_consumption | d2925a305e6f04bef642b62c7f03b53ae9437a7b | [
"MIT"
] | null | null | null | testing.py | mfamilia/minimize_energy_consumption | d2925a305e6f04bef642b62c7f03b53ae9437a7b | [
"MIT"
] | null | null | null | import os
import numpy as np
import random as rn
from environment import Environment
from keras.models import load_model
# Pin every RNG source so repeated evaluation runs are reproducible.
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
# Discrete action space of 5 actions; the boundary (5-1)/2 = 2 marks the
# "no change" midpoint between cooling and heating actions.
number_actions = 5
direction_boundary = (number_actions - 1) / 2
# Temperature change applied per unit of action offset.
temperature_step = 1.5
env = Environment(initial_number_users = 20, initial_rate_data = 30)
env.train = False  # inference mode
model = load_model("model.h5")  # previously trained Q-network
def enery_direction(action, boundary, step):
    """Translate a discrete action index into (energy magnitude, direction).

    *direction* is -1 for actions below *boundary* and +1 otherwise; the
    energy is the absolute offset from the boundary scaled by *step*.
    """
    offset = action - boundary
    direction = -1 if offset < 0 else 1
    return abs(offset) * step, direction
# Simulate a full year (in minutes) of cooling decisions with the trained
# model, then report total energy versus the no-AI baseline.
current_state, _, _ = env.observe()
month_minutes = 30 * 24 * 60
year_minutes = 12 * month_minutes
for timestep in range(0, year_minutes):
    # Greedy policy: pick the action with the highest predicted Q-value.
    q_values = model.predict(current_state)
    action = np.argmax(q_values[0])
    energy_ai, direction = enery_direction(action, direction_boundary, temperature_step)
    month = int(timestep / month_minutes)
    next_state, _, _ = env.update_env(direction, energy_ai, month)
    current_state = next_state
print("\n")
print("Energy Spent with an AI: {:.0f}".format(env.total_energy_ai))
print("Energy Spent with no AI: {:.0f}".format(env.total_energy_noai))
energy_saved = env.total_energy_noai - env.total_energy_ai
print("ENERGY SAVED: {:.0f} %".format(energy_saved / env.total_energy_noai * 100))
| 29.744681 | 88 | 0.723891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.087983 |
7eba7ca1ca7a464de4552b722ba1235657cc1c18 | 126 | py | Python | roomlistwatcher/infrastructure/producing/topics.py | dnguyen0304/room-list-watcher | 7ac4d5172de22dd8906662da521995c8e06c2617 | [
"MIT"
] | null | null | null | roomlistwatcher/infrastructure/producing/topics.py | dnguyen0304/room-list-watcher | 7ac4d5172de22dd8906662da521995c8e06c2617 | [
"MIT"
] | null | null | null | roomlistwatcher/infrastructure/producing/topics.py | dnguyen0304/room-list-watcher | 7ac4d5172de22dd8906662da521995c8e06c2617 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from roomlistwatcher.common import utility
class Topic(utility.AutomatedEnum):
    """Event topics published by the room-list producer."""
    # NOTE(review): member values appear to be auto-generated by
    # AutomatedEnum from the empty tuple — confirm its contract in
    # roomlistwatcher.common.utility.
    ROOM_FOUND = ()
| 15.75 | 42 | 0.698413 | 55 | 0.436508 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.18254 |
7ebae62d64d2c33a33e2537a143db8237e76925d | 3,992 | py | Python | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | ******************* PARTE I *******************************
#Instalamos git en la terminal de VSC
$sudo apt-get install git -y
#Revisamos la versión del Git que hemos instalado
$git --version
# Podemos ver también un resumen de las principales funcionalidades de Git
$git
#Creamos una carpeta
$mkdir ChallengePython
#Accedemos a la carpeta
$cd ChallengePython/
******************* PARTE II *******************************
$git init //inicializar el repositorio
#Con el `$git init` se crean dos áreas.
- Un área en memoria RAM llamada `Staging`
- Un área llamada repositorio `/.git/`
#Conectare a GitHub
#Configurar como variable global tu usuario
$git config --global user.name "rcamposm" //(nombre de github , sino tiene crear)
#Configurar como variable global tu correo
$git config --global user.email "raquelcamposm@gmail.com"
$git config --global color.ui true
$git config --list
******************* PARTE III *******************************
1. Nos logueamos a GitHub
2. Creamos un repositorio: "PythonChallenge"
- Public Mode
- Add a README File
- Choose a license: MIT License
# En la consola del VSC luego de abrir la carpeta donde queremos guardar el proyecto
$git clone https://github.com/rcamposm/ChallengePython.git
******************* PARTE IV *******************************
#Lista el contenido~
$ls -ltr
$git status // visualizar cambios
# Los archivos que salen en rojo se encuentran en el Working Directory.
# Los archivos que salen en verde se encuentran en el Staging Area.
# Crear el archivo HelloPython.py con un print("Hola Immuners!)
$git status // visualizar cambios
#Con el git add se pasa el archivo al staging área y espera que lo pases al repositorio o que lo remuevas con rm.
$git add HelloPython.py //Agregamos el archivo al repositorio
$git add . // Agregar los cambios de la carpeta en la que nos encontramos agregar todo
$git commit -m "First python program" HelloPython.py// Agregamos los cambios para el repositorio
$git log nombre_de_archivos.extensión //histórico de cambios con detalles
$git log HelloPython.py
$git log #muestra el historial de los registros (Commits) del proyecto con sus respectivos autores, hora específica y descripciones.
$git show archivo.extensión //Muestra todos los cambios
******************* PARTE V *******************************
#Subimos al repositorio Remoto
$ git push -u origin main
Username raquelcamposm@gmail.com
pass:
# Git push envía los commits al repositorio remoto de GitHub
#Verificamos que los archivos se hayan subido al repositorio remoto (GitHub)
$ git push //envia a otro repositorio remoto lo que estamos haciendo
$ git pull //traer repositorio remoto
******************* PARTE VI *******************************
#git branch + name: Crear una rama.
#git branch: Lista de ramas y saber en que ramas estamos.
#git checkout + branch: Para movernos entre ramas.
#Para verificar donde estamos posicionados
$ git log --oneline
$ git branch
#Para crear una rama
$ git branch ramaParrafo
# Modificamos el archivo HelloPython.py --> print("Hola Immuners! hoy estamos 22/11")
$git add HelloPython.py //Agregamos el archivo al repositorio
$git commit -m "Actualizamos el párrafo" HelloPython.py// Agregamos los cambios para el repositorio
#Luego nos movemos a la rama master para poder copiar los cambios de la rama al master
$ git checkout main
#Agregamos los cambios de la rama al master
# Merge para fusionar las ramas
$ git merge ramaParrafo
#Subimos al repositorio remoto
$ git push
# Verificamos que el archivo HelloPython.py está actualizado también en el Github
******************* PARTE VII *******************************
# Subir la rama al repositorio remoto (GitHub)
$ git branch
$ git checkout ramaParrafo
$ git push
$ git push --set-upstream origin ramaParrafo
******************* MÁS ************************************
Git reset vs. Git rm
*********************************************************************************************
| 31.68254 | 132 | 0.667585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,787 | 0.44597 |
7ebbd0a9762fef47be2234772336bb6ea7cc2c82 | 11,661 | py | Python | packages/How_to_implement_Azure_machine_learning/aml_modeling/project/modeling/models/ebm_models.py | dochines/OpenEduAnalytics | a1882dec7ad8e1606b88a8042dcf95d5c03b42dc | [
"CC-BY-4.0",
"MIT"
] | null | null | null | packages/How_to_implement_Azure_machine_learning/aml_modeling/project/modeling/models/ebm_models.py | dochines/OpenEduAnalytics | a1882dec7ad8e1606b88a8042dcf95d5c03b42dc | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-11-11T16:38:33.000Z | 2021-11-11T16:38:33.000Z | packages/How_to_implement_Azure_machine_learning/aml_modeling/project/modeling/models/ebm_models.py | dochines/OpenEduAnalytics | a1882dec7ad8e1606b88a8042dcf95d5c03b42dc | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-11-11T00:09:55.000Z | 2021-11-11T00:09:55.000Z | from typing import List, Tuple
import mlflow
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor
from ..OEA_model import OEAModelInterface, ModelType, ExplanationType
from ..modeling_utils import log_metrics
class mlflow_pyfunc_wrapper(mlflow.pyfunc.PythonModel):
    """Adapt an arbitrary predictor to the mlflow pyfunc model format.

    Any object exposing generic predict, fit, score and predict_proba
    functions can be wrapped and used where mlflow.* model types are
    expected.
    """

    def __init__(self, model):
        """Store the predictor that all calls will be delegated to.

        Parameters
        ----------
        model: Python Object
            A model implementing predict, fit, score and predict_proba.
        """
        self.model = model

    def predict(self, *args):
        """Run the wrapped model's predict on the passed arguments.

        Returns
        -------
        pandas.DataFrame or numpy.ndarray
            Predictions of the wrapped model.
        """
        return self.model.predict(*args)

    def fit(self, *args):
        """Train/fit the wrapped model and return its fit result."""
        return self.model.fit(*args)

    def score(self, *args):
        """Score the wrapped model on the passed arguments.

        Returns
        -------
        Float
            Result of the wrapped score function (generally accuracy).
        """
        return self.model.score(*args)

    def predict_proba(self, *args):
        """Run the wrapped model's predict_proba on the passed arguments.

        Returns
        -------
        pandas.DataFrame or numpy.ndarray
            Predicted output probabilities.
        """
        return self.model.predict_proba(*args)
class wrapped_basic(OEAModelInterface):
    # Base class providing the common data-splitting / train / test /
    # save / register / load plumbing for the wrapped EBM models below.
    def __init__(self, modelname):
        """
        Initialize Basic Wrapped Pyfunc Model utilities (base class)
        Parameters
        ----------
        modelname: String
            Name of the model for registration and saving purposes
        """
        self.predictor = None  # set later by init_model() in subclasses or load_model()
        self.modelname = modelname
    def load_split_data(self, X, Y, A, key, split=.4, stratify=None):
        """
        Splits Data into training, validation, and test sets
        Parameters
        ----------
        X: pandas.DataFrame
            Feature data
        Y: pandas.DataFrame
            Label data
        A: pandas.DataFrame
            Senstive Feature data (may or may not be overlap with X)
        key: String or List[Strings]
            Columns to identify as Keys for all three dataframes. Dropped at loading time.
        split: Float
            Percentage of data to exclude for testing set
        stratify: pandas.DataFrame
            Dataframe used to stratify split of data. I.e. if labels are provided, will ensure equal label distribution in train / test sets.
        Returns
        -------
        X_train: pandas.DataFrame
            Feature data for training set
        X_val: pandas.DataFrame
            Feature data for validation set
        X_test: pandas.DataFrame
            Feature data for test set
        y_train: pandas.DataFrame
            Label data for training set
        y_val: pandas.DataFrame
            Label data for validation set
        y_test: pandas.DataFrame
            Label data for test set
        A_train: pandas.DataFrame
            Senstive Feature data for training set
        A_val: pandas.DataFrame
            Senstive Feature data for validation set
        A_test: pandas.DataFrame
            Senstive Feature data for test set
        classes: List[str]
            List of classes for classification problem outcomes
        """
        # First split: (1 - split) train vs. split held-out; the held-out
        # portion is then halved into validation and test sets below.
        if not (A is None):
            (
                X_train,
                X_val_test,
                y_train,
                y_val_test,
                A_train,
                A_val_test,
            ) = train_test_split(
                X,
                Y,
                A,
                test_size=split,
                random_state=12345,
                stratify=stratify,
            )
            (X_val, X_test, y_val, y_test, A_val, A_test) = train_test_split(
                X_val_test, y_val_test, A_val_test, test_size=0.5, random_state=12345
            )
        else:
            (X_train, X_val_test, y_train, y_val_test) = train_test_split(
                X,
                Y,
                test_size=split,
                random_state=12345,
                stratify=stratify,
            )
            (X_val, X_test, y_val, y_test) = train_test_split(
                X_val_test, y_val_test, test_size=0.5, random_state=12345
            )
        # Drop the key columns everywhere; for labels additionally keep only
        # the first remaining column (single-label assumption).
        X_train = X_train.drop(key, axis='columns').reset_index(drop=True)
        X_val = X_val.drop(key, axis='columns').reset_index(drop=True)
        X_test = X_test.drop(key, axis='columns').reset_index(drop=True)
        y_train = y_train.drop(key, axis='columns')
        y_train = y_train[y_train.columns[:1]].reset_index(drop=True)
        y_val = y_val.drop(key, axis='columns').reset_index(drop=True)
        y_val = y_val[y_val.columns[:1]].reset_index(drop=True)
        y_test = y_test.drop(key, axis='columns').reset_index(drop=True)
        y_test = y_test[y_test.columns[:1]].reset_index(drop=True)
        # classes stays None here; documented return exists for subclasses.
        classes = None
        self.X_train = X_train
        self.X_val = X_val
        self.X_test = X_test
        # Labels are stored flattened to 1-D arrays for sklearn-style fit().
        self.y_train = y_train.values.reshape(-1)
        self.y_val = y_val.values.reshape(-1)
        self.y_test = y_test.values.reshape(-1)
        self.classes = classes
        if not(A is None):
            A_train = A_train.drop(key, axis='columns').reset_index(drop=True)
            A_val = A_val.drop(key, axis='columns').reset_index(drop=True)
            A_test = A_test.drop(key, axis='columns').reset_index(drop=True)
            self.A_train = A_train
            self.A_val = A_val
            self.A_test = A_test
        else:
            A_train = None
            A_val = None
            A_test = None
            self.A_train = A_train
            self.A_val = A_val
            self.A_test = A_test
        return (
            X_train,
            X_val,
            X_test,
            y_train,
            y_val,
            y_test,
            A_train,
            A_val,
            A_test,
            classes,
        )
    def infer(self, data):
        """
        Infer using model
        Parameters
        ----------
        data: pandas.DataFrame OR numpy array
            Feature data
        Returns
        -------
        predictions: pandas.DataFrame OR numpy array
            Results of running inference of the predictor
        """
        return self.predictor.predict(data)
    def train(self):
        """
        Trains model based on data originally loaded using load_split_data. Logs training metrics.
        Returns
        -------
        self.predictor: sklearn Predictor
            Trained predictor model object
        """
        # Final fit uses train + validation combined; the test set stays held out.
        X_train_val = pd.concat([self.X_train, self.X_val], axis=0)
        y_train_val = np.concatenate([self.y_train, self.y_val], axis=0)
        self.predictor.fit(X_train_val, y_train_val)
        log_metrics(self, dataset="training_val")
        return self.predictor
    def test(self):
        """
        Evaluates model on the test set originally loaded using load_split_data. Logs testing metrics and returns predictions on test set.
        Returns
        -------
        preds: pandas.DataFrame OR numpy array
            Results of running inference of the predictor
        """
        preds = log_metrics(self, dataset="test")
        return preds
    def save_model(self, foldername):
        """
        Save Wrapped Pyfunc Model to a Path
        Parameters
        ----------
        foldername: String
            Name of intermediate folder to save model to using mlflow utilities.
        """
        mlflow.pyfunc.save_model(foldername, python_model=self.predictor)
    def register_model(self, foldername):
        """
        Register Model to repository attached to mlflow instance
        Parameters
        ----------
        foldername: String
            Path of folder to upload to model repository
        """
        # foldername is used as the mlflow artifact path for the logged model.
        mlflow.pyfunc.log_model(foldername, python_model=self.predictor, registered_model_name=self.modelname)
    def load_model(self, modelname, version):
        """
        Load Model from a registered endpoint
        Parameters
        ----------
        modelname: String
            name of model to load from remote repository
        version: String
            version of model to load from mllow model repository.
        Returns
        -------
        self.predictor: Wrapped PyFunc Predictor
            Returns the predictor loaded from the registered endpoint
        """
        model_version_uri = "models:/{model_name}/{version}".format(model_name=modelname,version=version)
        self.predictor = mlflow.pyfunc.load_model(model_version_uri)
        return self.predictor
class classification_EBM(wrapped_basic):
    """
    Explainable Boosting Machine for binary classification.
    Inherits the shared data/train/test plumbing from wrapped_basic and
    declares the binary-classification model type with EBM explanations.
    """
    model_type = ModelType.binary_classification
    explanation_type = ExplanationType.ebm
    def init_model(self, seed=5):
        """Create the wrapped EBM classifier with a fixed random seed."""
        ebm = ExplainableBoostingClassifier(random_state=seed)
        self.predictor = mlflow_pyfunc_wrapper(ebm)
class multi_classification_EBM(wrapped_basic):
    """
    Explainable Boosting Machine for multiclass classification.
    Inherits the shared data/train/test plumbing from wrapped_basic and
    declares the multiclass model type with EBM explanations.
    """
    model_type = ModelType.multiclass_classification
    explanation_type = ExplanationType.ebm
    def init_model(self, seed=5):
        """Create the wrapped EBM classifier with a fixed random seed."""
        ebm = ExplainableBoostingClassifier(random_state=seed)
        self.predictor = mlflow_pyfunc_wrapper(ebm)
class regression_EBM(wrapped_basic):
    """
    Explainable Boosting Machine for regression.
    Inherits the shared data/train/test plumbing from wrapped_basic and
    declares the regression model type with EBM explanations.
    """
    model_type = ModelType.regression
    explanation_type = ExplanationType.ebm
    def init_model(self, seed=5):
        """Create the wrapped EBM regressor with a fixed random seed."""
        ebm = ExplainableBoostingRegressor(random_state=seed)
        self.predictor = mlflow_pyfunc_wrapper(ebm)
| 31.346774 | 141 | 0.597119 | 11,308 | 0.969728 | 0 | 0 | 0 | 0 | 0 | 0 | 6,082 | 0.521568 |
7ebe8e1bd51003adafc28a2457a39de43d5df82f | 580 | py | Python | Problemset/rotate-array/rotate-array.py | KivenCkl/LeetCode | fcc97c66f8154a5d20c2aca86120cb37b9d2d83d | [
"MIT"
] | 7 | 2019-05-08T03:41:05.000Z | 2020-12-22T12:39:43.000Z | Problemset/rotate-array/rotate-array.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 1 | 2021-07-19T03:48:35.000Z | 2021-07-19T03:48:35.000Z | Problemset/rotate-array/rotate-array.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 7 | 2019-05-10T20:43:20.000Z | 2021-02-22T03:47:35.000Z |
# @Title: 旋转数组 (Rotate Array)
# @Author: KivenC
# @Date: 2019-03-14 16:57:56
# @Runtime: 124 ms
# @Memory: 13.4 MB
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Rotate ``nums`` right by ``k`` positions using the triple-reversal
        trick: reverse the whole list, then reverse the first ``k`` and the
        remaining ``len(nums) - k`` elements. O(n) time, O(1) extra space.
        """
        # Guard the empty list: k % 0 would raise ZeroDivisionError.
        if not nums:
            return
        # Rotating by a multiple of len(nums) is a no-op.
        k = k % len(nums)
        if k > 0:
            nums.reverse()
            nums[:k] = reversed(nums[:k])
            nums[k:] = reversed(nums[k:])
| 23.2 | 61 | 0.453448 | 462 | 0.785714 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.573129 |
7ebed46683710a9ebe0c25178b513a5b2e2bee9a | 6,940 | py | Python | detectron/utils/wsl_memonger.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 23 | 2020-03-30T11:48:33.000Z | 2022-03-11T06:34:31.000Z | detectron/utils/wsl_memonger.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 9 | 2020-09-28T07:15:16.000Z | 2022-03-25T08:11:06.000Z | detectron/utils/wsl_memonger.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 10 | 2020-03-30T11:48:34.000Z | 2021-06-02T06:12:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import time
import copy
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
import logging
import caffe2.python._import_c_extension as C
from caffe2.python.memonger import verify_graph_equality, verify_inplace_blobs
log = logging.getLogger("memonger")
log.setLevel(logging.INFO)
LiveRange = collections.namedtuple('LiveRange', ["defined", "used", "size"])
def share_freeze_blobs(
    net,
    namescope,
):
    """
    Rewrite ``net`` so that intermediate activations inside ``namescope``
    (between the first Conv op and the StopGradient op) are renamed onto a
    pool of reusable ``<namescope>_shared_*`` blobs, and a ``Free`` op is
    inserted after the last use of each shared blob that is not needed
    past the StopGradient boundary.

    Parameters
    ----------
    net: caffe2 core.Net
        The net to optimize; it is deep-copied, not mutated.
    namescope: str
        Scope prefix ("/" is appended if missing) used to select ops.

    Returns
    -------
    caffe2_pb2.NetDef
        The optimized net proto (verified equivalent to the original).
    """
    log.warning("NOTE: Executing memonger to optimize gradient memory")
    # Normalize the scope so the prefix checks below match whole scopes.
    if namescope != "" and not namescope.endswith("/"):
        namescope += "/"
    new_net = copy.deepcopy(net)
    activations = []
    external_output = set(new_net.Proto().external_output)
    start_idx = -1
    end_idx = -1
    # Locate the optimizable region: first Conv op through StopGradient.
    for idx, op in enumerate(new_net._net.op):
        if namescope not in op.input[0]:
            continue
        if op.type == 'Conv' and start_idx < 0:
            start_idx = idx
        if op.type == 'StopGradient':
            end_idx = idx
    # Hacky way to get activations, think of a better way
    for idx, op in enumerate(new_net._net.op[start_idx:end_idx]):
        if namescope not in op.input[0]:
            continue
        for b in op.output:
            if b not in external_output:
                activations.append(b)
    used_activations = []
    for a in activations:
        if a in used_activations:
            continue
        # Candidate shared names; any name already referenced at or after
        # this activation's first use is removed from the pool below.
        share_pool = [
            namescope + '_shared_' + str(i) for i in range(1000, 10000)
        ]
        first_idx = -1
        for idx, op in enumerate(new_net._net.op):
            if namescope not in op.input[0]:
                continue
            if a in list(op.input) + list(op.output):
                first_idx = idx
                break
        assert first_idx >= 0, first_idx
        for idx, op in enumerate(new_net._net.op[first_idx:]):
            if namescope not in op.input[0]:
                continue
            for b in list(op.input) + list(op.output):
                if b in share_pool:
                    share_pool.remove(b)
        # Rename every occurrence of the activation to the chosen shared blob.
        for idx, op in enumerate(new_net._net.op):
            if namescope not in op.input[0]:
                continue
            op_input = copy.deepcopy(op.input)
            is_found = False
            for i, b in enumerate(op_input):
                if a == b:
                    op_input[i] = share_pool[-1]
                    is_found = True
            if is_found:
                del new_net._net.op[idx].input[:]
                new_net._net.op[idx].input.extend(op_input)
            op_output = copy.deepcopy(op.output)
            is_found = False
            for i, b in enumerate(op_output):
                if a == b:
                    op_output[i] = share_pool[-1]
                    is_found = True
            if is_found:
                del new_net._net.op[idx].output[:]
                new_net._net.op[idx].output.extend(op_output)
        used_activations.append(a)
    assert verify_graph_equality(net.Proto(), new_net.Proto()), \
        "Memonger graph is not equal to original."
    assert verify_inplace_blobs(net.Proto(), new_net.Proto()), \
        "Inplace assignments differ in memonger net."
    # Record the last op index at which each shared blob appears ...
    share_pool = [
        namescope + '_shared_' + str(i) for i in range(1000, 10000)
    ]
    share_pool_used = {}
    for idx, op in enumerate(new_net._net.op):
        if namescope not in op.input[0]:
            continue
        for b in list(op.input) + list(op.output):
            if b in share_pool:
                share_pool_used[b] = idx
    # ... and drop the ones still referenced after the StopGradient boundary.
    for idx, op in enumerate(new_net._net.op[end_idx:]):
        if namescope not in op.input[0]:
            continue
        for b in list(op.input) + list(op.output):
            if b in share_pool_used.keys():
                share_pool_used.pop(b)
    # NOTE(review): each insert shifts later list positions, so a Free op
    # inserted from a stale index may land one or more slots early if
    # several blobs are freed -- confirm the recorded indices stay valid.
    ops = list(new_net._net.op)
    for inp in share_pool_used.keys():
        ops.insert(share_pool_used[inp] + 1, core.CreateOperator("Free", [inp], [inp]))
    del new_net._net.op[:]
    new_net._net.op.extend(ops)
    return new_net.Proto()
def share_freeze_blobs_c2(
    net,
    namescope,
):
    """
    Variant of :func:`share_freeze_blobs` that delegates blob recycling to
    the C++ memonger (``memonger_compute_blob_recycling_for_dag``) for the
    activations between the first Conv op and the StopGradient op within
    ``namescope``.

    Parameters
    ----------
    net: caffe2 core.Net
        The net to optimize; it is not mutated.
    namescope: str
        Scope prefix ("/" is appended if missing) used to select ops.

    Returns
    -------
    caffe2_pb2.NetDef
        The optimized net proto (verified equivalent to the original).
    """
    log.warning("NOTE: Executing memonger to optimize gradient memory")
    # Normalize the scope so the prefix checks below match whole scopes.
    if namescope != "" and not namescope.endswith("/"):
        namescope += "/"
    netproto = copy.deepcopy(net.Proto())
    activations = []
    external_output = set(net.Proto().external_output)
    start_idx = -1
    end_idx = -1
    # Locate the optimizable region: first Conv op through StopGradient.
    for idx, op in enumerate(netproto.op):
        if namescope not in op.input[0]:
            continue
        if op.type == 'Conv' and start_idx < 0:
            start_idx = idx
        if op.type == 'StopGradient':
            end_idx = idx
    log.debug("%s start_idx: %s end_idx: %s", namescope, start_idx, end_idx)
    # Hacky way to get activations, think of a better way
    for idx, op in enumerate(netproto.op[start_idx:end_idx]):
        for b in op.output:
            if b not in external_output:
                activations.append(b)
    log.debug("activations: %s", activations)
    heads = [namescope + 'data']
    log.debug("heads: %s", heads)
    # Remove last activations, as they are usually accessed externally
    activations = set(activations[:-1])
    log.debug("activations: %s", activations)
    shared_blobs = activations
    dont_share_blobs = None
    blob_shapes = None
    # NOTE(review): these indices are relative to the [start_idx:end_idx+2]
    # slice, not absolute positions in netproto.op -- confirm that is what
    # the C++ memonger expects.
    op_indices = list(range(len(netproto.op[start_idx:end_idx + 2])))
    log.debug("op_indices: %s", op_indices)
    optim_str = C.memonger_compute_blob_recycling_for_dag(
        netproto.SerializeToString(), [str(s).encode('utf-8') for s in heads],
        op_indices, set(str(s).encode('utf-8') for s in shared_blobs),
        namescope.encode('utf-8'),
        set() if dont_share_blobs is None else dont_share_blobs,
        {} if blob_shapes is None else blob_shapes)
    optim = caffe2_pb2.NetDef()
    optim.ParseFromString(optim_str)
    assert verify_graph_equality(net.Proto(), optim), \
        "Memonger graph is not equal to original."
    assert verify_inplace_blobs(net.Proto(), optim), \
        "Inplace assignments differ in memonger net."
    return optim
| 31.402715 | 87 | 0.602305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 983 | 0.141643 |
7ebf6d3e2be8c328592cef72dbeeecb3c71dccf2 | 1,346 | py | Python | staging/management/commands/staging_generator.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | [
"BSD-3-Clause"
] | null | null | null | staging/management/commands/staging_generator.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | [
"BSD-3-Clause"
] | null | null | null | staging/management/commands/staging_generator.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | [
"BSD-3-Clause"
] | 2 | 2021-06-07T23:09:19.000Z | 2021-06-09T21:27:55.000Z | import os
import sys
from optparse import make_option
from django.core.management import BaseCommand, call_command
from django.conf import settings
def rel(*parts):
    """Return an absolute path rooted at this module's directory,
    joined with the given path components."""
    base = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(base, *parts)
class Command(BaseCommand):
    # Management command: patch settings to enable the staging data
    # generator machinery, then hand off to the dev server.
    # NOTE(review): BaseCommand.option_list was deprecated in Django 1.8
    # and removed in 1.10; newer Django versions require add_arguments().
    option_list = BaseCommand.option_list + (
        make_option('--generators-dir',
                    action='store',
                    dest='generators_dir',
                    type='string',
                    default=0,  # falsy sentinel; checked via options.get() below
                    help='Specify directory where to look for generators'),
    )
    def handle(self, *app_labels, **options):
        # Inject the staging context processor, templates and middleware
        # into settings at runtime (mutates global settings in place).
        settings.TEMPLATE_CONTEXT_PROCESSORS += ('staging.contexts.data_generator_enabled',)
        settings.TEMPLATE_DIRS += (rel('..', '..', 'templates'),)
        settings.MIDDLEWARE_CLASSES += ('staging.middlewares.GeneratorPagesMiddleware',)
        settings.GENERATORS_DIRS = [rel('..', '..', 'generators')]
        # Extra generator directories: environment variable first, then CLI flag.
        if os.environ.get('GENERATORS_DIR'):
            settings.GENERATORS_DIRS.append(os.environ.get('GENERATORS_DIR'))
        if options.get('generators_dir'):
            settings.GENERATORS_DIRS.append(options.get('generators_dir'))
        # Missing directories are only reported, not fatal.
        for directory in settings.GENERATORS_DIRS:
            if not os.path.isdir(directory):
                print('%s generators directory does not exist' % directory)
        call_command('runserver')
| 38.457143 | 92 | 0.658247 | 1,108 | 0.82318 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.251114 |
7ec2240f63eb96b7be86ac80819b426a891d9e4a | 7,341 | py | Python | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | from .support import HPyTest
class TestCPythonCompatibility(HPyTest):
    # Tests for the HPy <-> CPython interop helpers (HPy_FromPyObject,
    # HPy_AsPyObject, HPy_Close, HPy_Dup) and for legacy METH_* C functions.
    #
    # One note about the should_check_refcount() in the tests below: on
    # CPython, handles are actually implemented as INCREF/DECREF, so we can
    # check e.g. after an HPy_Dup the refcnt is += 1. However, on PyPy they
    # are implemented in a completely different way which is unrelated to the
    # refcnt (this is the whole point of HPy, after all :)). So in many of the
    # following tests, checking the actual result of the function doesn't
    # really make sense on PyPy. We still run the functions to ensure they do
    # not crash, though.
    def test_frompyobject(self):
        # HPy_FromPyObject must take its own reference (+1 on CPython).
        mod = self.make_module("""
            #include <Python.h>
            HPy_DEF_METH_NOARGS(f)
            static HPy f_impl(HPyContext ctx, HPy self)
            {
                PyObject *o = PyList_New(0);
                Py_ssize_t initial_refcount = o->ob_refcnt;
                HPy h = HPy_FromPyObject(ctx, o);
                Py_ssize_t final_refcount = o->ob_refcnt;
                PyList_Append(o, PyLong_FromLong(1234));
                PyList_Append(o, PyLong_FromSsize_t(final_refcount -
                                                    initial_refcount));
                Py_DECREF(o);
                return h;
            }
            @EXPORT f HPy_METH_NOARGS
            @INIT
        """)
        x = mod.f()
        assert x[0] == 1234
        assert len(x) == 2
        if self.should_check_refcount():
            assert x == [1234, +1]
    def test_aspyobject(self):
        # HPy_AsPyObject yields a usable PyObject* for a handle argument.
        mod = self.make_module("""
            #include <Python.h>
            HPy_DEF_METH_O(f)
            static HPy f_impl(HPyContext ctx, HPy self, HPy arg)
            {
                PyObject *o = HPy_AsPyObject(ctx, arg);
                long val = PyLong_AsLong(o);
                Py_DecRef(o);
                return HPyLong_FromLong(ctx, val*2);
            }
            @EXPORT f HPy_METH_O
            @INIT
        """)
        assert mod.f(21) == 42
    def test_aspyobject_custom_class(self):
        # The PyObject* obtained via HPy_AsPyObject supports method calls.
        mod = self.make_module("""
            #include <Python.h>
            HPy_DEF_METH_O(f)
            static HPy f_impl(HPyContext ctx, HPy self, HPy arg)
            {
                PyObject *o = HPy_AsPyObject(ctx, arg);
                PyObject *o_res = PyObject_CallMethod(o, "foo", "");
                HPy h_res = HPy_FromPyObject(ctx, o_res);
                Py_DecRef(o);
                Py_DecRef(o_res);
                return h_res;
            }
            @EXPORT f HPy_METH_O
            @INIT
        """)
        class MyClass:
            def foo(self):
                return 1234
        obj = MyClass()
        assert mod.f(obj) == 1234
    def test_hpy_close(self):
        # HPy_Close releases the handle's reference (-1 on CPython).
        mod = self.make_module("""
            #include <Python.h>
            HPy_DEF_METH_NOARGS(f)
            static HPy f_impl(HPyContext ctx, HPy self)
            {
                PyObject *o = PyList_New(0);
                HPy h = HPy_FromPyObject(ctx, o);
                Py_ssize_t initial_refcount = o->ob_refcnt;
                HPy_Close(ctx, h);
                Py_ssize_t final_refcount = o->ob_refcnt;
                Py_DECREF(o);
                return HPyLong_FromLong(ctx, (long)(final_refcount -
                                                    initial_refcount));
            }
            @EXPORT f HPy_METH_NOARGS
            @INIT
        """)
        x = mod.f()
        if self.should_check_refcount():
            assert x == -1
    def test_hpy_dup(self):
        # HPy_Dup creates an independent handle (+1 reference on CPython).
        mod = self.make_module("""
            #include <Python.h>
            HPy_DEF_METH_NOARGS(f)
            static HPy f_impl(HPyContext ctx, HPy self)
            {
                PyObject *o = PyList_New(0);
                HPy h = HPy_FromPyObject(ctx, o);
                Py_ssize_t initial_refcount = o->ob_refcnt;
                HPy h2 = HPy_Dup(ctx, h);
                Py_ssize_t final_refcount = o->ob_refcnt;
                HPy_Close(ctx, h);
                HPy_Close(ctx, h2);
                Py_DECREF(o);
                return HPyLong_FromLong(ctx, (long)(final_refcount -
                                                    initial_refcount));
            }
            @EXPORT f HPy_METH_NOARGS
            @INIT
        """)
        x = mod.f()
        if self.should_check_refcount():
            assert x == +1
    def test_many_handles(self):
        # Opening and closing many handles for one object must balance out.
        mod = self.make_module("""
            #include <Python.h>
            #define NUM_HANDLES  10000
            HPy_DEF_METH_NOARGS(f)
            static HPy f_impl(HPyContext ctx, HPy self)
            {
                PyObject *o = PyList_New(0);
                Py_ssize_t result = -42;
                HPy handles[NUM_HANDLES];
                int i;
                Py_ssize_t initial_refcount = o->ob_refcnt;
                for (i = 0; i < NUM_HANDLES; i++)
                    handles[i] = HPy_FromPyObject(ctx, o);
                for (i = 0; i < NUM_HANDLES; i++)
                    if (HPy_IsNull(handles[i]))
                        goto error;
                for (i = 0; i < NUM_HANDLES; i++)
                    HPy_Close(ctx, handles[i]);
                Py_ssize_t final_refcount = o->ob_refcnt;
                result = final_refcount - initial_refcount;
            error:
                return HPyLong_FromLong(ctx, (long)result);
            }
            @EXPORT f HPy_METH_NOARGS
            @INIT
        """)
        assert mod.f() == 0
    def test_meth_cpy_noargs(self):
        # Legacy CPython METH_NOARGS functions can be exported unchanged.
        mod = self.make_module("""
            #include <Python.h>
            static PyObject *f(PyObject *self, PyObject *args)
            {
                return PyLong_FromLong(1234);
            }
            @EXPORT f METH_NOARGS
            @INIT
        """)
        assert mod.f() == 1234
    def test_meth_cpy_o(self):
        # Legacy METH_O functions receive the single argument directly.
        mod = self.make_module("""
            #include <Python.h>
            static PyObject *f(PyObject *self, PyObject *arg)
            {
                long x = PyLong_AsLong(arg);
                return PyLong_FromLong(x * 2);
            }
            @EXPORT f METH_O
            @INIT
        """)
        assert mod.f(45) == 90
    def test_meth_cpy_varargs(self):
        # Legacy METH_VARARGS functions parse a tuple via PyArg_ParseTuple.
        mod = self.make_module("""
            #include <Python.h>
            static PyObject *f(PyObject *self, PyObject *args)
            {
                long a, b, c;
                if (!PyArg_ParseTuple(args, "lll", &a, &b, &c))
                    return NULL;
                return PyLong_FromLong(100*a + 10*b + c);
            }
            @EXPORT f METH_VARARGS
            @INIT
        """)
        assert mod.f(4, 5, 6) == 456
    def test_meth_cpy_keywords(self):
        # Legacy METH_VARARGS|METH_KEYWORDS functions accept kwargs too.
        mod = self.make_module("""
            #include <Python.h>
            static PyObject *f(PyObject *self, PyObject *args, PyObject *kwargs)
            {
                static char *kwlist[] = { "a", "b", "c", NULL };
                long a, b, c;
                if (!PyArg_ParseTupleAndKeywords(args, kwargs, "lll", kwlist, &a, &b, &c))
                    return NULL;
                return PyLong_FromLong(100*a + 10*b + c);
            }
            @EXPORT f METH_VARARGS | METH_KEYWORDS
            @INIT
        """)
        assert mod.f(c=6, b=5, a=4) == 456
7ec2bfc9ccb6ee726f5d61b2d45451c02e9d41ea | 553 | py | Python | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-21 05:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops SubscribeByBlog.user and
    # redefines SubscribeRecord.is_read as a plain BooleanField.
    dependencies = [
        ('blog', '0002_auto_20200320_1150'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='subscribebyblog',
            name='user',
        ),
        migrations.AlterField(
            model_name='subscriberecord',
            name='is_read',
            # verbose_name is Russian for "record read by the user".
            field=models.BooleanField(default=False, verbose_name='Запись прочитана пользователем'),
        ),
    ]
| 24.043478 | 100 | 0.605787 | 488 | 0.839931 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.321859 |
7ec4be3802281f0f009f1059ea1169959c79b1bc | 92 | py | Python | Curso-em-video-Python/PycharmProjects/pythonExercicios/ex046 - ContagemRegressiva.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
] | null | null | null | Curso-em-video-Python/PycharmProjects/pythonExercicios/ex046 - ContagemRegressiva.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
] | null | null | null | Curso-em-video-Python/PycharmProjects/pythonExercicios/ex046 - ContagemRegressiva.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
] | null | null | null | from time import sleep
for i in range(10, 0, -1):
print(i)
sleep(1)
print('Yeey!!')
| 15.333333 | 26 | 0.597826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.086957 |
7ec5bb3ee6e56981cd97f19bc1f1b34b98f31935 | 631 | py | Python | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | """Add contact_details to House
Revision ID: 1f97f799a477
Revises: 2df9ce70bad
Create Date: 2018-08-08 10:58:44.869939
"""
# revision identifiers, used by Alembic.
revision = '1f97f799a477'
down_revision = '2df9ce70bad'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add the nullable Text column
    ``contact_details`` to the ``house`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('house', sa.Column('contact_details', sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``contact_details`` column
    from the ``house`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('house', 'contact_details')
    ### end Alembic commands ###
| 23.37037 | 82 | 0.70206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.654517 |
7ec8bcdeff170d91a374ba018e5e6426dc9a2e70 | 725 | py | Python | JYTools/demo/asyncWorker/plusWorker.zh.py | meisanggou/Tools | 66acd710e4227a3a35acf3b804ce12f1de518912 | [
"MIT"
] | null | null | null | JYTools/demo/asyncWorker/plusWorker.zh.py | meisanggou/Tools | 66acd710e4227a3a35acf3b804ce12f1de518912 | [
"MIT"
] | 3 | 2018-01-24T08:16:55.000Z | 2018-01-30T06:55:59.000Z | JYTools/demo/asyncWorker/plusWorker.zh.py | meisanggou/Tools | 66acd710e4227a3a35acf3b804ce12f1de518912 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# coding: utf-8
from JYTools.JYWorker import AsyncRedisWorker
__author__ = 'meisanggou'
class PlusWorker(AsyncRedisWorker):
    # Worker that adds params["a"] and params["b"] and publishes the sum
    # as output "c".
    def handler_task(self, key, params):
        print("Enter Plus Worker")
        # NOTE(review): assumes set_current_task_invalid aborts the task
        # (e.g. by raising); otherwise params["a"]/params["b"] below would
        # raise KeyError when missing -- confirm JYWorker's semantics.
        if "a" not in params:
            self.set_current_task_invalid("Need a")
        if "b" not in params:
            self.set_current_task_invalid("Need b")
        self.task_log("a is ", params["a"])
        self.task_log("b is ", params["b"])
        c = params["a"] + params["b"]
        self.set_output("c", c)
        print("End Plus Task")
# Start a worker process listening on the "Plus" work tag.
p_w = PlusWorker(conf_path="redis_worker.conf", heartbeat_value="FFFFFF", work_tag="Plus",
                 stat_work_tag="STAT")
p_w.work()
| 25.892857 | 90 | 0.613793 | 466 | 0.642759 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.238621 |
7ec971469d93f6ab0b3357bd1986c1d75f158264 | 1,336 | py | Python | wrappers.py | FlanOfFlans/Capone | ac09fabac8eb9a2be6233ec7cdcc14e954427226 | [
"MIT"
] | null | null | null | wrappers.py | FlanOfFlans/Capone | ac09fabac8eb9a2be6233ec7cdcc14e954427226 | [
"MIT"
] | 4 | 2020-02-07T08:26:32.000Z | 2021-02-28T21:04:22.000Z | wrappers.py | FlanOfFlans/Capone | ac09fabac8eb9a2be6233ec7cdcc14e954427226 | [
"MIT"
] | null | null | null | import discord
class CaponeServer():
    # Thin wrapper around a discord server (guild) object.
    def __init__(self, discord_server):
        self._discord_server = discord_server
    def get_members(self):
        # NOTE(review): in recent discord.py versions ``members`` is a
        # property, not a callable -- confirm against the pinned version.
        return map(CaponeUser, self._discord_server.members())
    def equals(self, other):
        # Two wrappers are equal when they wrap the same underlying server.
        return self._discord_server == other._discord_server
class CaponeChannel():
    """Thin wrapper around a discord channel object."""
    def __init__(self, discord_channel):
        self._discord_channel = discord_channel
    def get_members(self):
        """Return the channel members wrapped as CaponeUser objects."""
        # NOTE(review): in recent discord.py versions ``members`` is a
        # property, not a callable -- confirm against the pinned version.
        return map(CaponeUser, self._discord_channel.members())
    def get_name(self):
        """Return the channel's display name."""
        return self._discord_channel.name
    def get_server(self):
        """Return the channel's server wrapped as a CaponeServer."""
        return CaponeServer(self._discord_channel.server)
    def is_private(self):
        """Return True when this wraps a private (DM/group) channel."""
        return isinstance(self._discord_channel, discord.abc.PrivateChannel)
    def equals(self, other):
        """Return True when both wrappers wrap the same underlying channel."""
        return self._discord_channel == other._discord_channel
    async def send(self, message, source="???"):
        """Send *message* to the channel and return the sent message.

        Bug fix: the coroutine returned by the underlying ``send`` was
        never awaited, so the message was never actually dispatched.
        """
        return await self._discord_channel.send(message)
class CaponeUser():
    """Thin wrapper around a discord user object."""
    def __init__(self, discord_user):
        self._discord_user = discord_user
    def __str__(self):
        # Classic Discord tag format: name#discriminator.
        return "{0}#{1}".format(self.get_name(), self.get_discriminator())
    def get_name(self):
        """Return the user's name."""
        return self._discord_user.name
    def get_discriminator(self):
        """Return the user's discriminator (the digits after '#')."""
        return self._discord_user.discriminator
    def equals(self, other):
        """Return True when both wrappers wrap the same underlying user."""
        return self._discord_user == other._discord_user
    async def send(self, message, source="???"):
        """DM *message* to the user and return the sent message.

        Bug fix: the coroutine returned by the underlying ``send`` was
        never awaited, so the message was never actually dispatched.
        """
        return await self._discord_user.send(message)
7ec9f217b2ef0eef366902d1d8043f6411d178a9 | 143 | py | Python | _longname.py | michaelshumshum/kahoot-annoyer | a4f35e6ac6e54390815164fc9d81e28d8c301057 | [
"MIT"
] | 4 | 2021-02-20T10:55:33.000Z | 2021-09-25T06:20:16.000Z | _longname.py | michaelshumshum/kahoot-annoyer | a4f35e6ac6e54390815164fc9d81e28d8c301057 | [
"MIT"
] | 7 | 2021-01-07T13:57:21.000Z | 2021-09-30T06:28:15.000Z | _longname.py | michaelshumshum/kahoot-annoyer | a4f35e6ac6e54390815164fc9d81e28d8c301057 | [
"MIT"
] | 5 | 2021-01-19T12:27:49.000Z | 2021-09-23T12:29:44.000Z | from random import randint
def longname():
return ''.join(chr(randint(0,143859)) for i in range(10000)).encode('utf-8','ignore').decode()
| 28.6 | 98 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.118881 |
7ec9fcf9ceb86315712a57bbd849628caaaffa4f | 3,535 | py | Python | concept_formation/examples/examples_utils.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 47 | 2015-06-08T20:34:18.000Z | 2021-09-26T17:59:06.000Z | concept_formation/examples/examples_utils.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 65 | 2015-07-27T18:16:31.000Z | 2021-10-04T14:02:51.000Z | concept_formation/examples/examples_utils.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 13 | 2015-07-27T13:27:03.000Z | 2022-03-15T02:18:10.000Z | """
This module contains utility functions used in the example scripts. They are
implemented separately because they use scipy and numpy and we want to remove
external dependencies from within the core library.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from math import sqrt
from scipy.stats import sem
from scipy.stats import t
from scipy import linalg
import numpy as np
from concept_formation.utils import mean
def moving_average(a, n=3):
    """Smooth *a* with a simple moving average of window size *n*.

    Used to smooth out the curves on a graph. Returns an array of
    length ``len(a) - n + 1`` holding the mean of each consecutive
    window of ``n`` values.
    """
    csum = np.cumsum(a, dtype=float)
    # Turn the cumulative sums into window sums in place ...
    csum[n:] -= csum[:-n]
    # ... then divide by the window size to get the means.
    return csum[n - 1:] / n
def lowess(x, y, f=1./3., iter=3, confidence=0.95):
    """
    Performs Lowess smoothing
    Code adapted from: https://gist.github.com/agramfort/850437
    lowess(x, y, f=2./3., iter=3) -> yest
    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.
    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter. The
    function will run faster with a smaller number of iterations.
    .. todo:: double check that the confidence bounds are correct
    """
    n = len(x)
    r = int(np.ceil(f*n))
    # Per-point bandwidth: distance to the r-th nearest neighbour.
    h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
    # Tricube weights on the scaled distances.
    w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
    w = (1 - w**3)**3
    yest = np.zeros(n)
    delta = np.ones(n)
    for iteration in range(iter):
        # Weighted linear fit at each point; delta downweights outliers.
        for i in range(n):
            weights = delta * w[:, i]
            b = np.array([np.sum(weights*y), np.sum(weights*y*x)])
            A = np.array([[np.sum(weights), np.sum(weights*x)],
                          [np.sum(weights*x), np.sum(weights*x*x)]])
            beta = linalg.solve(A, b)
            yest[i] = beta[0] + beta[1]*x[i]
        residuals = y - yest
        s = np.median(np.abs(residuals))
        # Bisquare robustifying weights from the residuals.
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta**2)**2
    # Pointwise confidence half-widths from residuals sharing the same x.
    # NOTE(review): ``n`` is rebound inside the loop (shadows the sample
    # count) and t._ppf is a private scipy API -- t.ppf is the public form.
    h = np.zeros(n)
    for x_idx, x_val in enumerate(x):
        r2 = np.array([v*v for i, v in enumerate(residuals) if x[i] == x_val])
        n = len(r2)
        se = sqrt(mean(r2)) / sqrt(len(r2))
        h[x_idx] = se * t._ppf((1+confidence)/2., n-1)
    return yest, yest-h, yest+h
def avg_lines(x, y, confidence=0.95):
    """For each x value, average all y values observed at that x.

    Returns three arrays aligned with *x*: the mean at each x and the
    lower/upper bounds of its ``confidence`` interval.
    """
    n = len(x)
    means = np.zeros(n)
    lower = np.zeros(n)
    upper = np.zeros(n)
    for x_idx, x_val in enumerate(x):
        ys = np.array([v for i, v in enumerate(y) if x[i] == x_val])
        # Bug fix: ``confidence`` used to be accepted but never forwarded,
        # so the interval was always computed at the default level.
        m, l, u = mean_confidence_interval(ys, confidence)
        means[x_idx] = m
        lower[x_idx] = l
        upper[x_idx] = u
    return means, lower, upper
def mean_confidence_interval(data, confidence=0.95):
    """
    Given a list or vector of data, this returns the mean, lower, and upper
    confidence intervals to the level of confidence specified (default = 95%
    confidence interval).
    """
    a = 1.0*np.array(data)
    n = len(a)
    m, se = np.mean(a), sem(a)
    # Fix: t.ppf is the public Student-t quantile function; the private
    # t._ppf used previously is a scipy internal and may disappear or
    # change semantics between releases.
    h = se * t.ppf((1+confidence)/2., n-1)
    return m, m-h, m+h
| 33.037383 | 82 | 0.600566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,306 | 0.369448 |
7ecc4036bf06e228f69ff0e22a8350689c78b5b3 | 1,082 | py | Python | tests/test_app_builder.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | 2 | 2018-05-28T14:15:00.000Z | 2018-10-15T09:33:38.000Z | tests/test_app_builder.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | null | null | null | tests/test_app_builder.py | dmitryhd/avio | 4e99c123de12a682f1ac1141899d670fbab81de6 | [
"MIT"
] | null | null | null | from aiohttp import web
from avio.app_builder import AppBuilder
from avio.default_handlers import InfoHandler
def test_create_app():
    """The default builder must produce an aiohttp Application."""
    built = AppBuilder().build_app()
    assert isinstance(built, web.Application)
def test_app_config():
    """Builder config and build-time overrides both land in app['config'],
    alongside the default keys the builder always provides."""
    app = AppBuilder({'app_key': 'value'}).build_app({'update_key': 'value'})
    config = app['config']
    assert config['app_key'] == 'value'
    assert config['update_key'] == 'value'
    for default_key in ('logging', 'host', 'port', 'ioloop_type'):
        assert default_key in config
def test_app_routes():
    """Default handlers are registered under their well-known route names."""
    app = AppBuilder().build_app()
    named = app.router.named_resources()
    assert 'info' in named
    assert 'error' in named
def test_additional_routes():
    """A subclass can hook prepare_app() to register extra routes."""
    class MyAppBuilder(AppBuilder):
        def prepare_app(self, app: web.Application, config: dict = None):
            app.router.add_view('/_info2', InfoHandler, name='info2')

    app_ = MyAppBuilder().build_app()
    assert 'info2' in app_.router.named_resources()
| 26.390244 | 73 | 0.689464 | 176 | 0.162662 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.136784 |
7ecc93eabbbc809f0893cd5190e4fc2c1e5b2a52 | 665 | py | Python | scripts/compute_lengths.py | ZurichNLP/understanding-mbr | 4052f6feef783fd851fdaf0acd6bf6ad71dc58ef | [
"MIT"
] | 12 | 2021-05-19T10:20:05.000Z | 2021-12-18T07:51:35.000Z | scripts/compute_lengths.py | ZurichNLP/understanding-mbr | 4052f6feef783fd851fdaf0acd6bf6ad71dc58ef | [
"MIT"
] | 1 | 2021-06-22T17:37:35.000Z | 2021-06-23T13:16:39.000Z | scripts/compute_lengths.py | ZurichNLP/understanding-mbr | 4052f6feef783fd851fdaf0acd6bf6ad71dc58ef | [
"MIT"
] | 1 | 2021-09-01T09:04:34.000Z | 2021-09-01T09:04:34.000Z | #! /usr/bin/python3
import sys
import numpy
import argparse
import logging
def parse_args():
    """Parse command-line options; --output (required) names the .npy target."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output", type=str,
        help="Where to save numpy array of lengths.", required=True)
    return parser.parse_args()
def main():
    """Read whitespace-tokenized lines from stdin and save their per-line
    token counts to ``--output`` as an int32 numpy array."""
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logging.debug(args)
    lengths = [len(line.strip().split(" ")) for line in sys.stdin]
    numpy.save(args.output, numpy.asarray(lengths, dtype="int32"))
if __name__ == '__main__':
    main()
| 17.972973 | 106 | 0.664662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.132331 |
7ecf3dcc1c0914b06bad4a9556b6b06a68e54449 | 2,612 | py | Python | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PostgreSQL account analysis task."""
import os
import unittest
from turbinia import config
from turbinia.workers.analysis import postgresql_acct
from turbinia.workers.workers_test import TestTurbiniaTaskBase
class PostgresAcctAnalysisTaskTest(TestTurbiniaTaskBase):
  """Tests for PostgresAcctAnalysisTask Task."""

  TEST_DATA_DIR = None
  EXPECTED_CREDENTIALS = {'5f4dcc3b5aa765d61d8327deb882cf99': 'postgres'}
  POSTGRES_REPORT = """#### **PostgreSQL analysis found 1 weak password(s)**
* **1 weak password(s) found:**
    * User 'postgres' with password 'password'"""

  def setUp(self):
    """Point the evidence under test at the repository's test_data directory."""
    super(PostgresAcctAnalysisTaskTest, self).setUp()
    self.setResults(mock_run=False)
    here = os.path.dirname(os.path.realpath(__file__))
    self.TEST_DATA_DIR = os.path.join(here, '..', '..', '..', 'test_data')
    self.evidence.local_path = self.TEST_DATA_DIR

  def test_extract_data_dir(self):
    """Tests the _extract_data_dir method."""
    config.LoadConfig()
    task = postgresql_acct.PostgresAccountAnalysisTask()
    # pylint: disable=protected-access
    found = task._extract_data_dir(self.TEST_DATA_DIR, self.result)
    self.assertEqual(found, ['test_data'])
    self.assertEqual(len(found), 1)

  def test_extract_creds(self):
    """Tests the _extract_creds method."""
    config.LoadConfig()
    task = postgresql_acct.PostgresAccountAnalysisTask()
    # pylint: disable=protected-access
    extracted = task._extract_creds(['/database'], self.evidence)
    self.assertDictEqual(extracted, self.EXPECTED_CREDENTIALS)

  def test_analyse_postgres_creds(self):
    """Tests the _analyse_postgres_creds method."""
    config.LoadConfig()
    task = postgresql_acct.PostgresAccountAnalysisTask()
    report, priority, summary = task._analyse_postgres_creds(
        self.EXPECTED_CREDENTIALS)
    self.assertEqual(report, self.POSTGRES_REPORT)
    self.assertEqual(priority, 10)
    self.assertEqual(summary, 'PostgreSQL analysis found 1 weak password(s)')
7ecfee91de6afd15c9b4944fc8130ce2c7df090a | 162 | py | Python | top_players.py | ergest/Fantasy-Premier-League | 7773eaad57058e760c5d1f77cfa98d2a06d73e48 | [
"MIT"
] | 1,011 | 2016-12-30T09:37:45.000Z | 2022-03-31T02:50:09.000Z | top_players.py | ergest/Fantasy-Premier-League | 7773eaad57058e760c5d1f77cfa98d2a06d73e48 | [
"MIT"
] | 111 | 2018-04-13T02:02:09.000Z | 2022-02-21T05:07:39.000Z | top_players.py | ergest/Fantasy-Premier-League | 7773eaad57058e760c5d1f77cfa98d2a06d73e48 | [
"MIT"
] | 739 | 2017-12-27T03:30:18.000Z | 2022-03-22T14:09:04.000Z | from getters import *
from parsers import *
def main():
    """Fetch the latest FPL data and write the top-player parse for 2020-21."""
    parse_top_players(get_data(), 'data/2020-21')
if __name__ == '__main__':
    main()
| 16.2 | 43 | 0.654321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.148148 |
7ed1533d1fe94afa39fdc1263bf8dee1d5e7d532 | 2,372 | py | Python | src/annotatepORFs.py | clb1/AnnotateCGDB | 741324c7a8d05637a09cd16b515f9af2cde69ae6 | [
"MIT"
] | null | null | null | src/annotatepORFs.py | clb1/AnnotateCGDB | 741324c7a8d05637a09cd16b515f9af2cde69ae6 | [
"MIT"
] | null | null | null | src/annotatepORFs.py | clb1/AnnotateCGDB | 741324c7a8d05637a09cd16b515f9af2cde69ae6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from collections import defaultdict
import gzip
import os
import pandas as pd
import sys
import pdb
def collectDataForMatchesToPMProteins(annotated_PM_proteins, blast_directory):
    # NOTE: Python 2 module (print-statement syntax below).
    # Maps each pORF to the plasma-membrane (PM) proteins whose transmembrane
    # (TM) segments its BLAST alignment covers by at least min_TM_match_len
    # residues. Returns {pORF_id: set(uniprot_ids)}.
    #
    # annotated_PM_proteins: gzipped TSV of (uniprot_id, aa_seq, tm_segments),
    #   where tm_segments is a comma-separated list of 'start-stop' ranges.
    # blast_directory: holds one <uniprot_id>.blastp.bz2 BLAST result each.
    pORF_to_PM_protein_matches = defaultdict(set)
    blast_file_columns = ["query", "subject", "perc_ident", "align_len", "num_mismatches", "num_gaps", \
                          "query_start", "query_end", "subject_start", "subject_end", "E_value", "bit_score"]
    ip = gzip.open(annotated_PM_proteins, 'rb')
    # First row is a column header; skip it.
    header = ip.readline()
    for line in ip:
        uniprot_id, aa_seq, tm_segments = line.strip().split("\t")
        print >> sys.stderr, "\rINFO: processing Blast results for %s" % uniprot_id,
        # Expand each 'start-stop' TM segment into the set of residue
        # positions it covers (inclusive on both ends).
        tm_segments_positions = []
        for tm_segment in tm_segments.split(','):
            start, stop = tm_segment.split('-')
            tm_segments_positions.append( set(range(int(start),int(stop)+1)) )
        blast_file = "%s/%s.blastp.bz2" % (blast_directory, uniprot_id)
        assert(os.path.exists(blast_file))
        # Minimum overlap (in residues) between an alignment and a TM
        # segment for the hit to count.
        min_TM_match_len = 15
        blast_df = pd.read_csv(blast_file, sep="\t", header=None, skiprows=5, names=blast_file_columns, compression="bz2", skipfooter=1)
        for tup in blast_df.itertuples(index=False):
            # tup[6]/tup[7] are query_start/query_end of this alignment.
            match_positions = set(range(int(tup[6]), int(tup[7]+1)))
            if (any(map(lambda x: len(match_positions & x) >= min_TM_match_len, tm_segments_positions))):
                # tup[1] is the BLAST subject column, i.e. the pORF ID.
                pORF_to_PM_protein_matches[tup[1]].add(uniprot_id)
    ip.close()
    print >> sys.stderr, "INFO: done\n"
    return pORF_to_PM_protein_matches
def writeOutput(pORF_to_PM_protein_matches, output_tsv):
    # Write the pORF -> PM-protein matches as a gzipped TSV. The third
    # column (GPI protein ID) is not computed here and is emitted as '-'.
    print >> sys.stderr, "INFO: writing output to %s" % output_tsv
    op = gzip.open(output_tsv, 'wb')
    op.write("pORF_ID\tPM_PROTEIN_ID\tGPI_PROTEIN_ID\n")
    for pORF_ID, PM_protein_IDs in pORF_to_PM_protein_matches.items():
        for protein_ID in PM_protein_IDs:
            op.write("%s\t%s\t-\n" % (pORF_ID, protein_ID))
    op.close()
if (__name__ == "__main__"):
    # Usage: annotatepORFs.py <annotated_PM_proteins.tsv.gz> <blast_dir> <output.tsv.gz>
    annotated_PM_proteins, blast_directory, output_tsv = sys.argv[1:]
    # Record which regions of each HMPAS protein aligned to which pORFs
    pORF_to_PM_protein_matches = collectDataForMatchesToPMProteins(annotated_PM_proteins, blast_directory)
    writeOutput(pORF_to_PM_protein_matches, output_tsv)
    sys.exit(0)
| 37.650794 | 136 | 0.685497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.176223 |
7ed1dd5c13dcf775a1baf193fd62b210cf6a5965 | 818 | py | Python | ch10-unsupervised/clustering/spectral_clustering/tests/test_spectral_embedding_.py | skforest/intro_ds | 478a6b236c2e33c4baffec8aafa8e0a8ed68dca8 | [
"Apache-2.0"
] | 314 | 2018-02-11T09:44:21.000Z | 2022-03-31T02:55:34.000Z | ch10-unsupervised/clustering/spectral_clustering/tests/test_spectral_embedding_.py | jianyigengge/intro_ds | 886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5 | [
"Apache-2.0"
] | 5 | 2018-05-27T07:18:09.000Z | 2019-03-29T14:07:55.000Z | ch10-unsupervised/clustering/spectral_clustering/tests/test_spectral_embedding_.py | jianyigengge/intro_ds | 886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5 | [
"Apache-2.0"
] | 262 | 2018-03-20T07:36:22.000Z | 2022-03-08T06:51:58.000Z | # -*- coding: UTF-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from spectral_clustering.spectral_embedding_ import spectral_embedding
def assert_first_col_equal(maps):
    """Fail unless every entry of the first column of *maps* equals maps[0, 0]."""
    ones = [1] * maps.shape[0]
    assert_array_almost_equal(maps[:, 0] / maps[0, 0], ones)
def test_spectral_embedding():
    """By the definition of spectral embedding, the first column is constant."""
    adjacency = np.array([
        [0., 0.8, 0.9, 0.],
        [0.8, 0., 0., 0.],
        [0.9, 0., 0., 1.],
        [0., 0., 1., 0.]])
    for solver in ("arpack", "lobpcg"):
        embedding = spectral_embedding(
            adjacency, n_components=2, drop_first=False, eigen_solver=solver)
        assert_first_col_equal(embedding)
| 30.296296 | 75 | 0.662592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.142353 |
7ed22c3a12b83e7e397a2f42645d4d28f20c10a1 | 1,028 | py | Python | setup.py | albarsil/pyschemavalidator- | 18b6dddc41a2e48e87edd9c8744956c933a0954c | [
"MIT"
] | 2 | 2021-07-14T22:16:11.000Z | 2021-07-15T13:59:16.000Z | setup.py | albarsil/pyschemavalidator- | 18b6dddc41a2e48e87edd9c8744956c933a0954c | [
"MIT"
] | null | null | null | setup.py | albarsil/pyschemavalidator- | 18b6dddc41a2e48e87edd9c8744956c933a0954c | [
"MIT"
] | 1 | 2021-11-08T17:51:56.000Z | 2021-11-08T17:51:56.000Z | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md for use as the long description."""
    # Be explicit about the encoding so the build does not depend on the
    # platform's default locale (README.md is markdown, assumed UTF-8).
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata; see https://setuptools.pypa.io for field semantics.
setup(
    name='pyschemavalidator',
    version='1.0.4',
    description='Decorator for endpoint inputs on APIs and a dictionary/JSON validator.',
    long_description=readme(),
    long_description_content_type="text/markdown",
    url='https://github.com/albarsil/pyschemavalidator',
    author='Allan Barcelos',
    author_email='albarsil@gmail.com',
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    # Fix: 'graphql' was listed twice; duplicates only bloat the metadata.
    keywords=['api', 'flask', 'graphql', 'json', 'validation', 'schema', 'dictionary'],
    packages=find_packages(exclude=['tests.*', 'tests']),
    install_requires=[],
    test_suite='tests.test_suite'
)
| 34.266667 | 98 | 0.640078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.534047 |
7ed2f5da954583e80a42f4d878be91fafc750834 | 278 | py | Python | Programming/list.py | flybaozi/algorithm-study | 2e05a0ff9fd9c935822ca43636394f603aa3078d | [
"MIT"
] | null | null | null | Programming/list.py | flybaozi/algorithm-study | 2e05a0ff9fd9c935822ca43636394f603aa3078d | [
"MIT"
] | null | null | null | Programming/list.py | flybaozi/algorithm-study | 2e05a0ff9fd9c935822ca43636394f603aa3078d | [
"MIT"
def test1():
    """Print each phrase of the first group, collect every concatenation of a
    first-group phrase with a second-group phrase, then print the result list.
    (The third group in ``arr`` is currently unused.)"""
    arr = [["我", "你好"], ["你在干嘛", "你干啥呢"], ["吃饭呢", "打球呢", "看电视呢"]]
    new_arr = []
    for i in arr[0]:
        print(i)
        for j in arr[1]:
            new_arr.append(i + j)
    print(new_arr)
# Removed a commented-out, never-called test() stub (dead code).
test1()
| 16.352941 | 65 | 0.435252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.4 |
7ed5ebddc6457b559b508614ad321953960871a9 | 1,012 | py | Python | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 11 | 2021-05-07T01:28:26.000Z | 2022-03-10T08:23:16.000Z | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 2 | 2021-08-13T10:12:13.000Z | 2021-08-31T02:03:20.000Z | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 1 | 2021-06-08T07:23:22.000Z | 2021-06-08T07:23:22.000Z | ###############################################################################
# Utils functions for language models.
#
# NOTE: source from https://github.com/litian96/FedProx
###############################################################################
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
    """Return the one-hot encoding of *letter* over the ALL_LETTERS alphabet.

    Note: a character missing from ALL_LETTERS gets index -1 from str.find,
    which one-hot encodes the final alphabet position.
    """
    return _one_hot(ALL_LETTERS.find(letter), NUM_LETTERS)
def word_to_indices(word):
    '''returns a list of character indices
    Args:
        word: string
    Return:
        indices: int list with length len(word); characters missing from
            ALL_LETTERS map to -1 (str.find's not-found value)
    '''
    # Comprehension replaces the manual append loop (same output).
    return [ALL_LETTERS.find(c) for c in word]
| 25.948718 | 98 | 0.559289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.600791 |
7ed622001a9b3fee28a3830ea934fc1222ee2519 | 12,347 | py | Python | src/cascade/executor/cascade_plan.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | src/cascade/executor/cascade_plan.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | src/cascade/executor/cascade_plan.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | """
Specification for what parameters are used at what location within
the Cascade.
"""
from os import linesep
from types import SimpleNamespace
import networkx as nx
from cascade.core import getLoggers
from cascade.core.parameters import ParameterProperty, _ParameterHierarchy
from cascade.input_data import InputDataError
from cascade.input_data.configuration.builder import policies_from_settings
from cascade.input_data.configuration.sex import SEX_ID_TO_NAME, SEX_NAME_TO_ID
from cascade.input_data.db.locations import location_id_from_start_and_finish
from cascade.runner.application_config import application_config
from cascade.runner.job_graph import RecipeIdentifier
CODELOG, MATHLOG = getLoggers(__name__)
class EstimationParameters:
    """Everything one recipe needs to know about its place in the Cascade:
    the location it estimates, the sexes of data it pulls, its parent in
    the hierarchy, and the settings/policies in force."""

    def __init__(self, settings, policies, children,
                 parent_location_id, grandparent_location_id, sexes, number_of_fixed_effect_samples,
                 model_options):
        self.parent_location_id = parent_location_id
        # Sex IDs whose data this estimation uses (see location_specific_settings).
        self.sexes = sexes
        self.data_access = ParameterProperty()
        """These decide which data to get."""
        self.run = ParameterProperty()
        """These affect how the program runs but not its results."""
        self.grandparent_location_id = grandparent_location_id
        """Can be null at top of drill, even when not global location."""
        self.model_options = model_options
        # Child location IDs under parent_location_id; may be None for the
        # bundle-setup pseudo-location (location 0).
        self.children = children
        self.settings = settings
        self.policies = policies
        self.number_of_fixed_effect_samples = number_of_fixed_effect_samples
def make_model_options(locations, parent_location_id, ev_settings):
    """Build the model-option hierarchy for one location; currently this is
    just the bound_random value resolved for that location."""
    resolved = get_bound_random_this_location(locations, parent_location_id, ev_settings)
    return _ParameterHierarchy(bound_random=resolved)
def get_bound_random_this_location(locations, parent_location_id, ev_settings):
    """Resolve the bound_random option for one location.

    Location-specific values from ``re_bound_location`` are written onto a
    copy of the hierarchy; walking from this location up to the root, the
    nearest node with a value wins, falling back to the global
    ``model.bound_random`` when no ancestor sets one.
    """
    # Set the bounds throughout the location hierarchy.
    # hasattr is right here because any unset ancestor makes the parent unset.
    # and one of the child forms can have an unset location or value.
    location_tree = locations.copy()
    if hasattr(ev_settings, "re_bound_location"):
        add_bound_random_to_location_properties(ev_settings.re_bound_location, location_tree)
    else:
        CODELOG.debug("No re_bound_location in settings.")

    # Get the global value, if it exists.
    if not ev_settings.model.is_field_unset("bound_random"):
        bound_random = ev_settings.model.bound_random
    else:
        bound_random = None
    CODELOG.debug(f"Setting bound_random's default to {bound_random}")

    # Search up the location hierarchy to see if an ancestor has a value.
    this_and_ancestors = nx.ancestors(location_tree, parent_location_id) | {parent_location_id}
    to_top = list(nx.topological_sort(nx.subgraph(location_tree, this_and_ancestors)))
    to_top.reverse()
    for check_bounds in to_top:
        # Fix: Graph.node was removed in networkx 2.4; .nodes matches the
        # accessor used elsewhere in this module.
        if "bound_random" in location_tree.nodes[check_bounds]:
            CODELOG.debug(f"Found bound random in location {check_bounds}")
            bound_random = location_tree.nodes[check_bounds]["bound_random"]
            break
    return bound_random
def add_bound_random_to_location_properties(re_bound_location, locations):
    """Copy bound_random values from the re_bound_location forms onto the
    location graph's node attributes, in place.

    A form without a location applies to the hierarchy root; a form without
    a value stores None, which turns the option off at that node.
    """
    for bounds_form in re_bound_location:
        if not bounds_form.is_field_unset("value"):
            value = bounds_form.value
        else:
            value = None  # This turns off bound random option.
        # Fix: Graph.node was removed in networkx 2.4; .nodes matches the
        # accessor used elsewhere in this module.
        if not bounds_form.is_field_unset("location"):
            CODELOG.debug(f"setting {bounds_form.location} to {value}")
            locations.nodes[bounds_form.location]["bound_random"] = value
        else:
            CODELOG.debug(f"setting root to {value}")
            locations.nodes[locations.graph["root"]]["bound_random"] = value
def recipe_graph_from_settings(locations, settings, args):
    """
    This defines the full set of recipes that are the model.
    These may be a subset of all locations,
    and we may execute a subset of these.
    Args:
        locations (nx.DiGraph): A graph of locations in a hierarchy.
        settings (Configuration): The EpiViz-AT Form (in form.py)
        args (Namespace|SimpleNamespace): Parsed arguments.
    Returns:
        nx.DiGraph: Each node is a RecipeIdentifier. Edges denote dependency
        on a previous transform. The graph has a key called "root" that
        tells you the first node.
    """
    # A drill is a single linear chain; otherwise build the full global graph.
    if not settings.model.is_field_unset("drill") and settings.model.drill == "drill":
        recipe_graph = drill_recipe_graph(locations, settings, args)
    else:
        recipe_graph = global_recipe_graph(locations, settings, args)
    # Every recipe node carries its own fully-resolved local settings.
    for recipe_identifier in recipe_graph.nodes:
        local_settings = location_specific_settings(locations, settings, args, recipe_identifier)
        recipe_graph.nodes[recipe_identifier]["local_settings"] = local_settings
    # Only log the full topological ordering for small graphs, to keep logs readable.
    if len(recipe_graph) < application_config()["NonModel"].getint("small-graph-nodes"):
        debug_lines = [str(node) for node in nx.topological_sort(recipe_graph)]
        debug_str = f"{linesep}\t".join(debug_lines)
        CODELOG.debug(f"Recipes in order{linesep}\t{debug_str}")
    return recipe_graph
def drill_recipe_graph(locations, settings, args):
    """Build the linear recipe chain for a drill: one bundle-setup recipe
    followed by one estimation per location along the drill path, all at
    the drill's configured sex.

    Raises:
        InputDataError: if the drill end is unset, or the start/end pair
            does not describe a valid path in the location hierarchy.
    """
    if not settings.model.is_field_unset("drill_location_start"):
        drill_start = settings.model.drill_location_start
    else:
        drill_start = None
    if not settings.model.is_field_unset("drill_location_end"):
        drill_end = settings.model.drill_location_end
    else:
        raise InputDataError(f"Set to drill but drill location end not set")
    try:
        drill = location_id_from_start_and_finish(locations, drill_start, drill_end)
    except ValueError as ve:
        raise InputDataError(f"Location parameter is wrong in settings.") from ve
    MATHLOG.info(f"drill nodes {', '.join(str(d) for d in drill)}")
    drill = list(drill)
    drill_sex = SEX_ID_TO_NAME[settings.model.drill_sex]
    # Location 0 is a pseudo-location for the one-time bundle setup step.
    setup_task = [RecipeIdentifier(0, "bundle_setup", drill_sex)]
    recipes = setup_task + [
        RecipeIdentifier(drill_location, "estimate_location", drill_sex)
        for drill_location in drill
    ]
    # Chain consecutive recipes so each depends on the one before it.
    recipe_pairs = list(zip(recipes[:-1], recipes[1:]))
    recipe_graph = nx.DiGraph(root=recipes[0])
    recipe_graph.add_nodes_from(recipes)
    recipe_graph.add_edges_from(recipe_pairs)
    return recipe_graph
def global_recipe_graph(locations, settings, args):
    """
    Constructs the graph of recipes.
    Args:
        locations (nx.DiGraph): Root node in the data, and each node
            has a level.
        settings: The global settings object.
        args (Namespace|SimpleNamespace): Command-line arguments.
    Returns:
        nx.DiGraph: Each node is a RecipeIdentifier.
    """
    assert "root" in locations.graph
    # The hierarchy level at which estimation splits into separate
    # male/female recipes ("most_detailed" means the deepest level present).
    if settings.model.split_sex == "most_detailed":
        split_sex = max([locations.nodes[nl]["level"] for nl in locations.nodes])
    else:
        split_sex = int(settings.model.split_sex)
    global_node = RecipeIdentifier(locations.graph["root"], "estimate_location", "both")
    recipe_graph = nx.DiGraph(root=global_node)
    # Start with bundle setup
    # The bundle-setup pseudo-location (0) becomes the graph root, with the
    # global estimation depending on it.
    bundle_setup = RecipeIdentifier(0, "bundle_setup", "both")
    recipe_graph.graph["root"] = bundle_setup
    recipe_graph.add_edge(bundle_setup, global_node)
    global_recipe_graph_add_estimations(locations, recipe_graph, split_sex)
    return recipe_graph
def global_recipe_graph_add_estimations(locations, recipe_graph, split_sex):
    """There are estimations for every location and for both sexes below
    the level where we split sex. This modifies the recipe graph in place."""
    # Follow location hierarchy, splitting into male and female below a level.
    for start, finish in locations.edges:
        if "level" not in locations.nodes[finish]:
            raise RuntimeError(
                "Expect location graph nodes to have a level property")
        finish_level = locations.nodes[finish]["level"]
        if finish_level == split_sex:
            # At the split level: one "both" parent fans out into a male
            # recipe and a female recipe per child location.
            for finish_sex in ["male", "female"]:
                recipe_graph.add_edge(
                    RecipeIdentifier(start, "estimate_location", "both"),
                    RecipeIdentifier(finish, "estimate_location", finish_sex),
                )
        elif finish_level > split_sex:
            # Below the split: each sex proceeds down its own subtree.
            for same_sex in ["male", "female"]:
                recipe_graph.add_edge(
                    RecipeIdentifier(start, "estimate_location", same_sex),
                    RecipeIdentifier(finish, "estimate_location", same_sex),
                )
        else:
            # Above the split: everything still runs on "both" sexes.
            recipe_graph.add_edge(
                RecipeIdentifier(start, "estimate_location", "both"),
                RecipeIdentifier(finish, "estimate_location", "both"),
            )
def location_specific_settings(locations, settings, args, recipe_id):
    """
    This takes a modeler's description of how the model should be set up,
    as described in settings and command-line arguments, and translates
    it into what choices apply to this particular recipe. Modelers discuss
    plans in terms of what rules apply to which level of the Cascade,
    so this works in those terms, not in terms of individual tasks
    within a recipe.
    Adds ``settings.model.parent_location_id``,
    ``settings.model.grandparent_location_id``,
    and ``settings.model.children``.
    There is a grandparent location only if there is a grandparent recipe,
    so a drill starting halfway will not have a grandparent location.
    There are child locations for the last task though.
    Args:
        locations (nx.DiGraph): Location hierarchy
        settings: Settings from EpiViz-AT
        args (Namespace|SimpleNamespace): Command-line arguments
        recipe_id (RecipeIdentifier): Identifies what happens at
            this location.
    Returns:
        Settings for this job.
    """
    parent_location_id = recipe_id.location_id
    # Location 0 is the bundle-setup pseudo-location; it is not in the
    # hierarchy, so it has no predecessors, successors, or model options.
    if parent_location_id != 0:
        predecessors = list(locations.predecessors(parent_location_id))
        successors = list(sorted(locations.successors(parent_location_id)))
        model_options = make_model_options(locations, parent_location_id, settings)
    else:
        predecessors = None
        successors = None
        model_options = None
    # In a DiGraph hierarchy a node has at most one predecessor (its parent).
    if predecessors:
        grandparent_location_id = predecessors[0]
    else:
        grandparent_location_id = None

    if settings.model.is_field_unset("drill_sex"):
        # An unset drill sex gets all data.
        sexes = list(SEX_ID_TO_NAME.keys())
    else:
        # Setting to male or female pulls in "both."
        sexes = [settings.model.drill_sex, SEX_NAME_TO_ID["both"]]

    policies = policies_from_settings(settings)
    # A command-line sample count overrides the policy default.
    if args.num_samples:
        sample_cnt = args.num_samples
    else:
        sample_cnt = policies["number_of_fixed_effect_samples"]

    local_settings = EstimationParameters(
        settings=settings,
        policies=SimpleNamespace(**policies),
        children=successors,
        parent_location_id=parent_location_id,
        grandparent_location_id=grandparent_location_id,
        # This is a list of [1], [3], [1,3], [2,3], [1,2,3], not [1,2].
        sexes=sexes,
        number_of_fixed_effect_samples=sample_cnt,
        model_options=model_options,
    )
    # Data-access parameters: what to pull and from which tier/round.
    local_settings.data_access = _ParameterHierarchy(**dict(
        gbd_round_id=policies["gbd_round_id"],
        decomp_step=policies["decomp_step"],
        modelable_entity_id=settings.model.modelable_entity_id,
        model_version_id=settings.model.model_version_id,
        settings_file=args.settings_file,
        bundle_file=args.bundle_file,
        bundle_id=settings.model.bundle_id,
        bundle_study_covariates_file=args.bundle_study_covariates_file,
        tier=2 if args.skip_cache else 3,
        age_group_set_id=policies["age_group_set_id"],
        with_hiv=policies["with_hiv"],
        cod_version=settings.csmr_cod_output_version_id,
        location_set_version_id=settings.location_set_version_id,
        add_csmr_cause=settings.model.add_csmr_cause,
    ))
    # Run parameters: affect execution only, never the fit itself.
    local_settings.run = _ParameterHierarchy(**dict(
        no_upload=args.no_upload,
        db_only=args.db_only,
    ))
    return local_settings
| 40.615132 | 100 | 0.703734 | 856 | 0.069329 | 0 | 0 | 0 | 0 | 0 | 0 | 3,954 | 0.32024 |
7ed6592b4557770826c2b857640043319d38c35b | 8,757 | py | Python | utils/utils.py | YigeWang-WHU/BlastLoadsRegression | 884ba58a31ba854eaf86b846e551a97d84b11924 | [
"MIT"
] | null | null | null | utils/utils.py | YigeWang-WHU/BlastLoadsRegression | 884ba58a31ba854eaf86b846e551a97d84b11924 | [
"MIT"
] | null | null | null | utils/utils.py | YigeWang-WHU/BlastLoadsRegression | 884ba58a31ba854eaf86b846e551a97d84b11924 | [
"MIT"
] | null | null | null | import os
import pickle as pickle
import datetime
import time
# from contextlib import contextmanger
import torch
from torch.autograd import Variable
import random
import numpy as np
def time_str(fmt=None):
    """Format the current local time; *fmt* defaults to 'YYYY-MM-DD_HH:MM:SS'."""
    chosen = '%Y-%m-%d_%H:%M:%S' if fmt is None else fmt
    return datetime.datetime.today().strftime(chosen)
def str2bool(v):
    """True iff *v* is one of the strings 'yes', 'true', or '1' (any case)."""
    return v.lower() in {"yes", "true", "1"}
def is_iterable(obj):
    """Return True when *obj* is sized (defines __len__).

    Note: despite the name, this tests for __len__, so sized containers
    pass while unsized iterables such as generators do not.
    """
    return hasattr(obj, '__len__')
def to_scalar(vt):
    """Extract the single value of a 1-element torch Variable/Tensor as a
    Python scalar; raise TypeError for anything else."""
    if isinstance(vt, Variable):
        data = vt.data
    elif torch.is_tensor(vt):
        data = vt
    else:
        raise TypeError('Input should be a variable or tensor')
    return data.cpu().numpy().flatten()[0]
def set_seed(rand_seed):
    """Seed the stdlib random module, numpy, and torch (CPU and CUDA) so
    runs are reproducible; also enables cudnn."""
    random.seed(rand_seed)
    np.random.seed(rand_seed)
    torch.backends.cudnn.enabled = True
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)
def may_mkdir(fname):
    """Ensure the parent directory of *fname* exists, creating it (and any
    missing ancestors) if necessary."""
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence.
    os.makedirs(os.path.dirname(os.path.abspath(fname)), exist_ok=True)
class AverageMeter(object):
    """
    Computes and stores the average and current value
    """
    def __init__(self):
        # Delegate to reset() so the zero-state lives in exactly one place.
        self.reset()

    def reset(self):
        """Clear the running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        # The epsilon keeps the division defined when count is still zero.
        self.avg = float(self.sum) / (self.count + 1e-10)
class RunningAverageMeter(object):
    """
    Computes and stores the running average and current value
    """
    def __init__(self, hist=0.99):
        # hist is the exponential-decay weight given to the prior average.
        self.hist = hist
        # Delegate to reset() so the empty state lives in one place.
        self.reset()

    def reset(self):
        """Forget all previous observations."""
        self.val = None
        self.avg = None

    def update(self, val):
        """Fold *val* into the exponential moving average."""
        if self.avg is None:
            # The first observation initializes the average directly.
            self.avg = val
        else:
            self.avg = self.avg * self.hist + val * (1 - self.hist)
        self.val = val
class ReDirectSTD(object):
    """
    overwrites the sys.stdout or sys.stderr
    Args:
      fpath: file path
      console: one of ['stdout', 'stderr']
      immediately_visiable: False
    Usage example:
      ReDirectSTD('stdout.txt', 'stdout', False)
      ReDirectSTD('stderr.txt', 'stderr', False)
    """
    def __init__(self, fpath=None, console='stdout', immediately_visiable=False):
        import sys
        import os
        assert console in ['stdout', 'stderr']
        # Keep a handle on the real stream so writes can be tee'd to it.
        self.console = sys.stdout if console == "stdout" else sys.stderr
        self.file = fpath
        self.f = None
        self.immediately_visiable = immediately_visiable
        if fpath is not None:
            # Remove existing log file
            if os.path.exists(fpath):
                os.remove(fpath)
        # Install this object as the process-wide stream replacement.
        if console == 'stdout':
            sys.stdout = self
        else:
            sys.stderr = self
    def __del__(self):
        self.close()
    def __enter__(self):
        # NOTE(review): returns None, so ``with ReDirectSTD(...) as r`` binds
        # None — confirm whether ``return self`` was intended.
        pass
    def __exit__(self, **args):
        # NOTE(review): the context-manager protocol calls __exit__ with three
        # positional arguments (exc_type, exc_value, traceback); a **kwargs-only
        # signature would raise TypeError on exit — verify before using as a
        # context manager.
        self.close()
    def write(self, msg):
        # Tee: write to the real console, and to the log file if configured.
        self.console.write(msg)
        if self.file is not None:
            # NOTE(review): os.mkdir creates only one level; makedirs would be
            # needed for nested log directories.
            if not os.path.exists(os.path.dirname(os.path.abspath(self.file))):
                os.mkdir(os.path.dirname(os.path.abspath(self.file)))
            if self.immediately_visiable:
                # Open/append/close per write so the file is always up to date.
                with open(self.file, 'a') as f:
                    f.write(msg)
            else:
                # Lazily open once and keep the handle for buffered writes.
                if self.f is None:
                    self.f = open(self.file, 'w')
                self.f.write(msg)
    def flush(self):
        self.console.flush()
        if self.f is not None:
            self.f.flush()
            import os
            # fsync forces the OS to push the log to disk immediately.
            os.fsync(self.f.fileno())
    def close(self):
        # NOTE(review): this closes the captured console stream (the original
        # sys.stdout/sys.stderr), after which nothing can print to it; __del__
        # also triggers this at interpreter shutdown — confirm intent.
        self.console.close()
        if self.f is not None:
            self.f.close()
def find_index(seq, item):
    """Return the index of the first occurrence of *item* in *seq*, or -1."""
    for position, candidate in enumerate(seq):
        if item == candidate:
            return position
    return -1
def set_devices(sys_device_ids):
    """
    Args:
        sys_device_ids: a tuple; which GPUs to use
        e.g.  sys_device_ids = (), only use cpu
              sys_device_ids = (3,), use the 4-th gpu
              sys_device_ids = (0, 1, 2, 3,), use the first 4 gpus
              sys_device_ids = (0, 2, 4,), use the 1, 3 and 5 gpus
    """
    import os
    # Build a comma-separated id list and restrict CUDA to those devices.
    visiable_devices = ''
    for i in sys_device_ids:
        visiable_devices += '{}, '.format(i)
    os.environ['CUDA_VISIBLE_DEVICES'] = visiable_devices
    # Return wrappers
    # Models and user defined Variables/Tensors would be transferred to
    # the first device
    # NOTE(review): device_id is computed but never returned or used; the
    # comments above suggest wrapper-returning code was removed or never
    # ported — confirm the intended API.
    device_id = 0 if len(sys_device_ids) > 0 else -1
def transfer_optims(optims, device_id=-1):
    """Move the state of every torch Optimizer in *optims* to the given
    device (-1 means CPU); entries that are not optimizers are ignored."""
    for candidate in optims:
        if isinstance(candidate, torch.optim.Optimizer):
            transfer_optim_state(candidate.state, device_id=device_id)
def transfer_optim_state(state, device_id=-1):
    """Recursively move an optimizer state dict's tensors to a device.

    Args:
        state: an optimizer state dict (possibly nested dicts of tensors
            and plain values such as step counters).
        device_id: -1 moves tensors to CPU; otherwise the CUDA device index.

    Raises:
        RuntimeError: if a torch.nn.Parameter is found where plain tensors
            are expected.
    """
    for key, val in list(state.items()):
        if isinstance(val, dict):
            transfer_optim_state(val, device_id=device_id)
        elif isinstance(val, torch.nn.Parameter):
            raise RuntimeError("Oops, state[{}] is a Parameter!".format(key))
        else:
            # Non-tensor entries (ints, floats) have no .cpu()/.cuda() and are
            # left untouched; device-transfer failures stay best-effort. The
            # previous bare ``except:`` also swallowed KeyboardInterrupt and
            # SystemExit — narrowed here.
            try:
                if device_id == -1:
                    state[key] = val.cpu()
                else:
                    state[key] = val.cuda(device=device_id)
            except (AttributeError, RuntimeError):
                pass
def load_state_dict(model, src_state_dict):
    """
    copy parameter from src_state_dict to model
    Arguments:
        model: A torch.nn.Module object
        src_state_dict: a dict containing parameters and persistent buffers
    """
    from torch.nn import Parameter

    dest_state_dict = model.state_dict()
    for name, param in list(src_state_dict.items()):
        if name not in dest_state_dict:
            continue
        if isinstance(param, Parameter):
            # Parameters wrap tensors; copy the underlying data.
            param = param.data
        try:
            dest_state_dict[name].copy_(param)
        except Exception as msg:
            print("Warning: Error occurs when copying '{}': {}"
                  .format(name, str(msg)))

    src_missing = set(dest_state_dict.keys()) - set(src_state_dict.keys())
    if len(src_missing) > 0:
        print("Keys not found in source state_dict: ")
        for n in src_missing:
            # Fix: previously printed a tuple ('\t', name) — a 2to3 artifact.
            print('\t' + n)

    dest_missing = set(src_state_dict.keys()) - set(dest_state_dict.keys())
    if len(dest_missing) > 0:
        print("Keys not found in destination state_dict: ")
        for n in dest_missing:
            print('\t' + n)
def load_ckpt(modules_optims, ckpt_file, load_to_cpu=True, verbose=True):
    """Restore module/optimizer state_dicts written by ``save_ckpt``.

    Args:
      modules_optims: list of objects exposing ``load_state_dict`` (typically
        [module, optimizer])
      ckpt_file: path of the checkpoint file
      load_to_cpu: when True, map all saved tensors onto the cpu
      verbose: print a short resume message
    Returns:
      (epoch, scores) as stored in the checkpoint.
    """
    if load_to_cpu:
        map_location = lambda storage, loc: storage
    else:
        map_location = None
    checkpoint = torch.load(ckpt_file, map_location=map_location)
    for target, state in zip(modules_optims, checkpoint['state_dicts']):
        target.load_state_dict(state)
    if verbose:
        message = "Resume from ckpt {}, \nepoch: {}, scores: {}".format(
            ckpt_file, checkpoint['ep'], checkpoint['scores'])
        print(message)
    return checkpoint['ep'], checkpoint['scores']
def save_ckpt(modules_optims, ep, scores, ckpt_file):
    """
    save state_dict of modules/optimizers to file
    Args:
      modules_optims: a two-element list which contains a module and a optimizer
      ep: the current epoch number
      scores: the performance of current module
      ckpt_file: the check point file path
    Note:
      torch.save() reserves device type and id of tensors to save.
      So when loading ckpt, you have to inform torch.load() to load these tensors
      to cpu or your desired gpu, if you change devices.
    """
    state_dicts = [m.state_dict() for m in modules_optims]
    ckpt = dict(state_dicts=state_dicts, ep=ep, scores=scores)
    # Bug fix: os.mkdir cannot create intermediate directories and the
    # exists()-then-mkdir pattern raced with concurrent writers;
    # makedirs(..., exist_ok=True) handles both.
    ckpt_dir = os.path.dirname(os.path.abspath(ckpt_file))
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(ckpt, ckpt_file)
def may_set_mode(maybe_modules, mode):
    """
    maybe_modules, an object or a list of objects.
    Put every torch.nn.Module among them into 'train' or 'eval' mode;
    anything that is not a Module is silently ignored.
    """
    assert mode in ['train', 'eval']
    modules = maybe_modules if is_iterable(maybe_modules) else [maybe_modules]
    for module in modules:
        if isinstance(module, torch.nn.Module):
            # Module.train(flag) covers both cases: train(False) == eval().
            module.train(mode == 'train')
| 31.387097 | 91 | 0.595524 | 2,780 | 0.31746 | 0 | 0 | 0 | 0 | 0 | 0 | 2,544 | 0.29051 |
7ed68a92c00d63af048a398460040921d4a594c2 | 152 | py | Python | src/xbrief/deco/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
] | null | null | null | src/xbrief/deco/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
] | null | null | null | src/xbrief/deco/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
] | null | null | null | from .deco_entries import deco_dict, deco_entries
from .deco_matrix import deco_matrix
from .deco_node import deco
from .deco_vector import deco_vector
| 30.4 | 49 | 0.855263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7ed76cffa617d7dbb0eb5704950de02465bd2acc | 239 | py | Python | GPLT_Python/L1-003.py | upupming/algorithm | 44edcffe886eaf4ce8c7b27a8db50d7ed5d29ef1 | [
"MIT"
] | 107 | 2019-10-25T07:46:59.000Z | 2022-03-29T11:10:56.000Z | GPLT_Python/L1-003.py | upupming/algorithm | 44edcffe886eaf4ce8c7b27a8db50d7ed5d29ef1 | [
"MIT"
] | 1 | 2021-08-13T05:42:27.000Z | 2021-08-13T05:42:27.000Z | GPLT_Python/L1-003.py | upupming/algorithm | 44edcffe886eaf4ce8c7b27a8db50d7ed5d29ef1 | [
"MIT"
# Count how often each decimal digit occurs in the input line and print
# "digit:count" for every digit that appears, in ascending digit order.
digits = input()
tally = {str(d): 0 for d in range(10)}
for ch in digits:
    tally[ch] += 1
for d in range(10):
    key = str(d)
    if tally[key] != 0:
        print(key + ':' + str(tally[key]))
| 26.555556 | 84 | 0.376569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.138075 |
7ed81731d9af7ea6f386a90d704f7cac21f33072 | 194 | py | Python | QRCodeLib/qrcodelib/format/mode_indicator.py | yas78/QRCodeLibPy | 7b2c489b5e38aa23619ae41bff7a31993885275b | [
"MIT"
] | null | null | null | QRCodeLib/qrcodelib/format/mode_indicator.py | yas78/QRCodeLibPy | 7b2c489b5e38aa23619ae41bff7a31993885275b | [
"MIT"
] | 1 | 2019-11-04T13:44:44.000Z | 2019-11-04T13:44:44.000Z | QRCodeLib/qrcodelib/format/mode_indicator.py | yas78/QRCodeLibPy | 7b2c489b5e38aa23619ae41bff7a31993885275b | [
"MIT"
class ModeIndicator:
    """Mode-indicator constants for QR code data segments.

    The mode indicator is the 4-bit field placed in front of each data
    segment that tells the decoder how the following bits are encoded.
    """

    # Bit length of the mode indicator field.
    LENGTH = 4

    # 4-bit indicator values used by this library.
    TERMINATOR_VALUE = 0x0
    NUMERIC_VALUE = 0x1
    ALPHANUMERIC_VALUE = 0x2
    STRUCTURED_APPEND_VALUE = 0x3
    BYTE_VALUE = 0x4
    KANJI_VALUE = 0x8
| 19.4 | 33 | 0.690722 | 193 | 0.994845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7ed934e50ac37fe2f6bc49fc6b26c673d5bef545 | 3,635 | py | Python | cogs/setup.py | wan-hazem/discord-bot | c71dc49c2a1e1ad6cd41bd36f26a343684cd40d1 | [
"MIT"
] | null | null | null | cogs/setup.py | wan-hazem/discord-bot | c71dc49c2a1e1ad6cd41bd36f26a343684cd40d1 | [
"MIT"
] | null | null | null | cogs/setup.py | wan-hazem/discord-bot | c71dc49c2a1e1ad6cd41bd36f26a343684cd40d1 | [
"MIT"
] | 1 | 2020-09-24T20:07:11.000Z | 2020-09-24T20:07:11.000Z | from discord import Role, Embed, Color
from discord.ext import commands
from discord.utils import get
from sqlite3 import connect
class Setup(commands.Cog, name='Setup'):
    """
    Commandes de setup serveur réservées aux admins
    """

    # Allowed values for the `rtype` argument of the `role` command. Each one
    # maps to a column of the sqlite `setup` table; because the value is
    # interpolated into SQL below, it MUST stay restricted to this whitelist.
    ROLE_TYPES = ('verif', 'mute')

    def __init__(self, bot):
        self.bot = bot

    @commands.command(brief='!setup [verif/mute] [@role]', description='Définir un role pour les membres vérifiés ou mute')
    async def role(self, ctx, rtype: str, role: Role):
        # Security fix: `rtype` used to be interpolated into the SQL string
        # unchecked, letting any caller inject an arbitrary identifier.
        # Only whitelisted column names are accepted now.
        if rtype.lower() not in self.ROLE_TYPES:
            await ctx.send(f'Type invalide: "{rtype}" (verif ou mute attendu)')
            return
        column = rtype.capitalize()
        with connect('data.db') as conn:
            c = conn.cursor()
            c.execute('SELECT * FROM setup WHERE Guild_ID=?', (ctx.guild.id,))
            guild = c.fetchone()
            if guild is None:
                c.execute(f'INSERT INTO setup (Guild_ID, {column}) VALUES (?, ?)', (ctx.guild.id, role.id))
            else:
                c.execute(f'UPDATE setup SET {column}=? WHERE Guild_ID=?', (role.id, ctx.guild.id))
            conn.commit()
        embed = (Embed(description=f'{ctx.author.mention} a défini {role.mention} pour "{rtype}"', color=0xa84300)
                 .set_author(name=f'{ctx.author} a modifié le role pour "{rtype}"', icon_url=ctx.author.avatar_url))
        await ctx.send(embed=embed)

    @commands.command(hidden=True)
    @commands.has_permissions(manage_messages=True)
    async def regles(self, ctx):
        # Post the server rules as an embed and seed it with the ✅ reaction
        # that `on_raw_reaction_add` listens for.
        rules = {
            '👍 Règle n°1': "Respect mutuel ! Pour un chat sympa et bienveillant, pas d'insultes ou de méchancetés",
            '🗳️ Règle n°2': "C'est un serveur dédié à @E - Wizard#3217. Pas de sujets politiques, religieux et pas de racisme, de harcèlement ou de contenu offensif.",
            '🔕 Règle n°3': "Pas de spam ou de mentions abusives. Pour éviter d'avoir un chat qui ressembre à rien, évitez les abus.",
            '👦 Règle n°4': "Ayez un avatar et un pseudo approprié (family-friendly)",
            '🔒 Règle n°5': "Ne partagez pas vos informations personnelles ! Protégez votre intimité et celle des autres.",
            '💛 Règle n°6': "Utilisez votre bon sens. Ne faites pas aux autres ce que vous ne voudriez pas qu'on vous fasse.",
            '💬 Règle n°7': "Évitez la pub ! Vous pouvez partager vos projets dans #vos-projects.",
            '🙏 Règle n°8': "Pas de mandiage de role. C'est juste une perte de temps et ça ne marchera jamais.",
            '📑 Règle n°9': "Repectez les [Guidelines de la Communauté Discord](https://discord.com/guidelines) et les [Conditions d'utilisation](https://discord.com/terms).",
        }
        embed = Embed(title="📃 Règles du serveur:", description='Appuie sur ✅ après avoir lu les règles :',color=0xa84300)
        for key, value in rules.items():
            embed.add_field(name=key, value=f"{value}\n", inline=False)
        await ctx.message.delete()
        msg = await ctx.send(embed=embed)
        await msg.add_reaction('✅')

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        # Grant the configured "verif" role when a member reacts with ✅,
        # then remove the reaction to keep the rules message clean.
        member = payload.member
        with connect('data.db') as conn:
            c = conn.cursor()
            c.execute('SELECT Verif FROM setup WHERE Guild_ID=?', (member.guild.id,))
            row = c.fetchone()
        # Robustness fix: guilds without a configured "verif" role used to
        # crash here subscripting None.
        if row is None or row[0] is None:
            return
        role = get(member.guild.roles, id=row[0])
        if role is None:
            return
        if payload.emoji.name == '✅' and not member.bot:
            channel = self.bot.get_channel(payload.channel_id)
            message = await channel.fetch_message(payload.message_id)
            reaction = get(message.reactions, emoji=payload.emoji.name)
            if role not in member.roles:
                await member.add_roles(role)
            await reaction.remove(member)
def setup(bot):
    """Extension entry point used by discord.py's load_extension: register the cog."""
    bot.add_cog(Setup(bot))
7eda15ab90c17c289351ca4660c59723a26e49ce | 5,793 | py | Python | ppq/parser/onnx_parser.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | 1 | 2022-01-06T09:38:13.000Z | 2022-01-06T09:38:13.000Z | ppq/parser/onnx_parser.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | null | null | null | ppq/parser/onnx_parser.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Iterable, List
from torch import random
from ppq.core import NetworkFramework, is_file_exist
from ppq.IR import BaseGraph, GraphBuilder, Operation, Variable
import onnx
from onnx import helper, mapping, numpy_helper
class OnnxParser(GraphBuilder):
    """Build a ppq BaseGraph from an ONNX protobuf model file."""
    def build_variables(
        self, graph: BaseGraph,
        graph_inputs: List[str], graph_outputs: List[str],
        op_inputs: Dict[str, list], op_outputs: Dict[str, list]) -> BaseGraph:
        """Create every Variable and wire it to its producing/consuming ops.

        ``op_inputs``/``op_outputs`` map operation name -> list of variable
        names, as collected while parsing the ONNX nodes in ``build``.
        """
        var_list = []
        for op_name, _ in graph.operations.items():
            for var_name in op_inputs[op_name]: var_list.append(var_name)
            for var_name in op_outputs[op_name]: var_list.append(var_name)
        # create all variable at once.
        for var_name in set(var_list):
            graph.variables[var_name] = Variable(name=var_name)
        # build graph's input, output variables.
        try:
            for var_name in graph_inputs:
                # graph inputs that are actually initializers may not have a
                # consuming operation; those are skipped here.
                if var_name not in graph.variables: continue
                graph.inputs[var_name] = graph.variables[var_name]
            for var_name in graph_outputs:
                graph.outputs[var_name] = graph.variables[var_name]
        except KeyError as e:
            raise KeyError(
                'seems you got an input/output variable that is not linked to any opeartion.')
        # build operation inputs, outputs variables.
        for op in graph.operations.values():
            for var_name in op_inputs[op.name]:
                var = graph.variables[var_name]
                var.dest_ops.append(op)
                op.inputs.append(graph.variables[var_name])
            for var_name in op_outputs[op.name]:
                var = graph.variables[var_name]
                var.source_op = op
                op.outputs.append(graph.variables[var_name])
        return graph
    def initialize_params(self, graph: BaseGraph, initializer: Dict[str, Any]) -> BaseGraph:
        """Attach initializer values: variables named in ``initializer`` become
        parameters of every operation that consumes them."""
        for var in graph.variables.values():
            if var.name in initializer:
                for dest_op in var.dest_ops:
                    assert isinstance(dest_op, Operation)
                    dest_op.parameters.append(var)
                var.value = initializer[var.name]
                var.is_parameter = True
        return graph
    def refine_graph(self, graph: BaseGraph) -> BaseGraph:
        """Normalize attribute values and drop initializer-only graph inputs."""
        for op in graph.operations.values():
            for key, value in op.attributes.items():
                if isinstance(value, bytes):
                    # Change bytes to string
                    value = value.decode('utf-8')
                if op.type == 'Constant' or op.type == 'ConstantOfShape':
                    # The attribute of 'Constant' node is a value, needs to convert to numpy array
                    value = numpy_helper.to_array(value).copy()
                if op.type == 'Cast':
                    # The attribute of 'Cast' node is data type (represented in int), need to convert to numpy data type
                    value = mapping.TENSOR_TYPE_TO_NP_TYPE[value]
                op.attributes[key] = value
        graph_initializers = []
        for input_var in graph.inputs.values():
            # remove initilizer from graph.inputs
            if input_var.value is not None:
                graph_initializers.append(input_var.name)
        for non_input_var in graph_initializers: graph.inputs.pop(non_input_var)
        return graph
    def convert_opsets_to_str(self, opsets: Iterable) -> List[Dict[str, str]]:
        """Turn onnx OperatorSetId protos into plain {'domain', 'version'} dicts."""
        results = []
        for opset in opsets:
            results.append({'domain': opset.domain, 'version': opset.version})
        return results
    def build(self, file_path: str) -> BaseGraph:
        """Load the ONNX file at ``file_path`` and return the finished BaseGraph."""
        _rand_seed = 0 # used for name generation.
        if not is_file_exist(file_path):
            raise FileNotFoundError(f'file {file_path} does not exist, or it is a directory.')
        model_pb = onnx.load(file_path)
        opsets = model_pb.opset_import
        assert isinstance(model_pb, onnx.ModelProto), \
            f'onnx load failed, only ProtoBuffer object is expected here, while {type(model_pb)} is loaded.'
        model_pb = model_pb.graph
        graph = BaseGraph(name=model_pb.name, built_from=NetworkFramework.ONNX)
        graph._detail['opsets'] = self.convert_opsets_to_str(opsets)
        # a temporary storage for operation's inputs and outputs
        op_inputs_dict, op_outputs_dict = {}, {}
        for node in model_pb.node:
            op_name = node.name
            if len(op_name) == 0: # some opeartion do not have a name, we just generate one.
                op_name = 'generated_name_' + str(_rand_seed)
                _rand_seed += 1
            if op_name in graph.operations:
                raise KeyError(f'Duplicated operation {op_name} was found.')
            graph.operations[op_name] = Operation(
                name=op_name, op_type=node.op_type,
                attributes={item.name: helper.get_attribute_value(item) for item in node.attribute},
            )
            op_inputs_dict[op_name] = [var_name for var_name in node.input]
            op_outputs_dict[op_name] = [var_name for var_name in node.output]
        initializer = {}
        for item in model_pb.initializer:
            init_name = item.name
            value = numpy_helper.to_array(item)
            initializer[init_name] = value
        inputs = [item.name for item in model_pb.input]
        outputs = [item.name for item in model_pb.output]
        graph = self.build_variables(
            graph, graph_inputs=inputs, graph_outputs=outputs,
            op_inputs=op_inputs_dict, op_outputs=op_outputs_dict)
        graph = self.initialize_params(graph, initializer)
        return self.refine_graph(graph)
| 43.886364 | 120 | 0.615398 | 5,541 | 0.956499 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.146729 |
7edaeae2fc38a800bb95865424b3192bce1ad4c5 | 96 | py | Python | cubes/query/__init__.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 1,020 | 2015-01-02T03:05:26.000Z | 2022-02-12T18:48:51.000Z | cubes/query/__init__.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 259 | 2015-01-02T22:35:14.000Z | 2021-09-02T04:20:41.000Z | cubes/query/__init__.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 288 | 2015-01-08T00:42:26.000Z | 2022-03-31T17:25:10.000Z | from .browser import *
from .cells import *
from .computation import *
from .statutils import *
| 19.2 | 26 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7edcf59eb18eef02bdb316448f6adea4f05d6570 | 130 | py | Python | pymoo/operators/mutation/nom.py | jarreguit/pymoo | 0496a3c6765826148d8bab21650736760517dd25 | [
"Apache-2.0"
] | 762 | 2018-06-05T20:56:09.000Z | 2021-09-14T09:09:42.000Z | pymoo/operators/mutation/nom.py | jarreguit/pymoo | 0496a3c6765826148d8bab21650736760517dd25 | [
"Apache-2.0"
] | 176 | 2018-09-05T18:37:05.000Z | 2021-09-14T01:18:43.000Z | pymoo/operators/mutation/nom.py | jarreguit/pymoo | 0496a3c6765826148d8bab21650736760517dd25 | [
"Apache-2.0"
] | 160 | 2018-08-05T05:31:20.000Z | 2021-09-14T09:09:45.000Z | from pymoo.core.mutation import Mutation
class NoMutation(Mutation):
    """Identity mutation operator: hands the population back unchanged."""
    def _do(self, problem, X, **kwargs):
        # No-op: return the individuals exactly as received.
        return X
| 16.25 | 40 | 0.684615 | 86 | 0.661538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7edd2d6611c190c72458a52ee8280a39a0f6019d | 2,893 | py | Python | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
] | null | null | null | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
] | null | null | null | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
import re
from sys import argv
import sys

# Resume tokenizer: reads the file named on the command line, strips a few
# stop words, then prints the candidate name followed by each recognised
# section header (EXPERIENCE, SKILLS, ...) and its tokens.
script, filename = argv

# Read the whole resume and split it on any run of whitespace.
with open(filename) as text:
    s = text.read()
y = re.split('\s+', s)

temp = []
values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
stopword1 = ['with', 'more', 'than', 'for', 'a', 'an', 'the', 'then', 'to', 'I', 'as', 'am', 'and', 'is']
# Keep tokens that are not stop words. NOTE(review): because of the `m == 7`
# early accept, only the first 7 entries of stopword1 are ever compared;
# behaviour is kept as-is to preserve the original output.
for word in y:
    m = 0
    for val in values:
        if word == stopword1[val]:
            break
        else:
            m = m + 1
            if m == 7:
                temp.append(word)
                break
# Sentinels so the section printer below knows where the token stream ends.
temp.append("end")
temp.append("qq")

values = list(range(31))
val = 0
sys.stdout.write("Name :: ")
sys.stdout.write(temp[0])
sys.stdout.write(" ")
sys.stdout.write(temp[1])
sys.stdout.write("\n\n")
i = 2
keywords = ['Personal', 'PERSONAL', 'Location', 'Domain', 'Industry', 'Languages', 'Interests', 'INTERESTS', 'SUMMARY', 'Summary', 'COMPETENCIES', 'Competencies', 'GRADUATION', 'Graduation', 'EXPERIENCE', 'Experience', 'SKILLS', 'Skills', 'QUALIFICATIONS', 'Qualifications', 'CERTIFICATION', 'Certifications', 'EDUCATION', 'Education', 'PROJECTS', 'Projects', 'EXPERTISE', 'Expertise', 'ACADEMICS', 'Academics', 'asdfg', 'ACHIEVEMENTS', 'Achievements']
# Walk the tokens; whenever one matches a known section header, print the
# header and every following token until the next header (or end of stream).
while temp[i] != "qq":
    for val in values:
        if temp[i] == keywords[val]:
            sys.stdout.write("")
            sys.stdout.write(temp[i])
            sys.stdout.write(" :: ")
            count = 0
            while count == 0:
                m = 0
                i = i + 1
                sys.stdout.write(" ")
                if temp[i] == "qq":
                    count = 1
                check = 0
                for m in values:
                    if temp[i] == keywords[m]:
                        sys.stdout.write("\n")
                        sys.stdout.write("\n")
                        sys.stdout.write("\n")
                        check = 1
                        count = 1
                        m = m + 1
                        val = 0
                        i = i - 1
                        break
                    m = m + 1
                if check == 0:
                    sys.stdout.write(temp[i])
        else:
            val = val + 1
    i = i + 1
# Bug fix: the original script ended with `rite.close()`, but `rite` was never
# bound (its open() call is commented out), so every run crashed with a
# NameError after printing. The input file is now closed by the `with` block
# above, so no explicit close calls remain.
| 38.573333 | 418 | 0.382993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.219841 |
7edd3771e2b51e9d09b225c2a3e4394114478f19 | 5,304 | py | Python | main.py | zoumt1633/ace2005-preprocessing | b5c31fabb9648a096a480958a00244fbd5dda622 | [
"MIT"
] | null | null | null | main.py | zoumt1633/ace2005-preprocessing | b5c31fabb9648a096a480958a00244fbd5dda622 | [
"MIT"
] | null | null | null | main.py | zoumt1633/ace2005-preprocessing | b5c31fabb9648a096a480958a00244fbd5dda622 | [
"MIT"
] | null | null | null | import os
import copy
from parser import Parser
import json
import argparse
from tqdm import tqdm
def get_data_paths(ace2005_path):
    """Read ./data_list.csv and split the listed documents into
    (test_files, dev_files, train_files) path lists rooted at *ace2005_path*.

    The CSV's first row is treated as a header; each following row is
    "<split>,<document name>". Rows with an unknown split are ignored.
    """
    buckets = {'test': [], 'dev': [], 'train': []}
    with open('./data_list.csv', mode='r') as csv_file:
        for row in csv_file.readlines()[1:]:
            fields = row.replace('\n', '').split(',')
            kind, name = fields[0], fields[1]
            if kind in buckets:
                buckets[kind].append(os.path.join(ace2005_path, name))
    return buckets['test'], buckets['dev'], buckets['train']
def find_all(sub, s):
    """Return every index at which *sub* occurs in *s* (overlaps included);
    returns [-1] when there is no occurrence at all."""
    hits = []
    at = s.find(sub)
    while at != -1:
        hits.append(at)
        at = s.find(sub, at + 1)
    return hits if hits else [-1]
def find_token_index(tokens, start_pos, end_pos, phrase):
    """Locate *phrase* inside the sentence string *tokens*.

    If the annotated span [start_pos, end_pos) already matches, it is returned
    unchanged. Otherwise the phrase is searched in the whole sentence: the
    occurrence closest to the annotated offset wins, and (-10, -10) is
    returned (after dumping the sentence for inspection) when the phrase does
    not occur at all.
    """
    if tokens[start_pos: end_pos] == phrase:
        return start_pos, end_pos
    candidates = find_all(phrase, tokens)
    if candidates[0] == -1:
        # Phrase missing entirely; print the sentence for manual inspection.
        print(tokens)
        return -10, -10
    if len(candidates) == 1:
        begin = candidates[0]
    else:
        # Several occurrences: keep the one closest to the annotated offset.
        distances = [abs(c - start_pos) for c in candidates]
        begin = candidates[distances.index(min(distances))]
    return begin, begin + len(phrase)
def preprocessing(data_type, files):
    """
    Convert the ACE2005 documents in *files* into JSON training data and write
    them to output/Chinese/<data_type>.json.

    Args:
      data_type: split name ('test', 'dev' or 'train'); used as output file name
      files: list of document paths for that split
    """
    result = []
    event_count, entity_count, sent_count = 0, 0, 0
    print('-' * 20)
    print('[preprocessing] type: ', data_type)
    for file in tqdm(files):
        parser = Parser(path=file)
        entity_count += len(parser.entity_mentions)
        event_count += len(parser.event_mentions)
        sent_count += len(parser.sents_with_pos)
        for item in parser.get_data():
            data = dict()
            data['sentence'] = item['sentence']
            data['golden-entity-mentions'] = []
            data['golden-event-mentions'] = []
            tokens = item['sentence']
            sent_start_pos = item['position'][0]
            # Convert each entity mention's absolute document position into an
            # offset relative to the start of its sentence.
            for entity_mention in item['golden-entity-mentions']:
                position = entity_mention['position']
                start_idx, end_idx = find_token_index(
                    tokens=tokens,
                    start_pos=position[0] - sent_start_pos,
                    end_pos=position[1] - sent_start_pos + 1,
                    phrase=entity_mention['text'],
                )
                entity_mention['start'] = start_idx
                entity_mention['end'] = end_idx
                del entity_mention['position']
                data['golden-entity-mentions'].append(entity_mention)
            # Same relative-position conversion for event triggers.
            for event_mention in item['golden-event-mentions']:
                # same event mention can be shared between sentences, so work
                # on a deep copy before mutating it
                event_mention = copy.deepcopy(event_mention)
                position = event_mention['trigger']['position']
                start_idx, end_idx = find_token_index(
                    tokens=tokens,
                    start_pos=position[0] - sent_start_pos,
                    end_pos=position[1] - sent_start_pos + 1,
                    phrase=event_mention['trigger']['text'],
                )
                event_mention['trigger']['start'] = start_idx
                event_mention['trigger']['end'] = end_idx
                del event_mention['trigger']['position']
                del event_mention['position']
                # ... and for every event argument.
                arguments = []
                for argument in event_mention['arguments']:
                    position = argument['position']
                    start_idx, end_idx = find_token_index(
                        tokens=tokens,
                        start_pos=position[0] - sent_start_pos,
                        end_pos=position[1] - sent_start_pos + 1,
                        phrase=argument['text'],
                    )
                    argument['start'] = start_idx
                    argument['end'] = end_idx
                    del argument['position']
                    arguments.append(argument)
                event_mention['arguments'] = arguments
                data['golden-event-mentions'].append(event_mention)
            result.append(data)
    print('sent_count :', sent_count)
    print('event_count :', event_count)
    print('entity_count :', entity_count)
    # Bug fix: make sure the output directory exists before writing;
    # the original open() crashed on a fresh checkout. (Also removed the
    # unused `event_count_2` variable.)
    os.makedirs('output/Chinese', exist_ok=True)
    with open('output/Chinese/{}.json'.format(data_type), 'w') as f:
        json.dump(result, f, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    # Command line entry point: locate the ACE2005 Chinese corpus and emit
    # test/dev/train JSON splits under output/Chinese/.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help="Path of ACE2005 Chinese data", default='./data/Chinese')
    args = parser.parse_args()
    test_files, dev_files, train_files = get_data_paths(args.data)
    preprocessing('test', test_files)
    preprocessing('dev', dev_files)
    preprocessing('train', train_files)
| 33.783439 | 96 | 0.558258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 925 | 0.169787 |
7ede2a1e09a4811ae478f01a896fc0510aa9e87d | 360 | py | Python | HPOBenchExperimentUtils/core/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | null | null | null | HPOBenchExperimentUtils/core/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | 1 | 2021-09-01T16:35:21.000Z | 2021-11-05T19:53:25.000Z | HPOBenchExperimentUtils/core/__init__.py | automl/HPOBenchExperimentUtils | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
# Coordination timeouts/intervals (in seconds) for distributed experiment runs.
WORKER_WAIT_FOR_SCHEDULER_TO_START_IN_S = 600
WORKER_WAIT_FOR_NAMESERVER_TO_START_IN_S = 300
SCHEDULER_PING_WORKERS_INTERVAL_IN_S = 10
SCHEDULER_TIMEOUT_WORKER_DISCOVERY_IN_S = 600

# See Explanation in HPOBenchExperimentUtils/__init__.py
try:
    from HPOBenchExperimentUtils.optimizer.autogluon_optimizer import _obj_fct
except ModuleNotFoundError:
    # The autogluon optimizer is optional; silently skip when unavailable.
    pass
| 32.727273 | 78 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.155556 |
7ee00fb2a41f748e6f16cd24d5dbedd13ee2a74c | 155 | py | Python | portfolio/portfolio_site/models/__init__.py | nguyentrisinh/portfolio | 83b13f8ffaafae76f3a9597b2b8b428da188a555 | [
"MIT"
] | null | null | null | portfolio/portfolio_site/models/__init__.py | nguyentrisinh/portfolio | 83b13f8ffaafae76f3a9597b2b8b428da188a555 | [
"MIT"
] | null | null | null | portfolio/portfolio_site/models/__init__.py | nguyentrisinh/portfolio | 83b13f8ffaafae76f3a9597b2b8b428da188a555 | [
"MIT"
] | null | null | null | from .contact import Contact
from .project import Project
from .link import Link
from .category import Category
from .project_image import ProjectImage | 31 | 39 | 0.819355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7ee0306bca45832afc48ed42c697d3140fcc48e9 | 5,061 | py | Python | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | 3 | 2016-04-28T06:25:57.000Z | 2016-09-27T23:08:53.000Z | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | null | null | null | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | 4 | 2016-07-29T11:54:43.000Z | 2019-04-16T23:27:47.000Z | #!/usr/bin/env python
import sys
import os
import urllib
import requests
import shutil
import json
import datetime
from ftplib import FTP, error_perm, error_reply
API_URL = 'http://localhost/cgi-bin'  # local Overpass API endpoint
TIMEOUT_MINUTES = 15  # per-query HTTP timeout
HOURS_BEFORE_PROCESS = 24  # re-run an unchanged request at most this often
if len(sys.argv) < 4:
    sys.stderr.write('Usage : ./main.py FTP_SERVER FTP_USER FTP_PASSWORD\n')
    sys.exit(1)
# Working directories and state files live next to this script.
results_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results')
requests_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requests')
cron_history_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cron_history')
cron_history = {}
lock_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.lock')
ftp_server = sys.argv[1]
ftp_user = sys.argv[2]
ftp_password = sys.argv[3]
def date_to_ftp_timestamp(dt):
    """Format a datetime as the 14-digit YYYYMMDDHHMMSS string used by FTP MDTM."""
    return '{:%Y%m%d%H%M%S}'.format(dt)
def ftp_timestamp_to_date(timestamp):
    """Parse a 14-digit YYYYMMDDHHMMSS FTP MDTM timestamp into a datetime."""
    fmt = '%Y%m%d%H%M%S'
    return datetime.datetime.strptime(timestamp, fmt)
if os.path.exists(lock_file_path):
    # Another instance is running (or crashed while holding the lock): bail out.
    print 'A lock file is present, the script is already running'
    sys.exit()
# Create the lock file; it is removed in the finally block further below.
fd = open(lock_file_path, 'a')
fd.close()
try:
    # Start from clean working directories on every run.
    if os.path.exists(results_directory):
        shutil.rmtree(results_directory)
    if os.path.exists(requests_directory):
        shutil.rmtree(requests_directory)
    # Reload the persisted per-file processing history, if any.
    if os.path.exists(cron_history_path):
        with open(cron_history_path, 'r') as fd:
            cron_history = json.load(fd)
    os.makedirs(requests_directory)
    os.makedirs(results_directory)
    print 'Downloading requests...'
    # Connect to the FTP server and change directory to /requests
    ftp = FTP(ftp_server, user=ftp_user, passwd=ftp_password)
    if 'requests' not in ftp.nlst():
        ftp.mkd('requests')
    ftp.cwd('requests')
    # Download all files from the /requests directory
    file_list = ftp.nlst()
    for filename in file_list:
        with open(os.path.join(requests_directory, filename), 'w+') as fd:
            ftp.retrbinary('RETR ' + filename, fd.write)
    print 'Processing requests and downloading responses...'
    # Determine which requests to process
    files_to_process = []
    for filename in file_list:
        do_process = False
        # MDTM replies '213 YYYYMMDDHHMMSS'; keep only the timestamp part.
        res = ftp.sendcmd('MDTM %s' % filename)
        last_modified = ftp_timestamp_to_date(res.split(' ')[1])
        if cron_history.get(filename):
            # Re-process when the file changed on the server...
            history_last_modified = cron_history[filename].get('last_modified')
            if history_last_modified:
                history_last_modified = ftp_timestamp_to_date(history_last_modified)
            if history_last_modified != last_modified:
                do_process = True
            if not do_process:
                # ...or when it has not been processed for HOURS_BEFORE_PROCESS hours.
                history_last_processed = ftp_timestamp_to_date(cron_history[filename].get('last_processed'))
                if history_last_processed:
                    diff = datetime.datetime.now() - history_last_processed
                    # NOTE(review): this bare timedelta expression has no
                    # effect; it looks like leftover debugging code.
                    datetime.timedelta(0, 32400)
                    if diff.total_seconds() / 60 / 60 >= HOURS_BEFORE_PROCESS:
                        do_process = True
        else:
            # Never seen before: always process.
            cron_history[filename] = {}
            do_process = True
        if do_process:
            files_to_process.append(filename)
            cron_history[filename]['last_modified'] = date_to_ftp_timestamp(last_modified)
            cron_history[filename]['last_processed'] = date_to_ftp_timestamp(datetime.datetime.now())
    # Persist the updated history before the long-running downloads start.
    with open(cron_history_path, 'w+') as fd:
        json.dump(cron_history, fd)
    # We close FTP because the request can take a while
    ftp.close()
    # Run every selected query against the Overpass API, streaming each
    # response to its result file.
    for filename in files_to_process:
        with open(os.path.join(requests_directory, filename), 'r') as fd:
            query = fd.read()
        print("Processing %s..." % filename)
        url = '%s/interpreter?data=%s' % (API_URL, urllib.quote(query, safe=''))
        req = requests.get(url, timeout=(TIMEOUT_MINUTES * 60), stream=True)
        req.raise_for_status()
        with open(os.path.join(results_directory, filename), 'w+') as fd:
            for block in req.iter_content(1024):
                # fd.write(req.text.encode('utf-8'))
                fd.write(block)
        print("%s done." % filename)
    print 'Download completed, starting upload on FTP...'
    # Replace the old results on the FTP server with the fresh ones.
    file_list = os.listdir(results_directory)
    if file_list:
        ftp = FTP(ftp_server, user=ftp_user, passwd=ftp_password)
        for filename in file_list:
            try:
                ftp.delete(filename)
            except (error_perm, error_reply) as e:
                sys.stderr.write('Cannot delete file %s: %s\n' % (filename, e.message))
            fd = open(os.path.join(results_directory, filename))
            ftp.storbinary('STOR %s' % filename, fd)
            fd.close()
            print('File %s uploaded.' % filename)
        ftp.close()
        print("Upload completed.")
    shutil.rmtree(requests_directory)
    shutil.rmtree(results_directory)
finally:
    # Always release the lock, even on failure.
    os.remove(lock_file_path)
finally:
os.remove(lock_file_path)
| 36.941606 | 108 | 0.648093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.16637 |
7ee047a7066708d1b0437ff3c15225bede21aaf2 | 1,234 | py | Python | tf_mnist_softmax.py | vishwesh5/DeepLearningPython | 8482287a338af50d6766891b7ccdad38253a2113 | [
"MIT"
] | 2 | 2018-07-08T17:32:33.000Z | 2018-08-20T18:07:21.000Z | tf_mnist_softmax.py | vishwesh5/DeepLearningPython | 8482287a338af50d6766891b7ccdad38253a2113 | [
"MIT"
] | null | null | null | tf_mnist_softmax.py | vishwesh5/DeepLearningPython | 8482287a338af50d6766891b7ccdad38253a2113 | [
"MIT"
] | 2 | 2020-04-21T20:35:43.000Z | 2021-08-28T01:46:28.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 6 21:00:25 2018
@author: Vishwesh
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
DATA_DIR = "/tmp/data"  # where the MNIST dataset is downloaded/cached
NUM_STEPS=1000  # number of SGD minibatch updates
MINIBATCH_SIZE=32
data = input_data.read_data_sets(DATA_DIR,one_hot=True)
# Softmax-regression graph: a single linear layer from 784 pixels to 10 logits.
x = tf.placeholder(tf.float32,[None,784])
W = tf.Variable(tf.zeros([784,10]))
y_true = tf.placeholder(tf.float32,[None,10])
y_pred = tf.matmul(x,W)
# Cross-entropy loss over the softmax of the logits.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=y_pred,
        labels=y_true))
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Fraction of test examples whose argmax prediction matches the label.
correct_mask = tf.equal(tf.argmax(y_pred,1),
                        tf.argmax(y_true,1))
accuracy = tf.reduce_mean(tf.cast(correct_mask,tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(NUM_STEPS):
        batch_xs,batch_ys = data.train.next_batch(MINIBATCH_SIZE)
        sess.run(gd_step,feed_dict={x: batch_xs,y_true: batch_ys})
    ans = sess.run(accuracy,feed_dict={x:data.test.images,
                       y_true:data.test.labels})
    print("Accuracy = {:.4}%".format(ans*100))
| 28.045455 | 73 | 0.664506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.097245 |