Dataset schema (one record per source file; observed value ranges in parentheses):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 69)
license_type: string (2 classes)
repo_name: string (length 5 to 118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4 to 63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k to 686M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2 to 10.3M)
extension: string (246 classes)
content: string (length 2 to 10.3M)
authors: list (length 1)
author_id: string (length 0 to 212)
blob_id: 32f05b77100c28b9d1b8b4cb3de8164e01404a65
directory_id: b9e065798f51eb60d8b29bc0756990103a7b5e70
path: /application/controller/Demo.py
content_id: b8435f5ae15a54926d7091f2e79b846cc944e392
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: jayqiyoung/LuckyPython
snapshot_id: 02f56c8cbb09d80a597ff430712c2b985567d7f4
revision_id: 8546214c62b3173f3bd4cf0a59476a9a7a48743e
branch_name: refs/heads/master
visit_date: 2020-12-25T12:28:31.110804
revision_date: 2016-01-25T10:19:54
committer_date: 2016-01-25T10:19:54
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 324
extension: py
content:
import LuckyPython
class Demo:
    def welcome(self):
data = dict()
data["name"] = "World"
view = LuckyPython.View("application/view")
return view.show_page("Demo/welcome.html", data)
    def hello(self, name):
return "Hello {0} !".format(name)
authors: ["kandisheng@163.com"]
author_id: kandisheng@163.com
blob_id: ece7a1b4904871303354b9e1284c3e3f5a49bdde
directory_id: ee37afaea6263875c86ab1215e0452501c47e408
path: /Models/DeepFM.py
content_id: 8683dd3c92858c9f479c9deac0d39526da1b2bdc
detected_licenses: []
license_type: no_license
repo_name: bytecamp2019-alg-10/mtl
snapshot_id: abfe5f175a414334309e6cb63b5e1ab10f14ae16
revision_id: 4600846245c87efc2f52add3be75ba35fc20f333
branch_name: refs/heads/master
visit_date: 2020-07-10T21:24:17.404162
revision_date: 2019-08-28T13:06:15
committer_date: 2019-08-28T13:06:15
github_id: 204,374,888
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 18,809
extension: py
content:
"""
Tensorflow implementation of DeepFM [1]
Reference:
[1] DeepFM: A Factorization-Machine based Neural Network for CTR Prediction,
    Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, Xiuqiang He.
"""
import numpy as np
import tensorflow as tf
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
from YellowFin.tuner_utils.yellowfin import YFOptimizer
class DeepFM(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size,
embedding_size=8, dropout_fm=[1.0, 1.0],
deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5],
deep_layers_activation=tf.nn.relu,
epoch=10, batch_size=256,
learning_rate=0.001, optimizer_type="adam",
batch_norm=0, batch_norm_decay=0.995,
verbose=False, random_seed=2016,
use_fm=True, use_deep=True,
loss_type="logloss", eval_metric=roc_auc_score,
l2_reg=0.0, greater_is_better=True):
assert (use_fm or use_deep)
assert loss_type in ["logloss", "mse"], \
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
self.feature_size = feature_size # denote as M, size of the feature dictionary
self.field_size = field_size # denote as F, size of the feature fields
self.embedding_size = embedding_size # denote as K, size of the feature embedding
self.dropout_fm = dropout_fm
self.deep_layers = deep_layers
self.dropout_deep = dropout_deep
self.deep_layers_activation = deep_layers_activation
self.use_fm = use_fm
self.use_deep = use_deep
self.l2_reg = l2_reg
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer_type
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose
self.random_seed = random_seed
self.loss_type = loss_type
self.eval_metric = eval_metric
self.greater_is_better = greater_is_better
self.train_result, self.valid_result = [], []
self._init_graph()
def _init_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(self.random_seed)
self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
name="feat_index") # None * F
self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
name="feat_value") # None * F
self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label") # None * 1
self.dropout_keep_fm = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")
self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_deep")
self.train_phase = tf.placeholder(tf.bool, name="train_phase")
self.weights = self._initialize_weights()
# model
self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
self.feat_index) # None * F * K
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
# ---------- first order term ----------
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index) # None * F * 1
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2) # None * F
self.y_first_order = tf.nn.dropout(self.y_first_order, self.dropout_keep_fm[0]) # None * F
# ---------- second order term ---------------
# sum_square part
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1) # None * K
self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K
# square_sum part
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K
# second order
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square, self.squared_sum_features_emb) # None * K
self.y_second_order = tf.nn.dropout(self.y_second_order, self.dropout_keep_fm[1]) # None * K
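            # sum-square minus square-sum is the standard FM identity:
            #   sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * ((sum_i v_i x_i)^2 - sum_i (v_i x_i)^2)
            # computing all pairwise interactions in O(F*K) instead of O(F^2 * K).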
# ---------- Deep component ----------
self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size]) # None * (F*K)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])
for i in range(0, len(self.deep_layers)):
                self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]), self.weights["bias_%d" % i])  # None * layer[i]
if self.batch_norm:
self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase, scope_bn="bn_%d" %i) # None * layer[i] * 1
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[1+i]) # dropout at each Deep layer
# ---------- DeepFM ----------
if self.use_fm and self.use_deep:
concat_input = tf.concat([self.y_first_order, self.y_second_order, self.y_deep], axis=1)
elif self.use_fm:
concat_input = tf.concat([self.y_first_order, self.y_second_order], axis=1)
elif self.use_deep:
concat_input = self.y_deep
self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
# loss
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out)
self.loss = tf.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
# l2 regularization on weights
if self.l2_reg > 0:
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["concat_projection"])
if self.use_deep:
for i in range(len(self.deep_layers)):
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["layer_%d"%i])
# optimizer
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
self.loss)
elif self.optimizer_type == "yellowfin":
self.optimizer = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0).minimize(
self.loss)
# init
self.saver = tf.train.Saver()
init = tf.global_variables_initializer()
self.sess = self._init_session()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _init_session(self):
        config = tf.ConfigProto(device_count={"GPU": 0})  # hide GPUs so the session runs on CPU; the device-count key must be the uppercase "GPU"
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def _initialize_weights(self):
weights = dict()
# embeddings
weights["feature_embeddings"] = tf.Variable(
tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
name="feature_embeddings") # feature_size * K
weights["feature_bias"] = tf.Variable(
tf.random_uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
# deep layers
num_layer = len(self.deep_layers)
input_size = self.field_size * self.embedding_size
glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))
weights["layer_0"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
dtype=np.float32) # 1 * layers[0]
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
if self.use_fm and self.use_deep:
input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
elif self.use_fm:
input_size = self.field_size + self.embedding_size
elif self.use_deep:
input_size = self.deep_layers[-1]
glorot = np.sqrt(2.0 / (input_size + 1))
weights["concat_projection"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
            dtype=np.float32)  # input_size * 1
weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
def batch_norm_layer(self, x, train_phase, scope_bn):
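        # Both batch_norm ops share one variable scope: the first creates the moving
        # statistics (reuse=None), the second reuses them with is_training=False;
        # tf.cond then selects the appropriate branch from train_phase at run time.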
bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=True, reuse=None, trainable=True, scope=scope_bn)
bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=False, reuse=True, trainable=True, scope=scope_bn)
z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index+1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
    # shuffle three lists simultaneously
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
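        # restoring the saved RNG state before each shuffle applies the identical
        # permutation to a, b, and c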
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def fit_on_batch(self, Xi, Xv, y):
feed_dict = {self.feat_index: Xi,
self.feat_value: Xv,
self.label: y,
self.dropout_keep_fm: self.dropout_fm,
self.dropout_keep_deep: self.dropout_deep,
self.train_phase: True}
loss, opt = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
return loss
def fit(self, Xi_train, Xv_train, y_train,
Xi_valid=None, Xv_valid=None, y_valid=None,
early_stopping=False, refit=False):
"""
:param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)
:param y_train: label of each sample in the training set
:param Xi_valid: list of list of feature indices of each sample in the validation set
:param Xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = Xv_valid is not None
for epoch in range(self.epoch):
t1 = time()
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# evaluate training and validation datasets
train_result = self.evaluate(Xi_train, Xv_train, y_train)
self.train_result.append(train_result)
if has_valid:
valid_result = self.evaluate(Xi_valid, Xv_valid, y_valid)
self.valid_result.append(valid_result)
if self.verbose > 0 and epoch % self.verbose == 0:
if has_valid:
print("[%d] train-result=%.4f, valid-result=%.4f [%.1f s]"
% (epoch + 1, train_result, valid_result, time() - t1))
else:
print("[%d] train-result=%.4f [%.1f s]"
% (epoch + 1, train_result, time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
        # fit a few more epochs on train+valid until the result reaches best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
Xi_train = Xi_train + Xi_valid
Xv_train = Xv_train + Xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train,
self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# check
train_result = self.evaluate(Xi_train, Xv_train, y_train)
if abs(train_result - best_train_score) < 0.001 or \
(self.greater_is_better and train_result > best_train_score) or \
((not self.greater_is_better) and train_result < best_train_score):
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] and \
valid_result[-2] > valid_result[-3] and \
valid_result[-3] > valid_result[-4] and \
valid_result[-4] > valid_result[-5]:
return True
return False
def predict(self, Xi, Xv):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:return: predicted probability of each sample
"""
        # dummy labels: the label placeholder must be fed even though predictions ignore it
dummy_y = [1] * len(Xi)
batch_index = 0
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
y_pred = None
while len(Xi_batch) > 0:
num_batch = len(y_batch)
feed_dict = {self.feat_index: Xi_batch,
self.feat_value: Xv_batch,
self.label: y_batch,
self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
self.train_phase: False}
batch_out = self.sess.run(self.out, feed_dict=feed_dict)
if batch_index == 0:
y_pred = np.reshape(batch_out, (num_batch,))
else:
y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
batch_index += 1
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
return y_pred
def evaluate(self, Xi, Xv, y):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:param y: label of each sample in the dataset
:return: metric of the evaluation
"""
y_pred = self.predict(Xi, Xv)
return self.eval_metric(y, y_pred)
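# Usage sketch (illustrative only, not part of the original file). Assumes Xi/Xv/y
# follow the index/value format documented in fit(), with feature_size and
# field_size matching the feature dictionary used to build them:
#
#   dfm = DeepFM(feature_size=100, field_size=3, embedding_size=8,
#                epoch=2, batch_size=2, verbose=1)
#   dfm.fit(Xi_train, Xv_train, y_train)
#   y_prob = dfm.predict(Xi_train, Xv_train)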
authors: ["lzy960601@gmail.com"]
author_id: lzy960601@gmail.com
blob_id: 5e348a82403d2692756bbb92dea05d2c79650670
directory_id: ae9d7d9d1aa6b6e91ac69b93e63f42aec514d37c
path: /checkerpy/tests/types/all/test_all.py
content_id: 91f1a382e9cb9885851e05853c7c065c0b6e9a04
detected_licenses: ["MIT"]
license_type: permissive
repo_name: yedivanseven/CheckerPy
snapshot_id: 03b6be3d50da2e9e8f42c19e542cf0b93a5a241c
revision_id: 04612086d25fecdd0b20ca0a050db8620c437b0e
branch_name: refs/heads/master
visit_date: 2021-09-13T16:43:42.324243
revision_date: 2018-05-02T08:46:37
committer_date: 2018-05-02T08:46:37
github_id: 117,227,319
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 34,094
extension: py
content:
import logging
import unittest as ut
from collections import defaultdict, deque, OrderedDict
from ....functional import CompositionOf
from ....types.all import All
from ....types.one import _REDUCED_ITER
from ....exceptions import WrongTypeError, IterError, CallableError
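# All(type, ...) builds a checker that returns its argument unchanged when every
# element is an instance of one of the given types; otherwise it logs the problem
# and raises WrongTypeError (or IterError when the argument is not iterable).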
class TestAllInstantiation(ut.TestCase):
def test_error_on_wrong_identifier(self):
err_msg = 'Type-checker name @foo is not a valid identifier!'
with self.assertRaises(ValueError) as err:
_ = All(int, identifier='@foo')
self.assertEqual(str(err.exception), err_msg)
def test_has_default_name(self):
AllInt = All(int)
self.assertEqual(AllInt.__name__, 'All')
def test_identifier_sets_name_attribute(self):
AllInt = All(int, identifier='AllInt')
self.assertEqual(AllInt.__name__, 'AllInt')
def test_has_attribute_types_with_one_valid_type(self):
AllInt = All(int)
self.assertTrue(hasattr(AllInt, 'types'))
def test_cannot_set_attribute_types(self):
AllInt = All(int)
with self.assertRaises(AttributeError):
AllInt.types = 'foo'
def test_attribute_types_has_correct_value_with_one_valid_type(self):
AllInt = All(int)
self.assertTupleEqual(AllInt.types, (int, ))
def test_works_with_two_valid_types(self):
_ = All(int, float)
def test_has_attribute_types_with_two_valid_types(self):
AllNum = All(int, float)
self.assertTrue(hasattr(AllNum, 'types'))
def test_attribute_types_has_correct_value_with_two_valid_types(self):
AllNum = All(int, float)
self.assertTupleEqual(AllNum.types, (int, float))
class TestAllWorks(ut.TestCase):
def test_works_with_str(self):
AllStr = All(str)
s = AllStr('foo')
self.assertIsInstance(s, str)
self.assertEqual(s, 'foo')
def test_works_with_tuple(self):
AllStr = All(str)
t = AllStr(('f', 'o', 'o'))
self.assertTupleEqual(t, ('f', 'o', 'o'))
def test_works_with_list(self):
        AllStr = All(str)
l = AllStr(['f', 'o', 'o'])
self.assertListEqual(l, ['f', 'o', 'o'])
def test_works_with_deque(self):
        AllStr = All(str)
dq = AllStr(deque(['f', 'o', 'o']))
self.assertIsInstance(dq, deque)
self.assertEqual(dq, deque(['f', 'o', 'o']))
def test_works_with_set(self):
AllStr = All(str)
s = AllStr({'f', 'o', 'o'})
self.assertSetEqual(s, {'f', 'o'})
def test_works_with_frozenset(self):
AllStr = All(str)
s = AllStr(frozenset({'f', 'o', 'o'}))
self.assertSetEqual(s, {'f', 'o'})
def test_works_with_dict(self):
AllStr = All(str)
d = AllStr({'f': 1, 'o': 2})
self.assertDictEqual(d, {'f': 1, 'o': 2})
def test_works_ordered_dict(self):
AllStr = All(str)
od = AllStr(OrderedDict({'f': 1, 'o': 2}))
self.assertIsInstance(od, OrderedDict)
self.assertDictEqual(od, {'f': 1, 'o': 2})
def test_works_with_defaultdict(self):
AllStr = All(str)
dd = AllStr(defaultdict(int, {'f': 1, 'o': 2}))
self.assertDictEqual(dd, {'f': 1, 'o': 2})
def test_works_with_dict_keys(self):
AllStr = All(str)
d = AllStr({'f': 1, 'o': 2}.keys())
self.assertIsInstance(d, type({}.keys()))
self.assertSetEqual(set(d), set({'f': 1, 'o': 2}.keys()))
def test_works_with_ordered_dict_keys(self):
AllStr = All(str)
od = OrderedDict({'f': 1, 'o': 2})
output = AllStr(od.keys())
self.assertIsInstance(output, type(od.keys()))
self.assertSetEqual(set(od.keys()), set(output))
def test_works_with_defaultdict_keys(self):
AllStr = All(str)
dd = defaultdict(int, {'f': 1, 'o': 2})
output = AllStr(dd.keys())
self.assertIsInstance(output, type(dd.keys()))
self.assertSetEqual(set(dd.keys()), set(output))
def test_works_with_dict_values(self):
AllInt = All(int)
d = AllInt({'f': 1, 'o': 2}.values())
self.assertIsInstance(d, type({}.values()))
self.assertSetEqual(set(d), set({'f': 1, 'o': 2}.values()))
def test_works_with_ordered_dict_values(self):
AllInt = All(int)
od = OrderedDict({'f': 1, 'o': 2})
output = AllInt(od.values())
self.assertIsInstance(output, type(od.values()))
self.assertSetEqual(set(od.values()), set(output))
def test_works_with_defaultdict_values(self):
AllInt = All(int)
dd = defaultdict(int, {'f': 1, 'o': 2})
output = AllInt(dd.values())
self.assertIsInstance(output, type(dd.values()))
self.assertSetEqual(set(dd.values()), set(output))
def test_returns_correct_type_with_two_types(self):
AllNum = All(int, float)
i = AllNum((1, ))
self.assertIsInstance(i, tuple)
f = AllNum([1.0])
self.assertIsInstance(f, list)
def test_returns_correct_value_with_two_types(self):
AllNum = All(int, float)
self.assertTupleEqual(AllNum((2, )), (2, ))
self.assertListEqual(AllNum([2.0]), [2.0])
class TestAllErrorUnnamedOneType(ut.TestCase):
def test_error_on_unnamed_variable_not_iterable(self):
AllInt = All(int)
log_msg = ['ERROR:root:Variable 3 with type int does not seem'
' to be an iterable with elements to inspect!']
err_msg = ('Variable 3 with type int does not seem to'
' be an iterable with elements to inspect!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(IterError) as err:
_ = AllInt(3)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_variable_not_iterable(self):
AllInt = All(int)
log_msg = ['ERROR:root:Variable test with type int does not '
'seem to be an iterable with elements to inspect!']
err_msg = ('Variable test with type int does not seem '
'to be an iterable with elements to inspect!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(IterError) as err:
_ = AllInt(3, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_tuple_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in tuple (4,'
' 5.0) must be int, not float like 5.0!']
err_msg = ('Type of element 1 in tuple (4, 5.0)'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt((4, 5.0))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_list_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in list [4,'
' 5.0] must be int, not float like 5.0!']
err_msg = ('Type of element 1 in list [4, 5.0]'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt([4, 5.0])
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_deque_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in deque([4,'
' 5.0]) must be int, not float like 5.0!']
err_msg = ('Type of element 1 in deque([4, 5.0])'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(deque([4, 5.0]))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_with_one_type(self):
AllInt = All(int)
inputs = {4: 'four', 5.0: 'five'}
log_msg = ["ERROR:root:Type of key in dict {4: 'four', "
"5.0: 'five'} must be int, not float like 5.0!"]
err_msg = ("Type of key in dict {4: 'four', 5.0: "
"'five'} must be int, not float like 5.0!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_ordered_dict_with_one_type(self):
AllInt = All(int)
inputs = OrderedDict({4: 'four', 5.0: 'five'})
log_msg = ["ERROR:root:Type of key in OrderedDict([(4, 'four'),"
" (5.0, 'five')]) must be int, not float like 5.0!"]
err_msg = ("Type of key in OrderedDict([(4, 'four'), (5.0,"
" 'five')]) must be int, not float like 5.0!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_defaultdict_with_one_type(self):
AllInt = All(int)
inputs = defaultdict(str, {4: 'four', 5.0: 'five'})
log_msg = ["ERROR:root:Type of key in defaultdict(<class 'str'>, {4:"
" 'four', 5.0: 'five'}) must be int, not float like 5.0!"]
err_msg = ("Type of key in defaultdict(<class 'str'>, {4: 'four',"
" 5.0: 'five'}) must be int, not float like 5.0!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_key_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of key in dict_keys([4,'
' 5.0]) must be int, not float like 5.0!']
err_msg = ('Type of key in dict_keys([4, 5.0])'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt({4: 'four', 5.0: 'five'}.keys())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_ordered_dict_key_with_one_type(self):
AllInt = All(int)
inputs = OrderedDict({4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in odict_keys([4,'
' 5.0]) must be int, not float like 5.0!']
err_msg = ('Type of key in odict_keys([4, 5.0])'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs.keys())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_defaultdict_key_with_one_type(self):
AllInt = All(int)
inputs = defaultdict(str, {4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in dict_keys([4,'
' 5.0]) must be int, not float like 5.0!']
err_msg = ('Type of key in dict_keys([4, 5.0])'
' must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs.keys())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_values_with_one_type(self):
AllStr = All(str)
log_msg = ["ERROR:root:Type of value in dict_values(['four', 5])"
" must be str, not int like 5!"]
err_msg = ("Type of value in dict_values(['four', 5])"
" must be str, not int like 5!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr({4: 'four', 5.0: 5}.values())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_ordered_dict_values_with_one_type(self):
AllStr = All(str)
inputs = OrderedDict({4: 'four', 5.0: 5})
log_msg = ["ERROR:root:Type of value in odict_values(['four', 5])"
" must be str, not int like 5!"]
err_msg = ("Type of value in odict_values(['four', 5])"
" must be str, not int like 5!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr(inputs.values())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_defaultdict_values_with_one_type(self):
AllStr = All(str)
inputs = defaultdict(str, {4: 'four', 5.0: 5})
log_msg = ["ERROR:root:Type of value in dict_values(['four', 5])"
" must be str, not int like 5!"]
err_msg = ("Type of value in dict_values(['four', 5])"
" must be str, not int like 5!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr(inputs.values())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_set_with_one_type(self):
AllInt = All(int)
log_msg = ["ERROR:root:Type of element in set {4, "
"5.0} must be int, not float like 5.0!"]
err_msg = ("Type of element in set {4, 5.0} must"
" be int, not float like 5.0!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt({4, 5.0})
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_frozenset_with_one_type(self):
AllInt = All(int)
log_msg = ["ERROR:root:Type of element in frozenset({4, "
"5.0}) must be int, not float like 5.0!"]
err_msg = ("Type of element in frozenset({4, 5.0}) must"
" be int, not float like 5.0!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(frozenset({4, 5.0}))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
class TestAllErrorNamedOneType(ut.TestCase):
def test_error_on_wrong_named_tuple_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in tuple '
'test must be int, not float like 5.0!']
err_msg = ('Type of element 1 in tuple test '
'must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt((4, 5.0), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_list_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in list '
'test must be int, not float like 5.0!']
err_msg = ('Type of element 1 in list test '
'must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt([4, 5.0], 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_deque_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in deque '
'test must be int, not float like 5.0!']
err_msg = ('Type of element 1 in deque test '
'must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(deque([4, 5.0]), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_with_one_type(self):
AllInt = All(int)
inputs = {4: 'four', 5.0: 'five'}
log_msg = ['ERROR:root:Type of key in dict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in dict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_ordered_dict_with_one_type(self):
AllInt = All(int)
inputs = OrderedDict({4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in OrderedDict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in OrderedDict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_defaultdict_with_one_type(self):
AllInt = All(int)
inputs = defaultdict(str, {4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in defaultdict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in defaultdict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_keys_with_one_type(self):
AllInt = All(int)
inputs = {4: 'four', 5.0: 'five'}
log_msg = ['ERROR:root:Type of key in dict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in dict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs.keys(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_ordered_dict_keys_with_one_type(self):
AllInt = All(int)
inputs = OrderedDict({4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in dict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in dict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs.keys(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_defaultdict_keys_with_one_type(self):
AllInt = All(int)
inputs = defaultdict(str, {4: 'four', 5.0: 'five'})
log_msg = ['ERROR:root:Type of key in dict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in dict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(inputs.keys(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_values_with_one_type(self):
AllStr = All(str)
inputs = {4: 'four', 5.0: 5}
log_msg = ['ERROR:root:Type of value in dict '
'test must be str, not int like 5!']
err_msg = ('Type of value in dict test '
'must be str, not int like 5!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr(inputs.values(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_ordered_dict_values_with_one_type(self):
AllStr = All(str)
inputs = OrderedDict({4: 'four', 5.0: 5})
log_msg = ['ERROR:root:Type of value in dict '
'test must be str, not int like 5!']
err_msg = ('Type of value in dict test '
'must be str, not int like 5!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr(inputs.values(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_defaultdict_values_with_one_type(self):
AllStr = All(str)
inputs = defaultdict(str, {4: 'four', 5.0: 5})
log_msg = ['ERROR:root:Type of value in dict '
'test must be str, not int like 5!']
err_msg = ('Type of value in dict test '
'must be str, not int like 5!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllStr(inputs.values(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_set_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element in set test'
' must be int, not float like 5.0!']
err_msg = ('Type of element in set test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt({4, 5.0}, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_frozenset_with_one_type(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element in frozenset test'
' must be int, not float like 5.0!']
err_msg = ('Type of element in frozenset test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt(frozenset({4, 5.0}), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
class TestAllErrorTwoTypes(ut.TestCase):
def test_error_on_wrong_unnamed_variable_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of element 2 in tuple (4, 5.0, 'bar')"
" must be one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of element 2 in tuple (4, 5.0, 'bar') must"
" be one of ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum((4, 5.0, 'bar'))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of key in dict {4: 'four', 'bar': 3}"
" must be one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of key in dict {4: 'four', 'bar': 3} must"
" be one of ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 'bar': 3})
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_key_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of key in dict_keys([4, 'bar'])"
" must be one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of key in dict_keys([4, 'bar']) must"
" be one of ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 'bar': 3}.keys())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_dict_value_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of value in dict_values(['four', 3])"
" must be one of ('int', 'float'), not str like four!"]
err_msg = ("Type of value in dict_values(['four', 3]) must"
" be one of ('int', 'float'), not str like four!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 'bar': 3}.values())
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_unnamed_set_with_two_types(self):
AllNum = All(int, float)
with self.assertLogs(level=logging.ERROR):
with self.assertRaises(WrongTypeError):
_ = AllNum({4, 'bar'})
def test_error_on_wrong_unnamed_frozenset_with_two_types(self):
AllNum = All(int, float)
with self.assertLogs(level=logging.ERROR):
with self.assertRaises(WrongTypeError):
_ = AllNum(frozenset({4, 'bar'}))
def test_error_on_wrong_named_variable_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of element 2 in tuple test must"
" be one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of element 2 in tuple test must be one"
" of ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum((4, 5.0, 'bar'), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of key in dict test must be"
" one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of key in dict test must be one of"
" ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 5.0: 'five', 'bar': 3}, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_key_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of key in dict test must be"
" one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of key in dict test must be one of"
" ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 5.0: 'five', 'bar': 3}.keys(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_dict_value_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of value in dict test must be"
" one of ('int', 'float'), not str like four!"]
err_msg = ("Type of value in dict test must be one of"
" ('int', 'float'), not str like four!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4: 'four', 5.0: 'five', 'bar': 3}.values(), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_set_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of element in set test must be"
" one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of element in set test must be one of"
" ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum({4, 5.0, 'bar'}, 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_wrong_named_frozenset_with_two_types(self):
AllNum = All(int, float)
log_msg = ["ERROR:root:Type of element in frozenset test must be"
" one of ('int', 'float'), not str like bar!"]
err_msg = ("Type of element in frozenset test must be one of"
" ('int', 'float'), not str like bar!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllNum(frozenset({4, 5.0, 'bar'}), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
class TestAllMethods(ut.TestCase):
def test_has_iterable_type_checker_attributes(self):
AllNum = All(int, float)
for iterable in _REDUCED_ITER:
self.assertTrue(hasattr(AllNum, iterable.__name__))
self.assertTrue(hasattr(AllNum, 'NonEmpty'))
def test_iterable_type_checkers_are_type_CompositionOf(self):
AllNum = All(int, float)
for iterable in _REDUCED_ITER:
type_checker = getattr(AllNum, iterable.__name__)
self.assertIsInstance(type_checker, CompositionOf)
self.assertIsInstance(AllNum.NonEmpty, CompositionOf)
def test_has_attribute_NonEmpty(self):
AllInt = All(int)
self.assertTrue(hasattr(AllInt, 'NonEmpty'))
def test_attribute_NonEmpty_is_type_CompositionOf(self):
AllInt = All(int)
self.assertIsInstance(AllInt.NonEmpty, CompositionOf)
def test_has_attribute_JustLen(self):
AllInt = All(int)
self.assertTrue(hasattr(AllInt, 'JustLen'))
def test_attribute_JustLen_is_type_CompositionOf(self):
AllInt = All(int)
self.assertIsInstance(AllInt.JustLen, CompositionOf)
def test_works_through_type_and_non_empty_checkers(self):
AllInt = All(int)
log_msg = ['ERROR:root:Type of element 1 in tuple '
'test must be int, not float like 5.0!']
err_msg = ('Type of element 1 in tuple test '
'must be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt.NonEmpty.JustTuple((4, 5.0), 'test')
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_through_dict_and_just_length_checkers(self):
AllInt = All(int)
inputs = {4: 'four', 5.0: 'five'}
log_msg = ['ERROR:root:Type of key in dict test'
' must be int, not float like 5.0!']
err_msg = ('Type of key in dict test must'
' be int, not float like 5.0!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(WrongTypeError) as err:
_ = AllInt.JustLen.JustDict(inputs, 'test', length=2)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_has_attribute_o(self):
AllInt = All(int)
self.assertTrue(hasattr(AllInt, 'o'))
def test_attribute_o_is_callable(self):
AllInt = All(int)
self.assertTrue(callable(AllInt.o))
def test_o_returns_composition(self):
AllInt = All(int)
AllNum = All(int, float)
composition = AllInt.o(AllNum)
self.assertIsInstance(composition, CompositionOf)
def test_o_raises_error_on_argument_not_callable(self):
AllInt = All(int)
err_msg = ('foo must be a callable that accepts (i) a value,'
' (ii) an optional name for that value, and (iii)'
' any number of keyword arguments!')
with self.assertRaises(CallableError) as err:
_ = AllInt.o('foo')
self.assertEqual(str(err.exception), err_msg)
if __name__ == '__main__':
ut.main()
authors: ["georg.heimel@snapp.cab"]
author_id: georg.heimel@snapp.cab
blob_id: 31ab53622731a767c1b356b06feb534ca482348d
directory_id: d63e022dcd11d9249e5a7b9670c0f7ab7cc735ef
path: /genproduction_configs/WJetsToQQ_HT400to600.py
content_id: d8b79646128d2ba871db58039d4f6fe8ac571811
detected_licenses: ["MIT"]
license_type: permissive
repo_name: clelange/TruthNtuple
snapshot_id: b55caaeb81bc06929b013e5c7af2cd667e184688
revision_id: fd39c9ea2b1d02084a63d3409f79a4a902703a5f
branch_name: refs/heads/master
visit_date: 2020-03-12T01:07:17.299211
revision_date: 2018-05-10T10:10:41
committer_date: 2018-05-10T10:12:17
github_id: 130,367,808
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,422
extension: py
content:
import FWCore.ParameterSet.Config as cms
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/2017/13TeV/madgraph/V5_2.4.2/WJetsToQQ/WJetsToQQ_HT400to600_slc6_amd64_gcc481_CMSSW_7_1_30_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Link to cards:
# https://github.com/cms-sw/genproductions/commits/master/bin/MadGraph5_aMCatNLO/cards/production/2017/13TeV/WJets_LO_MLM/WJetsToQQ_HT400to600
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
processParameters = cms.vstring(
'JetMatching:setMad = off',
'JetMatching:scheme = 1',
'JetMatching:merge = on',
'JetMatching:jetAlgorithm = 2',
'JetMatching:etaJetMax = 5.',
'JetMatching:coneRadius = 1.',
'JetMatching:slowJetPower = 1',
'JetMatching:qCut = 19.', #this is the actual merging scale
'JetMatching:nQmatch = 5', #4 corresponds to 4-flavour scheme (no matching of b-quarks), 5 for 5-flavour scheme
'JetMatching:nJetMax = 4', #number of partons in born matrix element for highest multiplicity
'JetMatching:doShowerKt = off', #off for MLM matching, turn on for shower-kT matching
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters',
)
)
)
# Link to generator fragment:
# https://raw.githubusercontent.com/cms-sw/genproductions/f5432464cef51990b40ac8ae01844b7a3091b4cb/python/ThirteenTeV/Hadronizer/Hadronizer_TuneCP5_13TeV_generic_LHE_pythia8_cff.py
authors: ["clemens.lange@cern.ch"]
author_id: clemens.lange@cern.ch
blob_id: 7652f8e517b6c0253bb59ed0e0a90c6899495f5f
directory_id: 165cc420b7e8c1b7cbcfd17fea89611db90e866c
path: /hello_world.py
content_id: 1496b95f0f572c35020b6dae68b45c3dd706f1fb
detected_licenses: []
license_type: no_license
repo_name: Shoolapani/Demo-repo
snapshot_id: 378d21ac11ab0643a1441432bf55ad497ec0c324
revision_id: 57079c0c119c99878ad2ce8bf7b70b3681b1ed8c
branch_name: refs/heads/main
visit_date: 2023-04-12T05:58:43.816622
revision_date: 2021-05-06T13:16:41
committer_date: 2021-05-06T13:16:41
github_id: 363,992,991
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2021-05-06T13:16:42
gha_created_at: 2021-05-03T16:30:31
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 135
extension: py
content:
# First project
print("Hello world")
num = int(input("Enter the number:- "))
square = num * num
print("Square of the number:- ", square)
authors: ["satyamkrjha040@gmail.com"]
author_id: satyamkrjha040@gmail.com
blob_id: a653b493012302d01c67b1a5803b34b9ad1dd3a0
directory_id: e69c09e75e2f1369e98b28e6380868f30c2e0baf
path: /experiments/lsi_clip/make_archive_collage.py
content_id: 994afaf3005b420406046caca41cdb42223613bd
detected_licenses: ["MIT"]
license_type: permissive
repo_name: kangalice/dqd
snapshot_id: 60d8cfd5a1f6c7e2b82173465ded7c4032bd96d9
revision_id: 262c31f56cd4704b76ddaf9b9eb024afdbb209d6
branch_name: refs/heads/main
visit_date: 2023-09-01T06:55:47.944799
revision_date: 2021-10-29T23:01:22
committer_date: 2021-10-29T23:01:22
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,613
extension: py
content:
# A small script for generating a collage of faces from the QD-Archive
import matplotlib
import matplotlib.font_manager
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams["font.family"] = 'serif'
matplotlib.rcParams["font.serif"] = 'Palatino'
matplotlib.rc('font', size=20)
import os
from pathlib import Path
import torch
import numpy as np
import pandas as pd
from stylegan_models import g_all, g_synthesis, g_mapping
from PIL import Image
# Note that only final archives encode latent codes.
archive_filename = 'logs/cma_mega_adam/trial_0/archive_00010000.pkl'
# min and max index for rows then columns (row major).
# The archive is shape (200, 200) indexed from [0, 200).
archive_dims = (200, 200)
archive_index_range = ((90, 200), (90, 200))
# Measure ranges
measure_ranges = ((0,6), (0,6))
# Controls that x rows and y columns are generated
# Images are "evenly" (as possible) sampled based on this criteria
picture_frequency = (8, 5)
# Use the CPU while we are running exps.
device = "cpu"
# Uncomment to save all grid images separately.
#gen_output_dir = os.path.join('grid_imgs')
#logdir = Path(gen_output_dir)
#if not logdir.is_dir():
# logdir.mkdir()
g_synthesis.eval()
g_synthesis.to(device)
for p in g_synthesis.parameters():
p.requires_grad_(False)
# Read the archive from the log (pickle file)
df = pd.read_pickle(archive_filename)
imgs = []
for j in reversed(range(picture_frequency[1])):
for i in range(picture_frequency[0]):
delta_i = archive_index_range[0][1] - archive_index_range[0][0]
delta_j = archive_index_range[1][1] - archive_index_range[1][0]
index_i_lower = int(delta_i * i / picture_frequency[0] + archive_index_range[0][0])
index_i_upper = int(delta_i * (i+1) / picture_frequency[0] + archive_index_range[0][0])
index_j_lower = int(delta_j * j / picture_frequency[1] + archive_index_range[1][0])
index_j_upper = int(delta_j * (j+1) / picture_frequency[1] + archive_index_range[1][0])
print(i, j, index_i_lower, index_i_upper, index_j_lower, index_j_upper)
query_string = f"{index_i_lower} <= index_0 & index_0 <= {index_i_upper} &"
query_string += f"{index_j_lower} <= index_1 & index_1 <= {index_j_upper}"
print(query_string)
df_cell = df.query(query_string)
if not df_cell.empty:
sol = df_cell.iloc[df_cell['objective'].argmax()]
print(sol)
latent_code = torch.tensor(sol[5:].values, dtype=torch.float32, device=device)
latents = torch.nn.Parameter(latent_code, requires_grad=False)
dlatents = latents.repeat(1,18,1)
img = g_synthesis(dlatents)
            img = (img.clamp(-1, 1) + 1) / 2.0  # rescale from [-1, 1] to [0, 1]
# Uncomment to save all grid images separately.
#pil_img = img[0].permute(1, 2, 0).detach().cpu().numpy() * 255
#pil_img = Image.fromarray(pil_img.astype('uint8'))
#pil_img.save(os.path.join(gen_output_dir, f'{j}_{i}.png'))
img = img[0].detach().cpu()
imgs.append(img)
else:
imgs.append(torch.zeros((3,1024,1024)))
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
plt.figure(figsize=(16,10))
img_grid = make_grid(imgs, nrow=picture_frequency[0], padding=0)
img_grid = np.transpose(img_grid.cpu().numpy(), (1,2,0))
plt.imshow(img_grid)
plt.xlabel("A man with blue eyes.")
plt.ylabel("A person with red hair.")
def create_archive_tick_labels(axis_range, measure_range, dim, num_ticks):
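    # Linearly map the index window `axis_range` of an archive dimension of size
    # `dim` onto measure space, returning num_ticks+1 rounded tick labels.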
low_pos = axis_range[0] / dim
high_pos = axis_range[1] / dim
tick_offset = [
(high_pos - low_pos) * (p / num_ticks) + low_pos
for p in range(num_ticks+1)
]
ticklabels = [
round((measure_range[1]-measure_range[0]) * p + measure_range[0], 2)
for p in tick_offset
]
return ticklabels
num_x_ticks = 6
num_y_ticks = 6
x_ticklabels = create_archive_tick_labels(archive_index_range[0],
measure_ranges[0], archive_dims[0], num_x_ticks)
y_ticklabels = create_archive_tick_labels(archive_index_range[1],
measure_ranges[1], archive_dims[1], num_y_ticks)
y_ticklabels.reverse()
x_tick_range = img_grid.shape[1]
x_ticks = np.arange(0, x_tick_range+1e-9, step=x_tick_range/num_x_ticks)
y_tick_range = img_grid.shape[0]
y_ticks = np.arange(0, y_tick_range+1e-9, step=y_tick_range/num_y_ticks)
plt.xticks(x_ticks, x_ticklabels)
plt.yticks(y_ticks, y_ticklabels)
plt.tight_layout()
plt.savefig('collage.pdf')
authors: ["tehqin@gmail.com"]
author_id: tehqin@gmail.com
blob_id: 8994919e217e0faa9714145047e5dc631cc7c871
directory_id: 8dbb48232b8647d1aa6f1032e5eeb8a525f1fd22
path: /backend/migrations/0004_auto_20200520_0049.py
content_id: a004edc82f884642e86722a623618b198be9b2c9
detected_licenses: ["MIT"]
license_type: permissive
repo_name: manulangat1/Jaza-ndai
snapshot_id: 97538e7f8a164b213163e6416a88537c39ec37f2
revision_id: e0b02051665a18d64d7d1e1e3f8384b0f3757695
branch_name: refs/heads/master
visit_date: 2023-01-30T23:07:26.554672
revision_date: 2020-06-12T19:15:47
committer_date: 2020-06-12T19:15:47
github_id: 242,711,544
star_events_count: 2
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2023-01-24T02:32:51
gha_created_at: 2020-02-24T10:51:29
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 741
extension: py
content:
# Generated by Django 3.0.3 on 2020-05-19 21:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('backend', '0003_auto_20200519_1639'),
]
operations = [
migrations.RemoveField(
model_name='review',
name='ride',
),
migrations.AddField(
model_name='review',
name='driver',
field=models.ForeignKey(default=django.utils.timezone.now, on_delete=django.db.models.deletion.CASCADE, related_name='driver_review', to=settings.AUTH_USER_MODEL),
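            # default=timezone.now is the one-off default entered at makemigrations
            # time; preserve_default=False means it only back-fills existing rows.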
preserve_default=False,
),
]
authors: ["emmanuelthedeveloper@gmail.com"]
author_id: emmanuelthedeveloper@gmail.com
blob_id: 6e9fd4673eafa40dfb29ca75b028166591b30fd6
directory_id: 7aaec8760f020ed934b0bf08e3c569bf73c5b9cd
path: /django/django_orm/book_authors_proj/server/env/bin/django-admin
content_id: 5f63b697eb35054ccb24f26ea28414b75e5a4b69
detected_licenses: []
license_type: no_license
repo_name: DallasJM/Python
snapshot_id: 8715b5e09de028ed0dae44f59d0dd0a10e7f8981
revision_id: 924ac32089737165c9e90497792740e2b138d987
branch_name: refs/heads/master
visit_date: 2021-03-13T09:32:56.120769
revision_date: 2020-03-11T19:53:53
committer_date: 2020-03-11T19:53:53
github_id: 246,664,738
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 349
extension: (empty)
content:
#!/Users/sallad/Code/bootcamp/python_stack/django/django_orm/book_authors_proj/server/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
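    # Strip the "-script.py"/".exe" suffix that setuptools console-script wrappers
    # append to argv[0] (mainly on Windows) so the reported program name is clean.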
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
authors: ["sallad@Dallass-MacBook-Air.local"]
author_id: sallad@Dallass-MacBook-Air.local
blob_id: 14ecf3ab4975eb5791a89430d0ba8be5cd8e6585
directory_id: 297efd4afeb46c0b56d9a975d76665caef213acc
path: /src/core/migrations/0071_auto_20190802_1405.py
content_id: c7aac7e2794c88618122bd34b6f554eee1e4c436
detected_licenses: ["MIT"]
license_type: permissive
repo_name: metabolism-of-cities/metabolism-of-cities-platform-v3
snapshot_id: 67716c3daae86a0fe527c18aef26ce29e069cbcc
revision_id: c754d3b1b401906a21640b8eacb6b724a448b31c
branch_name: refs/heads/master
visit_date: 2022-12-06T22:56:22.207853
revision_date: 2020-08-25T09:53:51
committer_date: 2020-08-25T09:53:51
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 496
extension: py
content:
# Generated by Django 2.2.2 on 2019-08-02 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0070_auto_20190802_1331'),
]
operations = [
migrations.AddField(
model_name='people',
name='organizations',
field=models.ManyToManyField(blank=True, to='core.Organization'),
),
migrations.DeleteModel(
name='PeopleAffiliation',
),
]
authors: ["paul@penguinprotocols.com"]
author_id: paul@penguinprotocols.com
blob_id: ad8acdf6ff1d85ddfa41331f6fa6de3a87cb1b49
directory_id: e2c53cf441f071dac3b563c902e2d53409f87ffc
path: /teacher/urls.py
content_id: 3651f23da64f579a4e28789bea4bcf14e016e44b
detected_licenses: []
license_type: no_license
repo_name: spriyaalpha/DirectorySchoolApplication
snapshot_id: 9446c93c3fba76f0ebb086b80d9c34d3db6eabce
revision_id: c9d8c60d9c4c4aaa2644dddb029bf42a274575c7
branch_name: refs/heads/main
visit_date: 2023-06-02T19:55:47.247543
revision_date: 2021-06-20T07:47:47
committer_date: 2021-06-20T07:47:47
github_id: 378,121,562
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 451
extension: py
content:
from django.urls import path
from . import views
urlpatterns = [
path('registration', views.teacher_registration, name='teacher-registration'),
path('list', views.teacher_list, name='teacher-list'),
path('profile/<FinalUploadedDetails_id>/', views.teacher_profile, name='teacher-profile'),
path('list/sort/', views.teacher_list_sort, name='teacher-list-sort'),
path('load-upazilla', views.load_upazilla, name='load-upazilla'),
]
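# Hedged sketch of the view side of this routing (the real views live in
# views.py; only the parameter name comes from the urlconf above, the body
# and response text are assumptions):
from django.http import HttpResponse

def example_teacher_profile(request, FinalUploadedDetails_id):
    # the <FinalUploadedDetails_id> path segment is captured as a string kwarg
    return HttpResponse("profile for %s" % FinalUploadedDetails_id)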
|
[
"noreply@github.com"
] |
spriyaalpha.noreply@github.com
|
ba1f261b3860292d509efc9541c58fb97e8cdae4
|
19cb1af3b1e384e8dbb55c5ecfea1cb23598b0be
|
/df/asgi.py
|
ec63d9a4ee5921d2e76db34e7796f94d2867df59
|
[] |
no_license
|
beautxiang/df
|
6d220c3936f562cf13d7dec766eded9129a785eb
|
2a3d0d06dfd3ff26b018e6a4f6ed7f4b4a2251a2
|
refs/heads/master
| 2023-05-30T07:38:33.291681
| 2021-06-15T08:14:23
| 2021-06-15T08:14:23
| 377,076,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
"""
ASGI config for df project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'df.settings')
application = get_asgi_application()
|
[
"2494916623@qq.com"
] |
2494916623@qq.com
|
502d4c8f8451e1c9b1c0ba98ece8a47bedf9a824
|
d483dd0e8f18569329456e7fc14b3839664a7e72
|
/manage.py
|
c13f12f3531f9b8b12734ede4a74d670e1beb055
|
[] |
no_license
|
lucyHD/pathfinder
|
b62bd2f8838992f9a8c827dd50620b205a1b79f1
|
605eec4c5b6eec7dc04d73a082c0ad7a7be7cc45
|
refs/heads/master
| 2023-08-11T02:42:08.699934
| 2020-07-02T16:27:35
| 2020-07-02T16:27:35
| 274,635,751
| 1
| 0
| null | 2021-09-22T19:22:49
| 2020-06-24T10:01:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pathfinderproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"lucyhelendonnelly@gmail.com"
] |
lucyhelendonnelly@gmail.com
|
6f2e3b891d08a35366f8720d70ec8fc1efe31d7c
|
f5171500752e258406718a0d2f33e027e97e9225
|
/Simulators/BEsim-py/be-sim-version-one/input/machines/titan.py
|
122d7300189aea326e7a6371c5d2f8932ce88acd
|
[] |
no_license
|
UFCCMT/behavioural_emulation
|
03e0c84db0600201ccb29a843a4998dcfc17b92a
|
e5fa2f1262d7a72ab770d919cc3b9a849a577267
|
refs/heads/master
| 2021-09-25T17:55:33.428634
| 2018-10-24T15:55:37
| 2018-10-24T15:55:37
| 153,633,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
# @Author: Nalini Kumar
# @Date: 09-12-16
# @Brief: Hardware description file for Titan @ LLNL
# Simulation Root is the "type" (not the "name") of the component that defines
# simulation granularity. default:system, rack, node, processor
Root( "node" )
#This capability will need to be built into the simulator
Leaf( "processor" ) # deafult:core, processor, node
#------------- Define machine hierarchies --------------
# Component( "name", "hierarchy", "type", "quantity" )
# Containers:
Component( "titan", "system" )
Component( "titan.rack" ) #200 racks
Component( "titan.board" ) #24 boards
# Real things:
Component( "titan.node", "node", "cray-xk7", "4" )
Component( "titan.cpu", "processor", "amd.opteron", "1" )
Component( "titan.gpu", "processor", "nvidia.k20x", "1" )
#Override lookup file defined for "titan.cpu" for "computeA"
Operation( "titan.cpu", "computeA", "amd.opteron.computeA.csv")
Operation( "titan.gpu", "transfer", "pci-x16-transfer.csv")
#------------- Define machine networks -----------------
Component( "titan.network", "cray-gemini" ) #predefined type cray-gemini
Component( "titan.node.network", "pci-x16" ) #predefined type pci-x16
Component( "titan.processor.network", "links")
# Override lookup file for "titan.network" and "titan.node.network" for "transfer"
Operation( "titan.network", "transfer", "gemini-transfer.csv")
Operation( "titan.node.network", "transfer", "pci-x16-transfer.csv")
#------------- Describe the connectivity -----------
Connect( "titan.cpu" "titan.cpu.mem", "ddr-channel")
Connect( "titan.gpu" "titan.gpu.mem", "gddr5-channel")
#Offspring( container.name, connect[offspring1, offspring2, interconnect], ... )
Offspring( "temp", connect["titan.cpu", "titan.gpu", "titan.node.network"], ... )
Offspring( "titan.node", connect["temp", "titan.network.router", "ht3link"] )
Offspring( "titan.system", connect["titan.network.router", "3d-torus[200,30,16]"] )
Offspring( "titan.rack", connect["titan.network.router", "3d-torus[]"] )
|
[
"aravindneelakantan@gmail.com"
] |
aravindneelakantan@gmail.com
|
fec2b91bc15aa91b4365c4c1ddc5428b06f48d34
|
251c70bd53ce6c499b011590c2f73632696d950f
|
/virtual/lib/python3.6/site-packages/mypy/modulefinder.py
|
dd801d213064a56f5f66242dd227a8fbae975453
|
[
"MIT"
] |
permissive
|
EduardoPessanha/Git-Python
|
ef09e404641fb988817c995bdf607c1860bf0622
|
87aa10af09510469032732ed2c55d0d65eb4c1d6
|
refs/heads/master
| 2023-01-25T01:10:51.089507
| 2020-12-03T12:27:44
| 2020-12-03T12:27:44
| 296,760,423
| 0
| 0
|
MIT
| 2020-09-28T03:37:14
| 2020-09-19T01:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 28,020
|
py
|
"""Low-level infrastructure to find modules.
This build on fscache.py; find_sources.py builds on top of this.
"""
import ast
import collections
import functools
import os
import subprocess
import sys
from enum import Enum
from typing import Dict, List, NamedTuple, Optional, Set, Tuple, Union
from typing_extensions import Final
from mypy.defaults import PYTHON3_VERSION_MIN
from mypy.fscache import FileSystemCache
from mypy.options import Options
from mypy import sitepkgs
# Paths to be searched in find_module().
SearchPaths = NamedTuple(
'SearchPaths',
[('python_path', Tuple[str, ...]), # where user code is found
('mypy_path', Tuple[str, ...]), # from $MYPYPATH or config variable
('package_path', Tuple[str, ...]), # from get_site_packages_dirs()
('typeshed_path', Tuple[str, ...]), # paths in typeshed
])
# Package dirs are a two-tuple of path to search and whether to verify the module
OnePackageDir = Tuple[str, bool]
PackageDirs = List[OnePackageDir]
PYTHON_EXTENSIONS = ['.pyi', '.py'] # type: Final
# TODO: Consider adding more reasons here?
# E.g. if we deduce a module would likely be found if the user were
# to set the --namespace-packages flag.
class ModuleNotFoundReason(Enum):
# The module was not found: we found neither stubs nor a plausible code
# implementation (with or without a py.typed file).
NOT_FOUND = 0
# The implementation for this module plausibly exists (e.g. we
# found a matching folder or *.py file), but either the parent package
# did not contain a py.typed file or we were unable to find a
# corresponding *-stubs package.
FOUND_WITHOUT_TYPE_HINTS = 1
# The module was not found in the current working directory, but
# was able to be found in the parent directory.
WRONG_WORKING_DIRECTORY = 2
def error_message_templates(self) -> Tuple[str, str]:
if self is ModuleNotFoundReason.NOT_FOUND:
msg = "Cannot find implementation or library stub for module named '{}'"
note = "See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports"
elif self is ModuleNotFoundReason.WRONG_WORKING_DIRECTORY:
msg = "Cannot find implementation or library stub for module named '{}'"
note = ("You may be running mypy in a subpackage, "
"mypy should be run on the package root")
elif self is ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS:
msg = "Skipping analyzing '{}': found module but no type hints or library stubs"
note = "See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports"
else:
assert False
return msg, note
# If we found the module, returns the path to the module as a str.
# Otherwise, returns the reason why the module wasn't found.
ModuleSearchResult = Union[str, ModuleNotFoundReason]
class BuildSource:
"""A single source file."""
def __init__(self, path: Optional[str], module: Optional[str],
text: Optional[str] = None, base_dir: Optional[str] = None) -> None:
self.path = path # File where it's found (e.g. 'xxx/yyy/foo/bar.py')
self.module = module or '__main__' # Module name (e.g. 'foo.bar')
self.text = text # Source code, if initially supplied, else None
self.base_dir = base_dir # Directory where the package is rooted (e.g. 'xxx/yyy')
def __repr__(self) -> str:
return '<BuildSource path=%r module=%r has_text=%s base_dir=%s>' % (
self.path,
self.module,
self.text is not None,
self.base_dir)
class FindModuleCache:
"""Module finder with integrated cache.
Module locations and some intermediate results are cached internally
and can be cleared with the clear() method.
All file system accesses are performed through a FileSystemCache,
which is not ever cleared by this class. If necessary it must be
cleared by client code.
"""
def __init__(self,
search_paths: SearchPaths,
fscache: Optional[FileSystemCache] = None,
options: Optional[Options] = None,
ns_packages: Optional[List[str]] = None) -> None:
self.search_paths = search_paths
self.fscache = fscache or FileSystemCache()
# Cache for get_toplevel_possibilities:
# search_paths -> (toplevel_id -> list(package_dirs))
self.initial_components = {} # type: Dict[Tuple[str, ...], Dict[str, List[str]]]
# Cache find_module: id -> result
self.results = {} # type: Dict[str, ModuleSearchResult]
self.ns_ancestors = {} # type: Dict[str, str]
self.options = options
self.ns_packages = ns_packages or [] # type: List[str]
def clear(self) -> None:
self.results.clear()
self.initial_components.clear()
self.ns_ancestors.clear()
def find_lib_path_dirs(self, id: str, lib_path: Tuple[str, ...]) -> PackageDirs:
"""Find which elements of a lib_path have the directory a module needs to exist.
This is run for the python_path, mypy_path, and typeshed_path search paths."""
components = id.split('.')
dir_chain = os.sep.join(components[:-1]) # e.g., 'foo/bar'
dirs = []
for pathitem in self.get_toplevel_possibilities(lib_path, components[0]):
# e.g., '/usr/lib/python3.4/foo/bar'
dir = os.path.normpath(os.path.join(pathitem, dir_chain))
if self.fscache.isdir(dir):
dirs.append((dir, True))
return dirs
def get_toplevel_possibilities(self, lib_path: Tuple[str, ...], id: str) -> List[str]:
"""Find which elements of lib_path could contain a particular top-level module.
In practice, almost all modules can be routed to the correct entry in
lib_path by looking at just the first component of the module name.
We take advantage of this by enumerating the contents of all of the
directories on the lib_path and building a map of which entries in
the lib_path could contain each potential top-level module that appears.
"""
if lib_path in self.initial_components:
return self.initial_components[lib_path].get(id, [])
# Enumerate all the files in the directories on lib_path and produce the map
components = {} # type: Dict[str, List[str]]
for dir in lib_path:
try:
contents = self.fscache.listdir(dir)
except OSError:
contents = []
# False positives are fine for correctness here, since we will check
# precisely later, so we only look at the root of every filename without
# any concern for the exact details.
for name in contents:
name = os.path.splitext(name)[0]
components.setdefault(name, []).append(dir)
self.initial_components[lib_path] = components
return components.get(id, [])
def find_module(self, id: str) -> ModuleSearchResult:
"""Return the path of the module source file or why it wasn't found."""
if id not in self.results:
self.results[id] = self._find_module(id)
if (self.results[id] is ModuleNotFoundReason.NOT_FOUND
and self._can_find_module_in_parent_dir(id)):
self.results[id] = ModuleNotFoundReason.WRONG_WORKING_DIRECTORY
return self.results[id]
def _find_module_non_stub_helper(self, components: List[str],
pkg_dir: str) -> Union[OnePackageDir, ModuleNotFoundReason]:
plausible_match = False
dir_path = pkg_dir
for index, component in enumerate(components):
dir_path = os.path.join(dir_path, component)
if self.fscache.isfile(os.path.join(dir_path, 'py.typed')):
return os.path.join(pkg_dir, *components[:-1]), index == 0
elif not plausible_match and (self.fscache.isdir(dir_path)
or self.fscache.isfile(dir_path + ".py")):
plausible_match = True
if plausible_match:
return ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS
else:
return ModuleNotFoundReason.NOT_FOUND
def _update_ns_ancestors(self, components: List[str], match: Tuple[str, bool]) -> None:
path, verify = match
for i in range(1, len(components)):
pkg_id = '.'.join(components[:-i])
if pkg_id not in self.ns_ancestors and self.fscache.isdir(path):
self.ns_ancestors[pkg_id] = path
path = os.path.dirname(path)
def _can_find_module_in_parent_dir(self, id: str) -> bool:
"""Test if a module can be found by checking the parent directories
of the current working directory.
"""
working_dir = os.getcwd()
parent_search = FindModuleCache(SearchPaths((), (), (), ()))
while any(file.endswith(("__init__.py", "__init__.pyi"))
for file in os.listdir(working_dir)):
working_dir = os.path.dirname(working_dir)
parent_search.search_paths = SearchPaths((working_dir,), (), (), ())
if not isinstance(parent_search._find_module(id), ModuleNotFoundReason):
return True
return False
def _find_module(self, id: str) -> ModuleSearchResult:
fscache = self.fscache
# If we're looking for a module like 'foo.bar.baz', it's likely that most of the
# many elements of lib_path don't even have a subdirectory 'foo/bar'. Discover
# that only once and cache it for when we look for modules like 'foo.bar.blah'
# that will require the same subdirectory.
components = id.split('.')
dir_chain = os.sep.join(components[:-1]) # e.g., 'foo/bar'
# TODO (ethanhs): refactor each path search to its own method with lru_cache
# We have two sets of folders so that we collect *all* stubs folders and
# put them in the front of the search path
third_party_inline_dirs = [] # type: PackageDirs
third_party_stubs_dirs = [] # type: PackageDirs
found_possible_third_party_missing_type_hints = False
# Third-party stub/typed packages
for pkg_dir in self.search_paths.package_path:
stub_name = components[0] + '-stubs'
stub_dir = os.path.join(pkg_dir, stub_name)
if fscache.isdir(stub_dir):
stub_typed_file = os.path.join(stub_dir, 'py.typed')
stub_components = [stub_name] + components[1:]
path = os.path.join(pkg_dir, *stub_components[:-1])
if fscache.isdir(path):
if fscache.isfile(stub_typed_file):
# Stub packages can have a py.typed file, which must include
# 'partial\n' to make the package partial
# Partial here means that mypy should look at the runtime
# package if installed.
if fscache.read(stub_typed_file).decode().strip() == 'partial':
runtime_path = os.path.join(pkg_dir, dir_chain)
third_party_inline_dirs.append((runtime_path, True))
# if the package is partial, we don't verify the module, as
# the partial stub package may not have a __init__.pyi
third_party_stubs_dirs.append((path, False))
else:
# handle the edge case where people put a py.typed file
# in a stub package, but it isn't partial
third_party_stubs_dirs.append((path, True))
else:
third_party_stubs_dirs.append((path, True))
non_stub_match = self._find_module_non_stub_helper(components, pkg_dir)
if isinstance(non_stub_match, ModuleNotFoundReason):
if non_stub_match is ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS:
found_possible_third_party_missing_type_hints = True
else:
third_party_inline_dirs.append(non_stub_match)
self._update_ns_ancestors(components, non_stub_match)
if self.options and self.options.use_builtins_fixtures:
# Everything should be in fixtures.
third_party_inline_dirs.clear()
third_party_stubs_dirs.clear()
found_possible_third_party_missing_type_hints = False
python_mypy_path = self.search_paths.mypy_path + self.search_paths.python_path
candidate_base_dirs = self.find_lib_path_dirs(id, python_mypy_path) + \
third_party_stubs_dirs + third_party_inline_dirs + \
self.find_lib_path_dirs(id, self.search_paths.typeshed_path)
# If we're looking for a module like 'foo.bar.baz', then candidate_base_dirs now
# contains just the subdirectories 'foo/bar' that actually exist under the
# elements of lib_path. This is probably much shorter than lib_path itself.
# Now just look for 'baz.pyi', 'baz/__init__.py', etc., inside those directories.
seplast = os.sep + components[-1] # so e.g. '/baz'
sepinit = os.sep + '__init__'
near_misses = [] # Collect near misses for namespace mode (see below).
for base_dir, verify in candidate_base_dirs:
base_path = base_dir + seplast # so e.g. '/usr/lib/python3.4/foo/bar/baz'
has_init = False
dir_prefix = base_dir
for _ in range(len(components) - 1):
dir_prefix = os.path.dirname(dir_prefix)
# Prefer package over module, i.e. baz/__init__.py* over baz.py*.
for extension in PYTHON_EXTENSIONS:
path = base_path + sepinit + extension
path_stubs = base_path + '-stubs' + sepinit + extension
if fscache.isfile_case(path, dir_prefix):
has_init = True
if verify and not verify_module(fscache, id, path, dir_prefix):
near_misses.append((path, dir_prefix))
continue
return path
elif fscache.isfile_case(path_stubs, dir_prefix):
if verify and not verify_module(fscache, id, path_stubs, dir_prefix):
near_misses.append((path_stubs, dir_prefix))
continue
return path_stubs
# In namespace mode, register a potential namespace package
if self.options and self.options.namespace_packages:
if fscache.isdir(base_path) and not has_init:
near_misses.append((base_path, dir_prefix))
# No package, look for module.
for extension in PYTHON_EXTENSIONS:
path = base_path + extension
if fscache.isfile_case(path, dir_prefix):
if verify and not verify_module(fscache, id, path, dir_prefix):
near_misses.append((path, dir_prefix))
continue
return path
# In namespace mode, re-check those entries that had 'verify'.
# Assume search path entries xxx, yyy and zzz, and we're
# looking for foo.bar.baz. Suppose near_misses has:
#
# - xxx/foo/bar/baz.py
# - yyy/foo/bar/baz/__init__.py
# - zzz/foo/bar/baz.pyi
#
# If any of the foo directories has __init__.py[i], it wins.
# Else, we look for foo/bar/__init__.py[i], etc. If there are
# none, the first hit wins. Note that this does not take into
# account whether the lowest-level module is a file (baz.py),
# a package (baz/__init__.py), or a stub file (baz.pyi) -- for
# these the first one encountered along the search path wins.
#
# The helper function highest_init_level() returns an int that
# indicates the highest level at which a __init__.py[i] file
# is found; if no __init__ was found it returns 0, if we find
# only foo/bar/__init__.py it returns 1, and if we have
# foo/__init__.py it returns 2 (regardless of what's in
# foo/bar). It doesn't look higher than that.
if self.options and self.options.namespace_packages and near_misses:
levels = [highest_init_level(fscache, id, path, dir_prefix)
for path, dir_prefix in near_misses]
index = levels.index(max(levels))
return near_misses[index][0]
# Finally, we may be asked to produce an ancestor for an
# installed package with a py.typed marker that is a
# subpackage of a namespace package. We only fess up to these
# if we would otherwise return "not found".
ancestor = self.ns_ancestors.get(id)
if ancestor is not None:
return ancestor
if found_possible_third_party_missing_type_hints:
return ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS
else:
return ModuleNotFoundReason.NOT_FOUND
def find_modules_recursive(self, module: str) -> List[BuildSource]:
module_path = self.find_module(module)
if isinstance(module_path, ModuleNotFoundReason):
return []
result = [BuildSource(module_path, module, None)]
if module_path.endswith(('__init__.py', '__init__.pyi')):
# Subtle: this code prefers the .pyi over the .py if both
# exists, and also prefers packages over modules if both x/
# and x.py* exist. How? We sort the directory items, so x
# comes before x.py and x.pyi. But the preference for .pyi
# over .py is encoded in find_module(); even though we see
# x.py before x.pyi, find_module() will find x.pyi first. We
# use hits to avoid adding it a second time when we see x.pyi.
# This also avoids both x.py and x.pyi when x/ was seen first.
hits = set() # type: Set[str]
for item in sorted(self.fscache.listdir(os.path.dirname(module_path))):
abs_path = os.path.join(os.path.dirname(module_path), item)
if os.path.isdir(abs_path) and \
(os.path.isfile(os.path.join(abs_path, '__init__.py')) or
os.path.isfile(os.path.join(abs_path, '__init__.pyi'))):
hits.add(item)
result += self.find_modules_recursive(module + '.' + item)
elif item != '__init__.py' and item != '__init__.pyi' and \
item.endswith(('.py', '.pyi')):
mod = item.split('.')[0]
if mod not in hits:
hits.add(mod)
result += self.find_modules_recursive(module + '.' + mod)
elif os.path.isdir(module_path) and module in self.ns_packages:
            # Even subtler: handle recursive descent into PEP 420
# namespace packages that are explicitly listed on the command
# line with -p/--packages.
for item in sorted(self.fscache.listdir(module_path)):
if os.path.isdir(os.path.join(module_path, item)):
result += self.find_modules_recursive(module + '.' + item)
return result
def verify_module(fscache: FileSystemCache, id: str, path: str, prefix: str) -> bool:
"""Check that all packages containing id have a __init__ file."""
if path.endswith(('__init__.py', '__init__.pyi')):
path = os.path.dirname(path)
for i in range(id.count('.')):
path = os.path.dirname(path)
if not any(fscache.isfile_case(os.path.join(path, '__init__{}'.format(extension)),
prefix)
for extension in PYTHON_EXTENSIONS):
return False
return True
def highest_init_level(fscache: FileSystemCache, id: str, path: str, prefix: str) -> int:
"""Compute the highest level where an __init__ file is found."""
if path.endswith(('__init__.py', '__init__.pyi')):
path = os.path.dirname(path)
level = 0
for i in range(id.count('.')):
path = os.path.dirname(path)
if any(fscache.isfile_case(os.path.join(path, '__init__{}'.format(extension)),
prefix)
for extension in PYTHON_EXTENSIONS):
level = i + 1
return level
def mypy_path() -> List[str]:
path_env = os.getenv('MYPYPATH')
if not path_env:
return []
return path_env.split(os.pathsep)
def default_lib_path(data_dir: str,
pyversion: Tuple[int, int],
custom_typeshed_dir: Optional[str]) -> List[str]:
"""Return default standard library search paths."""
# IDEA: Make this more portable.
path = [] # type: List[str]
if custom_typeshed_dir:
typeshed_dir = custom_typeshed_dir
else:
auto = os.path.join(data_dir, 'stubs-auto')
if os.path.isdir(auto):
data_dir = auto
typeshed_dir = os.path.join(data_dir, "typeshed")
if pyversion[0] == 3:
# We allow a module for e.g. version 3.5 to be in 3.4/. The assumption
# is that a module added with 3.4 will still be present in Python 3.5.
versions = ["%d.%d" % (pyversion[0], minor)
for minor in reversed(range(PYTHON3_VERSION_MIN[1], pyversion[1] + 1))]
else:
# For Python 2, we only have stubs for 2.7
versions = ["2.7"]
# E.g. for Python 3.6, try 3.6/, 3.5/, 3.4/, 3/, 2and3/.
for v in versions + [str(pyversion[0]), '2and3']:
for lib_type in ['stdlib', 'third_party']:
stubdir = os.path.join(typeshed_dir, lib_type, v)
if os.path.isdir(stubdir):
path.append(stubdir)
# Add fallback path that can be used if we have a broken installation.
if sys.platform != 'win32':
path.append('/usr/local/lib/mypy')
if not path:
print("Could not resolve typeshed subdirectories. If you are using mypy\n"
"from source, you need to run \"git submodule update --init\".\n"
"Otherwise your mypy install is broken.\nPython executable is located at "
"{0}.\nMypy located at {1}".format(sys.executable, data_dir), file=sys.stderr)
sys.exit(1)
return path
@functools.lru_cache(maxsize=None)
def get_site_packages_dirs(python_executable: str) -> Tuple[List[str], List[str]]:
"""Find package directories for given python.
This runs a subprocess call, which generates a list of the egg directories, and the site
package directories. To avoid repeatedly calling a subprocess (which can be slow!) we
lru_cache the results."""
def make_abspath(path: str, root: str) -> str:
"""Take a path and make it absolute relative to root if not already absolute."""
if os.path.isabs(path):
return os.path.normpath(path)
else:
return os.path.join(root, os.path.normpath(path))
if python_executable == sys.executable:
# Use running Python's package dirs
site_packages = sitepkgs.getsitepackages()
else:
# Use subprocess to get the package directory of given Python
# executable
site_packages = ast.literal_eval(
subprocess.check_output([python_executable, sitepkgs.__file__],
stderr=subprocess.PIPE).decode())
egg_dirs = []
for dir in site_packages:
pth = os.path.join(dir, 'easy-install.pth')
if os.path.isfile(pth):
with open(pth) as f:
egg_dirs.extend([make_abspath(d.rstrip(), dir) for d in f.readlines()])
return egg_dirs, site_packages
def compute_search_paths(sources: List[BuildSource],
options: Options,
data_dir: str,
alt_lib_path: Optional[str] = None) -> SearchPaths:
"""Compute the search paths as specified in PEP 561.
There are the following 4 members created:
- User code (from `sources`)
- MYPYPATH (set either via config or environment variable)
- installed package directories (which will later be split into stub-only and inline)
- typeshed
"""
# Determine the default module search path.
lib_path = collections.deque(
default_lib_path(data_dir,
options.python_version,
custom_typeshed_dir=options.custom_typeshed_dir))
if options.use_builtins_fixtures:
# Use stub builtins (to speed up test cases and to make them easier to
# debug). This is a test-only feature, so assume our files are laid out
# as in the source tree.
# We also need to allow overriding where to look for it. Argh.
root_dir = os.getenv('MYPY_TEST_PREFIX', None)
if not root_dir:
root_dir = os.path.dirname(os.path.dirname(__file__))
lib_path.appendleft(os.path.join(root_dir, 'test-data', 'unit', 'lib-stub'))
# alt_lib_path is used by some tests to bypass the normal lib_path mechanics.
# If we don't have one, grab directories of source files.
python_path = [] # type: List[str]
if not alt_lib_path:
for source in sources:
# Include directory of the program file in the module search path.
if source.base_dir:
dir = source.base_dir
if dir not in python_path:
python_path.append(dir)
        # Do this even if running as a file, for sanity (mainly because with
        # multiple builds, there could be a mix of files/modules, so it's easier
        # to just define the semantics that we always add the current directory
        # to the lib_path).
        # TODO: Don't do this in some cases; for motivation see
# https://github.com/python/mypy/issues/4195#issuecomment-341915031
if options.bazel:
dir = '.'
else:
dir = os.getcwd()
if dir not in lib_path:
python_path.insert(0, dir)
# Start with a MYPYPATH environment variable at the front of the mypy_path, if defined.
mypypath = mypy_path()
# Add a config-defined mypy path.
mypypath.extend(options.mypy_path)
# If provided, insert the caller-supplied extra module path to the
# beginning (highest priority) of the search path.
if alt_lib_path:
mypypath.insert(0, alt_lib_path)
if options.python_executable is None:
egg_dirs = [] # type: List[str]
site_packages = [] # type: List[str]
else:
egg_dirs, site_packages = get_site_packages_dirs(options.python_executable)
for site_dir in site_packages:
assert site_dir not in lib_path
if (site_dir in mypypath or
any(p.startswith(site_dir + os.path.sep) for p in mypypath) or
os.path.altsep and any(p.startswith(site_dir + os.path.altsep) for p in mypypath)):
print("{} is in the MYPYPATH. Please remove it.".format(site_dir), file=sys.stderr)
print("See https://mypy.readthedocs.io/en/latest/running_mypy.html"
"#how-mypy-handles-imports for more info", file=sys.stderr)
sys.exit(1)
elif site_dir in python_path:
print("{} is in the PYTHONPATH. Please change directory"
" so it is not.".format(site_dir),
file=sys.stderr)
sys.exit(1)
return SearchPaths(tuple(reversed(python_path)),
tuple(mypypath),
tuple(egg_dirs + site_packages),
tuple(lib_path))
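# Hedged usage sketch of the finder defined above; the search-path entries and
# module id are illustrative, everything else follows the API in this file.
if __name__ == '__main__':
    _example_paths = SearchPaths(
        python_path=('/home/user/project',),  # hypothetical user-code directory
        mypy_path=(),
        package_path=(),
        typeshed_path=(),
    )
    _finder = FindModuleCache(_example_paths)
    _found = _finder.find_module('mypackage.module')  # hypothetical module id
    if isinstance(_found, ModuleNotFoundReason):
        _msg, _note = _found.error_message_templates()
        print(_msg.format('mypackage.module'))
    else:
        print('found at', _found)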
|
[
"poppessanha@gmail.com"
] |
poppessanha@gmail.com
|
dfcad5c303e88efcac418f48355af1de45e4e2f1
|
44d379302787c5b65a3f5a8143431a3c19c3604a
|
/c03django_advanced/fc_mall/product/models.py
|
cf2952d7e4af3eac297ba0600d632c597a9dc53d
|
[] |
no_license
|
Miniminis/python-study-note
|
71bde562be1111ea4cfae38613f604f4bfe3cba5
|
4b70c2f0ad0a449fcd6f213aeec69d1e40bd3fb2
|
refs/heads/master
| 2023-03-04T08:53:17.113739
| 2020-09-09T21:11:13
| 2020-09-09T21:11:13
| 237,565,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
from django.db import models

# Create your models here.
class FcMallProduct(models.Model):
    name = models.CharField(max_length=256, verbose_name='상품명')  # product name
    price = models.IntegerField(verbose_name='상품가격')  # product price
    description = models.TextField(verbose_name='상품설명')  # product description
    stuck = models.IntegerField(verbose_name='재고')  # stock on hand
    regdate = models.DateTimeField(auto_now_add=True, verbose_name='등록날짜')  # registration date

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'fcmall_product'
        verbose_name = '상품'  # product
        verbose_name_plural = '상품'
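# Hedged usage sketch (illustrative values; needs a configured project and DB):
#     p = FcMallProduct.objects.create(
#         name='sample', price=1000, description='demo item', stuck=5)
#     str(p)  # __str__ returns the product name: 'sample'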
|
[
"minhee4735@gmail.com"
] |
minhee4735@gmail.com
|
1d2ebb44e2ddc4c16590914e4341cf5e6514f071
|
bbedc4eaff6c69b620487caf14c2f54a05f79308
|
/contact/admin.py
|
1e3065979b6b121b035c771b319613b4f5d85591
|
[] |
no_license
|
venkatR065/perfectlearn
|
404db6d3da40972ab61865243015121b382e98f2
|
3332553ab9349316a58e84ca1ccdae9f0cf7382f
|
refs/heads/main
| 2023-07-15T06:33:12.903131
| 2021-08-19T10:30:20
| 2021-08-19T10:30:20
| 392,893,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
from django.contrib import admin
from .models import Banner, Contact, Email

admin.site.register(Contact)
admin.site.register(Email)
admin.site.register(Banner)
|
[
"mallarapuvenkataraju065@gmail.com"
] |
mallarapuvenkataraju065@gmail.com
|
090dd571209ba80b5cf1b896a2b0e700534bd1c5
|
9403d5c55b15a4185ad5394ef637a1f0373916a4
|
/logger/__init__.py
|
05870a6fab1788b3c9f07b06139fe2ea6111862b
|
[] |
no_license
|
KosoFF/EduCerts
|
fb267dbbfb88b7bdb7443f4a291b6af9c5a9e83d
|
bee9b76aac8d0c52df5ca35df34442c91ff34653
|
refs/heads/master
| 2022-12-11T23:52:11.225877
| 2018-05-16T01:13:56
| 2018-05-16T01:13:56
| 132,535,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
import logging
import os
log_handler = logging.StreamHandler()
try:
    log_level_name = os.environ['LOG_LEVEL']
except KeyError:  # fall back when LOG_LEVEL is not set
    log_level_name = 'INFO'
log_level = logging.getLevelName(log_level_name)
log_handler.setLevel(log_level)
log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
c_logger = logging.getLogger('EduCerts')
c_logger.addHandler(log_handler)
c_logger.setLevel(log_level)
c_logger.info('logger is initialized for %s' % (os.environ.get('HEROKU')))
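# Hedged usage sketch: other modules can retrieve the same configured logger
# by name, e.g.
#     import logging
#     logging.getLogger('EduCerts').info('message text')  # message is illustrative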
|
[
"a_bka@bk.ru"
] |
a_bka@bk.ru
|
df3811216e191550ef71e1dd876af6d38b448c33
|
4059df77acb4a4472935c41e265da6635d500857
|
/findCountr.py
|
e3fa4be254ed94b55d3106535abc17887b9f578a
|
[
"MIT"
] |
permissive
|
swapnilmarathe007/Handwriting-Recognition
|
857e664c850d09bc2366b5eb1949f34610b57e21
|
1c0430ec528573a2022d5059ef20243c39980776
|
refs/heads/master
| 2020-04-07T15:44:17.788455
| 2018-11-21T06:16:51
| 2018-11-21T06:16:51
| 158,498,803
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
# Import all necessary Packages
import cv2
import numpy as np
import matplotlib.pyplot as plt
image_loc = "testing_!.png"
img = cv2.imread(image_loc)
gray = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)
#threshold Image
ret , thresh = cv2.threshold(gray , 127 , 255 , cv2.THRESH_BINARY)
#find contour
image , contour , hier = cv2.findContours(thresh , cv2.RETR_TREE , cv2.CHAIN_APPROX_SIMPLE)
val = 0
for c in contour:
x,y,w,h = cv2.boundingRect(c)
#draw a green rectangle
cv2.rectangle(img , (x,y) , (x+w , y+h) , (0,255,0) , 2)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img , [box] , 0 ,(0,0,255))
# if (w > 50 and h > 60):
#saving as images
# if()
roi = thresh[y:y+h, x:x+w]
roi = cv2.resize(roi,None,fx=0.5,fy=0.5,interpolation=cv2.INTER_AREA)
print ("height",h,"width",w)
h = h //2
w = w // 2
print ("height",h,"width",w)
if((100 - h) % 2 == 0):
bordersize_top = ( 100 - (h) ) // 2
bordersize_bottom = bordersize_top
print ("HI ")
else:
        bordersize_top = (( 100 - (h) ) // 2) + 1
bordersize_bottom = bordersize_top - 1
print("else")
if((100 - w) % 2 == 0):
bordersize_right = ( 100 - (w) ) // 2
bordersize_left = bordersize_right
else:
bordersize_right = (( 100 - (w) ) // 2) + 1
bordersize_left = bordersize_right - 1
print(bordersize_top , bordersize_bottom , bordersize_left , bordersize_right)
mean = 255
try:
ro = cv2.copyMakeBorder(roi, top=bordersize_top, bottom=bordersize_bottom, left=bordersize_left, right=bordersize_right, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean] )
filename = "roi_"+str(val)+".png"
cv2.imwrite(filename , ro)
except:
pass
plt.imshow(roi)
val += 1
print(len(contour))
cv2.drawContours(img, contour, -1, (255, 255, 0), 1)
plt.imshow( img)
ESC = 27
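# Hedged sketch of a tidier way to compute the centering borders used above:
# pad any grayscale (h, w) patch to size x size, letting bottom/right absorb
# the odd pixel instead of branching on parity (the target size is an assumption).
def pad_to_square(patch, size=100):
    top = (size - patch.shape[0]) // 2
    bottom = size - patch.shape[0] - top
    left = (size - patch.shape[1]) // 2
    right = size - patch.shape[1] - left
    return cv2.copyMakeBorder(patch, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=255)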
|
[
"swapnilm@winjit.com"
] |
swapnilm@winjit.com
|
2f7fe65dbbd8485f1fe5e9a6204e7b6eb9ad821c
|
62114103a9254006b49ab209be50d9b785ea0ed3
|
/store/models.py
|
c7f1184790240abb0501b45943b6f5fabaab2301
|
[] |
no_license
|
sudeepkhandekar99/tbw-dev
|
8655d29c147e3e2efadd563c32acad6f44d3fbd7
|
1da4688d6f42d2e5dea4bb904cdd4d522ba48815
|
refs/heads/main
| 2023-05-09T23:47:08.096067
| 2021-06-03T18:12:00
| 2021-06-03T18:12:00
| 373,600,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,358
|
py
|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
import random
# Create your models here.
class Product(models.Model):
name = models.CharField(max_length = 50)
uniqueCode = models.CharField(max_length=10, unique=True, null=False, default = "")
price = models.FloatField()
priceNoDiscount = models.FloatField(default=0.0)
desc = models.TextField()
size = models.CharField(max_length=2)
img = models.ImageField(null=True, blank=True)
def __str__(self):
return self.name
    @property
    def imageURL(self):
        try:
            url = self.img.url  # the image field is named 'img'; .url raises ValueError when empty
        except ValueError:
            url = ''
        return url
class Order(models.Model):
payment_status_choices = (
(1, 'SUCCESS'),
(2, 'FAILURE' ),
(3, 'PENDING'),
)
customer = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
    date_ordered = models.DateTimeField(null=False, default=datetime.now)  # pass the callable so the default is computed per row, not at import time
complete = models.BooleanField(default=False)
transaction_id = models.CharField(max_length=100, null=True)
order_id = models.CharField(unique=True, max_length=100, null=True, blank=True, default=None)
razorpay_order_id = models.CharField(max_length=500, null=True, blank=True)
razorpay_payment_id = models.CharField(max_length=500, null=True, blank=True)
razorpay_signature = models.CharField(max_length=500, null=True, blank=True)
total_amount = models.FloatField(default=0.0)
payment_status = models.IntegerField(choices = payment_status_choices, default=3)
def __str__(self):
return str(self.id)
@property
    def shipping(self):
        shipping = False
        orderitems = self.orderitem_set.all()
        for i in orderitems:
            # Product defines no 'digital' field; treat a missing flag as a physical item
            if not getattr(i.product, 'digital', False):
                shipping = True
        return shipping
@property
def get_cart_total(self):
orderitems = self.orderitem_set.all()
total = sum([item.get_total for item in orderitems])
return round(total, 3)
@property
def get_cart_items(self):
orderitems = self.orderitem_set.all()
total = sum([item.quantity for item in orderitems])
return total
def save(self, *args, **kwargs):
if self.order_id is None and self.date_ordered and self.id:
self.order_id = 'ODR' + self.transaction_id + str(self.id)
return super().save(*args, **kwargs)
class OrderItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
quantity = models.IntegerField(default=0, null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
size = models.CharField(max_length=4, default="M")
color = models.CharField(max_length=50, default="Yellow")
@property
def get_total(self):
total = self.product.price * self.quantity
return total
class ShippingAddress(models.Model):
customer = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
address = models.CharField(max_length=200, null=False)
city = models.CharField(max_length=200, null=False)
state = models.CharField(max_length=200, null=False)
zipcode = models.CharField(max_length=200, null=False)
date_added = models.DateTimeField(null=False)
def __str__(self):
return self.address
class AllOrders(models.Model):
customer = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
order_details = models.TextField(null=True, default="")
shipment = models.ForeignKey(ShippingAddress, on_delete=models.SET_NULL, null=True)
date_ordered = models.DateTimeField(auto_now_add=True , null=True)
order_id_unique = models.CharField(max_length = 100 , null=True)
address = models.CharField(max_length = 100 , null=True)
city = models.CharField(max_length = 50 , null=True)
state = models.CharField(max_length = 50 , null=True)
zipcode = models.CharField(max_length = 50 , null=True)
total_items = models.IntegerField(null=True)
total_amount = models.FloatField(null=True)
razorpay_order_id = models.CharField(max_length = 500 , null=True)
razorpay_payment_id = models.CharField(max_length = 500 , null=True)
def __str__(self):
tempId = str(random.randint(0, 1000))
return self.customer.username + tempId + self.order.date_ordered.strftime('%Y%m%d')
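# Hedged usage sketch (illustrative names; needs a configured project and DB):
#     order = Order.objects.create(customer=some_user, transaction_id='TX1')
#     OrderItem.objects.create(order=order, product=some_product, quantity=2)
#     order.get_cart_items   # -> 2
#     order.get_cart_total   # -> 2 * some_product.price, rounded to 3 places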
|
[
"sudeepkhandekar99@gmail.com"
] |
sudeepkhandekar99@gmail.com
|
5770346abe4a48d958e623f79a9c61097958d00d
|
f26dd860c8d764fc7a47bde656f393795cd8d763
|
/a4.py
|
782ca2447ff6d44c33f8b927c65ae5bdb2e6972a
|
[] |
no_license
|
chokkuu1998/david
|
8e9fa162f657c8b9bb55502f1cdd730a08ff0235
|
4dc999cdb73383b5a5d7ed3d98b2c1a4d6b5f7ee
|
refs/heads/master
| 2020-03-28T17:05:04.046963
| 2019-07-16T08:07:37
| 2019-07-16T08:07:37
| 148,756,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
Xx = input()
Yy = Xx.title()
if Xx != Yy:
    print("no")
else:
    print("yes")
|
[
"noreply@github.com"
] |
chokkuu1998.noreply@github.com
|
81f0ea6e0baef78f3bdcc0825839d08dc09898fc
|
79768b5999f9208484a0cf443eaaf82c3b2bdc56
|
/GUI Visualization/color.py
|
270e16323cf48556c4896637ba90b0d3017a74a6
|
[
"MIT"
] |
permissive
|
RezaFirouzii/a-star_algorithm_visualization
|
a7ab82d987ca188e6905afbe7dccae96c76e490f
|
08be4dca454c1125c7f6bd96df6f61959c5ef777
|
refs/heads/main
| 2023-04-14T00:27:19.318531
| 2021-04-10T16:34:43
| 2021-04-10T16:34:43
| 356,626,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
# bunch of color constants
class Color:
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
BLUE = (0, 128, 255)
GREEN = (0, 153, 0)
YELLOW = (255, 255, 0)
BROWN = (204, 102, 0)
PINK = (255, 102, 178)
PURPLE = (153, 51, 255)
GREY = (128, 128, 128)
colors = {
1: WHITE,
2: YELLOW,
3: RED,
4: BLUE,
5: GREEN,
6: BLACK,
7: BROWN,
8: PINK,
9: PURPLE,
10: GREY
}
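# Hedged usage sketch: constants can be read directly or via the lookup table.
if __name__ == '__main__':
    print(Color.GREEN)      # (0, 153, 0)
    print(Color.colors[4])  # (0, 128, 255), i.e. Color.BLUE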
|
[
"sr2000f@gmail.com"
] |
sr2000f@gmail.com
|
4eb2b24396476c48355759b19e71e09945090a55
|
e2cede75302ba926fe0cb6fffd608b4352f336ab
|
/patch.py
|
1afad284f5c1027ac7aeb75c1c4e4840da217b9a
|
[] |
no_license
|
stevenbraham/pi-aware-extra-info
|
6c6c81bdb4e90819641011110b6dfc1be3f74645
|
045b4723e7f3571a83fc9f3ec91b99c6117047ea
|
refs/heads/master
| 2020-12-24T06:06:27.635840
| 2016-11-08T23:29:50
| 2016-11-08T23:29:50
| 73,228,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Read the page, splice the script tag in just before the closing tags, rewrite.
with open("index.html", 'r') as f:
    data = [line.strip() for line in f]
data.insert(len(data) - 2, '<script src="./pi-aware-extra-info.js"></script>')
with open('index.html', 'w') as f:
    for line in data:
        f.write(line + '\n')
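# Hedged illustration of the insertion index on a toy page (run standalone):
if __name__ == '__main__':
    demo = ['<html>', '<body>', 'content', '</body>', '</html>']
    demo.insert(len(demo) - 2, '<script src="./pi-aware-extra-info.js"></script>')
    print(demo[-3])  # the script tag now sits just before </body>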
|
[
"steven@braham.biz"
] |
steven@braham.biz
|
0d9d38e56f67375ccfe16fab62b67589fb05824b
|
157d0810d40bbb165889f946566346663cf5b22f
|
/Python-For-Everyone-Horstmann/Chapter8-Sets-and-Dictionaries/P8_9.py
|
5ad8eec093f3621b0a6870fef90d50450e432ed1
|
[] |
no_license
|
dg5921096/Books-solutions
|
e6ccdcaba0294bdc95e2267723a02d2ba090cb10
|
31bb4bba240bf95aafeb6d189eade62c66a1765a
|
refs/heads/master
| 2021-12-09T16:07:47.756390
| 2021-11-14T07:09:25
| 2021-11-14T07:09:25
| 255,447,147
| 0
| 0
| null | 2020-04-13T21:39:02
| 2020-04-13T21:39:01
| null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
# Write a program that asks a user to type in two strings and that prints
# • the characters that occur in both strings.
# • the characters that occur in one string but not the other.
# • the letters that don’t occur in either string.
# Use the set function to turn a string into a set of characters.
# FUNCTIONS
def shared_characters(string_a, string_b):
string_a = set(string_a)
string_b = set(string_b)
return ", ".join(sorted(string_a.intersection(string_b)))
def unique_characters(string_a, string_b):
string_a = set(string_a)
string_b = set(string_b)
return ", ".join(sorted(string_a.difference(string_b).union(string_b.difference(string_a))))
def non_occurring_letters(string_a, string_b):
alphabet = set("abcdefghijklomnopqrstuvwxyz")
string_a = set(string_a)
string_b = set(string_b)
return ", ".join(sorted(alphabet - string_a.union(string_b)))
# main
def main():
string_a = str(input("Enter the first string: "))
string_b = str(input("Enter the second string: "))
print("Shared characters:")
print(shared_characters(string_a, string_b))
print("Unique characters")
print(unique_characters(string_a, string_b))
print("Non-occuring alphabet letters:")
print(non_occurring_letters(string_a, string_b))
# PROGRAM RUN
if __name__ == "__main__":
main()
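# Hedged illustration of the set identities used above, on toy inputs:
#     set('apple') & set('grape') == {'a', 'e', 'p'}   (shared characters)
#     set('apple') ^ set('grape') == {'g', 'l', 'r'}   (characters unique to one string)
# i.e. a.difference(b).union(b.difference(a)) equals the symmetric difference a ^ b.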
|
[
"syndbe@gmail.com"
] |
syndbe@gmail.com
|
4abfddd2014cf989c845810569a277f34bc42ff3
|
f62598a462689aafc489a24c7b1526ad7e356536
|
/mysite/views.py
|
071a0c4aa6c8f16d3fc8ee92caa4cd1b7221f81f
|
[] |
no_license
|
JJ-project/django_web_programing
|
5fcf45bb6293849211d77b4d2ddae8911c53cccd
|
b1ee23f5fd686ddf43720d03780bf961e31d55f1
|
refs/heads/master
| 2020-09-12T15:07:05.672027
| 2019-11-23T08:12:00
| 2019-11-23T08:12:00
| 222,461,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from django.views.generic.base import TemplateView
from django.apps import apps
class HomeView(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
#context['app_list'] = ['polls', 'books']
dictVerBose = {}
        # Calling get_app_configs() on Django's apps registry returns the AppConfig
        # instances for every app registered in INSTALLED_APPS in settings.py.
        for app in apps.get_app_configs():
            print(app)
            print(app.path)
            if 'site-packages' not in app.path:  # skip third-party apps whose path contains 'site-packages'
dictVerBose[app.label] = app.verbose_name
#app.label:books
#app.verbose_name:Book-Author-Publisher App
print("app.verbose_name:"+app.verbose_name)
context['verbose_dict'] = dictVerBose
return context
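# Hedged sketch of what the template receives, assuming two local apps
# 'polls' and 'books' (labels and verbose names are illustrative):
#     context['verbose_dict'] == {'polls': 'Polls', 'books': 'Book-Author-Publisher App'}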
|
[
"jinjin2836@gmail.com"
] |
jinjin2836@gmail.com
|
1baac10753e7043c8e72620f0ae434a8d30e89af
|
78d35bb7876a3460d4398e1cb3554b06e36c720a
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_route_tables_operations.py
|
2d995d5f877e517415f08d401d405cc413ac20b3
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
catchsrinivas/azure-sdk-for-python
|
e35f59b60318a31b3c940a7a3a07b61b28118aa5
|
596227a7738a5342274486e30489239d539b11d1
|
refs/heads/main
| 2023-08-27T09:08:07.986249
| 2021-11-11T11:13:35
| 2021-11-11T11:13:35
| 427,045,896
| 0
| 0
|
MIT
| 2021-11-11T15:14:31
| 2021-11-11T15:14:31
| null |
UTF-8
|
Python
| false
| false
| 29,628
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
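# Usage sketch (resource names are hypothetical; assumes an authenticated async
# NetworkManagementClient built with azure-identity):
# poller = await client.route_tables.begin_create_or_update(
#     "my-resource-group", "my-route-table", {"location": "westus"})
# route_table = await poller.result()
# async for table in client.route_tables.list_all():
#     print(table.name)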
|
[
"noreply@github.com"
] |
catchsrinivas.noreply@github.com
|
3d196d8263c79fa57d8b2a733812641161b65a5b
|
c41f84ac71ce24b741ec1ffd714646078d6c0b8c
|
/spider_practice/scapyProject/stockEastmoney/stockEastmoney/settings.py
|
caaf6f2a3062b893209036e4fe2c70d2024ca193
|
[] |
no_license
|
0xliang/python3
|
b044327cbdcf6af07ad7f11cad7c1e9424dcd9a0
|
a70d9f37cd71070bb5149abaf998a18fc754fc75
|
refs/heads/master
| 2023-03-22T18:29:00.383317
| 2021-03-15T17:44:55
| 2021-03-15T17:44:55
| 298,767,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
# Scrapy settings for stockEastmoney project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'stockEastmoney'
SPIDER_MODULES = ['stockEastmoney.spiders']
NEWSPIDER_MODULE = 'stockEastmoney.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'stockEastmoney (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'stockEastmoney.middlewares.StockeastmoneySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'stockEastmoney.middlewares.StockeastmoneyDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'stockEastmoney.pipelines.StockeastmoneyPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
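# A minimal sketch of turning on a few of the settings documented above for a
# politer crawl (values are illustrative, not from the original project):
# DOWNLOAD_DELAY = 1
# CONCURRENT_REQUESTS_PER_DOMAIN = 8
# AUTOTHROTTLE_ENABLED = True
# ITEM_PIPELINES = {
#     'stockEastmoney.pipelines.StockeastmoneyPipeline': 300,
# }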
|
[
"lyqmichaelowen@gmail.com"
] |
lyqmichaelowen@gmail.com
|
94a4c8120d37eabcfee7b87fa13c1a359cc05290
|
77f4ffa8572ef6f57b6b26cb1ce985a06109d8af
|
/main.py
|
0233038af5576d2312e81ddf6690ab96fc2466af
|
[] |
no_license
|
szzzh/zh-NER
|
ced616a658298a52cbe92adaa55eda1db0457673
|
310019aa4f6827ec4ee3f8a4859943a55ec1d097
|
refs/heads/master
| 2020-03-22T13:08:22.511578
| 2018-07-07T23:40:31
| 2018-07-07T23:40:31
| 140,086,179
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,605
|
py
|
import tensorflow as tf
import numpy as np
import os, argparse, time, random
from model import BiLSTM_CRF
from utils import str2bool, get_logger, get_entity
from data import read_corpus, read_dictionary, random_embedding, read_dict, sentence2id
## Session configuration
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # default: 0
config = tf.ConfigProto()
config.gpu_options.allow_growth = False #True
config.gpu_options.per_process_gpu_memory_fraction = 0.2 # need ~700MB GPU memory
## hyperparameters
parser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')
parser.add_argument('--train_data', type=str, default='data_path', help='train data source')
parser.add_argument('--test_data', type=str, default='data_path', help='test data source')
parser.add_argument('--batch_size', type=int, default=64, help='#sample of each minibatch')
parser.add_argument('--epoch', type=int, default=40, help='#epoch of training')
parser.add_argument('--hidden_dim', type=int, default=300, help='#dim of hidden state')
parser.add_argument('--optimizer', type=str, default='Adam', help='Adam/Adadelta/Adagrad/RMSProp/Momentum/SGD')
parser.add_argument('--CRF', type=str2bool, default=True, help='use CRF at the top layer. if False, use Softmax')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--clip', type=float, default=5.0, help='gradient clipping')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout keep_prob')
parser.add_argument('--update_embedding', type=str2bool, default=True, help='update embedding during training')
parser.add_argument('--pretrain_embedding', type=str, default='random', help='use pretrained char embedding or init it randomly')
parser.add_argument('--embedding_dim', type=int, default=50, help='random init char embedding_dim')
parser.add_argument('--shuffle', type=str2bool, default=True, help='shuffle training data before each epoch')
parser.add_argument('--mode', type=str, default='demo', help='train/test/demo')
parser.add_argument('--demo_model', type=str, default='1521112368', help='model for test and demo')
args = parser.parse_args()
## get char embeddings
word2id = read_dictionary(os.path.join('.', args.train_data, 'word2id.pkl'))
word2dictname, dictname2id = read_dict(os.path.join('.', args.train_data, 'dict/'))
#print(word2dictname, dictname2id)
if args.pretrain_embedding == 'random':
embeddings = random_embedding(word2id, args.embedding_dim)
else:
embedding_path = 'pretrain_embedding.npy'
embeddings = np.array(np.load(embedding_path), dtype='float32')
## paths setting
paths = {}
timestamp = str(int(time.time())) if args.mode == 'train' else args.demo_model
output_path = os.path.join('.', args.train_data+"_save", timestamp)
if not os.path.exists(output_path): os.makedirs(output_path)
summary_path = os.path.join(output_path, "summaries")
paths['summary_path'] = summary_path
if not os.path.exists(summary_path): os.makedirs(summary_path)
model_path = os.path.join(output_path, "checkpoints/")
if not os.path.exists(model_path): os.makedirs(model_path)
ckpt_prefix = os.path.join(model_path, "model")
paths['model_path'] = ckpt_prefix
result_path = os.path.join(output_path, "results")
paths['result_path'] = result_path
if not os.path.exists(result_path): os.makedirs(result_path)
log_path = os.path.join(result_path, "log.txt")
paths['log_path'] = log_path
get_logger(log_path).info(str(args))
## training model
if args.mode == 'train':
model = BiLSTM_CRF(args, embeddings, dictname2id, word2id, paths, config=config)
model.build_graph()
train_path = os.path.join('.', args.train_data, 'train.txt')
train_data = read_corpus(train_path, word2id, word2dictname, dictname2id)
test_path = os.path.join('.', args.test_data, 'test.txt')
test_data = read_corpus(test_path, word2id, word2dictname, dictname2id); test_size = len(test_data)
## train model on the whole training data
print("train data: {}".format(len(train_data)))
model.train(train=train_data, dev=test_data) # use test_data as the dev_data to see overfitting phenomena
## testing model
elif args.mode == 'test':
ckpt_file = tf.train.latest_checkpoint(model_path)
print(ckpt_file)
paths['model_path'] = ckpt_file
model = BiLSTM_CRF(args, embeddings, dictname2id, word2id, paths, config=config)
model.build_graph()
test_path = os.path.join('.', args.test_data, 'test.txt')
test_data = read_corpus(test_path, word2id, word2dictname, dictname2id); test_size = len(test_data)
print("test data: {}".format(test_size))
model.test(test_data)
## demo
elif args.mode == 'demo':
ckpt_file = tf.train.latest_checkpoint(model_path)
print(ckpt_file)
paths['model_path'] = ckpt_file
model = BiLSTM_CRF(args, embeddings, dictname2id, word2id, paths, config=config)
model.build_graph()
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
print('============= demo =============')
saver.restore(sess, ckpt_file)
while(1):
print('Please input your sentence:')
demo_sent = input()
if demo_sent == '' or demo_sent.isspace():
print('See you next time!')
break
else:
demo_sent = list(demo_sent.strip())
demo_data = [(sentence2id(demo_sent, word2id), [0] * len(demo_sent))]
tag = model.demo_one(sess, demo_data)
res = get_entity(tag[0], demo_sent, dictname2id)
print(res)
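# Usage sketch (flags mirror the argparse options defined above; the demo
# model timestamp is the default placeholder, not a real checkpoint):
#   python main.py --mode train --train_data data_path --epoch 40
#   python main.py --mode test --demo_model 1521112368
#   python main.py --mode demo --demo_model 1521112368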
|
[
"szzzh8@gmail.com"
] |
szzzh8@gmail.com
|
6bbac76293004570878b2004f4f9bf9722929607
|
79c290a743557ef0fdf41e816bdfc247287a7ca3
|
/booking-hackathon-2015/travel-profiles.py
|
fa8419f092cb235aa2557916dbf94f905d56b742
|
[] |
no_license
|
paulorodriguesxv/hackerrank
|
b836c385b4bd6e33cd222dfeb34592046a815f15
|
00a03e18cea3cd0769047d1c0eefdeb6ba02369c
|
refs/heads/master
| 2021-05-01T05:09:10.865405
| 2017-08-26T23:59:25
| 2017-08-26T23:59:25
| 79,718,137
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
f = open('travel-profiles.txt', 'r')
from functools import cmp_to_key
class Budgets(object):
def __init__(self, stream):
liststream = stream.split()
self.price = float(liststream[0])
self.facilities = liststream[1:]
class Hotel(object):
def __init__(self, stream):
liststream = stream.split()
self.aid = liststream[0]
self.price = float(liststream[1])
self.facilities = liststream[2:]
def populateHotels():
hotels = []
qtde = int(f.readline())
for index in range(qtde):
hotels.append( Hotel(f.readline()))
return hotels
def populateTestCases():
tests = []
qtde = int(f.readline())
for index in range(qtde):
tests.append( Budgets(f.readline()))
return tests
hotellist = populateHotels()
testlist = populateTestCases()
def getHotelWithFacilities(facilities):
result = {}
s1 = set(facilities)
for hotel in hotellist:
s2 = set(hotel.facilities)
if s1 == s1.intersection(s2):
result[hotel.aid] = hotel
return result
def getHotelWithPrices(price, hotels):
return [hotels[x] for x in hotels if hotels[x].price <= price]
def sorthotel(comp1, comp2):
if len(comp1.facilities) > len(comp2.facilities):
return -1
elif len(comp1.facilities) == len(comp2.facilities):
if comp1.price < comp2.price:
return -1
elif comp1.price == comp2.price:
if comp1.aid < comp2.aid:
return -1
else:
return 1
else:
return 1
else:
return 1
def executeTestCases():
for test in testlist:
hotelfinal = getHotelWithFacilities(test.facilities)
hotelfinal = getHotelWithPrices(test.price, hotelfinal)
hotelfinal = sorted(hotelfinal, key=cmp_to_key(sorthotel)) # sorted() takes a key function, so wrap the comparator
output = ''
for hotel in hotelfinal:
output += hotel.aid + ' '
print(output)
executeTestCases()
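# Expected layout of travel-profiles.txt, as implied by the readers above
# (values are illustrative):
#   2                   <- number of hotels
#   h1 100.0 wifi pool
#   h2 120.0 wifi
#   1                   <- number of test budgets
#   110.0 wifi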
|
[
"paulorodriguesxv@gmail.com"
] |
paulorodriguesxv@gmail.com
|
2bada593738d64fac83332d82048bd0c5b4afb46
|
4d16f6eeb98ab37519bb08fecd17640e77191630
|
/test/camera.py
|
6b44c5357e922c87c27d0aee35ed53f81a44e359
|
[
"MIT"
] |
permissive
|
alvarlagerlof/ball-pid
|
94319b7b8458ad104cd25a288889205b33350b8f
|
6122e729782750818449645f97db41b31503a9aa
|
refs/heads/master
| 2021-06-25T20:43:46.424780
| 2020-10-19T10:38:01
| 2020-10-19T10:38:01
| 160,885,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from threading import Thread
import numpy as np
import cv2
import imutils
import time
class Camera:
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
time.sleep(1)
Thread(target=self.update, args=()).start()
print("[init] Camera")
def update(self):
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
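# Usage sketch (assumes a camera at index 0 and OpenCV available):
# cam = Camera(0)
# frame = cam.read()
# cv2.imwrite("frame.png", frame)
# cam.stop()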
|
[
"alvar.lagerlof@gmail.com"
] |
alvar.lagerlof@gmail.com
|
ef35a44d9964f90aa902985117d12968ef6d6134
|
892f8ff633ec90e4fc77066dfb3d2df3ed8684b4
|
/ryo_iwata_python_self_assessment/python_code_wars/consecutive_strings.py
|
d7a60e577b70b7811db8fbfe0145c4a7b0fdcb57
|
[] |
no_license
|
ryoiwata/galvanize_python_self_assessment
|
8e321bf7ab55603bd7efe6192a8321c93fa5badb
|
d4ac564a3a4f2ba81264b36b24fc6fff33a4f0b0
|
refs/heads/master
| 2020-04-12T07:07:02.599358
| 2018-12-18T23:38:46
| 2018-12-18T23:38:46
| 162,357,446
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
"""
codewars.com/kata/56a5d994ac971f1ac500003e
given an array of strings and an integer k,
return the longest concatenation of k consecutive strings (in the order of the array)
"""
def longest_consec(strarr, k):
if k <= 0 or len(strarr) == 0 or k > len(strarr):
return("")
turn_num = 0
list_of_combined_words = []
for turn in range(len(strarr) - k + 1): # the range must be such that it's possible to have k combinations of words
combined_word = ""
for word in strarr[turn_num:turn_num + k]: #this allows strarr to be iterated through by k words at a time
combined_word += word
list_of_combined_words.append(combined_word)
turn_num += 1
return(max(list_of_combined_words, key = len))
print(longest_consec(["zone", "abigail", "theta", "form", "libe", "zas"], 2))
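# The call above prints "abigailtheta": of the consecutive pairs
# zone+abigail, abigail+theta, theta+form, form+libe and libe+zas,
# "abigailtheta" (12 characters) is the longest concatenation.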
|
[
"noreply@github.com"
] |
ryoiwata.noreply@github.com
|
df8b60d39f7964d979deab8c142b7f5d70d5f1f8
|
33ff050337ba4575042032d9602bf84dcf81435e
|
/test/functional/p2p_invalid_tx.py
|
8dc06d4c89637c54047786113286b12e17194b2c
|
[
"MIT"
] |
permissive
|
robinadaptor/chronon
|
5256b33fbe797bbdeb9c9a3c2091f0592afe6614
|
630b3945824c1b1cd2ea67ca80835a9f669b9124
|
refs/heads/master
| 2020-07-11T06:27:01.758237
| 2019-12-17T20:53:48
| 2019-12-17T20:53:48
| 145,383,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
'''
In this test we connect to one node over p2p, and test tx requests.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):
'''
Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison.
'''
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = int(time.time())+1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
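# Usage sketch: functional tests in this framework are normally invoked
# directly (python3 test/functional/p2p_invalid_tx.py) or through the
# repository's test runner; paths are assumed from the file location above.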
|
[
"robin.adaptor@gmail.com"
] |
robin.adaptor@gmail.com
|
ecc0ca086c3e3f050cb57d80121670b8e2ae4f81
|
f4f0235c9f4cc33f3cc83b2d93ccbb6f5fedabe7
|
/flask_boilerplate/models.py
|
3040337e970e1f174d5b33fb63a16d53e0ac166a
|
[] |
no_license
|
isaacchansky/flask_boilerplate
|
b5b894a7705290cef8dd8d3dfb89ee10282112a9
|
1f715a8412e5d13b71199c41ce1f14824f2aef5d
|
refs/heads/master
| 2020-04-12T18:47:30.862224
| 2013-11-07T16:12:58
| 2013-11-07T16:12:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
"""
My Flask App models.
"""
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, unique=True, nullable=False)
email = db.Column(db.String, unique=True, nullable=False)
passhash = db.Column(db.String, nullable=False)
salt = db.Column(db.String, nullable=False)
def __init__(self, username=None, email=None, passhash=None, salt=None):
self.username = username
self.email = email
self.passhash = passhash
self.salt = salt
def __repr__(self):
return '<User "{username}">'.format(username=self.username)
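# Usage sketch (assumes db.init_app(app) has been called and tables exist):
# user = User(username="alice", email="alice@example.com",
#             passhash="<hash>", salt="<salt>")
# db.session.add(user)
# db.session.commit()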
|
[
"ichansky@gmail.com"
] |
ichansky@gmail.com
|
9de5f420c7903864615b26079f346ea3cebeab6b
|
e15073438738794f6aab56ddf8f7efc8fea6733f
|
/models/wide_resnet.py
|
f3ced74959cd5c11be75e5829f94daa58a82973b
|
[] |
no_license
|
myunghakLee/DataAugmentationWithNoise
|
e1568957ff8ecaf25d742b546887a9c279e5412f
|
327c2a6592e509e5ce7b230b68cc185641227666
|
refs/heads/master
| 2022-11-15T00:27:54.424266
| 2020-07-09T10:32:03
| 2020-07-09T10:32:03
| 276,308,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,701
|
py
|
# Dropout has been removed in this code; the original version had dropout.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys, os
import numpy as np
import random
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import to_one_hot, mixup_process, get_lambda
from load_data import per_image_standardization
act = torch.nn.ReLU()
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform(m.weight, gain=np.sqrt(2))
init.constant(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant(m.weight, 1)
init.constant(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.conv1(act(self.bn1(x)))
out = self.conv2(act(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, num_classes, per_img_std= False, stride = 1):
super(Wide_ResNet, self).__init__()
self.num_classes = num_classes
self.per_img_std = per_img_std
self.in_planes = 16
assert ((depth-4)%6 ==0), 'Wide-resnet_v2 depth should be 6n+4'
n = int((depth-4)/6)
k = widen_factor
print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3,nStages[0], stride = stride)
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes
return nn.Sequential(*layers)
"""
## Modified WRN architecture###
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
assert ((depth-4)%6 ==0), 'Wide-resnet_v2 depth should be 6n+4'
n = (depth-4)/6
k = widen_factor
#self.mixup_hidden = mixup_hidden
print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3,nStages[0])
self.bn1 = nn.BatchNorm2d(nStages[0])
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
#self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
"""
def forward(self, x, target= None, mixup=False, mixup_hidden=False, mixup_alpha=None, layer_num_out=None):
#print x.shape
if self.per_img_std:
x = per_image_standardization(x)
if mixup_hidden:
layer_mix = random.randint(0,2)
elif mixup:
layer_mix = 0
else:
layer_mix = None
out = x
if mixup_alpha is not None:
lam = get_lambda(mixup_alpha)
lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
lam = Variable(lam)
if target is not None :
target_reweighted = to_one_hot(target,self.num_classes)
if layer_mix == 0:
out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)
out = self.conv1(out)
out = self.layer1(out)
if layer_mix == 1:
out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)
if layer_num_out == 1:
out_tmp = Variable(out.detach().data, requires_grad=False)
out = self.layer2(out)
if layer_mix == 2:
out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)
if layer_num_out == 2:
out_tmp = Variable(out.detach().data, requires_grad=False)
out = self.layer3(out)
if layer_mix == 3:
out, target_reweighted = mixup_process(out, target_reweighted, lam=lam)
if layer_num_out == 3:
out_tmp = Variable(out.detach().data, requires_grad=False)
out = act(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
if layer_num_out == 4:
out_tmp = Variable(out.detach().data, requires_grad=False)
if layer_num_out is not None:
return out, target_reweighted, out_tmp
if target is not None:
return out, target_reweighted
else:
return out
def forward_n_layers(self, x, target= None, mixup=False, mixup_hidden=False, mixup_alpha=None, layer_num=None):
#print x.shape
if self.per_img_std:
x = per_image_standardization(x)
out = x
if target is not None :
target_reweighted = to_one_hot(target,self.num_classes)
out = self.conv1(out)
out = self.layer1(out)
if layer_num==1:
return out, target_reweighted
out = self.layer2(out)
if layer_num==2:
return out, target_reweighted
out = self.layer3(out)
if layer_num==3:
return out, target_reweighted
out = act(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out, target_reweighted
def wrn28_10(num_classes=10, dropout = False, per_img_std = False, stride = 1):
#print ('this')
model = Wide_ResNet(depth=28, widen_factor=10, num_classes=num_classes, per_img_std = per_img_std, stride = stride)
return model
def wrn28_2(num_classes=10, dropout = False, per_img_std = False, stride = 1):
#print ('this')
model = Wide_ResNet(depth =28, widen_factor =2, num_classes = num_classes, per_img_std = per_img_std, stride = stride)
return model
if __name__ == '__main__':
net = Wide_ResNet(28, 10, 10) # active signature is (depth, widen_factor, num_classes, ...); passing a dropout rate here would break nn.Linear
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
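# The smoke test feeds one 3x32x32 input through a 10-class WRN-28-10,
# so the expected output is torch.Size([1, 10]).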
|
[
"mh9716@naver.com"
] |
mh9716@naver.com
|
fc4c5ad27e7a405838aecbb8fccca26d2fbbd4b7
|
59a1151a9236ecc037c54711eb8ff0ad8e88558e
|
/C1 sequence/C1example/oefening 10.py
|
97b502efa524d270c1664002b98073f72c216159
|
[] |
no_license
|
brandonmorren/python
|
c76c2d7ecc471ec7738041caaad85bb0760061bc
|
6db74202ccdf777c9a2caba892bbcf6871941c14
|
refs/heads/master
| 2023-02-11T17:36:44.929892
| 2021-01-04T14:58:44
| 2021-01-04T14:58:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
consumption_day = int(input("power consumption during the day (kilowatt-hours): "))
consumption_night = int(input("power consumption during the night (kilowatt-hours): "))
print("invoice")
print("*" * 7)
fixed_cost = 83.6
consumption_day_price = 0.068 * consumption_day
consumption_night_price = 0.035 * consumption_night
total_excluding_vat = fixed_cost + consumption_night_price + consumption_day_price
total_including_vat = total_excluding_vat * 1.21
print("fixed costs: € " + str(fixed_cost))
print("daily consumption: € " + str(consumption_day_price))
print("night consumption: € " + str(consumption_night_price))
print("total exluding VAT: € " + str(total_exluding_vat))
print("total including VAT: € " + str(total_including_vat))
|
[
"r0842912@student.thomasmore.be"
] |
r0842912@student.thomasmore.be
|
fbb0cb79304c6dda5f420047c1d0b5c865008647
|
459cf34442e5edfada54a0e74499ded88dd9f520
|
/recipe/templatetags/recipe_filters.py
|
3912d0c9a304c269cfaf47387ad16929d535164f
|
[] |
no_license
|
olifirovai/FoodGram_project
|
f5ac8131e699a5477a0b17479d6ec107e84c9b55
|
d27e681d8b03a6ab53a0d4c8339ccae7dcd495be
|
refs/heads/master
| 2023-08-15T19:28:23.208149
| 2021-10-13T22:17:52
| 2021-10-13T22:17:52
| 360,272,094
| 0
| 0
| null | 2021-04-22T00:54:28
| 2021-04-21T18:43:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
from django import template
from recipe.models import FavoriteRecipe, ShoppingList, RecipeTypeMapping
register = template.Library()
@register.filter
def subtract(value, arg):
return value - arg
@register.filter()
def is_in_type(type, recipe):
type_exists = RecipeTypeMapping.objects.filter(type=type,
recipe=recipe).exists()
return type_exists
@register.filter()
def duration_format(value):
value = int(value)
h = 'hour'
m = 'minute'
hours = int(value / 60)
minutes = value % 60
if hours > 1:
h += 's'
if minutes > 1:
m += 's'
if hours == 0:
return f'{minutes} {m}'
elif minutes == 0:
return f'{hours} {h}'
return f'{hours} {h}, {minutes} {m}'
@register.filter()
def is_favorite(user, recipe):
favorite = FavoriteRecipe.objects.filter(user=user, recipe=recipe).exists()
return favorite
@register.filter()
def is_in_shopping(user, recipe):
in_shopping = ShoppingList.objects.filter(user=user,
recipe=recipe).exists()
return in_shopping
@register.filter()
def recipe_shopping_count(user):
recipe_amount = ShoppingList.objects.get_shopping_list(user).count()
return recipe_amount
@register.filter()
def ingredients(ingredients, i):
return ingredients[i]
@register.filter()
def add_id(url, type_id):
url_line = str(type_id)
if url is None:
return url_line
return url + url_line
@register.filter()
def string_view(type_id):
return str(type_id)
@register.filter
def url_with_get(request, number):
query = request.GET.copy()
query['page'] = number
return query.urlencode()
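# Template usage sketch (filter names as registered above; model fields are
# illustrative):
# {% load recipe_filters %}
# {{ recipe.duration|duration_format }}   e.g. 95 -> "1 hour, 35 minutes"
# {{ user|is_favorite:recipe }}
# {{ request|url_with_get:page_num }}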
|
[
"golubtsovairinas@gmail.com"
] |
golubtsovairinas@gmail.com
|
a23f87abb60c34a3fdb28208f4f5c7807fbeca7b
|
604de367c5455fbe052e430751a0797a017fce69
|
/allennlp/models/encoder_decoders/simple_seq2multiseq.py
|
f5d026f1e5407e500c9482bb01eae411d0aa31d0
|
[
"Apache-2.0"
] |
permissive
|
schangpi/allennlp
|
3a152dcf0be4170777782d5b39318161e4af43e0
|
04eee4ce94ac096c65598ebff394c93b0ac97f5c
|
refs/heads/master
| 2021-05-03T23:16:57.449878
| 2018-03-15T21:23:50
| 2018-03-15T21:23:50
| 120,397,512
| 0
| 0
| null | 2018-02-06T03:36:33
| 2018-02-06T03:36:33
| null |
UTF-8
|
Python
| false
| false
| 14,543
|
py
|
from typing import Dict, Optional
from IPython import embed
import numpy
from overrides import overrides
import torch
from torch.autograd import Variable
# from torch.nn.modules.rnn import GRUCell
from torch.nn.modules.rnn import LSTMCell
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers.seq2multiseq import START_SYMBOL, END_SYMBOL
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules.similarity_functions import SimilarityFunction
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure
@Model.register("simple_seq2multiseq")
class SimpleSeq2MultiSeq(Model):
"""
This ``SimpleSeq2MultiSeq`` class is a :class:`Model` which takes a sequence, encodes it, and then
uses the encoded representations to decode another sequence. You can use this as the basis for
a neural machine translation system, an abstractive summarization system, or any other common
seq2seq problem. The model here is simple, but should be a decent starting place for
implementing recent models for these tasks.
This ``SimpleSeq2MultiSeq`` model takes an encoder (:class:`Seq2SeqEncoder`) as an input, and
implements the functionality of the decoder. In this implementation, the decoder uses the
encoder's outputs in two ways. The hidden state of the decoder is initialized with the output
from the final time-step of the encoder, and when using attention, a weighted average of the
outputs from the encoder is concatenated to the inputs of the decoder at every timestep.
Parameters
----------
vocab : ``Vocabulary``, required
Vocabulary containing source and target vocabularies. They may be under the same namespace
(``tokens``) or the target tokens can have a different namespace, in which case it needs to
be specified as ``target_namespace``.
source_embedder : ``TextFieldEmbedder``, required
Embedder for source side sequences
encoder : ``Seq2SeqEncoder``, required
The encoder of the "encoder/decoder" model
max_decoding_steps : int, required
Length of decoded sequences
target_namespace : str, optional (default = 'tokens')
If the target side vocabulary is different from the source side's, you need to specify the
target's namespace here. If not, we'll assume it is "tokens", which is also the default
choice for the source side, and this might cause them to share vocabularies.
target_embedding_dim : int, optional (default = source_embedding_dim)
You can specify an embedding dimensionality for the target side. If not, we'll use the same
value as the source embedder's.
attention_function: ``SimilarityFunction``, optional (default = None)
If you want to use attention to get a dynamic summary of the encoder outputs at each step
of decoding, this is the function used to compute similarity between the decoder hidden
state and encoder outputs.
scheduled_sampling_ratio: float, optional (default = 0.0)
At each timestep during training, we sample a random number between 0 and 1, and if it is
not less than this value, we use the ground truth labels for the whole batch. Else, we use
the predictions from the previous time step for the whole batch. If this value is 0.0
(default), this corresponds to teacher forcing, and if it is 1.0, it corresponds to not
using target side ground truth labels. See the following paper for more information:
Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks. Bengio et al.,
2015.
"""
def __init__(self,
vocab: Vocabulary,
tasks: str,
domains: str,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
max_decoding_steps: int,
upos_namespace: str = "upos_tags",
ner_namespace: str = "ner_tags",
chunk_namespace: str = "chunk_tags",
target_embedding_dim: int = None,
attention_function: SimilarityFunction = None,
scheduled_sampling_ratio: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(SimpleSeq2MultiSeq, self).__init__(vocab, regularizer)
# print(len(tasks), len(domains))
self._num_tasks = len(tasks)
self._tasks = tasks
self._domains = domains
self._source_embedder = source_embedder
self._encoder = encoder
self._max_decoding_steps = max_decoding_steps
self._upos_namespace = upos_namespace
self._ner_namespace = ner_namespace
self._chunk_namespace = chunk_namespace
self._attention_function = attention_function
self._scheduled_sampling_ratio = scheduled_sampling_ratio
self._upos_seq2seq = SimpleSeq2Seq(vocab=vocab, source_embedder=source_embedder, encoder=encoder,
max_decoding_steps=max_decoding_steps, target_namespace=upos_namespace,
target_embedding_dim=target_embedding_dim,
attention_function=attention_function,
scheduled_sampling_ratio=scheduled_sampling_ratio,
initializer=initializer, regularizer=regularizer)
self._ner_seq2seq = SimpleSeq2Seq(vocab=vocab, source_embedder=source_embedder, encoder=encoder,
max_decoding_steps=max_decoding_steps, target_namespace=ner_namespace,
target_embedding_dim=target_embedding_dim,
attention_function=attention_function,
scheduled_sampling_ratio=scheduled_sampling_ratio,
initializer=initializer, regularizer=regularizer)
self._chunk_seq2seq = SimpleSeq2Seq(vocab=vocab, source_embedder=source_embedder, encoder=encoder,
max_decoding_steps=max_decoding_steps, target_namespace=chunk_namespace,
target_embedding_dim=target_embedding_dim,
attention_function=attention_function,
scheduled_sampling_ratio=scheduled_sampling_ratio,
initializer=initializer, regularizer=regularizer)
initializer(self)
@overrides
def forward(self, # type: ignore
task_token: torch.LongTensor,
domain_token: torch.LongTensor,
source_tokens: Dict[str, torch.LongTensor],
upos_tokens: Dict[str, torch.LongTensor] = None,
ner_tokens: Dict[str, torch.LongTensor] = None,
chunk_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Decoder logic for producing the entire target sequence.
Parameters
----------
source_tokens : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the source ``TextField``. This will be
passed through a ``TextFieldEmbedder`` and then through an encoder.
target_tokens : Dict[str, torch.LongTensor], optional (default = None)
Output of ``Textfield.as_array()`` applied on target ``TextField``. We assume that the
target tokens are also represented as a ``TextField``.
"""
# (batch_size, input_sequence_length, encoder_output_dim)
batch_size = list(source_tokens.values())[0].size(0) # the dict maps indexer name -> tensor; len(source_tokens) would count indexer keys, not samples
upos_output_dict = self._upos_seq2seq.forward(source_tokens, upos_tokens)
ner_output_dict = self._ner_seq2seq.forward(source_tokens, ner_tokens)
chunk_output_dict = self._chunk_seq2seq.forward(source_tokens, chunk_tokens)
loss = 0.0
predictions = []
label_namespaces = []
task_token_ids = task_token.data.cpu().numpy()
for b in range(batch_size):
task = self.vocab.get_token_from_index(task_token_ids[b][0], namespace="task_labels")
if task == 'upos':
loss += upos_output_dict['loss']
predictions.append(upos_output_dict['predictions'])
elif task == 'ner':
loss += ner_output_dict['loss']
predictions.append(ner_output_dict['predictions'])
elif task == 'chunk':
loss += chunk_output_dict['loss']
predictions.append(chunk_output_dict['predictions'])
label_namespaces.append(task)
output_dict = {'loss': loss,
'predictions': predictions,
'label_namespaces': label_namespaces}
# embed()
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
upos_output_dict = self._upos_seq2seq.decode(output_dict)
ner_output_dict = self._ner_seq2seq.decode(output_dict)
chunk_output_dict = self._chunk_seq2seq.decode(output_dict)
all_predicted_tokens = []
for b, task in enumerate(output_dict['label_namespaces']):
if task == 'upos':
all_predicted_tokens.append(upos_output_dict['predictions'][b])
elif task == 'ner':
all_predicted_tokens.append(ner_output_dict['predictions'][b])
elif task == 'chunk':
all_predicted_tokens.append(chunk_output_dict['predictions'][b])
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
upos_accs = {'upos-' + metric_name: metric.get_metric(reset)
for metric_name, metric in self._upos_seq2seq.metrics.items()}
ner_accs = {'ner-' + metric_name: metric.get_metric(reset)
for metric_name, metric in self._ner_seq2seq.metrics.items()}
chunk_accs = {'chunk-' + metric_name: metric.get_metric(reset)
for metric_name, metric in self._chunk_seq2seq.metrics.items()}
upos_metric_dict = self._upos_seq2seq.span_metric.get_metric(reset=reset)
upos_f1 = {'upos-' + x: y for x, y in upos_metric_dict.items() if "overall" in x}
ner_metric_dict = self._ner_seq2seq.span_metric.get_metric(reset=reset)
ner_f1 = {'ner-' + x: y for x, y in ner_metric_dict.items() if "overall" in x}
chunk_metric_dict = self._chunk_seq2seq.span_metric.get_metric(reset=reset)
chunk_f1 = {'chunk-' + x: y for x, y in chunk_metric_dict.items() if "overall" in x}
accs = {metric_name:
(upos_accs['upos-' + metric_name] + ner_accs['ner-' + metric_name] +
chunk_accs['chunk-' + metric_name])/3 for metric_name, _ in self._upos_seq2seq.metrics.items()}
f1 = {x: (upos_f1['upos-' + x] + ner_f1['ner-' + x] + chunk_f1['chunk-' + x])/3
for x, _ in upos_metric_dict.items() if "overall" in x}
return {**f1, **accs, **upos_f1, **ner_f1, **chunk_f1, **upos_accs, **ner_accs, **chunk_accs}
@classmethod
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2MultiSeq':
tasks = params.pop("tasks")
domains = params.pop("domains")
source_embedder_params = params.pop("source_embedder")
source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
max_decoding_steps = params.pop("max_decoding_steps")
upos_namespace = params.pop("upos_namespace", "upos_tags")
ner_namespace = params.pop("ner_namespace", "ner_tags")
chunk_namespace = params.pop("chunk_namespace", "chunk_tags")
# upos_namespace = params.pop("upos_namespace")
# ner_namespace = params.pop("ner_namespace")
# chunk_namespace = params.pop("chunk_namespace")
# If no attention function is specified, we should not use attention, not attention with
# default similarity function.
attention_function_type = params.pop("attention_function", None)
if attention_function_type is not None:
attention_function = SimilarityFunction.from_params(attention_function_type)
else:
attention_function = None
scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
return cls(vocab,
tasks=tasks,
domains=domains,
source_embedder=source_embedder,
encoder=encoder,
max_decoding_steps=max_decoding_steps,
upos_namespace=upos_namespace,
ner_namespace=ner_namespace,
chunk_namespace=chunk_namespace,
attention_function=attention_function,
scheduled_sampling_ratio=scheduled_sampling_ratio,
initializer=initializer,
regularizer=regularizer
)
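# Configuration sketch for from_params (keys taken from the pops above;
# values are illustrative):
# "model": {
#     "type": "simple_seq2multiseq",
#     "tasks": ["upos", "ner", "chunk"],
#     "domains": ["news"],
#     "source_embedder": {"tokens": {"type": "embedding", "embedding_dim": 50}},
#     "encoder": {"type": "lstm", "input_size": 50, "hidden_size": 100},
#     "max_decoding_steps": 50,
#     "scheduled_sampling_ratio": 0.0
# }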
|
[
"martbeerina@gmail.com"
] |
martbeerina@gmail.com
|
f5faa5b427e0b5ac987bbe63f6a85745fe598fef
|
ae17dbce5e09a3d6cf6d48be1c31e64cdd110600
|
/boba/urls.py
|
51e8c5ecf134da559e5fa773407051c332fb87a2
|
[
"MIT"
] |
permissive
|
gracerosemary/boba-project
|
6dcf8cfe568b12f2e82552ac94ae5442d0319ed1
|
627504a3caf9f2833d3a5b12c9172543a64a7f70
|
refs/heads/master
| 2023-03-20T10:11:56.888178
| 2021-03-17T18:42:05
| 2021-03-17T18:42:05
| 346,222,209
| 0
| 0
|
NOASSERTION
| 2021-03-17T18:42:06
| 2021-03-10T03:42:17
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.urls import path
from .views import BobaListView, BobaCreateView, BobaDeleteView, BobaDetailView, BobaUpdateView
urlpatterns = [
path("", BobaListView.as_view(), name="boba_list"),
path("<int:pk>/", BobaDetailView.as_view(), name="boba_detail"),
path("new/", BobaCreateView.as_view(), name="boba_create"),
path("<int:pk>/edit", BobaUpdateView.as_view(), name="boba_update"),
path("<int:pk>/delete", BobaDeleteView.as_view(), name="boba_delete"),
]
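# Reversal sketch (URL names as defined above; the prefix depends on how this
# module is included in the project's root urls):
# reverse("boba_detail", args=[1])  -> ".../1/"
# reverse("boba_create")            -> ".../new/"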
|
[
"choi.g.330@gmail.com"
] |
choi.g.330@gmail.com
|
903582b3fbfcafbe95101cc7a00acf75d474b845
|
82fdb2f3baeb4f08799d93c4be8d8c829f092415
|
/rasa_core/version.py
|
91afe203b5cccccd5a7f8b12cd4e9baa0ab9359b
|
[
"Apache-2.0"
] |
permissive
|
velamurip/rasa_core
|
915f815772e2b596f837f0e1af511e829cc28e3e
|
f3dbb70d0bb748628ab238eded17a8f5e09279e2
|
refs/heads/master
| 2021-05-16T04:22:04.310610
| 2017-10-05T09:53:22
| 2017-10-05T09:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
__version__ = '0.7.0'
|
[
"tom.bocklisch@scalableminds.com"
] |
tom.bocklisch@scalableminds.com
|
0d7a944e35c031e0b4fc6f129e53b720f2764efd
|
56ec2fe75c27ee0f9eda4246805ea903d195c108
|
/check/myTraining.py
|
9656280a2e7d6e6f2f16399db3936589cb0d1d96
|
[] |
no_license
|
whoisashish/Covid-19-WebApp
|
3ef71e8c3fca96af94265d002fc32d61124adeb1
|
d7eaba0f8612dab268634d874d39c822ba71986d
|
refs/heads/master
| 2022-04-16T08:10:30.141340
| 2020-04-09T17:22:45
| 2020-04-09T17:22:45
| 250,134,732
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
import pandas as pd
df = pd.read_csv('data.csv')
|
[
"noreply@github.com"
] |
whoisashish.noreply@github.com
|
86fbc2d6300338aa31b746d7698c5c018e6d6119
|
69c85c659909aaef863fd92281c11df1ad7d4be3
|
/Python Problems/62.py
|
5c015cc6f2843244d927a382b232ff24d12f3c22
|
[] |
no_license
|
OneCalledSyn/project-euler
|
fd7b8a8541bab173846826dbdfd5503ef2816a54
|
c5b9ec35cedf10fcdf45678e39470e9a8484db73
|
refs/heads/master
| 2023-08-05T01:08:39.095996
| 2023-07-23T19:42:24
| 2023-07-23T19:42:24
| 241,126,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
cubes = dict()
solutions = []
for i in range(100, 10000):
curr = ''.join(sorted(str(i ** 3)))
print(curr)
if curr not in cubes.keys():
cubes[curr] = list()
cubes[curr].append(i)
if len(cubes[curr]) == 5:
solutions.append(cubes[curr])
if len(cubes[curr]) == 6:
solutions.remove(cubes[curr])
#print(i, curr, cubes[curr])
print(solutions, solutions[0][0] ** 3)
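# For Project Euler 62 this searches for cubes whose sorted digits match
# exactly five cubes; with this bound the smallest such cube is expected to
# be 127035954683 (5027 ** 3).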
|
[
"noreply@github.com"
] |
OneCalledSyn.noreply@github.com
|
aed8fa7e7361c148a1e9f6978319b73fe94d4af9
|
80c24f75f86a29b28aab42d7752cf41165e2ac7d
|
/src/mininglogic/knn_kdtree_fasten_eager.py
|
e560dab0f015ec0902e96c04df8d24f8eecc6c5b
|
[] |
no_license
|
txsing/KDDCup15
|
52ebd7846085b5c0c70d310823de80c67442014e
|
1c89f94c47310e2dc64eb33a695e2ea0c760a9db
|
refs/heads/master
| 2021-01-10T02:26:38.688243
| 2016-03-04T20:59:56
| 2016-03-04T20:59:56
| 52,838,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,957
|
py
|
import numpy as np
import time
from sklearn import neighbors
import sys
from sklearn.svm import SVC
from sklearn import svm
sys.setrecursionlimit(100000000)
TestsampleNum = 48217
trainVector = np.zeros([72325,7]) # 72325 samples in total, each in a 7-component vector format
trainLabel = np.random.rand(72325)
testVector = np.zeros([TestsampleNum,7]) # 48217 test samples in total (TestsampleNum), each in a 7-component vector format
testLabel = np.random.rand(TestsampleNum)
predictedLabel = np.random.rand(TestsampleNum)
trainFile = "WOT_Train_Vector.csv"
def readTrainFile(filename): # input the file to be processed
file = open(filename)
PreprocessData = np.genfromtxt(file, delimiter=",")
file.close()
print(PreprocessData)
#print(PreprocessData.shape[0])
#print(PreprocessData.shape[1])
return PreprocessData
def constructKDTree():
tree = neighbors.KDTree(trainVector, leaf_size=2)
return tree
def KNN(array, k, searchTree): #input the x array (an array of points to query), input k value
# tree = spatial.KDTree(trainVector)
#print tree.data
# print tree.data
resultset = np.zeros([k, 7])
resultlabelset = np.zeros(k)
#tree = spatial.KDTree(trainVector)
distance, knn_index = searchTree.query(array, k)
knn_index = knn_index.reshape(k)
for i in range(len(knn_index)):
resultset[i,:] = trainVector[knn_index[i],:]
resultlabelset[i] = trainLabel[knn_index[i]]
return resultset, resultlabelset# is this correct?
def weight_vector(filename):
file = open(filename)
data = np.genfromtxt(file, delimiter=",")
file.close()
print "data shape = \n", data.shape
print "data =\n", data
type_sum = np.zeros(7)
for i in range(7):
type_sum[i] = type_sum[i] + data[i * 2][2]
type_sum[i] = type_sum[i] + data[i * 2 + 1][2]
type_tol = np.sum(type_sum)
print "type_sum = \n", type_sum
print "type_tol = \n", type_tol
ratio = np.zeros(7)
for i in range(7):
if data[i * 2][2] > data[i * 2 + 1][2]:
ratio[i] = data[i * 2][2] / float(data[i * 2 + 1][2])
ratio[i] = ratio[i] * (type_sum[i] / float (type_tol))
else:
ratio[i] = data[i * 2 + 1][2] / float(data[i * 2][2]) # inner ratio
ratio[i] = ratio[i] * (type_sum[i] / float (type_tol)) # weight among all counts
ratio[i] = (-1) * ratio[i] # direction
return ratio
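# A worked toy example of the weighting above (hypothetical counts, for illustration only):
# if event type i occurs 30 times under one label and 10 under the other (40 of 200 events
# in total), the inner ratio is 30/10 = 3.0 and the weight is 3.0 * (40/200) = 0.6; the sign
# flips to negative when the second count is the larger one.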
def cal_accuracy(testlabel, predictedlabel):
correct = 0;
for i in range(len(testlabel)):
if testlabel[i] == predictedlabel[i]:
correct = correct + 1
accuracy = correct / float(TestsampleNum)
print "in cal_accuracy TestsampleNum = \n", TestsampleNum
return accuracy
if __name__ == '__main__':
startTime = time.time()
useWeight = int(sys.argv[1]) # Use weight info or not: 1-use; 0-not use
trainfile = sys.argv[2]
testfile = sys.argv[3]
weightfile = sys.argv[4]
k_neighbor = int(sys.argv[5])
AllData = readTrainFile(trainfile)
PreviousId = AllData[0][0]
index = 0
for i in range(AllData.shape[0]): # ith row in original input data
CurrentId = AllData[i][0]
if CurrentId == PreviousId: # add a new term in the vector
trainVector[index][AllData[i][1]] = AllData[i][2]
trainLabel[index] = AllData[i][3]
else:
index = index + 1 # 1st record for a new sample/enrollment
trainVector[index][AllData[i][1]] = AllData[i][2]
trainLabel[index] = AllData[i][3]
PreviousId = CurrentId
# print trainVector
print trainVector.shape[0]
print trainVector.shape[1]
TestData = readTrainFile(testfile)
PreviousId = TestData[0][0]
index = 0
# for i in range(TestData.shape[0]): # ith row in original input data
for i in range(TestData.shape[0]): # ith row in original input data
CurrentId = TestData[i][0]
if CurrentId == PreviousId: # add a new term in the vector
testVector[index][TestData[i][1]] = TestData[i][2]
testLabel[index] = TestData[i][3]
else:
index = index + 1 # 1st record for a new sample/enrollment
testVector[index][TestData[i][1]] = TestData[i][2]
testLabel[index] = TestData[i][3]
PreviousId = CurrentId
# print testVector
print testVector.shape[0]
print testVector.shape[1]
print "TestsampleNum = \n", TestsampleNum
if(useWeight != 0):
ratio = weight_vector(weightfile)
print "ratio = \n", ratio
trainVector[:,0] = trainVector[:,0] * ratio[0] # -0.12748538
trainVector[:,1] = trainVector[:,1] * ratio[1] # 0.57295446
trainVector[:,2] = trainVector[:,2] * ratio[2] # 0.27336366
trainVector[:,3] = trainVector[:,3] * ratio[3] # 0.22940873
trainVector[:,4] = trainVector[:,4] * ratio[4] # 0.01308849
trainVector[:,5] = trainVector[:,5] * ratio[5] # 0.13693268
trainVector[:,6] = trainVector[:,6] * ratio[6] # 0.18386892
testVector[:,0] = testVector[:,0] * ratio[0] # -0.12748538
testVector[:,1] = testVector[:,1] * ratio[1] # 0.57295446
testVector[:,2] = testVector[:,2] * ratio[2] # 0.27336366
testVector[:,3] = testVector[:,3] * ratio[3] # 0.22940873
testVector[:,4] = testVector[:,4] * ratio[4] # 0.01308849
testVector[:,5] = testVector[:,5] * ratio[5] # 0.13693268
testVector[:,6] = testVector[:,6] * ratio[6] # 0.18386892
# print testVector
clf = svm.LinearSVC()
clf.fit(trainVector, trainLabel)
predictedLabel = clf.predict(testVector)
print "\naccuracy = \n", cal_accuracy(testLabel, predictedLabel)
endTime = time.time()
period = endTime - startTime
print "period = \n", period
|
[
"txsing@gmail.com"
] |
txsing@gmail.com
|
8ce56d89a9b682e82b60e2ae11f21050cd689353
|
13390c6d65b2b222a5525e485f5dd1d1b48b46d9
|
/django/clo/paginatortest.py
|
85ec6e7335f0b638a3171b72ca52db913ff880a5
|
[] |
no_license
|
Roarain/py2
|
35c6f730a8dae46e2cb79349250448d5e3ea85b2
|
ee60ac13f3bbad0347bc790c8f64db26210948f0
|
refs/heads/master
| 2021-06-02T16:08:10.625121
| 2019-11-01T11:54:35
| 2019-11-01T11:54:35
| 98,857,911
| 0
| 0
| null | 2020-04-29T23:04:21
| 2017-07-31T06:55:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 592
|
py
|
#coding:utf-8
from django.core.paginator import Paginator
itemlist = ['a','b','c','d','e','f','g']
p = Paginator(itemlist,2)
# print p.num_pages
for i in range(1,p.num_pages+1):
if p.page(i).has_previous():
        print 'Page %d has a previous page, which is page %d' % (i,p.page(i).previous_page_number())
    else:
        print 'Page %d is the first page' % (i)
    if p.page(i).has_next():
        print 'Page %d has a next page, which is page %d' % (i, p.page(i).next_page_number())
    else:
        print 'Page %d is the last page' % (i)
print p.page(i).object_list
print '----------------'
|
[
"welovewxy@126.com"
] |
welovewxy@126.com
|
688481d306ef5a1d5633e44a88b0133a5281ff0d
|
9486974db3f1eca2359cc65a108301e6d158c641
|
/SS_app/models.py
|
828bcba6c54f167273beb57ccb9f5c3e1c4afec0
|
[] |
no_license
|
KirandeepKaur03/ss
|
c0c41cd879df8c1847715447f41d0aa71fe92c22
|
3241591213cf6ed77175399ee943f8b2558a9afe
|
refs/heads/master
| 2023-04-13T21:39:22.561240
| 2021-04-24T07:42:34
| 2021-04-24T07:42:34
| 347,861,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
from django.db import models
class Queries(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255,default="",null=True,blank=True)
email = models.EmailField(default="",null=True,blank=True)
mobile=models.BigIntegerField(default=0,null=True,blank=True)
message=models.CharField(max_length=255,default="",null=True,blank=True)
def __str__(self):
        return str(self.id)
class adminlist(models.Model):
id = models.AutoField(primary_key=True)
adminid = models.CharField(max_length=255, default="", null=True, blank=True)
password = models.CharField(max_length=255, default="", null=True, blank=True)
def __str__(self):
return self.adminid
|
[
"kkirandeep2@gmail.com"
] |
kkirandeep2@gmail.com
|
b6e0e8889401a2c8cee10f7a52d483fcdd0facce
|
a8c1a7cf331a746d2711e48094eb4be5b3c566ae
|
/set_dilation/viz/poly_viz_3d.py
|
20455316cf7b1b93da6e6ba4e04ab1ac5e516f6c
|
[] |
no_license
|
aalbert-dev/minkowski_solver
|
87bbbac95d1e9550f0959b51befc83ca3f33f1ee
|
b5e0d4a8f8b52d0a1f3ea7ed432574ce4bafd8b9
|
refs/heads/main
| 2023-04-18T05:31:38.594006
| 2021-03-24T06:15:58
| 2021-03-24T06:15:58
| 344,354,305
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
def plot(plt, coords, color, style):
if not coords:
return
x_pts = []
y_pts = []
z_pts = []
for i in range(0, len(coords) + 1):
cur_x, cur_y, cur_z = coords[i % len(coords)]
cur_x = float(cur_x)
cur_y = float(cur_y)
cur_z = float(cur_z)
x_pts.append(cur_x)
y_pts.append(cur_y)
z_pts.append(cur_z)
plt.plot(x_pts, y_pts, color=color, linestyle=style)
def read(f_name):
    coords = []
    # use a context manager so the file is closed after reading
    with open(f_name, 'r') as f:
        for line in f:
            coords.append(line.split(','))
    return coords
def get_cmap(n, name='hsv'):
return plt.cm.get_cmap(name, n)
def get_colors_from_angles(z_coords):
num_points = len(z_coords)
unique_z = []
for z in z_coords:
if z not in unique_z:
unique_z.append(z)
# print(num_points, len(unique_z))
z_length = num_points // len(unique_z)
color_seq = []
for i in range(0, num_points):
col_index = int(i / z_length)
color_seq.append(col_index)
return color_seq
def viz(dir_name):
fig = plt.figure()
ax = Axes3D(fig)
coords = read(dir_name + 'poly_results_3d.txt')
if not coords:
return
x_pts = []
y_pts = []
z_pts = []
for i in range(0, len(coords) + 1):
cur_x, cur_y, cur_z = coords[i % len(coords)]
x_pts.append(float(cur_x))
y_pts.append(float(cur_y))
z_pts.append(float(cur_z))
color_seq = get_colors_from_angles(z_pts)
ax.scatter(x_pts, y_pts, z_pts, s=50, c=color_seq)
ax.set_xlabel('X Meters')
ax.set_ylabel('Y Meters')
ax.set_zlabel('Z Radians')
plt.title('Configuration space')
plt.show()
viz('/home/arjun/minkowski_addition/set_dilation/data/')
|
[
"arjunalbert1@gmail.com"
] |
arjunalbert1@gmail.com
|
c13c389f3d476fad44b7df7071d4d66218e09f44
|
8da9a2e818ed7a304c716cc2eb5bd2762a6b8b9d
|
/env/bin/flask
|
ec1c8d934cd89084f05989e845570327b1a3b45b
|
[] |
no_license
|
rachelgoff/FSND
|
9b1b3d63e2707a829fec0f21c0a97b76d30f4140
|
b72230debcd6b18816ce574db4dd7b7e0273e472
|
refs/heads/master
| 2020-12-13T13:55:36.171470
| 2020-06-10T06:04:35
| 2020-06-10T06:04:35
| 234,435,253
| 0
| 0
| null | 2020-01-17T00:00:39
| 2020-01-17T00:00:38
| null |
UTF-8
|
Python
| false
| false
| 307
|
#!/Users/rachel/full_stack_class/projects/fyyur/FSND/projects/capstone/05_what_to_eat/backend/src/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rzhangmoz1@gmail.com"
] |
rzhangmoz1@gmail.com
|
|
23d517302e7e039c5fa654c1734866871498ede2
|
db2d5b2b1a48e0b834993f3fe606deb9a4a57e56
|
/Desktop/master/DPROT/labWork1/codisGit/lab1dprot.py
|
fe09e43e54edfd8d3569112d9de3d36ab18286aa
|
[] |
no_license
|
mbasart/prova2
|
9ce78722a0fd1ab8b3a4b2ef8b46ccc33021dff4
|
b8b429ceee42ee129d18ed566b375e84ecee19cd
|
refs/heads/main
| 2021-06-22T07:18:18.605483
| 2020-12-06T22:35:14
| 2020-12-06T22:35:14
| 149,730,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
f = open("resultatsXifratsMcopy1.txt","r")
valorsXifrats = []
count = 0
valorsXifratsDic = {}
maxfreq = 0
valuefreq = 0
for x in f:
value = x.split(' ', 1)[0].strip()
valorsXifrats.append(value)
valueInt = int(value, 16)
finalxor = valueInt ^ (count+2)
count = count+1
if finalxor in valorsXifratsDic:
valorsXifratsDic[finalxor]+=1
else:
valorsXifratsDic[finalxor] = 1
for x, y in valorsXifratsDic.items():
if y > maxfreq:
maxfreq = y
valuefreq = x
#print(x,y)
print(chr(valuefreq),maxfreq)
#print(valuefreq,maxfreq)
f.close()
# find the first value of the key, k[0]
f2 = open("resultatsXifrats2Mcopy1.txt","r")
count2 = 0
valorsXifratsDic2 = {}
maxfreq2 = 0
valuefreq2 = 0
for x in f2:
value = x.split(' ', 1)[0].strip()
valueInt = int(value, 16)
valuek = 0
for x in range(256):
finalxor = valuefreq ^ (count2+6+x)
if finalxor == valueInt:
valuek = x
if x in valorsXifratsDic2:
valorsXifratsDic2[x]+=1
else:
valorsXifratsDic2[x] = 1
count2 = count2+1
for x, y in valorsXifratsDic2.items():
if y > maxfreq2:
maxfreq2 = y
valuefreq2 = x
#print(x,y)
print(hex(valuefreq2),maxfreq2)
f2.close()
# find the second value of the key, k[1]
f3 = open("resultatsXifrats3Mcopy1.txt","r")
count3 = 0
valorsXifratsDic3 = {}
maxfreq3 = 0
valuefreq3 = 0
for x in f3:
value = x.split(' ', 1)[0].strip()
valueInt = int(value, 16)
valuek = 0
for x in range(256):
finalxor = valuefreq ^ (count3+10+valuefreq2+x)
if finalxor == valueInt:
valuek = x
if x in valorsXifratsDic3:
valorsXifratsDic3[x]+=1
else:
valorsXifratsDic3[x] = 1
count3 = count3+1
for x, y in valorsXifratsDic3.items():
if y > maxfreq3:
maxfreq3 = y
valuefreq3 = x
#print(x,y)
print(hex(valuefreq3),maxfreq3)
f3.close()
|
[
"mbd2797@gmail.com"
] |
mbd2797@gmail.com
|
2fb7ffee21e0dc2f4eca5bdebe372d417dfe97d9
|
a59d1faced9fe7348ca7143d2a8643e0ebad2132
|
/pyvisdk/do/operation_disabled_by_guest.py
|
0767e0fe3a5714693497cfec0b893a5dfefa5187
|
[
"MIT"
] |
permissive
|
Infinidat/pyvisdk
|
c55d0e363131a8f35d2b0e6faa3294c191dba964
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
refs/heads/master
| 2023-05-27T08:19:12.439645
| 2014-07-20T11:49:16
| 2014-07-20T11:49:16
| 4,072,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def OperationDisabledByGuest(vim, *args, **kwargs):
'''An OperationDisabledByGuest exception is thrown when an operation fails because
the guest operations agent has been configured to disable the operation.'''
obj = vim.client.factory.create('{urn:vim25}OperationDisabledByGuest')
# do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % len(args))
required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
[
"guy@rzn.co.il"
] |
guy@rzn.co.il
|
09aa4e450f4fc1fe24c41d5ab95eea8247db0226
|
856c958451ff1770115232d6f7289afb354db4e1
|
/Tests/test_filtering_products_by_name.py
|
23641670420d997ca8cabe5746e1a2d1537fc16c
|
[] |
no_license
|
Alex-Chizhov/Test_automation_for_web_store
|
0d82a105e75521fd3ec9c19497184c7e967239ec
|
8f0e50a2db0c2d6a1bfffdfc615f20efca77d1bc
|
refs/heads/master
| 2022-11-01T22:01:59.152063
| 2022-10-02T06:33:45
| 2022-10-02T06:33:45
| 164,032,730
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
import allure
@allure.feature('Filtering products by name')
@allure.severity('critical')
def test_filtering_products_by_name(appf_customer):
list_products_names = appf_customer.shop.get_filtering_products_by_price()
assert list_products_names == sorted(list_products_names)
|
[
"alexchizhov90@gmail.com"
] |
alexchizhov90@gmail.com
|
c9a99d0b6f30fc9256d329cb0d67be6887f815d5
|
c874e55ec73043f6b837601cc58d855d37649e59
|
/mlcenzer/python scripts/find_time_duration.py
|
90d350b7501cc45fb8a4f1dddb8d2526b7a9d14a
|
[] |
no_license
|
mlcenzer/SBB-dispersal
|
85c54c924b399834a798d700cabf0b2702ae0755
|
1a777370986f83186180552a09149dfba72b96d0
|
refs/heads/master
| 2022-12-11T10:13:32.416530
| 2022-12-03T16:23:52
| 2022-12-03T16:23:52
| 229,098,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
#***************************************************************************************************************************
# Calculates the flight duration of a trial. The function reads all the data as a list of strings (one string per row),
# retrieves the last line, and takes the first comma-separated field of that line, which is the total flight duration.
#***************************************************************************************************************************
def find_time_duration(file_name):
with open(file_name, "r") as txtfile:
data = txtfile.readlines()
tot_duration = data[-1].split(",")[0]
return float(tot_duration)
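# A minimal usage sketch (the file name and its last line are hypothetical, for illustration only):
# if the last line of "trial_01.txt" were "183.25,0.42,1", then
#   find_time_duration("trial_01.txt")  # would return 183.25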
|
[
"mlcenzer@ucdavis.edu"
] |
mlcenzer@ucdavis.edu
|
dcfa5ed4aef2d586ed2d0c1c723f6ce1e8e10460
|
8b69e4e5119813ffeccc25acad7b6922a7fcf8ff
|
/tronn/learn/baselines_v2.py
|
3417bb499bc5f81029e8bec6a7981c91ca6bd13e
|
[
"MIT"
] |
permissive
|
mbrannon88/tronn
|
752089520b83637636f848d11fdf917cadaca6f4
|
59654a958f6debb5be150e383e96997f1982359d
|
refs/heads/master
| 2023-07-08T15:22:08.170872
| 2021-08-04T21:36:21
| 2021-08-04T21:36:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,976
|
py
|
"""Contains code to run baseline models as comparisons
"""
import glob
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.tensor_forest.client import eval_metrics
#from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.platform import app
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.ops import array_ops
from tronn.datalayer import tflearn_input_fn
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tronn.learn import tensor_forest
from tronn.learn import random_forest
#from tensorflow.contrib.tensor_forest.python import tensor_forest
#from tensorflow.contrib.tensor_forest.client import random_forest
from tronn.nets.kmer_nets import featurize_kmers
from tronn.nets.motif_nets import featurize_motifs
from tronn.learn.cross_validation import setup_cv
from tronn.interpretation.motifs import get_encode_pwms
def build_estimator(model_dir, num_classes=3):
"""Build an estimator."""
params = tensor_forest.ForestHParams(
num_classes=num_classes,
num_features=15625, # num classes = 2
num_trees=100,
max_nodes=10000,
regression=True if num_classes > 2 else False) # make this bigger later 500, max nodes 3000
graph_builder_class = tensor_forest.RandomForestGraphs
if num_classes > 2:
print "using multi label head"
head = head_lib.multi_label_head(num_classes) # reactivate this for multi label learning
else:
head = None
return random_forest.TensorForestEstimator(
params,
graph_builder_class=graph_builder_class,
early_stopping_rounds=100000000,
head=head,
model_dir=model_dir)
def auprc_old(probs, targets, weights=None):
return metric_ops.streaming_auc(array_ops.slice(probs, [0, 1], [-1, 1]),
targets, weights, curve='PR')
#return metric_ops.streaming_auc(probs, targets, weights, curve='PR')
def auprc(probs, targets, weights=None):
return tf.metrics.auc(targets, probs, curve='PR')
def auroc_old(probs, targets, weights=None):
return metric_ops.streaming_auc(array_ops.slice(probs, [0, 1], [-1, 1]),
targets, weights, curve='ROC')
#return tf.metrics.auc(targets, probs, curve='ROC')
def auroc(probs, targets, weights=None):
return tf.metrics.auc(targets, probs, curve='ROC')
def accuracy(probs, targets):
print probs.get_shape()
print targets.get_shape()
predictions = tf.cast(tf.greater(tf.cast(probs, 'float32'), tf.cast(0.5, 'float32')), 'float32')
targets_expanded = tf.expand_dims(targets, 2)
predictions_expanded = tf.expand_dims(predictions, 2)
return tf.metrics.accuracy(targets_expanded, predictions_expanded)
def train_and_eval_tensorforest(
data_loader_train,
data_loader_test,
batch_size,
#tasks,
out_dir,
num_classes=3):
"""Runs random forest baseline model
Note that this is TFLearn's model
"""
est = build_estimator(out_dir, num_classes=num_classes)
# TODO change metrics here, here for debugging, move back down later
metric = {}
metric['accuracy'] = metric_spec.MetricSpec(
accuracy,
prediction_key='logits')
metric['auroc_tf'] = metric_spec.MetricSpec(
auroc,
prediction_key='logits')
metric['auprc_tf'] = metric_spec.MetricSpec(
auprc,
prediction_key='logits')
est.fit(input_fn=data_loader_train, max_steps=5000) # steps=5000
if True:
results = est.evaluate(input_fn=data_loader_test,
metrics=metric,
steps=10)
for key in sorted(results):
print('%s: %s' % (key, results[key]))
    # import ipdb
    # ipdb.set_trace()
predict_total = 50
prediction_generator = est.predict(input_fn=data_loader_test, outputs=["probabilities", "logits", "classes", "labels"])
#np.zeros((predict_total, num_classes))
for i in xrange(predict_total):
blah = prediction_generator.next()
print blah
return None
def run(args):
"""Run command
"""
data_files = sorted(glob.glob('{}/*.h5'.format(args.data_dir)))
train_files, valid_files, test_files = setup_cv(data_files, cvfold=args.cvfold)
tf.logging.set_verbosity(tf.logging.INFO)
# TODO run through at least 1 epoch and at least until loss drops more
if args.kmers:
train_and_eval_tensorforest(
tflearn_input_fn(
train_files,
args.batch_size,
tasks=args.tasks,
featurize_fn=featurize_kmers,
featurize_params={"kmer_len": args.kmer_len}),
tflearn_input_fn(
test_files,
args.batch_size,
tasks=args.tasks,
featurize_fn=featurize_kmers,
featurize_params={"kmer_len": args.kmer_len}),
args.batch_size,
args.out_dir,
num_classes=args.num_classes)
elif args.motifs:
train_and_eval_tensorforest(
tflearn_input_fn(
train_files,
args.batch_size,
tasks=args.tasks,
featurize_fn=featurize_motifs,
featurize_params={"pwm_list": get_encode_pwms(args.pwm_file)}),
tflearn_input_fn(
test_files,
args.batch_size,
tasks=args.tasks,
featurize_fn=featurize_motifs,
featurize_params={"pwm_list": get_encode_pwms(args.pwm_file)}),
args.batch_size,
args.out_dir,
num_classes=args.num_classes)
return None
|
[
"dskim89@gmail.com"
] |
dskim89@gmail.com
|
358f264887f571c41af0642c4ba2e6a3b8d4574a
|
52b959e977ac6fa150fa1d0414ee69cba3f1869b
|
/dijkstra算法.py
|
6b024fde750262c2a6c7d130075c0cf6ab3c199e
|
[] |
no_license
|
1536696608/Traversal-of-graphs
|
66a00e5118f5a9d547c87d7d9b298b79bac4be12
|
a0bcbf7a17ab39301514241ac115b60f7ded16d2
|
refs/heads/master
| 2020-06-26T05:45:57.157388
| 2019-07-31T02:51:04
| 2019-07-31T02:51:04
| 199,551,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import heapq
import math
graph = {"A":{"B":5,"C":1},
"B":{"A":5,"C":2,"D":1},
"C":{"A":1,"B":2,"D":4,"E":8},
"D":{"B":1,"C":4,"E":3,"F":6},
"E":{"C":8,"D":3},
"F":{"D":6}
}
def init_dis(graph,s):
dis = {s:0}
vertex = graph
for v in vertex:
if v != s:
dis[v] = math.inf
return dis
def dijkstra(graph,s):
queueh = []
parent = {}
seen = set()
heapq.heappush(queueh,(0,s))
distance = init_dis(graph,s)
    while len(queueh) > 0 :
        item = heapq.heappop(queueh)
        pair = item[1]
        dist = item[0]
        if pair in seen:  # skip stale heap entries for already-finalized vertices
            continue
        seen.add(pair)
vertex = graph[pair].keys()
for w in vertex:
if w not in seen:
if dist + graph[pair][w] < distance[w]:
distance[w] = dist + graph[pair][w]
parent[w] = pair
heapq.heappush(queueh,(distance[w],w))
return parent,distance
def main():
parents,distance = dijkstra(graph,"A")
v = "E"
print(parents)
print(distance)
main()
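# A minimal sketch of recovering the actual shortest path from the parent map
# returned above (same graph, source "A"; target "E" chosen for illustration):
def shortest_path(parents, target):
    # walk parent pointers from the target back to the source, then reverse
    path = [target]
    while path[-1] in parents:
        path.append(parents[path[-1]])
    return list(reversed(path))
parents, distance = dijkstra(graph, "A")
print(shortest_path(parents, "E"))  # ['A', 'C', 'B', 'D', 'E'] for this graph, total cost 7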
|
[
"noreply@github.com"
] |
1536696608.noreply@github.com
|
46500e06310a7c56f312a32b1272edbd5ce08c71
|
25fed1e45c4b6bdbf50d2beb91db97d8dbbe6926
|
/image_processing/basic_processing.py
|
b1a5c9d7915bf0048e4f2a3003fd0ddb44d9c883
|
[] |
no_license
|
qingnianmeng/Image-processing-app
|
d2ba5629683d41101a55949ab61d7a26df972415
|
86b645973bc2222009170d359255bdfb9b3b186d
|
refs/heads/master
| 2023-01-23T19:00:49.317135
| 2020-12-03T23:50:00
| 2020-12-03T23:50:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,949
|
py
|
"""
Module contains functions that perform some basic image operations
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
def scale(img, factor):
"""
scale the image based on factor.
:param img: target image to scale
:param factor: scale factor
:return: rescaled image
"""
if factor <= 0:
raise ValueError
if factor >= 1:
return cv2.resize(img, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)
else:
return cv2.resize(img, None, fx=factor, fy=factor, interpolation=cv2.INTER_AREA)
def blur(img, sigma):
"""
performs gaussian filter on the target image.
User specify kernel sigma
:param img: image
:param sigma: standard deviation of gaussian kernel
"""
sigma = int(sigma)
fil = cv2.getGaussianKernel(sigma * 3, sigma)
fil = fil * fil.T
return cv2.filter2D(img, -1, fil)
def cartoonize(img):
"""
cartoonize an image, to speed up, the images are scaled down before
applying filters.
https://towardsdatascience.com/building-an-image-cartoonization-web-app-with-python-382c7c143b0d
:param img: target image
:return: cartoon image
"""
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# scale down the image and the apply median blur, then scale up.
gray_img = scale(cv2.medianBlur(scale(gray_img, 0.5), 5), 2)
edges = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 2)
img = scale(cv2.bilateralFilter(scale(img, 0.5), 4, 20, 4), 2)
return cv2.bitwise_and(img, img, mask=edges)
def grad(im):
"""
find the absolute value of the sum of vertical gradient and horizontal gradient
:param im: rbg image
:return: gradient map of the input image in gray scale
"""
im = im.copy()
im = cv2.cvtColor(im * 255, cv2.COLOR_RGB2GRAY)
im = cv2.GaussianBlur(im, (3, 3), 0) / 255
fil1 = np.array([1, 0, -1])
x_grad = np.abs(cv2.filter2D(im, -1, fil1))
y_grad = np.abs(cv2.filter2D(im.T, -1, fil1))
return x_grad + y_grad.T
def seam_finding(img):
"""
find the best seam given cost map
:param img: cost map
:return: accumulating cost map and corresponding backtrack
"""
h, w = img.shape
    backtrack = np.zeros_like(img, dtype=int)
    dp = img.copy()
    for i in range(1, h):
        for j in range(w):
            # consider the three candidate pixels in the row above: up-left, up, up-right
            lo = max(0, j - 1)
            idx = lo + np.argmin(dp[i - 1, lo: min(j + 2, w)])
            backtrack[i, j] = idx
            dp[i, j] = dp[i, j] + dp[i - 1, idx]
return dp, backtrack
def seam_mask(img):
"""
find the mask of the image that masks out the lowest cost seam
:param img: cost map
:return: seam mask
"""
e_map, backtrack = seam_finding(img)
smallest_seam_idx = np.argmin(e_map[-1])
seam = np.ones_like(img, dtype=bool)
cur_j = smallest_seam_idx
for i in range(img.shape[0] - 1, -1, -1):
cur_j = int(cur_j)
seam[i, cur_j] = False
cur_j = backtrack[i, cur_j]
return seam
def remove_seam(img, mask):
"""
given rgb image and mask, remove the pixels according to the mask
:param img: rgb image
:param mask: seam mask
:return: resized image
"""
h, w, c = img.shape
return img.copy()[mask].reshape(h, w - 1, c)
def seam_carving(img, num_seam):
"""
resizing the image in 1 dimension.
Default resize along the width
:param img: img to resize
:param num_seam: size to reduce
:param mode: if given as 'vertical', resize in the vertical direction
:return: resized image
"""
img = img.copy()
for _ in range(num_seam):
e = grad(img)
mask = seam_mask(e)
img = remove_seam(img, mask)
return img
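# A minimal usage sketch (the file name is hypothetical, for illustration only);
# grad() above assumes a float RGB image with values in [0, 1]:
#   img = plt.imread('photo.png')[:, :, :3]   # float RGB in [0, 1]
#   narrower = seam_carving(img, 20)          # remove 20 vertical seams
#   plt.imsave('photo_carved.png', narrower)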
def contrast_enhancement(img, clip=40):
"""
Contrast enhancement using
CLAHE (Contrast Limited Adaptive Histogram Equalization)
:param img: input rgb image
:param clip: clip threshold
:return: enhanced rgb image
"""
img = np.array(img.copy())
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
clahe = cv2.createCLAHE(clipLimit=clip)
L = img[:, :, 0]
img[:, :, 0] = clahe.apply(L)
return cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
def histogram_equalization(img):
"""
Histogram color equalization
:param img: input rgb image
:return: enhanced rgb image
"""
img = np.array((img.copy()))
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
L = img[:, :, 0]
img[:, :, 0] = cv2.equalizeHist(L)
return cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
def histogram_visualization(img):
"""
plot histogram of pixel values in L channel of LAB color space
"""
img = np.array((img.copy()))
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
L = img[:, :, 0]
fig = plt.figure()
plt.hist(L.ravel(), bins='auto')
fig.savefig('plot.png')
|
[
"qm4@illinois.edu"
] |
qm4@illinois.edu
|
fc678eca34ca0b34e88a85b8926db4d11200920f
|
16e8963be3b720b8c3ad5a792ed68a467e772ba4
|
/Prac6/guitar.py
|
9d653d28ef7748d7551b8269ebc3fc1ac477d125
|
[] |
no_license
|
Kampangchau/practicals-of-programming2
|
0b33475fb7941b1628ba8d22505e042807deb2fe
|
692e47e3b208a3425b8db759d0c7192839400345
|
refs/heads/master
| 2022-07-02T17:56:06.373542
| 2020-05-18T04:19:23
| 2020-05-18T04:19:23
| 259,234,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
CURRENT_YEAR = 2017
VINTAGE_AGE = 50
class Guitar:
"""Guitar class for storing details of a guitar."""
def __init__(self, name, year, cost):
"""constructor that takes three parameters and registers the result"""
self.name = name
self.year = year
self.cost = cost
def __str__(self):
"""A string representing a guitar object"""
return "{} ({}) : ${:.2f}".format(self.name, self.year, self.cost)
def get_age(self):
"""way to get age of guitar"""
return (2018 - self.year)
    def is_vintage(self):
        """way to determine if a guitar is vintage"""
        return self.get_age() >= VINTAGE_AGE
def unit_test():
"""test of guitar"""
    # create test instances (the second and third are assumed example guitars,
    # chosen so their ages match the expected outputs printed below)
    gibson_guitar = Guitar("Gibson L-5 CES", 1922, 16035.40)
    another_guitar = Guitar("Another Guitar", 2012, 1012.00)
    fifty_guitar = Guitar("Fifty Year Old Guitar", 1968, 500.00)
# test get_age()
print("{} get_age() - Expected 96. Got {}".format(
gibson_guitar, gibson_guitar.get_age()))
print("{} get_age() - Expected 6. Got {}".format(
another_guitar, another_guitar.get_age()))
# test is_vintage()
print("{} is_vintage() - Expected True. Got {}".format(
gibson_guitar, gibson_guitar.is_vintage()))
print("{} is_vintage() - Expected False. Got {}".format(
another_guitar, another_guitar.is_vintage()))
print("{} is_vintage() - Expected True. Got {}".format(
fifty_guitar, fifty_guitar.is_vintage()))
if __name__ == "__main__":
unit_test()
|
[
"changlin.wen\"my.jcu.edu.au"
] |
changlin.wen"my.jcu.edu.au
|
1252fccbc56b12ce2a33f22a90f63c8d9c9e6ceb
|
a8eb6b808ee6fcaf2a5aed07b8d565f80fe36627
|
/rfPractice/DecisionTree.py
|
bf905f2b81499b923d79eb9e415f79a54571f660
|
[] |
no_license
|
CEfanmin/EnsembleTree
|
1ece7c95164c72aec142daf1dba632931a44cad5
|
8b02757303904f85940cac997ee76f9375727d5c
|
refs/heads/main
| 2023-01-30T23:24:32.739744
| 2020-12-12T03:41:40
| 2020-12-12T03:41:40
| 320,728,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,799
|
py
|
# CART on the Bank Note dataset
from random import seed
from random import randrange
from csv import reader
# Load a CSV file
def load_csv(filename):
	# open in text mode so csv.reader receives strings (binary mode breaks under Python 3)
	with open(filename, "r") as file:
		dataset = list(reader(file))
	return dataset
# Convert string column to float
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
dataset_split = list()
dataset_copy = list(dataset)
fold_size = int(len(dataset) / n_folds)
for i in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
return dataset_split
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
folds = cross_validation_split(dataset, n_folds)
scores = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = accuracy_metric(actual, predicted)
scores.append(accuracy)
return scores
# Split a data set based on an attribute and an attribute value
def test_split(index, value, dataset):
left, right = list(), list()
for row in dataset:
if row[index] < value:
left.append(row)
else:
right.append(row)
return left, right
# Calculate the Gini index for a split dataset
def gini_index(groups, class_values):
gini = 0.0
for class_value in class_values:
for group in groups:
size = len(group)
if size == 0:
continue
proportion = [row[-1] for row in group].count(class_value) / float(size)
gini += (proportion * (1.0 - proportion))
return gini
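# A quick worked check of the variant above (toy rows whose last element is the class label):
#   gini_index(([[1, 0], [2, 0]], [[3, 1], [4, 1]]), [0, 1]) == 0.0   # pure groups: a perfect split
#   gini_index(([[1, 0], [2, 1]], [[3, 0], [4, 1]]), [0, 1]) == 1.0   # 50/50 groups: worst case for this variant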
# Select the best split point for a dataset
def get_split(dataset):
class_values = list(set(row[-1] for row in dataset))
b_index, b_value, b_score, b_groups = 999, 999, 999, None
for index in range(len(dataset[0])-1):
for row in dataset:
groups = test_split(index, row[index], dataset)
gini = gini_index(groups, class_values)
if gini < b_score:
b_index, b_value, b_score, b_groups = index, row[index], gini, groups
print ({'index':b_index, 'value':b_value})
return {'index':b_index, 'value':b_value, 'groups':b_groups}
# Create a terminal node value
def to_terminal(group):
outcomes = [row[-1] for row in group]
return max(set(outcomes), key=outcomes.count)
# Create child splits for a node or make terminal
def split(node, max_depth, min_size, depth):
left, right = node['groups']
del(node['groups'])
# check for a no split
if not left or not right:
node['left'] = node['right'] = to_terminal(left + right)
return
# check for max depth
if depth >= max_depth:
node['left'], node['right'] = to_terminal(left), to_terminal(right)
return
# process left child
if len(left) <= min_size:
node['left'] = to_terminal(left)
else:
node['left'] = get_split(left)
split(node['left'], max_depth, min_size, depth+1)
# process right child
if len(right) <= min_size:
node['right'] = to_terminal(right)
else:
node['right'] = get_split(right)
split(node['right'], max_depth, min_size, depth+1)
# Build a decision tree
def build_tree(train, max_depth, min_size):
root = get_split(train)
split(root, max_depth, min_size, 1)
return root
# Make a prediction with a decision tree
def predict(node, row):
if row[node['index']] < node['value']:
if isinstance(node['left'], dict):
return predict(node['left'], row)
else:
return node['left']
else:
if isinstance(node['right'], dict):
return predict(node['right'], row)
else:
return node['right']
# Classification and Regression Tree Algorithm
def decision_tree(train, test, max_depth, min_size):
tree = build_tree(train, max_depth, min_size)
predictions = list()
for row in test:
prediction = predict(tree, row)
predictions.append(prediction)
return(predictions)
# Test CART on Bank Note dataset
seed(1)
# load and prepare data
filename = 'data_banknote_authentication.csv'
dataset = load_csv(filename)
# convert string attributes to integers
for i in range(len(dataset[0])):
str_column_to_float(dataset, i)
# evaluate algorithm
n_folds = 5
max_depth = 5
min_size = 10
scores = evaluate_algorithm(dataset, decision_tree, n_folds, max_depth, min_size)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
|
[
"840831204@qq.com"
] |
840831204@qq.com
|
09df5c5e3da9cb387b466b48c192d2f7c5637d02
|
946c75548a83958b1f365b29c86ebd27d681c409
|
/ca_main/src/obs_avoid.py
|
344e817976e10150b2b69b2955d51161b30d6560
|
[] |
no_license
|
umerjamil16/castaway-on-island
|
b48d4a6525f41ae862e71a267bdb2f3d657f5b71
|
c608184385f97b6374467c5dd9d9fa78a1428afc
|
refs/heads/master
| 2020-07-02T22:01:57.093153
| 2019-08-15T20:25:30
| 2019-08-15T20:25:30
| 201,680,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
#! /usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
pub = None
thres = 1
def clbk_laser(msg):
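    # the 720-beam scan is split into five contiguous 144-beam sectors,
    # each reading clamped to a 10 m maximum range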
regions = {
'right': min(min(msg.ranges[0:143]), 10),
'fright': min(min(msg.ranges[144:287]), 10),
'front': min(min(msg.ranges[288:431]), 10),
'fleft': min(min(msg.ranges[432:575]), 10),
'left': min(min(msg.ranges[576:719]), 10),
}
take_action(regions)
def take_action(regions):
msg = Twist()
linear_x = 0
angular_z = 0
global thres
state_description = ''
if regions['front'] > thres and regions['fleft'] > thres and regions['fright'] > thres:
state_description = 'case 1 - nothing'
linear_x = 0.6
angular_z = 0
elif regions['front'] < thres and regions['fleft'] > thres and regions['fright'] > thres:
state_description = 'case 2 - front'
linear_x = 0
angular_z = 0.3
elif regions['front'] > thres and regions['fleft'] > thres and regions['fright'] < thres:
state_description = 'case 3 - fright'
linear_x = 0
angular_z = 0.3
elif regions['front'] > thres and regions['fleft'] < thres and regions['fright'] > thres:
state_description = 'case 4 - fleft'
linear_x = 0
angular_z = -0.3
elif regions['front'] < thres and regions['fleft'] > thres and regions['fright'] < thres:
state_description = 'case 5 - front and fright'
linear_x = 0
angular_z = 0.3
elif regions['front'] < thres and regions['fleft'] < thres and regions['fright'] > thres:
state_description = 'case 6 - front and fleft'
linear_x = 0
angular_z = -0.3
elif regions['front'] < thres and regions['fleft'] < thres and regions['fright'] < thres:
state_description = 'case 7 - front and fleft and fright'
linear_x = 0
angular_z = 0.3
elif regions['front'] > thres and regions['fleft'] < thres and regions['fright'] < thres:
state_description = 'case 8 - fleft and fright'
linear_x = 0.3
angular_z = 0
else:
state_description = 'unknown case'
rospy.loginfo(regions)
rospy.loginfo(state_description)
msg.linear.x = linear_x
msg.angular.z = angular_z
pub.publish(msg)
def main():
global pub
rospy.init_node('obstacle_avoidence_py')
pub = rospy.Publisher('/robot_1/cmd_vel', Twist, queue_size=1)
sub = rospy.Subscriber('/robot_1/base_scan', LaserScan, clbk_laser)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"umerjamil@protonmail.com"
] |
umerjamil@protonmail.com
|
079ac10eae3ffca14bd74b719a85f1c0ba7a4d4c
|
596ae66458321f86d7000ac24803e9d90deb764b
|
/python/sparktk/frame/ops/quantiles.py
|
2d25c19fe471568ba78f7f67788d69b6d06f143c
|
[
"Apache-2.0"
] |
permissive
|
tlisonbee/spark-tk
|
162be1aed9adf7eff45af9f239c2df1d50cb53f2
|
cbebbe0efc96f8631c428fad5ad83ca76e3629bf
|
refs/heads/master
| 2021-01-24T23:50:23.730034
| 2016-05-19T23:48:26
| 2016-05-19T23:48:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
def quantiles(self, column_name, quantiles):
"""
Returns a new frame with Quantiles and their values.
:param column_name: The column to calculate quantiles
:param quantiles: What is being requested.
:return: A new frame with two columns (float64): requested Quantiles and their respective values.
Calculates quantiles on the given column.
Examples
--------
<hide>
>>> data = [[100],[250],[95],[179],[315],[660],[540],[420],[250],[335]]
>>> schema = [('final_sale_price', int)]
>>> my_frame = tc.to_frame(data, schema)
<progress>
</hide>
Consider Frame *my_frame*, which accesses a frame that contains a single
column *final_sale_price*:
.. code::
>>> my_frame.inspect()
[#] final_sale_price
=====================
[0] 100
[1] 250
[2] 95
[3] 179
[4] 315
[5] 660
[6] 540
[7] 420
[8] 250
[9] 335
To calculate 10th, 50th, and 100th quantile:
.. code::
>>> quantiles_frame = my_frame.quantiles('final_sale_price', [10, 50, 100])
<progress>
A new Frame containing the requested Quantiles and their respective values
will be returned :
.. code::
>>> quantiles_frame.inspect()
[#] Quantiles final_sale_price_QuantileValue
==============================================
[0] 10.0 95.0
[1] 50.0 250.0
[2] 100.0 660.0
"""
return self._tc.to_frame(self._scala.quantiles(column_name, self._tc.jutils.convert.to_scala_list_double(quantiles)))
|
[
"briton.barker@intel.com"
] |
briton.barker@intel.com
|
6aa53c6bce92c102ff21ae9076922f2929effc42
|
948a8fe4a46bbdda00f3af5d7a999092fd546808
|
/src/crf.py
|
f43327729459334cd0646378348ce4d419fbf2f5
|
[] |
no_license
|
wencanluo/QuantitativeSummarization
|
fcaf072566f0a4907f383042af0054ed1c47d82e
|
8c34923e3447e517ee99fc00fda2bd81b34e25a0
|
refs/heads/master
| 2020-12-21T01:07:37.842895
| 2017-05-11T01:06:02
| 2017-05-11T01:06:02
| 56,019,382
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,333
|
py
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# -*- coding: utf-8 -*-
'''
CRF wrapper for wapiti
'''
from classifier import *
import os, codecs
class CRF(Classifier):
'''
The Wrapper of the Wapiti CRF
'''
def __init__(self, root=None):
'''
@param root: string, the root folder of the Wapiti toolkit, it should contain the execute file
'''
self.root = root
def train(self, training_set_file, pattern_file, model_file):
'''
@param training_set_file, the crf feature file for the training set
@param pattern_file, the feature pattern file
@param model_file, the output model
'''
print "training..."
script = os.path.join(self.root, 'wapiti.exe')
cmd = ' '.join([script, 'train', '-e 0.00002 -t 1 -T crf -a l-bfgs -1 0 -2 1 --histsz 5', '-p', pattern_file, training_set_file, model_file])
print cmd
os.system(cmd)
def predict(self, test_file, model_file, output_file):
'''
do the decoding, use the post-probability decoding
'''
print "testing..."
script = os.path.join(self.root, 'wapiti.exe')
cmd = ' '.join([script, 'label', '-p -m', model_file, test_file, output_file])
print cmd
os.system(cmd)
def read_file_raw(self, input, has_score=False):
'''
        read the crf file: all the rows will be extracted
'''
sentence = []
score = None
for line in codecs.open(input, "r", "utf-8"):
line = line.strip()
if len(line) == 0:
if score:
try:
score = float(score)
except Exception as e:
score = 0
if has_score:
yield sentence, score
else:
yield sentence
sentence = []
continue
elif line[0] == '#': #score
score = line.split()[-1]
continue
tmp = line.split()
sentence.append( tmp )
def read_file_generator_index(self, input, index=None, has_score=False):
'''
read the crf file: the first row is the token and the last row is the label
'''
data = []
for x in index:
data.append([])
score = None
for line in codecs.open(input, "r", "utf-8"):
line = line.strip()
if len(line) == 0:
if score:
try:
score = float(score)
except Exception as e:
score = 0
yield data, score
else:
yield data
data = []
for x in index:
data.append([])
continue
elif line[0] == '#': #score
score = line.split()[2]
continue
tmp = line.split()
for i, x in enumerate(index):
if x == -1:
data[i].append(tmp[-1].split('/')[0])
elif x == 0:
data[i].append(tmp[0].split(':')[-1])
else:
data[i].append(tmp[x])
def read_file_generator(self, input, has_score=False):
'''
read the crf file: the first row is the token and the last row is the label
'''
one_tokens = []
one_labels = []
score = None
for line in codecs.open(input, "r", "utf-8"):
line = line.strip()
if len(line) == 0:
if score:
try:
score = float(score)
except Exception as e:
score = 0
yield one_tokens, one_labels, score
else:
yield one_tokens, one_labels
one_tokens = []
one_labels = []
continue
elif line[0] == '#': #score
score = line.split()[2]
continue
tmp = line.split()
one_tokens.append( tmp[0].split(':')[-1] )
one_labels.append( tmp[-1].split('/')[0] )
def read_file(self, input, has_score=False):
'''
read the crf file: only the first row (token) and the last row (label)
'''
tokens = []
labels = []
one_tokens = []
one_labels = []
scores = []
score = None
for line in codecs.open(input, "r", "utf-8"):
line = line.strip()
if len(line) == 0:
tokens.append(one_tokens)
labels.append(one_labels)
one_tokens = []
one_labels = []
if score:
try:
score = float(score)
except Exception as e:
score = 0
scores.append(score)
continue
elif line[0] == '#': #score
score = line.split()[2]
continue
tmp = line.split()
one_tokens.append( tmp[0].split(':')[-1] )
one_labels.append( tmp[-1].split('/')[0] )
if has_score:
return tokens, labels, scores
else:
return tokens, labels
def read_npost_output(self, input, tags):
'''
read the crf file (with the nbest post probability for each tags): only the first row (token) and the last row (label)
'''
sentence_count = 0
one_sentence = []
for line in codecs.open(input, "r", "utf-8"):
line = line.strip()
if len(line) == 0:
yield one_sentence
one_sentence = []
sentence_count += 1
continue
tmp = line.split()
one_word = []
one_word.append(tmp[0].split(':')[-1]) #token
one_word.append(tmp[len(tmp)-len(tags)-1])#tag
dict = {}
for i in range(len(tags)):
t_p = tmp[len(tmp) - i - 1].split('/')
dict[t_p[0]] = float(t_p[1])
one_word.append(dict)
one_sentence.append(one_word)
def write_file(self, tokens, true_tags, predict_tags, output):
fout = codecs.open(output, "w", "utf-8")
for token, true_tag, predict_tag in zip(tokens, true_tags, predict_tags):
for t, t_tag, p_tag in zip(token, true_tag, predict_tag):
fout.write(' '.join([t, t_tag, p_tag]))
fout.write('\n')
fout.write('\n')
class CRF_Vertibi(CRF):
def predict(self, test_file, model_file, output_file):
'''
        use the Viterbi decoding
'''
print "testing..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'label', '-m', model_file, test_file, output_file])
print cmd
os.system(cmd)
class CRF_Score(CRF):
def predict(self, test_file, model_file, output_file):
'''
output the score as probability
'''
print "testing..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'label', '-s -p -m', model_file, test_file, output_file])
print cmd
os.system(cmd)
class CRF_Interpolation(CRF):
'''
    interpolation of two models
'''
def predict(self, test_file, model_file, model_file2, flambda, output_file):
print "testing..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'label', '-p -m', model_file, '--model2', model_file2, '--lamda', flambda, test_file, output_file])
print cmd
os.system(cmd)
class CRF_Score_Nbest(CRF):
'''
output the n-best with scores
'''
def predict(self, test_file, model_file, output_file):
print "testing..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'label', '-s -n 10 -p -m', model_file, test_file, output_file])
print cmd
os.system(cmd)
class CRF_NoPattern(CRF):
'''
train a crf model without using the pattern file
'''
def train(self, training_set_file, model_file):
'''
@param training_set_file, the crf feature file for the training set
@param model_file, the output model
'''
print "training..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'train', '-e 0.00002 -t 1 -T crf -a l-bfgs -1 0 -2 1 --histsz 5', training_set_file, model_file])
print cmd
os.system(cmd)
def predict(self, test_file, model_file, output_file):
print "testing..."
script = os.path.join(self.root, 'wapiti')
cmd = ' '.join([script, 'label', '-c -p -m', model_file, test_file, output_file])
print cmd
os.system(cmd)
if __name__ == '__main__':
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('/lm/scratch/wencan_luo/wencan_luo/cws_slu/config/default.cfg')
cwc_train_output = config.get('cwc', 'cwc_train_output_npost')
cwc_test_output = config.get('cwc', 'cwc_test_output_npost')
cwc_train_json_output = config.get('cwc', 'cwc_train_output_npost_json')
cwc_test_json_output = config.get('cwc', 'cwc_test_output_npost_json')
import time
time_start = time.clock()
tags=['b', 'i', 'e', 's']
crf = CRF()
time_end = time.clock()
print "running time: %s" % (time_end - time_start)
|
[
"wencanluo.cn@gmail.com"
] |
wencanluo.cn@gmail.com
|
8879183ca7d037d2f672f977fc300d2c39569277
|
cea61ad705399327f24a6ac4aa31b3084eac42f0
|
/pysharp/libs/common/middlewares.py
|
8c1cb55316975d773c81c2b1b494a86b48ed60a6
|
[] |
no_license
|
BiaoLiu/pysharp
|
e5e0d789c5af1731c201fdbd7df9a3d46d7fdd9b
|
b45db5e63802a19be2368ae865c9f4b47dfb7f95
|
refs/heads/master
| 2021-01-12T09:11:35.148826
| 2017-05-17T01:58:41
| 2017-05-17T01:58:41
| 80,979,236
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,577
|
py
|
# coding=utf-8
import re
import json
from django.conf import settings
# from settings import SITE_URL
from django.views.debug import technical_500_response
import sys
from .log import logger
from .utils import html_escape, url_escape, html_escape_name, check_script
class CheckXssMiddleware(object):
    def process_view(self, request, view, args, kwargs):
        try:
            # check whether the view is exempt from escaping
            if getattr(view, 'escape_exempt', False):
                return None
            escapeType = None
            if getattr(view, 'escape_script', False):
                escapeType = "script"
            elif getattr(view, 'escape_url', False):
                escapeType = "url"
            # escape GET parameters
            request.GET = self.__escape_data(request.path, request.GET, escapeType)
            # escape POST parameters
            request.POST = self.__escape_data(request.path, request.POST, escapeType)
        except Exception as e:
            logger.error(u"CheckXssMiddleware escaping failed! %s" % e)
        return None
    def __escape_data(self, path, query_dict, escape_type=None):
        """
        Escape GET/POST parameters
        """
        data_copy = query_dict.copy()
        new_data = {}
        for _get_key, _get_value in data_copy.items():
            # JSON strings are not escaped field by field
            try:
                to_json = json.loads(_get_value)
                is_json = True
            except Exception as e:
                is_json = False
            # escape the new data
            if not is_json:
                try:
                    if escape_type is None:
                        use_type = self.__filter_param(path, _get_key)
                    else:
                        use_type = escape_type
                    if use_type == 'url':
                        new_data[_get_key] = url_escape(_get_value)
                    elif use_type == 'script':
                        new_data[_get_key] = check_script(_get_value, 1)
                    elif use_type == 'name':
                        new_data[_get_key] = html_escape_name(_get_value)
                    else:
                        new_data[_get_key] = html_escape(_get_value, 1)
                except Exception as e:
                    logger.error(u"CheckXssMiddleware failed to escape GET/POST parameter! %s" % e)
                    new_data[_get_key] = _get_value
            else:
                try:
                    new_data[_get_key] = html_escape(_get_value, 1, True)
                except Exception as e:
                    logger.error(u"CheckXssMiddleware failed to escape GET/POST parameter! %s" % e)
                    new_data[_get_key] = _get_value
        # update the copied data
        data_copy.update(new_data)
        return data_copy
    def __filter_param(self, path, param):
        """
        Special-case path handling
        @param path: request path
        @param param: parameter name
        @return: 'html/name/url/script'
        """
        use_name, use_url, use_script = self.__filter_path_list()
        try:
            result = 'html'
            # name filter
            for name_path, name_v in use_name.items():
                is_path = re.match(r'^%s' % name_path, path)
                if is_path and param in name_v:
                    result = 'name'
                    break
            # url filter
            if result == 'html':
                for url_path, url_v in use_url.items():
                    is_path = re.match(r'^%s' % url_path, path)
                    if is_path and param in url_v:
                        result = 'url'
                        break
            # script filter
            if result == 'html':
                for script_path, script_v in use_script.items():
                    is_path = re.match(r'^%s' % script_path, path)
                    if is_path and param in script_v:
                        result = 'script'
                        break
        except Exception as e:
            logger.error(u"CheckXssMiddleware special path handling failed! %s" % e)
            result = 'html'
        return result
    def __filter_path_list(self):
        """
        Register special paths here
        """
        use_name = {}
        use_url = {}
        use_script = {}
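        # An illustrative registration (hypothetical paths and parameter names):
        #   use_name = {'/account/profile/': ['nickname']}   # escape 'nickname' with html_escape_name
        #   use_url = {'/redirect/': ['next']}                # escape 'next' with url_escape
        #   use_script = {'/editor/save/': ['body']}          # run 'body' through check_script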
return (use_name, use_url, use_script)
'''
Administrators may view full error details
'''
class UserBasedExceptionMiddleware:
    def process_exception(self, request, exception):
        # the superuser flag lives on request.user, not on the request itself
        if request.user.is_superuser or request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
            return technical_500_response(request, *sys.exc_info())
|
[
"452381072@qq.com"
] |
452381072@qq.com
|
10b3f098f5437b76e5a0ca4059cd190fc7d29bb1
|
2740a350055c34a398ecfbf3c8b41a812e0bae9e
|
/calculator.py
|
bc6fecbf2a2dfbf93ac8251af6fceeaa992a8190
|
[] |
no_license
|
vipod/wxpython_calculator
|
8c426b663cca782af1bb5d344b5b934364684101
|
e48d225e2ca55ecd33e15f93c3c2b88e3ec41750
|
refs/heads/master
| 2018-12-28T09:31:35.217151
| 2013-10-24T10:19:19
| 2013-10-24T10:19:19
| 13,828,727
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
"""
wxPython learning program: Integer Calculator
Author: Vitaliy Podoba vitaliypodoba@gmail.com
"""
import wx
# list of math operations and digits to check against
OPERATIONS = ('/', '*', '-', '+')
DIGITS = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
class Calculator(wx.Dialog):
"""Python Integer Calculator"""
def __init__(self):
# initialize our dialog window with: title and size
wx.Dialog.__init__(self, None, id=-1, title='Calculator',
size=wx.Size(182, 190))
        # sizers will allow us to put buttons into a nice GRID layout
sizer = wx.GridBagSizer(hgap=7, vgap=10)
# add calculator display - text area in read-only mode
# text inside will be right aligned
self.display = wx.TextCtrl(self, id=-1, value='0',
size=wx.Size(182, 40),
style=wx.TE_READONLY|wx.TE_RIGHT)
sizer.Add(self.display, (0, 0), (1, 4), wx.EXPAND)
# put buttons into 4x4 grid
x = 0
y = 1
for row in (('7', '8', '9', '/'),
('4', '5', '6', '*'),
('1', '2', '3', '-'),
('0', 'C', '=', '+')):
for blabel in row:
# create button
button = wx.Button(self, id=-1, label=blabel, size=wx.Size(40, 20))
# bind mouse click on button
self.Bind(wx.EVT_BUTTON, self.HandleButton, button)
# add button to grid sizer
sizer.Add(button, (y, x), (1, 1))
x += 1
x = 0
y += 1
# set a few variables for calculator to work
self.operation = None # remember last operation
self.last = None # remember last number entered
self.resolved = None # flag to clear screen after solve()
# add our grid bag sizer to our dialog
self.SetSizer(sizer)
# set dialog centrally on the screen
self.CenterOnScreen()
def HandleButton(self, event):
"""This Calculator method is called on every button click"""
# define event variables: button, it's label, text field value
button = event.GetEventObject()
label = button.GetLabel()
value = self.getValue()
# below we handle our event differently based on button clicked
# Clear button
if label == 'C':
# simply reset display and forgot any custom calculator variables
self.Clear()
# digit button pressed
elif label in DIGITS:
# it's important to clear display before:
# * new operation
# * after zero digit
            # * and after solve() function, '=' button
if value == '0' or value in OPERATIONS or self.resolved:
self.update('')
self.resolved = False
self.display.AppendText(label)
# equal sign: try to calculate results
elif label == '=':
# try to solve our equation
self.solve()
# clicked operation button
elif label in OPERATIONS:
# before any new operation try to solve previous operation
self.solve()
# remember previously entered number
# if user is just changing operation - no need to remember any value
if value not in OPERATIONS:
self.last = self.getValue()
# update last operation used and set display to operation label
self.operation = label
self.update(label)
def Clear(self):
"""Calculator Clear button"""
self.display.SetValue('0')
self.operation = None
self.last = None
def update(self, value):
"""Shortcut for display update value"""
self.display.SetValue(value)
def getValue(self):
"""Shortcut for display get value"""
return self.display.GetValue()
def solve(self):
"""Equal operation: let's calculate result"""
# only calculate anything if we got both: operation and last value
if (self.last != None) and (self.operation != None):
# here we use strings and eval to calculate result
result = str(eval(
# e.g. "67 - 24"
self.last + self.operation + self.getValue()
))
# finally reset calculator values and update display with result
self.operation = None
self.last = None
self.update(result)
self.resolved = True
def main():
# run the application
app = wx.App()
    # start calculator dialog
dlg = Calculator()
dlg.ShowModal()
dlg.Destroy()
# run the calculator when executed as a script
if __name__ == '__main__':
main()
|
[
"vitaliypodoba@gmail.com"
] |
vitaliypodoba@gmail.com
|
1954647244f670c5dc25f03b9b3410053e37fac8
|
07520f0b5c41dee98fe9ecfefd890b2a01d4672a
|
/ROS_ws/src/hardware_coms/setup.py
|
f518b3d23de69bd01fca19c2fafa1f09b3d3f026
|
[] |
no_license
|
asmorgan24/project_cuboid
|
a33c4c44b0e64690925664483f1fb5d09253694f
|
81f28bb319817de4bae5917e1d7c09a8fe3a4fb2
|
refs/heads/master
| 2021-07-15T13:12:03.464189
| 2019-02-13T23:07:41
| 2019-02-13T23:07:41
| 153,474,422
| 0
| 1
| null | 2018-11-30T16:58:29
| 2018-10-17T14:49:25
|
C++
|
UTF-8
|
Python
| false
| false
| 214
|
py
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d1 = generate_distutils_setup(
packages=['hardware_coms'],
package_dir={'': 'src'}
)
setup(**d1)
|
[
"andrew.morgan@yale.edu"
] |
andrew.morgan@yale.edu
|
df281edec9a4154b5ca80600c51d4c254fbc3f42
|
4aba8e6477aaff482294e93da518afc0351139fc
|
/dataset.py
|
61dd67f10dabe8bd1339e37d769696acd90bb74e
|
[] |
no_license
|
akshit61/CRNN_OCR
|
e0930ef4d11ec3f351fda14f6eb265b8ac701790
|
2216415f836757a61b55c88f6f0a50a5fe98497b
|
refs/heads/master
| 2023-02-18T09:58:30.122690
| 2021-01-16T17:13:22
| 2021-01-16T17:13:22
| 330,213,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
import albumentations
import torch
import numpy as np
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class OCR_data:
def __init__(self, image_path, target, resize=None):
self.image_paths = image_path
self.target = target
self.resize = resize
self.aug = albumentations.Compose([albumentations.Normalize(always_apply=True)])
def __len__(self):
return len(self.image_paths)
def __getitem__(self, item):
image = Image.open(self.image_paths[item]).convert("RGB")
target = self.target[item]
if self.resize:
image = image.resize((self.resize[1], self.resize[0]), resample=Image.BILINEAR)
image = np.array(image)
aug = self.aug(image=image)
image = aug['image']
# to make channel first
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
'''return {'images': torch.tensor(image, dtype=torch.float),
'targets': torch.tensor(target, dtype=torch.long)}'''
return {'images': torch.tensor(image, dtype=torch.float),
'targets': target}
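# A minimal usage sketch (paths and targets are hypothetical, for illustration only):
#   ds = OCR_data(image_path=['imgs/0001.png'], target=[[1, 5, 12]], resize=(32, 128))
#   sample = ds[0]
#   sample['images'].shape  # torch.Size([3, 32, 128]) after resize and normalization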
|
[
"akshitkotian61@gmail.com"
] |
akshitkotian61@gmail.com
|
e964e343ab995aa3f38a6d6424a313f5e926a380
|
885e084d1c8dff17146c2c4ef69365dacf0167a6
|
/myus.py
|
8163344d7e27db13c110fea382cd8d2e2956b4aa
|
[
"MIT"
] |
permissive
|
amitattry/MyUs_api
|
06881568563c714231a25ce12a37b6ca23e487a5
|
e240cb0f4fdbfb9ed08af860c04503d425fb42c5
|
refs/heads/master
| 2021-05-07T15:21:53.290351
| 2017-11-08T07:01:05
| 2017-11-08T07:01:05
| 109,939,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,739
|
py
|
import requests
import os
import Tkinter
import json
from Tkinter import *
from tkFileDialog import *
def zoosk(username,password):
""""
Headers - > Paras - > URL -> CAP
"""
headers = {
"X-Cbt":"T3y8j3vOZNJbQN1N-z0XFqCvxGV3vVfKnJqeygBenAWZXwEAANX_XJAQuG3fexoNQyufatJ5GlPxSPEscd9z_JneiftH-C10IA==",
"User-Agent":"ZooskAndroid/443 (Linux; U; Android 4.4.2; en-US; HUAWEI C199; Build/HuaweiC199)"
}
content = 'password=%s&login=%s&udid=12be4dfe06ac4223b60934304c656d5e % (password,username)'
url = 'https://api-android.zoosk.com/v4.0/api.php?locale=en_US&product=4&format=json&z_device_id=151010080201&rpc=login%2Fgeneral_v43'
requestsx = requests.post(url , headers, content)
print (requestsx.text)
def myus(username,password):
u = 'https://gateway.myus.com/Account/GetTokenForUser?api_key=px8zkanujj49vfzpm88mfvfz&isMobile=true'
h = {
"Origin": "file://",
"User-Agent": "Mozilla/5.0 (Linux; Android 4.4.2; HUAWEI C199 Build/HuaweiC199) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Crosswalk/23.53.589.4 Mobile Safari/537.36",
"Content-Type": "application/json;charset=UTF-8"
}
c = '{"UserName":"%s","Password":"%s"}' % (username,password)
requestsx = requests.post(url = u,headers=h,data = c)
out = json.loads(requestsx.text)
import datetime
date_time = datetime.datetime.now()
time = date_time.time()
print ('%s:%s:%s %s:%s - %s' % (time.hour, time.minute, time.second,username ,password ,out['access_token']))
filename = askopenfilename()
with open(filename) as f:
credentials = [x.strip().split(':') for x in f.readlines()]
for username,password in credentials:
myus(username,password)
|
[
"noreply@github.com"
] |
amitattry.noreply@github.com
|
6c0f0ce98c3564f0181e0dcecfc4c60c50a5b9e7
|
c577f5380b4799b4db54722749cc33f9346eacc1
|
/BugSwarm/scikit-learn-scikit-learn-87009782/buggy_files/sklearn/tests/test_multiclass.py
|
16588fe4010c56c223f976be164c3302f1bd128b
|
[] |
no_license
|
tdurieux/BugSwarm-dissection
|
55db683fd95f071ff818f9ca5c7e79013744b27b
|
ee6b57cfef2119523a083e82d902a6024e0d995a
|
refs/heads/master
| 2020-04-30T17:11:52.050337
| 2019-05-09T13:42:03
| 2019-05-09T13:42:03
| 176,972,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,405
|
py
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs, thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
|
[
"durieuxthomas@hotmail.com"
] |
durieuxthomas@hotmail.com
|
54c5388edad985329b1af131d036432a1bb73a88
|
a3ab60bdd004bfa22b400a554dff8f3d215b0fac
|
/foul_copy/Radix.py
|
cbde5f11b8affa2705abb0231ea1bb83841dece1
|
[] |
no_license
|
Aidaralievh/Aidaralievh
|
a9e5facc31825f21abba93e6ce6d1884a13b7963
|
3fe9254a77c123ae0164c730dfb3419b592d7012
|
refs/heads/master
| 2023-03-06T06:34:59.130439
| 2021-02-18T12:30:47
| 2021-02-18T12:31:12
| 322,245,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
inputt = open('input.txt')
output = open('output.txt', 'w')
read = inputt.read().split()
a = [int(h) for h in read]
del a[0]  # drop the first token (presumably the element count)
# counting sort over [-100, 100]; negative values index the count list
# from the end, which still gives every value its own distinct bucket
l = [0 for i in range(-100, 101)]
for x in range(len(a)):
    num = a[x]
    l[num] += 1
new = []
for m in range(-100, 101):
    if l[m]:
        for n_ in range(l[m]):
            new.append(m)
print(new)
output.write(' '.join(str(n) for n in new))  # output.txt was opened but never written
inputt.close()
output.close()
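
# A more conventional sketch of the same counting sort, assuming values lie
# in [-100, 100]: shift by an explicit offset instead of relying on Python's
# negative-index wraparound.
def counting_sort(values, lo=-100, hi=100):
    counts = [0] * (hi - lo + 1)
    for v in values:
        counts[v - lo] += 1  # bucket index is the value shifted by -lo
    result = []
    for bucket, c in enumerate(counts):
        result.extend([bucket + lo] * c)
    return result

# e.g. counting_sort([3, -1, 2, -1]) -> [-1, -1, 2, 3]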
|
[
"a.aidaraliev26@gmail.com"
] |
a.aidaraliev26@gmail.com
|
1c109a50966b0734437d36a25fa972e74522ae23
|
47ed490ff277c63374b26efb742b830237988b59
|
/finndata/finndata/settings.py
|
91e804075fc34a2d3cd9349d447b3c3566de978a
|
[] |
no_license
|
an3z/Scrapfinn
|
bc3b8e35c0a37e3bd36333ee99f607dacf6dc297
|
87bb59fe2c519e9b938ca6cc26107f67671b1409
|
refs/heads/main
| 2023-01-24T09:35:54.391746
| 2020-11-12T11:28:32
| 2020-11-12T11:28:32
| 312,234,071
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
BOT_NAME = 'finndata'
SPIDER_MODULES = ['finndata.spiders']
NEWSPIDER_MODULE = 'finndata.spiders'
# Database settings
CONNECTION_STRING = "{drivername}://{user}:{passwd}@{host}:{port}/{db_name}?charset=utf8".format(
drivername="mysql+pymysql",
user="root",
passwd="toor",
host="localhost",
port="3306",
db_name="finndata",
)
ROBOTSTXT_OBEY = False
ITEM_PIPELINES = {
'finndata.pipelines.FinndataPipeline': 100,
'finndata.pipelines.NewFinndataPipeline': 200,
'finndata.pipelines.parkingPipeline': 300,
}
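
# A minimal sketch of consuming CONNECTION_STRING, assuming SQLAlchemy is
# installed; guarded so that importing these settings stays side-effect free.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine(CONNECTION_STRING)
    print(engine)  # Engine(mysql+pymysql://root:***@localhost:3306/finndata)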
|
[
"salam.alanezy@heimstaden.no"
] |
salam.alanezy@heimstaden.no
|
00939890adec7df36ab5f1d64050e075d92a7be4
|
52d53fa337e3db4349b43f08961dc42cc2cc7624
|
/Visualisations/DatabaseConnection/DatabaseConnection.py
|
83ed507f499bc69783e89d50d909fe4305916375
|
[] |
no_license
|
tomneutens/log_data_analysis
|
7f0a0e14230fe692091bad9508f1433322de8f19
|
6c7d4f8aba05975fdbd0597da60037eccc0fda22
|
refs/heads/master
| 2021-06-29T19:17:02.602250
| 2020-11-16T08:25:38
| 2020-11-16T08:25:38
| 178,832,512
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,456
|
py
|
from pymongo import MongoClient
class DatabaseConnection:
debugSessionIds = [
"100098162156200550876384166413318805194",
"100791076829784311538777553313397622066",
"100845778315810870381297666893312058560",
"102313417323140926547887400500875042809",
"10284872922298938270212915201316900598",
"105578849649353579205604325916623082442",
"106076319898654938516210404764181390687",
"106563039490284856059072574589117842503",
"107861383593120223856232148733774499634",
"111315636062256536330279336482267063787",
"111748846454960255697777895981811088022",
"114124928985858559246652753906933567932",
"116151029947935438853028769435334017364",
"117717194807878827801552408942754553635",
"117926317373266466157107284374827946144",
"11869334356669164130640973680857056664",
"118714242678899997472771907674739702852",
"121896282140801974805816549141031973735",
"122529207243324832685446571495103483613",
"12541850902869437222688741249106215326",
"127309816504281663885865719519876965431",
"127615613026458713032686301709902576539",
"128644601766156074555885835193838242927",
"13003588624884147437037530600809982269",
"131379918622922915065731494868356194228",
"132743822903765424429622800087572183856",
"13379136001073531258444292360856115375",
"134250637263263090171422849459350960947",
"134719013395651191042635559465054707718",
"135312927729158973980791384588306065826",
"135452660040185061244536656018077667400",
"13587584217313558605765799277193588780",
"136119576952264678145720395095599613851",
"137058012824955348077410505829019148784",
"137563301952548650695585607615372908288",
"138857895488150079793929648137706237421",
"140008720797955013730004013744649716637",
"140274351570945304276050913011172036603",
"141556469293547231305128345699628928489",
"143344144143492124640124804979184874300",
"143586471182772365308677473748369096340",
"146909901174410040042392326145318171897",
"147311853940011993539422902087300982970",
"148406727501651431955780513712733441992",
"149814676273419013149721504568464755434",
"151377828364293379103161405386768675922",
"152413750765510245624464548371368601913",
"152819562921099789508252000718754255140",
"154789518642175825687480686385860733768",
"155452494037881028389327863101140194890",
"156240082493014494544546328923032838532",
"159808849928649646645922402368562144097",
"160484469635228737222633868936562802821",
"161624364075521334752360850871327785408",
"161982062243649074634320969126954532886",
"162028289422143974900977260683673723094",
"162327128151118817418998330197307990054",
"162592501768525619579742836081770841257",
"162894508781621555841742443443713117812",
"164532276608253456035167093251185474125",
"165404678509366330054414143711196373617",
"165699279399608280837590250765733732863",
"165820327216082135721631402626404651156",
"166430684697976841886630368793045127214",
"166696090152014751019478684633221592439",
"166706466899644882497914509309693677664",
"172118350935123326372818814017003776221",
"178875275208661804159060332376496868949",
"178888818105058280466082838263910846733",
"179145806051307330547860255972995663872",
"182709107770683658338303605013884453918",
"184210044878790029866407528325312692265",
"185846076633135925266228729616756555539",
"186684099918575949675506023710919614229",
"187872983203687923727748894371152859899",
"189007044969014776391015386544962574956",
"190819340939776378693992563625809537925",
"195430530531155352362104581217016724517",
"198017339091772862093730085531440945472",
"198338813509876407773313843541280364692",
"200508039162385247059428211655675823439",
"202381563582672196944461680727804065681",
"205197584779029305464975423927945653315",
"207093868655217744023716555332030394852",
"207941665077051859494770897110907882022",
"208114704810996079193311826536396140501",
"209406031288077315468637223490420942059",
"21271158596658162401179303593962031424",
"213134055085801779053221505650113401493",
"213278169701437006663412919451176954498",
"21456439698528928698392279034911290278",
"217555662701162262684771094453377739738",
"217815235664639149164872667657155058614",
"223651189725899866680043875504117231296",
"224124899527406994384974989991345257956",
"224528658820206646489999544834279555974",
"226424596326276368304364332877701919886",
"227774457182877875804404515696586409195",
"230435531284864093632284161011372986880",
"230706560390293761603314723634687456734",
"233686884061739774629772954495192333164",
"23433133639225217421646562428977486501",
"235808832306776134137010086715171292838",
"238755208351411449188605106735791301616",
"238926563604501287367028024717024056620",
"239030631468857181290510440832625672697",
"239641455000841961419726286756439982390",
"241717118459946511307277107629610938595",
"242036105268082170669882450739403779805",
"242513794525954699621547978991434074435",
"242651728756765385950623910606333269884",
"244116968846655985836198641569241722020",
"245528804027734006715723816488961483613",
"245563914619845640812179064098575176709",
"245830903413418006848253725097655417482",
"247540201062180724005430583876080115529",
"248288118699879616154261988468860066441",
"249920167079479997249198760820075384241",
"249974491478252273609759513802922920822",
"250306037528980588126556488380672456975",
"250457313591535749510369453624673920374",
"251387216072666120777764260647352145410",
"255674495464960444997095670259626885413",
"2559765746588462257903577985840697842",
"256356723230531810460864781666270544610",
"257313054760700816986291355173985291770",
"260140719076288365501969595596165612393",
"262402112337221531020877595437311643392",
"26515638075565057894778458974858666374",
"265667592281929272621280453087770179213",
"266331577661207984412621982649023192176",
"267470025023343921081139553052252135852",
"267695063397927547937703729911302316426",
"267751964590242399780982791217938838965",
"268428003593238403305104483773086616950",
"269092951056944560716437491575858277041",
"27095215584968149506847010463975852827",
"271145625140663070727163832098838789298",
"271877581943654699975127574170909307294",
"273918666711421551761260237092221448696",
"27562289989847517030043963189913563175",
"277131447634615156023543184210130622654",
"277569555683541153984501809680301564718",
"277925902568701102978042367607705317386",
"278571444815490375669446139631302201772",
"280699630038480156656278947247206462367",
"281048648356420360819845558445921199474",
"282438284583877639172288967291456154258",
"283381907268860484957840748607688227891",
"28865348897476278596254654074794037629",
"289817011331915049788681749931380219269",
"294233134219952226895524662784623818300",
"298946973826558144626785033771537175249",
"30575065889227405785384591345564150482",
"31128392975166162848660274125113931212",
"317168017570769142892434822598815093932",
"317407657123402250232532369230581774942",
"317810246765588381437691470656289959451",
"318272758402471635118164486523949061512",
"31996424765033535790830825417504319973",
"319964997238640268141387599034106464566",
"320372526446818370059990853651324864013",
"321668714487694685937332549398939932950",
"321758273587194590001139335682111461616",
"324492423077297623441154728743411175054",
"324991699820471444226518328382043806942",
"325096310605120898939426942446448993091",
"326393454686265890242579021355623568289",
"326464857664489346448853378898737825791",
"327746499830134339439538501858919723029",
"328473379504667884069775334328335511830",
"328682159766482723213243908976153457833",
"33085078038657310865846512852402959491",
"331794761032939412807267242052075156264",
"332824460635691446214194648388939178409",
"333393636261863167043220303048786987169",
"334501103650690896431231396134637723178",
"335409237152580489967886483185033025031",
"335899919204216457480244276161425314830",
"335919029540085196695712656358170197988",
"335923968626649870829589186068970612693",
"339238018698929913737428530862668047048",
"34769837755236928352103578951381960254",
"38273733567281486705033175654158376775",
"40151403741696310291658634760587190865",
"40756101720790868620023488623459135462",
"41594516313067724339783441001364302440",
"42136316273384504961750955187372527253",
"42223839470522223384500961489852193134",
"42852366113543178658788964953870543870",
"47039539361269630107346621689912117723",
"47273124640767011752348129589839659279",
"50884787801807895152363786581925772541",
"51127806518180553864088218054675367018",
"51273380326331570584250388673771602212",
"51725222373111934239641168861241156549",
"52156141035247708431390232193243831878",
"52691090132049262433734729815044986335",
"52783788459314588286448579128518983480",
"5317251866122907486713888819096553954",
"54207834985272652828370125378223179707",
"54500170284082014866468139221081040053",
"56808603563978995377341662570667207130",
"58157574345924746844269053154523314543",
"59146520701249942020890391913627840064",
"62021777235323476383770535091011196449",
"62605816416331224750004039564907553645",
"64831142422358753237491728318323368438",
"65033631044991037149955625658720412100",
"67118623552935537425310580260858109070",
"68382240149084863252376729208949803643",
"69573028114937800546852302081819111778",
"71683108053760278088486179527355481078",
"7414006559854129416113429278392933819",
"75077197568383657933854333882497827627",
"7637343558638212513585421888142581003",
"78939224574137767695421262506942152077",
"79432162011045936469217614760459720573",
"8132455953832050105516726809592848956",
"83521833134002890108416962256785968088",
"84723308415416462302857232239099977867",
"8498702668874009490272089176612538903",
"87742456399758148001097793938016377487",
"88130304745696567842832294317631369466",
"88418151738019193375290410638868230485",
"89967761835024251088062923134480485360",
"90570604259713016509209925080607788727",
"9074629638473392994209299704526514183",
"90959537979126532584389476448893397369",
"90981316506956637269670558265653470601",
"91071306397263992738970652529452264258",
"91279567330421387255820545461408166356",
"91932501544047351811171491024215451529",
"92068905529135683492567084209819583079",
"93869881989254897091302111822109713286",
"94488752762492682982947687611098168192",
"94595606120245128711366429794467453192",
"95039447919799438789968721780194389913",
"96543936375017262228597878906026691547",
"97359970685990310293431360503793410277",
"97493640573516722971895222991847983632",
"97493649192466294045996911997632333593",
"98337268058732091586401751530579627811",
"98723050124954722797928179077156364655",
"99930641699826077487769976588799375801"
]
createSessionIds = [
"101353373570732381824116799233209664511",
"101537932138396140015312752829262261735",
"102597034802814374531877357330957916734",
"105924397490726199156475864644906690129",
"109206681864232309086541864428672217442",
"109955979215631782203694481089834581984",
"111063451419960437998902313142127244656",
"11122534898580574992715957855324355261",
"111611581556934531910946138614230368321",
"1119212814414146320234354743175873907",
"112961797669122926097291490558252751363",
"113503599633604144573015042637418427403",
"11515592966226135069949244428958948510",
"115731467189902591911275658276690957950",
"116154788186993302661978578662633364828",
"11703948123289249840514264943654807570",
"117259175969457724580376054263108220658",
"119517158644056793395782504086760362279",
"119572793856723272316585365337608575506",
"12038739706746836128105087413272013322",
"122314496211502099146414327362929532551",
"123145216600054395929938318367782175185",
"12359562815874014818551973490979433620",
"125516675609022969344552997588320761510",
"12593550028022991169443784208189442123",
"128120795044846778768676848254900521811",
"129503964841220209529060332730215224496",
"130553056405723450548389719830694006740",
"130925866024188956309996988924921179204",
"131764766030447889675522041219879835828",
"134966043293170591836352610389356610804",
"135192316520025205812631178231857647238",
"138656702642316400298423497771665478225",
"139108264580231166602096942542212796881",
"13995646743538880257615452954222749289",
"140089985229682412465845498750201716285",
"140910896512988682536499122592017414408",
"141075724818251049583841973997917728169",
"14152006528448271336366254300692703038",
"142298770116630965603172175802203096651",
"143334348661315600676166990571095130291",
"144312382355533922189712768015601124021",
"147059268241045796410301615511864868870",
"150711009299885423384504440865247678933",
"151558377847587020917367456079632419098",
"152620133133659722788776658438155198389",
"159063309706221536903404885645944400075",
"159323961957799969098816322743077955771",
"159957470226592255385163380205018475621",
"16031867009696323382512669811186790717",
"160331440674577328253607091105305560820",
"161342666088214991449473441453156883637",
"161985178377901800131229862988249128142",
"164388789246866079872216445105201432683",
"16615362594234198197744078713510140927",
"166286948616903749113066860450076346326",
"16772771589624079075296703823412510014",
"168278856468612942408610737755257577242",
"168404447331335634941865417414901376192",
"168749456603521270179263109259786935970",
"16958430018930103458793733067540113804",
"17156111417213050326609656478445737422",
"172041372478536333516708296717841683545",
"172954515326810685265883646770875399874",
"177005172541788870276691110651460367331",
"177868060832540772744634905132256700008",
"178339554966249103934447816453732837453",
"180286461680726466847959664941682323713",
"181827527241849325398168465067212330719",
"182690473392511554909204058688738419099",
"183889827865366016326131231971486875689",
"185472318874560311298616512382147932035",
"186587508180430160621715922775282626667",
"188101709151297005383731575227236514905",
"188331992217771547836243812329251077441",
"189654868379424600216663745933156431066",
"190341375358483662513229568067480865133",
"19223581377874621391729908106256641886",
"192435490502668058731988140981252019797",
"193153885988792783160974506588369440411",
"196011824426565865331963809234446803446",
"196980319104124177453587031685302421497",
"198070362896115494812894436403900034974",
"198633073418107023925171366219874758782",
"199767412500394026875788750259408205917",
"200746358799241317924392310277033701478",
"201232314971569053256294730795279676746",
"206778013373611707857475859875194480528",
"208175127041033329163811832597563428848",
"208422281082181617045975959939634452042",
"208651404832000897620039077598689353927",
"209686612877819476445450923548754568609",
"209924163828723088933761627666736286886",
"210232754016706502504288503232138361126",
"210503327250633317287400410199413371876",
"21051573501993737596371322190163582713",
"211253027673627968769465889815333062803",
"212086137603425537739083619917812158800",
"213796744660274711241810122483327065106",
"215391175470835368307802657870966619782",
"215445086319065199915002280089823993500",
"216148056196150791854483428042810254770",
"216469093625631170908144416755243154774",
"216702356610803774461637255386182812084",
"216737522812139435606423143158338043611",
"218586681976293426717597314625731933862",
"218953840326847199019081579487459915741",
"220707028622859202404920952573934883235",
"222066581107614631085752945971388582936",
"22240567707603171224628457435037572571",
"22454796781106295377727501471161083782",
"226211835441933521511464784302840562008",
"227177244233909777474402287374113039394",
"227429622934811251305208838182741956970",
"227883528818877107058499732402443536202",
"229117539462191757955018787912073304652",
"229690372454134804108728963190065351792",
"229988490266635149462771722640485691529",
"230260552721935423359542538114261171441",
"230710218327369516822270862354253531520",
"235679998312566505987591992990635012146",
"238212159289557128301595984157059492320",
"240011902280920585373735338144135360154",
"241953805667571827920185465686025932349",
"242015338481143075779044373120686219276",
"242309206919026041106201886868078877246",
"242731904842083988349731746102598888528",
"244868035260434103951853955073742544754",
"245222114922701618256493747535134248419",
"245330190880824080937584674805917281611",
"247191460673514373098028626066575733505",
"247235684991167437344275965053782210486",
"248079812782756912127401403654943049034",
"248917092231375064830460586227621714313",
"251480100010422932148504734149802521854",
"251752938417185676933470665941108621307",
"254907992375489957966266894791385177269",
"256277312548666053782910631331490207102",
"256548281663054716264627924925874830698",
"256648081482366480811479397849950684244",
"256982239570587276134967493028751184242",
"258879499426722220147824406465214495673",
"258932732778893177311527451546877632792",
"259213377690698469446337009993119447012",
"259970560135950534358365861160189991639",
"26024184848161327679039361740626576684",
"261353721647621799699917935659595296998",
"261925639259572194357319065513836966861",
"26213286834997291965258827995032200627",
"266563998501787905248177767833889167092",
"266687961690335723670860498378759018432",
"266771307136338369494851777380281993463",
"266949669044210811926344691919498178649",
"269009369505612485758665647930780443742",
"269855088721117114512264021508127462239",
"270566867573240867449913705573629105149",
"271736705596603583911977854667805114592",
"271887090557175907259552412574724740446",
"273116109288230648972926427862431002650",
"275274351479049094709466879755073167588",
"276107818895010610268792621423300271136",
"283605399577348495748606162578052304764",
"284840113159303394928479609596057263895",
"285963940818219173386394634365863560059",
"286860357720610521612829579399315083537",
"289906114612856980426796703468716546154",
"290408230653407146022438967874238092062",
"291246361599380643694063075947451535779",
"292558405726004115759841458301017149854",
"29265863045997565682593430030873570259",
"293507903541923572781138380510121669563",
"29404690360849096033554949029773179407",
"296903945583422565857364576305237912476",
"29880855111417105778260964765225906812",
"299248491144386992957855496500153619391",
"299427244171875891430739996570688261629",
"302147644378809799748454941489244707070",
"303491729551153882403892489575034755097",
"305594731067227859647307030813311593894",
"30604864641908379523375279943541986347",
"306330100323194266370007755007254645940",
"311159816246010256675878553713123629806",
"31174392073998455128478782465824903436",
"314002750722983654386615972336926931266",
"314521334372428822127928334901424440415",
"315015001451231135617054854873082417900",
"31655772019922239499162179956616268371",
"317058519586546020602959468544699617741",
"317364624328483239622855886884509613573",
"319063364546704860609546254869420779662",
"320405005061278478582598343672512024405",
"322080165797412359021962540173734620474",
"323427232207408245792511002956675410627",
"329390334323576117295608836733331394991",
"329538049724470132306912072685495037907",
"330366790681164198338241005349529397596",
"331186894979308015583285537078102406839",
"33203091279800299565401513149765289670",
"332160096045369577052124375602939097928",
"332666115523774785097082523764839773766",
"33356889121652924837636088876097109432",
"337698162154795320214996988936373282517",
"338181529685759280318634900993962434060",
"338420310280881529660196743881409632915",
"36058277662311892735614064306802652123",
"36667558753863941986303201426690362398",
"37990431202408973622850361907799744779",
"38351500931781779181620715949540115668",
"42267444183239888515421865339068841335",
"43801288012215862679327049664693062758",
"44278500876648978477847495404284172536",
"46409048707050474093008625330229634663",
"46424233979660049868925691579411882910",
"46625839279864398448901186765454248951",
"47339040704731237422465161694736094383",
"47644993361355828935292039794145071006",
"49184601044893475142728586916219505969",
"52192337071007751407456819009041214547",
"5283135856739024137036068627461582062",
"54665170837612560883820980779388409476",
"59600607460021647531664134555096153881",
"60348685298330939456664973722517816733",
"64243920243650003346562360741146976842",
"64868164890579445853179064176513101112",
"65736236594174822728766535336236530604",
"65847368889008530160802400675326389897",
"67137581441946713509134959294411740712",
"68720941991397544799781489831748801596",
"71307967888501512616617747682638100291",
"72213045370297506496475233695523290661",
"73453639887065412496700462878129521901",
"75293042182075613076032358145895365873",
"75914537691045933226293040967117398436",
"76497462353248542700739869406521708606",
"76722878635417517595372112374618929182",
"8208720225607323948382371868527359221",
"82419508017494499820736290288250051819",
"87578589583176626917252461559057973509",
"89297662470815003437541200420460008160",
"89570608000905703102927994501875754984",
"89619136239573068678392319197810169013",
"90266768092005575376551986308851814403",
"91185263755523433375410824484490688660",
"93006151528761084048686973675006736588",
"93806340741557184711497598983848492435",
"94409040331688155131460822672629401204",
"95034774209971961971585986211078802765",
"95707618372767550406402477354128578656",
"96642614785414295316321354109741964704",
"97425823300638584217240115905656644214",
"98815802023890322626078109214223096816",
"99245909820474056157420475004560006718",
"99394925682927067161021085618751898013"
]
currentSessionIds = []
def __init__(self):
self.client = MongoClient('localhost:27017')
self.databaseName = ""
self.collectionName = ""
self.sessionIds = {"BlocklyLogCreate": self.createSessionIds, "BlocklyLogDebug": self.debugSessionIds}
def setDataSource(self, database, collection):
self.databaseName = database
self.collectionName = collection
self.currentSessionIds = self.sessionIds[database]
def getCurrentSessionIds(self):
return self.currentSessionIds
def queryCurrentCollection(self, query):
return self.client[self.databaseName][self.collectionName].find(query)
def aggregateOnCurrentCollection(self, filterPipeline):
#return self.client[self.databaseName].command('aggregate', self.collectionName, pipeline=filterPipeline, explain=True)
return self.client[self.databaseName][self.collectionName].aggregate(filterPipeline, allowDiskUse=True)
def countQueryResults(self, filterPipeline):
return self.client[self.databaseName][self.collectionName].count(filterPipeline)
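
# A minimal usage sketch; the collection name and the "sessionId" field are
# hypothetical placeholders, and a local MongoDB instance is assumed.
if __name__ == '__main__':
    db = DatabaseConnection()
    db.setDataSource("BlocklyLogCreate", "events")  # "events" is a placeholder
    pipeline = [
        {"$match": {"sessionId": {"$in": db.getCurrentSessionIds()}}},
        {"$group": {"_id": "$sessionId", "count": {"$sum": 1}}},
    ]
    for row in db.aggregateOnCurrentCollection(pipeline):
        print(row)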
|
[
"tomneutens@gmail.com"
] |
tomneutens@gmail.com
|
df4ac51fccc876c20ce0077aa7d9de548e96e905
|
97dae48fa3c613a84655c1c0b12cdc0db2c555bb
|
/algorithm/random/random_node.py
|
762a106aab90fb123741b27e865a4ab52323d43e
|
[] |
no_license
|
klknet/geeks4geeks
|
6aa5841b15be41057dc987524721ea1ea37e02ea
|
d7d9099af7617a4000f38c75d2c7214bed570eda
|
refs/heads/master
| 2021-07-12T06:34:30.048691
| 2020-06-22T07:51:14
| 2020-06-22T07:51:14
| 170,288,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
"""
Select a random node from a singly linked list.
"""
import random
class Node(object):
def __init__(self, data):
self.data = data
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
def insert_node(self, data):
node = Node(data)
node.next = self.head
self.head = node
    def random_node(self):
        # Reservoir sampling with k=1: the i-th node replaces the current
        # pick with probability 1/i, so every node is equally likely.
        current = self.head
        n = 2
        result = current.data
        while current.next:
            current = current.next
            r = random.randrange(n)
            if r == 0:
                result = current.data
            n += 1
        return result
def size(self):
n = 0
tmp = self.head
while tmp:
n += 1
tmp = tmp.next
return n
linked_list = LinkedList()  # a name that does not shadow the built-in list
linked_list.insert_node(1)
linked_list.insert_node(2)
linked_list.insert_node(3)
linked_list.insert_node(4)
linked_list.insert_node(5)
linked_list.insert_node(6)
linked_list.insert_node(7)
linked_list.insert_node(8)
print(linked_list.random_node())
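
# A quick empirical sanity check (a sketch): over many draws each of the
# eight values should appear roughly 1/8 of the time.
from collections import Counter
draws = Counter(linked_list.random_node() for _ in range(8000))
print(draws)  # expect each count near 1000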
|
[
"konglk@aliyun.com"
] |
konglk@aliyun.com
|
78d1db3c6dc1d8c61587fd41ec302b44b9c0485e
|
b39670923ee518f2da16207a0d7e6093775f7b55
|
/prac_05/word_occurences.py
|
72bf33df1a8e510cee5448f94c119206bd505145
|
[] |
no_license
|
SebastianFrizzo/CP1404_Practicals
|
02116acbd7730f18c9f4b0f93d9c6e3afe619746
|
f635cd701e99e81087587e1ae34972b51e1d1357
|
refs/heads/master
| 2020-07-12T21:07:48.623511
| 2019-11-11T06:11:22
| 2019-11-11T06:11:22
| 204,906,505
| 0
| 0
| null | 2019-09-18T02:27:24
| 2019-08-28T10:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
user_strings = input("Say something: ").replace(",", "").split(" ")
user_dictionary = {}
max_word_length = 0
for word in user_strings:
if word not in user_dictionary:
user_dictionary[word] = 1
if len(word) > max_word_length:
max_word_length = len(word)
else:
user_dictionary[word] += 1
for word in user_dictionary:
print("{:{}}: {}".format(word, max_word_length, user_dictionary[word]))
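
# The same tally can be produced with the standard library's Counter; shown
# as a sketch for comparison with the manual dict approach above.
from collections import Counter
assert Counter(user_strings) == user_dictionary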
|
[
"sebastianfrizzolaloli@gmail.com"
] |
sebastianfrizzolaloli@gmail.com
|
c5783433362fd28cfaa4c90e29326762ed7361eb
|
3698669255e1b1780a467f050b746fc5c803f76a
|
/titles/admin.py
|
3be782bc024f8330a07c90cb0ddb01af260ee97a
|
[] |
no_license
|
RustamIR/yamdb_final
|
2213bc15e0246caea264d59250c960ae64b3c3c0
|
6364f8b8d83f0a9c229ef43919f7738763801dc2
|
refs/heads/master
| 2023-02-27T01:01:04.434217
| 2021-01-18T06:35:13
| 2021-01-23T09:58:53
| 329,542,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
from django.contrib import admin
from .models import Categories, Genres, Titles
class CategoryAdmin(admin.ModelAdmin):
list_display = ("pk", "name", "slug")
search_fields = ("name",)
empty_value_display = '-пусто-'
class GenreAdmin(admin.ModelAdmin):
list_display = ("pk", "name", "slug")
search_fields = ("name",)
empty_value_display = '-пусто-'
class TitleAdmin(admin.ModelAdmin):
list_display = ("pk", "name", "year", "description", 'category')
search_fields = ("name",)
empty_value_display = '-пусто-'
admin.site.register(Categories, CategoryAdmin)
admin.site.register(Genres, GenreAdmin)
admin.site.register(Titles, TitleAdmin)
|
[
"roostamishteev@yandex.ru"
] |
roostamishteev@yandex.ru
|
f8ae2d1fca5e85c23a12fde1dc9d340d64458945
|
80052e0cbfe0214e4878d28eb52009ff3054fe58
|
/e2yun_addons/odoo12/e2yun_base_geolocalize_bing/models/res_partner.py
|
cd91d36bc9c2e0d6e122fe3eabed2e72bf7cbc34
|
[] |
no_license
|
xAlphaOmega/filelib
|
b022c86f9035106c24ba806e6ece5ea6e14f0e3a
|
af4d4b079041f279a74e786c1540ea8df2d6b2ac
|
refs/heads/master
| 2021-01-26T06:40:06.218774
| 2020-02-26T14:25:11
| 2020-02-26T14:25:11
| 243,349,887
| 0
| 2
| null | 2020-02-26T19:39:32
| 2020-02-26T19:39:31
| null |
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
import requests
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
def geo_find_bing(addr, apikey=False):
if not addr:
return None
if not apikey:
raise UserError(_('''API key for GeoCoding (Places) required.\n
Save this key in System Parameters with key: bing.api_key_geocode, value: <your api key>
Visit https://docs.microsoft.com/en-us/bingmaps/getting-started/bing-maps-dev-center-help/getting-a-bing-maps-key
for more information.
'''))
url = "https://dev.virtualearth.net/REST/v1/Locations/?"
try:
result = requests.get(url, params={'q':addr, 'o':'json', 'key':apikey}).json()
except Exception as e:
raise UserError(_('Cannot contact geolocation servers. Please make sure that your Internet connection is up and running (%s).') % e)
if result['statusCode'] != 200:
if result.get('statusDescription'):
_logger.error(result['statusDescription'])
        error_msg = _('Unable to geolocate, received the error:\n%s'
                      '\n\nPlease check that your Bing Maps key is valid\n'
                      'and authorized for the Locations (geocoding) REST API.\n'
                      % result['statusDescription'])
raise UserError(error_msg)
try:
        if result['resourceSets'] and result['resourceSets'][0].get('resources'):
geo = result['resourceSets'][0]['resources'][0]['point']['coordinates']
return float(geo[0]), float(geo[1])
else:
return None
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(
field for field in [street, ("%s %s" % (zip or '', city or '')).strip(), state, country]
if field
))
class ResPartner(models.Model):
_inherit = "res.partner"
# partner_latitude = fields.Float(string='Geo Latitude', digits=(16, 5))
# partner_longitude = fields.Float(string='Geo Longitude', digits=(16, 5))
# date_localization = fields.Date(string='Geolocation Date')
@classmethod
def _geo_localize(cls, apikey, street='', zip='', city='', state='', country=''):
search = geo_query_address(street=street, zip=zip, city=city, state=state, country=country)
result = geo_find_bing(search, apikey)
if result is None:
search = geo_query_address(city=city, state=state, country=country)
result = geo_find_bing(search, apikey)
return result
@api.multi
def geo_localize(self):
# We need country names in English below
apikey = self.env['ir.config_parameter'].sudo().get_param('bing.api_key_geocode',default='AqY4IFeQhJPHi5FjGBNc7hfgUNcaVf7S_qyyP_dlVCesSJUqI7dBA-gsyoAIUvGu')
for partner in self.with_context(lang='en_US'):
result = partner._geo_localize(apikey,
partner.street,
partner.zip,
partner.city,
partner.state_id.name,
partner.country_id.name)
if result:
partner.write({
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.Date.context_today(partner)
})
return True
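
# A small sketch of what geo_query_address produces (values illustrative):
#   geo_query_address(street='1 Main St', zip='10001', city='New York',
#                     state='NY', country='United States')
#   -> '1 Main St, 10001 New York, NY, United States'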
|
[
"joytao.zhu@icloud.com"
] |
joytao.zhu@icloud.com
|
6440e43eeb24e41f882ccb95d12ebfb8d7b5b3b5
|
c6c450d750bcc559882c6f211f952b411505d6d8
|
/apps/work/views.py
|
b732081bebd1c4c612bcf8b2aefa24ad1c5c8736
|
[] |
no_license
|
ESCL/pjtracker
|
e26ea09136f35f5c85ea8d68a63fd94fab2629da
|
4dcf0e6a37e8753ae9d69d663c0c280fcca0a26c
|
refs/heads/develop
| 2021-09-26T09:18:54.158051
| 2021-09-10T23:16:07
| 2021-09-10T23:16:07
| 52,280,177
| 1
| 0
| null | 2021-09-10T23:16:08
| 2016-02-22T14:45:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
from django.shortcuts import render, redirect
from django.forms import inlineformset_factory
from ..common.views.base import StandardResourceView
from .forms import (ProjectForm, ProjectSearchForm,
ActivityForm, ActivitySearchForm,
ActivityGroupForm, ActivityGroupSearchForm,
ActivityGroupTypeForm, ActivityGroupTypeSearchForm,
LabourTypeForm, LabourTypeSearchForm,
ActivityInlineForm, ActivityInlineFormSet)
from .models import Project, Activity, ActivityGroup, ActivityGroupType, LabourType
class ProjectView(StandardResourceView):
model = Project
list_template = 'projects.html'
detail_template = 'project.html'
edit_template = 'project-edit.html'
search_form = ProjectSearchForm
main_form = ProjectForm
permissions = {
'add': ('work.add_project',),
'edit': ('work.change_project',)
}
class ActivityView(StandardResourceView):
model = Activity
list_template = 'activities.html'
detail_template = 'activity.html'
edit_template = 'activity-edit.html'
search_form = ActivitySearchForm
main_form = ActivityForm
permissions = {
'add': ('work.add_activity',),
'edit': ('work.change_activity',)
}
class LabourTypeView(StandardResourceView):
model = LabourType
list_template = 'labour-types.html'
detail_template = 'labour-type.html'
edit_template = 'labour-type-edit.html'
search_form = LabourTypeSearchForm
main_form = LabourTypeForm
permissions = {
'add': ('work.add_labourtype',),
'edit': ('work.change_labourtype',)
}
class ActivityGroupView(StandardResourceView):
model = ActivityGroup
list_template = 'activity-groups.html'
detail_template = 'activity-group.html'
edit_template = 'activity-group-edit.html'
search_form = ActivityGroupSearchForm
main_form = ActivityGroupForm
permissions = {
'add': ('work.add_activitygroup',),
'edit': ('work.change_activitygroup',)
}
class ActivityGroupTypeView(StandardResourceView):
model = ActivityGroupType
list_template = 'activity-group-types.html'
detail_template = 'activity-group-type.html'
edit_template = 'activity-group-type-edit.html'
search_form = ActivityGroupTypeSearchForm
main_form = ActivityGroupTypeForm
permissions = {
'add': ('work.add_activitygrouptype',),
'edit': ('work.change_activitygrouptype',)
}
class ProjectWBSView(StandardResourceView):
"""
Experimental WBS edit view.
"""
model = Project
edit_template = 'wbs-edit.html'
formset = inlineformset_factory(Project, Activity, formset=ActivityInlineFormSet,
form=ActivityInlineForm, extra=1)
def show_forms(self, request, pk):
"""
Render the formset for the given project.
"""
proj = pk and self.get_object(request.user, pk) or None
context = {'project': proj, 'forms': self.formset(instance=proj)}
return render(request, self.edit_template, context)
def upsert_instance(self, request, pk, **kwargs):
"""
        Save the main form (and subform if the instance is not new) and redirect
to the collection view.
"""
proj = pk and self.get_object(request.user, pk) or None
context = {'project': proj}
fs = self.formset(request.POST, instance=proj)
context['sub_forms'] = fs
if fs.is_valid():
# If all defined forms are valid, save them
instances = fs.save()
# Also update their project and owner (just in case)
Activity.objects.filter(id__in=[a.id for a in instances]).update(project=proj, owner=proj.owner)
# Now redirect to collection view
return redirect('project', pk=pk, **kwargs)
else:
# Invalid, render forms again with errors
return render(request, self.edit_template, context, status=400)
|
[
"claudio.melendrez@gmail.com"
] |
claudio.melendrez@gmail.com
|
28bf442921e0912bc5ab3b61c32415d4283444d4
|
eefede4cd6fbd61fb63a0b367c9a03e2cf8217da
|
/fanjian.py
|
54a6c755e82f548a4dcf49075813f70602aee809
|
[] |
no_license
|
josephok/fun
|
94d1dd4df6868797fc32b329bf1f8cf0f29fb1e8
|
c1176f914219f9ddb3024b8c827fa8a00bc063a5
|
refs/heads/master
| 2020-12-26T03:55:28.864934
| 2016-11-05T04:14:19
| 2016-11-05T04:14:19
| 68,285,161
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
import logging
from spider import Spider
logger = logging.getLogger(__name__)
class FanjianSpider(Spider):
name = "犯贱志"
page_pattern = "latest-"
def _parse_index(self, page):
return page.xpath("//h2[@class='cont-list-title']/a/@href")
def _parse_content(self, page, document):
        # title
        title = page.xpath("//h1/@title")[0]
        # publish date
        post_time = page.xpath("//div[contains(@class, 'view-info')]/text()",
                               namespaces={"re": r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}'})[1].strip()  # noqa
        # body content
        content = document(".view-main").html()
        logger.info("Parsing content from {}, title: {}".format(self.name, title))
return (title, post_time, content)
|
[
"josephok@qq.com"
] |
josephok@qq.com
|
2ed97f8452b49adbd0bdcfc03b06558d59735e10
|
f9fb4f8073d963c349679e7f40b73dc711160991
|
/eric-fokou/Lesson2/lesson2_crawling.py
|
38284aae64b54eb4d14d916148d80eaad3a276e4
|
[] |
no_license
|
ouedraogoboukary/starter-kit-datascience
|
83606196fc19cc3385ba8e846ef3014ff9e0b2e9
|
f621d4a1d7826c79c7ebd3a5a07a0138199e6c82
|
refs/heads/master
| 2020-10-01T01:04:23.460810
| 2017-12-04T22:33:52
| 2017-12-04T22:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,410
|
py
|
from __future__ import print_function

import requests
import pandas as pd
from bs4 import BeautifulSoup
MAX_PAGE = 2
def extractIntFromDOM(soup, classname):
res_str = soup.find(class_=classname).text.replace(
u'\xa0', '').replace('vues', '')
res = int(res_str)
return res
def extractLikeDislikeFromDOM(soup, classname, position):
# print len(soup.find_all(class_=classname))
res_str = soup.find_all(class_=classname)[position].find(
class_="yt-uix-button-content").text.replace(u'\xa0', '')
res = int(res_str)
return res
def computeIndicatorForPage(url):
result = requests.get(url)
soup = BeautifulSoup(result.text, 'html.parser')
number_of_views = extractIntFromDOM(soup, 'watch-view-count')
number_of_likes = extractLikeDislikeFromDOM(
soup, 'like-button-renderer-like-button', 0)
number_of_dislikes = extractLikeDislikeFromDOM(
soup, 'like-button-renderer-dislike-button', 1)
indicator = 1000. * \
(number_of_likes - number_of_dislikes) / number_of_views
title = soup.title.text
    print('=====')
    print(title)
    print("Likes", number_of_likes)
    print("Dislikes", number_of_dislikes)
    print("Views", number_of_views)
    print("Popularity", indicator)
    print('=====')
metrics = {}
metrics['song'] = title
metrics['number_of_views'] = number_of_views
metrics['number_of_likes'] = number_of_likes
metrics['number_of_dislikes'] = number_of_dislikes
metrics['indicator'] = indicator
return metrics
# computeIndicatorForPage('https://www.youtube.com/watch?v=wfN4PVaOU5Q')
def getAllMetricsForArtist(artist):
all_metrics = []
for page in range(1, MAX_PAGE + 1):
all_videos_artist = requests.get(
'https://www.youtube.com/results?search_query=' + artist + '&page=' + str(page))
soup_artist = BeautifulSoup(all_videos_artist.text, 'html.parser')
# print(soup_artist.prettify())
list_video_artist = map(
# lambda x: x['href'],
# soup_artist.find_all(class_="yt-uix-tile-link"))
lambda x: x['href'], soup_artist.find_all(attrs={"class": "yt-uix-sessionlink spf-link ", "dir": "ltr"}))
for link in list_video_artist:
metrics = computeIndicatorForPage('https://www.youtube.com' + link)
all_metrics.append(metrics)
return all_metrics
metrics_rihanna = getAllMetricsForArtist('rihanna')
df_rihanna = pd.DataFrame(metrics_rihanna, columns=[
'song', 'number_of_views', 'number_of_likes', 'number_of_dislikes', 'indicator'])
df_rihanna.to_csv('Rihanna.csv', index=False, encoding='utf-8')
avg_rihanna_indicator = 0
for song in metrics_rihanna:
avg_rihanna_indicator += song['indicator']
metrics_beyonce = getAllMetricsForArtist('beyonce')
df_beyonce = pd.DataFrame(metrics_beyonce, columns=[
'song', 'number_of_views', 'number_of_likes', 'number_of_dislikes', 'indicator'])
df_beyonce.to_csv('Beyonce.csv', index=False, encoding='utf-8')
avg_beyonce_indicator = 0
for song in metrics_beyonce:
avg_beyonce_indicator += song['indicator']
print(
"=================================================================================")
print("Rihanna AVG indicator = " +
str(float(avg_rihanna_indicator) / len(metrics_rihanna)))
print("Beyonce AVG indicator = " +
str(float(avg_beyonce_indicator) / len(metrics_beyonce)))
|
[
"fokoub@gmail.com"
] |
fokoub@gmail.com
|
fa24656c30eff68cac260d33467c06f2f0f78e40
|
3a3055474bccf3def468c4d03a969efb35809c08
|
/comment/forms.py
|
312dce5e9e0ab645687d8fb96e102cc68cbb442a
|
[] |
no_license
|
kinghanjiangxue/BulidMyWebSite
|
18cc838597e8e0b92d17065b2e9b0fb212d96618
|
8bb59e740814a164b759711953068096cf6d8658
|
refs/heads/master
| 2022-12-26T05:09:37.138055
| 2019-08-23T10:50:24
| 2019-08-23T10:50:24
| 191,863,534
| 3
| 1
| null | 2022-12-09T22:53:41
| 2019-06-14T02:31:00
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['body']
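# Hedged usage note (illustrative addition, not part of the original file):
# in a view, bind POST data with form = CommentForm(request.POST); when
# form.is_valid(), use form.save(commit=False) to set any related fields
# (e.g. the parent article) before the final save.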
|
[
"yuankisstherain@163.com"
] |
yuankisstherain@163.com
|
8f6b8907e13c2ab27fed2def8a040695b861dd34
|
1d75146a66245dc046dc216bb602129208e00733
|
/open/Edgecortix/code/mobilenetv2/Offline/python/backend_pytorch_yolov3_jit_traced.py
|
64772093cea7eabe60308826e27dbfb1b13720d5
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
georgelyuan/inference_results_v1.1
|
febf287bd5967bf7f087355a81f06a2bd298cbfe
|
3196a5587887c39203ee3ac246fa5dbe789d9085
|
refs/heads/main
| 2023-08-16T08:49:45.274284
| 2021-09-23T20:57:17
| 2021-09-23T20:57:17
| 409,773,141
| 0
| 0
|
NOASSERTION
| 2021-09-23T23:36:37
| 2021-09-23T23:36:37
| null |
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
"""
pytoch jit-traced backend
"""
# pylint: disable=unused-argument,missing-docstring
import json
import os
import time
import torch
import torch.nn as nn
import torchvision
import backend
import sys
sys.path.append('yolov3')
from models.yolo import Detect
from utils.general import non_max_suppression
config = {
'yolov3-jit': (
[
[10, 13, 16, 30, 33, 23],
[30, 61, 62, 45, 59, 119],
[116, 90, 156, 198, 373, 326]
],
[256,512, 1024],
),
'yolov3-tiny-jit': (
[
[10, 14, 23, 27, 37, 58],
[81, 82, 135, 169, 344, 319]
],
[256, 512],
),
}
class BackendPytorchYOLOv3JITTraced(backend.Backend):
def __init__(self):
super(BackendPytorchYOLOv3JITTraced, self).__init__()
self.sess = None
self.model = None
# https://github.com/ultralytics/yolov3
self.conf = 0.001
self.iou = 0.65
def version(self):
return torch.__version__
def name(self):
return "pytorch-yolov3-jit-traced"
def image_format(self):
return "NCHW"
def load(self, model_path, inputs=None, outputs=None):
self.model = torch.jit.load(model_path)
model_name = os.path.split(model_path)[-1].replace('.pt', '')
anchors, ch = config[model_name]
pp = Detect(80, anchors, ch)
s = 128
pp.stride = torch.tensor([s / x.shape[-2] for x in self.model(torch.zeros(1, 3, s, s))])
pp.anchors /= pp.stride.view(-1, 1, 1)
pp.load_state_dict(torch.load(model_path.replace('.pt', '-pp.pt')))
pp.eval()
self.post_processor = pp
# dummy
self.inputs = ["input"]
self.outputs = ["output"]
return self
def predict(self, feed):
        key = next(iter(feed))  # take the (single) input tensor's key
feed[key] = torch.tensor(feed[key]).float()
size = feed[key].shape[2]
with torch.no_grad():
output = self.model(feed[key])
pred = self.post_processor(list(output))[0]
pred = non_max_suppression(pred, conf_thres=self.conf, iou_thres=self.iou)[0]
bboxes = pred[..., :4]/size
scores = pred[..., 4]
labels = pred[..., 5].int()+1
return [bboxes], [labels], [scores]
|
[
"tjablin@google.com"
] |
tjablin@google.com
|
826053e4c8827b6ddae96995a415820e2ebb74a3
|
d37862139b44f4eaa9892b47b9b7ed52b2bbfef4
|
/week2/d5-permutations.py
|
39db73e253cabb8619fe9c3ac57edd48dd128c6d
|
[] |
no_license
|
nhnminh/leetcode-challenge
|
e59faec4eb8434ff0ebebcac2c31fd279286dee9
|
f618f53eca8e23d9d8b5b4e1bd09d768090c0bf4
|
refs/heads/master
| 2023-01-07T05:11:25.528985
| 2020-11-18T10:09:33
| 2020-11-18T10:09:33
| 312,016,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
if len(nums) <= 1:
return [nums]
seen = {}
first = nums[0]
remainder = nums[1:]
remainder_perms = self.permuteUnique(remainder)
for remainder_perm in remainder_perms:
for pos in range(len(nums)):
curr_perm = remainder_perm.copy()
curr_perm.insert(pos, first)
seen[str(curr_perm)] = curr_perm
        return list(seen.values())
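# Hedged usage sketch (illustrative addition, not part of the original
# solution): duplicate inputs yield each distinct ordering exactly once.
if __name__ == "__main__":
    print(Solution().permuteUnique([1, 1, 2]))
    # -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]] in some order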
|
[
"nhnminh@apcs.vn"
] |
nhnminh@apcs.vn
|
6a7abbb138da35522f6af973df8ff02116342540
|
3de5bedc4b1ebe5389896658a0bf0c0302afaa0b
|
/images360/images360/items.py
|
49d8879cdb2000b7e69b362809863adfa5c0bc46
|
[] |
no_license
|
ShaoLay/KeyWords_Images
|
c4908081119a2d137d1210c1793bf739cb883a83
|
d83c76e07d834a4226bf708d000efbf9c01f8735
|
refs/heads/master
| 2020-04-12T02:49:35.894724
| 2018-12-18T09:01:53
| 2018-12-18T09:01:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Item, Field
class ImageItem(Item):
collection = table = 'images'
id = Field()
url = Field()
title = Field()
thumb = Field()
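# Hedged usage note (illustrative addition): an ImageItem is typically
# populated in a spider callback and yielded to the item pipelines, e.g.
#   yield ImageItem(id=..., url=..., title=..., thumb=...)
# `collection` / `table` name the MongoDB collection / SQL table targets.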
|
[
"javs_shao@163.com"
] |
javs_shao@163.com
|
227624e1a2b47af4149919d50b795f308501b5d2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_verdure.py
|
8a49a8042f50de2bb3ea919aa34e26684f025e8f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
#calss header
class _VERDURE():
def __init__(self,):
self.name = "VERDURE"
self.definitions = [u'(the green colour of) fresh, healthy plants']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
23ebc1cc6d44d65f993c8b7fdda6e4fedc6aada2
|
7c4c9b6b61b233839526b0ac717ea1e3873b105f
|
/video/make_frame.py
|
63d02e346b99426b81ea788d685c50b9a108ed25
|
[] |
no_license
|
Peddi30/speech-img-vid-generator
|
51ec96a9412b22cb97efefc2999fbdd93a7da9c9
|
9bf1c023382b580876575319041d4f1df4912f04
|
refs/heads/master
| 2023-03-20T05:18:26.915497
| 2021-03-09T23:24:49
| 2021-03-09T23:24:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,672
|
py
|
from selenium.webdriver import Chrome, ChromeOptions
import requests
from PIL import Image
import base64
import uuid
import os
import time
import math
import utils.log as log
CHROME_DRIVER_PATH = '/Users/aarenstade/Documents/chromedriver'
def downloadB64Img(img, filename):
    # Drop the first 22 characters (the data-URL prefix) before decoding
    data = str(img)[22:]
    decoded = base64.b64decode(data)
    with open(filename, 'wb') as out_file:
        out_file.write(decoded)
def get_images(query):
options = ChromeOptions()
options.add_argument('--headless')
driver = Chrome(
executable_path=CHROME_DRIVER_PATH, options=options)
url = "https://www.bing.com/images/search?q="+query
driver.get(url)
ActualImages = []
images = driver.find_elements_by_class_name('mimg')
for i in images:
src = i.get_attribute('src')
if(src != None):
ActualImages.append(src)
driver.quit()
return ActualImages
def create_imgs_dir(path):
if not (os.path.exists(path)):
os.makedirs(path)
return True
else:
return False
def download_images(images, path):
for image in images:
if(image != None):
filename = path + "/" + str(uuid.uuid4()) + ".jpg"
if(image.startswith('data')):
downloadB64Img(image, filename)
else:
request = requests.get(image, stream=True)
if not request.ok:
print(request)
                with open(filename, 'wb') as imgFile:
                    for block in request.iter_content(1024):
                        if not block:
                            break
                        imgFile.write(block)
# TODO: create image (natural sizes) and then put it into 720p frame
def create_collage(path):
final_w = 1280
final_h = 720
image_paths = [f for f in os.listdir(
path) if os.path.isfile(os.path.join(path, f))]
imgs_w = 0
imgs_h = 0
if(len(image_paths) > 0):
# get average size of images
for i in image_paths:
img = Image.open(path + '/' + i)
imgs_w = imgs_w + img.width
imgs_h = imgs_h + img.height
img.close()
# define block size
block_w = round((imgs_w / len(image_paths)))
block_h = round((imgs_h / len(image_paths)))
# determine number of rows of blocks
nper_row = int(math.floor(final_w / block_w))
nrows = int(math.ceil(len(image_paths) / nper_row))
row_w = block_w * nper_row
row_h = block_h
# for first img
# determine padding from top and left
y_pad_start = round((final_h - (row_h * nrows)) / 2)
x_pad_start = round((final_w - row_w) / 2)
x_cur = x_pad_start
y_cur = y_pad_start
col_num = 1
img_num = 0
# paste images, scaled into blocks
final_img = Image.new('RGB', (final_w, final_h))
for i in image_paths:
img_num = img_num + 1
img = Image.open(path + '/' + i)
image = img.resize((block_w, block_h))
final_img.paste(image, box=(x_cur, y_cur))
# move to next column
col_num = col_num + 1
x_cur = x_cur + block_w
            # if we've filled the current row, wrap to the next one
if(col_num > nper_row):
col_num = 1
y_cur = y_cur + row_h # move down a row
# modify x_cur for when next row < than nper_row
imgs_left = len(image_paths) - img_num
if(imgs_left < nper_row):
x_cur = round((final_w - (block_w * imgs_left)) / 2)
else:
x_cur = x_pad_start # go to x start
return final_img
else:
final_img = Image.new('RGB', (final_w, final_h))
return final_img
def CreateFrame(query, path):
start = time.time()
query = query.split(' ')
query = '+'.join(query)
query = query.replace('.', '')
query = query.replace('/', '')
query = query.replace("'", '')
query = query.replace('"', '')
print(query)
path = path + '/src/' + query
newDir = create_imgs_dir(path)
if(newDir):
images = get_images(query)
if(len(images) > 0):
download_images(images, path)
img = create_collage(path)
end = time.time()
log.Log_Frame(query, (end - start))
return img
else:
print('no images found')
return None
else:
img = create_collage(path)
end = time.time()
log.Log_Frame(query, (end - start))
return img
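# Hedged usage sketch (illustrative addition; query and path are assumptions):
#   frame = CreateFrame('mountain sunset', './output')
#   if frame: frame.save('./output/frame.jpg')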
|
[
"aarenstade@gmail.com"
] |
aarenstade@gmail.com
|
d5314cdaa575a225f31f0288cd7ecb535f6da709
|
4aa7a4d0525095725eb99843c83827ba4806ceb1
|
/keras/keras108_ensemble2.py
|
6a175be3a7a210e13cfe51a8b6cbb64286ea57a3
|
[] |
no_license
|
seonukim/Study
|
65a70f5bdfad68f643abc3086d5c7484bb2439d4
|
a5f2538f9ae8b5fc93b5149dd51704e8881f0a80
|
refs/heads/master
| 2022-12-04T17:04:31.489771
| 2020-08-21T00:35:15
| 2020-08-21T00:35:15
| 260,144,755
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
# 1. Data
import numpy as np
x1_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x2_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y1_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y2_train = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
# 2. Build the model
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras.layers import concatenate
input1 = Input(shape = (1,))
x1 = Dense(100)(input1)
x1 = Dense(100)(x1)
x1 = Dense(100)(x1)
input2 = Input(shape = (1,))
x2 = Dense(100)(input2)
x2 = Dense(100)(x2)
x2 = Dense(100)(x2)
merge = concatenate([x1, x2])
x3 = Dense(100)(merge)
output1 = Dense(1)(x3)
x4 = Dense(70)(merge)
x4 = Dense(70)(x4)
output2 = Dense(1, activation = 'sigmoid')(x4)
model = Model(inputs = [input1, input2], outputs = [output1, output2])
model.summary()
# 3. Compile and train
model.compile(loss = ['mse', 'binary_crossentropy'],
optimizer = 'adam',
metrics = ['mse', 'acc'])
model.fit([x1_train, x2_train], [y1_train, y2_train], epochs = 100, batch_size = 1)
# 4. Evaluate and predict
loss = model.evaluate([x1_train, x2_train], [y1_train, y2_train])
print('loss : ', loss)
x1_pred = np.array([11, 12, 13, 14])
x2_pred = np.array([11, 12, 13, 14])
y_pred = model.predict([x1_pred, x2_pred])
print(y_pred)
|
[
"92.seoonooo@gmail.com"
] |
92.seoonooo@gmail.com
|
6a3fa960b84a9d356ed5e5860ecd007c65673ab6
|
f26111108d5b30a4cf52dab4d64a7b068a0cc7dd
|
/custom-addons/openmadrassa_base/__init__.py
|
771db3c08636c7015877ba19efca683e0f6f4456
|
[] |
no_license
|
vidtsin/girls_erp
|
80489598d408e401f3f96c08854691d00790e5f8
|
381bfe7fb2ac2c4fd49bf5b5a7e75365ca300df4
|
refs/heads/master
| 2020-07-19T17:10:24.336653
| 2017-05-06T15:44:02
| 2017-05-06T15:44:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import school
import school_fees
import account_voucher
import res_partner
import parent
import timetable
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"emeytahaz@gmail.com"
] |
emeytahaz@gmail.com
|
7127487e6db86d1498fb041791074873451e4b75
|
44ecce58e54d475158afc703dc7e6c5455db9c9f
|
/Detectivesusi/mysite/mysite/settings.py
|
202281f9453b4d45cf12111a74351663702d0b31
|
[] |
no_license
|
YangTaeSung/CAU-CapstoneDesign2-SusiDetective
|
0d3f5752e3082fcaf5efec1a611b763a2ded60ae
|
ce9e6c2ba44a26f67f54522bc67736b5758e181f
|
refs/heads/master
| 2020-08-07T04:11:20.905517
| 2020-04-01T08:37:41
| 2020-04-01T08:37:41
| 213,290,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
from django.contrib.messages import constants as message_constants
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u8ga=p_tcieacq#8odl@d+2ncm@!9f5$&2bqvd4=b*r=v6lx(y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # switch to False for production
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
#AUTH_USER_MODEL = 'main.User' # 19.03.23 added for register
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (  # Static files are looked up in the STATIC folder.
    os.path.join(BASE_DIR, 'static'),
)
## Media file links
# URL prefix for each media file
MEDIA_URL = '/media/'  # configured to always end with a slash
# Directory path for storing uploaded files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MESSAGE_TAGS = {message_constants.DEBUG: 'debug',
message_constants.INFO: 'info',
message_constants.SUCCESS: 'success',
message_constants.WARNING: 'warning',
message_constants.ERROR: 'danger',}
# Apply HTTPS
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
BROKER_URL = 'amqp://guest:guest@localhost//'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
|
[
"tsy0668@naver.com"
] |
tsy0668@naver.com
|
14870f84a64db46f9127e12a287297baeccee592
|
a19179d51e494b1fed9eed94e72a2d01b67fcb44
|
/xylophone/pgw/ConfigurationService.py
|
6a15b62f0d863535d6708b167aa04b047a174da5
|
[
"MIT"
] |
permissive
|
Turysaz/xylophone
|
d4752b926f80fab5350e1e6eb36376f640df6b10
|
da44b8127aa6b89d6cdb3bdb564c386520b37e22
|
refs/heads/master
| 2021-09-11T16:41:31.632019
| 2018-04-09T20:46:00
| 2018-04-09T20:46:00
| 124,000,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# Copyright (c) 2018 Turysaz <turysaz@posteo.org>
from configparser import ConfigParser
def create_configuration_parser():
configuration = ConfigParser()
configuration.read("configuration.txt")
return configuration
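# Hedged usage sketch (illustrative addition; assumes a configuration.txt
# with at least one [section] sits next to the script):
if __name__ == "__main__":
    configuration = create_configuration_parser()
    print(configuration.sections())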
|
[
"turysaz@posteo.org"
] |
turysaz@posteo.org
|
06ab084d23697033b94ecb4e90ee9c1e04b11f01
|
75519d2a9bf55e2d9376ea08a36676948a8b232c
|
/cores/Registry.py
|
4271cb1b3e6868b1dde194e897c740ebeb6257e4
|
[
"MIT"
] |
permissive
|
CGFanTuan/damgteam
|
9c32d59cbd0ecb9d3acffd9b902b918c40797e14
|
aec414f084f6ab6ec5897314390605aaa8380d62
|
refs/heads/master
| 2020-09-17T00:29:24.832648
| 2019-11-25T09:51:13
| 2019-11-25T09:51:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,119
|
py
|
# -*- coding: utf-8 -*-
"""
Script Name: Registry.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
import datetime, time
from bin.data.damg import DAMGDICT, DAMGLIST, DAMG
from appData import layoutTypes
class InspectLayout(DAMG):
key = 'InspectLayout'
layoutTypes = DAMGLIST()
layoutKeys = DAMGLIST()
def __init__(self, parent=None):
super(InspectLayout, self).__init__(parent)
self.layoutTypes.appendList(layoutTypes)
def doInspection(self, layout):
self.layoutTypes.append(layout.Type)
self.layoutKeys.append(layout.key)
return layout
def checkType(self, layout):
if not self.haveType(layout):
try:
layout.show()
except AttributeError:
layoutType = 'Object'
else:
layoutType = 'UI'
layout.__setattr__('Type', layoutType)
return self.checkKey(layout)
def checkKey(self, layout):
if not self.haveKey(layout):
key = layout.__class__.__name__
layout.__setattr__('key', key)
return layout
def haveType(self, layout):
try:
layout.Type
except AttributeError:
return False
else:
return True
def haveKey(self, layout):
try:
layout.key
        except AttributeError:  # attribute access raises AttributeError, not KeyError
return False
else:
return True
class RegistryLayout(DAMGDICT):
awaitingSlots = DAMGLIST()
layout_names = DAMGLIST()
layout_ids = DAMGLIST()
layout_datetimes = DAMGLIST()
layout_keys = DAMGLIST()
def __init__(self):
super(RegistryLayout, self).__init__(self)
self.inspect = InspectLayout(self)
def regisLayout(self, layout):
ui = self.inspect.doInspection(layout)
key = ui.key
if self.isLayout(ui):
if self.isAwaitingSlot(ui):
self.awaitingSlots.remove(key)
self.doRegister(ui)
else:
if not self.isRegistered(ui):
self.doRegister(ui)
else:
print("Already registered: {0}".format(key))
return False
def isAwaitingSlot(self, layout):
key = layout.key
if key in self.awaitingSlots:
return True
else:
return False
def doRegister(self, layout):
key = layout.key
self.layout_names.append(layout.name)
self.layout_ids.append(id(layout))
self.layout_datetimes.append(str(datetime.datetime.fromtimestamp(time.time()).strftime('%H:%M:%S|%d.%m.%Y')))
self.layout_keys.append(layout.key)
# print("Registing layout: {0} : {1}".format(layout.key, layout))
self[key] = layout
return True
def deRegister(self, layout):
key = layout.key
index = self.layout_names.index(layout.name)
if self.isRegistered(layout):
self.awaitingSlots.append(key)
try:
del self[key]
except KeyError:
self.pop(key, None)
self.layout_names.remove(self.layout_names[index])
self.layout_ids.remove(self.layout_ids[index])
self.layout_datetimes.remove(self.layout_datetimes[index])
return True
else:
return False
def isRegistered(self, layout):
key = layout.key
if key in self.keys():
return True
else:
return False
def isLayout(self, layout):
if layout.Type in self.inspect.layoutTypes:
return True
else:
return False
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 8/11/2019 - 4:18 PM
# © 2017 - 2018 DAMGteam. All rights reserved
|
[
"dot@damgteam.com"
] |
dot@damgteam.com
|
0bd4b125f0c29da6a25d10ab87d80259130e89d5
|
bf3bd49d3113db36550808182d2f14a3e6baebf7
|
/05/lambda01.py
|
b5aaf16fd92e09178f37cc9f7374f3b65da2382f
|
[] |
no_license
|
bbangwon/hongong_python
|
7dac29fea90d79143af1bcfa3f79dd6336906954
|
05daa3594b858082b97a2c0e8ba73d84fd86b234
|
refs/heads/master
| 2020-08-24T16:21:19.934245
| 2020-02-02T15:01:09
| 2020-02-02T15:01:09
| 216,862,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# Declare the functions.
power = lambda x: x * x
under_3 = lambda x: x < 3
# Declare the variables.
list_input_a = [1, 2, 3, 4, 5]
# Use the map() function.
output_a = map(power, list_input_a)
print("# Results of the map() function")
print("map(power, list_input_a):", output_a)
print("map(power, list_input_a):", list(output_a))
# Use the filter() function.
output_b = filter(under_3, list_input_a)
print("# Results of the filter() function")
print("filter(under_3, list_input_a):", output_b)
print("filter(under_3, list_input_a):", list(output_b))
|
[
"bbangwon.chung@gmail.com"
] |
bbangwon.chung@gmail.com
|
b091664ff5c3a2d99189aa4cbc7b05ab80edba36
|
83843adddd37a668873f6161f7b2f2907d50911e
|
/unet_test.py
|
37bc1b633b1bea55ca99069393651c53c8969c29
|
[] |
no_license
|
Vishakha6/UNet-Python
|
efd375e9b1043d82791844c7948c78ffe9f63355
|
ea0ed4cc5860f683a277f510a0fb5c8a29f42a2f
|
refs/heads/main
| 2023-03-14T17:21:33.554395
| 2021-03-28T19:02:15
| 2021-03-28T19:02:15
| 336,354,213
| 0
| 0
| null | 2021-03-28T19:02:16
| 2021-02-05T18:20:59
|
Python
|
UTF-8
|
Python
| false
| false
| 6,809
|
py
|
import h5py
import numpy as np
import PIL
import subprocess
import os
from PIL import Image
import sys
from tifffile import imread, TiffFile
from bfio import BioReader, BioWriter, LOG4J, JARS
import javabridge, math
from pathlib import Path
from multiprocessing import cpu_count
def rescale(size,img,mode='uint8'):
if mode == 'float32':
#for floating point images:
img = np.float32(img)
img_PIL = PIL.Image.fromarray(img,mode='F')
elif mode == 'uint8':
#otherwise:
img_PIL = PIL.Image.fromarray(img)
else:
raise(Exception('Invalid rescaling mode. Use uint8 or float32'))
return np.array(img_PIL.resize(size,PIL.Image.BILINEAR))
def normalize(img):
###normalize image
img_min = np.min(img)
img_max = np.max(img)
img_centered = img - img_min
img_range = img_max - img_min
return np.true_divide(img_centered, img_range)
def unet_segmentation(input_img,img_pixelsize_x,img_pixelsize_y,
modelfile_path,weightfile_path,iofile_path,
tiling_x=4,tiling_y=4,gpu_flag='0',
cleanup=True):
#fix parameters
n_inputchannels=1
n_iterations=0
## prepare image rescaling
np.set_printoptions(threshold=sys.maxsize)
#get model resolution (element size) from modelfile
modelfile_h5 = h5py.File(modelfile_path,'r')
modelresolution_y = modelfile_h5['unet_param/element_size_um'][0]
modelresolution_x = modelfile_h5['unet_param/element_size_um'][1]
modelfile_h5.close()
#get input image absolute size
abs_size_x = input_img.shape[1] * img_pixelsize_x
abs_size_y = input_img.shape[0] * img_pixelsize_y
#get rescaled image size in pixel
rescaled_size_px_x = int(np.round(abs_size_x / modelresolution_x))
rescaled_size_px_y = int(np.round(abs_size_y / modelresolution_y))
rescale_size = (rescaled_size_px_x,rescaled_size_px_y)
### preprocess image and store in IO file
#normalize image, then rescale
normalized_img = normalize(input_img)
rescaled_img = np.float32(rescale(rescale_size,normalized_img,mode='float32'))
#prepending singleton dimensions to get the desired blob structure
h5ready_img = np.expand_dims(rescaled_img, axis=(0,1))
iofile_h5 = h5py.File(iofile_path,mode='x')
iofile_h5.create_dataset('data',data=h5ready_img)
iofile_h5.close()
# ### run caffe_unet commands
# #assemble sanity check command
command_sanitycheck = []
command_sanitycheck.append("caffe_unet")
command_sanitycheck.append("check_model_and_weights_h5")
command_sanitycheck.append("-model")
command_sanitycheck.append(modelfile_path)
command_sanitycheck.append("-weights")
command_sanitycheck.append(weightfile_path)
command_sanitycheck.append("-n_channels")
command_sanitycheck.append(str(n_inputchannels))
if gpu_flag:
command_sanitycheck.append("-gpu")
command_sanitycheck.append(gpu_flag)
    #run the command and capture its console output
sanitycheck_proc = subprocess.run(command_sanitycheck,stdout=subprocess.PIPE)
# #aborts if process failed
sanitycheck_proc.check_returncode()
#assemble prediction command
command_predict = []
command_predict.append("caffe_unet")
command_predict.append("tiled_predict")
command_predict.append("-infileH5")
command_predict.append(iofile_path)
command_predict.append("-outfileH5")
command_predict.append(iofile_path)
command_predict.append("-model")
command_predict.append(modelfile_path)
command_predict.append("-weights")
command_predict.append(weightfile_path)
command_predict.append("-iterations")
command_predict.append(str(n_iterations))
command_predict.append("-n_tiles")
command_predict.append(str(tiling_x)+'x'+str(tiling_y))
command_predict.append("-gpu")
command_predict.append(gpu_flag)
if gpu_flag:
command_predict.append("-gpu")
command_predict.append(gpu_flag)
#run command
output = subprocess.check_output(command_predict, stderr=subprocess.STDOUT).decode()
print(output)
# load results from io file and return
    output_h5 = h5py.File(iofile_path, 'r')
score = output_h5['score'][:]
output_h5.close()
# #get segmentation mask by taking channel argmax
segmentation_mask = np.squeeze(np.argmax(score, axis=1))
return segmentation_mask
def read_file(input_directory, pixelsize, output_directory):
img_pixelsize_x = pixelsize
img_pixelsize_y = pixelsize
modelfile_path = "2d_cell_net_v0-cytoplasm.modeldef.h5"
weightfile_path = "snapshot_cytoplasm_iter_1000.caffemodel.h5"
iofile_path = "output.h5"
out_path = Path(output_directory)
rootdir1 = Path(input_directory)
""" Convert the tif to tiled tiff """
javabridge.start_vm(args=["-Dlog4j.configuration=file:{}".format(LOG4J)],
class_path=JARS,
run_headless=True)
i = 0
try:
for PATH in rootdir1.glob('**/*'):
tile_grid_size = 1
tile_size = tile_grid_size * 1024
# Set up the BioReader
with BioReader(PATH,backend='java',max_workers=cpu_count()) as br:
# Loop through timepoints
for t in range(br.T):
# Loop through channels
for c in range(br.C):
with BioWriter(out_path.joinpath(f"final{i}.ome.tif"),metadata = br.metadata, backend='java') as bw:
# Loop through z-slices
for z in range(br.Z):
# Loop across the length of the image
for y in range(0,br.Y,tile_size):
y_max = min([br.Y,y+tile_size])
# Loop across the depth of the image
for x in range(0,br.X,tile_size):
x_max = min([br.X,x+tile_size])
input_img = np.squeeze(br[y:y_max,x:x_max,z:z+1,c,t])
img = unet_segmentation(input_img,img_pixelsize_x, img_pixelsize_y,modelfile_path,weightfile_path,iofile_path)
bw[y:y_max, x:x_max,...] = img
os.remove("output.h5")
i+=1
finally:
# Close the javabridge. Since this is in the finally block, it is always run
javabridge.kill_vm()
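# Hedged usage sketch (illustrative addition; the directories and pixel size
# in microns are assumptions, not values from the original file):
#   read_file('input_images', 0.5, 'segmented_output')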
|
[
"noreply@github.com"
] |
Vishakha6.noreply@github.com
|
07ad80db0ea602e2772902174ad5d49288838307
|
a0613d6d00be6e23de8757c347e9c7fff1db6733
|
/Source_Code/SC9.py
|
4fea522f2d06d9faccf7f1ccecde50eaaf452b3f
|
[] |
no_license
|
roseyemelyanova/User_Guide
|
125da2a13809afc129dbe60c2bbd57ab84a0a4fb
|
405dfb6f82a9ba55f04153f6ec4218cea65b69a6
|
refs/heads/master
| 2020-04-19T17:14:59.195743
| 2019-01-30T11:10:01
| 2019-01-30T11:10:01
| 168,329,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,388
|
py
|
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
'''
The code below creates the images for Figures 15 and 2 in the
User Guide.
Figure 15 is a L0 image created from the S8 band information.
Figure 2 is a L0 image using only one detector from the S8 band, and
including a closer view to illustrate how scans and detector numbers are
related.
'''
##############################################################################
#STEP1: Define the path to the relevant .SEN3/ file and store relevant
#information from it in variables
##############################################################################
fname = 'Data/S3A_SL_1_RBT____20180817T185806_20180817T190106_20180817T205734_0179_034_341_2700_MAR_O_NR_002.SEN3/'
S8 = Dataset(fname+'S8_BT_in.nc').variables['S8_BT_in'][:]
S8o = Dataset(fname+'S8_BT_in.nc').variables['S8_BT_orphan_in'][:]
scan = Dataset(fname+'indices_in.nc').variables['scan_in'][:]
pix = Dataset(fname+'indices_in.nc').variables['pixel_in'][:]
det = Dataset(fname+'indices_in.nc').variables['detector_in'][:]
scano = Dataset(fname+'indices_in.nc').variables['scan_orphan_in'][:]
pixo = Dataset(fname+'indices_in.nc').variables['pixel_orphan_in'][:]
deto = Dataset(fname+'indices_in.nc').variables['detector_orphan_in'][:]
##############################################################################
#STEP2: As this script is imported into SC11 to make use of variables above,
#set a condition which will only be met if this script is run
##############################################################################
if __name__ == "__main__":
##########################################################################
#STEP3: Define a function to ungrid data from L1 information
##########################################################################
l0_dms = [846,1199]
def plotL0(det_no):
######################################################################
#STEP3a: Create and populate a L0 template by splitting the detectors
#and finding the seperate detector pixels and their values in S8.
#l0_dms is defined in line 30 for simplicity. l0_S8 must exist outside
#of the for loop to produce a full L0 image for both detectors, however
#l0_dms definition in lines 47-49 doesn't allow this. Uncomment the
#lines to see how it changes the image
######################################################################
l0_S8 = np.zeros([l0_dms[0]*2,l0_dms[1]])
for idet in range(det_no):
ipix_valid = np.where(np.ma.getmaskarray(pix)==False)
iscan_valid = np.where(np.logical_and(np.ma.getmaskarray(scan)==False,det==idet))
iscan_valid_orphan = np.where(np.logical_and(np.ma.getmaskarray(deto)==False,deto==idet))
s0 = scan[iscan_valid].min()
#if idet == 0:
#l0_dms = [scan[iscan_valid].max() - scan[iscan_valid].min() + 1,pix[ipix_valid].max() - pix[ipix_valid].min() + 1]
#l0_S8 = np.zeros([l0_dms[0]*2,l0_dms[1]])
l0_S8[((scan[iscan_valid]-s0)*2)+idet,pix[iscan_valid]] = S8[iscan_valid]
l0_S8[((scano[iscan_valid_orphan]-s0)*2)+idet,pixo[iscan_valid_orphan]] = S8o[iscan_valid_orphan]
return l0_S8
##########################################################################
#STEP4: Call the function to produce the required plots
##########################################################################
l0_S8 = plotL0(2)
l0_S8 = np.ma.masked_values(l0_S8,0)
im = plt.imshow(l0_S8, aspect=0.5, origin='lower', cmap='RdBu')
plt.xlabel('pixel number')
plt.ylabel('scan number')
cax = plt.axes([0.9,0.12,0.02,0.76])
plt.colorbar(im, cax)
plt.savefig('Figures/Fig15.png',dpi=1000)
plt.show()
l0_det = plotL0(1)
l0_det = np.ma.masked_values(l0_det,0)
fig, axes = plt.subplots(ncols=2, nrows=1, gridspec_kw = {'width_ratios':[1, 1]})
axes[0].imshow(l0_det, cmap='RdBu')
axes[1].set_xlim(700,750)
axes[1].set_ylim(625,700)
axes[1].imshow(l0_det, cmap='RdBu', aspect=1)
fig.text(0.02, 0.5, 'scan number', ha='center', va='center', rotation=90)
fig.text(0.5, 0.02, 'pixel number', ha='center', va='center')
plt.savefig('Figures/Fig2.png', dpi=1000)
plt.show()
|
[
"rose.yemelyanova@stfc.ac.uk"
] |
rose.yemelyanova@stfc.ac.uk
|
71731d810836000957d4a14c67af9f44ad0a8730
|
739aa685120a039a02fd0083188b6258800d5018
|
/app.py
|
310075a22ef5a981b7a1f3cf6277b9dd0f10d0a2
|
[] |
no_license
|
uzmabb182/web-scraping-challenge
|
839018deb1af717d8ab0cb6532bbe72777f1fcb3
|
74c61c79febfb5331ae81d580aedf0928cf6f5f9
|
refs/heads/main
| 2023-06-04T18:16:22.201735
| 2021-06-25T23:42:52
| 2021-06-25T23:42:52
| 379,407,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
# Create an instance of Flask
app = Flask(__name__, template_folder="templates")
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
mars_collection = mongo.db.mars # reference to the mars collection
# Route to render index.html template using data from Mongo
@app.route("/")
def home():
# Find one record of data from the mongo database
# @TODO: YOUR CODE HERE!
db_data = mars_collection.find_one()
# Return template and data
return render_template("index.html", exploration=db_data)
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# Run the scrape function and save the results to a variable
# @TODO: YOUR CODE HERE!
mars_data = scrape_mars.scrape_info()
# Update the Mongo database using update and upsert=True
    # update the mongo database with mars_data
# @TODO: YOUR CODE HERE!
mars_collection.update({}, mars_data, upsert=True)
# Redirect back to home page
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
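# Hedged usage note (illustrative addition): with MongoDB running on
# localhost:27017, start the app, visit /scrape once to populate the mars
# collection, then / to render the stored document.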
|
[
"81127634+uzmabb182@users.noreply.github.com"
] |
81127634+uzmabb182@users.noreply.github.com
|
fb3fbed1966c9bc54315d047365e9a8a546cbd50
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2449486_1/Python/Sedols/b.py
|
64b55176a8396c1963fbdc7ee779fa270ef59426
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import sys
fin = open('in.txt', 'r')
fout = open('out1.txt', 'w')
n = int(fin.readline())
for T in range(0, n):
ret = True
array = [int(x) for x in fin.readline().split()]
m = int(array[0])
s = int(array[1])
board = []
for k in range(0, m):
array = [int(x) for x in fin.readline().split()]
board.append(array)
for k in range(0,m):
for j in range(0,s):
t = int(board[k][j])
            f1 = True
            f2 = True
for z in range(0,m):
if t < int(board[z][j]):
f1 = False
for z in range(0,s):
if t < int(board[k][z]):
f2 = False
ret = ret and (f1 or f2)
fout.write("Case #" + str(T + 1) + ": ")
if ret:
fout.write("YES\n")
else:
fout.write("NO\n");
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
8df28996c392f920439cc92dd00251d705607e13
|
d40cbeeeea6593fe2a85b0fd899de86b5fe8fd4d
|
/model_to_tensorboard.py
|
3c492ab46480adec4df8f1360a212348ab5e8b38
|
[] |
no_license
|
cisco08/tf-graph-freeze
|
859e835bd179e79de08a50e8ca7e9338803c9255
|
cf3e9f29ee2704791ea93e761c2fa43e06339180
|
refs/heads/master
| 2021-05-21T22:44:10.017827
| 2018-08-16T19:26:49
| 2018-08-16T19:26:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
import os
import argparse
import tensorflow as tf
def main(args):
model_dir = args.dir
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
clear_devices = True
with tf.Session(graph=tf.Graph()) as sess:
saver = tf.train.import_meta_graph(
input_checkpoint + '.meta', clear_devices=clear_devices)
saver.restore(sess, input_checkpoint)
writer = tf.summary.FileWriter(args.savedir, sess.graph)
sess.run(tf.local_variables_initializer())
writer.close()
if __name__ == "__main__":
cwd = os.getcwd()
parser = argparse.ArgumentParser(description='This module exports the graph to be visualized on Tensorboard')
parser.add_argument("-d", "--dir", help="Tensorflow models's directory",
default=os.path.join(cwd, "model_example"))
parser.add_argument("-s", "--savedir", help="Logdir of tensorboard",
default=os.path.join(cwd, "model_example"))
args = parser.parse_args()
main(args)
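# Hedged usage sketch (illustrative addition; paths are assumptions):
#   python model_to_tensorboard.py -d ./model_example -s ./model_example
#   tensorboard --logdir ./model_example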
|
[
"guilherme.uzeda@santodigital.com.br"
] |
guilherme.uzeda@santodigital.com.br
|
d1fa352f5e8c36914f1ec3d82fde294d99c835e7
|
90c7b75c018ea55c5f4acb7b600070d35a8ebccf
|
/newspaper_project/views.py
|
6d49094181761f282c185e0207884cd48fb2c2bd
|
[] |
no_license
|
Pulatov-Javlon/news-app
|
05ee6e697d22ef1402e2e2983d88efc723b42fa6
|
f9fe961876a8896438f1599a70ce527bf2500035
|
refs/heads/master
| 2023-03-23T01:54:10.841967
| 2021-03-17T05:12:35
| 2021-03-17T05:12:35
| 348,587,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from django.urls import reverse_lazy
from django.views.generic import CreateView
from accounts.forms import CustomUserCreationForm
class SignUpView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'registration/signup.html'
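# Hedged usage note (illustrative addition): wire the view into urls.py,
# e.g. path('signup/', SignUpView.as_view(), name='signup').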
|
[
"javlon_pulatov@mail.ru"
] |
javlon_pulatov@mail.ru
|
0cc0b4065bd79e77eed87e7d39054cd52e5b5ac1
|
331bf102de58b30da373c5b7c2d8f4b4b149c9fc
|
/facebook/postapp/migrations/0013_postcomment_commentreply.py
|
b5c7040e8daa0a00a3ff6444f9d2f32df0d8efee
|
[] |
no_license
|
Chandu8817/Thoughtwin
|
6dfcb077c51bab320895ce0e25554408472491a4
|
d7b86e8baa57a9eb06606b97d3c99a1d751a16b9
|
refs/heads/master
| 2023-04-21T16:17:17.967617
| 2021-05-05T14:29:40
| 2021-05-05T14:29:40
| 344,074,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Generated by Django 3.1.7 on 2021-03-15 07:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('postapp', '0012_auto_20210315_0718'),
]
operations = [
migrations.AddField(
model_name='postcomment',
name='commentreply',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='postapp.postcomment'),
),
]
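# Hedged usage note (illustrative addition): apply with
#   python manage.py migrate postapp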
|
[
"virendrakapoor45@gmail.com"
] |
virendrakapoor45@gmail.com
|
795a01da9137fe4b28460de6da338a8256919b72
|
e11ff0e58ec044193d977ea2b11cae46ef30b42c
|
/Plot1.py
|
8a21ec8451995d8da1597a34c224ed89369539d6
|
[] |
no_license
|
AndreiChugunov/telecom
|
e3d8f570b6b6ff0697bf22eedd3e9199d657366b
|
e38318d1b372bc00e03e4ac9451c97b15297200b
|
refs/heads/master
| 2020-05-26T11:12:46.765427
| 2017-05-14T19:57:19
| 2017-05-14T19:57:19
| 82,476,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
from scipy.fftpack import fft
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
Fdiscrete = 8e3
t = np.linspace(0, 1, int(Fdiscrete))
A = 2
f0 = 1e3
phi = np.pi / 4
s1 = A * np.cos(2 * np.pi * f0 * t + phi)
alpha = 1000
s2 = np.exp(-alpha * t) * s1
Nfft = int(2 ** np.ceil(np.log2(len(s2))))
sp = fft(s2, Nfft)
sp_dB = 20 * np.log10(np.abs(sp))
f = np.arange(0, Nfft) / Nfft * Fdiscrete
plt.figure()
plt.grid()
plt.plot(f[:int(Nfft / 2)], np.abs(sp[:int(Nfft / 2)]))
plt.figure(0)
plt.subplot(2, 2, 1)
plt.plot(s2[0:100])
plt.grid()
plt.subplot(2, 2, 2)
plt.stem(s2[0:100])
plt.grid()
plt.subplot(2, 2, 3)
plt.plot(s2[0:100], '.')
plt.grid()
plt.subplot(2, 2, 4)
plt.step(t[0:100], s2[0:100])
plt.grid()
plt.show()
|
[
"bloodyavangard@mail.ru"
] |
bloodyavangard@mail.ru
|
ac6372714d0d7a42172ddc02955add3d036d91bc
|
5026e3a914e6d143357e570ff299897ac7e36138
|
/1. Jimmy's pie.py
|
5edb04566b46da45423f671bdde58948c0b1c8af
|
[] |
no_license
|
Minjun-KANG/Python-learning
|
e351907e1a79b49ca4aaaee1feddb28ce530d504
|
ecba2d8a47fe6bedc0a7d52db3bc5b3ba641fb1d
|
refs/heads/master
| 2022-12-09T21:52:49.091993
| 2020-09-12T07:13:29
| 2020-09-12T07:13:29
| 294,889,106
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
"""
문제에서 제시한 코드
pi = '3.14'
#string으로 되어 연산불가
pie.diameter = 55.4
#변수이름에 특수문자 사용불가
pie_radius = pie.diameter // 2
#변수이름에 특수문자 사용불가
circumference = 2 * pi ** pie_radius
#pi가 string이여서 연산불가
circumference-msg = 'Jimmy's pie has a circumference: '
#Jimmy's 에서 '를 사용하려면 "" 쌍따옴표 필요, 변수이름에 특수문자 사용불가
print(circumference-msg, circumference)
#변수이름에 특수문자 사용불가
"""
pi = 3.14
#string으로 되어 연산불가 -> 쌍따옴표 없애줌
pie_diameter = 55.4
#변수이름에 특수문자 사용불가 -> .을 _언더바로 교체
pie_radius = pie_diameter / 2
#변수이름에 특수문자 사용불가 -> 언더바로 교체해서 가능
#//2 로 되어있는 몫만 넘기는 연산식을 /2 로바꿔 소숫점까지 넘김
circumference = 2 * pi * pie_radius
#pi가 string이여서 연산불가 -> pi를 float로 바꿔서 가능
# ** pie_radius 로 된 제곱을 * 곱하기로 바꿔 수식을 완성
circumference_msg = "Jimmy's pie has a circumference: "
#Jimmy's 에서 '를 사용하려면 "" 쌍따옴표 필요, 변수이름에 특수문자 사용불가
#-> 쌍따옴표로 string을 넣어줌, 변수 특수문자 언더바로 교체
print(circumference_msg, circumference)
#변수이름에 특수문자 사용불가 -> 언더바로 교체해서 가능
|
[
"rkdalswns1@kw.ac.kr"
] |
rkdalswns1@kw.ac.kr
|
e913c8716d91ada19978e483c0baa41a1893614c
|
f1f7e8360f86b11fbb4f1559a414fc561a0e42d5
|
/Creacionales/Singleton/Python/Singleton_Ingenuo/main.py
|
edd9c9d3bee94e7ab2d68f0b18d350eea1f2d4f7
|
[] |
no_license
|
carlos-paezf/Design_Patterns
|
58c5b18bb5264d265c57f48eea1bc13ab1e93c1a
|
7ea1d30acd3c2f7375fb7c9f4a9c3b3c8a228c9a
|
refs/heads/main
| 2023-08-02T11:42:28.273573
| 2021-10-04T16:36:10
| 2021-10-04T16:36:10
| 384,248,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
class SingletonMeta(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class Singleton(metaclass=SingletonMeta):
def some_bussines_logic(self):
pass
if __name__ == "__main__":
s1 = Singleton()
s2 = Singleton()
if id(s1) == id(s2):
print('Singleton works, both variables contain the same instance.')
else:
        print('Singleton failed, variables contain different instances.')
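# Design note (illustrative addition, not part of the original file): the
# naive ("ingenuo") metaclass above is not thread-safe, since two threads can
# race past the membership check. A common hardening sketch guards creation
# with a lock:
import threading
class ThreadSafeSingletonMeta(type):
    _instances = {}
    _lock = threading.Lock()
    def __call__(cls, *args, **kwargs):
        with cls._lock:  # serialize first-time construction
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]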
|
[
"carlos.paezf@usantoto.edu.co"
] |
carlos.paezf@usantoto.edu.co
|
af4d4b5c1213abea4e4ba8ffaddad83002f809df
|
c1aac38b1ee7bf7b8621050bd4837a60744bfd9f
|
/ay_hw_5/__init__.py
|
7aa3c0b45b08b3a16f6053b80868ab013a269dff
|
[] |
no_license
|
callmeorangecat/INF_552
|
3b0a007a37963fcd57396dab96d3f17ee20b0eb6
|
cdcaf20e549bfa2d5942f91f2ce3b4a93d1beba9
|
refs/heads/master
| 2021-02-17T15:04:32.222974
| 2020-02-21T20:31:54
| 2020-02-21T20:31:54
| 245,105,595
| 0
| 1
| null | 2020-03-05T08:16:29
| 2020-03-05T08:16:28
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
#
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '10/3/2019 1:32 PM'
|
[
"aaron19940628@gmail.com"
] |
aaron19940628@gmail.com
|
1c9be258220f972fc0cb44e196a956d1cccd70df
|
115e22d743882c6b4f9d834c01a52f8c00b14f3d
|
/optimizers/optimizers.py
|
94711f42f6478c90491f8165eaaee8c9e3357c23
|
[] |
no_license
|
bblss123/DIP2021-SSNet
|
f562c5d5dc8d5ee68d8a5eb01279d863f41bbc50
|
455c607c693fd2055dddf981f67983045862ffa0
|
refs/heads/main
| 2023-07-17T06:31:58.608330
| 2021-08-22T06:22:17
| 2021-08-22T06:22:17
| 374,912,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
import torch
def init_optim(optim, params, lr, weight_decay):
if optim == 'sgd':
return torch.optim.SGD(params, lr=lr, weight_decay=weight_decay, momentum=0.95)
    elif optim == 'adam':
return torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
else:
raise KeyError("Unsupported Optimizer: {}".format(optim))
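# Hedged usage sketch (illustrative addition; the model and hyperparameters
# are assumptions, not part of the original file):
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(10, 2)
    optimizer = init_optim('sgd', model.parameters(), lr=0.01, weight_decay=1e-4)
    print(optimizer)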
|
[
"bblss123@outlook.com"
] |
bblss123@outlook.com
|
902dfb7519146575070e321be7cb1d14ec655ab8
|
92409dbea06183af132c6ee191303939f2340060
|
/Products/Person/__init__.py
|
7fd3523eaccae3c15612ccff0d424abbd0870453
|
[] |
no_license
|
v2lab/Products.Person
|
2275da8f22fd9716413b68a5d9c3ff76427dd58b
|
caf7d66fbabb1f957fb01a2a13b2c415a83e91a2
|
refs/heads/master
| 2021-01-10T19:54:19.762014
| 2012-11-08T15:11:06
| 2012-11-08T15:11:06
| 16,309,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
"""Main product initializer
"""
from zope.i18nmessageid import MessageFactory
from Products.Person import config
from Products.Archetypes import atapi
from Products.CMFCore import utils
from Products.CMFCore.permissions import setDefaultRoles
# Define a message factory for when this product is internationalised.
# This will be imported with the special name "_" in most modules. Strings
# like _(u"message") will then be extracted by i18n tools for translation.
PersonMessageFactory = MessageFactory('Products.Person')
# Product imports
import config
# Import the content types modules
from content import person
def initialize(context):
"""Initializer called when used as a Zope 2 product.
    This is referenced from configure.zcml. Registration as a "Zope 2 product"
is necessary for GenericSetup profiles to work, for example.
Here, we call the Archetypes machinery to register our content types
with Zope and the CMF.
"""
# Retrieve the content types that have been registered with Archetypes
# This happens when the content type is imported and the registerType()
# call in the content type's module is invoked. Actually, this happens
# during ZCML processing, but we do it here again to be explicit. Of
# course, even if we import the module several times, it is only run
# once.
content_types, constructors, ftis = atapi.process_types(
atapi.listTypes(config.PROJECTNAME),
config.PROJECTNAME)
# Now initialize all these content types. The initialization process takes
# care of registering low-level Zope 2 factories, including the relevant
# add-permission. These are listed in config.py. We use different
# permissions for each content type to allow maximum flexibility of who
# can add which content types, where. The roles are set up in rolemap.xml
# in the GenericSetup profile.
for atype, constructor in zip(content_types, constructors):
utils.ContentInit('%s: %s' % (config.PROJECTNAME, atype.portal_type),
content_types = (atype,),
permission = config.ADD_PERMISSIONS[atype.portal_type],
extra_constructors = (constructor,),
).initialize(context)
|
[
"thefunny@gmail.com"
] |
thefunny@gmail.com
|
91caf03713ddaccf135a689fad6bb63e1f369df3
|
180911c631321f6768f7ad1ec5c15e00c7b5cf0d
|
/extrapypi/migrations/versions/21a32f52b0c3_remove_digest_from_release_model.py
|
f0fbc25f1a878a8293cb4b7e45c6308603f2de0d
|
[
"MIT"
] |
permissive
|
karec/extrapypi
|
34a943532409ee321b4924b52de21823c9eb7970
|
0abc8d3a632628cc7d3a4020fb7c9436787695f9
|
refs/heads/master
| 2023-02-26T05:12:36.876363
| 2018-02-02T15:54:28
| 2018-02-02T15:54:28
| 105,063,006
| 8
| 4
|
MIT
| 2023-09-02T12:27:06
| 2017-09-27T19:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
"""remove digest from release model
Revision ID: 21a32f52b0c3
Revises: e9ba59cdb7b9
Create Date: 2018-01-02 18:30:24.105748
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '21a32f52b0c3'
down_revision = 'e9ba59cdb7b9'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("release") as batch_op:
batch_op.drop_column('md5_digest')
def downgrade():
with op.batch_alter_table("release") as batch_op:
batch_op.add_column(sa.Column('md5_digest', sa.VARCHAR(length=32), nullable=False))
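# Hedged usage note (illustrative addition): apply with `alembic upgrade head`;
# revert with `alembic downgrade e9ba59cdb7b9` (the down_revision above).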
|
[
"manu.valette@gmail.com"
] |
manu.valette@gmail.com
|
c9c783e1fa71a263175960a59e4ed8c7881b466d
|
71d3632c19ada5585223d4800f681718af0e1f87
|
/python/sw/pro/6326.py
|
e4508e97b2307f8f8a65a886b6c32e563111064c
|
[] |
no_license
|
hyunwoo-song/TOT
|
640433b909913098699ff517698e71a1b8661cff
|
76a8c18af942d9ed3d636fcec35001ae70b6b8d2
|
refs/heads/master
| 2023-01-05T08:11:38.858134
| 2019-08-14T08:53:23
| 2019-08-14T08:53:23
| 162,208,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
N = int(input())
a = 1
for i in range(1, N+1):
a *= i
print(a)
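# Equivalent standard-library one-liner for reference (illustrative addition):
#   import math; print(math.factorial(N))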
|
[
"gerrar486@gmail.com"
] |
gerrar486@gmail.com
|