| blob_id (string, length 40) | language (1 class) | repo_name (string, 5-133 chars) | path (string, 2-333 chars) | src_encoding (30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, 0-67 items) | license_type (2 classes) | text (string, 12-5.47M chars) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
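Each row below follows this schema, with the `text` column holding the raw source file. As a minimal sketch of iterating such rows with the Hugging Face `datasets` library — the dataset path used here is a hypothetical placeholder, not the real dataset name:
# Minimal sketch, assuming the schema above; "user/python-source-files" is a placeholder path.
from datasets import load_dataset
ds = load_dataset("user/python-source-files", split="train")
for row in ds:
    if row["download_success"] and row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"], row["score"])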
d6e7b0cf941632dee22d801ee41360d7d50d3859
|
Python
|
nishiyamayo/atcoder-practice
|
/src/main/scala/past004/F.py
|
UTF-8
| 478
| 3.1875
| 3
|
[] |
no_license
|
N, K = map(int, input().split())
K -= 1
d = dict()
for i in range(N):
S = input()
d[S] = d.get(S, 0) + 1
l = d.items()
l = sorted(l, key=lambda x:x[1], reverse=True)
if len(l) == 1 and K == 0:
print(l[K][0])
elif K == 0:
print(l[K][0] if l[K + 1][1] != l[K][1] else "AMBIGUOUS")
elif K == len(l) - 1:
print(l[K][0] if l[K - 1][1] != l[K][1] else "AMBIGUOUS")
else:
print(l[K][0] if l[K - 1][1] != l[K][1] and l[K + 1][1] != l[K][1] else "AMBIGUOUS")
| true
|
ac59234e19a555b6fa62a6056ccb1f281abf89c5
|
Python
|
joeyqzhou/recommendation-system
|
/cf_to_predict_rating.py
|
UTF-8
| 3,638
| 2.96875
| 3
|
[] |
no_license
|
'''
Created on Nov 1, 2015
@author: joey
'''
import random
import numpy as np
import math
import data_preparation as dp
def userSimilarityRating(data):
    # Build the inverse table:
    # item_users (key: item, value: list of {user: rating the user gave the item})
item_users = dict()
for useri , items_rating in data.items():
for itemj in items_rating.keys():
if itemj not in item_users:
item_users[itemj] = list()
item_users[itemj].append({useri:items_rating[itemj]})
    # cos(user_i, user_j) = cosine similarity between x(i) and x(j)
    # x(i) is the vector representing user i, with one column per movie: if user i has
    # rated the movie, the entry is the rating, otherwise zero.
    # C: key: useri
    #    value: dict (key: userj, value: sum of rating products over items both useri and userj have rated)
C = dict()
    # N: key: userId
    #    value: sum of the ratings given by userId (used for normalization)
N = dict()
for itemi, users_rating in item_users.items():
for userj_rating_i in users_rating:
            userj = next(iter(userj_rating_i))
if userj not in C:
N[userj] = 0
C[userj] = {}
            N[userj] += userj_rating_i[userj]  # rating of userj for itemi
for userk_rating_i in users_rating:
                userk = next(iter(userk_rating_i))
if userj != userk:
if userk not in C[userj]:
C[userj][userk] = 0
C[userj][userk] += userj_rating_i[userj] * userk_rating_i[userk]
    # calculate similarity by normalizing C
W = dict()
for u , related_users in C.items():
if u not in W:
W[u] = {}
for v, cuv in related_users.items():
W[u][v] = cuv / math.sqrt( N[u] * N[v] )
return W
# W: similarity between users. key: user i, value: dict(key: user j, value: Wij)
# R: users' ratings for items
def UserCF_to_predict_rating(user, item, trainData, W, K=6):
predict_rating = 0.0
unnormalized_predict_rating = 0.0
count = 0
norm_similarity = 0.0
for useri, similarity in sorted(W[user].items(), \
key = lambda x:x[1], reverse=True):
if item in trainData[useri].keys():
count += 1
if count >=K:
break
unnormalized_predict_rating += similarity * trainData[useri][item]
norm_similarity += similarity
if count < K/2:
        return -1  # can't predict; this could be replaced by the item's average rating
else:
predict_rating = unnormalized_predict_rating/norm_similarity
return predict_rating
def Evaluation_CF_to_predict_rating(train, test, K):
sum_squared_error = 0.0
rmse = 0.0
W = userSimilarityRating(train)
count = 0
print "Evaluation"
for user in train.keys():
if user in test.keys():
test_user_item = test[user] #key: item , value: rating user to item
for itemi in test_user_item.keys():
                predict_rating = UserCF_to_predict_rating(user, itemi, train, W, K)
if predict_rating != -1:
count += 1
real_rating = test[user][itemi]
rmse += (predict_rating - real_rating)**2
print "predict number:", count
rmse = np.sqrt(rmse/count)
return rmse
if __name__ == "__main__":
data = dp.readMoiveIdRatingFromRatings("ratings.dat","::")
trainData, testData = dp.splitRatingData(data)
    print(Evaluation_CF_to_predict_rating(trainData, testData, 20))
| true
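The row above implements user-based collaborative filtering; note that `userSimilarityRating` normalizes the co-rating products by the sum of each user's raw ratings rather than by the usual squared norm. For reference, a self-contained sketch of standard cosine similarity over the same {user: {item: rating}} structure, using toy data rather than the repository's code:
# Illustrative sketch only; toy ratings, not from the repository.
import math
ratings = {
    "alice": {"m1": 5.0, "m2": 3.0, "m3": 4.0},
    "bob": {"m1": 4.0, "m2": 1.0},
}
def cosine_similarity(u, v, data):
    # Dot product over co-rated items, divided by the two users' rating norms.
    common = set(data[u]) & set(data[v])
    dot = sum(data[u][i] * data[v][i] for i in common)
    norm_u = math.sqrt(sum(r * r for r in data[u].values()))
    norm_v = math.sqrt(sum(r * r for r in data[v].values()))
    return dot / (norm_u * norm_v) if norm_u and norm_v else 0.0
print(cosine_similarity("alice", "bob", ratings))  # ~0.79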
|
cef179ae95ea3db91c8de4110f00dc19f402892c
|
Python
|
markok20/twitter-toolbox
|
/twitter_nlp_toolkit/tweet_sentiment_classifier/models/lstm_models.py
|
UTF-8
| 49,344
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from zipfile import ZipFile
from twitter_nlp_toolkit.file_fetcher import file_fetcher
from ..tweet_sentiment_classifier import Classifier, tokenizer_filter
import os
import json
import pickle as pkl
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from sklearn.utils import resample
class LSTM_Model(Classifier):
"""
LSTM model with trainable embedding layer"
"""
def __init__(self, max_length=25, vocab_size=1000000, neurons=50,
dropout=0.25, rec_dropout=0.25, embed_vec_len=200, activ='hard_sigmoid',
learning_rate=0.001, bootstrap=1, early_stopping=True, patience=50, validation_split=0.2, max_iter=250,
batch_size=10000, accuracy=0, remove_punctuation=False, remove_stopwords=False, lemmatize=True,
hidden_neurons=0, bidirectional=False, **kwargs):
"""
Constructor for LSTM classifier using pre-trained embeddings
        Be sure to add additional parameters to export()
        :param max_length: (int) Maximum text length, i.e., number of temporal nodes. Default 25
        :param vocab_size: (int) Maximum vocabulary size. Default 1E6
        :param max_iter: (int) Number of training epochs. Default 250
        :param neurons: (int) Depth (NOT LENGTH) of LSTM network. Default 50
:param dropout: (float) Dropout
:param activ: (String) Activation function (for visible layer). Default 'hard_sigmoid'
:param optimizer: (String) Optimizer. Default 'adam'
"""
self.type = 'LSTM_Model'
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.lstm_models'
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.validation_split = validation_split
self.patience = patience
self.max_iter = max_iter
self.learning_rate = learning_rate
self.max_length = max_length
self.max_iter = max_iter
self.batch_size = batch_size
self.vocab_size = vocab_size
self.neurons = neurons
self.hidden_neurons = hidden_neurons
self.dropout = dropout
self.rec_dropout = rec_dropout
self.activ = activ
self.optimizer = 'adam'
self.embed_vec_len = embed_vec_len
self.embedding_initializer = tf.keras.initializers.glorot_normal(seed=None)
self.finetune_embeddings = True
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
self.tokenizer = None
self.classifier = None
self.word_index = None
self.embedding_matrix = None
self.es = []
self.accuracy = accuracy
self.bidirectional = bidirectional
    def preprocess(self, train_data, y, weights=None):
        """
        Preprocess and tokenize text
        """
        if weights is None:
            weights = np.ones(len(y))
if 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
                                         remove_stopwords=self.remove_stopwords, lemmatize_pronouns=True,
lemmatize=self.lemmatize, verbose=True)
print('Filtered data')
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
return cleaned_data, y, weights
def fit(self, train_data, y, weights=None, custom_vocabulary=None):
"""
:param train_data: (List-like of Strings) Tweets to fit on
:param y: (Vector) Targets
:param weights: (Vector) Weights for fitting data
:param custom_vocabulary: (List of String) Custom vocabulary to use for tokenizer. Not recommended.
:return: Fit history
"""
cleaned_data, y, weights = self.preprocess(train_data, y, weights)
self.tokenizer = Tokenizer(num_words=self.vocab_size, filters='"#$%&()*+-/:;<=>?@[\\]^_`{|}~\t\n')
self.tokenizer.fit_on_texts(cleaned_data)
train_sequences = self.tokenizer.texts_to_sequences(cleaned_data)
self.word_index = self.tokenizer.word_index
print('Found %s unique tokens.' % len(self.word_index))
X = pad_sequences(train_sequences, maxlen=self.max_length, padding='pre')
self.build_LSTM_network()
print('Fitting LSTM model')
history = self.classifier.fit(X, y, validation_split=self.validation_split, callbacks=self.es,
batch_size=self.batch_size, sample_weight=weights,
epochs=self.max_iter, verbose=2)
self.accuracy = np.max(history.history['val_acc'])
return history
def build_LSTM_network(self):
print("Creating LSTM model")
if self.optimizer == 'adam':
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
init = tf.keras.initializers.glorot_uniform(seed=1)
self.classifier = tf.keras.models.Sequential()
self.classifier.add(tf.keras.layers.Embedding(input_dim=len(self.word_index) + 1,
output_dim=self.embed_vec_len,
input_length=self.max_length,
mask_zero=True,
embeddings_initializer=self.embedding_initializer,
trainable=self.finetune_embeddings))
"""
self.classifier.add(tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout))
"""
if self.bidirectional:
self.classifier.add(tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout)))
else:
self.classifier.add(tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout))
if self.hidden_neurons > 0:
self.classifier.add(
tf.keras.layers.Dense(units=self.hidden_neurons, kernel_initializer=init, activation='elu'))
self.classifier.add(tf.keras.layers.Dense(units=1, kernel_initializer=init, activation=self.activ))
self.classifier.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['acc'])
print(self.classifier.summary())
if self.early_stopping:
self.es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
def refine(self, train_data, y, bootstrap=True, weights=None):
"""
Train model further
:param train_data: (list of Strings) Training tweets
:param y: (vector) Targets
:param weights: (vector) Training data weights
:param bootstrap: (bool) Resample training data
:returns: Fit history
"""
"""
# Preprocess and tokenize text
"""
if bootstrap and 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif bootstrap and self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
filtered_data = tokenizer_filter(train_data, remove_punctuation=False, remove_stopwords=False,
lemmatize=True)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
train_sequences = self.tokenizer.texts_to_sequences(cleaned_data)
X = pad_sequences(train_sequences, maxlen=self.max_length, padding='pre')
es = []
if self.early_stopping:
es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
history = self.classifier.fit(X, y, validation_split=self.validation_split, callbacks=self.es,
batch_size=self.batch_size, sample_weight=weights,
epochs=self.max_iter, verbose=2)
self.accuracy = np.max(history.history['val_acc'])
return history
def predict(self, data, **kwargs):
"""
Make binary predictions
:param data: (list of Strings) Tweets
:return: (vector of Bool) Predictions
"""
return np.round(self.predict_proba(data, **kwargs))
def predict_proba(self, data, preprocess=True):
"""
Make continuous predictions
:param data: (list of Strings) Tweets
:return: (vector) Predictions
"""
if self.tokenizer is None:
raise ValueError('Model has not been trained!')
filtered_data = tokenizer_filter(data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
verbose=False)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = pad_sequences(self.tokenizer.texts_to_sequences(cleaned_data), maxlen=self.max_length)
return self.classifier.predict(X)
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'bootstrap': self.bootstrap,
'early_stopping': self.early_stopping,
'validation_split': float(self.validation_split),
'patience': int(self.patience),
'max_iter': int(self.max_iter),
'max_length': int(self.max_length),
'neurons': int(self.neurons),
'hidden_neruons': int(self.hidden_neurons),
'dropout': float(self.dropout),
'rec_dropout': float(self.rec_dropout),
'activ': self.activ,
'vocab_size': self.vocab_size,
'batch_size': self.batch_size,
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'learning_rate': self.learning_rate,
'bidirectional': self.bidirectional
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/lstm_tokenizer.pkl', 'wb+') as outfile:
pkl.dump(self.tokenizer, outfile)
model_json = self.classifier.to_json()
with open(filename + "/lstm_model.json", "w+") as json_file:
json_file.write(model_json)
self.classifier.save_weights(filename + "/lstm_model.h5")
def load_model(self, filename):
"""
Load a model from the disc
:param filename: (String) Path to file
"""
self.tokenizer = pkl.load(open(filename + '/lstm_tokenizer.pkl', 'rb'))
with open(filename + '/lstm_model.json', 'r') as infile:
model_json = infile.read()
self.classifier = tf.keras.models.model_from_json(model_json)
self.classifier.load_weights(filename + '/lstm_model.h5')
self.classifier.compile(loss='binary_crossentropy',
optimizer=self.optimizer,
metrics=['acc'])
class GloVE_Model(LSTM_Model):
"""
LSTM model that uses GloVE pre-trained embeddings
# TODO add automatic embedding downloading and unzipping
"""
def __init__(self, embedding_dict=None, embed_vec_len=200, max_length=25, vocab_size=1000000, batch_size=10000,
neurons=100,
hidden_neurons=0, dropout=0.2, bootstrap=1, early_stopping=True, validation_split=0.2, patience=50,
max_iter=250,
rec_dropout=0.2, activ='hard_sigmoid', accuracy=0, remove_punctuation=False, learning_rate=0.001,
remove_stopwords=False, lemmatize=True, finetune_embeddings=False, bidirectional=False, **kwargs):
"""
Constructor for LSTM classifier using pre-trained embeddings
Be sure to add extra parameters to export()
        :param glove_index: (Dict) Embedding index to use. If not provided, a standard one will be downloaded
        :param name: (String) Name of model
        :param embed_vec_len: (int) Embedding depth. Inferred from the dictionary if provided; otherwise 25, 50,
            100, and 200 are acceptable values
:param embedding_dict: (dict) Embedding dictionary
:param max_length: (int) Maximum text length, ie, number of temporal nodes. Default 25
:param vocab_size: (int) Maximum vocabulary size. Default 1E7
:param max_iter: (int) Number of training epochs. Default 100
:param neurons: (int) Depth (NOT LENGTH) of LSTM network. Default 100
:param dropout: (float) Dropout
:param activ: (String) Activation function (for visible layer). Default 'hard_sigmoid'
:param optimizer: (String) Optimizer. Default 'adam'
:param early_stopping: (bool) Train with early stopping
        :param validation_split: (float) Fraction of training data to withhold for validation
:param patience: (int) Number of epochs to wait before early stopping
"""
self.type = 'GloVE_Model'
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.lstm_models'
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.validation_split = validation_split
self.patience = patience
self.max_iter = max_iter
self.embed_vec_len = embed_vec_len
self.learning_rate = learning_rate
self.bidirectional = bidirectional
self.embedding_initializer = None
self.finetune_embeddings = finetune_embeddings
self.max_length = max_length
self.embedding_dict = embedding_dict
self.max_iter = max_iter
self.vocab_size = vocab_size
self.neurons = neurons
self.hidden_neurons = hidden_neurons
self.dropout = dropout
self.rec_dropout = rec_dropout
self.activ = activ
self.optimizer = 'adam'
self.batch_size = batch_size
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
self.es = []
self.tokenizer = None
self.classifier = None
self.word_index = None
self.embedding_matrix = None
self.accuracy = accuracy
if self.embedding_dict is not None:
self.embed_vec_len = len(list(self.embedding_dict.values())[0])
print('Setting embedding depth to {}'.format(self.embed_vec_len))
def preprocess(self, train_data, y, weights=None):
if self.embedding_dict is None:
print('Reloading embedding index')
try:
self.embedding_dict = {}
                with open('glove_dicts/glove.twitter.27B.' + str(self.embed_vec_len) + 'd.txt', encoding="utf8") as f:
for line in f:
word, representation = line.split(maxsplit=1)
representation = np.fromstring(representation, 'f', sep=' ')
self.embedding_dict[word] = representation
print('Dictionary loaded')
except FileNotFoundError:
file_fetcher.download_file("http://nlp.stanford.edu/data/glove.twitter.27B.zip",
"glove_dicts.zip")
with ZipFile('glove_dicts.zip', 'r') as zipObj:
zipObj.extractall('glove_dicts')
self.embedding_dict = {}
                with open('glove_dicts/glove.twitter.27B.' + str(self.embed_vec_len) + 'd.txt', encoding="utf8") as f:
for line in f:
word, representation = line.split(maxsplit=1)
representation = np.fromstring(representation, 'f', sep=' ')
self.embedding_dict[word] = representation
print('Dictionary loaded')
if weights is None:
weights = np.ones(len(y))
if 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
print('Sampled %d training points' % len(train_data))
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
lemmatize_pronouns=False)
print('Filtered data')
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
return cleaned_data, y, weights
def fit(self, train_data, y, weights=None, custom_vocabulary=None, clear_embedding_dictionary=True):
"""
:param train_data: (Dataframe) Training data
:param y: (vector) Targets
:param weights: (vector) Weights for fitting data
:param custom_vocabulary: Custom vocabulary for the tokenizer. Not recommended.
:param clear_embedding_dictionary: Delete the embedding dictionary after loading the embedding layer.
Recommended, but will prevent the model from being re-fit (not refined)
:returns Fit history
"""
"""
# Preprocess and tokenize text
"""
cleaned_data, y, weights = self.preprocess(train_data, y, weights)
if custom_vocabulary is not None:
print('Applying custom vocabulary')
self.tokenizer = Tokenizer(num_words=len(custom_vocabulary))
self.tokenizer.fit_on_texts(custom_vocabulary)
else:
print('Fitting tokenizer')
self.tokenizer = Tokenizer(num_words=self.vocab_size, char_level=False)
self.tokenizer.fit_on_texts(cleaned_data)
train_sequences = self.tokenizer.texts_to_sequences(cleaned_data)
self.word_index = self.tokenizer.word_index
X = pad_sequences(train_sequences, maxlen=self.max_length, padding='pre')
self.embedding_matrix = np.zeros((len(self.word_index) + 1, self.embed_vec_len))
for word, i in self.word_index.items():
embedding_vector = self.embedding_dict.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros. # TODO consider optimizing
self.embedding_matrix[i] = embedding_vector
self.build_LSTM_network()
if clear_embedding_dictionary:
self.embedding_matrix = None
self.embedding_dict = None
print('Fitting GloVE model')
history = self.classifier.fit(X, y, validation_split=self.validation_split, batch_size=self.batch_size,
epochs=self.max_iter, sample_weight=weights,
callbacks=self.es, verbose=2)
self.accuracy = np.max(history.history['val_acc'])
return history
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'max_length': int(self.max_length),
'neurons': int(self.neurons),
'hidden_neruons': int(self.hidden_neurons),
'dropout': float(self.dropout),
'rec_dropout': float(self.rec_dropout),
'activ': self.activ,
'vocab_size': int(self.vocab_size),
'max_iter': int(self.max_iter),
'batch_size': self.batch_size,
'early_stopping': self.early_stopping,
'patience': int(self.patience),
'bootstrap': self.bootstrap,
'validation_split': float(self.validation_split),
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'finetune_embeddings': self.finetune_embeddings,
'learning_rate': self.learning_rate,
'bidirectional': self.bidirectional
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/glove_tokenizer.pkl', 'wb+') as outfile:
pkl.dump(self.tokenizer, outfile)
# model_json = self.classifier.to_json()
with open(filename + "/glove_model.json", "w+") as json_file:
json_file.write(self.classifier.to_json())
self.classifier.save_weights(filename + "/glove_model.h5")
def load_model(self, filename):
"""
:param filename: (String) Path to file
"""
self.tokenizer = pkl.load(open(filename + '/glove_tokenizer.pkl', 'rb'))
with open(filename + '/glove_model.json', 'r') as infile:
model_json = infile.read()
self.classifier = tf.keras.models.model_from_json(model_json)
self.classifier.load_weights(filename + '/glove_model.h5')
self.classifier.compile(loss='binary_crossentropy',
optimizer=self.optimizer,
metrics=['acc'])
class NGRAM_Model(LSTM_Model):
"""
    N-gram convolutional LSTM model that uses GloVE pre-trained embeddings
# TODO add automatic embedding downloading and unzipping
"""
def __init__(self, embedding_dict=None, embed_vec_len=200, max_length=25, vocab_size=1000000, batch_size=10000,
neurons=100,
hidden_neurons=0, dropout=0.2, bootstrap=1, early_stopping=True, validation_split=0.2, patience=50,
max_iter=250,
rec_dropout=0.2, activ='hard_sigmoid', accuracy=0, remove_punctuation=False, learning_rate=0.001,
remove_stopwords=False, lemmatize=True, finetune_embeddings=True, n_gram=3, feature_maps=10, **kwargs):
"""
Constructor for NGRAM LSTM classifier using pre-trained embeddings
Be sure to add extra parameters to export()
        :param glove_index: (Dict) Embedding index to use. If not provided, a standard one will be downloaded
        :param name: (String) Name of model
        :param embed_vec_len: (int) Embedding depth. Inferred from the dictionary if provided; otherwise 25, 50,
            100, and 200 are acceptable values
:param embedding_dict: (dict) Embedding dictionary
:param max_length: (int) Maximum text length, ie, number of temporal nodes. Default 25
:param vocab_size: (int) Maximum vocabulary size. Default 1E7
:param max_iter: (int) Number of training epochs. Default 100
:param neurons: (int) Depth (NOT LENGTH) of LSTM network. Default 100
:param dropout: (float) Dropout
:param activ: (String) Activation function (for visible layer). Default 'hard_sigmoid'
:param optimizer: (String) Optimizer. Default 'adam'
:param early_stopping: (bool) Train with early stopping
        :param validation_split: (float) Fraction of training data to withhold for validation
:param patience: (int) Number of epochs to wait before early stopping
"""
self.type = 'NGRAM_Model'
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.lstm_models'
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.validation_split = validation_split
self.patience = patience
self.max_iter = max_iter
self.embed_vec_len = embed_vec_len
self.learning_rate = learning_rate
self.embedding_initializer = None
self.finetune_embeddings = finetune_embeddings
self.max_length = max_length
self.embedding_dict = embedding_dict
self.max_iter = max_iter
self.vocab_size = vocab_size
self.neurons = neurons
self.hidden_neurons = hidden_neurons
self.dropout = dropout
self.rec_dropout = rec_dropout
self.activ = activ
self.optimizer = 'adam'
self.batch_size = batch_size
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
self.es = []
self.tokenizer = None
self.classifier = None
self.word_index = None
self.embedding_matrix = None
self.accuracy = accuracy
self.n_grams = n_gram
self.feature_maps = feature_maps
if self.embedding_dict is not None:
self.embed_vec_len = len(list(self.embedding_dict.values())[0])
print('Setting embedding depth to {}'.format(self.embed_vec_len))
def build_NGRAM_network(self):
print("Creating LSTM model")
if self.optimizer == 'adam':
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
init = tf.keras.initializers.glorot_uniform(seed=1)
self.classifier = tf.keras.models.Sequential()
self.classifier.add(tf.keras.layers.Embedding(input_dim=len(self.word_index) + 1,
output_dim=self.embed_vec_len,
input_length=self.max_length,
mask_zero=True,
embeddings_initializer=self.embedding_initializer,
trainable=self.finetune_embeddings))
self.classifier.add(tf.keras.layers.Conv1D(self.feature_maps, self.n_grams, self.embed_vec_len, data_format='channels_first'))
self.classifier.add(tf.keras.layers.Dropout(self.dropout))
# self.classifier.add(tf.keras.layers.MaxPooling2D(poolsize=(self.max_length - self.n_grams + 1, 1)))
self.classifier.add(tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout))
self.classifier.summary()
if self.hidden_neurons > 0:
self.classifier.add(
tf.keras.layers.Dense(units=self.hidden_neurons, kernel_initializer=init, activation='elu'))
self.classifier.add(tf.keras.layers.Dense(units=1, kernel_initializer=init, activation=self.activ))
self.classifier.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['acc'])
print(self.classifier.summary())
if self.early_stopping:
self.es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
def preprocess(self, train_data, y, weights=None):
if self.embedding_dict is None:
print('Reloading embedding index')
try:
self.embedding_dict = {}
                with open('glove_dicts/glove.twitter.27B.' + str(self.embed_vec_len) + 'd.txt', encoding="utf8") as f:
for line in f:
word, representation = line.split(maxsplit=1)
representation = np.fromstring(representation, 'f', sep=' ')
self.embedding_dict[word] = representation
print('Dictionary loaded')
except FileNotFoundError:
file_fetcher.download_file("http://nlp.stanford.edu/data/glove.twitter.27B.zip",
"glove_dicts.zip")
with ZipFile('glove_dicts.zip', 'r') as zipObj:
zipObj.extractall('glove_dicts')
self.embedding_dict = {}
                with open('glove_dicts/glove.twitter.27B.' + str(self.embed_vec_len) + 'd.txt', encoding="utf8") as f:
for line in f:
word, representation = line.split(maxsplit=1)
representation = np.fromstring(representation, 'f', sep=' ')
self.embedding_dict[word] = representation
print('Dictionary loaded')
if weights is None:
weights = np.ones(len(y))
if 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
print('Sampled %d training points' % len(train_data))
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
lemmatize_pronouns=False)
print('Filtered data')
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
return cleaned_data, y, weights
def fit(self, train_data, y, weights=None, custom_vocabulary=None, clear_embedding_dictionary=True):
"""
:param train_data: (Dataframe) Training data
:param y: (vector) Targets
:param weights: (vector) Weights for fitting data
:param custom_vocabulary: Custom vocabulary for the tokenizer. Not recommended.
:param clear_embedding_dictionary: Delete the embedding dictionary after loading the embedding layer.
Recommended, but will prevent the model from being re-fit (not refined)
:returns Fit history
"""
"""
# Preprocess and tokenize text
"""
cleaned_data, y, weights = self.preprocess(train_data, y, weights)
if custom_vocabulary is not None:
print('Applying custom vocabulary')
self.tokenizer = Tokenizer(num_words=len(custom_vocabulary))
self.tokenizer.fit_on_texts(custom_vocabulary)
else:
print('Fitting tokenizer')
self.tokenizer = Tokenizer(num_words=self.vocab_size, char_level=False)
self.tokenizer.fit_on_texts(cleaned_data)
train_sequences = self.tokenizer.texts_to_sequences(cleaned_data)
self.word_index = self.tokenizer.word_index
X = pad_sequences(train_sequences, maxlen=self.max_length, padding='pre')
self.embedding_matrix = np.zeros((len(self.word_index) + 1, self.embed_vec_len))
for word, i in self.word_index.items():
embedding_vector = self.embedding_dict.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros. # TODO consider optimizing
self.embedding_matrix[i] = embedding_vector
self.build_NGRAM_network()
if clear_embedding_dictionary:
self.embedding_matrix = None
self.embedding_dict = None
print('Fitting GloVE model')
history = self.classifier.fit(X, y, validation_split=self.validation_split, batch_size=self.batch_size,
epochs=self.max_iter, sample_weight=weights,
callbacks=self.es, verbose=2)
self.accuracy = np.max(history.history['val_acc'])
return history
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'max_length': int(self.max_length),
'neurons': int(self.neurons),
'hidden_neruons': int(self.hidden_neurons),
'dropout': float(self.dropout),
'rec_dropout': float(self.rec_dropout),
'activ': self.activ,
'vocab_size': int(self.vocab_size),
'max_iter': int(self.max_iter),
'batch_size': self.batch_size,
'early_stopping': self.early_stopping,
'patience': int(self.patience),
'bootstrap': self.bootstrap,
'validation_split': float(self.validation_split),
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'finetune_embeddings': self.finetune_embeddings,
'learning_rate': self.learning_rate,
'n_grams': self.n_grams,
'feature_maps': self.feature_maps
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/ngram_tokenizer.pkl', 'wb+') as outfile:
pkl.dump(self.tokenizer, outfile)
# model_json = self.classifier.to_json()
with open(filename + "/ngram_model.json", "w+") as json_file:
json_file.write(self.classifier.to_json())
self.classifier.save_weights(filename + "/ngram_model.h5")
def load_model(self, filename):
"""
:param filename: (String) Path to file
"""
self.tokenizer = pkl.load(open(filename + '/glove_tokenizer.pkl', 'rb'))
with open(filename + '/glove_model.json', 'r') as infile:
model_json = infile.read()
self.classifier = tf.keras.models.model_from_json(model_json)
self.classifier.load_weights(filename + '/glove_model.h5')
self.classifier.compile(loss='binary_crossentropy',
optimizer=self.optimizer,
metrics=['acc'])
class Charlevel_Model(LSTM_Model):
"""
    Character-level convolutional LSTM model with a trainable character embedding layer
# TODO add automatic embedding downloading and unzipping
"""
def __init__(self, embedding_dict=None, embed_vec_len=128, max_length=140, vocab_size=128, batch_size=10000,
neurons=100,
hidden_neurons=0, dropout=0.2, bootstrap=1, early_stopping=True, validation_split=0.2, patience=50,
max_iter=250,
rec_dropout=0.2, activ='hard_sigmoid', accuracy=0, remove_punctuation=False, learning_rate=0.001,
remove_stopwords=False, lemmatize=False, finetune_embeddings=True, n_grams=[3,4,5], feature_maps=10, bidirectional=False, **kwargs):
"""
Constructor for NGRAM LSTM classifier using pre-trained embeddings
Be sure to add extra parameters to export()
        :param glove_index: (Dict) Embedding index to use. If not provided, a standard one will be downloaded
        :param name: (String) Name of model
        :param embed_vec_len: (int) Embedding depth. Inferred from the dictionary if provided; otherwise 25, 50,
            100, and 200 are acceptable values
:param embedding_dict: (dict) Embedding dictionary
:param max_length: (int) Maximum text length, ie, number of temporal nodes. Default 25
:param vocab_size: (int) Maximum vocabulary size. Default 1E7
:param max_iter: (int) Number of training epochs. Default 100
:param neurons: (int) Depth (NOT LENGTH) of LSTM network. Default 100
:param dropout: (float) Dropout
:param activ: (String) Activation function (for visible layer). Default 'hard_sigmoid'
:param optimizer: (String) Optimizer. Default 'adam'
:param early_stopping: (bool) Train with early stopping
        :param validation_split: (float) Fraction of training data to withhold for validation
:param patience: (int) Number of epochs to wait before early stopping
"""
self.type = 'Charlevel_Model'
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.lstm_models'
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.validation_split = validation_split
self.patience = patience
self.max_iter = max_iter
self.embed_vec_len = embed_vec_len
self.learning_rate = learning_rate
self.embedding_initializer = None
self.finetune_embeddings = finetune_embeddings
self.max_length = max_length
self.embedding_dict = embedding_dict
self.max_iter = max_iter
self.vocab_size = vocab_size
self.neurons = neurons
self.hidden_neurons = hidden_neurons
self.dropout = dropout
self.rec_dropout = rec_dropout
self.activ = activ
self.optimizer = 'adam'
self.batch_size = batch_size
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
self.es = []
self.tokenizer = None
self.classifier = None
self.word_index = None
self.embedding_matrix = None
self.accuracy = accuracy
self.n_grams = n_grams
self.feature_maps = feature_maps
self.bidirectional = bidirectional
if self.embedding_dict is not None:
self.embed_vec_len = len(list(self.embedding_dict.values())[0])
print('Setting embedding depth to {}'.format(self.embed_vec_len))
def build_charlevel_network(self):
# TODO consider bidirectional
print("Creating character-level model")
if self.optimizer == 'adam':
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
init = tf.keras.initializers.glorot_uniform(seed=1)
inputs = tf.keras.Input(shape=(self.max_length,))
embeddings = tf.keras.layers.Embedding(input_dim=len(self.word_index) + 1,
output_dim=self.embed_vec_len,
input_length=self.max_length,
mask_zero=False,
embeddings_initializer=self.embedding_initializer,
trainable=self.finetune_embeddings)(inputs)
reshape = tf.keras.layers.Reshape((self.max_length, self.embed_vec_len, 1))(embeddings)
outputs = []
for ngram in self.n_grams:
conv_layer = tf.keras.layers.Conv2D(self.feature_maps, kernel_size=ngram)(reshape)
#reshape_layer = tf.keras.layers.Reshape((self.max_length, self.feature_maps, 1))(conv_layer)
pooling_layer = tf.keras.layers.MaxPooling2D(pool_size=(1, self.embed_vec_len-ngram), data_format='channels_last')(conv_layer)
reshape_layer = tf.keras.layers.Reshape((self.max_length-ngram+1,self.feature_maps))(pooling_layer)
if self.bidirectional:
outputs.append(tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout))(reshape_layer))
else:
outputs.append(tf.keras.layers.LSTM(units=self.neurons, input_shape=(self.max_length, self.embed_vec_len),
kernel_initializer=init, dropout=self.dropout,
recurrent_dropout=self.rec_dropout)(reshape_layer))
output = tf.keras.layers.concatenate(outputs)
flattened = tf.keras.layers.Flatten()(output)
if self.hidden_neurons > 0:
hidden_layer = tf.keras.layers.Dense(units=self.hidden_neurons, kernel_initializer=init, activation='relu')(flattened)
dropout = tf.keras.layers.Dropout(self.dropout)(hidden_layer)
else:
dropout = tf.keras.layers.Dropout(self.dropout)(flattened)
output = tf.keras.layers.Dense(units=1, kernel_initializer=init, activation=self.activ)(dropout)
self.classifier = tf.keras.Model(inputs=inputs, outputs=output)
self.classifier.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['acc'])
self.classifier.summary()
if self.early_stopping:
self.es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
def preprocess(self, train_data, y, weights=None):
if weights is None:
weights = np.ones(len(y))
if 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
print('Sampled %d training points' % len(train_data))
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
lemmatize_pronouns=False)
print('Filtered data')
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
print(cleaned_data[0])
return cleaned_data, y, weights
def fit(self, train_data, y, weights=None, custom_vocabulary=None):
"""
:param train_data: (List-like of Strings) Tweets to fit on
:param y: (Vector) Targets
:param weights: (Vector) Weights for fitting data
:param custom_vocabulary: (List of String) Custom vocabulary to use for tokenizer. Not recommended.
:return: Fit history
"""
cleaned_data, y, weights = self.preprocess(train_data, y, weights)
# filters='"#$%&()*+-/:;<=>?@[\\]^_`{|}~\t\n',
self.tokenizer = Tokenizer(num_words=self.vocab_size,
char_level=True, oov_token=0)
self.tokenizer.fit_on_texts(cleaned_data)
train_sequences = self.tokenizer.texts_to_sequences(cleaned_data)
self.word_index = self.tokenizer.word_index
print('Found %s unique tokens.' % len(self.word_index))
X = pad_sequences(train_sequences, maxlen=self.max_length, padding='pre')
self.build_charlevel_network()
print('Fitting LSTM model')
steps_per_epoch = min((int(len(y) / self.batch_size)), 1000)
history = self.classifier.fit(X, y, validation_split=self.validation_split, callbacks=self.es,
batch_size=self.batch_size, sample_weight=weights,
epochs=self.max_iter, verbose=1, steps_per_epoch=steps_per_epoch)
self.accuracy = np.max(history.history['val_acc'])
return history
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'max_length': int(self.max_length),
'neurons': int(self.neurons),
'hidden_neruons': int(self.hidden_neurons),
'dropout': float(self.dropout),
'rec_dropout': float(self.rec_dropout),
'activ': self.activ,
'vocab_size': int(self.vocab_size),
'max_iter': int(self.max_iter),
'batch_size': self.batch_size,
'early_stopping': self.early_stopping,
'patience': int(self.patience),
'bootstrap': self.bootstrap,
'validation_split': float(self.validation_split),
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'finetune_embeddings': self.finetune_embeddings,
'learning_rate': self.learning_rate,
'n_grams': self.n_grams,
'feature_maps': self.feature_maps,
'bidirectional': self.bidirectional
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/charlevel_tokenizer.pkl', 'wb+') as outfile:
pkl.dump(self.tokenizer, outfile)
# model_json = self.classifier.to_json()
with open(filename + "/charlevel_model.json", "w+") as json_file:
json_file.write(self.classifier.to_json())
self.classifier.save_weights(filename + "/charlevel_model.h5")
tf.keras.utils.plot_model(
self.classifier,
to_file=(filename + "/model_topology.png"),
show_shapes=True,
show_layer_names=True)
def load_model(self, filename):
"""
:param filename: (String) Path to file
"""
self.tokenizer = pkl.load(open(filename + '/glove_tokenizer.pkl', 'rb'))
with open(filename + '/glove_model.json', 'r') as infile:
model_json = infile.read()
self.classifier = tf.keras.models.model_from_json(model_json)
self.classifier.load_weights(filename + '/glove_model.h5')
self.classifier.compile(loss='binary_crossentropy',
optimizer=self.optimizer,
metrics=['acc'])
| true
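The core topology assembled by `build_LSTM_network` above (Embedding -> LSTM -> Dense sigmoid, trained with binary cross-entropy) can be reproduced standalone in a few lines of Keras. This is a simplified sketch with made-up dimensions and random data, not the toolkit's model:
# Simplified sketch of the Embedding -> LSTM -> Dense topology; toy sizes and random labels.
import numpy as np
import tensorflow as tf
vocab_size, max_length, embed_dim, lstm_units = 5000, 25, 64, 50
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=embed_dim, mask_zero=True),
    tf.keras.layers.LSTM(units=lstm_units, dropout=0.25, recurrent_dropout=0.25),
    tf.keras.layers.Dense(1, activation="hard_sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"])
# Toy padded token-id sequences and binary labels.
X = np.random.randint(1, vocab_size, size=(256, max_length))
y = np.random.randint(0, 2, size=(256,))
model.fit(X, y, epochs=1, batch_size=64, validation_split=0.2, verbose=0)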
|
85c9b6a16f4355018f66651b8df4c52276be3379
|
Python
|
dlondonmedina/Everyday-Coding
|
/week2/homework/Player.py
|
UTF-8
| 344
| 3.109375
| 3
|
[] |
no_license
|
class Player:
def __init__(self, id, token):
self.id = id
self.token = token
self.wins = 0
self.rows = [0, 0, 0]
self.cols = [0, 0, 0]
self.diags = [0, 0]
def get_token(self):
return self.token
def add_win(self):
self.wins += 1
def get_wins(self):
return self.wins
| true
|
dbc422532dc17527363cd20c1b25235c27694ddf
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02257/s884997944.py
|
UTF-8
| 390
| 3.078125
| 3
|
[] |
no_license
|
N = int(input())
a = [int(input()) for x in range(N)]
import math
cnt = 0
for i in range(N) :
cnt += 1
if a[i] == 2 :
continue
else :
for j in range(2, int(math.sqrt(a[i])) + 1) :
if a[i] % 2 == 0 :
cnt -= 1
break
if a[i] % j == 0 :
cnt -= 1
break
print(cnt)
| true
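The solution above counts primes by trial division up to sqrt(a[i]); the `a[i] % 2 == 0` check inside the loop is redundant, since `j` starts at 2. A minimal sketch of the same trial-division technique, for reference:
# Minimal trial-division primality check, equivalent in spirit to the loop above.
import math
def is_prime(n: int) -> bool:
    # Test divisors up to the integer square root of n.
    if n < 2:
        return False
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            return False
    return True
values = [2, 3, 4, 5, 10, 17]
print(sum(is_prime(v) for v in values))  # 4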
|
39b49022b06282570c724ac6e8e18af401cfd524
|
Python
|
codermoji-contrib/python
|
/start/Intro to Dicts/printdict/printval4.py
|
UTF-8
| 123
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
age = dict(tom=23, jane=32, mike=27, linda=25)
print(age['mike'])
print(age['linda'])
print(age['jane'])
print(age['tom'])
| true
|
89bec2dac4eca41d758985b85e3a155b52439b3e
|
Python
|
OaklandPeters/abf
|
/abf/test_abf.py
|
UTF-8
| 5,851
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
from __future__ import absolute_import
import unittest
import types
import abc
if __name__ == "__main__":
import sys
sys.path.append('..')
from abf.meta import *
from abf.error import *
else:
from .meta import *
from .error import *
class TestMyProcessing(unittest.TestCase):
class MyProcessing(object):
__metaclass__ = ABFMeta
@classmethod
def __call__(self, *args, **kwargs):
return 1234
@classmethod
def foo(self, bang):
return 'foo'+repr(bang)
@classmethod
def already_classmethod(cls, shebang):
return 'already '+str(shebang)
def test_basic(self):
result = self.MyProcessing('a')
self.assertEqual(result, 1234)
self.assertEqual(self.MyProcessing.foo(5), "foo5")
self.assertEqual(self.MyProcessing.already_classmethod(3), 'already 3')
def test_abstract_method_requirement(self):
# This currently fails, because the obligation to reimplement __call__
# is not being enforced.
def make_erroring():
class MyErroring(object):
__metaclass__ = ABFMeta
def caller(self, *args, **kwargs):
print("Insufficient - Should have errored")
return MyErroring
thing = make_erroring
self.assertRaises(ABFAbstractError, make_erroring)
self.assertRaises(NotImplementedError, make_erroring)
def test_classmethods(self):
_get = lambda name: self.MyProcessing.__dict__[name]
self.assert_(isinstance(_get('__call__'), classmethod))
self.assert_(isinstance(_get('foo'), classmethod))
self.assert_(isinstance(_get('already_classmethod'), classmethod))
self.MyProcessing.new_method = lambda x: x
self.assert_(not isinstance(_get('new_method'), classmethod))
class TestInheritance(unittest.TestCase):
def test_three_generations(self):
class AsserterInterface(object):
__metaclass__ = ABFMeta
message = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
meets = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
exception = abc.abstractproperty()
get_return = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
#-------- Mixins
def __call__(self, *args, **kwargs):
if not self.meets(*args, **kwargs):
self.raise_exception(*args, **kwargs)
return self.get_return(*args, **kwargs)
def raise_exception(self, *args, **kwargs):
raise self.exception(self.message(*args, **kwargs))
def make_error():
class FileAsserterInterface(AsserterInterface):
"""Parent interface for all File-related assertions."""
def get_return(self, path):
return path
meets = abc.abstractproperty()
message = abc.abstractproperty()
#exception = abc.abstractproperty()
return FileAsserterInterface
self.assertRaises(ABFAbstractError, make_error)
def test_approach_two(self):
class GrandParent(object):
__metaclass__ = ABFMeta
message = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
meets = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
exception = abc.abstractproperty()
get_return = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
#-------- Mixins
def __call__(self, *args, **kwargs):
if not self.meets(*args, **kwargs):
self.raise_exception(*args, **kwargs)
return self.get_return(*args, **kwargs)
def raise_exception(self, *args, **kwargs):
raise self.exception(self.message(*args, **kwargs))
class Parent(GrandParent):
"""
defines one abstract --> non-abstract
preserves three abstracts --> abstract
"""
def get_return(self, path):
return path
meets = abc.abstractproperty()
message = abc.abstractproperty()
exception = abc.abstractproperty()
def should_be_correct():
class Child1(Parent):
"""Implements remaining abstracts."""
def meets(self): return False
def message(self): return "My message"
exception = RuntimeError
return Child1
def should_be_correct_2():
#Correct, because:
# (1) meets, exception redefined as non-abstract
# (2) message redefined as abstract
class Child3(Parent):
def meets(self): return False
message = abc.abstractmethod(lambda *args, **kwargs: NotImplemented)
exception = RuntimeError
return Child3
def should_be_error_1():
#Error because abstract 'message' is neither:
# (1) defined as non-abstract
# (2) re-defined as abstract in Child
class Child2(Parent):
def meets(self): return False
exception = RuntimeError
return Child2
Child1 = should_be_correct()
Child2 = should_be_correct_2()
# Child version 1
self.assert_(
issubclass(Child1, Parent)
)
# Child version 2
self.assert_(
issubclass(Child2, Parent)
)
# Child version 3 - erroring
self.assertRaises(ABFAbstractError, should_be_error_1)
if __name__ == "__main__":
unittest.main()
| true
|
ecc139d202672de8a994b48e7258344bb46b4a07
|
Python
|
A-Schmid/python-audio-tutorial
|
/mido/midi_device.py
|
UTF-8
| 575
| 3.15625
| 3
|
[] |
no_license
|
import mido
# list of all available MIDI input devices
inputs = mido.get_input_names()
# let user select a device
counter = 0
for device in inputs:
print(f'[{counter}] {device}')
counter += 1
selection = input('select device: ')
try:
# listen to input from selected device and print MIDI messages
with mido.open_input(inputs[int(selection)]) as p:
for msg in p:
print(msg)
except (ValueError, IndexError) as e:
print(f'invalid device number: {selection} - must be between 0 and {len(inputs)-1}')
except KeyboardInterrupt:
pass
| true
|
f3c5350ae8ad234f3ef3fdcb9722ee858fe71015
|
Python
|
fulder/python-httpsig
|
/httpsig/tests/test_verify.py
|
UTF-8
| 12,473
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import sys
import os
import unittest
from httpsig.sign import HeaderSigner, Signer
from httpsig.sign_algorithms import PSS
from httpsig.verify import HeaderVerifier, Verifier
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
class BaseTestCase(unittest.TestCase):
def _parse_auth(self, auth):
"""Basic Authorization header parsing."""
# split 'Signature kvpairs'
s, param_str = auth.split(' ', 1)
self.assertEqual(s, 'Signature')
# split k1="v1",k2="v2",...
param_list = param_str.split(',')
# convert into [(k1,"v1"), (k2, "v2"), ...]
param_pairs = [p.split('=', 1) for p in param_list]
# convert into {k1:v1, k2:v2, ...}
param_dict = {k: v.strip('"') for k, v in param_pairs}
return param_dict
class TestVerifyHMACSHA1(BaseTestCase):
test_method = 'POST'
test_path = '/foo?param=value&pet=dog'
header_host = 'example.com'
header_date = 'Thu, 05 Jan 2014 21:31:40 GMT'
header_content_type = 'application/json'
header_digest = 'SHA-256=X48E9qOokqqrvdts8nOJRJN3OWDUoyWxBf7kbu9DBPE='
header_content_length = '18'
sign_header = 'authorization'
def setUp(self):
secret = b"something special goes here"
self.keyId = "Test"
self.algorithm = "hmac-sha1"
self.sign_secret = secret
self.verify_secret = secret
self.sign_algorithm = None
def test_basic_sign(self):
signer = Signer(secret=self.sign_secret, algorithm=self.algorithm, sign_algorithm=self.sign_algorithm)
verifier = Verifier(
secret=self.verify_secret, algorithm=self.algorithm, sign_algorithm=self.sign_algorithm)
GOOD = b"this is a test"
BAD = b"this is not the signature you were looking for..."
# generate signed string
signature = signer.sign(GOOD)
self.assertTrue(verifier._verify(data=GOOD, signature=signature))
self.assertFalse(verifier._verify(data=BAD, signature=signature))
def test_default(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.sign_secret, algorithm=self.algorithm,
sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.verify_secret, sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
self.assertTrue(hv.verify())
def test_signed_headers(self):
HOST = self.header_host
METHOD = self.test_method
PATH = self.test_path
hs = HeaderSigner(
key_id="Test",
secret=self.sign_secret,
algorithm=self.algorithm,
sign_header=self.sign_header,
headers=[
'(request-target)',
'host',
'date',
'content-type',
'digest',
'content-length'
],
sign_algorithm=self.sign_algorithm)
unsigned = {
'Host': HOST,
'Date': self.header_date,
'Content-Type': self.header_content_type,
'Digest': self.header_digest,
'Content-Length': self.header_content_length,
}
signed = hs.sign(unsigned, method=METHOD, path=PATH)
hv = HeaderVerifier(
headers=signed, secret=self.verify_secret,
host=HOST, method=METHOD, path=PATH,
sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
self.assertTrue(hv.verify())
def test_incorrect_headers(self):
HOST = self.header_host
METHOD = self.test_method
PATH = self.test_path
hs = HeaderSigner(secret=self.sign_secret,
key_id="Test",
algorithm=self.algorithm,
sign_header=self.sign_header,
headers=[
'(request-target)',
'host',
'date',
'content-type',
'digest',
'content-length'],
sign_algorithm=self.sign_algorithm)
unsigned = {
'Host': HOST,
'Date': self.header_date,
'Content-Type': self.header_content_type,
'Digest': self.header_digest,
'Content-Length': self.header_content_length,
}
signed = hs.sign(unsigned, method=METHOD, path=PATH)
hv = HeaderVerifier(headers=signed, secret=self.verify_secret,
required_headers=["some-other-header"],
host=HOST, method=METHOD, path=PATH,
sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
with self.assertRaises(ValueError) as e:
hv.verify()
self.assertEqual(str(e.exception), 'some-other-header is a required header(s)')
def test_extra_auth_headers(self):
HOST = "example.com"
METHOD = "POST"
PATH = '/foo?param=value&pet=dog'
hs = HeaderSigner(
key_id="Test",
secret=self.sign_secret,
sign_header=self.sign_header,
algorithm=self.algorithm, headers=[
'(request-target)',
'host',
'date',
'content-type',
'digest',
'content-length'
],
sign_algorithm=self.sign_algorithm)
unsigned = {
'Host': HOST,
'Date': self.header_date,
'Content-Type': self.header_content_type,
'Digest': self.header_digest,
'Content-Length': self.header_content_length,
}
signed = hs.sign(unsigned, method=METHOD, path=PATH)
hv = HeaderVerifier(
headers=signed,
secret=self.verify_secret,
method=METHOD,
path=PATH,
sign_header=self.sign_header,
required_headers=['date', '(request-target)'],
sign_algorithm=self.sign_algorithm)
self.assertTrue(hv.verify())
def test_empty_secret(self):
with self.assertRaises(ValueError) as e:
HeaderVerifier(secret='', headers={})
self.assertEqual(str(e.exception), 'secret cant be empty')
def test_none_secret(self):
with self.assertRaises(ValueError) as e:
HeaderVerifier(secret=None, headers={})
self.assertEqual(str(e.exception), 'secret cant be empty')
def test_huge_secret(self):
with self.assertRaises(ValueError) as e:
HeaderVerifier(secret='x' * 1000000, headers={})
self.assertEqual(str(e.exception), 'secret cant be larger than 100000 chars')
class TestVerifyHMACSHA256(TestVerifyHMACSHA1):
def setUp(self):
super(TestVerifyHMACSHA256, self).setUp()
self.algorithm = "hmac-sha256"
class TestVerifyHMACSHA512(TestVerifyHMACSHA1):
def setUp(self):
super(TestVerifyHMACSHA512, self).setUp()
self.algorithm = "hmac-sha512"
class TestVerifyRSASHA1(TestVerifyHMACSHA1):
def setUp(self):
private_key_path = os.path.join(
os.path.dirname(__file__),
'rsa_private_1024.pem')
with open(private_key_path, 'rb') as f:
private_key = f.read()
public_key_path = os.path.join(
os.path.dirname(__file__),
'rsa_public_1024.pem')
with open(public_key_path, 'rb') as f:
public_key = f.read()
self.keyId = "Test"
self.algorithm = "rsa-sha1"
self.sign_secret = private_key
self.verify_secret = public_key
self.sign_algorithm = None
class TestVerifyRSASHA256(TestVerifyRSASHA1):
def setUp(self):
super(TestVerifyRSASHA256, self).setUp()
self.algorithm = "rsa-sha256"
class TestVerifyRSASHA512(TestVerifyRSASHA1):
def setUp(self):
super(TestVerifyRSASHA512, self).setUp()
self.algorithm = "rsa-sha512"
class TestVerifyRSASHA512ChangeHeader(TestVerifyRSASHA1):
sign_header = 'Signature'
class TestVerifyHS2019PSS(TestVerifyHMACSHA1):
def setUp(self):
private_key_path = os.path.join(os.path.dirname(__file__), 'rsa_private_2048.pem')
with open(private_key_path, 'rb') as f:
private_key = f.read()
public_key_path = os.path.join(os.path.dirname(__file__), 'rsa_public_2048.pem')
with open(public_key_path, 'rb') as f:
public_key = f.read()
self.keyId = "Test"
self.algorithm = "hs2019"
self.sign_secret = private_key
self.verify_secret = public_key
self.sign_algorithm = PSS(salt_length=0)
def test_algorithm_mismatch(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.sign_secret, algorithm=self.algorithm,
sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.verify_secret, sign_header=self.sign_header, algorithm="rsa-sha256", sign_algorithm=self.sign_algorithm)
self.assertFalse(hv.verify())
def test_correct_derived_algorithm(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.sign_secret, algorithm=self.algorithm,
sign_header=self.sign_header, sign_algorithm=self.sign_algorithm)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.verify_secret, sign_header=self.sign_header, algorithm="hs2019", sign_algorithm=self.sign_algorithm)
self.assertTrue(hv.verify())
class TestSignAndVerify(unittest.TestCase):
header_date = 'Thu, 05 Jan 2014 21:31:40 GMT'
sign_header = 'authorization'
def setUp(self):
with open(os.path.join(os.path.dirname(__file__), 'rsa_private_1024.pem'), 'rb') as f:
self.private_key = f.read()
with open(os.path.join(os.path.dirname(__file__), 'rsa_public_1024.pem'), 'rb') as f:
self.public_key = f.read()
with open(os.path.join(os.path.dirname(__file__), 'rsa_private_2048.pem'), 'rb') as f:
self.other_private_key = f.read()
with open(os.path.join(os.path.dirname(__file__), 'rsa_public_2048.pem'), 'rb') as f:
self.other_public_key = f.read()
def test_default(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.private_key, algorithm='rsa-sha1',
sign_header=self.sign_header)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.public_key, sign_header=self.sign_header)
self.assertTrue(hv.verify())
def test_other_default(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.other_private_key, algorithm='rsa-sha256',
sign_header=self.sign_header)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.other_public_key, sign_header=self.sign_header)
self.assertTrue(hv.verify())
def test_mix_default_1_256(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.private_key, algorithm='rsa-sha1',
sign_header=self.sign_header)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.other_public_key, sign_header=self.sign_header)
self.assertFalse(hv.verify())
def test_mix_default_256_1(self):
unsigned = {
'Date': self.header_date
}
hs = HeaderSigner(
key_id="Test", secret=self.other_private_key, algorithm='rsa-sha256',
sign_header=self.sign_header)
signed = hs.sign(unsigned)
hv = HeaderVerifier(
headers=signed, secret=self.public_key, sign_header=self.sign_header)
self.assertFalse(hv.verify())
| true
|
84ce930d03cf753f40aa0028b7f88c188d893a7c
|
Python
|
r4mi4/Algorithm
|
/two_sum.py
|
UTF-8
| 358
| 3.953125
| 4
|
[] |
no_license
|
"""
two sum:
[2,7,11,15], 18 => [1,2]
"""
def two_sum(numbers,target):
p1 = 0
p2 = len(numbers) - 1
while p1 < p2:
print(p1,p2)
s = numbers[p1] + numbers[p2]
if s == target:
return [p1,p2]
elif s > target:
p2 -=1
else:
p1 +=1
print(two_sum([2,7,11,15], 9))
| true
|
a78e11a2bfe378a3b25c139696cef545dedc63ad
|
Python
|
Junghwan-brian/web-programming
|
/Web-Scrapper/main.py
|
UTF-8
| 487
| 2.59375
| 3
|
[] |
no_license
|
#%%
import requests
from bs4 import BeautifulSoup
import pandas as pd
import sys
sys.path
#%%
# The stackoverflow and indeed modules were copied into the
# 'C:\\Users\\brian\\Anaconda3\\envs\\WebProgramming\\lib\\site-packages' folder and imported from there.
import stackoverflow
import indeed
# %%
stackoverflow_job = stackoverflow.get_so_jobs()
indeed_job = indeed.get_indeed_jobs()
jobs = stackoverflow_job + indeed_job
print(jobs)
path = "jobs.csv"
job_df = pd.DataFrame(jobs)
job_df.to_csv(path)
# %%
| true
|
5392b17de6e8767ba5506cf21447618c7b243445
|
Python
|
walkccc/LeetCode
|
/solutions/0249. Group Shifted Strings/0249.py
|
UTF-8
| 471
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
import collections
from typing import List
class Solution:
def groupStrings(self, strings: List[str]) -> List[List[str]]:
keyToStrings = collections.defaultdict(list)
    # e.g. 'abc' -> '1,1,' because diff(a, b) = 1 and diff(b, c) = 1
def getKey(s: str) -> str:
key = ''
for i in range(1, len(s)):
diff = (ord(s[i]) - ord(s[i - 1]) + 26) % 26
key += str(diff) + ','
return key
for s in strings:
keyToStrings[getKey(s)].append(s)
return keyToStrings.values()
| true
|
db062c7f0a3964f838bac52eab65bc23a2ff53d3
|
Python
|
EwersLabUWyo/TREES_Py_R
|
/Python3_Version/scripts/TREES_GUI.py
|
UTF-8
| 2,380
| 2.59375
| 3
|
[] |
no_license
|
# Written by Matt Cook
# Created July 8, 2016
# mattheworion.cook@gmail.com
import os.path
import pickle
import tkinter
import TREES_utils as utils
class MainMenu(object):
def __init__(self):
root = self.root = tkinter.Tk()
root.title('Welcome to the TREES Graphical User Interface')
# width x height + x_offset + y_offset:
root.minsize(width=800, height=100)
# Look for a pickle of previous setup in current directory
curr_dir = os.path.curdir
        filename = os.path.join(curr_dir, 'prev_ws.p')
        if os.path.isfile(filename):
            with open(filename, 'rb') as ws_file:
                wSpace = pickle.load(ws_file)
            opts = wSpace['opts']
# if no previous workspace, initialize
else:
# defining options for opening a directory
self.dirOpt = dirOpt = {}
dirOpt['mustexist'] = False
dirOpt['parent'] = root
dirOpt['title'] = 'Please choose your working directory'
# define options for opening or saving a csv file
self.fileOpt = fileOpt= {}
fileOpt['defaultextension'] = '.csv'
fileOpt['filetypes'] = [('csv files', '*.csv'),
('text files', '*.txt')]
fileOpt['initialdir'] = '\\'
fileOpt['parent'] = root
fileOpt['title'] = 'Please select your file'
            # Store opts as one dictionary to pass into make
self.opts = opts = {}
opts['dirOpts'] = dirOpt
opts['fileOpts'] = fileOpt
# defining titles for frames
self.frame_titles = titles = []
titles.insert(0, 'Blue Stain Xylem data: ')
titles.insert(1, 'Daily Sap Flux Decline data: ')
titles.insert(2, 'Water Stress data: ')
titles.insert(3, 'Gsref data: ')
titles.insert(4, 'Directory containing data files: ')
        # Hard-code this for now, come back and change later
calcs = ['Xylem Scalar', 'Water Stress', 'Gsv0']
# populate window with widgets
utils.makeMain(root, titles, calcs, **opts)
mainGUI = MainMenu()
root = mainGUI.root
root.bind('<Escape>', lambda e: utils.__closeWindow(root))
root.mainloop()
| true
|
65649364f555ee7c03489b072721dfe2bf823181
|
Python
|
zhaolijian/suanfa
|
/leetcode/1011.py
|
UTF-8
| 1,319
| 3.671875
| 4
|
[] |
no_license
|
# Packages on a conveyor belt must be shipped from one port to another within D days.
# The i-th package weighs weights[i]. Each day, packages are loaded in the given order, and the loaded weight must not exceed the ship's maximum capacity.
# Return the minimum ship capacity needed to deliver all packages within D days.
class Solution:
def shipWithinDays(self, weights, D: int) -> int:
max_weight = max(weights)
length = len(weights)
        # Average number of packages that must be shipped per day (rounded up)
temp = length // D if length % D == 0 else length // D + 1
sorted_weights = sorted(weights)
        # Upper bound on capacity: the combined weight of the `temp` heaviest packages
max_val = sum(sorted_weights[length - temp:])
        # Lower bound on capacity: the single heaviest package
min_val = max_weight
        # Binary search over the capacity
while min_val < max_val:
            # Candidate capacity
mid_val = (min_val + max_val) // 2
temp = 0
number = 1
for weight in weights:
temp += weight
if temp > mid_val:
number += 1
temp = weight
if number > D:
min_val = mid_val + 1
else:
max_val = mid_val
return min_val
| true
|
a80280d6377ac1b5bce4091736203558ad2ea879
|
Python
|
mykhaly/ucu
|
/computer_vision/HW4/helper.py
|
UTF-8
| 1,430
| 2.96875
| 3
|
[] |
no_license
|
import itertools as it
import numpy as np
def get_homography(source, destination):
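    # Direct Linear Transform: build a 2N x 9 system from the point correspondences and
    # take the eigenvector of h^T.h with the smallest eigenvalue as the homography
    # (defined up to scale), reshaped to 3x3.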
h = np.zeros((source.size, 9))
for idx, ((x_src, y_src), (x_dest, y_dest)) in enumerate(zip(source, destination)):
row_idx = idx * 2
h[row_idx][0:3] = np.array([-x_src, -y_src, -1])
h[row_idx][6:9] = np.array([x_dest * x_src, x_dest * y_src, x_dest])
h[row_idx+1][3:6] = np.array([-x_src, -y_src, -1])
h[row_idx+1][6:9] = np.array([y_dest * x_src, y_dest * y_src, y_dest])
evs, evcs = np.linalg.eig(np.dot(h.T, h))
return evcs.T[np.argmin(evs)].reshape(3, 3)
def apply_homography(point, homography):
new_point = homography.dot(np.hstack((point, np.ones(1))))
return new_point[:2] / new_point[-1]
def find_homography(source, destination):
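    # Brute force over every 4-point subset: fit a homography to each subset and keep the
    # one with the smallest total reprojection error over all points (a simple alternative to RANSAC).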
matching_points = np.array(list(it.combinations(range(source.shape[0]), 4)))
min_error = float('inf')
best_match = None
homography = None
for match in matching_points:
curr_homography = get_homography(source[match], destination[match])
transformed_src = np.apply_along_axis(lambda x: apply_homography(x, curr_homography), axis=1, arr=source)
error = np.sum(np.sqrt(np.sum(np.square(destination - transformed_src), axis=1)))
if min_error > error:
min_error = error
best_match = match
homography = curr_homography
return homography, best_match
| true
|
00f2cc8a2cc9bcf125175d33bfce8765e09cb9fe
|
Python
|
DaHuO/Supergraph
|
/codes/CodeJamCrawler/16_0_1/Witzy/A.py
|
UTF-8
| 855
| 2.921875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
created by huash at 2016/4/9 08:32
"""
__author__ = 'huash'
import sys
import os
import datetime
import functools
import itertools
import collections
def getDigits(num):
result = set()
    while num > 0:
        result.add(num % 10)
        num //= 10
return result
def lastNumber(num):
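    # Accumulate multiples of num and cross their digits off 0-9; return the multiple at
    # which every digit has been seen. The visited-set check only triggers for num == 0,
    # where the digits never change, giving "INSOMNIA".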
baseDigits = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
visitedNums = set()
v = num
while v not in visitedNums:
baseDigits = baseDigits - getDigits(v)
if len(baseDigits) == 0:
return v
visitedNums.add(v)
v += num
return "INSOMNIA"
f = open("A-large.in", "r")
output = open("A-large.out", "w")
T = f.readline()
i = 1
for num in f.readlines():
output.write("Case #{}: {}\n".format(i, lastNumber(int(num))))
i += 1
| true
|
541a36fb793ac61107e9e8361317487167142204
|
Python
|
ksielemann/QUOD
|
/variance_in_repl_test.py
|
UTF-8
| 5,567
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
### Katharina Sielemann ###
### kfrey@cebitec.uni-bielefeld.de ###
### v1 ###
#prior to this analysis: run QUOD.py for the (I) whole dataset including all accessions and (II) the replicate dataset of the same accession
#imports
import os, glob, sys
from argparse import ArgumentParser
import scipy.stats
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
__usage__ = """
python3 variance_in_repl_test.py
--all_covs <FULL_PATH_TO_accession_coverage.txt_FILE_FOR_ALL_ACCESSIONS> (in QUOD output folder of whole dataset)
--replicate_scores <FULL_PATH_TO_gene_dispensability_scores.csv_FILE_FOR_REPLICATE_DATASET> (in QUOD output folder of replicate dataset)
--output_dir <FULL_PATH_TO_OUPUT_FOLDER>
REQUIREMENTS: os, glob, sys, argparse, scipy, pandas, numpy, matplotlib
"""
input_parameters = ArgumentParser()
input_parameters.add_argument("--all_covs", dest="all_covs")
input_parameters.add_argument("--replicate_scores", dest="repl_scores")
input_parameters.add_argument("--output_dir", "--output", "--out", "--o", dest="output_directory")
if "--help" in sys.argv or "-h" in sys.argv:
print(__usage__)
sys.exit(1)
args = input_parameters.parse_args()
if args.all_covs is None:
print("\n'--all_covs' was not set'")
print(__usage__)
sys.exit(1)
elif args.repl_scores is None:
print("\n'--replicate_scores' was not set'")
print(__usage__)
sys.exit(1)
elif args.output_directory is None:
print("\n'--output_dir' was not set'")
print(__usage__)
sys.exit(1)
else:
#iterative (100x) random (n=14) selection of datasets
input_matrix = pd.read_csv(args.all_covs, sep="\t")
input_matrix = input_matrix.set_index("gene")
if os.path.isdir(args.output_directory + "iterative_random_sets_scores/") == False:
os.makedirs(args.output_directory + "iterative_random_sets_scores/")
for i in range(100):
print("iteration " + str(i+1))
cov_matrix = input_matrix.sample(14, axis="columns").copy()
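        # Normalize each accession's coverage by its column mean, then score each gene as the
        # inverse of its mean normalized coverage across the sampled accessions (dispensability score).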
N = len(list(cov_matrix))
cX = cov_matrix.div(cov_matrix.mean(axis="index"), axis="columns")
ds = 1/((cX.sum(axis="columns")).div(N))
ds.to_csv(args.output_directory + "iterative_random_sets_scores/" + str(i+1) + "_ds.csv", header=False)
datei = open(args.repl_scores,"r")
lines = datei.readlines()
datei.close()
all_values = []
for line in lines:
line = line.strip().split(",")
if float(line[1]) != np.inf:
all_values.append(float(line[1]))
max_number = max(all_values)
#take average of iterative random sets
score_files = glob.glob(args.output_directory + "iterative_random_sets_scores/*_ds.csv")
variances = []
stdeviations = []
for scorefile in score_files:
datei = open(scorefile,"r")
ds = datei.readlines()
datei.close()
scores = []
for line in sorted(list(ds)):
line = line.strip().split(",")
if float(line[1]) >= max_number:
value = max_number
scores.append(float(value))
else:
scores.append(float(line[1]))
variances.append(np.var(scores))
stdeviations.append(np.std(scores))
#Col-0 replicates (n=14)
datei = open(args.repl_scores,"r")
ds = datei.readlines()
datei.close()
    all_values = []
    for line in ds:
        line = line.strip().split(",")
        if float(line[1]) != np.inf:
            all_values.append(float(line[1]))
    max_number = max(all_values)
    scores_col = []
    for line in sorted(list(ds)):
        line = line.strip().split(",")
        if float(line[1]) >= max_number:
            scores_col.append(float(max_number))
        else:
            scores_col.append(float(line[1]))
#boxplot
my_dict = {'randomly,\niterative\nselected datasets': variances, 'replicates': np.var(scores_col)}
medianprops = dict(linestyle='-', linewidth=1.5, color='royalblue')
meanprops = dict(linestyle='dashed', linewidth=1.5, color='royalblue')
plt.figure(figsize=(6,7))
data = [variances, np.var(scores_col)]
plt.ylim(ymin=0, ymax = max(variances))
plt.xlim(xmin=0.8, xmax=2.2)
plt.scatter(2, np.var(scores_col), color='grey', s=4, alpha=1)
x = np.random.normal(1, 0.04, len(variances))
plt.scatter(x, variances, color='grey', s=1, alpha=0.4)
plt.boxplot(my_dict['randomly,\niterative\nselected datasets'], showmeans=True, meanline=True, showfliers=False, medianprops=medianprops, meanprops=meanprops, widths=0.3)
plt.xticks([1,2],["randomly,\niterative\nselected datasets", "replicates"])
plt.ylabel('variance of the dispensability score (ds)', fontsize=16)
plt.tick_params(axis='both', which='both', labelsize=14)
plt.savefig(args.output_directory + "variance_in_replicates.png")
plt.close()
#calculate mean scores of iterative randomly selected datasets
score_files = glob.glob(args.output_directory + "iterative_random_sets_scores/*_ds.csv")
gene_scores = {}
for scorefile in score_files:
datei = open(scorefile,"r")
ds = datei.readlines()
datei.close()
for zeile in ds:
line = zeile.strip().split(",")
#print(line[0])
if line[0] not in gene_scores.keys():
if float(line[1]) <= float(max_number):
gene_scores[line[0]] = [float(line[1])]
else:
gene_scores[line[0]] = [float(max_number)]
else:
if float(line[1]) <= float(max_number):
gene_scores[line[0]] += [float(line[1])]
else:
gene_scores[line[0]] += [float(max_number)]
scores_randomsets = []
for gene in gene_scores.keys():
scores_randomsets.append(np.mean(gene_scores[gene]))
#calculate differences
print("\nreplicates (var,std):")
print(np.var(scores_col))
print(np.std(scores_col))
print("\niterative randomly selected datasets (var,std):")
print(np.var(scores_randomsets))
print(np.std(scores_randomsets))
print("\n")
    print(scipy.stats.levene(scores_randomsets, scores_col))
| true
|
d4a4663f0ede98675e7384d12e6594e1256c7999
|
Python
|
zymov/leetcode
|
/py/_53_Maximum_Subarray.py
|
UTF-8
| 683
| 3.671875
| 4
|
[] |
no_license
|
"""
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Follow up:
If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
"""
from typing import List
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
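        # Kadane's algorithm: extend a running sum, reset it to 0 whenever it drops below
        # zero, and track the best sum seen so far.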
sum = 0
max = nums[0]
for i in nums:
sum += i
if sum > max:
max = sum
if sum < 0:
sum = 0
return max
| true
|
307e5a11874e6bbfbcc958349d0fbc79a80b6802
|
Python
|
jkapila/theNatureofCodeProject
|
/genrics.py
|
UTF-8
| 402
| 3.265625
| 3
|
[] |
no_license
|
"""
These are general methods definition
"""
from vector import PVector
def addition(vec1, vec2):
return PVector(vec1.x+vec2.x, vec1.y+vec2.y, vec1.z+vec2.z)
def subtract(vec1, vec2):
return PVector(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z)
def multiply(vec1, d):
return PVector(vec1.x,vec1.y,vec1.z).mult(d)
def divide(vec1, d):
return PVector(vec1.x,vec1.y,vec1.z).div(d)
| true
|
dbea24b22fcedc3f9ea56d299ad39f5dfef2d435
|
Python
|
OmarTahoun/competitive-programming
|
/Miscellaneous/UIA_Warm_Up/Python/phoneCode.py
|
UTF-8
| 285
| 3.0625
| 3
|
[] |
no_license
|
n = int(input())
substring = list(raw_input())
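# Longest common prefix: keep trimming the running prefix to where it first differs
# from each subsequent phone number.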
for i in range(n-1):
phone = list(raw_input())
for j in range(len(substring)):
if substring[j] == phone[j]:
continue
else:
substring = substring[:j]
break
print len(substring)
| true
|
e3ad9dcf26d1da0d1f0654ba72c17e7ca0d4256c
|
Python
|
sloria/webtest-plus
|
/webtest_plus/response.py
|
UTF-8
| 3,192
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import re
from webtest import response
class TestResponse(response.TestResponse):
'''Same as WebTest's TestResponse but adds basic HTTP authentication to
``click`` and ``clickbutton``.
'''
def click(self, description=None, linkid=None, href=None,
index=None, verbose=False,
extra_environ=None, auth=None, auth_type=None):
"""
Click the link as described. Each of ``description``,
``linkid``, and ``url`` are *patterns*, meaning that they are
either strings (regular expressions), compiled regular
expressions (objects with a ``search`` method), or callables
returning true or false.
All the given patterns are ANDed together:
* ``description`` is a pattern that matches the contents of the
anchor (HTML and all -- everything between ``<a...>`` and
``</a>``)
* ``linkid`` is a pattern that matches the ``id`` attribute of
the anchor. It will receive the empty string if no id is
given.
* ``href`` is a pattern that matches the ``href`` of the anchor;
the literal content of that attribute, not the fully qualified
attribute.
If more than one link matches, then the ``index`` link is
followed. If ``index`` is not given and more than one link
matches, or if no link matches, then ``IndexError`` will be
raised.
If you give ``verbose`` then messages will be printed about
each link, and why it does or doesn't match. If you use
``app.click(verbose=True)`` you'll see a list of all the
links.
You can use multiple criteria to essentially assert multiple
aspects about the link, e.g., where the link's destination is.
"""
found_html, found_desc, found_attrs = self._find_element(
tag='a', href_attr='href',
href_extract=None,
content=description,
id=linkid,
href_pattern=href,
index=index, verbose=verbose)
auth = auth or self.test_app.auth
auth_type = auth_type or self.test_app.auth_type
return self.goto(str(found_attrs['uri']), extra_environ=extra_environ,
auth=auth, auth_type=auth_type)
def clickbutton(self, description=None, buttonid=None, href=None,
index=None, verbose=False, auth=None, auth_type=None):
"""
Like :meth:`~webtest.response.TestResponse.click`, except looks
for link-like buttons.
This kind of button should look like
``<button onclick="...location.href='url'...">``.
"""
found_html, found_desc, found_attrs = self._find_element(
tag='button', href_attr='onclick',
href_extract=re.compile(r"location\.href='(.*?)'"),
content=description,
id=buttonid,
href_pattern=href,
index=index, verbose=verbose)
auth = auth or self.test_app.auth
auth_type = auth_type or self.test_app.auth_type
return self.goto(str(found_attrs['uri']), auth=auth, auth_type=auth_type)
| true
|
9c5bb4ec55693cb8ef72aba460d5bfe2fd6f614d
|
Python
|
DeepSwissVoice/Dobby
|
/dobby/errors.py
|
UTF-8
| 3,902
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
from typing import Any, Callable, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from . import Context
DobbyBaseError = type("DobbyBaseError", (Exception,), {})
DobbyBaseError.__doc__ = """This exception is just a subclass of `Exception`.
It doesn't add any extra functionality but it's supposed to be the only error ever
raised by Dobby.
"""
class DobbyError(DobbyBaseError):
"""
A base class for all Dobby-related errors that provide a bit more info.
A `DobbyError` can help you fix the problem by giving a (useless) hint and the `Context`
in which the error was raised.
Attributes:
ctx: `Context` that was passed to the error (optional)
"""
_msg: str
_hint: Optional[str]
ctx: Optional["Context"]
def __init__(self, msg: str, **kwargs):
self._msg = msg
self._hint = kwargs.pop("hint", None)
self.ctx = kwargs.pop("ctx", None)
super().__init__(**kwargs)
def __repr__(self) -> str:
return f"{type(self).__name__}({self.msg!r})"
def __str__(self) -> str:
lines = [self.msg]
if self.message:
lines.append(f"=== Message ===\n{self.message}")
if self.ctx:
lines.append(f"=== Context ===\n{self.ctx.prettify()}")
if self.hint:
lines.append(f"=== Hint ===\n{self.hint}")
return "\n\n".join(lines)
def _format_str(self, s: str) -> str:
return s.format(msg=self._msg, hint=self._hint, ctx=self.ctx, self=self)
@property
def msg(self) -> str:
"""Formatted message that was passed to the error."""
return self._format_str(self._msg)
@property
def hint(self) -> Optional[str]:
"""The hint which may have been provided when creating the error."""
if self._hint:
return self._format_str(self._hint)
@property
def message(self) -> Optional[str]:
"""A property for subclasses to override.
For `DobbyError` this will always be `None`.
If the message is a truthy value then it'll be included in the string representation
of the error.
"""
return None
SetupError = type("SetupError", (DobbyError,), {})
SetupError.__doc__ = """
A subclass of `DobbyError` which really doesn't add anything to the base class **but**
it is (or at least should be) the base class for all errors that happen during the setup of
Dobby (that's basically everything before going to sleep to wait for the first task).
"""
EnvError = type("EnvError", (SetupError,), {})
EnvError.__doc__ = """
Finally we get to the first *real* error which is raised when trying to access a key from
the `env` that isn't defined in the environment variables or the `env` config key.
"""
class ConversionError(SetupError):
"""Raised when the conversion of a config value to the designated slave argument type fails.
Attributes:
key: Name of the parameter that the `value` was supposed to be converted for
value: Value that was passed to the `Converter` to be converted
converter: `Converter` that tried to convert the `value`
"""
key: str
value: Any
converter: Callable
def __init__(self, msg: str, **kwargs):
self.key = kwargs.pop("key", None)
self.value = kwargs.pop("value", None)
self.converter = kwargs.pop("converter")
super().__init__(msg, **kwargs)
@property
def message(self) -> str:
"""
A string which provides information on which `Converter` was used,
what was to be converted and for which slave argument.
"""
lines = []
if self.converter:
lines.append(f"Converter: {self.converter}")
if self.key:
lines.append(f"Key: \"{self.key}\"")
if self.value:
lines.append(f"Provided value: {self.value}")
return "\n".join(lines)
| true
|
ed0ca502bf073ff2853b26d7e70e8fcf076dcfab
|
Python
|
yiv/py-lab
|
/datatype/dict.py
|
UTF-8
| 230
| 3.203125
| 3
|
[] |
no_license
|
book = {'padme': 35, 'edwin': 34}
print(book)
book['nick'] = 33
print('add', book)
book['edwin'] = 50
print('update', book)
print('lookup', book['padme'])
print('key list', list(book))
print('key list (sorted)', sorted(book))
| true
|
0699ee1d56bd5a355e0723e10bd8e61401aefffc
|
Python
|
trec-dd/trec-dd-jig
|
/old-scorer/scorer.py
|
UTF-8
| 1,632
| 2.53125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Run all the old-scorer in one trial
from subprocess import call
import click
def run_all(truth, run):
    cubetest(truth, run)
    stage_aware_cubetest(truth, run)
    nDCG(truth, run)
    snDCG(truth, run)
def cubetest(truth, run):
    print 'Calling cubetest'
    # results print to screen
    call(["perl", "cubeTest_dd.pl", truth, run, '50'])
def stage_aware_cubetest(truth, run):
print 'Calling stage aware cubetest'
# results print to screen
call(["perl", "cubeTest_dd_s.pl", truth, run, '50'])
def nDCG(truth, run):
print 'Calling nDCG'
# results written into file
call(["./ndeval", truth, run])
# ndeval [options] qrels.txt run (-help for full usage information)
def snDCG(truth, run):
print 'Calling snDCG'
# results written into file
call(["perl", "snDCG_per_iteration.pl", truth, run, '5'])
def pr(truth, run):
print 'Calling P@R'
# results print to screen
call(["perl", "cubeTest_dd.pl", truth, run, '50'])
choice = {
'cubetest': cubetest,
'stage_aware_cubetest': stage_aware_cubetest,
'nDCG': nDCG,
'snDCG': snDCG,
'PR': pr,
}
@click.command()
@click.option('-truth', type=click.Path(exists=True), help='ground truth file path')
@click.option('-run', type=click.Path(exists=True), help='run file path')
# @click.option('-config', '-c',
# type=click.Choice(['all', 'cubetest', 'stage_aware_cubetest',
# 'nDCG', 'SnDCG']),
# default='all', help='config option')
def score(truth, run):
for key, func in choice.iteritems():
func(truth, run)
if __name__ == '__main__':
score()
| true
|
df5236ef929ea667965c723bd0f24674b0b631d9
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03807/s352343009.py
|
UTF-8
| 235
| 3.015625
| 3
|
[] |
no_license
|
import sys
input = sys.stdin.readline
def main():
N = int(input())
a_list = list(map(int, input().split()))
if sum(a_list) % 2 == 1:
print("NO")
else:
print("YES")
if __name__ == '__main__':
main()
| true
|
c259b02e9473222828a55e03ac514bbe0450c2da
|
Python
|
LKhushlani/leetcode
|
/duplicte0s.py
|
UTF-8
| 514
| 2.90625
| 3
|
[] |
no_license
|
from typing import List
class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
l = len(arr)
shifts = 0
for i in range(l):
if arr[i] == 0:
shifts += 1
for i in range((l-1), -1, -1):
if shifts+i <l:
arr[shifts +i] = arr[i]
if arr[i] == 0:
shifts -=1
if shifts +i < l:
arr[shifts+i] = 0
| true
|
51a33b9fa028cf391ae4939791510888d3c5caea
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02970/s119776449.py
|
UTF-8
| 102
| 3.015625
| 3
|
[] |
no_license
|
n,d = map(int,input().split())
c = d*2+1
if n%c ==0:
print(int(n//c))
else:
print(int(n//c)+1)
| true
|
b55c89444a2b4c620ff073f3bf1b9b4d5efb8722
|
Python
|
rafarios/quicksort
|
/quicksort.py
|
UTF-8
| 1,177
| 3.65625
| 4
|
[] |
no_license
|
#import subprocess
def printArray(arr):
n = len(arr)
f = open("README.md", "w")
for i in range(n):
print (arr[i])
f.write(arr[i] + "\n")
print(" ")
f.close()
#output = subprocess.run(['git', 'add README.md'])
#output = subprocess.run(['git', 'commit -F "README.md updated"'])
def partition(arr,low,high):
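    # Lomuto partition keyed on string length: entries no longer than the pivot string are
    # swapped to the left side; returns the pivot's final index.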
i = ( low-1 ) # index of smaller element
pivot = arr[high] # pivot
for j in range(low , high):
# If current element is smaller than or equal to pivot
if len(arr[j]) <= len(pivot):
# increment index of smaller element
i = i+1
arr[i],arr[j] = arr[j],arr[i]
arr[i+1],arr[high] = arr[high],arr[i+1]
printArray(arr)
return ( i+1 )
def quickSort(arr,low,high):
if low < high:
# pi is partitioning index, arr[p] is now at right place
pi = partition(arr,low,high)
# Separately sort elements before partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
arr = ["*****", "***", "************", "*************", "**", "*", "****", "*************************", "*********", "*****", "*********", "*******", "**"]
n = len(arr)
quickSort(arr,0,n-1)
| true
|
edb4712bf0c452f66d0da6f246451b9c93f3e3ea
|
Python
|
tools4origins/pysparkling
|
/pysparkling/tests/test_context.py
|
UTF-8
| 2,693
| 3.125
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
import logging
import unittest
import pysparkling
class Context(unittest.TestCase):
def test_broadcast(self):
b = pysparkling.Context().broadcast([1, 2, 3])
self.assertEqual(b.value[0], 1)
def test_lock1(self):
"""Should not be able to create a new RDD inside a map operation."""
sc = pysparkling.Context()
self.assertRaises(
pysparkling.exceptions.ContextIsLockedException,
lambda: (sc
.parallelize(range(5))
.map(lambda _: sc.parallelize([1]))
.collect())
)
def test_lock2(self):
"""Should not be able to create RDDs containing RDDs."""
sc = pysparkling.Context()
def parallelize_in_parallelize():
o = sc.parallelize(sc.parallelize(range(x)) for x in range(5))
print(o.map(lambda x: x.collect()).collect())
self.assertRaises(
pysparkling.exceptions.ContextIsLockedException,
parallelize_in_parallelize
)
def test_parallelize_single_element(self):
my_rdd = pysparkling.Context().parallelize([7], 100)
self.assertEqual(my_rdd.collect(), [7])
def test_parallelize_matched_elements(self):
my_rdd = pysparkling.Context().parallelize([1, 2, 3, 4, 5], 5)
self.assertEqual(my_rdd.collect(), [1, 2, 3, 4, 5])
def test_parallelize_empty_partitions_at_end(self):
my_rdd = pysparkling.Context().parallelize(range(3529), 500)
print(my_rdd.getNumPartitions())
my_rdd.foreachPartition(lambda p: print(sum(1 for _ in p)))
self.assertEqual(my_rdd.getNumPartitions(), 500)
self.assertEqual(my_rdd.count(), 3529)
def test_retry(self):
class EverySecondCallFails:
def __init__(self):
self.attempt = 0
def __call__(self, value):
self.attempt += 1
if self.attempt % 2 == 1:
raise Exception
return value
data = list(range(6))
rdd = pysparkling.Context().parallelize(data, 3)
result = rdd.mapPartitions(EverySecondCallFails()).collect()
self.assertEqual(result, data)
def test_union(self):
sc = pysparkling.Context()
rdd1 = sc.parallelize(['Hello'])
rdd2 = sc.parallelize(['World'])
union = sc.union([rdd1, rdd2]).collect()
print(union)
self.assertEqual(union, ['Hello', 'World'])
def test_version(self):
self.assertIsInstance(pysparkling.Context().version, str)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
Context().test_retry()
| true
|
2fb401ed5b35314d0bc072cb21178ae77afe8933
|
Python
|
Benny93/dragonflow
|
/dragonflow/db/drivers/redis_calckey.py
|
UTF-8
| 990
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crc16
RedisClusterHashSlots = 16384
def key2slot(key):
"""
Calculate keyslot for a given key.
This also works for binary keys that is used in python 3.
"""
k = unicode(key)
start = k.find("{")
if start > -1:
end = k.find("}", start + 1)
if end > -1 and end != start + 1:
k = k[start + 1:end]
return crc16.crc16xmodem(k) % RedisClusterHashSlots
| true
|
4ca5a35f79db9a0d38f55fa4a4ec6a6994f220ca
|
Python
|
WanzhengZhu/local-embedding
|
/code/find_general_terms.py
|
UTF-8
| 5,919
| 2.515625
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.feature_extraction.text import *
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import normalize
import time
def read_file(filename):
lines = []
with open(filename) as file:
for line in file:
# line = " ".join(line.split())
lines.append(line[:-1]) # Git rid of the last character \n
return lines
def read_interested_file(filename, keywords):  # Intended to keep only lines containing at least one keyword; as written it returns every line
lines = []
with open(filename) as file:
for line in file:
lines.append(line[:-1]) # Git rid of the last character \n
return lines
def read_file1(filename, num):
training_data = []
training_label = []
i = 0
with open(filename) as file:
for line in file:
training_data.append(int(line.split()[0]))
training_label.append(int(line.split()[1]))
i = i+1
if i >= num:
break
return training_data, training_label
def read_wordEmb(filename):
words = []
wordEmb = []
with open(filename) as file:
for line in file:
words.append(line.split()[0]) # Vocabulary
wordEmb.append(map(float, line.split()[1:])) # word vectors
return (words,wordEmb)
def Normalize(senvec):
norm = 0
for x in senvec:
norm += x**2
senvec = [x/(norm**0.5) for x in senvec]
return senvec
def find_general_terms(input_dir, node_dir, center_names, keywords_file):
# node_dir = '/Users/wanzheng/Desktop/local-embedding/data/dblp/non-para-00/'
print('[find_general_terms] Begin finding general terms')
begin = time.time()
print 'Reading files...'
keywords = read_file(keywords_file)
filename = input_dir + 'papers.txt'
lines = read_file(filename)
# lines = read_interested_file(filename, keywords)
filename1 = node_dir + 'paper_cluster.txt'
number = max(int(0.01*len(lines)), 50000)
training_data_index, training_label = read_file1(filename1, number)
lines_used = []
for i in training_data_index:
lines_used.append(lines[i])
del lines # Save memory
print 'Initializing tf-idf...'
vectorizer = TfidfVectorizer(min_df=10)
A = vectorizer.fit_transform(lines_used)
print A._shape
tfidf = A.toarray()
print 'Trim tf-idf to only interested keywords'
temp = []
for i in keywords:
if i in vectorizer.vocabulary_:
temp.append(vectorizer.vocabulary_.get(i))
tfidf = np.array(tfidf[:, temp])
print 'Original size is: ' + str(tfidf.shape)
invalid = np.where(np.sum(tfidf, 1) == 0)
print 'Number of invalid entries is: ' + str(len(invalid[0]))
tfidf = np.delete(tfidf, invalid, 0)
training_label = np.delete(np.array(training_label), invalid, 0)
print 'Truncated size is: ' + str(tfidf.shape)
print 'Running LASSO...'
tfidf = normalize(tfidf, norm='l2', axis=0) # axis: 1: normalize each data; 0: normalize each feature. 0 is better than 1.
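    # L1-penalized hinge-loss SGD behaves like a sparse (lasso-style) linear classifier:
    # keywords whose learned weight stays at zero are treated as cluster-nondetermining (general) terms.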
clf = SGDClassifier(loss="hinge", penalty="l1", alpha=0.00005, n_jobs=20, max_iter=5) # Alpha for dblp is
clf.fit(tfidf, training_label)
dimension = len(tfidf[0])
del tfidf # To save memory
print 'Writing to file...'
filename_to_write = node_dir + 'repre_keyword.txt'
general_keyword_to_write = node_dir + 'general_keyword.txt'
nondetermining_terms = [[] for i in range(len(clf.coef_))]
specific_terms = [[] for i in range(len(clf.coef_))]
with open(filename_to_write, 'w') as fout:
cluster_num = 0
for weight in clf.coef_:
keyword_number = 0
print sum(weight)
fout.write('cluster number ' + str(cluster_num) + ': ' + center_names[cluster_num] + '\n')
representativeness_order = np.argsort(abs(np.array(weight)))[::-1]
for i in representativeness_order:
keyword_i = vectorizer.vocabulary_.keys()[vectorizer.vocabulary_.values().index(temp[i])]
if weight[i] == 0: # Write to general terms
nondetermining_terms[cluster_num].append(keyword_i)
continue
if weight[i] > 1: # Write to specific terms. The importance of this word is above threshold
specific_terms[cluster_num].append(keyword_i)
if keyword_number < 20:
if weight[i] > 0: # Only output positive value, since positive value means learning towards this cluster
fout.write(keyword_i + '\t' + str(weight[i]) + '\n')
keyword_number = keyword_number + 1
cluster_num = cluster_num + 1
fout.write('\n')
print 'Non-zero percentage: ' + str(keyword_number) + '/' + str(dimension)
for i in range(len(specific_terms)):
print('[find_general_terms] Specific terms: ', specific_terms[i].__len__())
with open(node_dir + 'repre_cand_' + str(i) + '.txt', 'w') as fout:
for j in specific_terms[i]:
fout.write(j + '\n')
# General terms for all clusters
result = set(nondetermining_terms[0])
for s in nondetermining_terms[1:]:
result.intersection_update(s)
# print result
general_terms = []
with open(general_keyword_to_write, 'w') as fout:
for i in result:
fout.write(i + '\n')
general_terms.append(i)
print('[find_general_terms] General terms: ', general_terms.__len__())
end = time.time()
print('[find_general_terms] Finish finding general terms using time %s second' % (end-begin))
return general_terms, specific_terms
# print 'Running LASSO'
# for alpha in [0, 0.01, 0.1, 0.3, 0.5, 1, 2]:
# print 'alpha is: ' + str(alpha)
# clf = Lasso(alpha=alpha)
# clf.fit(tfidf, training_label)
# print clf.coef_
# print sum(clf.coef_)
| true
|
9863a5914df334b8ea4c0c124c20125e2a8114af
|
Python
|
BQSKit/bqskit
|
/bqskit/passes/processing/exhaustive.py
|
UTF-8
| 6,358
| 2.796875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
"""This module implements the ExhaustiveGateRemovalPass."""
from __future__ import annotations
import logging
from typing import Any
from typing import Callable
import numpy as np
from bqskit.compiler.basepass import BasePass
from bqskit.compiler.passdata import PassData
from bqskit.ir.circuit import Circuit
from bqskit.ir.operation import Operation
from bqskit.ir.opt.cost.functions import HilbertSchmidtResidualsGenerator
from bqskit.ir.opt.cost.generator import CostFunctionGenerator
from bqskit.ir.structure import CircuitStructure
from bqskit.runtime import get_runtime
from bqskit.utils.typing import is_real_number
_logger = logging.getLogger(__name__)
class ExhaustiveGateRemovalPass(BasePass):
"""
The ExhaustiveGateRemovalPass class.
Use instantiation to remove the most possible gates from the circuit.
"""
def __init__(
self,
success_threshold: float = 1e-10,
cost: CostFunctionGenerator = HilbertSchmidtResidualsGenerator(),
instantiate_options: dict[str, Any] = {},
collection_filter: Callable[[Operation], bool] | None = None,
scoring_fn: Callable[[Circuit], float] | None = None,
) -> None:
"""
Construct a ExhaustiveGateRemovalPass.
Args:
success_threshold (float): The distance threshold that
                determines successful termination. Measured in cost
described by the hilbert schmidt cost function.
(Default: 1e-10)
cost (CostFunction | None): The cost function that determines
successful removal of a gate.
(Default: HilbertSchmidtResidualsGenerator())
instantiate_options (dict[str: Any]): Options passed directly
to circuit.instantiate when instantiating circuit
templates. (Default: {})
collection_filter (Callable[[Operation], bool] | None):
A predicate that determines which operations should be
attempted to be removed. Called with each operation
in the circuit. If this returns true, this pass will
attempt to remove that operation. Defaults to all
operations.
scoring_fn (Callable[[Circuit], float]):
A scoring function for the circuits to determine which one
to select. Defaults to gate counts weighted by their size.
"""
if not is_real_number(success_threshold):
raise TypeError(
'Expected real number for success_threshold'
', got %s' % type(success_threshold),
)
if not isinstance(cost, CostFunctionGenerator):
raise TypeError(
'Expected cost to be a CostFunctionGenerator, got %s'
% type(cost),
)
if not isinstance(instantiate_options, dict):
raise TypeError(
'Expected dictionary for instantiate_options, got %s.'
% type(instantiate_options),
)
self.collection_filter = collection_filter or default_collection_filter
if not callable(self.collection_filter):
raise TypeError(
'Expected callable method that maps Operations to booleans for'
' collection_filter, got %s.' % type(self.collection_filter),
)
self.scoring_fn = scoring_fn or default_scoring_fn
if not callable(self.scoring_fn):
raise TypeError(
'Expected callable method that maps Circuits to floats for'
' scoring_fn, got %s.' % type(self.scoring_fn),
)
self.success_threshold = success_threshold
self.cost = cost
self.instantiate_options: dict[str, Any] = {
'dist_tol': self.success_threshold,
'min_iters': 100,
}
self.instantiate_options.update(instantiate_options)
async def run(self, circuit: Circuit, data: PassData) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
instantiate_options = self.instantiate_options.copy()
if 'seed' not in instantiate_options:
instantiate_options['seed'] = data.seed
_logger.debug('Starting exhaustive gate removal.')
target = self.get_target(circuit, data)
# Frontier tracks circuits successfully instantiated to target
frontier = [circuit.copy()]
        # Track the best circuit seen so far
best_circuit = None
best_score = -np.inf
# Keep removing until no more successful circuits
while len(frontier) > 0:
# Expand each element of frontier by removing gates
expanded_circuits = []
# Don't repeat circuit structures
circuits_seen = set()
for c in frontier:
for cycle, op in c.operations_with_cycles():
point = (cycle, op.location[0])
copy = c.copy()
copy.pop(point)
structure = CircuitStructure(copy)
if structure not in circuits_seen:
expanded_circuits.append(copy)
circuits_seen.add(structure)
# Instantiate them all
instantiated_circuits = await get_runtime().map(
Circuit.instantiate,
expanded_circuits,
target=target,
**instantiate_options,
)
# Process them
next_frontier = []
for c in instantiated_circuits:
if self.cost(c, target) < self.success_threshold:
next_frontier.append(c)
score = self.scoring_fn(c)
if score > best_score:
best_circuit = c
best_score = score
frontier = next_frontier
# Keep best circuit if one found
if best_circuit is not None:
circuit.become(best_circuit)
def default_collection_filter(op: Operation) -> bool:
return True
def default_scoring_fn(circuit: Circuit) -> float:
"""Default scoring function."""
score = 0.0
for op in circuit:
score -= (op.num_qudits - 1) * 100 + 1
return score
| true
|
18d31fb5ec52da0d2eed3b6e716937b34f3315bb
|
Python
|
italosaniz/SisTelecomunicaciones
|
/modulogit.py
|
UTF-8
| 1,204
| 3.609375
| 4
|
[] |
no_license
|
# CONSTANCIA DE MATRICULA (proof of enrollment)
def nombre():
global nom
nom=input("Ingrese su nombre: ")
if (not verificar(nom)):
print("Intentelo de nuevo")
nombre()
def verificar(x):
for i in x:
if (ord(i)<65 or ord(i)>90) and (ord(i)<97 or ord(i)>122) and ord(i)!=32:
return False
return True
def apellido():
global ape
ape=input("Ingrese su apellido: ")
if (not verificar(ape)):
print("Intentelo de nuevo")
apellido()
def numdni():
global dni
while True:
try:
dni=int(input("Ingrese el numero de su DNI: "))
break
except:
print("Intentelo de nuevo")
dni=str(dni)
if (len(dni)!=8):
print("Intentelo de nuevo")
numdni()
def naci():
global nacimiento
while True:
try:
nacimiento=int(input("Ingrese la fecha de su nacimiento"))
break
except:
print("Intentelo de nuevo")
def cui():
    global num_cui  # use a separate name so the function is not overwritten by its own result
    while True:
        try:
            num_cui = int(input("Ingrese el numero de su CUI"))
            break
        except:
            print("Intentelo de nuevo")
|
70943469fe4721c8019c58ceaf47d7d9aede8b69
|
Python
|
Tmk10/Reddit_dailyprogramming
|
/Kaprekar routine/kaprekar_routine.py
|
UTF-8
| 723
| 3.109375
| 3
|
[] |
no_license
|
#main challenge
def kaprekar_routine(number):
number = list(str(number))
if len(number) <4:
number.insert(0,"0")
return(max(number))
#bonus 1
def kaprekar_routine_1(number):
number = list(str(number))
if len(number) < 4:
number.insert(0, "0")
return "".join(sorted(number,reverse=True))
#bonus 2
def kaprekar_routine_2(number):
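    # Repeat the Kaprekar step (digits sorted descending minus ascending, zero-padded to
    # 4 digits) and count iterations until reaching 6174; repdigits never converge, so return 0.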
counter =0
while number != 6174:
number = list(str(number))
if len(number) < 4:
number.insert(0, "0")
if len(set(number)) < 2:
counter =0
return counter
number = int("".join(sorted(number,reverse=True))) - int("".join(sorted(number)))
counter+=1
return counter
| true
|
918aeaeaf69f1b365f23c6208b63883f71425df0
|
Python
|
AbdulHannanKhan/Advance-Lane-Finding
|
/lane.py
|
UTF-8
| 15,579
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import cv2
def eval_poly(vec, poly):
"""
Evaluates value of a polynomial at a given point
:param vec: The given point :float
:param poly: The polynomial :ndarray[3,]
:return: value of polynomial at given point :float
"""
return vec ** 2 * poly[0] + vec * poly[1] + poly[2]
def curvature_of_poly(poly, y):
"""
Given a polynomial and a point y calculate its curvature
:param poly: The polynomial :ndarray[3,]
:param y: The point to calculate curvature at :float
:return: The curvature of Polynomial at y
"""
a, b, c = poly
return ((1 + (2 * a * y + b) ** 2) ** (3 / 2)) / np.abs(2 * a)
class Lane:
left_anchor = None
right_anchor = None
left_poly = None
left_poly_m = None
right_poly = None
right_poly_m = None
win_count = None
search_window_margin = None
min_votes = None
image_size = None
mean_dist = 0
dist_count = 0
data_min = 0
curvature = 0
vehicle_center = 0
lane_width = 3.7 # average lane width
vehicle_width = 2.5 # average vehicle width
xm_per_pix = lane_width / 580 # 1280 - 350(offset)*2 = 580px
ym_per_pix = 30 / 720 # 30 meters actual lane length in ROI perspective projected on 720px
M_ = None
M_inv_ = None
@classmethod
def threshold(cls, frame):
"""
Combine Saturation and Sobel thresholds to extract possible lane indication pixels
:param frame: The given image to extract lane pixels
:return: Grayscale image with highlighted possible lane pixels
"""
image = cv2.cvtColor(frame, cv2.COLOR_RGB2HLS) # Convert to HLS
s_ = image[:, :, 2] # extract S channel from HLS
_h, _w = s_.shape
image = cv2.GaussianBlur(s_, (5, 5), 1) # Blur before thresholding to reduce noise
highly_saturated = np.uint8(image > 200)
sobel_strong_edges = np.uint8(cv2.Sobel(image, cv2.CV_64F, 1, 0) > 20)
# Highlight where highly saturated or strong sobel edge pixels are found
image = highly_saturated * 50 + sobel_strong_edges * 50
return image
def find_hist_based_anchors(self, frame):
"""
Using histograms find left and right lane polynomial starting points
:param frame: Input frame
:return: None
"""
# define bounds
frame_height = frame.shape[0]
win_height = int(frame_height / self.win_count)
mid_point = int(frame.shape[1] / 2)
# calculate histogram of last 1/8th row patch
hist = np.sum(frame[-win_height:, :] > 0, 0)
# extract max values one from left half of image and one from right half as left and right anchors
# respectively
self.left_anchor = np.argmax(hist[:mid_point])
self.right_anchor = np.argmax(hist[mid_point:]) + mid_point
def extract_poly(self, frame):
"""
Use left and right anchors as starting point and apply sliding window approach to find points of interest
for lane polynomial
:param frame: Input frame
:return: None
"""
debug = np.copy(frame) # for debug draw sliding window rects
# Define current left and right x positions
cur_left = self.left_anchor
cur_right = self.right_anchor
# Search parameters setup
height, width = frame.shape[:2]
win_height = int(height / self.win_count)
margin = self.search_window_margin
# Storage for left and right points of interest for polynomial
nonzero_indices_left = []
nonzero_indices_right = []
# Extract all nonzero x and y locations from frame
nonzero_y, nonzero_x = np.nonzero(frame)
# For all sliding windows
for i in range(self.win_count):
# Define window start and end
win_set = height - (i + 1) * win_height
win_end = height - i * win_height
# Find left and right polynomial candidates by checking if they lie inside the sliding window
left_candidates = (
(nonzero_y >= win_set) &
(nonzero_y < win_end) &
(nonzero_x >= max(cur_left - margin, 0)) &
(nonzero_x < min(cur_left + margin, width))
).nonzero()[0]
right_candidates = (
(nonzero_y >= win_set) &
(nonzero_y < win_end) &
(nonzero_x >= max(cur_right - margin, 0)) &
(nonzero_x < min(cur_right + margin, width))
).nonzero()[0]
# Add found candidates to their respective storages
nonzero_indices_left += left_candidates.tolist()
nonzero_indices_right += right_candidates.tolist()
# If there are more candidates than minimum votes shift the current x positions to mean of current window
if np.sum(left_candidates) > self.min_votes:
cur_left = np.mean(nonzero_x[left_candidates])
if np.sum(right_candidates) > self.min_votes:
cur_right = np.mean(nonzero_x[right_candidates])
# Draw rects for debugging
cv2.rectangle(debug, (int(cur_left - margin), win_set), (int(cur_left + margin), win_end), 255)
cv2.rectangle(debug, (int(cur_right - margin), win_set), (int(cur_right + margin), win_end), 255)
# Extract x and y indices of candidates for both left and right polynomial
left_y = nonzero_y[nonzero_indices_left]
left_x = nonzero_x[nonzero_indices_left]
right_y = nonzero_y[nonzero_indices_right]
right_x = nonzero_x[nonzero_indices_right]
# if total candidate points of polynomial are greater than a threshold fit polynomial to the points
# Also find metric polynomials to use for curvature and vehicle position detection
if np.sum(nonzero_indices_left) > 100:
self.left_poly = np.polyfit(left_y, left_x, 2)
# Find a metric polynomial by converting points to read world points
left_y_metric = left_y * self.ym_per_pix
left_x_metric = (left_x - self.warp_offset) * self.xm_per_pix # Consider perspective transform offset
self.left_poly_m = np.polyfit(left_y_metric, left_x_metric, 2)
if np.sum(nonzero_indices_right) > 100:
self.right_poly = np.polyfit(right_y, right_x, 2)
right_y_metric = right_y * self.ym_per_pix
right_x_metric = (right_x - self.warp_offset) * self.xm_per_pix
self.right_poly_m = np.polyfit(right_y_metric, right_x_metric, 2)
# keep track of overall mean pixel distances between left and right polynomials
self.mean_dist += self.right_anchor - self.left_anchor
self.dist_count += 1
# estimate curvature and vehicle position using found lane polynomials
self.estimate_curvature_and_position()
def estimate_curvature_and_position(self):
"""
Estimates curvature of lane and position of vehicle
:return: None
"""
height = self.image_size[0]
eval_point = (height - 1) * self.ym_per_pix # point closest to vehicle to estimate curvature at
# Find curvature of both polynomials and take mean
left_curvature = curvature_of_poly(self.left_poly_m, eval_point)
right_curvature = curvature_of_poly(self.right_poly_m, eval_point)
self.curvature = (left_curvature + right_curvature) / 2
# Find vehicle position
absolute_vehicle_center = (eval_poly(eval_point, self.right_poly_m) +
eval_poly(eval_point, self.left_poly_m)) / 2
# Estimate vehicle position relative to lane center
self.vehicle_center = self.lane_width / 2 - absolute_vehicle_center
def create_image_mask(self):
"""
Create image mask based on lane polynomials to highlight frame
:return: Mask image
"""
h, w = self.image_size
im = np.zeros((h, w, 3), dtype=np.uint8)
# Sample y points starting from top confidence location to bottom of image
plot_y = np.linspace(self.data_min, h - 1, h - self.data_min)
# Calculate values of polynomials at y sample points
left_plt_x = self.left_poly[0] * plot_y ** 2 + self.left_poly[1] * plot_y + self.left_poly[2]
right_plt_x = self.right_poly[0] * plot_y ** 2 + self.right_poly[1] * plot_y + self.right_poly[2]
# Update mean dist using intercepts of polynomials
self.mean_dist += right_plt_x[-1] - left_plt_x[-1]
self.dist_count += 1
# For each sampled y
for i in range(h - self.data_min):
# Find start and end lane pixel
start = int(max(0, left_plt_x[i]))
end = int(min(w, right_plt_x[i]))
# Color lane pixels for current row to be green
im[i + self.data_min, start:end, 1] = 255
# Add Red spectrum based on how much away vehicle is from lane center
im[i + self.data_min, start:end, 2] = \
abs(self.vehicle_center) / ((self.lane_width - self.vehicle_width) / 2) * 255
return im
def mask_frame(self, frame):
"""
Mask/Highlight given frame with currently estimated lane area
:param frame: Current frame
:return: Masked frame
"""
        # Get the mask, unwarp its perspective and overlay it on the frame
mask = self.create_image_mask()
lane_mask = self.perspective_unwrap(mask)
frame = cv2.addWeighted(frame, 1, lane_mask, 0.5, 0)
# Check where vehicle is relative to lane center
direction = "left" if self.vehicle_center < 0 else "right"
# Show current curvature and vehicle position on image
cv2.putText(
frame,
f"Curvature: {int(self.curvature)} m",
(10, 35),
cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2,
cv2.LINE_AA
)
cv2.putText(
frame,
f"{direction}: {int(abs(self.vehicle_center) * 100) / 100} m",
(10, 85),
cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 2,
cv2.LINE_AA
)
return frame
def process(self, frame):
"""
Update polynomials using previously estimated lane polynomials and given frame
:param frame: Current undistorted video frame
:return:
"""
# Perspective wrap and threshold the frame
frame = self.preprocess_frame(frame)
# Set search window margin
margin = self.search_window_margin
# Extract nonzero x and y locations from current frame
nonzero_y, nonzero_x = np.nonzero(frame)
# Given polynomials and search window margin check which nonzero pixels are polynomial candidates
nonzero_left = eval_poly(nonzero_y, self.left_poly)
nonzero_right = eval_poly(nonzero_y, self.right_poly)
left_candidates = (
(nonzero_x >= nonzero_left - margin) &
(nonzero_x < nonzero_left + margin)
).nonzero()[0]
right_candidates = (
(nonzero_x >= nonzero_right - margin) &
(nonzero_x < nonzero_right + margin)
).nonzero()[0]
# Extract x and y indices of polynomial candidates for both left and right
left_y = nonzero_y[left_candidates]
left_x = nonzero_x[left_candidates]
right_y = nonzero_y[right_candidates]
right_x = nonzero_x[right_candidates]
# Find confidence point i.e. the y point from top where we have data from both left and right polynomial
# we don't want to highlight area of lane where we are not confident
if np.sum(left_y) > 0 and np.sum(right_y) > 0:
self.data_min = max(left_y.min(), right_y.min())
# If polynomial candidates are greater than a threshold update polynomials both pixel and metric
if np.sum(left_candidates) > 50:
self.left_poly *= 0.7
self.left_poly_m *= 0.7
self.left_poly += 0.3 * np.polyfit(left_y, left_x, 2)
self.left_poly_m += 0.3 * \
np.polyfit(left_y * self.ym_per_pix, (left_x - self.warp_offset) * self.xm_per_pix, 2)
if np.sum(right_candidates > 50):
self.right_poly *= 0.7
self.right_poly_m *= 0.7
self.right_poly += 0.3 * np.polyfit(right_y, right_x, 2)
self.right_poly_m += 0.3 * \
np.polyfit(
right_y * self.ym_per_pix, (right_x - self.warp_offset) * self.xm_per_pix, 2
)
# Check if the found polynomials intercepts are correct if not reinitialize using sliding window method
if not self.are_intercepts_correct():
self.init(frame)
# Estimate lane curvature and vehicle position
self.estimate_curvature_and_position()
def are_intercepts_correct(self):
"""
Check if polynomial are correct by checking if their intercepts are at least 200 pixels apart
:return: None
"""
return self.right_poly[2] - self.left_poly[2] > 200
def __init__(self, frame, roi, warp_offset, win_count=8, search_window_margin=30, min_votes=50):
# Initialize internal parameters
self.win_count = win_count
self.image_size = frame.shape[:2]
self.search_window_margin = search_window_margin
self.min_votes = min_votes
self.roi_source = roi
self.warp_offset = warp_offset
# Estimate perspective transform matrices
self.estimate_perspective_transform()
# Initialize polynomials with sliding window method
preprocessed = self.preprocess_frame(frame)
self.init(preprocessed)
def preprocess_frame(self, frame):
"""
        Perspective wrap and threshold frames to make them ready for processing
:param frame: Image
:return: None
"""
wrap = self.perspective_wrap(frame)
return self.threshold(wrap)
def init(self, frame):
"""
Initialize using sliding window method
:param frame: Image
:return: None
"""
self.find_hist_based_anchors(frame)
self.extract_poly(frame)
def estimate_perspective_transform(self):
"""
Calculate perspective transform matrices
:return: None
"""
h, w = self.image_size
offset = self.warp_offset
# Create destination polygon based on offset and image dimensions
roi_dest = np.float32([[w - offset, 0], [w - offset, h], [offset, h], [offset, 0]])
self.M_ = cv2.getPerspectiveTransform(np.float32(self.roi_source), roi_dest)
self.M_inv_ = cv2.getPerspectiveTransform(roi_dest, np.float32(self.roi_source))
def perspective_wrap(self, frame):
"""
Perspective Transform to obtain bird eye view
:param frame: Image
:return: None
"""
h, w = self.image_size
return cv2.warpPerspective(frame, self.M_, (w, h))
def perspective_unwrap(self, frame):
"""
Perspective Transform inverse to obtain original frame from bird eye view
:param frame: Image
:return: None
"""
h, w = self.image_size
return cv2.warpPerspective(frame, self.M_inv_, (w, h))
| true
|
64c644e5f0a55ef375c391ac186cefbb8f78d011
|
Python
|
AwsManas/Practice
|
/SieveOfErathonenis.py
|
UTF-8
| 1,079
| 3.640625
| 4
|
[] |
no_license
|
def SieveOfEratosthenes(n):
prime = [True for i in range(n+1)]
p = 2
while (p * p <= n):
# If prime[p] is not changed, then it is a prime
if (prime[p] == True):
# Update all multiples of p
for i in range(p * 2, n+1, p):
prime[i] = False
p += 1
# Print all prime numbers
for p in range(2, n):
if prime[p]:
print p,
# driver program
if __name__=='__main__':
n = 30
print "Following are the prime numbers smaller",
print "than or equal to", n
SieveOfEratosthenes(n)
// C++ version (fragment):
vector<int> primesum(int N) {
// Generate isPrime List less equal than N
vector<bool> isPrime(N + 1, true);
isPrime[0] = false;
isPrime[1] = false;
    // Sieve of Eratosthenes
for(int i = 2; i <= N; i++) {
if (!isPrime[i]) continue;
if (i > N / i) break;
for (int j = i * i; j <= N; j += i) isPrime[j] = false;
}
| true
|
655e869d768038629fa79d90614be3d85a4002d7
|
Python
|
Randy777/100_PyExample
|
/.history/10-19/14_20200908163947.py
|
UTF-8
| 143
| 3.21875
| 3
|
[] |
no_license
|
# Factor a positive integer into prime factors, e.g. input 90 prints 90=2*3*3*5.
def FunTest(n):
    m, d, factors = n, 2, []
    while m > 1:
        while m % d == 0:
            factors.append(str(d))
            m //= d
        d += 1
    print("%d=%s" % (n, "*".join(factors)))
if __name__ == "__main__":
    FunTest(90)
| true
|
5cca83c1368c920c2ec4e005495d18062c284408
|
Python
|
james20141606/eMaize
|
/bin/random_projection.py
|
UTF-8
| 6,883
| 2.71875
| 3
|
[] |
no_license
|
#! /usr/bin/env python
import argparse, sys, os, errno
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s [%(levelname)s] : %(message)s')
logger = logging.getLogger('random_projection')
def prepare_output_file(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
if __name__ == '__main__':
main_parser = argparse.ArgumentParser(description='Command line interface for sklearn.random_projection')
subparsers = main_parser.add_subparsers(dest='command')
# command: generate
parser = subparsers.add_parser('generate', help='generate a random sparse matrix')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-i', '--input-file', type=str,
help='a HDF5 file')
group.add_argument('-p', '--n-features', type=int,
help='number of features')
parser.add_argument('--dataset', type=str,
help='dataset name in the HDF5 file')
parser.add_argument('--transpose', action='store_true',
help='transpose the matrix in the HDF5 file before random projection')
parser.add_argument('-r', '--n-components', type=int, required=True,
help='number of components after random projection')
parser.add_argument('-o', '--output-file', type=str, required=True,
help='matrix in .npz format')
# command: test_load
parser = subparsers.add_parser('test_load', help='test if a npz file can be loaded')
parser.add_argument('input_file', type=str,
help='input matrix file in .npz format')
# command: transform
parser = subparsers.add_parser('transform',
help='transform an input matrix using random projection')
parser.add_argument('-i', '--input-file', type=str, required=True,
help='data matrix')
parser.add_argument('--datasets', type=str, required=True,
help='comma-separated list of dataset names. * for all datasets.')
parser.add_argument('--components-file', type=str,
help='components in .npz format')
parser.add_argument('-o', '--output-file', type=str, required=True,
help='transformed features file')
parser.add_argument('--merge', action='store_true',
help='merge transformed features into a single matrix')
parser.add_argument('--output-dataset', type=str, default='data',
help='output dataset name')
# command: normalize
parser = subparsers.add_parser('normalize',
help='normalize the transformed features into z-scores')
parser.add_argument('-i', '--input-file', type=str, required=True,
help='output file of the transform command')
parser.add_argument('--scaler-file', type=str, required=True,
help='input file containing scales for each feature (HDF5 file with dataset mean_ and scale_)')
parser.add_argument('-o', '--output-file', type=str, required=True,
help='normalized features file')
# command: merge
args = main_parser.parse_args()
import numpy as np
from sklearn.random_projection import SparseRandomProjection
from scipy.sparse import load_npz, save_npz
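    # Illustrative invocations (file names below are placeholders, not taken from the repository):
    #   random_projection.py generate -p 100000 -r 512 -o components.npz
    #   random_projection.py transform -i features.h5 --datasets '*' --components-file components.npz -o transformed.h5 --merge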
if args.command == 'generate':
if args.input_file is not None:
import h5py
f = h5py.File(args.input_file, 'r')
if args.dataset is None:
raise ValueError('option --dataset is required for HDF5 input file')
if args.transpose:
n_features = f[args.dataset].shape[0]
else:
n_features = f[args.dataset].shape[1]
if args.n_features is not None:
n_features = args.n_features
logger.info('number of features: %d'%n_features)
X = np.zeros((2, n_features))
proj = SparseRandomProjection(args.n_components)
logger.info('generate random projection matrix (%d components)'%args.n_components)
proj.fit(X)
logger.info('save random projection matrix to ' + args.output_file)
prepare_output_file(args.output_file)
save_npz(args.output_file, proj.components_, compressed=False)
elif args.command == 'test_load':
load_npz(args.input_file)
elif args.command == 'transform':
import h5py
logger.info('load random projection components from ' + args.components_file)
components = load_npz(args.components_file)
proj = SparseRandomProjection(components.shape[0])
proj.components_ = components
        fin = h5py.File(args.input_file, 'r')
y = {}
if args.datasets == '*':
datasets = fin.keys()
else:
datasets = args.datasets.split(',')
for dataset in datasets:
X = fin[dataset][:].reshape((1, -1))
logger.info('transform dataset ' + dataset)
y[dataset] = np.ravel(proj.transform(X))
del X
fin.close()
logger.info('save transformed features to ' + args.output_file)
prepare_output_file(args.output_file)
        fout = h5py.File(args.output_file, 'w')
if args.merge:
logger.info('merge transformed features')
n_samples = len(y)
y = np.concatenate([y[dataset] for dataset in datasets]).reshape((n_samples, -1))
fout.create_dataset('data', data=y)
else:
logger.info('save transformed features as separate datasets')
for dataset in datasets:
fout.create_dataset(dataset, data=y[dataset])
fout.close()
elif args.command == 'normalize':
import h5py
from sklearn.preprocessing import StandardScaler
logger.info('read scaler file: ' + args.scaler_file)
fin = h5py.File(args.scaler_file, 'r')
scaler = StandardScaler(copy=False)
scaler.mean_ = fin['mean_'][:]
scaler.scale_ = fin['scale_'][:]
fin.close()
logger.info('read input file: ' + args.input_file)
fin = h5py.File(args.input_file, 'r')
logger.info('create output file: ' + args.output_file)
prepare_output_file(args.output_file)
fout = h5py.File(args.output_file, 'w')
for dataset in fin.keys():
logger.info('normalize dataset ' + dataset)
data = fin[dataset][:].reshape((1, -1))
data = scaler.transform(data)
fout.create_dataset(dataset, data=np.ravel(data))
fin.close()
fout.close()
| true
|
9ea3dd153802cfe3793e6a6120a590cfc09446e5
|
Python
|
poojagmahajan/Data_Analysis
|
/Data Analytics/Numpy/Transpose.py
|
UTF-8
| 784
| 4.5
| 4
|
[] |
no_license
|
import numpy as np
# Creating 2-D array
arr = np.arange(0,50,1).reshape(10,5) # Declare a 2-D array
print("The original array")
print(arr)
print("\nThe transposed array")
print(arr.transpose()) # Print the transposed array
#print(arr.T) # This can also be used and same result will be produced
# Declare two arrays
arr1 = np.arange(1,40,4)
arr2 = np.arange(1,30,3)
print("The first array\n", arr1, "\nThe second array\n", arr2, '\n')
print("The Exponent Function")
print(np.exp(arr1))
print("\nThe Square Function")
print(np.square(arr1))
print("\nThe Square root Function")
print(np.sqrt(arr1))
print("\nThe Cube root Function")
print(np.cbrt(arr1))
print("\nThe Addition Function")
print(np.add(arr1, arr2))
print("\nThe Subtraction Function")
print(np.subtract(arr1, arr2))
| true
|
4cda9bc26150dbefdf4f021d5711f9b661188d65
|
Python
|
juicyfruityo/spheremailru
|
/homework(1_semestr)/data_analysis/hw2/C_2.py
|
UTF-8
| 1,698
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/python3
import operator
from operator import add
from functools import reduce
def solution1(arr):
return [int(''.join(list(filter(str.isalnum, arr_i[::-1])))) for arr_i in arr]
def solution2(iterator):
return [x[0]*x[1] for x in iterator]
def solution3(iterator):
return [i for i in iterator
if i % 6 == 0
or i % 6 == 5
or i % 6 == 2]
def solution4(arr):
return [i for i in arr if bool(i) is True]
# TODO: rework / finish:
def solution5(rooms):
for room in rooms:
operator.setitem\
(room, 'square', room['width'] * room['length'])
return rooms
def solution6(rooms):
return [i for i in solution5(rooms)]
def solution7(rooms):
return [{'name': room['name'], 'width': room['width'],
'length': room['length'], 'square': room['width']*room['length']}
for room in rooms]
# ^^^^ the above should be reworked ^^^^
def solution8(people):
return int(reduce(add, [man['height'] for man in people])), \
len([man['height'] for man in people])
def solution9(students):
return [stud['name'] for stud in students if stud['gpa'] > 4.5]
def solution10(tickets):
return list(filter(lambda tick:
sum(map(int, [x for i, x in enumerate(tick) if i % 2 == 0])) \
== sum(map(int, [x for i, x in enumerate(tick) if i % 2 == 1])), tickets))
solutions = {
'solution1': solution1,
'solution2': solution2,
'solution3': solution3,
'solution4': solution4,
'solution5': solution5,
'solution6': solution6,
'solution7': solution7,
'solution8': solution8,
'solution9': solution9,
'solution10': solution10,
}
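# Quick sanity check (illustrative input; the result follows from the code above):
#   solutions['solution4']([0, 1, '', 'a', None]) -> [1, 'a']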
| true
|
bfd8a193fb8b6073ed76a04229d9585189c9bcdf
|
Python
|
TiMusBhardwaj/pythonExample
|
/python-and-mongo-master/python_dict.py
|
UTF-8
| 1,272
| 3.9375
| 4
|
[] |
no_license
|
#Dictionary
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
print(thisdict)
x = thisdict["model"]
y = thisdict.get("model")
if x==y:
print("Same Result")
#Dictionary constructor
thisdict = dict(brand="Ford", model="Mustang", year=1964)
thisdict["year"] = 2018
print(thisdict)
#keys
for x in thisdict:
print(x)
#values
for x in thisdict:
print(thisdict[x])
for x in thisdict.values():
print(x)
# Key and value
for x, y in thisdict.items():
print(x, y)
#Key Exists ??
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
if "model" in thisdict:
print("Yes, 'model' is one of the keys in the thisdict dictionary")
print(len(thisdict))
#Add an item
thisdict["color"] = "red"
print(thisdict)
#POP
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
thisdict.pop("model")
print(thisdict)
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
thisdict.popitem()
print(thisdict)
#DEL
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
del thisdict["model"]
print(thisdict)
#Clear
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
thisdict.clear()
print(thisdict)
| true
|
369abcc07bcfbd4fd487da9e6355153e6362a7e9
|
Python
|
nPellejero/deepNet
|
/src/scripts/detect3.py
|
UTF-8
| 2,711
| 2.625
| 3
|
[] |
no_license
|
# Run this from the images-originals directory.
# Converts images to grayscale, equalizes the intensity histogram, then detects the largest face, crops the image and saves it to images-croped.
import numpy as np
import cv2
import os,sys
rootdir = os.getcwd()
face_cascade = cv2.CascadeClassifier('../scripts/haarcascade_frontalface_default.xml')
face_cascade1 = cv2.CascadeClassifier('../scripts/haarcascade_frontalface_alt.xml')
face_cascade2 = cv2.CascadeClassifier('../scripts/haarcascade_profileface.xml')
pathCroped = "/home/npellejero/tesis/AMFED/images-croped2/"
pathOrig = "/home/npellejero/tesis/AMFED/images-originals/"
contador = 0
contLoc = 0
contLocM = 0
contMaster = 0
fileName = sys.argv[1]
def detect(img, cascade):
detectTot = []
#for scale in [float(i)/10 for i in range(12, 12)]:
# for neighbors in range(3,4):
scale = 1.1
neighbors = 3
rects = cascade.detectMultiScale(img, scaleFactor=scale, minNeighbors=neighbors, minSize=(50, 50), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
detectTot = detectTot + list(rects)
return detectTot
def find_face_from_img(img):
rects1 = detect(img, face_cascade)
rects2 = detect(img, face_cascade1)
rects3 = detect(img, face_cascade2)
return rects1 + rects2 + rects3
def funaux(x):
if abs(x[1][3] - x[1][2]) < 10:
return x[1][3]/x[1][2]
else:
return 0
with open(fileName) as f:
dirNames = f.readlines()
dirNames = map( lambda x: x.strip(), dirNames)
for myDir in dirNames:
print myDir
pathHastaDir = pathOrig+myDir+"/"
pathHastaNewDir = pathCroped+myDir+"/"
try:
os.makedirs(pathHastaNewDir)
except OSError:
if not os.path.isdir(pathHastaNewDir):
raise
for fi in os.listdir(myDir):
absFile = pathHastaDir+fi
img = cv2.imread(absFile)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=5, tileGridSize=(2,2))
cl1 = clahe.apply(img)
contLocM = contLocM + 1
totalFace = find_face_from_img(cl1)
if len(totalFace) > 0:
contLoc = contLoc + 1
totalFace = map(lambda x: list(x),totalFace)
max_index, max_value = max(enumerate(totalFace), key=lambda x: funaux(x) )
x,y,w,h = max_value
#print x, y, w, h
            # Debugging boxes
#cv2.rectangle(cl1, (x, y), (x+w, y+h), (0, 255, 0), 2)
y1 = y - int(h/4.)
y2 = y + h + int(h/4.)
x1 = x - int(w/8.)
x2 = x + w + int(w/8.)
image = cl1[y1:y2, x1:x2]
cv2.imwrite(pathCroped+myDir+"/"+fi,image)
contador = contador + contLoc
contMaster = contMaster + contLocM
print "en este dire encontramos: "+ str(contLoc) + " de " + str(contLocM)
contLoc = 0
contLocM = 0
print "encontramos caras en: "+ str(contador) + " de " + str(contMaster)
| true
|
90e7466093dd1e08334f146f401c2ebd4f456cd5
|
Python
|
efaro2014/Dojo-Assignments
|
/python_stack/python/oop/chain_oop.py
|
UTF-8
| 761
| 3.328125
| 3
|
[] |
no_license
|
class User:
def __init__(self, name, email):
self.name = name
self.email = email
self.account_balance = 0
def make_deposit(self, amount):
self.account_balance += amount
return self
def make_withdrawal(self, amount):
self.account_balance -= amount
return self
def display_user_balance(self):
print(self.account_balance)
# return self
def transfer_money(self, other_user, amount):
self.account_balance -= amount
other_user.account_balance += amount
user1 = User('Efrem', 'efrem@gmail')
user2 = User('Ben', 'ben@gmail.com')
user3 = User('guido', 'guido@gmail.com')
user3.make_deposit(100).make_deposit(200).make_deposit(300).make_withdrawal(50).display_user_balance()
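# The chained call above deposits 100 + 200 + 300 and withdraws 50, so it prints a balance of 550.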
| true
|
26ba8ea3ca77c4e3cac2e7318a600611cfbc3afb
|
Python
|
ChoiHyeongGeun/ScriptProgramming
|
/Practice/#4/Problem6.16.py
|
UTF-8
| 737
| 4.59375
| 5
|
[] |
no_license
|
# 6.16
# Function that returns the total number of days in a year
def numberOfDaysInYear(year) :
    # Leap year: divisible by 4 and not by 100, or divisible by 400
    if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0) :
        return 366 # 366 days in total
    # Not a leap year
    else :
        return 365 # 365 days in total
# Function that prints the result
def printResult() :
    # Initialize the running total of days to 0
    sum = 0
    # From 2010 through 2020
    for x in range(2010, 2021) :
        # Accumulate the number of days
        sum = sum + numberOfDaysInYear(x)
    # Print the result
    print("The total number of days from 2010 to 2020 is", sum, "days.")
# Print the result from the main block
printResult()
| true
|
b9f6f319b4b5b99687a320d5e697ef32852a35ce
|
Python
|
davidaries/Job8
|
/tools.py
|
UTF-8
| 918
| 2.53125
| 3
|
[] |
no_license
|
import working_data
import initial_load_data as ild
def summary():
"""Prints out a summary of information to the console"""
print('\n========= Summary data at this point =========================================================')
print('\npdata =', working_data.pdata)
for h in working_data.pdata:
print(h)
print('\npe_outs =', working_data.pe_outs)
dev_outs = working_data.pe_outs.keys()
for i in working_data.pe_outs:
print('device_out = ', i)
for ii in working_data.pe_outs[i]:
print(' ', ii, working_data.pe_outs[i][ii])
print('\npe_waits =', working_data.pe_waits)
for j in working_data.pe_waits:
print(j, working_data.pe_waits[j])
print('\nadats =', working_data.adat)
for i in working_data.adat:
print('person = ', i)
for ii in working_data.adat[i]:
print(' ', ii, working_data.adat[i][ii])
| true
|
27e708816da9ef0bd3dd4c12d693c58973f3b463
|
Python
|
Mashfiq137/Hough_transform_Image_processing
|
/main.py
|
UTF-8
| 1,447
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 08 16:11:33 2021
@author: rizvee
"""
from patterns import pattern_detect
from collections import defaultdict
from PIL import Image, ImageDraw
from math import sqrt, pi, cos, sin
# ----------------------------------------------Input Image--------------------------------------------------#
input_image = Image.open("sphere.jpg")
output_image = Image.new("RGB", input_image.size)
output_image.paste(input_image)
draw_result = ImageDraw.Draw(output_image)
# ----------------------------------------------Define Range--------------------------------------------------#
steps = 100
rmin = 40
rmax = 80
threshold = 0.4
points = []
for r in range(rmin, rmax + 1):
for t in range(steps):
points.append((r, int(r * cos(2 * pi * t / steps)), int(r * sin(2 * pi * t / steps))))
acc = defaultdict(int)
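# Hough voting: every detected pattern pixel votes for all candidate centres (a, b) at every radius r;
# peaks in the accumulator correspond to circles present in the image.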
for x, y in pattern_detect(input_image):
for r, dx, dy in points:
a = x - dx
b = y - dy
acc[(a, b, r)] += 1
circles_count = []
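# Accept a candidate circle only if it collected enough votes (at least the threshold fraction of the
# sampled circumference points) and its centre does not lie inside an already accepted circle.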
for k, v in sorted(acc.items(), key=lambda i: -i[1]):
x, y, r = k
if v / steps >= threshold and all((x - xc) ** 2 + (y - yc) ** 2 > rc ** 2 for xc, yc, rc in circles_count):
print(v / steps, x, y, r)
circles_count.append((x, y, r))
for x, y, r in circles_count:
draw_result.ellipse((x-r, y-r, x+r, y+r), outline=(255,0,0,0))
output_image.save("Final_Final_Final_Result.png")
| true
|
b27fae3d70457e239a458c2b77959a0ce0902658
|
Python
|
cerealkill/light_karma
|
/utils.py
|
UTF-8
| 1,722
| 3.09375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import re
from itertools import cycle
MANTRA = cycle(["om vajrasattva samaya", "manupalaya", "vajrasattva denopa titha", "dido me bhava",
"suto kayo me bhava", "supo kayo me bhava", "anurakto me bhava", "sarva siddhi me prayatsa",
"sarva karma su tsame", "tsittam shriyam kuru hum", "ha ha ha ha ho", "bhagavan", "sarva tathagata",
"vajra mame muntsa", "vajra bhava maha samaya sattva ah hum phet"])
def find_text(text, reg):
"""
Regex finder of first occurrence
:param text: text to find the match
:param reg: regex
:return:
"""
finder = re.compile(reg, re.MULTILINE)
return finder.findall(text)[0]
def process_tabular_data(headers_text, lines_list, wanted_list, replacement, cls=None):
"""
Bring it on and then just Class(*this) to profit!
:param headers_text: ["D", "C", "B"]
:param lines_list: [["VD", "VC", "VB"], ...]
:param wanted_list: ["A", "B", "C", "D"]
:param replacement: None or ''
:param cls: If a class is to be instanced with result
:return: [[None, "VB", "VC", "VD"], ...]
"""
columns = process_columns(headers_text, wanted_list)
table = [[line[columns[w]] if w in columns.keys() else replacement for w in wanted_list] for line in lines_list]
return [cls(*line) for line in table] if cls else table
def process_columns(headers_text, wanted_list):
"""
Find desired sublists in a huge list
    :param headers_text: list of header names
:param wanted_list: list
:return: dict
"""
columns = {word: headers_text.index(word) for word in wanted_list if word in headers_text}
if len(columns) < 1:
raise ValueError
return columns
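# Illustrative example (matching the docstrings above):
#   process_columns(["D", "C", "B"], ["A", "B", "C", "D"]) -> {'B': 2, 'C': 1, 'D': 0}
#   'A' is absent from the headers, so it is omitted and the caller fills it with `replacement`.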
| true
|
d8fed868b7675ca62266e52693b8e8c24eac9f6b
|
Python
|
rakibkuddus1109/pythonClass
|
/multithreading.py
|
UTF-8
| 478
| 3.71875
| 4
|
[] |
no_license
|
import time
import threading
a = [2,3,4,5]
def square(a):
for j in a:
time.sleep(0.5)
print(j**2)
def cube(a):
for j in a:
time.sleep(0.3)
print(j**3)
t = time.time()
# square(a)
# cube(a)
t1 = threading.Thread(target=square,args=(a,))
t2 = threading.Thread(target=cube,args=(a,))
t1.start()
t2.start()
# join: block the main thread until both worker threads have finished
t1.join()
t2.join()
print("Execution time",time.time() - t)
| true
|
434c9b0d8b8475bdbfd374f18de855dc000b0942
|
Python
|
genebarsukov/MadFuzzWebScraper
|
/src/models/Story.py
|
UTF-8
| 1,609
| 3.203125
| 3
|
[] |
no_license
|
class Story(object):
"""
A simple object to hold the parsed story parameters
The variables that are initialized are done so to match the default database values
"""
story_id = None
url = ''
title = ''
author = ''
body = ''
snippet = ''
source_id = None
active = False
position = 0
rating = 100
up_votes = 0
down_votes = 0
def __init__(self):
"""
Constructor
"""
pass
def escapeValues(self, db_conn):
"""
Escape all of the story's values before inserting into a database
"""
# Enforce integers
if self.story_id is not None:
self.story_id = int(self.story_id)
if self.source_id is not None:
self.source_id = int(self.source_id)
if self.position is not None:
self.position = int(self.position)
if self.rating is not None:
self.rating = int(self.rating)
if self.up_votes is not None:
self.up_votes = int(self.up_votes)
if self.down_votes is not None:
self.down_votes = int(self.down_votes)
# Escape strings
if self.url is not None:
self.url = db_conn.escape(self.url)
if self.title is not None:
self.title = db_conn.escape(self.title)
if self.author is not None:
self.author = db_conn.escape(self.author)
if self.body is not None:
self.body = db_conn.escape(self.body)
if self.snippet is not None:
self.snippet = db_conn.escape(self.snippet)
| true
|
664f8f38d7aeea06896476bfeb26faef467b80f0
|
Python
|
wattaihei/ProgrammingContest
|
/AtCoder/キーエンス2020/probD.py
|
UTF-8
| 3,094
| 3.125
| 3
|
[] |
no_license
|
# instead of AVLTree
class BITbisect():
def __init__(self, max):
self.max = max
self.data = [0]*(self.max+1)
    # Prefix sum over positions 1..i
    # Walk the set bits from the lowest upward
def query_sum(self, i):
s = 0
while i > 0:
s += self.data[i]
i -= i & -i
return s
    # Add x to the i-th element
    # Update every interval that covers position i
def add(self, i, x):
while i <= self.max:
self.data[i] += x
i += i & -i
def insert(self, x):
self.add(x, 1)
def delete(self, x):
self.add(x, -1)
def count(self, x):
return self.query_sum(x) - self.query_sum(x-1)
def length(self):
return self.query_sum(self.max)
    # c-th smallest stored value (0-indexed)
# O(log(N))
def search(self, c):
c += 1
s = 0
ind = 0
l = self.max.bit_length()
for i in reversed(range(l)):
if ind + (1<<i) <= self.max:
if s + self.data[ind+(1<<i)] < c:
s += self.data[ind+(1<<i)]
ind += (1<<i)
if ind == self.max:
return False
return ind + 1
def bisect_right(self, x):
return self.query_sum(x)
def bisect_left(self, x):
if x == 1:
return 0
return self.query_sum(x-1)
    # Print the multiset contents like a list
def display(self):
print('inside BIT:', end=' ')
for x in range(1, self.max+1):
if self.count(x):
c = self.count(x)
for _ in range(c):
print(x, end=' ')
print()
import sys
input = sys.stdin.readline
from operator import itemgetter
INF = 10**15
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
for i in range(N):
if i%2 == 1:
a = A[i]
A[i] = B[i]
B[i] = a
INDs = []
def dfs(p,L):
if p == N:
if len(L) == (N+1)//2:
INDs.append(L)
return
dfs(p+1, L+[p])
dfs(p+1, L)
return
dfs(0, [])
ans = INF
for IND in INDs:
As = []
Bs = []
usingA = [False]*N
for ind in IND:
usingA[ind] = True
for ind in range(N):
if usingA[ind]:
As.append((A[ind], ind))
else:
Bs.append((B[ind], ind))
As.sort()
Bs.sort()
ok = True
pre = -INF
Array = [None]*N
for i in range(N):
if i%2 == 0:
if As[i//2][0] < pre:
ok = False
break
Array[As[i//2][1]] = i+1
pre = As[i//2][0]
else:
if Bs[i//2][0] < pre:
ok = False
break
Array[Bs[i//2][1]] = i+1
pre = Bs[i//2][0]
if not ok:
continue
bit = BITbisect(N)
nowans = 0
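    # Count inversions of Array with the BIT: for each element, add the number of
    # previously inserted values that are strictly greater than it.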
for j, a in enumerate(Array):
nowans += j - bit.query_sum(a)
bit.add(a, 1)
if nowans < ans:
ans = nowans
if ans == INF:
print(-1)
else:
print(ans)
| true
|
a6b00cd4e644c018b8cbb2cc86b053d8a17ca7dc
|
Python
|
burakbayramli/books
|
/PHY_604_Computational_Methods_in_Physics_and_Astrophysics_II_Zingale/code1/ODEs/eigenvalues/finite-well.py
|
UTF-8
| 1,233
| 2.8125
| 3
|
[] |
no_license
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import schrodinger
def V(x):
""" the potential well -- a finite square well """
idx = np.abs(x) < 1.0
Vwell = np.zeros_like(x)
Vwell[idx] = -1000.0
return Vwell
# pick a starting point far from the action -- we need this pretty
# big
x0 = 5.0
x_match = 0.257
nsteps = 250
s = schrodinger.Schrodinger(V, x_match=x_match, x_far=x0, nsteps=nsteps)
# some examples
for n, (E_old, E_new) in enumerate([(-970, -960), (-980, -975), (-990, -980), (-999, -998), (-100, -150)]):
if n == 4:
E, xi_s, psi_s = s.solve(E_old, E_new, plot_intermediate=True)
else:
E, xi_s, psi_s = s.solve(E_old, E_new)
print("eigenvalue is E = {}".format(E))
Vs = V(xi_s)
plt.clf()
plt.plot(xi_s, Vs/max(np.abs(Vs)), lw=2, label=r"$V(x)/\max{V(x)}$")
plt.plot(xi_s, psi_s/max(np.abs(psi_s)), label=r"$\psi(x)/\max{\psi(x)}$")
plt.xlabel("x")
plt.title(r"eigenvalue, $\mathcal{{E}}$ = {}".format(E))
plt.xlim(-0.5*x0, 0.5*x0)
plt.ylim(-1.2, 1.2)
plt.legend(frameon=False, loc="best")
ep = "{:6.2f}".format(E)
plt.savefig("finite-well-psi-E{}.png".format(ep.strip()))
| true
|
ab964e1838d1df76b32cfe80a28e076883981ce6
|
Python
|
AaronBecker/project-euler
|
/euler081.py
|
UTF-8
| 795
| 3.234375
| 3
|
[] |
no_license
|
with open('euler081_input.txt') as f:
pe81 = [map(int, line.strip().split(',')) for line in f.readlines()]
def euler81(matrix=pe81):
"""http://projecteuler.net/index.php?section=problems&id=81
Find the minimal path sum from the top left to the bottom right by moving
right and down."""
shortest_path = [[matrix[0][0]]*len(matrix[0]) for n in xrange(len(matrix))]
for n in xrange(1, len(matrix[0])):
shortest_path[0][n] = matrix[0][n] + shortest_path[0][n-1]
shortest_path[n][0] = matrix[n][0] + shortest_path[n-1][0]
for y in xrange(1, len(matrix[0])):
for x in xrange(1, len(matrix)):
shortest_path[x][y] = matrix[x][y] + \
min(shortest_path[x-1][y], shortest_path[x][y-1])
return shortest_path[-1][-1]
| true
|
5c96603cea1fe92470dd135c43b6d4dbab8e2793
|
Python
|
felixhalim/python-bot
|
/update_reg_user.py
|
UTF-8
| 1,340
| 2.640625
| 3
|
[] |
no_license
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('spreadsheet-token.json', scope)
client = gspread.authorize(creds)
registered_user = client.open('Project_Goodall').worksheet("Registered_User")
Dictionary={}
# Function to sync with data on spreadsheet
def getLatestData():
tele_id_list = registered_user.col_values(1)
matric_list = registered_user.col_values(2)
name_list = registered_user.col_values(3)
counter_help_list = registered_user.col_values(4)
bool_donate_list = registered_user.col_values(5)
index_list = registered_user.col_values(6)
value_list = list(map(list, zip(matric_list,name_list,counter_help_list,bool_donate_list,index_list)))
Dictionary.update(dict(zip(tele_id_list, value_list)))
return Dictionary
# Function to alter data on spreadsheet
def setData(telegram_id,index_dict,value):
if(index_dict=="bool_donate"):
index_dict=3
elif(index_dict=="counter_help"):
index_dict=2
row_dict = Dictionary[telegram_id][4] # 4 is the index in terms of row
registered_user.update_cell(int(row_dict)+2, int(index_dict)+2, value) # +2 offset based on spreadsheet
| true
|
e342384f741b23537d88e26e4a92c2dedbd77777
|
Python
|
cristinarivera/python
|
/untitled-11.py
|
UTF-8
| 220
| 3.109375
| 3
|
[] |
no_license
|
def udacify(string):
word = 'U'+string
return word
# Remove the hash, #, from infront of print to test your code.
print udacify('dacians')
#>>> Udacians
print udacify('turn')
#>>> Uturn
| true
|
8aa905946ab08c7ac77609161bb31eba0d566db5
|
Python
|
ironmann250/python-wikiquotes
|
/tests/test_random_titles.py
|
UTF-8
| 538
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
import wikiquote
import unittest
class SearchTest(unittest.TestCase):
"""
Test wikiquote.random_titles()
"""
def test_random(self):
for lang in wikiquote.langs.SUPPORTED_LANGUAGES:
results = wikiquote.random_titles(lang=lang, max_titles=20)
self.assertTrue(len(results) == 20)
def test_unsupported_lang(self):
self.assertRaises(wikiquote.utils.UnsupportedLanguageException,
wikiquote.random_titles,
lang='hlhljopjpojkopijj')
| true
|
cc602a0e49d72a055f8ee86c17d720836979e6c3
|
Python
|
veera-sivarajan/Weather-Graphing
|
/graph.py
|
UTF-8
| 270
| 2.765625
| 3
|
[] |
no_license
|
import datetime
from matplotlib import pyplot as plt
from weather import get_weather
def graph(x, y):
plt.xlabel('Time')
plt.ylabel('Temperature')
    plt.xlim(0, 24)
    plt.xticks(range(0, 25, 2))
plt.ylim(10, 60)
plt.plot(x, y)
plt.show()
#plt.savefig("weather graph.pdf")
| true
|
3868551324de22aac6c858a809eb7b4f7bf60e15
|
Python
|
mmweber2/hackerrank
|
/test_convert_string.py
|
UTF-8
| 1,053
| 3.25
| 3
|
[] |
no_license
|
from nose.tools import assert_equals
from convert_string import convert_string
from string import ascii_lowercase as letters
def test_no_duplicates():
assert_equals("xyz", convert_string("xyz"))
def test_all_duplicate_no_wrap():
assert_equals("abc", convert_string("aaa"))
def test_all_duplicate_wrap():
assert_equals("zab", convert_string("zzz"))
def test_multiple_duplicates():
assert_equals("ampbq", convert_string("ampap"))
def test_empty_string():
assert_equals("", convert_string(""))
# Does an exact wrapping of 26 work?
def test_26_a():
assert_equals(letters, convert_string("a" * 26))
def test_26_z():
assert_equals("z" + letters[:25], convert_string("z" * 26))
# Does a wrapping of more than 26 work?
def test_27_a():
assert_equals(letters + "a", convert_string("a" * 27))
def test_27_k():
assert_equals("klmnopqrstuvwxyzabcdefghijk", convert_string("k" * 27))
# Does a wrapping of a multiple of 26 work?
def test_52_a():
assert_equals(letters * 2 + "dogs", convert_string("a" * 52 + "dogs"))
| true
|
f8d6403b16a48142d3ecac4ccf141209961d07ba
|
Python
|
fangwendong/machineLearning
|
/classify_tree.py
|
UTF-8
| 2,283
| 3.296875
| 3
|
[] |
no_license
|
# coding: utf-8
from sklearn.datasets import load_iris
from sklearn import tree
import numpy as np
'''
The iris data set has 150 samples; here the first 140 are used for training and the last 10 for testing.
'''
def predict_train(x_train, y_train):
    '''
    Train a decision tree using information entropy as the split criterion.
    Reference: http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier
    '''
clf = tree.DecisionTreeClassifier(criterion='entropy')
# print(clf)
clf.fit(x_train, y_train)
    ''' The importances reflect each feature's influence: the larger the value, the bigger its role in the classification. '''
print 'feature_importances_: %s' % clf.feature_importances_
    '''Print the predictions on the training data'''
y_pre = clf.predict(x_train)
# print(x_train)
print "结果做差"
cha = y_pre - y_train
print(cha)
print(np.mean(y_pre == y_train))
return y_pre, clf
def show_pdf(clf):
    '''
    Visualization output.
    Write the decision-tree structure to a file: http://sklearn.lzjqsdd.com/modules/tree.html
    Error on Mac: pydotplus.graphviz.InvocationException: GraphViz's executables not found
    Fix: sudo brew install graphviz
    Writing reference: http://www.jianshu.com/p/59b510bafb4d
    '''
# with open("testResult/tree.dot", 'w') as f:
# from sklearn.externals.six import StringIO
# tree.export_graphviz(clf, out_file=f)
import pydotplus
from sklearn.externals.six import StringIO
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("/home/wendong/PycharmProjects/sklearn/output/tree1.pdf")
if __name__ == '__main__':
iris = load_iris()
    # First 140 samples are used as training data
train_data = iris.data[:140]
train_lable = iris.target[:140]
print(train_data.shape[0])
print(iris.target)
    # Train the model with a decision tree
ypre, clf = predict_train(train_data, train_lable)
    # Render the decision tree to PDF
show_pdf(clf)
    # Last 10 samples are used as test data
test_data = iris.data[140:]
test_lable = iris.target[140:]
test_pre = clf.predict(test_data)
print("测试结果做差")
cha = test_pre - test_lable
print(cha)
| true
|
79df0ac8f7455f6993fef763f6cfa3797c49dff4
|
Python
|
pylinx64/mon_python_16
|
/mon_python_16/pepsibot/teext.py
|
UTF-8
| 219
| 3.5625
| 4
|
[] |
no_license
|
print('HELLO')
print('HELLO' == 'hello')
print(10 > 9 )
print(10 / 1)
print('Hello' == 'hello')
print('HELLO'.lower())
print('как' in 'как дела ?')
print('как' in 'приветкакдела?')
| true
|
181abba50e371627c90d6a1b676851217da702f2
|
Python
|
diekhans/t2t-chm13-gene-analysis
|
/bin/bioTypeToCat
|
UTF-8
| 735
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import argparse
from pycbio.sys import fileOps
from bioTypeCat import BioCategory
def parseArgs():
usage = """
Convert biotypes, including CAT-specified, to more general categories
"""
parser = argparse.ArgumentParser(description=usage)
parser.add_argument('inTypes', nargs='?', default="/dev/stdin",
help="")
parser.add_argument('outCats', nargs='?', default="/dev/stdout",
help="")
args = parser.parse_args()
return args
def main(args):
with fileOps.opengz(args.outCats, "w") as outFh:
for line in fileOps.iterLines(args.inTypes):
print(BioCategory.fromCatType(line.strip()), file=outFh)
main(parseArgs())
| true
|
745f10e3784d7375279e5ee065eeaa87f9eb7945
|
Python
|
JesseWright/cs373
|
/notes/03-24.py
|
UTF-8
| 836
| 2.90625
| 3
|
[] |
no_license
|
# -----------
# Fri, 24 Mar
# -----------
from typing import Callable, Dict, Iterable, Iterator

def theta_join (
    r: Iterable[Dict[str, int]],
    s: Iterable[Dict[str, int]],
    f: Callable[[Dict[str, int], Dict[str, int]], bool]
    ) -> Iterator[Dict[str, int]] :
    for v1 in r :
        for v2 in s :
            if f(v1, v2) :
                yield dict(v1, **v2)
    # equivalent generator-expression form:
    # return (dict(v1, **v2) for v1 in r for v2 in s if f(v1, v2))

def natural_join (
    r: Iterable[Dict[str, int]],
    s: Iterable[Dict[str, int]]
    ) -> Iterator[Dict[str, int]] :
    def f (v1, v2) :
        for k in v1 :
            if k in v2 :
                if v1[k] != v2[k] :
                    return False
        return True

    # equivalent one-liner:
    # def f (v1, v2) :
    #     return all([v1[k] == v2[k] for k in v1 if k in v2])

    for v1 in r :
        for v2 in s :
            if f(v1, v2) :
                yield dict(v1, **v2)
| true
|
194c58beac8c5f868ac514553662348f144152cc
|
Python
|
q2806060/python-note
|
/numpydemo/03/demo3-05.py
|
UTF-8
| 487
| 3.078125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as mp
# Generate the grid-point coordinate matrices
n = 1000
x, y = np.meshgrid(np.linspace(-3, 3,n), np.linspace(-3, 3, n))
# Compute the z height at each (x, y) coordinate
z = (1-x/2 + x**5 + y**3) * np.exp(-x**2 - y**2)
mp.figure("Imshow", facecolor="lightgray")
mp.title("Imshow", fontsize=18)
mp.xlabel("X", fontsize=14)
mp.ylabel("Y", fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=":")
mp.imshow(z, cmap='jet', origin="lower")
mp.show()
| true
|
8e8fddd224fbffb97a7f8da3930859db6ff3a1c4
|
Python
|
SoniaAmezcua/qa_minds_proyecto_final
|
/tests/test_menu_categories.py
|
UTF-8
| 1,016
| 2.59375
| 3
|
[] |
no_license
|
from actions.shop_actions import ShopActions
from core.utils import datafile_handler as data_file
from facades import menu_facade as menu
from facades import menu_categories_facade as menu_categories
import pytest
import datetime
from time import sleep
# Test 2: verify the elements of the categories menu
@pytest.mark.parametrize('title_category, products_by_category',data_file.get_data('./input_data/menu_category.csv'))
def test_menu_categories(title_category, products_by_category):
global test_case_name
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') #20210511_211246
test_case_name = 'test_menu_categories {}'.format(timestamp)
menu.click_menu_option_shop()
menu_categories.view_menu_options(title_category, products_by_category)
# Method that always runs after each test case.
# Closes the browser instance
def teardown():
shop_actions = ShopActions()
shop_actions.save_screenshot(test_case_name)
shop_actions.close_browser()
| true
|
4a21d14b98d02566e9d097f354b1083296a4d442
|
Python
|
fantasysea/HelloPython
|
/getjikexuexi.py
|
UTF-8
| 1,683
| 2.65625
| 3
|
[] |
no_license
|
__author__ = 'wuheyou'
__author__ = 'CC'
# coding=utf-8
# -*- coding: utf8 -*-
import requests
import re
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# r = requests.get("http://www.baidu.com/")
# r.encoding = 'utf-8'
# print r.content
class spider:
def getHtml(self,url):
return requests.get(url)
def getItem(self,html):
item = {}
item['title'] = re.findall('title="(.*?)" ',html,re.S)[0]
item['content'] = re.findall('none;">(.*?)</p>',html,re.S)[0]
cf = re.findall('<em>(.*?)</em>',html,re.S)
item['time'] = cf[0]
item['xinhao'] = cf[1]
item['number'] = re.findall('learn-number">(.*?)</em>',html,re.S)[0]
return item
    def saveItem(self,item):
        f = open('info.txt','a')
        f.writelines(item['title'].strip('\r\n\t') + '\n')
        f.writelines(item['content'].strip('\r\n\t') + '\n')
        f.writelines(item['time'].strip('\r\n\t') + '\n')
        f.writelines(item['xinhao'].strip('\r\n\t') + '\n')
        f.writelines(item['number'].strip('\r\n\t') + '\n')
        f.close()
if __name__=='__main__':
url = 'http://www.jikexueyuan.com/course/?pageNum=2'
jikespider = spider()
html = jikespider.getHtml(url)
html.encoding = 'utf-8'
print html.text
list = re.findall('deg=\"0\" >(.*?)</li>',html.text,re.S)
print list
for each in list:
item = jikespider.getItem(each)
print item
jikespider.saveItem(item)
print item['title'].strip('\r\n\t')
print item['content'].strip('\r\n\t')
print item['time'].strip('\n\t')
print item['xinhao'].strip('\r\n\t')
print item['number'].strip('\r\n\t')
| true
|
426ce8416c1463b6c670783ce6efb9cceb50c450
|
Python
|
ragnartrades/LimitOrderBooks
|
/Analytics/ExtractPrices.py
|
UTF-8
| 1,360
| 3.140625
| 3
|
[] |
no_license
|
from Analytics.Features import Features
from Analytics.LimitOrderBookSeries import LimitOrderBookSeries
class ExtractPrices(Features):
def __init__(self):
pass
def extract_data(self, data):
"""
:param data: data frame
        :return: [feature rows, price-direction labels] for classifier training
"""
# return a limit order book series
levels = len(data.columns) / 4
ask_price = data['AskPrice1'].tolist()
ask_size = data['AskSize1'].tolist()
bid_price = data['BidPrice1'].tolist()
bid_size = data['BidSize1'].tolist()
mid_price = [0.5*(ask_price[i]+bid_price[i]) for i in range(0,len(ask_price))]
input_data = list()
output_data = list()
for i in range(0, len(ask_price)):
feature_set = list()
feature_set.append(ask_price[i])
feature_set.append(ask_size[i])
feature_set.append(bid_price[i])
feature_set.append(bid_size[i])
input_data.append(feature_set)
for i in range(1, len(mid_price)):
delta = mid_price[i] - mid_price[i-1]
if delta > 0:
output_data.append('Upward')
elif delta < 0:
output_data.append('Downward')
else:
output_data.append('Stationary')
return [input_data[:-1], output_data]
| true
|
8000a58f89b46c41be0023b9aef4066aa819b065
|
Python
|
rexarabe/Python_projects2
|
/020_operator/001_operator.py
|
UTF-8
| 209
| 4.125
| 4
|
[] |
no_license
|
#!/bin/python
"""Python Operators
Operators are used to perform operations on variables and values.
"""
x = 10
y = 15
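# The prints below exercise: addition, subtraction, multiplication, division,
# modulus (y % x), exponentiation (x ** y) and floor division (y // x).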
print(x+y)
print(x-y)
print(x*y)
print(x/y)
print(y%x)
print(x**y)
print(y//x)
| true
|
5e5242c4da067d8149d8374e4f5038f2065a1652
|
Python
|
shoeferg13/fa18-hw-ref
|
/hw4.py
|
UTF-8
| 6,935
| 4.34375
| 4
|
[] |
no_license
|
"""
CS 196 FA18 HW4
Prepared by Andrew, Emilio, and Prithvi
You might find certain default Python packages immensely helpful.
"""
# Good luck!
"""
most_common_char
Given an input string s, return the most common character in s.
"""
def most_common_char(s):
    if len(s) == 0:
        return None
    s = s.lower()
    best_char = None
    max_count = 0
    for char in s:
        if s.count(char) > max_count:
            max_count = s.count(char)
            best_char = char
    return best_char
print(most_common_char(""))
"""
alphabet_finder
Given an input string s, return the shortest prefix of s (i.e. some s' = s[0:i] for some 0 < i <= n)
that contains all the letters of the alphabet.
If there is no such prefix, return None.
Your function should recognize letters in both cases, i.e. "qwertyuiopASDFGHJKLzxcvbnm" is a valid alphabet.
Example 1:
Argument:
"qwertyuiopASDFGHJKLzxcvbnm insensitive paella"
Return:
"qwertyuiopASDFGHJKLzxcvbnm"
Example 2:
Argument:
"aardvarks are cool!"
Return:
None
"""
def alphabet_finder(s):
    alpha = set("abcdefghijklmnopqrstuvwxyz")
    lower = s.lower()
    for i in range(0, len(lower)):
        alpha.discard(lower[i])
        if len(alpha) == 0:
            return s[0:i + 1]
    return None
# """
# longest_unique_subarray
#
# Given an input list of integers arr,
# return a list with two values [a,b] such that arr[a:a+b] is the longest unique subarray.
# That is to say, all the elements of arr[a:a+b] must be unique,
# and b must be the largest value possible for the array.
# If multiple such subarrays exist (i.e. same b, different a), use the lowest value of a.
#
# Example:
# Argument:
# [1, 2, 3, 1, 4, 5, 6]
# Return:
# [1, 6]
# """
# def longest_unique_subarray(arr):
# pass
#
#
"""
string_my_one_true_love
A former(?) CA for this course really like[d] strings that have the same occurrences of letters.
This means the staff member likes "aabbcc", "ccddee", "abcabcabc", etcetera.
But the person who wrote all of your homework sets wants to trick the staff with really long strings,
that either could be the type of string that the staff member likes,
or a string that the CA would like if you remove exactly one character from the string.
Return True if it's a string that the homework creator made, and False otherwise.
Don't treat any characters specially, i.e. 'a' and 'A' are different characters.
Ungraded food for thought:
Ideally, your method should also work on integer arrays without any modification.
Example 1:
Argument:
"abcbabcdcdda"
There are 3 a's, 3 b's, 3 c's, and 3 d's. That means it is a very likable string!
Return:
True
Example 2:
Argument:
"aaabbbcccddde"
There are 3 a's, 3 b's, 3 c's, and 3 d's. We have 1 e, which we can remove.
Return:
True
Example 3:
Argument:
"aaabbbcccdddeeffgg"
This string is similar to the other ones, except with 2 e's, f's and g's at the end.
To make this string likable, we need to remove the 2 e's, f's, and g's or we can remove
one a, b, c, and d. However all of these require more than one removal, so it becomes invalid.
Return:
False
"""
def string_my_one_true_love(s):
    if s is None:
        return None
    char_counts = {char: 0 for char in s}
    for char in s:
        char_counts[char] += 1
    values = sorted(char_counts.values())
    if len(set(values)) <= 1:
        return True  # every letter already occurs the same number of times
    # Otherwise exactly one character may be removed: either a letter that occurs
    # only once, or one occurrence of the single letter that is "one too many".
    freq = {}
    for v in values:
        freq[v] = freq.get(v, 0) + 1
    if len(freq) == 2:
        (v1, c1), (v2, c2) = sorted(freq.items())
        if v1 == 1 and c1 == 1:
            return True
        if c2 == 1 and v2 == v1 + 1:
            return True
    return False
print(string_my_one_true_love("abcbabcdcdda"))
"""
alive_people
You are given a 2-dimensional list data. Each element in data is a list [birth_year, age_of_death].
Assume that the person was alive in the year (birth_year + age_of_death).
Given that data, return the year where the most people represented in the list were alive.
If there are multiple such years, return the earliest year.
Example:
Argument:
[[1920, 80], [1940, 22], [1961, 10]]
Return:
1961
"""
def alive_people(data):
if data == None or len(data) == 0:
return None
sumArr = []
for i in data:
        for j in range(0, i[1] + 1):
            sumArr.append(i[0] + j)
dictYears = {i : 0 for i in sumArr}
for i in sumArr:
dictYears[i] += 1
returnYear = 0
max = 0
for i in dictYears:
if dictYears[i] > max:
max = dictYears[i]
returnYear = i
return returnYear
# """
# three_sum
#
# Given an input list of integers arr, and a constant target t,
# is there a triplet of distinct elements [a,b,c] so that a + b + c = t?
#
# Return a 2-dimensional list of all the unique triplets as defined above.
# Each inner list should be a triplet as we defined above.
# We don't care about the order of triplets, nor the order of elements in each triplet.
#
# Example:
# Arguments:
# [-1, 0, 1, 2, -1, -4], 0
# Return:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
# """
# def three_sum(arr, t):
# pass
#
#
"""
happy_numbers
Given an input integer n > 0, return the number of happy integers between 1 and n, bounds inclusive.
https://en.wikipedia.org/wiki/Happy_number
Example 1:
Argument:
8
The happy numbers between 1 and 8 are 1 and 7 (7 -> 49 -> 97 -> 130 -> 10 -> 1)
Return:
2468 // 1234 (i.e., 2)
Example 2:
Argument:
15
Return:
4294967296 ** (1 / 16) (i.e., 4)
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1.
"""
# def happy_numbers(n):
# happyArr = []
# if (n == 0):
# return None
# if (n == 1):
# return 1
# nArr = []
# for i in range(1, n):
# result = 0
# nArr = [int(d) for d in str(i)]
# for j in nArr:
# result += j**2
# checker = [int(d) for d in str(result)]
# if (len(checker) == 1)
# if (result == 1):
# happArr.append(j)
# return happyArr
#
#
# """
# zero_sum_subarray
#
# Given an input list of integers arr,
# return a list with two values [a,b] such that sum(arr[a:a+b]) == 0.
# In plain English, give us the location of a subarray of arr that starts at index a
# and continues for b elements, so that the sum of the subarray you indicated is zero.
# If multiple such subarrays exist, use the lowest valid a, and then lowest valid b,
# in that order of priority.
# If no such subarray exists, return None.
#
# Ungraded food for thought:
# Think about how to generalize your solution to any arbitrary target sum.
#
# Example 1:
# Argument:
# [0, 1, 2, 3, 4, 5]
# Clearly, the first element by itself forms a subarray with sum == 0.
# Return:
# [0, 1]
#
# Example 2:
# Argument:
# [10, 20, -20, 3, 21, 2, -6]
# In this case, arr[1:3] = [20, -20], so there is a zero sum subarray.
# Return:
# [1, 2]
# """
# def zero_sum_subarray(arr):
# sum = 0
# startIndex = 0
# endIndex = 0
# returnArray = []
# for i in arr:
# sum += i
# if sum == 0:
#
#
#
#
#
| true
|
9e8ce4ad5f043443ae43d8445d21afabd0e73f1e
|
Python
|
amchugh/rocket
|
/python/rocketfollow.py
|
UTF-8
| 3,919
| 2.71875
| 3
|
[] |
no_license
|
import rocketenv
import pygame
import random
SIZE = (800,900)
FLOOR = 800
class Missile:
def __init__(self, world_size, pos, vel, dt):
self.world_size = world_size
self.pos = [pos[0], pos[1]]
self.vel = vel
self.dt = dt
def step(self):
self.pos[0] += self.vel[0] * self.dt
self.pos[1] += self.vel[1] * self.dt
return self.pos[0] < 0 or self.pos[0] > self.world_size[0] or self.pos[1] < 0 or self.pos[1] > FLOOR
def draw(self, screen):
LENGTH = 1
fin = (self.pos[0] - self.vel[0] * LENGTH, self.pos[1] - self.vel[1] * LENGTH)
pygame.draw.line(screen, (255,0,120), self.pos, fin, 3)
SLANT = 200
def makeMissile():
startX = random.randint(SLANT,SIZE[0]-SLANT)
endX = random.randint(-SLANT,SLANT)
vx, vy = rocketenv.normalize(endX, FLOOR)
vel = (vx*MISSILE_SPEED, vy*MISSILE_SPEED)
return Missile(SIZE, (startX, 0), vel, dt)
fps = 60
dt = 1/30.0
env = rocketenv.RocketEnv(SIZE, dt)
env.reset();
env.initrender(None, False)
clock = pygame.time.Clock()
# ----------------
# ::Switch these::
# Old, easy controller
#controller = rocketenv.MovingRocketController(1.30180042, 5.07822616, 0.00407172, 0.09638811, 4.64927884, 0.22577127, 0.62695137, dt)
#env.rocket.ROCKET_MAX_INDIVIDUAL_FORCE = 3.2
#env.rocket.ROCKET_ROTATIONAL_INERTIA = 0.6
# Harder controller FITNESS: 2.51736263
#controller = rocketenv.MovingRocketController(1.29434620, 14.02993530, -0.00853724, 0.11117422, 4.65478087, 0.29473669, 0.76614155, dt)
#env.rocket.ROCKET_MAX_INDIVIDUAL_FORCE = 10
#env.rocket.ROCKET_ROTATIONAL_INERTIA = 0.4
# Slower, but looks better. Evaluated based on time to point FITNESS: 802.6
#controller = rocketenv.MovingRocketController(1.30103649, 24.12807184, -0.00416717, 0.11021518, 4.79300645, 0.06620423, 0.81834318)
#env.rocket.ROCKET_MAX_INDIVIDUAL_FORCE = 10
#env.rocket.ROCKET_ROTATIONAL_INERTIA = 0.4
# This one is faster, but it overshoots a lot. FITNESS: 291.74
controller = rocketenv.MovingRocketController(1.30300312, 24.10937873, -0.00392903, 0.10979283, 4.80212557, 0.07160376, 0.84756384)
env.rocket.ROCKET_MAX_INDIVIDUAL_FORCE = 10
env.rocket.ROCKET_ROTATIONAL_INERTIA = 0.4
# ----------------
controller.reset()
running = True
missiles = []
MISSILE_SPEED = 30
ROCKET_INTERVAL = 1.6
DIST = 30
SQR_DIST = DIST * DIST
c_i = 0
CENTER = (SIZE[0]/2,SIZE[1]/2)
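# Main loop: aim the controller at the oldest missile (or the mouse / screen centre when none is
# active), spawn a new missile every ROCKET_INTERVAL seconds, and count a missile as intercepted
# once the rocket comes within DIST pixels of it.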
while running:
clock.tick(fps)
keys = pygame.key.get_pressed()
if keys[pygame.K_x]:
env.resetRandom(0);
env.render()
if len(missiles) > 0:
controller.target = missiles[0].pos
dx = env.rocket.x - missiles[0].pos[0]
dy = env.rocket.y - missiles[0].pos[1]
if dx*dx + dy*dy <= SQR_DIST:
missiles.remove(missiles[0])
else:
controller.target = CENTER
if pygame.mouse.get_pressed()[0]: controller.target = pygame.mouse.get_pos()
c_i+=1
if c_i >= ROCKET_INTERVAL * fps:
c_i -= ROCKET_INTERVAL * fps
missiles.append(makeMissile())
for m in missiles:
if m.step():
missiles.remove(m)
m.draw(env.screen)
for i in range(6):
f1, f2 = controller.step(env.rocket.x, env.rocket.y, env.rocket.vx, env.rocket.vy, env.rocket.omega, env.rocket.theta)
env.step((f1, f2))
pygame.draw.circle(env.screen, (0,255,0), controller.target, 2, 0)
pygame.draw.circle(env.screen, (0,0,255), (env.rocket.x, env.rocket.y), DIST, 1)
pygame.draw.line(env.screen, (0,0,0), (0, FLOOR), (SIZE[0], FLOOR))
pygame.display.flip()
# event handling, gets all event from the event queue
for event in pygame.event.get():
# only do something if the event is of type QUIT
if event.type == pygame.QUIT:
# change the value to False, to exit the main loop
running = False
| true
|
5a57ffa79b664adf3f82d0f5a5f09f6ec4b7855b
|
Python
|
Akankshipriya/Trade-App
|
/Trade App/s2Port.py
|
UTF-8
| 4,411
| 2.71875
| 3
|
[] |
no_license
|
from pandas_datareader import data as pdr
import yfinance as yf
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas_datareader as pdr
import matplotlib.axis as ax
from pandas.util.testing import assert_frame_equal
def plotPorts2(start_date,end_date):
plt.close()
yf.pdr_override()
data = pdr.get_data_yahoo("BTC-USD", start_date, end_date)
btc = pd.DataFrame(data)
btc = btc[['Adj Close']]
btc.columns = ['Close']
btc.reset_index(level=0, inplace = True)
# In[8]:
b = pd.DataFrame()
for x in btc:
b ['price'] = btc['Close']
b['sma'] = btc['Close'].rolling(window=20).mean()
b['std'] = btc['Close'].rolling(window=20).std()
b['bolU'] = b['sma'] + (2 * b['std'] )#Calculating Upper Bound
b['bolD'] = b['sma'] - (2 * b['std'] )#Calculating Lower Bound
#Convert Bollinger Bands to %b - bollinger column
b['bollinger'] = (b['price'] - b['bolD'])/(b['bolU']-b['bolD'])
bb1 = b[['price','bolU','bolD','bollinger']]
bb1.columns = ['Price','Upper Band','Lower Band','Bollinger']
bb1.fillna(0,inplace=True)
# In[ ]:
# In[9]:
RSI = pd.DataFrame(index=btc.index)
RSI['price'] = btc ['Close']
RSI['val'] = None
RSI['up'] = 0 #When there is no up value 0
RSI['down'] = 0 #When there is no down value 0
size = RSI.shape[0]
dp = 14
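    # Classic 14-period RSI using simple averages over the trailing dp days:
    # RSI = 100 - 100 / (1 + avg gain / avg loss)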
for x in range(size):
if x ==0:
continue #first day will continue
#calculating the ups , when closing price is higher in day x than x -1
if RSI['price'].iloc[x] > RSI['price'].iloc[x-1]: #
RSI['up'].iloc[x] = RSI['price'].iloc[x] - RSI['price'].iloc[x-1]
else:
#calculating the downs days , when closing price is lower in day x than x -1
RSI['down'].iloc[x] = RSI['price'].iloc[x-1]-RSI['price'].iloc[x]
if x >= dp:
avgUp = RSI['up'][x-dp:x].sum()/dp #calculates avg up of last dp days
avgDown = RSI['down'][x-dp:x].sum()/dp #calculates avg down of last dp days
rs = avgUp/avgDown #calculation of RS
RSI['val'].iloc[x] = 100 - 100/(1+rs)
signals = pd.DataFrame(index=btc.index)#copy index for BTC
signals['price'] = btc['Close']
signals['id']= 0.0
signals['RSI'] = RSI['val']
signals['RSI'].fillna(0, inplace=True)
signals['bollinger'] = bb1['Bollinger']
signals['id']=[np.nan for i in signals.index]
# only verifications for days after DPth (period of RSI) day
signals['id'][dp:].loc[((signals['RSI'] < 30) & (signals['bollinger'] < 0))] = 1
signals['id'][dp:].loc[((signals['RSI'] > 70) & (signals['bollinger'] > 1))] = 0
signals['id'].ffill(inplace=True) #fill empty values with 0
signals['id'].fillna(0,inplace=True)
signals['buySell'] = signals['id'].diff()
signals['buySell'].fillna(0,inplace=True)
###################################################################################
# Code taken from Willems, K., 2019. (Tutorial) Python For Finance: Algorithmic Trading
initInvestment = 100000
stocksOwned = pd.DataFrame(index=signals.index).fillna(0.0)
noCur = 10 #No of currency to be purchased
stocksOwned['BTC'] = noCur*signals['id']
portfolio = pd.DataFrame(index=signals.index)
portfolio['Holdings'] = stocksOwned['BTC'].multiply(btc['Close'], axis=0)
buySell = stocksOwned['BTC'].diff()
portfolio['cash'] = initInvestment - (buySell.multiply(btc['Close'], axis=0)).cumsum()
portfolio['total'] = portfolio['cash'] + portfolio['Holdings']
portfolio['cash'][0] = initInvestment
portfolio['total'][0] = initInvestment
###################################################################################
# In[50]:
fig, (ax) = plt.subplots(1, 1, sharex=True)
ax.plot(portfolio.index, portfolio['total'], label='Price')
ax.set_xlabel('Date')
ax.set_ylabel('Value of portfolio in USD')
day = signals.loc[signals.buySell == 1.0].index
day2 = signals.loc[signals.buySell == -1.0].index
ax.scatter(x = day, y=portfolio.loc[day, 'total'], color = 'green')
ax.scatter(x = day2, y=portfolio.loc[day2, 'total'], color = 'red')
plt.show()
| true
|
59f24fe9070f697e695846fba7883bed22e66da5
|
Python
|
yusheng88/RookieInstance
|
/Rookie054.py
|
UTF-8
| 1,114
| 3.859375
| 4
|
[] |
no_license
|
# -*- coding = utf-8 -*-
# @Time : 2020/7/8 22:18
# @Author : EmperorHons
# @File : Rookie054.py
# @Software : PyCharm
"""
https://www.runoob.com/python3/python3-examples.html
Python 使用正则表达式提取字符串中的 URL
给定一个字符串,里面包含 URL 地址,需要我们使用正则表达式来获取字符串的 URL
"""
import pysnooper
import re
@pysnooper.snoop()
def re_Find(string):
url = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', string)
"""
(?:x)
匹配 x 但是不记住匹配项。这种括号叫作非捕获括号,
使得你能够定义与正则表达式运算符一起使用的子表达式。
看看这个例子 /(?:foo){1,2}/。如果表达式是 /foo{1,2}/,{1,2} 将只应用于 'foo' 的最后一个字符 'o'。
如果使用非捕获括号,则 {1,2} 会应用于整个 'foo' 单词。执行以上代码输出结果为:
"""
return url
if __name__ == '__main__':
string = "百度 的网页地址为:https://www.baidu.com/, Google 的网页地址为:https://www.google.com"
print("url:", re_Find(string))
| true
|
a4082ceb7a01808681c50811afce5fe7b84055d1
|
Python
|
shills112000/django_course
|
/PYTHON/STATEMENTS_WHILE_FOR_IF/while.py
|
UTF-8
| 1,385
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/local/bin/python3.7
#https://docs.python.org/3/tutorial/introduction.html#first-steps-towards-programming
#while True:
# print ("Looping")
count = 1
while count <=4:
print (f"counting : {count}")
count += 1
count =0
while count < 10:
    if count % 2 == 0:  # skip even numbers
count +=1
continue
print (f"were counting odd numbers: {count}")
count += 1
count += 1
count = 1
while count < 10:
if count % 2 == 0:
break
print (f"We are counting odd numbers {count}")
count += 1
x = 0
while x < 5 :
print (f"the current value of x is {x}")
#x = x+1
x+= 1 # same as above
else:
print (f"x is {x} not less than 5")
# break continue pass
#break : breaks out of the current closest enclosing loop
#continue: goes to the top of the closted enclosing loop
#pass; does nothing at all
# PASS
x = [1,2,3]
for item in x:
    pass  # a placeholder: lets the for loop exist with no body and raises no errors
#CONTINUE
mystring = 'Sammy'
for letter in mystring:
if letter == 'a':
continue # go to the top of the for loop
print (letter)
#BREAK
for letter in mystring:
if letter == 'a':
break # breaks out of loop when a is hit
print (letter)
x = 0
while x < 5:
if x == 2: # break out of loop if x equals to 2
break
print (x)
x+=1
| true
|
43c9ef192077a2f0050a6ebd70495f91ad73bf4a
|
Python
|
jdrubin91/BidirectionalTFAnalyzer
|
/src/Depletion_Simulator.py
|
UTF-8
| 844
| 2.609375
| 3
|
[] |
no_license
|
__author__ = 'Joseph Azofeifa'
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
def simulate(N=10000, mu=0, si=300, a=-1500,b=1500):
f = lambda x: (1.0 / math.sqrt(2*math.pi*pow(si,2)) )*math.exp(-pow(x-mu,2)/(2*pow(si,2)))
u = lambda x: 1.0 / (b-a)
xs = list()
for i in range(N):
x = np.random.uniform(a,b)
F,U = f(x), u(x)
r = f(x) / (u(x) + f(x) )
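        # Rejection step: keep x with probability u(x) / (u(x) + f(x)), which thins out
        # samples near the Gaussian peak (the "depletion" being simulated).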
if np.random.uniform(0,1) > r:
xs.append(x)
c = 0.06
g = lambda x: c*((u(x) / (f(x)+u(x))))
#F = plt.figure()
#ax = F.add_subplot(1,2,1)
#ax.hist(xs,bins=50, normed=1)
#ax2 = F.add_subplot(1,2,2)
#xs.sort()
#N = float(len(xs))
#ax2.plot(xs,[i/N for i in range(len(xs))])
#
#plt.show()
#raise TypeError, "something"
return xs
if __name__ == "__main__":
X = simulate()
| true
|
4eff248cc023f334759b5ce93d11ee1e87263507
|
Python
|
kimdanny/HeapSort-and-QuickSort-Comparison
|
/Automation/testing.py
|
UTF-8
| 336
| 3.125
| 3
|
[] |
no_license
|
from random import randint
import numpy
randomList = []
size = 100_000 + 1100_000 * 5
for x in range(size):
randomList.append(randint(0, 1_000_000))
# See how many repetitions of elements in a single list
a = numpy.array(randomList)
unique, counts =numpy.unique(a, return_counts=True)
this = dict(zip(unique, counts))
print(this)
| true
|
0df114cc93c9f7f7ada529ed52fe07c072be4c27
|
Python
|
kbase/taxonomy_re_api
|
/src/exceptions.py
|
UTF-8
| 768
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
"""Exception classes."""
class ParseError(Exception):
code = -32700
class InvalidRequest(Exception):
code = -32600
class MethodNotFound(Exception):
code = -32601
def __init__(self, method_name):
self.method_name = method_name
class InvalidParams(Exception):
code = -32602
class InternalError(Exception):
code = -32603
class ServerError(Exception):
code = -32000
class REError(Exception):
"""Error from the RE API."""
def __init__(self, resp):
"""Takes a requests response object."""
self.resp_json = None
try:
self.resp_json = resp.json()
except ValueError:
pass
self.resp_text = resp.text
def __str__(self):
return self.resp_text
| true
|
41b14397da76619ae7c34efb12c94aff91ae6821
|
Python
|
da-ferreira/uri-online-judge
|
/uri/3096.py
|
UTF-8
| 592
| 4.09375
| 4
|
[] |
no_license
|
def kamenetsky(number):
"""
Formula de kamenetsky permite saber quantos
digitos tem o fatorial de um numero qualquer > 0
se calcular seu fatorial.
:param number: O numero do fatorial
:return: Quantidade de digitos do fatorial desse numero.
"""
import math
if number < 0: # nao existe
return 0
elif number <= 1:
return 1
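    # Stirling-based digit count: digits(n!) = floor(n*log10(n/e) + log10(2*pi*n)/2) + 1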
digits = (number * math.log10(number / math.e)) + (math.log10(2 * math.pi * number) / 2)
return math.floor(digits) + 1
n = int(input())
print(kamenetsky(n))
| true
|
2dba2dcc2fd0c07c91985f8a2142653e172eae51
|
Python
|
tkhunlertkit/Sketches
|
/python/Bit Coin Computation/test.py
|
UTF-8
| 320
| 2.796875
| 3
|
[] |
no_license
|
import requests
response = requests.get('https://chain.so/api/v2/get_price/BTC/USD',verify=True)
response = response.json()['data']['prices']
cumulative = 0
for i in response:
cumulative += float(i['price'])
for key in i:
print key, i[key]
print
print cumulative
print 'avg:', cumulative / len(response)
| true
|
c7caf6b14a5bb703d03288ca138117f8b52c36bd
|
Python
|
ghomsy/makani
|
/analysis/aero/avl/avl_reader.py
|
UTF-8
| 21,745
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for parsing and analyzing AVL files."""
import collections
import importlib
import logging
import sys
import warnings
import numpy
class AvlReader(object):
"""Parses and analyzes AVL files.
Attributes:
filename: Filename of the AVL file to be processed.
avl: Ordered dict that represents the parsed structure of the AVL file.
properties: Dict that represents properties of the aircraft,
surfaces, and surface sections. Its structure mimics that of
the AVL file itself.
"""
def __init__(self, filename):
"""Initializes the class by parsing the AVL file."""
self.filename = filename
with open(filename, 'r') as f:
self.avl = self.Parse(f.read())
self.properties = self.Analyze(self.avl)
def Analyze(self, avl):
"""Analyze properties of the AVL geometry.
Args:
avl: Ordered dict representing a parsed AVL file.
Returns:
Dict that represents properties of the aircraft, surfaces, and
surface sections. Its structure mimics that of the AVL file
itself.
"""
properties = dict()
properties['surfaces'] = []
for avl_surface in avl['surfaces']:
transform = self._GetSurfaceTransformation(avl_surface)
sections = []
for avl_section in avl_surface['sections']:
sections.append(self._CalcSectionProperties(avl_section, transform))
panels = []
for section1, section2 in zip(sections[0:-1], sections[1:]):
panels.append(self._CalcPanelProperties(section1, section2))
surface = self._CalcSurfaceProperties(sections, panels)
surface['name'] = avl_surface['name']
surface['sections'] = sections
surface['panels'] = panels
properties['surfaces'].append(surface)
return properties
def _CalcSectionProperties(self, avl_section, transform=lambda x: x):
"""Calculates the properties of sections, i.e. stations along the span."""
# Apply the scaling and offset parameters, if any, from the AVL
# file.
chord = avl_section['Chord'] * transform([0.0, 1.0, 0.0])[1]
leading_edge_avl = transform([avl_section['Xle'],
avl_section['Yle'],
avl_section['Zle']])
return {
'chord': chord,
'incidence': numpy.pi / 180.0 * avl_section['Ainc'],
'leading_edge_b': numpy.array([-leading_edge_avl[0],
leading_edge_avl[1],
-leading_edge_avl[2]]),
'quarter_chord_b': numpy.array([-leading_edge_avl[0] - chord / 4.0,
leading_edge_avl[1],
-leading_edge_avl[2]])
}
def _CalcPanelProperties(self, section1, section2):
"""Calculates properties of the areas between sections."""
span = numpy.sqrt(
(section2['leading_edge_b'][1] - section1['leading_edge_b'][1])**2.0 +
(section2['leading_edge_b'][2] - section1['leading_edge_b'][2])**2.0)
area = (section1['chord'] + section2['chord']) * span / 2.0
taper_ratio = section2['chord'] / section1['chord']
c = ((2.0 * section1['chord'] + section2['chord']) /
(section1['chord'] + section2['chord']) / 3.0)
mean_incidence = (c * section1['incidence'] +
(1.0 - c) * section2['incidence'])
aerodynamic_center_b = (c * section1['quarter_chord_b'] +
(1.0 - c) * section2['quarter_chord_b'])
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'mean_aerodynamic_chord': (2.0 / 3.0 * section1['chord'] *
(1.0 + taper_ratio + taper_ratio**2.0) /
(1.0 + taper_ratio)),
'mean_incidence': mean_incidence,
'taper_ratio': taper_ratio,
'span': span,
'standard_mean_chord': area / span
}
def _CalcSurfaceProperties(self, sections, panels):
"""Calculates properties of full surfaces."""
area = 0.0
aerodynamic_center_b = numpy.array([0.0, 0.0, 0.0])
mean_aerodynamic_chord = 0.0
mean_incidence = 0.0
for panel in panels:
area += panel['area']
aerodynamic_center_b += panel['area'] * panel['aerodynamic_center_b']
mean_aerodynamic_chord += panel['area'] * panel['mean_aerodynamic_chord']
mean_incidence += panel['area'] * panel['mean_incidence']
aerodynamic_center_b /= area
mean_aerodynamic_chord /= area
mean_incidence /= area
# Set the span vector from the leading edge of the first section
# to the leading edge of the last section. Ignore the x
# component. Choose the direction such that the span is along the
# surface coordinate y axis.
span_b = sections[0]['leading_edge_b'] - sections[-1]['leading_edge_b']
span_b[0] = 0.0
if abs(span_b[1]) > abs(span_b[2]):
if span_b[1] < 0.0:
span_b *= -1.0
else:
if span_b[2] < 0.0:
span_b *= -1.0
span = numpy.linalg.norm(span_b)
# Surface coordinates are defined such that they are aligned with
# body coordinates for horizontal surfaces and are rotated about
# body x such that surface z is aligned with the *negative* body y
# for vertical surfaces. The negative is required to match the
# convention in AVL.
surface_x_b = [1.0, 0.0, 0.0]
surface_y_b = span_b / span
surface_z_b = numpy.cross(surface_x_b, surface_y_b)
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'aspect_ratio': span * span / area,
'dcm_b2surface': numpy.array([surface_x_b, surface_y_b, surface_z_b]),
'mean_aerodynamic_chord': mean_aerodynamic_chord,
'mean_incidence': mean_incidence,
'span': span,
'standard_mean_chord': area / span
}
def _GetSurfaceTransformation(self, surface):
"""Returns surface scaling and offset transformation function."""
if all([k in surface for k in ['Xscale', 'Yscale', 'Zscale']]):
scale = [surface['Xscale'], surface['Yscale'], surface['Zscale']]
else:
scale = [1.0, 1.0, 1.0]
if all([k in surface for k in ['dX', 'dY', 'dZ']]):
offset = [surface['dX'], surface['dY'], surface['dZ']]
else:
offset = [0.0, 0.0, 0.0]
return lambda coord: [x * m + b for x, m, b in zip(coord, scale, offset)]
def PlotGeometry(self):
"""Plots 3-D line drawing of surfaces."""
# b/120081442: Next lines removed the module initialization load of the
# matplotlib module which was causing a bazel pip-installed package issue on
# batch sim workers.
pyplot = importlib.import_module('matplotlib.pyplot')
mplot_3d = importlib.import_module('mpl_toolkits.mplot3d')
# Importing Axes3D has the side effect of enabling 3D projections, but
# it is not directly used, so we remove it here.
del mplot_3d.Axes3D
axes = pyplot.figure().add_subplot(1, 1, 1, projection='3d')
axes.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_yaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_zaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_xaxis.gridlines.set_color(('blue'))
axes.w_yaxis.gridlines.set_color(('blue'))
axes.w_zaxis.gridlines.set_color(('blue'))
# The _axinfo update requires additional specification of linestyle and
# linewidth on our linux distributions in order to function properly.
axes.w_xaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_yaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_zaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.set_aspect('equal')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
half_span = self.avl['Bref'] / 2.0
axes.set_xlim((-half_span * 0.5, half_span * 1.5))
axes.set_ylim((-half_span, half_span))
axes.set_zlim((-half_span, half_span))
color_order = ['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue',
'violet', 'gray']
legend_plots = []
legend_labels = []
for i, surface in enumerate(self.avl['surfaces']):
transform = self._GetSurfaceTransformation(surface)
leading_edge_xs = []
leading_edge_ys = []
leading_edge_zs = []
trailing_edge_xs = []
trailing_edge_ys = []
trailing_edge_zs = []
for section in surface['sections']:
coord = transform([section['Xle'], section['Yle'], section['Zle']])
leading_edge_xs.append(coord[0])
leading_edge_ys.append(coord[1])
leading_edge_zs.append(coord[2])
coord = transform([section['Xle'] + section['Chord'],
section['Yle'],
section['Zle']])
trailing_edge_xs.append(coord[0])
trailing_edge_ys.append(coord[1])
trailing_edge_zs.append(coord[2])
xs = leading_edge_xs + list(reversed(trailing_edge_xs))
ys = leading_edge_ys + list(reversed(trailing_edge_ys))
zs = leading_edge_zs + list(reversed(trailing_edge_zs))
surface_line, = axes.plot(xs + [xs[0]], ys + [ys[0]], zs + [zs[0]],
color=color_order[i])
legend_plots.append(surface_line)
legend_labels.append(surface['name'])
# Plot symmetric surfaces.
if self.avl['iYsym']:
axes.plot(xs + [xs[0]], -numpy.array(ys + [ys[0]]), zs + [zs[0]], '--',
color=color_order[i])
elif 'Ydupl' in surface:
y_scale = surface['Yscale'] if 'Yscale' in surface else 1.0
axes.plot(xs + [xs[0]],
-numpy.array(ys + [ys[0]]) + 2.0 * surface['Ydupl'] * y_scale,
zs + [zs[0]], '--',
color=color_order[i])
axes.legend(legend_plots, legend_labels, loc='lower left',
prop={'size': 10})
pyplot.show()
def Parse(self, avl_file):
"""Parses AVL file.
Args:
avl_file: String of the read AVL file.
Returns:
Dictionary representing the information stored in the AVL file.
"""
    # Make an iterator over the lines in the file. Automatically remove
    # comments and blank lines. Terminate the file with an END keyword (this
    # isn't mentioned in the AVL documentation, but at least one of the
    # example files uses this convention and it makes the parsing more
    # natural).
lines = iter([l.split('!', 1)[0].strip()
for l in avl_file.splitlines()
if l.strip() and l[0] not in '#!'] + ['END'])
# Parse the AVL header for information on the case name, reference
# areas, etc.
avl, line = self._ParseHeader(lines)
# Loop through the rest of the file, which should only be composed
# of surfaces and bodies.
while True:
tokens = line.split()
keyword = tokens[0][0:4]
if keyword == 'SURFACE'[0:4]:
surface, line = self._ParseSurface(lines)
avl.setdefault('surfaces', []).append(surface)
elif keyword == 'BODY':
body, line = self._ParseBody(lines)
avl.setdefault('body', []).append(body)
else:
if keyword != 'END':
logging.error('Encountered unexpected keyword: %s', tokens[0])
break
return avl
def _ParseHeader(self, lines):
"""Parses header information."""
header = collections.OrderedDict()
header['case'] = lines.next()
tokens = lines.next().split()
header['Mach'] = float(tokens[0])
tokens = lines.next().split()
header['iYsym'] = int(tokens[0])
header['iZsym'] = int(tokens[1])
header['Zsym'] = float(tokens[2])
tokens = lines.next().split()
header['Sref'] = float(tokens[0])
header['Cref'] = float(tokens[1])
header['Bref'] = float(tokens[2])
tokens = lines.next().split()
header['Xref'] = float(tokens[0])
header['Yref'] = float(tokens[1])
header['Zref'] = float(tokens[2])
line = lines.next()
try:
# CDp is optional.
header['CDp'] = float(line.split()[0])
line = lines.next()
except (IndexError, ValueError):
pass
return header, line
def _ParseAirfoil(self, lines):
"""Parses airfoil camber line definition."""
airfoil = [[]]
while True:
line = lines.next()
tokens = line.split()
try:
airfoil.append([float(tokens[0]), float(tokens[1])])
except (IndexError, ValueError):
break
return airfoil, line
def _ParseFilename(self, lines):
"""Parses filename of airfoil definition."""
line = lines.next()
# The file name may either be quoted or not.
if line[0] == '"':
filename = line.split()[0][1:-1]
else:
filename = line
return filename
def _ParseSection(self, lines):
"""Parses information describing cross-section of surface along span."""
section = collections.OrderedDict()
tokens = lines.next().split()
section['Xle'] = float(tokens[0])
section['Yle'] = float(tokens[1])
section['Zle'] = float(tokens[2])
section['Chord'] = float(tokens[3])
section['Ainc'] = float(tokens[4])
try:
# Nspan and Sspace are optional.
section['Nspan'] = int(tokens[5])
section['Sspace'] = float(tokens[6])
except (IndexError, ValueError):
pass
next_line = None
first_keyword = True
while True:
line = next_line if next_line else lines.next()
next_line = None
tokens = line.split()
keyword = tokens[0][0:4]
# Issue warnings if there is a suspicious ordering of the camber
# line keywords. According to the AVL documentation, the camber
# line keywords must immediately follow the data line of the
# SECTION keyword, and also later camber line keywords overwrite
# earlier ones.
if keyword in ['NACA', 'AIRFOIL'[0:4], 'AFILE'[0:4]]:
if not first_keyword:
logging.warning('%s did not immediately follow the data line of the '
'SECTION keyword.', tokens[0])
if any([k in section for k in ['naca', 'airfoil', 'afile']]):
logging.warning('Another camber line definition exists. This will '
'overwrite it.')
if keyword == 'NACA':
# Parse NACA camber line.
section['naca'] = int(lines.next().split()[0])
assert 0 <= section['naca'] and section['naca'] <= 9999
elif keyword == 'AIRFOIL'[0:4]:
# Parse airfoil coordinates.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['airfoil'], next_line = self._ParseAirfoil(lines)
elif keyword == 'AFILE'[0:4]:
# Parse airfoil filename.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['afile'] = self._ParseFilename(lines)
elif keyword == 'DESIGN'[0:4]:
# Parse design variable.
tokens = lines.next().split()
design = collections.OrderedDict()
design['DName'] = tokens[0]
try:
design['Wdes'] = float(tokens[1])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for Wdes.
logging.warning('Wdes value is missing for %s.', design['DName'])
section.setdefault('designs', []).append(design)
elif keyword == 'CONTROL'[0:4]:
# Parse control variable.
tokens = lines.next().split()
control = collections.OrderedDict()
control['name'] = tokens[0]
control['gain'] = float(tokens[1])
control['Xhinge'] = float(tokens[2])
control['XYZhvec'] = [float(tokens[3]),
float(tokens[4]),
float(tokens[5])]
try:
control['SgnDup'] = float(tokens[6])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for SgnDup.
logging.warning('SgnDup value is missing for %s.', control['name'])
section.setdefault('controls', []).append(control)
elif keyword == 'CLAF':
# Parse dCL/da scaling factor.
section['CLaf'] = float(lines.next().split()[0])
elif keyword == 'CDCL':
# Parse CD(CL) function parameters.
tokens = lines.next().split()
section['CL1'] = float(tokens[0])
section['CD1'] = float(tokens[1])
section['CL2'] = float(tokens[2])
section['CD2'] = float(tokens[3])
section['CL3'] = float(tokens[4])
section['CD3'] = float(tokens[5])
else:
break
first_keyword = False
return section, line
def _ParseSurface(self, lines):
"""Parses definition of a lifting surface."""
surface = collections.OrderedDict()
surface['name'] = lines.next()
tokens = lines.next().split()
surface['Nchord'] = int(tokens[0])
surface['Cspace'] = float(tokens[1])
try:
# Nspan and Sspace are optional.
surface['Nspan'] = int(tokens[2])
surface['Sspace'] = float(tokens[3])
except (IndexError, ValueError):
pass
next_line = None
while True:
line = next_line if next_line else lines.next()
next_line = None
keyword = line.split()[0][0:4]
if keyword in ['COMPONENT'[0:4], 'INDEX'[0:4]]:
# Parse component grouping.
surface['Lcomp'] = int(lines.next().split()[0])
elif keyword == 'YDUPLICATE'[0:4]:
# Parse duplicated surface y-plane.
surface['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse surface scaling.
tokens = lines.next().split()
surface['Xscale'] = float(tokens[0])
surface['Yscale'] = float(tokens[1])
surface['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse surface translation.
tokens = lines.next().split()
surface['dX'] = float(tokens[0])
surface['dY'] = float(tokens[1])
surface['dZ'] = float(tokens[2])
elif keyword == 'ANGLE'[0:4]:
# Parse surface incidence angle.
surface['dAinc'] = float(lines.next().split()[0])
elif keyword == 'NOWAKE'[0:4]:
surface['nowake'] = True
elif keyword == 'NOALBE'[0:4]:
surface['noalbe'] = True
elif keyword == 'NOLOAD'[0:4]:
surface['noload'] = True
elif keyword == 'SECTION'[0:4]:
# Parse airfoil section camber line along span.
section, next_line = self._ParseSection(lines)
surface.setdefault('sections', []).append(section)
else:
break
return surface, line
def _ParseBody(self, lines):
"""Parses description of non-lifting bodies shape."""
body = collections.OrderedDict()
body['name'] = lines.next()
tokens = lines.next().split()
body['Nbody'] = int(tokens[0])
body['Bspace'] = float(tokens[1])
while True:
line = lines.next()
keyword = line.split()[0][0:4]
if keyword == 'YDUPLICATE'[0:4]:
body['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse body scaling.
tokens = lines.next().split()
body['Xscale'] = float(tokens[0])
body['Yscale'] = float(tokens[1])
body['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse body translation.
tokens = lines.next().split()
body['dX'] = float(tokens[0])
body['dY'] = float(tokens[1])
body['dZ'] = float(tokens[2])
elif keyword == 'BFILE'[0:4]:
# Parse body shape filename.
body['bfile'] = self._ParseFilename(lines)
else:
break
return body, line
def main(argv):
# Internal matplotlib functions currently trigger the following
# warnings.
warnings.filterwarnings('ignore', 'elementwise comparison failed; returning '
'scalar instead, but in the future will perform '
'elementwise comparison')
warnings.filterwarnings('ignore', 'comparison to `None` will result in an '
'elementwise object comparison in the future.')
logging.basicConfig(stream=sys.stdout,
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO)
avl = AvlReader(argv[1])
avl.PlotGeometry()
logging.shutdown()
if __name__ == '__main__':
main(sys.argv)
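# Hedged usage sketch (the filename 'wing.avl' is illustrative; any AVL geometry
# file will do):
#   reader = AvlReader('wing.avl')
#   for surface in reader.properties['surfaces']:
#       print(surface['name'], surface['area'], surface['aspect_ratio'])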
| true
|
ee25c8ed2eabeb12a3826f3556c152606e6f464e
|
Python
|
nclv/My-Gray-Hacker-Resources
|
/Cryptography/Hash_Functions/MD5/Hash-Length-extension-attacks/VimeoHashExploit/server.py
|
UTF-8
| 1,638
| 2.53125
| 3
|
[
"CC-BY-SA-4.0",
"MIT"
] |
permissive
|
"""
adapted from Filippo Valsorda's tutorial
August 2014
"""
import os
import binascii
import md5
import urlparse
from flask import Flask, request, abort, render_template
PORT = 4242
USER_ID = 42
USER_NAME = "Jack"
API_KEY = binascii.hexlify(os.urandom(16))
API_SECRET = binascii.hexlify(os.urandom(16))
app = Flask(__name__)
def sign_req(values, secret):
s = secret
for k, v in sorted(values.items()):
s += k
s += v
return md5.MD5(s).hexdigest()
@app.route('/')
def show_info():
req = {
"method": "vimeo.test.login",
"api_key": API_KEY
}
return render_template('info.html',
user_id=USER_ID, api_key=API_KEY, user_name=USER_NAME,
api_sig=sign_req(req, API_SECRET))
@app.route('/api', methods=['POST'])
def handle_api():
values = dict(urlparse.parse_qsl(request.get_data()))
if not 'api_sig' in values: abort(400)
if not 'api_key' in values: abort(400)
if not 'method' in values: abort(400)
if values['api_key'] != API_KEY: abort(403)
api_sig = values['api_sig']
del values['api_sig']
if sign_req(values, API_SECRET) != api_sig: abort(403)
if values["method"] == "vimeo.test.login":
return render_template("user.xml", user_id=USER_ID, user_name=USER_NAME)
elif values["method"] == "vimeo.videos.setFavorite":
if not 'video_id' in values: abort(400)
if not 'favorite' in values: abort(400)
if values["video_id"] != '1337': abort(404)
return render_template("ok.xml")
else:
abort(404)
if __name__ == '__main__':
app.debug = True
app.run(port=PORT)
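# Hedged note (not part of the original tutorial code): sign_req() computes
# MD5(secret + sorted_params), and it is exactly this secret-prefix construction
# that enables an MD5 length-extension attack: given one valid (params, api_sig)
# pair, extra parameters can be appended and a new valid signature forged without
# knowing API_SECRET, because MD5 allows hashing to resume from a known digest.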
| true
|
b54c8d732c668e9ed5536d8714db5e08a63e17e3
|
Python
|
Andrii-Dykyi/2D-game-pygame
|
/bullet.py
|
UTF-8
| 992
| 3.34375
| 3
|
[] |
no_license
|
import os
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""Class to manage bullets fired from rocket."""
def __init__(self, game_settings, screen, rocket):
"""Create a bullet object at the rocket's current position."""
super().__init__()
self.screen = screen
self.game_settings = game_settings
self.image = pygame.image.load(os.path.join('images', 'bullet.png'))
self.rect = self.image.get_rect()
self.rect.centerx = rocket.rect.centerx
self.rect.centery = rocket.rect.centery
self.rect.top = rocket.rect.top
self.y = self.rect.y
def update(self):
"""Move the bullet up the screen."""
self.y -= self.game_settings.bullet_speed_factor
# Update the rect position.
self.rect.y = self.y
def blit_bullet(self):
"""Draw the bullet to the screen."""
self.screen.blit(self.image, self.rect)
| true
|
70c5317cc3c2690738f19bfbe738a81fad822a13
|
Python
|
NeugeCZ/czech-derivation
|
/upravy.py
|
UTF-8
| 138
| 2.703125
| 3
|
[] |
no_license
|
import re
def uprava_pravopisu(slovo):
if 'rě' in slovo:
return re.sub('rě', 'ře', slovo)
else:
return slovo
| true
|
ee1a566c75bbd419f3939057260d9a4c228dfb93
|
Python
|
JosephLevinthal/Research-projects
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4160/codes/1723_2504.py
|
UTF-8
| 404
| 3.453125
| 3
|
[] |
no_license
|
v = int(input("Quantidade inicial de copias do virus no sangue de Micaleteia: "))
l = int(input("Quantidade inicial de leucocitos no sangue: "))
pv = int(input("Percentual de multiplicacao diaria do virus: "))
pl = int(input("Percentual de multiplicacao diaria dos leucocitos: "))
dias = 0
while(l < 2*v):
h = (pv * v/ 100)
v = v + h
m = (pl * l/ 100)
l = l + m
dias = dias + 1
print(dias)
| true
|
53ead851be5ab9e0516e264130b7f26fb50250ba
|
Python
|
voidnologo/advent_of_code_2020
|
/1_code.py
|
UTF-8
| 301
| 3.203125
| 3
|
[] |
no_license
|
from itertools import combinations
from math import prod
with open("1_data.txt", "r") as f:
    data = [int(line) for line in f.read().splitlines()]  # convert to ints so sum() and prod() work
print("Part 1:", next((c, prod(c)) for c in combinations(data, 2) if sum(c) == 2020))
print("Part 2:", next((c, prod(c)) for c in combinations(data, 3) if sum(c) == 2020))
| true
|
0348b226ac3edc1e5551b2135d8aa04b4775e588
|
Python
|
ZeweiSong/FAST
|
/filter_database.py
|
UTF-8
| 1,951
| 3.015625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 08 10:51:54 2015
This is a trial script for filtering the UNITE database.
All records with 'unidentified' in their names are discarded, resulting in a clean reference database for taxonomic assignment.
Please feel free to contact me with any questions.
--
Zewei Song
University of Minnesota
Dept. Plant Pathology
songzewei@outlook.com
"""
def main(name_space):
import argparse
import textwrap
from lib import File_IO
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
------------------------
By Zewei Song
University of Minnesota
Dept. Plant Pathology
songzewei@outlook.com
------------------------'''), prog = '-filter_database')
parser.add_argument("-i", "--input", help="Name of the input FASTA file.")
parser.add_argument("-o", "--output", help="Name of the output FASTA file")
args = parser.parse_args(name_space)
database = File_IO.read_seqs(args.input)
count = len(database)
print "Reading in %s ..." % args.input
print "%s contains %i records." % (args.input, count)
count_filter = 0
database_cleaned = []
for record in database:
if record[0].find('unidentified') == -1: # check if current record contain 'unidentified' taxonomic level.
database_cleaned.append(record)
count_filter += 1
print "%i records contain 'unidentified' string." % (count - count_filter)
count_write = File_IO.write_seqs(database_cleaned, args.output)
print "Filtered database is saved in %s with %i records." % (args.output, count_write)
if __name__ == '__main__':
import sys
main(sys.argv[1:])
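# Hedged usage sketch (filenames are illustrative; run from the repository root so
# that `from lib import File_IO` resolves):
#   python filter_database.py -i unite_raw.fasta -o unite_filtered.fasta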
| true
|
f284c9f0bb2cd3aaeb73f47ac0b3f89ed670e09e
|
Python
|
RakeshSharma21/HerokuDeployment
|
/model.py
|
UTF-8
| 383
| 2.921875
| 3
|
[] |
no_license
|
import pandas as pd
import pickle as pkl
hiringDf=pd.read_csv('hiring.csv')
print(hiringDf.head())
y=hiringDf['salary']
X=hiringDf.drop(['salary'],axis=1)
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X,y)
pkl.dump(regressor, open('model.pkl','wb'))
model=pkl.load(open('model.pkl','rb'))
print(model.predict([[2,8,8]]))
| true
|
4da4c4b183c6945c5a711540f2ef596d7d21efd8
|
Python
|
RobinVdBroeck/ucll_scripting
|
/exercises/basics/02-conditionals/student.py
|
UTF-8
| 330
| 3.84375
| 4
|
[] |
no_license
|
# Example
def abs(x):
if x < 0:
return -x
else:
return x
# Note that 'else if' has a special syntax in Python
# Look it up online yourself
def sign(x):
    if x < 0: return -1
    if x == 0: return 0
return 1
def factorial(n):
if n in (0,1):
return 1
return n * factorial(n-1)
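# Quick hedged checks (not part of the exercise): abs(-2) == 2, sign(-3) == -1,
# sign(0) == 0, and factorial(5) == 120.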
| true
|
e56b1f292163ad6b5d88e01eb7a2e7759b8bd58f
|
Python
|
timcosta/prawtools
|
/prawtools/stats.py
|
UTF-8
| 14,667
| 2.53125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
"""Utility to provide submission and comment statistics in a subreddit."""
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
from tempfile import mkstemp
import codecs
import logging
import os
import re
import time
from praw import Reddit
from praw.models import Submission
from six import iteritems, text_type as tt
from .helpers import AGENT, arg_parser, check_for_updates
DAYS_IN_SECONDS = 60 * 60 * 24
RE_WHITESPACE = re.compile(r'\s+')
TOP_VALUES = {'all', 'day', 'month', 'week', 'year'}
logger = logging.getLogger(__package__)
class SubredditStats(object):
"""Contain all the functionality of the subreddit_stats command."""
post_footer = tt('>Generated with [BBoe](/u/bboe)\'s [Subreddit Stats]'
'(https://github.com/praw-dev/prawtools) '
'([Donate](https://cash.me/$praw))')
post_header = tt('---\n###{}\n')
post_prefix = tt('Subreddit Stats:')
@staticmethod
def _permalink(item):
if isinstance(item, Submission):
return tt('/comments/{}').format(item.id)
else:
return tt('/comments/{}//{}?context=1').format(item.submission.id,
item.id)
@staticmethod
def _points(points):
return '1 point' if points == 1 else '{} points'.format(points)
@staticmethod
def _rate(items, duration):
return 86400. * items / duration if duration else items
@staticmethod
def _safe_title(submission):
"""Return titles with whitespace replaced by spaces and stripped."""
return RE_WHITESPACE.sub(' ', submission.title).strip()
@staticmethod
def _save_report(title, body):
descriptor, filename = mkstemp('.md', dir='.')
os.close(descriptor)
with codecs.open(filename, 'w', 'utf-8') as fp:
fp.write('{}\n\n{}'.format(title, body))
logger.info('Report saved to {}'.format(filename))
@staticmethod
def _user(user):
return '_deleted_' if user is None else tt('/u/{}').format(user)
def __init__(self, subreddit, site, distinguished):
"""Initialize the SubredditStats instance with config options."""
self.commenters = defaultdict(list)
self.comments = []
self.distinguished = distinguished
self.min_date = 0
self.max_date = time.time() - DAYS_IN_SECONDS
self.reddit = Reddit(site, check_for_updates=False, user_agent=AGENT)
self.submissions = []
self.submitters = defaultdict(list)
self.submit_subreddit = self.reddit.subreddit('subreddit_stats')
self.subreddit = self.reddit.subreddit(subreddit)
def basic_stats(self):
"""Return a markdown representation of simple statistics."""
comment_score = sum(comment.score for comment in self.comments)
if self.comments:
comment_duration = (self.comments[-1].created_utc -
self.comments[0].created_utc)
comment_rate = self._rate(len(self.comments), comment_duration)
else:
comment_rate = 0
submission_duration = self.max_date - self.min_date
submission_rate = self._rate(len(self.submissions),
submission_duration)
submission_score = sum(sub.score for sub in self.submissions)
values = [('Total', len(self.submissions), len(self.comments)),
('Rate (per day)', '{:.2f}'.format(submission_rate),
'{:.2f}'.format(comment_rate)),
('Unique Redditors', len(self.submitters),
len(self.commenters)),
('Combined Score', submission_score, comment_score)]
retval = 'Period: {:.2f} days\n\n'.format(submission_duration / 86400.)
retval += '||Submissions|Comments|\n:-:|--:|--:\n'
for quad in values:
retval += '__{}__|{}|{}\n'.format(*quad)
return retval + '\n'
def fetch_recent_submissions(self, max_duration):
"""Fetch recent submissions in subreddit with boundaries.
Does not include posts within the last day as their scores may not be
representative.
:param max_duration: When set, specifies the number of days to include
"""
if max_duration:
self.min_date = self.max_date - DAYS_IN_SECONDS * max_duration
for submission in self.subreddit.new(limit=None):
if submission.created_utc <= self.min_date:
break
if submission.created_utc > self.max_date:
continue
self.submissions.append(submission)
def fetch_submissions(self, submissions_callback, *args):
"""Wrap the submissions_callback function."""
logger.debug('Fetching submissions')
submissions_callback(*args)
logger.debug('Found {} submissions'.format(len(self.submissions)))
if not self.submissions:
return
self.submissions.sort(key=lambda x: x.created_utc)
self.min_date = self.submissions[0].created_utc
self.max_date = self.submissions[-1].created_utc
self.process_submitters()
self.process_commenters()
def fetch_top_submissions(self, top):
"""Fetch top submissions by some top value.
        :param top: One of day, week, month, year, all
"""
for submission in self.subreddit.top(limit=None, time_filter=top):
self.submissions.append(submission)
def process_commenters(self):
"""Group comments by author."""
logger.debug('Processing Commenters on {} submissions'
.format(len(self.submissions)))
for index, submission in enumerate(self.submissions):
if submission.num_comments == 0:
continue
logger.debug('{}/{} submissions'
.format(index + 1, len(self.submissions)))
submission.comment_sort = 'top'
more_comments = submission.comments.replace_more()
if more_comments:
skipped_comments = sum(x.count for x in more_comments)
logger.debug('Skipped {} MoreComments ({} comments)'
.format(len(more_comments), skipped_comments))
comments = [comment for comment in submission.comments.list() if
self.distinguished or comment.distinguished is None]
self.comments.extend(comments)
self.comments.sort(key=lambda x: x.created_utc)
for comment in self.comments:
if comment.author:
self.commenters[comment.author].append(comment)
def process_submitters(self):
"""Group submissions by author."""
logger.debug('Processing Submitters')
for submission in self.submissions:
if submission.author and (self.distinguished or
submission.distinguished is None):
self.submitters[submission.author].append(submission)
def publish_results(self, view, submitters, commenters):
"""Submit the results to the subreddit. Has no return value (None)."""
def timef(timestamp, date_only=False):
"""Return a suitable string representaation of the timestamp."""
dtime = datetime.fromtimestamp(timestamp)
if date_only:
retval = dtime.strftime('%Y-%m-%d')
else:
retval = dtime.strftime('%Y-%m-%d %H:%M PDT')
return retval
basic = self.basic_stats()
top_commenters = self.top_commenters(commenters)
top_comments = self.top_comments()
top_submissions = self.top_submissions()
# Decrease number of top submitters if body is too large.
body = None
while body is None or len(body) > 40000 and submitters > 0:
body = (basic + self.top_submitters(submitters) + top_commenters
+ top_submissions + top_comments + self.post_footer)
submitters -= 1
title = '{} {} {}posts from {} to {}'.format(
self.post_prefix, str(self.subreddit),
'top ' if view in TOP_VALUES else '', timef(self.min_date, True),
timef(self.max_date))
try: # Attempt to make the submission
return self.submit_subreddit.submit(title, selftext=body)
except Exception:
logger.exception('Failed to submit to {}'
.format(self.submit_subreddit))
self._save_report(title, body)
def run(self, view, submitters, commenters):
"""Run stats and return the created Submission."""
logger.info('Analyzing subreddit: {}'.format(self.subreddit))
if view in TOP_VALUES:
callback = self.fetch_top_submissions
else:
callback = self.fetch_recent_submissions
view = int(view)
self.fetch_submissions(callback, view)
if not self.submissions:
logger.warning('No submissions were found.')
return
return self.publish_results(view, submitters, commenters)
def top_commenters(self, num):
"""Return a markdown representation of the top commenters."""
num = min(num, len(self.commenters))
if num <= 0:
return ''
top_commenters = sorted(iteritems(self.commenters), reverse=True,
key=lambda x: (sum(y.score for y in x[1]),
len(x[1])))[:num]
retval = self.post_header.format('Top Commenters')
for author, comments in top_commenters:
retval += '0. {} ({}, {} comment{})\n'.format(
self._user(author),
self._points(sum(x.score for x in comments)),
len(comments), 's' if len(comments) != 1 else '')
return '{}\n'.format(retval)
def top_submitters(self, num):
"""Return a markdown representation of the top submitters."""
num = min(num, len(self.submitters))
if num <= 0:
return ''
top_submitters = sorted(iteritems(self.submitters), reverse=True,
key=lambda x: (sum(y.score for y in x[1]),
len(x[1])))[:num]
retval = self.post_header.format('Top Submitters\' Top Submissions')
for (author, submissions) in top_submitters:
retval += '0. {}, {} submission{}: {}\n'.format(
self._points(sum(x.score for x in submissions)),
len(submissions),
's' if len(submissions) != 1 else '', self._user(author))
for sub in sorted(submissions, reverse=True,
key=lambda x: x.score)[:10]:
title = self._safe_title(sub)
if sub.permalink in sub.url:
retval += tt(' 0. {}').format(title)
else:
retval += tt(' 0. [{}]({})').format(title, sub.url)
retval += ' ({}, [{} comment{}]({}))\n'.format(
self._points(sub.score), sub.num_comments,
's' if sub.num_comments != 1 else '',
self._permalink(sub))
retval += '\n'
return retval
def top_submissions(self):
"""Return a markdown representation of the top submissions."""
num = min(10, len(self.submissions))
if num <= 0:
return ''
top_submissions = sorted(
[x for x in self.submissions if self.distinguished or
x.distinguished is None],
reverse=True, key=lambda x: x.score)[:num]
if not top_submissions:
return ''
retval = self.post_header.format('Top Submissions')
for sub in top_submissions:
title = self._safe_title(sub)
if sub.permalink in sub.url:
retval += tt('0. {}').format(title)
else:
retval += tt('0. [{}]({})').format(title, sub.url)
retval += ' by {} ({}, [{} comment{}]({}))\n'.format(
self._user(sub.author), self._points(sub.score),
sub.num_comments, 's' if sub.num_comments != 1 else '',
self._permalink(sub))
return tt('{}\n').format(retval)
def top_comments(self):
"""Return a markdown representation of the top comments."""
num = min(10, len(self.comments))
if num <= 0:
return ''
top_comments = sorted(self.comments, reverse=True,
key=lambda x: x.score)[:num]
retval = self.post_header.format('Top Comments')
for comment in top_comments:
title = self._safe_title(comment.submission)
retval += tt('0. {}: {}\'s [comment]({}) in {}\n').format(
self._points(comment.score), self._user(comment.author),
self._permalink(comment), title)
return tt('{}\n').format(retval)
def main():
"""Provide the entry point to the subreddit_stats command."""
parser = arg_parser(usage='usage: %prog [options] SUBREDDIT VIEW')
parser.add_option('-c', '--commenters', type='int', default=10,
help='Number of top commenters to display '
'[default %default]')
parser.add_option('-d', '--distinguished', action='store_true',
help=('Include distinguished subissions and '
'comments (default: False). Note that regular '
'comments of distinguished submissions will still '
'be included.'))
parser.add_option('-s', '--submitters', type='int', default=10,
help='Number of top submitters to display '
'[default %default]')
options, args = parser.parse_args()
if options.verbose == 1:
logger.setLevel(logging.INFO)
elif options.verbose > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.NOTSET)
logger.addHandler(logging.StreamHandler())
if len(args) != 2:
parser.error('SUBREDDIT and VIEW must be provided')
subreddit, view = args
check_for_updates(options)
srs = SubredditStats(subreddit, options.site, options.distinguished)
result = srs.run(view, options.submitters, options.commenters)
if result:
print(result.permalink)
return 0
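# Hedged usage sketch: with the console entry point installed, an invocation along
# the lines of
#   subreddit_stats learnpython week
# would gather the past week's top submissions of /r/learnpython ('week' is one of
# TOP_VALUES) and post the report to /r/subreddit_stats (see submit_subreddit above).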
| true
|
d7e13ba422bac0211543ef74cb765ea7e3475219
|
Python
|
Billerens/conventional-commits-vscode-helper
|
/commit-msg
|
UTF-8
| 3,046
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import re, sys, os
def main():
buffered = ''
counter = -1
currentPosition = 0
patternHeaderType = r'^(build|ci|docs|feat|fix|perf|refactor|style|test|chore|revert)'
patternHeaderScope = r'(?:\(([^\)\s]+)\))?'
patternHeaderImportant = r'!?'
patternHeaderMessage = r': (?!(?:.* ){1})([<>!? ,.\-_a-zA-Z0-9]{0,52})'
patternHeaderEnd = r'^\n'
patternBody = r'^(.){0,72}(\n)'
filename = sys.argv[1]
lines = open(filename, 'r').readlines()
for line in lines:
counter = counter + 1
if line == '\n' and line == buffered:
print("2 free strings in a row! Found in ", counter ," line ")
else: buffered = line
#TYPE CHECK
print(lines[0])
headerRegexType = re.match(patternHeaderType, lines[0])
if headerRegexType != None:
print("<TYPE> ends on: ", headerRegexType.span()[1], " position. Is correct.")
currentPosition += headerRegexType.span()[1]
else:
print("<TYPE> has an error!: Check from 1st position.")
#SCOPE CHECK
print(lines[0][headerRegexType.span()[1]:len(lines[0])])
headerRegexScope = re.match(patternHeaderScope, lines[0][headerRegexType.span()[1]:len(lines[0])])
if headerRegexScope != None:
print("<SCOPE> ends on: ", currentPosition + headerRegexScope.span()[1], " position. Is correct.")
currentPosition += headerRegexScope.span()[1]
else:
print("<SCOPE> has an error!: Check from ", headerRegexType.span()[1], " position.")
#IMPORTANT CHECK
print(lines[0][currentPosition:len(lines[0])])
headerRegexImportant = re.match(patternHeaderImportant, lines[0][currentPosition:len(lines[0])])
if headerRegexImportant != None:
print("!IMPORTANT! ends on: ", currentPosition + headerRegexImportant.span()[1], " position. Is correct.")
currentPosition += headerRegexImportant.span()[1]
else:
print("!IMPORTANT! has an error!: Check from ", currentPosition - headerRegexScope.span()[1], " position.")
#MESSAGE CHECK
print(lines[0][currentPosition:len(lines[0])])
headerRegexMessage = re.match(patternHeaderMessage, lines[0][currentPosition:len(lines[0])])
if headerRegexMessage != None:
print("<MESSAGE> ends on: ", currentPosition + headerRegexMessage.span()[1], " position. Is correct.")
currentPosition += headerRegexMessage.span()[1]
else:
print("<MESSAGE> has an error!: Check from ", currentPosition - headerRegexImportant.span()[1], " position.")
#LENGTH CHECK
print(lines[0][currentPosition:len(lines[0])])
headerRegexEnd = re.match(patternHeaderEnd, lines[0][currentPosition:len(lines[0])])
if headerRegexEnd != None:
print("<HEAD> ends on: ", currentPosition + headerRegexEnd.span()[1], " position. Is correct.")
currentPosition += headerRegexEnd.span()[1]
else:
print("<HEAD> has an error!: Check from ", currentPosition, " position. Has more then 52 symbols.")
if __name__ == "__main__":
main()
| true
|
e21ea88ac1b078e33ff368e0819139b48438a9b8
|
Python
|
Aletvia/point_of_sale
|
/src/modules/inventory/models.py
|
UTF-8
| 332
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from django.db import models
"""
Model which represents a product (e.g. Coca-Cola 500ml, 800.00, 500).
"""
class Products(models.Model):
description = models.TextField(max_length=100)
#unit_price = models.DecimalField(max_digits=7, decimal_places=2)
unit_price = models.IntegerField()
stock = models.IntegerField()
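# Hedged usage sketch (assumes the app is installed and migrations have been applied):
#   Products.objects.create(description='Coca-Cola 500ml', unit_price=800, stock=500)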
| true
|
0979196c44084f7438c405baa600add4938aacde
|
Python
|
Ofrogue/ENOT
|
/quad.py
|
UTF-8
| 2,422
| 3.5
| 4
|
[] |
no_license
|
# quad tree class
import random
class QuadNode:
def __init__(self, x, y, width, height, level):
# x, y are left upper corner
self.x = x
self.y = y
self.width = width
self.height = height
self.center = (x + width / 2, y + height / 2)
self.children = list()
self.elements = list()
self.level = level
def __str__(self):
return str((self.x, self.y))
def build(self, depth=5):
if depth == 0:
return
self.children.append(QuadNode(self.x, self.y, self.width/2, self.height/2, self.level+1))
self.children.append(QuadNode(self.x + self.width / 2, self.y, self.width / 2, self.height / 2, self.level+1))
self.children.append(QuadNode(self.x, self.y + self.height / 2, self.width / 2, self.height / 2, self.level+1))
self.children.append(QuadNode(self.x + self.width / 2, self.y + self.height / 2, self.width / 2, self.height / 2, self.level+1))
for child in self.children:
child.build(depth-1)
def add_xy_object(self, x, y, xy_object):
node = self
while node.children:
i, j = 0, 0
if x > node.center[0]:
i = 1
if y > node.center[1]:
j = 1
node = node.children[i + 2 * j]
node.elements.append(xy_object)
def neighbours(self, x, y):
node = self
while node.children:
i, j = 0, 0
if x > node.center[0]:
i = 1
if y > node.center[1]:
j = 1
node = node.children[i + 2 * j]
return node.elements
def pop(self, x, y):
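        # Note: assumes stored elements expose a pygame-style .rect with .x/.y
        # attributes; the XYelement helper below only has plain .x/.y.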
node = self
while node.children:
i, j = 0, 0
if x > node.center[0]:
i = 1
if y > node.center[1]:
j = 1
node = node.children[i + 2 * j]
n = 0
for n, element in enumerate(node.elements):
if element.rect.x == x and element.rect.y == y:
break
element = node.elements.pop(n)
return element
def expand_to_level(self, level):
pass
class XYelement:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return str((self.x, self.y))
if __name__ == "__main__":
qn = QuadNode(0, 0, 600, 600, 0)
qn.build(4)
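    # Hedged usage sketch (coordinates are illustrative and must fall inside the
    # 600x600 root node):
    # qn.add_xy_object(10, 20, XYelement(10, 20))
    # print([str(e) for e in qn.neighbours(15, 25)])  # elements in the same leaf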
| true
|
e461d4e3441c38562cfac83b2cfd026273b3fcb3
|
Python
|
Juniorlimaivd/MachineLearningCIn
|
/lista-2/prototype.py
|
UTF-8
| 515
| 3.015625
| 3
|
[] |
no_license
|
import numpy as np
import math
import random
def generateRandomPrototypes(data, prototypesNumber):
n_instances = len(data.values)
n_attributes = len(data.values[0])
result_x = []
result_y = []
for _ in range(prototypesNumber):
prototype = [data.values[random.randrange(n_instances)][i] for i in range(n_attributes-1)]
result_x.append(prototype)
classe = data.values[random.randrange(n_instances)][-1]
result_y.append(classe)
return (result_x, result_y)
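# Note: each attribute of a generated prototype, and its class label, is drawn from
# an independently chosen random training row, so a prototype is generally not an
# exact copy of any single instance in `data`.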
| true
|
a1d591556391ae558d7436ade8d7d1a0d6e76f90
|
Python
|
walkingpanda/walkingpanda
|
/get start/main.py
|
UTF-8
| 680
| 2.8125
| 3
|
[] |
no_license
|
import torch
import numpy as np
import torch.nn as nn
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = torch.nn.Sequential(
nn.Linear(D_in, H),
nn.ReLU(),
nn.Linear(H, D_out),
)
loss_fn = nn.MSELoss(reduction='sum')
lr = 1e-4
for t in range(1000):
# forward pass
y_pred = model(x)
# compute loss
loss = loss_fn(y_pred, y)
print(t, loss)
model.zero_grad()
# backward pass,compute the gradient
loss.backward()
# update weights of w1 and w2
with torch.no_grad():
for param in model.parameters():
param -= lr * param.grad
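# Hedged note: the manual update above is plain full-batch gradient descent; an
# equivalent formulation would create torch.optim.SGD(model.parameters(), lr=lr) and
# call optimizer.zero_grad(), loss.backward(), optimizer.step() each iteration.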
| true
|
adbeb3d9148e1eb971ee7c6347bb05b935cd1c9e
|
Python
|
ryu577/algorithms
|
/algorith/sequencegen/permutations/all_permutations.py
|
UTF-8
| 634
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
def perm(t,i,n):
"""
Based on procedure "perm" in ch6 of Giles and Bassard.
"""
if i==n:
print(t)
else:
for j in range(i,n+1):
swap(t,i,j)
perm(t,i+1,n)
swap(t,i,j)
def perm2(a,ix):
"""
My own version of perm.
"""
if ix==len(a):
print(a)
else:
for j in range(ix+1):
swap(a,j,ix)
perm2(a,ix+1)
swap(a,j,ix)
def swap(a,i,j):
if i<0 or j>len(a)-1:
return
tmp=a[j]
a[j]=a[i]
a[i]=tmp
if __name__=="__main__":
t=[1,2,3]
#perm(t,0,len(t)-1)
perm2(t,0)
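# Hedged check: for t = [1, 2, 3] either routine prints all 3! = 6 permutations
# (the order depends on the swap sequence).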
| true
|
f71c782dc9099fced1acd39bdad33014a7de39df
|
Python
|
georgenewman10/stock
|
/history.py
|
UTF-8
| 647
| 2.96875
| 3
|
[] |
no_license
|
from datetime import datetime
from iexfinance.stocks import get_historical_data
import pandas as pd
### maybe replace hist() with hist(start, end, etc) so you can more easily change important variables
def hist(output=None):
#if output=='pandas'
histories = {}
stock_list = ['AAPL','GOOG','MSFT','AMZN','FB','BABA','JNJ','JPM','XOM']
names = [x.lower() for x in stock_list]
start = datetime(2018, 1, 1)
end = datetime(2019, 1, 1)
for i in range(len(stock_list)):
histories[names[i]] = get_historical_data(stock_list[i],start,end,output_format='pandas')
#output_format='pandas'
return histories
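# Hedged usage sketch (requires working iexfinance credentials/connectivity):
#   histories = hist()
#   print(histories['aapl'].head())  # daily price history DataFrame for AAPL in 2018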
| true
|
600724228c3a068f87dc599158fef17bf64e9ef7
|
Python
|
CRSantiago/Python-Projects
|
/Micro Projects/prime_factorization.py
|
UTF-8
| 551
| 4.625
| 5
|
[] |
no_license
|
# Prime Factorization - Have the user enter a number and find all Prime Factors (if there are any) and display them.
import math
def find_prime_factors(n):
    while n%2==0:
        yield 2
        n//=2  # integer division keeps n an int in Python 3
    for i in range(3,int(math.sqrt(n))+1,2):
        while n%i==0:
            yield i
            n//=i
    if n>2:
        yield n
user_input = int(input("Enter a number:\n"))
print("\nLet's see what are prime factors of the number you entered.")
for factor in find_prime_factors(user_input):
print(factor)
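# Hedged check: entering 84 should print its prime factors 2, 2, 3 and 7,
# since 84 = 2 * 2 * 3 * 7.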
| true
|
1b8c54d7f65037e8e2af4518c6b29b3c3fe00586
|
Python
|
AlieksieienkoVitalii/Labs
|
/Lesson_6/Lesson_6_3_3.py
|
UTF-8
| 1,015
| 4.3125
| 4
|
[] |
no_license
|
# METHOD 3 _____________________________________________________________________
def my_function(n):
    if n == 1 or n == 2:
        return 1
    else:
        return my_function(n - 1) + my_function(n - 2)
steps = int(input('Enter the number of steps in the staircase - '))
while True:
    if steps <= 0:
        steps = int(input('The number of steps must be at least one. Enter it again - '))
    elif steps == 1:
        print('number of ways to climb the staircase - 1')
        exit(0)
    elif steps == 2:
        print('number of ways to climb the staircase - 2')
        exit(0)
    else:
        break
print('number of ways to climb the staircase - ' + str(my_function(steps)))
| true
|
a13316066102cc3bd033fd937823cd811f4114ef
|
Python
|
frossie-shadow/astshim
|
/tests/test_xmlChan.py
|
UTF-8
| 2,060
| 2.609375
| 3
|
[] |
no_license
|
from __future__ import absolute_import, division, print_function
import os.path
import unittest
import astshim
from astshim.test import ObjectTestCase
DataDir = os.path.join(os.path.dirname(__file__), "data")
class TestObject(ObjectTestCase):
def test_XmlChanDefaultAttributes(self):
sstream = astshim.StringStream()
chan = astshim.XmlChan(sstream)
self.assertEqual(chan.xmlFormat, "NATIVE")
self.assertEqual(chan.xmlLength, 0)
self.assertEqual(chan.xmlPrefix, "")
zoommap = astshim.ZoomMap(3, 2.0)
self.checkXmlPersistence(sstream=sstream, chan=chan, obj=zoommap)
def test_XmlChanSpecifiedAttributes(self):
sstream = astshim.StringStream()
chan = astshim.XmlChan(
sstream, 'XmlFormat="QUOTED", XmlLength=2000, XmlPrefix="foo"')
self.assertEqual(chan.xmlFormat, "QUOTED")
self.assertEqual(chan.xmlLength, 2000)
self.assertEqual(chan.xmlPrefix, "foo")
zoommap = astshim.ZoomMap(4, 1.5)
self.checkXmlPersistence(sstream=sstream, chan=chan, obj=zoommap)
def test_XmlChanSetAttributes(self):
sstream = astshim.StringStream()
chan = astshim.XmlChan(sstream)
chan.xmlFormat = "QUOTED"
chan.xmlLength = 1500
chan.xmlPrefix = "test"
self.assertEqual(chan.xmlFormat, "QUOTED")
self.assertEqual(chan.xmlLength, 1500)
self.assertEqual(chan.xmlPrefix, "test")
zoommap = astshim.ZoomMap(1, 0.5)
self.checkXmlPersistence(sstream=sstream, chan=chan, obj=zoommap)
def checkXmlPersistence(self, sstream, chan, obj):
"""Check that an Ast object can be persisted and unpersisted
"""
chan.write(obj)
sstream.sinkToSource()
obj_copy = chan.read()
self.assertEqual(obj.className, obj_copy.className)
self.assertEqual(obj.show(), obj_copy.show())
self.assertEqual(str(obj), str(obj_copy))
self.assertEqual(repr(obj), repr(obj_copy))
if __name__ == "__main__":
unittest.main()
| true
|
af9e6dda53b19142673ca7cb39d84d571a3ad4fa
|
Python
|
Jovamih/PythonProyectos
|
/Pandas/Data Sciensist/alto-rendimiento.py
|
UTF-8
| 918
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#/usr/bin/env python
import pandas as pd
import numpy as np
import numexpr
def rendimiento():
    # the numexpr library provides an eval() function that evaluates string literals as logical Python expressions
    data=pd.DataFrame(np.random.randint(12,200,size=(8,4)),columns=list('ABCD'))
    # pandas also provides pd.eval() to operate on these
    data_mask=pd.eval('data.A>100')
    print(data[data_mask])
    # eval() can also be used for in-place column assignments
    data.eval('TOTAL=A+B+C+D',inplace=True)
    print(data)
    # eval() is particularly useful for fast, efficient column sums and column management
    # query() can also be used, combined with temporary variables via @
    data_mean=(data.mean(axis=0)).mean()
    data_filtered=data.query('(A>@data_mean) & (D>@data_mean)')
    print(data_filtered)  # query() already returns the filtered rows, so no boolean mask is needed
if __name__=="__main__":
rendimiento()
| true
|
69ad3f4d4f52b3c9d14674659d5a6bbda834f54d
|
Python
|
varungambhir/Major_Project
|
/preprocess.py
|
UTF-8
| 297
| 2.96875
| 3
|
[] |
no_license
|
li=""
with open('response.txt') as f:
while True:
c = f.read(1)
if not c:
print "End of file"
break
li=li+c
print li
fn=""
for i in range(0,len(li)-2):
fn+=li[i]
if li[i+1]=='{' and li[i]=='}':
fn+=','
f=open('response.txt','w')
f.write(str(fn))
| true
|
4193f13355a64d6cc1ef850546db8a76f386f3a7
|
Python
|
JamesBrowns/data-analysis
|
/D A/python/回归预测/简单线性linearregression.py
|
UTF-8
| 996
| 2.953125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# import modules
import pandas as pd
from sklearn.linear_model import LinearRegression
# read the data
train = pd.read_csv("data/train1.csv")
test = pd.read_csv("data/test1.csv")
submit = pd.read_csv("data/sample_submit.csv")
# drop the id column
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
# extract the training target y
y_train = train.pop('y')
# fit a linear regression model
reg = LinearRegression()
reg.fit(train, y_train)
#y_pred = reg.predict(test)
# if a prediction is negative, clip it to 0
#y_pred = map(lambda x: x if x >= 0 else 0, y_pred)
# write the predictions to a CSV file
#submit['y'] = y_pred
#submit.to_csv('data/my_linearRegression_prediction22.csv', index=False)
print reg.coef_
from sklearn import metrics
import numpy as np
rmse=np.sqrt(metrics.mean_squared_error(y_train, reg.predict(train)))
print 'linearRegression rmse is %f'%rmse
# xgboost rmse: 18.5718185229
# linearRegression rmse: 38.920108
| true
|
893f9b77a05a9de3e38dd61a9d334e6eaf5efbe6
|
Python
|
p2327/CS61a_Berkeley
|
/list_lab_recursion.py
|
UTF-8
| 1,308
| 4.03125
| 4
|
[] |
no_license
|
def reverse_recursive(lst):
if lst == []:
return []
else:
return reverse_recursive(lst[1:]) + [lst[0]]
test = reverse_recursive([1, 2, 3, 4])
def merge(lst1, lst2):
"""Merges two sorted lists recursively.
>>> merge([1, 3, 5], [2, 4, 6])
[1, 2, 3, 4, 5, 6]
>>> merge([], [2, 4, 6])
[2, 4, 6]
>>> merge([1, 2, 3], [])
[1, 2, 3]
>>> merge([5, 7], [2, 4, 6])
[2, 4, 5, 6, 7]
"""
"*** YOUR CODE HERE ***"
if lst1 == [] or lst2 == []:
return lst1 + lst2
elif lst1[0] < lst2[0]:
return [lst1[0]] + merge(lst1[1:], lst2)
    else:  # lst1[0] >= lst2[0]; equal heads are taken from lst2 so the merge never returns None
return [lst2[0]] + merge(lst1, lst2[1:])
test2 = merge([1, 3, 5], [2, 4, 6])
def merge_iter(lst1, lst2):
"""Merges two sorted lists.
>>> merge_iter([1, 3, 5], [2, 4, 6])
[1, 2, 3, 4, 5, 6]
>>> merge_iter([], [2, 4, 6])
[2, 4, 6]
>>> merge_iter([1, 2, 3], [])
[1, 2, 3]
>>> merge_iter([5, 7], [2, 4, 6])
[2, 4, 5, 6, 7]
"""
new = []
while lst1 and lst2:
if lst1[0] < lst2[0]:
new += [lst1[0]]
lst1 = lst1[1:]
else:
new += [lst2[0]]
lst2 = lst2[1:]
if lst1:
return new + lst1
else:
return new + lst2
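# Hedged check: with equal heads taken from lst2 in the else branch,
# merge([1, 2], [2, 3]) == [1, 2, 2, 3].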
| true
|