n,m = map(int,input().split())
start = list(map(int,input().strip().split()))[:2]
end = list(map(int,input().strip().split()))[:2]
arr = []
for k in range(n):
a = list(map(int,input().split()))[:m]
arr.append(a)
mat = [[0 for j in range(m)] for i in range(n)]
mat[start[0]][start[1]] = 1
def make_steps(k):
for i in range(n):
for j in range(m):
if mat[i][j] == k:
if i > 0 and mat[i-1][j] == 0 and arr[i-1][j] == 0:
mat[i-1][j] = k+1
if j > 0 and mat[i][j-1] == 0 and arr[i][j-1] == 0:
mat[i][j-1] = k+1
if i < n-1 and mat[i+1][j] == 0 and arr[i+1][j] == 0:
mat[i+1][j] = k+1
if j < m-1 and mat[i][j+1] == 0 and arr[i][j+1] == 0:
mat[i][j+1] = k+1
k = 0
while mat[end[0]][end[1]] == 0:
k += 1
make_steps(k)
way = []
i,j = end
k = mat[i][j]
while k > 1:
if i > 0 and mat[i-1][j] == k-1:
i,j = i-1,j
way.append('D')
k -= 1
elif j > 0 and mat[i][j-1] == k-1:
i,j = i,j-1
way.append('R')
k -= 1
elif i < n-1 and mat[i+1][j] == k-1:
i,j = i+1,j
way.append('U')
k -= 1
elif j < m-1 and mat[i][j+1] == k-1:
i,j = i,j+1
way.append('L')
k -= 1
a = (''.join(way))
final_path = a[::-1]
print(final_path)
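# Illustrative note (not part of the original solution): the loop above is a
# BFS / Lee-algorithm wavefront. For a 3x3 grid with walls (#) it labels cells
# with distance-from-start + 1 (0 = unreached):
#     grid            mat
#     S . #           1 2 0
#     . # .           2 0 0
#     . . E           3 4 5
# Backtracking from the end cell follows strictly decreasing labels, and the
# collected moves are reversed to give the start-to-end path (here 'DDRR').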
|
from headline_generator.config import Config1, Config2, Choose_config
from headline_generator.model import model1
import numpy as np
from keras.preprocessing import sequence
from keras.utils import np_utils
import random, sys
from sklearn.cross_validation import train_test_split
import pickle
# from keras.utils.visualize_util import plot
from keras.callbacks import Callback
import keras.backend as K
from headline_generator.data import load_article
import fnmatch
import os
import re
class Data_generator():
def __init__(self, idx2word, glove_idx2idx, config):
self.config = config
self.glove_idx2idx = glove_idx2idx
self.idx2word = idx2word
def prt(self, label, x):
idx2word = self.idx2word
print label + ':',
for w in x:
print idx2word[w],
print
def lpadd(self, x):
"""left (pre) pad a description to maxlend and then add eos.
The eos is the input to predicting the first word in the headline
"""
maxlend = self.config.maxlend
eos = self.config.eos
empty = self.config.empty
assert maxlend >= 0
if maxlend == 0:
return [eos]
n = len(x)
if n > maxlend:
x = x[-maxlend:]
n = maxlend
return [empty]*(maxlend-n) + x + [eos]
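        # Illustrative example (hypothetical values): with maxlend=5, empty=0,
        # eos=1 and x=[7, 8, 9], lpadd returns [0, 0, 7, 8, 9, 1] -- two left
        # pads, the (possibly truncated) description, then the eos marker.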
def flip_headline(self, x, nflips=None, model=None, debug=False):
"""given a vectorized input (after `pad_sequences`) flip some of the words in the second half (headline)
with words predicted by the model
"""
# Load settings
maxlend = self.config.maxlend
maxlen = self.config.maxlen
eos = self.config.eos
empty = self.config.empty
oov0 = self.config.oov0
idx2word = self.idx2word
if nflips is None or model is None or nflips <= 0:
return x
batch_size = len(x)
assert np.all(x[:, maxlend] == eos)
probs = model.predict(x, verbose=0, batch_size=batch_size)
x_out = x.copy()
for b in range(batch_size):
# pick locations we want to flip
# 0...maxlend-1 are descriptions and should be fixed
# maxlend is eos and should be fixed
flips = sorted(random.sample(xrange(maxlend + 1, maxlen), nflips))
if debug and b < debug:
print b,
for input_idx in flips:
if x[b, input_idx] == empty or x[b, input_idx] == eos:
continue
# convert from input location to label location
# the output at maxlend (when input is eos) is feed as input at maxlend+1
label_idx = input_idx - (maxlend + 1)
prob = probs[b, label_idx]
w = prob.argmax()
if w == empty: # replace accidental empty with oov
w = oov0
if debug and b < debug:
print '%s => %s' % (idx2word[x_out[b, input_idx]], idx2word[w]),
x_out[b, input_idx] = w
if debug and b < debug:
print
return x_out
def vocab_fold(self, xs):
"""convert list of word indexes that may contain words outside vocab_size to words inside.
If a word is outside, try first to use glove_idx2idx to find a similar word inside.
        If none exists, replace all occurrences of the same unknown word with <0>, <1>, ...
"""
oov0 = self.config.oov0
vocab_size = self.config.vocab_size
nb_unknown_words = self.config.nb_unknown_words
glove_idx2idx = self.glove_idx2idx
xs = [x if x < oov0 else glove_idx2idx.get(x, x) for x in xs]
# the more popular word is <0> and so on
outside = sorted([x for x in xs if x >= oov0])
# if there are more than nb_unknown_words oov words then put them all in nb_unknown_words-1
outside = dict((x, vocab_size - 1 - min(i, nb_unknown_words - 1)) for i, x in enumerate(outside))
xs = [outside.get(x, x) for x in xs]
return xs
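        # Illustrative example (hypothetical values): with vocab_size=100 and
        # nb_unknown_words=10, the first out-of-vocabulary id is folded to 99,
        # the second to 98, and from the 10th distinct OOV id onward they all
        # collapse into 90 (= vocab_size - nb_unknown_words).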
def conv_seq_labels(self, xds, xhs, nflips=None, model=None, debug=False):
"""description and hedlines are converted to padded input vectors. headlines are one-hot to label"""
maxlen = self.config.maxlen
maxlenh = self.config.maxlenh
vocab_size = self.config.vocab_size
empty = self.config.empty
eos = self.config.eos
batch_size = len(xhs)
assert len(xds) == batch_size
# pad input to same size: [empty]...[empty] Example description [eos] Example headline [empty]...[empty]
if debug:
self.prt('D cutted', xds[0])
# fold x(In large vocab) into word in vocab and 100 place holders
x = [self.vocab_fold(self.lpadd(xd) + xh) for xd, xh in zip(xds, xhs)] # the input does not have 2nd eos
x = sequence.pad_sequences(x, maxlen=maxlen, value=empty, padding='post', truncating='post')
if debug:
self.prt('D pad', x[0])
print("x[0] {}".format(x[0]))
# flip some data from xh to model prediction
x = self.flip_headline(x, nflips=nflips, model=model, debug=debug)
# print("x {}".format(x))
y = np.zeros((batch_size, maxlenh, vocab_size))
if debug:
self.prt("H cutted", xhs[0])
for i, xh in enumerate(xhs):
# right padding
            # append [eos] and maxlenh [empty] tokens to xh, then truncate to maxlenh, e.g.: This yields a great outcome [eos] [empty] ... [empty]
xh = self.vocab_fold(xh) + [eos] + [empty] * maxlenh # output does have a eos at end
xh = xh[:maxlenh]
if debug:
if i ==0:
self.prt("H pad", xh)
# change xh to one-hot matrix, each item in xh become a one-hot vector
y[i, :, :] = np_utils.to_categorical(xh, vocab_size)
return x, y
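        # Shape note: x is (batch_size, maxlen) word ids and y is
        # (batch_size, maxlenh, vocab_size) one-hot labels; the label indexing in
        # flip_headline above assumes maxlen = maxlend + maxlenh.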
def gen(self, article_gen, word2idx, nb_batches = None, nflips=None, model=None, debug=False):
"""yield batches.
while training it is good idea to flip once in a while the values of the headlines from the
value taken from Xh to value generated by the model.
"""
batch_size = self.config.batch_size
maxlend = self.config.maxlend
maxlenh = self.config.maxlenh
seed = self.config.seed
c = nb_batches if nb_batches else 0
while True:
for articles in article_gen():
if nb_batches and c >= nb_batches:
c = 0
X_raw = [[word2idx[token] for token in d.split()] for d in articles['content']]
Y_raw = [[word2idx[token] for token in headline.split()] for headline in articles['title']]
assert len(X_raw) == len(Y_raw)
# Random slice the x and y
new_seed = random.randint(0, sys.maxint)
random.seed(c + 123456789 + seed)
for i in range(len(X_raw)):
s = random.randint(min(maxlend, len(X_raw[i])), max(maxlend, len(X_raw[i])))
X_raw[i] = X_raw[i][:s]
s = random.randint(min(maxlenh, len(Y_raw[i])), max(maxlenh, len(Y_raw[i])))
Y_raw[i] = Y_raw[i][:s]
                # undo the seeding before we yield in order not to affect the caller
c += 1
random.seed(new_seed)
# Padding
x, y = self.conv_seq_labels(X_raw, Y_raw, nflips=nflips, model=model, debug=debug)
yield (x, y)
# def gen(self, Xd, Xh, nb_batches=None, nflips=None, model=None, debug=False):
# """yield batches. for training use nb_batches=None
# for validation generate deterministic results repeating every nb_batches
#
# while training it is good idea to flip once in a while the values of the headlines from the
# value taken from Xh to value generated by the model.
# """
# # print("len(Xd)", len(Xd))
# batch_size = self.config.batch_size
# maxlend = self.config.maxlend
# maxlenh = self.config.maxlenh
# seed = self.config.seed
#
# c = nb_batches if nb_batches else 0
# while True:
# xds = []
# xhs = []
# if nb_batches and c >= nb_batches:
# c = 0
# new_seed = random.randint(0, sys.maxint)
# random.seed(c + 123456789 + seed)
# for b in range(batch_size):
# t = random.randint(0, len(Xd) - 1)
#
# # Cut xd and xh, in order to generate different text from same input X_i
# xd = Xd[t]
#
# # print("maxlend: {}, len(xd): {}".format(maxlend, len(xd)))
# s = random.randint(min(maxlend, len(xd)), max(maxlend, len(xd)))
# # print("xd[:s]: {}".format(xd[:s]))
#
# xds.append(xd[:s])
#
# xh = Xh[t]
# s = random.randint(min(maxlenh, len(xh)), max(maxlenh, len(xh)))
# xhs.append(xh[:s])
#
# # print("maxlend: {}, len(xd): {}".format(maxlenh, len(xh)))
# # print("len(xd[:s]) {}, len(xh[:s]) {}".format(len(xd[:s]), len(xh[:s])))
#
# # undo the seeding before we yield inorder not to affect the caller
# c += 1
# random.seed(new_seed)
#
# x, y = self.conv_seq_labels(xds, xhs, nflips=nflips, model=model, debug=debug)
# # print("x.shape {}, y.shape {}".format(x.shape, y.shape))
# yield x, y
def test_gen(self, gen, config, n=3):
maxlend = config.maxlend
eos = config.eos
for i in range(2):
print("interation:", i)
Xtr,Ytr = next(gen)
print(Xtr.shape, Ytr.shape)
for i in range(n):
assert Xtr[i,maxlend] == eos
x = Xtr[i,:maxlend]
y = Xtr[i,maxlend:]
yy = Ytr[i,:]
# From one hot matrix to index vector
yy = np.where(yy)[1]
self.prt('L',yy)
self.prt('H',y)
if maxlend:
self.prt('D',x)
def get_weight_file(weight_folder, config_name):
'''
input: model folder like: '/Users/zhaohengyang/PycharmProjects/FoxType_mike/model/'
    :return: (epoch, path) for the weight file with the highest epoch number
'''
pattern = config_name + '.epoch_*.hdf5'
p = re.compile(config_name + ".epoch_(.*).hdf5")
weight_files = []
for root, dirs, files in os.walk(weight_folder):
for filename in fnmatch.filter(files, pattern):
epoch = int(p.search(filename).group(1))
weight_files += [(epoch, os.path.join(root, filename))]
weight_files.sort(key=lambda x: x[0], reverse=True)
return weight_files[0]
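    # Illustrative example (hypothetical files): with config_name='config1' and a
    # folder containing config1.epoch_3.hdf5 and config1.epoch_12.hdf5, this
    # returns (12, '<weight_folder>/config1.epoch_12.hdf5').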
def main(debug=False):
# Load configuration
config_name = Choose_config.current_config['name']
config = Choose_config.current_config['class']()
batch_size = config.batch_size
seed = config.seed
FN0 = config.FN0
FN1 = config.FN1
nflips = config.nflips
nb_epoch = config.nb_epoch
LR = config.LR
early_stop = config.early_stop
nb_val_samples = config.nb_val_samples
train_path = "../sample_data/train.jsonl"
val_path = "../sample_data/test.jsonl"
train_on_weight = config.train_on_weight
# load word embeddings
with open('../sample_data/%s.pkl' % FN0, 'rb') as fp:
embedding, idx2word, word2idx, glove_idx2idx = pickle.load(fp)
print("word embedding shape", embedding.shape)
    # load model structure
model = config.model['class'](embedding, config)
    # plot(model, to_file='model.png', show_shapes=True)  # needs the commented-out keras.utils.visualize_util import above
    # load model weight
    start_epoch = 0
    if train_on_weight:
config_name = Choose_config.current_config['name']
weight_folder = config.MODEL_PATH
newest_epoch, init_trained_weight_path = get_weight_file(weight_folder, config_name)
start_epoch = newest_epoch + 1
print("start epo:", start_epoch)
print("trained on weights: " + init_trained_weight_path)
model.load_weights(init_trained_weight_path)
# generate simulate data
load_train = lambda: load_article(raw_path=train_path, early_stop=early_stop, batch_size=batch_size)
load_test = lambda: load_article(raw_path=val_path, early_stop=nb_val_samples, batch_size=batch_size)
data_generator = Data_generator(idx2word, glove_idx2idx, config)
traingen = data_generator.gen(load_train, word2idx, nflips=nflips, model=model)
valgen = data_generator.gen(load_test, word2idx)
# test generator
if debug:
data_generator.test_gen(traingen, config)
# train
history = {}
for iteration in range(nb_epoch):
print 'Iteration', iteration
        # Assume the train sample size is 1000 and each step feeds batch_size (e.g. 64) samples;
        # the generator moves on to the next epoch once 64 * batch_round exceeds 1000
h = model.fit_generator(traingen, samples_per_epoch=early_stop,
nb_epoch=1#, validation_data=valgen, nb_val_samples=nb_val_samples
)
# append new h.history to history list
for k, v in h.history.iteritems():
history[k] = history.get(k, []) + v
with open('../model/%s.history.pkl' % FN1, 'wb') as fp:
pickle.dump(history, fp, -1)
# if iteration % 5 == 0:
model_weight_path = '../model/{}.epoch_{}.hdf5'.format(config_name, iteration + start_epoch)
model.save_weights(model_weight_path, overwrite=True)
if iteration > 5:
# Reduce learning rate each epoch
LR *= 0.5
K.set_value(model.optimizer.lr, np.float32(LR))
if __name__ == "__main__":
main(debug=False)
|
from copy import deepcopy
import numpy as np
from clients.base import BaseClient
class FedProxClient(BaseClient):
def __init__(self, model, optimizer, criterion, dataloader, scheduler=None):
super().__init__(model, optimizer, criterion, dataloader, scheduler)
def local_update(self, epoch):
global_model = deepcopy(self.model)
self.model.train()
for i in range(epoch):
for inputs, targets in self.dataloader:
inputs = inputs.to(self.device)
targets = targets.to(self.device)
self.optimizer.zero_grad()
logits = self.model(inputs)
loss = self.criterion(logits, targets)
loss.backward()
self.optimizer.step(global_model.parameters())
def receive_param(self, state_dict):
self.model.load_state_dict(state_dict)
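# Illustrative sketch (not from the original code): `optimizer.step(global_model.parameters())`
# above assumes a FedProx-style optimizer that applies the proximal term inside its
# step. An equivalent, more explicit variant adds the term to the loss before
# backward(); `mu` is a hypothetical proximal coefficient.
def fedprox_proximal_term(local_model, global_model, mu=0.01):
    import torch  # local import; the snippet above only imports deepcopy/numpy
    term = 0.0
    for w, w_g in zip(local_model.parameters(), global_model.parameters()):
        # (mu / 2) * ||w_local - w_global||^2, with the global weights held fixed
        term = term + torch.sum((w - w_g.detach()) ** 2)
    return 0.5 * mu * term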
|
# -*- coding:utf-8 -*-
import os
import sys
sys.path.append('utils/')
from preprocess import *
from sentianalysis import *
main_path = os.path.abspath('.')
dict_path = main_path + '/dict/'
# Paths of the individual lexicons (adjust as needed)
stopword_path = dict_path + 'stop_words.txt'
degreeword_path = dict_path + 'degreewords.txt'
sentimentword_path = dict_path + 'sentiment_word_score.txt'
deny_path = dict_path + 'denial_dict.txt'
# Stop-word list
stopwords = load_data(stopword_path)
# Negation-word list
notword = load_data(deny_path)
# Degree / intensifier word list
degree_dict = file2dict(degreeword_path)
# Sentiment lexicon with word scores
sentiment_dict = file2dict(sentimentword_path)
text = '不太好吃,相当难吃,要是米饭再多点儿就好了'
# text = "剁椒鸡蛋好咸,土豆丝很好吃"
print('Sentiment value of sentence "%s": %.4f' % (text, sents_score(text, sentiment_dict, degree_dict, notword)))
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from constants import constants
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def cosineLoss(A, B, name):
''' A, B : (BatchSize, d) '''
dotprod = tf.reduce_sum(tf.multiply(tf.nn.l2_normalize(A,1), tf.nn.l2_normalize(B,1)), 1)
loss = 1-tf.reduce_mean(dotprod, name=name)
return loss
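# Note: both inputs are L2-normalized, so the loss is 0 when A and B point in the
# same direction and approaches 2 when they point in opposite directions.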
def flatten(x):
return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])])
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = np.prod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
collections=collections)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
def deconv2d(x, out_shape, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, prevNumFeat=None):
with tf.variable_scope(name):
num_filters = out_shape[-1]
prevNumFeat = int(x.get_shape()[3]) if prevNumFeat is None else prevNumFeat
stride_shape = [1, stride[0], stride[1], 1]
# transpose_filter : [height, width, out_channels, in_channels]
filter_shape = [filter_size[0], filter_size[1], num_filters, prevNumFeat]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[:2]) * prevNumFeat
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width"
fan_out = np.prod(filter_shape[:3])
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [num_filters], initializer=tf.constant_initializer(0.0),
collections=collections)
deconv2d = tf.nn.conv2d_transpose(x, w, tf.pack(out_shape), stride_shape, pad)
# deconv2d = tf.reshape(tf.nn.bias_add(deconv2d, b), deconv2d.get_shape())
return deconv2d
def categorical_sample(logits, d):
value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
return tf.one_hot(value, d)
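# Note: subtracting the row-wise max before tf.multinomial only improves numerical
# stability; it does not change the sampled distribution.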
def inverseUniverseHead(x, final_shape, nConvs=4):
''' universe agent example
input: [None, 288]; output: [None, 42, 42, 1];
'''
print('Using inverse-universe head design')
bs = tf.shape(x)[0]
deconv_shape1 = [final_shape[1]]
deconv_shape2 = [final_shape[2]]
for i in range(nConvs):
deconv_shape1.append((deconv_shape1[-1]-1)/2 + 1)
deconv_shape2.append((deconv_shape2[-1]-1)/2 + 1)
inshapeprod = np.prod(x.get_shape().as_list()[1:]) / 32.0
assert(inshapeprod == deconv_shape1[-1]*deconv_shape2[-1])
# print('deconv_shape1: ',deconv_shape1)
# print('deconv_shape2: ',deconv_shape2)
x = tf.reshape(x, [-1, deconv_shape1[-1], deconv_shape2[-1], 32])
deconv_shape1 = deconv_shape1[:-1]
deconv_shape2 = deconv_shape2[:-1]
for i in range(nConvs-1):
x = tf.nn.elu(deconv2d(x, [bs, deconv_shape1[-1], deconv_shape2[-1], 32],
"dl{}".format(i + 1), [3, 3], [2, 2], prevNumFeat=32))
deconv_shape1 = deconv_shape1[:-1]
deconv_shape2 = deconv_shape2[:-1]
x = deconv2d(x, [bs] + final_shape[1:], "dl4", [3, 3], [2, 2], prevNumFeat=32)
return x
def universeHead(x, nConvs=4):
''' universe agent example
input: [None, 42, 42, 1]; output: [None, 288];
'''
print('Using universe head design')
for i in range(nConvs):
x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
# print('Loop{} '.format(i+1),tf.shape(x))
# print('Loop{}'.format(i+1),x.get_shape())
x = flatten(x)
return x
def nipsHead(x):
''' DQN NIPS 2013 and A3C paper
input: [None, 84, 84, 4]; output: [None, 2592] -> [None, 256];
'''
print('Using nips head design')
x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2], pad="VALID"))
x = flatten(x)
x = tf.nn.relu(linear(x, 256, "fc", normalized_columns_initializer(0.01)))
return x
def natureHead(x):
''' DQN Nature 2015 paper
input: [None, 84, 84, 4]; output: [None, 3136] -> [None, 512];
'''
print('Using nature head design')
x = tf.nn.relu(conv2d(x, 32, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(conv2d(x, 64, "l2", [4, 4], [2, 2], pad="VALID"))
x = tf.nn.relu(conv2d(x, 64, "l3", [3, 3], [1, 1], pad="VALID"))
x = flatten(x)
x = tf.nn.relu(linear(x, 512, "fc", normalized_columns_initializer(0.01)))
return x
def doomHead(x):
''' Learning by Prediction ICLR 2017 paper
(their final output was 64 changed to 256 here)
input: [None, 120, 160, 1]; output: [None, 1280] -> [None, 256];
'''
print('Using doom head design')
x = tf.nn.elu(conv2d(x, 8, "l1", [5, 5], [4, 4]))
x = tf.nn.elu(conv2d(x, 16, "l2", [3, 3], [2, 2]))
x = tf.nn.elu(conv2d(x, 32, "l3", [3, 3], [2, 2]))
x = tf.nn.elu(conv2d(x, 64, "l4", [3, 3], [2, 2]))
x = flatten(x)
x = tf.nn.elu(linear(x, 256, "fc", normalized_columns_initializer(0.01)))
return x
def linear(x, size, name, initializer=None, bias_init=0):
w = tf.get_variable(name + "/w", [x.get_shape()[1], size], initializer=initializer)
b = tf.get_variable(name + "/b", [size], initializer=tf.constant_initializer(bias_init))
return tf.matmul(x, w) + b
class LSTMPolicy(object):
def __init__(self, ob_space, ac_space, designHead='universe',
add_cur_model=False, add_con_model=False):
self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space), name='x')
size = 256
if designHead == 'nips':
x = nipsHead(x)
elif designHead == 'nature':
x = natureHead(x)
elif designHead == 'doom':
x = doomHead(x)
elif 'tile' in designHead:
x = universeHead(x, nConvs=2)
else:
x = universeHead(x)
if add_cur_model:
with tf.variable_scope("cur_model"):
def curiosity_model(x):
for i,size in enumerate(constants['CURIOSITY_SIZES']):
x = tf.nn.relu(linear(x, size, "cur_model_"+str(i), normalized_columns_initializer(0.01)))
return linear(x, ac_space, "cur_model_last", normalized_columns_initializer(0.01))
self.curiosity_model = curiosity_model
self.curiosity_predictions = curiosity_model(x)
self.cur_model_sample = categorical_sample(self.curiosity_predictions, ac_space)[0, :]
if add_con_model:
with tf.variable_scope("con_model"):
def consistency_model(x):
for i,size in enumerate(constants['CONSISTENCY_SIZES']):
x = tf.nn.relu(linear(x, size, "con_model_"+str(i), normalized_columns_initializer(0.01)))
return linear(x, ac_space, "con_model_last", normalized_columns_initializer(0.01))
self.consistency_model = consistency_model
self.consistency_predictions = consistency_model(x)
self.con_model_sample = categorical_sample(self.consistency_predictions, ac_space)[0, :]
# introduce a "fake" batch dimension of 1 to do LSTM over time dim
x = tf.expand_dims(x, [0])
lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
self.state_size = lstm.state_size
step_size = tf.shape(self.x)[:1]
c_init = np.zeros((1, lstm.state_size.c), np.float32)
h_init = np.zeros((1, lstm.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c], name='c_in')
h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h], name='h_in')
self.state_in = [c_in, h_in]
state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm, x, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
x = tf.reshape(lstm_outputs, [-1, size])
if add_cur_model:
x = tf.concat(concat_dim=1,values=[x, self.curiosity_predictions])
if add_con_model:
x = tf.concat(concat_dim=1, values=[x, self.consistency_predictions])
self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        # [0, :] picks the action of the first state in the batch. Hardcoded because
        # batch=1 during rollout collection; it's not used during batch training.
self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
self.sample = categorical_sample(self.logits, ac_space)[0, :]
self.probs = tf.nn.softmax(self.logits, dim=-1)[0, :]
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
# tf.add_to_collection('probs', self.probs)
# tf.add_to_collection('sample', self.sample)
# tf.add_to_collection('state_out_0', self.state_out[0])
# tf.add_to_collection('state_out_1', self.state_out[1])
# tf.add_to_collection('vf', self.vf)
def get_initial_features(self):
        # Call this to get freshly reset LSTM memory cells
return self.state_init
def act(self, ob, c, h):
sess = tf.get_default_session()
return sess.run([self.sample, self.vf] + self.state_out,
{self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})
def act_from_1step_cur_model(self, ob):
sess = tf.get_default_session()
return sess.run(self.cur_model_sample, {self.x: [ob]})
def predict_curiosity(self, ob):
sess = tf.get_default_session()
return sess.run(self.curiosity_predictions, {self.x: [ob]})
def act_inference(self, ob, c, h):
sess = tf.get_default_session()
return sess.run([self.probs, self.sample, self.vf] + self.state_out,
{self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})
def value(self, ob, c, h):
sess = tf.get_default_session()
return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})[0]
class SupervisedPolicyTrainer(object):
def __init__(self, ob_space, ac_space, designHead='universe'):
self.ac_space = ac_space
self.ob_space = ob_space
with tf.variable_scope("policy_trainer"):
            input_shape = [None] + list(ob_space)
            self.s1 = phi1 = tf.placeholder(tf.float32, input_shape, name="placeholder_s1")
self.asample = asample = tf.placeholder(tf.float32, [None, ac_space], name="placeholder_asample")
if designHead == 'nips':
phi1 = nipsHead(phi1)
elif designHead == 'nature':
phi1 = natureHead(phi1)
elif designHead == 'doom':
phi1 = doomHead(phi1)
elif 'tile' in designHead:
phi1 = universeHead(phi1, nConvs=2)
else:
phi1 = universeHead(phi1)
def policy_trainer(x):
for i,size in enumerate(constants['POLICY_TRAINER_SIZES']):
x = tf.nn.relu(linear(x, size, "policy_trainer_"+str(i), normalized_columns_initializer(0.01)))
return linear(x, ac_space, "policy_trainer_last", normalized_columns_initializer(0.01))
self.policy_trainer = policy_trainer
self.policy_trainer_preds = policy_trainer(phi1)
self.policy_trainer_sample = categorical_sample(self.policy_trainer_preds, ac_space)[0, :]
class StateActionPredictor(object):
def __init__(self, ob_space, ac_space, designHead='universe', imagined_weight=0.4,
no_stop_grads=False, stop_grads_forward=False, backward_model=False,
forward_sizes=[256], inverse_sizes=[256], activate_bug=False):
self.ac_space = ac_space
self.ob_space = ob_space
# input: s1,s2: : [None, h, w, ch] (usually ch=1 or 4)
# asample: 1-hot encoding of sampled action from policy: [None, ac_space]
input_shape = [None] + list(ob_space)
self.imagined_weight = imagined_weight
self.s1 = phi1 = tf.placeholder(tf.float32, input_shape, name="placeholder_s1")
self.s2 = phi2 = tf.placeholder(tf.float32, input_shape, name="placeholder_s2")
self.asample = asample = tf.placeholder(tf.float32, [None, ac_space], name="placeholder_asample")
self.con_bonus_phi_2 = tf.placeholder(tf.float32, [None,None], name="placeholder_con_bonus")
# feature encoding: phi1, phi2: [None, LEN]
print('okay using an imagined weight of', imagined_weight)
# settings that don't belong here
output_size = 256
batch_size = tf.shape(phi1)[0]
num_imagined = batch_size
if designHead == 'nips':
phi1 = nipsHead(phi1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2 = nipsHead(phi2)
elif designHead == 'nature':
phi1 = natureHead(phi1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2 = natureHead(phi2)
elif designHead == 'doom':
phi1 = doomHead(phi1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2 = doomHead(phi2)
elif 'tile' in designHead:
phi1 = universeHead(phi1, nConvs=2)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2 = universeHead(phi2, nConvs=2)
else:
phi1 = universeHead(phi1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2 = universeHead(phi2)
# forward model: f(phi1,asample) -> phi2
# predict next feature embedding
# Note: no backprop to asample of policy: it is treated as fixed for predictor training
def forward_model(phi1, asample):
f = tf.concat(1, [phi1, asample])
for i,size in enumerate(forward_sizes):
f = tf.nn.relu(linear(f, size, "forward_"+str(i), normalized_columns_initializer(0.01)))
return linear(f, phi1.get_shape()[1].value, "forward_last", normalized_columns_initializer(0.01))
self.forward_model = forward_model
self.guessed_phi2 = forward_model(phi1, asample)
self.forwardloss = 0.5 * tf.reduce_mean(tf.square(tf.subtract(self.guessed_phi2, phi2)), name='forwardloss')
self.forwardloss = self.forwardloss * 288.0 # lenFeatures=288. Factored out to make hyperparams not depend on it.
# inverse model: g(phi1,phi2) -> a_inv: [None, ac_space]
# predict action from feature embedding of s1 and s2
def inverse_model(phi1, phi2):
g = tf.concat(1,[phi1, phi2])
for i,size in enumerate(inverse_sizes):
g = tf.nn.relu(linear(g, size, "inverse_"+str(i), normalized_columns_initializer(0.01)))
return linear(g, ac_space, "inverse_last", normalized_columns_initializer(0.01))
self.inverse_model = inverse_model
# compute inverse loss on real actions
logits = inverse_model(phi1, phi2)
self.ainvprobs = tf.nn.softmax(logits, dim=-1)
aindex = tf.argmax(asample, axis=1) # aindex: [batch_size,]
self.invloss_real = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, aindex), name="invloss_real")
# compute inverse loss on placeholder embedding
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
con_logits = inverse_model(phi1, self.con_bonus_phi_2)
self.con_bonus_per_action = tf.nn.sparse_softmax_cross_entropy_with_logits(
con_logits, aindex, name="con_bonus_per_action")
self.con_bonus = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
con_logits, aindex), name="con_bonus")
# Imagine some actions and states that weren't encountered
imagined_action_idxs = tf.random_uniform(dtype=tf.int32, minval=0, maxval=ac_space, shape=[num_imagined])
imagined_actions = tf.one_hot(imagined_action_idxs, ac_space)
imagined_start_states_idxs = tf.random_uniform(dtype=tf.int32, minval=0, maxval=batch_size, shape=[num_imagined])
if no_stop_grads:
print('Not stopping gradients from consistency to encoder')
imagined_phi1 = tf.gather(phi1, imagined_start_states_idxs)
else:
print('Stopping gradients from consistency to encoder')
imagined_phi1 = tf.stop_gradient(tf.gather(phi1, imagined_start_states_idxs), name="stop_gradient_consistency_to_encoder")
# predict next state for imagined actions
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
if stop_grads_forward and not no_stop_grads:
print('Stopping grads from consistency to forward model')
imagined_phi2 = tf.stop_gradient(forward_model(imagined_phi1, imagined_actions), name="stop_grad_consistency_to_forward")
else:
print('Not stopping grads from consistency to forward model')
imagined_phi2 = forward_model(imagined_phi1, imagined_actions)
# compute inverse loss on imagined actions
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
imagined_logits = inverse_model(imagined_phi1, imagined_phi2)
self.ainvprobs_imagined = tf.nn.softmax(imagined_logits, dim=-1)
if activate_bug:
self.invloss_imagined = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, imagined_action_idxs), name="invloss_imagined")
else:
self.invloss_imagined = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
imagined_logits, imagined_action_idxs), name="invloss_imagined")
# Compute aggregate inverses loss
self.invloss = tf.add(self.invloss_real, imagined_weight * self.invloss_imagined, name="invloss")
#(1.0 - imagined_weight) *
# variable list
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def pred_act(self, s1, s2):
'''
returns action probability distribution predicted by inverse model
input: s1,s2: [h, w, ch]
output: ainvprobs: [ac_space]
'''
sess = tf.get_default_session()
return sess.run(self.ainvprobs, {self.s1: [s1], self.s2: [s2]})[0, :]
def pred_bonus(self, s1, s2, asample):
'''
returns bonus predicted by forward model
input: s1,s2: [h, w, ch], asample: [ac_space] 1-hot encoding
output: scalar bonus
'''
sess = tf.get_default_session()
# error = sess.run([self.forwardloss, self.invloss],
# {self.s1: [s1], self.s2: [s2], self.asample: [asample]})
# print('ErrorF: ', error[0], ' ErrorI:', error[1])
error = sess.run(self.forwardloss,
{self.s1: [s1], self.s2: [s2], self.asample: [asample]})
error = error * constants['PREDICTION_BETA']
return error
def consistency_pred_bonus(self, s1, asample):
sess = tf.get_default_session()
guessed_phi2 = sess.run(self.guessed_phi2, {self.s1: [s1], self.asample: [asample]})
if len(np.shape(guessed_phi2)) > 2:
guessed_phi2 = np.reshape(guessed_phi2, [1,-1])
error = sess.run(self.con_bonus, {self.s1: [s1], self.con_bonus_phi_2: guessed_phi2,
self.asample: [asample]})
return error
def consistency_bonus_all_actions(self, s1):
actions = np.zeros((self.ac_space,self.ac_space))
actions[np.arange(self.ac_space), np.arange(self.ac_space)] = 1.
np.random.shuffle(actions)
repeat_s1 = np.tile(s1,(self.ac_space,1,1,1))
sess = tf.get_default_session()
guessed_phi2 = sess.run(self.guessed_phi2, {self.s1: repeat_s1, self.asample: actions})
error = sess.run(self.con_bonus_per_action, {self.s1: repeat_s1, self.con_bonus_phi_2: guessed_phi2,
self.asample: actions})
return error
class StatePredictor(object):
'''
Loss is normalized across spatial dimension (42x42), but not across batches.
    This is unlike ICM, where there is no normalization across the 288-dimensional
    feature space nor across batches.
'''
def __init__(self, ob_space, ac_space, designHead='universe', unsupType='state'):
# input: s1,s2: : [None, h, w, ch] (usually ch=1 or 4)
# asample: 1-hot encoding of sampled action from policy: [None, ac_space]
input_shape = [None] + list(ob_space)
self.s1 = phi1 = tf.placeholder(tf.float32, input_shape)
self.s2 = phi2 = tf.placeholder(tf.float32, input_shape)
self.asample = asample = tf.placeholder(tf.float32, [None, ac_space])
self.stateAenc = unsupType == 'stateAenc'
# feature encoding: phi1: [None, LEN]
if designHead == 'universe':
phi1 = universeHead(phi1)
if self.stateAenc:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
phi2_aenc = universeHead(phi2)
elif 'tile' in designHead: # for mario tiles
phi1 = universeHead(phi1, nConvs=2)
if self.stateAenc:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                    phi2_aenc = universeHead(phi2, nConvs=2)
else:
print('Only universe designHead implemented for state prediction baseline.')
exit(1)
# forward model: f(phi1,asample) -> phi2
# Note: no backprop to asample of policy: it is treated as fixed for predictor training
f = tf.concat(1, [phi1, asample])
f = tf.nn.relu(linear(f, phi1.get_shape()[1].value, "f1", normalized_columns_initializer(0.01)))
if 'tile' in designHead:
f = inverseUniverseHead(f, input_shape, nConvs=2)
else:
f = inverseUniverseHead(f, input_shape)
self.forwardloss = 0.5 * tf.reduce_mean(tf.square(tf.subtract(f, phi2)), name='forwardloss')
if self.stateAenc:
self.aencBonus = 0.5 * tf.reduce_mean(tf.square(tf.subtract(phi1, phi2_aenc)), name='aencBonus')
self.predstate = phi1
# variable list
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def pred_state(self, s1, asample):
'''
returns state predicted by forward model
input: s1: [h, w, ch], asample: [ac_space] 1-hot encoding
output: s2: [h, w, ch]
'''
sess = tf.get_default_session()
return sess.run(self.predstate, {self.s1: [s1],
self.asample: [asample]})[0, :]
def pred_bonus(self, s1, s2, asample):
'''
returns bonus predicted by forward model
input: s1,s2: [h, w, ch], asample: [ac_space] 1-hot encoding
output: scalar bonus
'''
sess = tf.get_default_session()
bonus = self.aencBonus if self.stateAenc else self.forwardloss
error = sess.run(bonus,
{self.s1: [s1], self.s2: [s2], self.asample: [asample]})
# print('ErrorF: ', error)
error = error * constants['PREDICTION_BETA']
return error
|
from django.contrib import admin
# Register your models here.
from .models import Profile
from django.utils.html import format_html
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ('ph_no', 'company', 'designation')
def image_tag(self, profile):
        return format_html('<img src="{}" />', profile.photo.url)
image_tag.short_description = 'Image of the user'
|
DEBUG = True
"""邮件配置"""
MAIL_SERVER = 'smtp.163.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'melondean@163.com'
MAIL_PASSWORD = 'admin123'
MAIL_DEFAULT_SENDER = ('CZAOAO', 'melondean@163.com')
|
import turtle
def drawSq(the_turtle):
for x in range(1,5):
the_turtle.forward(100)
the_turtle.right(90)
def draw_square():
window = turtle.Screen()
window.bgcolor("yellow") #turns the shell's window yellow
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("blue")
brad.speed(2)
for i in range(1,36):
drawSq(brad)
brad.right(10)
## angie = turtle.Turtle() #init
## angie.shape("arrow")
## angie.color("black")
## angie.circle(100)
##
##
## billie = turtle.Turtle()
## billie.shape("classic")
## billie.color("purple")
## billie.forward(100)
## billie.right(45)
## billie.backward(100)
## billie.right(65)
## billie.forward(75)
##
window.exitonclick()
draw_square()
|
from django.shortcuts import render
from django.http import HttpResponse
def openhomepage(request):
type="home"
return render(request, "home.html", {"type": type})
def UserLogin(request):
type = request.GET.get("type")
return render(request,"home.html",{"type":type})
def booking(request):
type = request.GET.get("type")
return render(request,"home.html",{"type":type})
def gallary(request):
type =request.GET.get("type")
return render(request,"home.html",{"type":type})
def Register(request):
type =request.GET.get("type")
return render(request,"home.html",{"type":type})
|
import logging
from flask import Flask, request, g, render_template, Response
from functools import reduce
from os import getenv
import uuid
import time
import structlog
from pymongo import MongoClient
import traceback
import prometheus_client
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
COUNTER_PAGES_SERVED = prometheus_client.Counter('web_pages_served', 'Number of pages served by frontend')
HISTOGRAM_PAGE_GEN_TIME = prometheus_client.Histogram('web_page_gen_time', 'Page generation time')
logg = logging.getLogger('werkzeug')
logg.disabled = True # disable default logger
log = structlog.get_logger()
structlog.configure(processors=[
structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S"),
structlog.stdlib.add_log_level,
# to see indented logs in the terminal, uncomment the line below
# structlog.processors.JSONRenderer(indent=2, sort_keys=True)
# and comment out the one below
structlog.processors.JSONRenderer(sort_keys=True)
])
app = Flask(__name__)
def connect_db():
try:
db = MongoClient(
getenv('MONGO', 'mongo'),
int(getenv('MONGO_PORT', '27017'))
)
db.admin.command('ismaster')
except Exception as e:
log.error('connect_db',
service='web',
message="Failed connect to Database",
traceback=traceback.format_exc(e),
)
else:
log.info('connect_to_db',
service='web',
message='Successfully connected to database',
)
return db
def get_word(word):
return g.db.words.find_one( {'word': word } )
def get_word_id(word):
search = get_word(word)
if search and '_id' in search:
return search['_id']
return None
def get_pages_id (word_id):
search = g.db.words.find_one({ '_id': word_id })
if search and 'ref_pages' in search:
return search['ref_pages']
return None
def get_page_by_id (page_id):
return g.db.pages.find_one( {'_id': page_id} )
def get_page_score (page_id):
page = get_page_by_id(page_id)
return len(page['ref_pages']) if page and 'ref_pages' in page else 0
def intersect(a, b):
return list(set(a) & set(b))
@app.before_request
def before_request():
g.request_start_time = time.time()
g.db_connection = connect_db()
g.db = g.db_connection.search_engine
g.request_time = lambda: (time.time() - g.request_start_time)
@app.route('/metrics')
def metrics():
return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST)
@app.route('/')
def start():
phrase = request.args.get('query', '').split()
COUNTER_PAGES_SERVED.inc()
if not phrase:
return render_template('index.html', gen_time=g.request_time())
word_ids = []
for word in phrase:
word_id = get_word_id(word)
print(word_id)
if not word_id:
return render_template('index.html', gen_time=g.request_time())
word_ids.append(word_id)
pages_ids = {}
for word_id in word_ids:
pages_ids[word_id] = get_pages_id(word_id)
pages = reduce(intersect, [pages_ids[word_id] for word_id in pages_ids])
res = []
for page_id in pages:
url = get_page_by_id(page_id)['url']
score = get_page_score(page_id)
res.append((score, url))
res.sort(reverse=True)
return render_template('index.html', gen_time=g.request_time(), result=res)
@app.after_request
def after_request(response):
HISTOGRAM_PAGE_GEN_TIME.observe(g.request_time())
request_id = request.headers['Request-Id'] \
if 'Request-Id' in request.headers else uuid.uuid4()
log.info('request',
service='web',
request_id=request_id,
addr=request.remote_addr,
path=request.path,
args=request.args,
method=request.method,
response_status=response.status_code)
return response
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'db_connection'):
g.db_connection.close()
# Log Exceptions
@app.errorhandler(Exception)
def exceptions(e):
request_id = request.headers['Request-Id'] \
if 'Request-Id' in request.headers else None
tb = traceback.format_exc()
log.error('internal_error',
service='web',
request_id=request_id,
addr=request.remote_addr,
path=request.path,
args=request.args,
method=request.method,
traceback=tb)
return 'Internal Server Error', 500
|
from typing import Dict
from variables import district
for k,v in district.items():
for l,u in district[k].items():
district[k][l].pop('name')
for m,w in district[k][l].items():
district[k][l][m]['name']=m
district[k][l][m]['lat']=1
district[k][l][m]['long']=1
b:dict=district[k][l][m]
#print(b)
district[k][l]['name']=l
district[k]['name']=k
print(district)
|
from shared import read_input_lines, exec_cl_function
from collections import Counter
def letter_repeats(strings, recurrences=(2, 3)):
counts = {r: [] for r in recurrences}
for string in strings:
for n in recurrences:
counts[n].append(contains_letter_repeated_n(n, string))
return counts
def contains_letter_repeated_n(n, string):
'''True if string contains a letter repeated n times'''
counter = Counter(string)
return bool([k for k, v in counter.items() if v == n])
def part1():
strings = read_input_lines('../input/day2.txt')
repeats = letter_repeats(strings)
print(sum(repeats[2]) * sum(repeats[3]))
def part2():
strings = read_input_lines('../input/day2.txt')
total = len(strings)
for i, string in enumerate(strings):
others = strings[:i] + strings[i+1:]
print(f'{i} / {total}', end='\r')
        for other in others:
one_difference = differs_by_one(string, other)
if one_difference:
print(f'string1: {string}')
print(f'string2: {other}')
print(f'difference: {str(set(string).difference(other))}')
print(f'common string: {one_difference}')
return
def differs_by_one(string1, string2):
n_differing = 0
common = ''
for i, (l1, l2) in enumerate(zip(string1, string2)):
if l1 == l2:
common += l1
elif n_differing == 0:
n_differing += 1
else:
return
return common
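# Illustrative example: differs_by_one('fghij', 'fguij') returns 'fgij' (the shared
# letters with the single differing position dropped); it returns None when the
# strings differ in more than one position.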
if __name__ == '__main__':
exec_cl_function()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 8/16/2017 10:55 AM
# @Author : Winnichen
# @File : excel_reader.py
from openpyxl import Workbook
from openpyxl import load_workbook
from config import settings
class Common_ExcelReader(object):
def __init__(self,sheet):
self.wb=load_workbook(settings.testcase_path)
self.ws=self.wb.get_sheet_by_name(sheet)
def getCellValue(self,row,column):
return self.ws.cell(row=row,column=column).value
|
def indent(text, by=4, first_line=True):
r"""
>>> indent("a\nb\nc", by=1) == ' a\n b\n c'
True
"""
spaces = " " * by
lines = text.splitlines(True)
prefix = lines.pop(0) if (lines and not first_line) else ""
return prefix + "".join(spaces + line for line in lines)
|
#!/usr/bin/python
import sys
import os
import logging
activate_this = '/home/http/medlemsregistrering/venv/bin/activate_this.py'
exec(open(activate_this).read())
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/home/http/medlemsregistrering")
os.chdir("/home/http/medlemsregistrering")
from medlemsregistrering import app as application
|
import datetime
from django.db import models
# Create your models here.
class Course(models.Model):
name = models.CharField(max_length=64, verbose_name='课程名称')
desc = models.CharField(max_length=512, verbose_name='课程描述')
detail = models.TextField(verbose_name='课程详情')
level = models.CharField(
choices=(('beginner', '初级'), ('intermediate', '中级'), ('advanced', '高级')), max_length=16)
duration = models.IntegerField(default=0, verbose_name='课程时长')
num_students = models.IntegerField(default=0, verbose_name='学习人数')
num_likes = models.IntegerField(default=0, verbose_name='收藏人数')
image = models.ImageField(upload_to='courses/%Y/%m', verbose_name='封面')
num_clicks = models.IntegerField(default=0, verbose_name='点击数')
created_at = models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
class Meta:
verbose_name = '课程'
verbose_name_plural = verbose_name
class Lesson(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name='所属课程')
name = models.CharField(max_length=128, verbose_name='章节名')
created_at = models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
class Meta:
verbose_name = '课程章节'
verbose_name_plural = verbose_name
class Video(models.Model):
name = models.CharField(max_length=128, verbose_name='视频名')
lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE, verbose_name='所属章节')
created_at = models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
class Meta:
verbose_name = '课程视频'
verbose_name_plural = verbose_name
class CourseResource(models.Model):
name = models.CharField(max_length=128, verbose_name='资源名')
course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name='所属课程')
url = models.FileField(upload_to='courses/resources/%Y/%m', verbose_name='资源文件')
created_at = models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
class Meta:
verbose_name = '课程资源'
verbose_name_plural = verbose_name
|
#coding=utf8
from django.http import HttpResponse
from usr.models import Accounts
import json
# Return value type for the login action
class LoginReturnType:
def __init__(self):
self.result = 'FAILED'
self.reason = ''
def deal_login(request):
    # Read the submitted login data
ac = Accounts()
ac.usr_name = request.REQUEST.get('usr_name','null')
ac.passwd = request.REQUEST.get('passwd','null')
    # Return value object
lrt = LoginReturnType()
    # Validate the input
if ac.usr_name == 'null':
lrt.reason = 'usr_name null'
else:
if ac.passwd == 'null':
lrt.reason = 'passwd null'
else:
            # Look up the user
            result_set = Accounts.objects.filter(usr_name=ac.usr_name)
            if result_set:
if result_set[0].passwd == ac.passwd:
lrt.result = 'OK'
else:
lrt.reason = 'passwd not match'
else:
lrt.result = 'FAILED'
lrt.reason = 'no such usr_name'
ret = {"result": lrt.result, 'reason': lrt.reason}
return HttpResponse(json.JSONEncoder().encode(ret))
|
ab = {'Swaroop': 'swaroopch@byteofpython.info',
'Larry': 'larry@wall.org',
'Matsumoto': 'matz@ruby-lang.org',
'Spammer': 'spammer@hotmail.com'}
print 'Swaroop\'s address is %s'%ab['Swaroop']
ab['Guido'] = 'guido@python.org'
del ab['Spammer']
print 'There are %d contacts in the address book.'%len(ab)
for name, address in ab.items():
print 'Contact %s at %s'%(name, address)
if 'Guido' in ab:
print 'Guido\'s address is %s'%ab['Guido']
|
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from PIL import Image
from models import *
feature_extract = True
def main(args):
# Initialize the model for this run
model_ft, input_size = initialize_model(args['model'], 6, feature_extract, use_pretrained=False)
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Send the model to GPU
model_ft = model_ft.to(device)
    # Load the trained weights
model_ft.load_state_dict(torch.load(args['weight']))
since = time.time()
model_ft.eval() # Set model to evaluate mode
image = Image.open(args['input'], "r")
image = image.resize((224,224))
image = transforms.ToTensor()(image).unsqueeze_(0)
image = image.to(device, dtype=torch.float)
outputs = model_ft(image)
_, preds = torch.max(outputs, 1)
time_elapsed = time.time() - since
print('process time::',time_elapsed)
print('predicted number::', preds.item()+3)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input",help="path to input image", default = "input/4.png")
ap.add_argument("-w", "--weight",help="path to weight", default = "./weights/ResNet18_Adam_best.pth")
ap.add_argument("-m", "--model",help="name of model", choices=['resnet18', 'resnet152', 'ResNeXt-101-32x8d'], default = "resnet18")
args = vars(ap.parse_args())
main(args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import threading
import time
def receive_message():
while not stopThread:
indata = s.recv(4096).decode("utf-8")
if len(indata) == 0: # connection closed
s.close()
print('server closed connection.')
break
print(indata[:-1])
time.sleep(0.25)
HOST = '192.168.50.209'  # alternatives: '0.0.0.0', '3.121.226.198'
PORT = 5378
# initialize TCP socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
while True:
outdata = input('please input name: ')
outdata = 'HELLO-FROM ' + outdata + '\n'
s.send(outdata.encode("utf-8"))
indata = s.recv(4096).decode("utf-8")
if len(indata) == 0: # connection closed
s.close()
print('server closed connection.')
break
print(indata[:-1])
if indata != 'IN-USE\n' and indata != 'BUSY\n' and indata != 'BAD-RQST-HDR\n' and indata != 'BAD-RQST-BODY\n':
break
t = threading.Thread(target=receive_message)
t.setDaemon(True)
stopThread = False
t.start()
while True:
outdata = input('please input action: ')
if outdata == '':
continue
if outdata == '!quit':
stopThread = True
time.sleep(1)
s.close()
print('close socket')
break
elif outdata == '!who':
outdata = 'WHO\n'
s.send(outdata.encode("utf-8"))
elif '@' in outdata:
space = outdata.find(' ')
user = outdata[1:space]
msg = outdata[space+1:]
outdata = 'SEND ' + user + ' ' + msg + '\n'
s.send(outdata.encode("utf-8"))
else:
pass
time.sleep(1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import configparser
import logging
import logging.handlers
import json
from threading import Lock
import sys
import traceback
import platform
import threading
import src.common.constant as constant
from getpass import getuser
import time
import datetime
from decimal import Decimal
# if "Linux" in platform.system():
# import fcntl
EMPTY_RETURN = ""
#global dict to read json files
JSON_DICT = {}
#a lock to protect json conf file read and write
JSON_LOCK = Lock()
TERM_RED = '\033[1;31m'
TERM_NFMT = '\033[0;0m'
TERM_BLUE = '\033[1;34m'
TERM_GREEN = '\033[1;32m'
# return true if current system is Windows
def is_windows_system():
return "Windows" in platform.system()
# return true if current system is Linux
def is_linux_system():
return "Linux" in platform.system()
# return true if current system is MacOS
def is_macos_system():
return "Darwin" in platform.system()
def get_transfer_addr(ip):
"""get target address based on types: tcp, udp, ipc
Arguments:
ip {str} -- [string get from the json conf file]
Returns:
[str] -- [address string]
"""
mark = "://"
mark_index = ip.index(mark) if mark in ip else 0
type = ip[: mark_index]
if not type:
return None
if type.lower() == "tcp" or type.lower() == "udp":
return ip
elif type.lower() == "ipc":
cur_dir = os.path.dirname(os.path.dirname(__file__))
file_name = ip[mark_index + len("://"):] # the len("://") is 3
path = os.path.join(cur_dir, "ipc")
if not os.path.exists(path):
os.makedirs(path)
path = "ipc://" + os.path.join(path, file_name)
return path
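    # Illustrative examples (hypothetical values):
    #   get_transfer_addr("tcp://127.0.0.1:5555") -> "tcp://127.0.0.1:5555" (unchanged)
    #   get_transfer_addr("ipc://feed")           -> "ipc://<project_dir>/ipc/feed" (folder created if missing)
    #   get_transfer_addr("127.0.0.1:5555")       -> None (no "://" scheme marker)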
def get_current_func_name():
return (sys._getframe().f_code.co_filename + " : " + sys._getframe().f_code.co_name + "()")
# TODO: test this
def get_common_parent_path(file=None):
    """
    Return the path one level above the common folder (this file lives in common/).
    file {string} -- file name to join onto the parent path, if needed
"""
parent = os.path.dirname(os.path.dirname(__file__))
if file:
result = os.path.join(parent, file)
return result
else:
return parent
def get_json_config(file, section , key=None, default = EMPTY_RETURN):
"""get json file
Arguments:
file {string} -- absolute file path
section {string} -- level1 key
Keyword Arguments:
key {string} -- level2 key (default: {None})
Returns:
dict -- json dict
"""
try:
global JSON_LOCK
with JSON_LOCK:
global JSON_DICT
if file not in JSON_DICT:
if os.path.exists(file):
if os.path.getsize(file):
with open(file, mode="r", encoding="utf-8") as json_file:
# if is_linux_system():
# fcntl.flock(json_file, fcntl.LOCK_EX)
global_dict = json.load(json_file)
JSON_DICT[file] = global_dict
else:
JSON_DICT[file] = {}
else:
JSON_DICT[file] = {}
if section in JSON_DICT[file]:
if key and key in JSON_DICT[file][section]:
data = JSON_DICT[file][section][key]
elif key and key not in JSON_DICT[file][section]:
data = default
else:
data = JSON_DICT[file][section]
else:
data = JSON_DICT[file]
return data
except:
traceback.print_exc()
return {}
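    # Illustrative example (hypothetical file contents): for a JSON file holding
    # {"ip_section": {"tcp": "tcp://127.0.0.1:5555"}}:
    #   get_json_config(f, "ip_section", "tcp") -> "tcp://127.0.0.1:5555"
    #   get_json_config(f, "ip_section")        -> the whole "ip_section" dict
    #   get_json_config(f, "ip_section", "udp") -> default (EMPTY_RETURN)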
def set_json_config(file, section, value, key=None):
"""set json file
Arguments:
file {string} -- absolute file path
section {string} -- level1 key
Keyword Arguments:
key {string} -- level2 key (default: {None})
Returns:
dict -- json dict
"""
try:
global JSON_DICT
global JSON_LOCK
with JSON_LOCK:
if file not in JSON_DICT:
if os.path.exists(file):
if os.path.getsize(file):
with open(file, mode="r", encoding="utf-8") as json_file:
# if is_linux_system():
# fcntl.flock(json_file, fcntl.LOCK_EX)
global_dict = json.load(json_file)
JSON_DICT[file] = global_dict
else:
JSON_DICT[file] = {}
else:
JSON_DICT[file] = {}
if section not in JSON_DICT[file]:
JSON_DICT[file][section] = {}
if key:
JSON_DICT[file][section][key] = value
else:
JSON_DICT[file][section] = value
with open(file, mode="w", encoding="utf-8") as json_file:
# if is_linux_system():
# fcntl.flock(json_file, fcntl.LOCK_EX)
data = json.dumps(JSON_DICT[file], ensure_ascii=False, indent=4)
json_file.write(data)
except:
traceback.print_exc()
def get_ini_config(file, section, key):
try:
config = configparser.ConfigParser()
config.read(file)
return config.get(section, key)
except:
traceback.print_exc()
return EMPTY_RETURN
def set_ini_config(file, section, key, value):
try:
config = configparser.ConfigParser()
config.read(file)
config.set(section, key, value)
config.write(open(file, "w"))
except:
traceback.print_exc()
def setup_logger(log_file_name, log_level=logging.INFO, print_level=logging.INFO, log_path=None, file_size=None):
"""
Init LOG module here
"""
    # 1. Decide which folder the logs go to
    LOG_DIR = './logs'
    # 2. Build the log folder path
if is_windows_system():
        # On Windows: if no log_path is configured, use the "log" folder next to this package
if not log_path:
LOG_DIR = os.path.join(get_common_parent_path(), "log")
LOG_FILE = os.path.join(LOG_DIR, log_file_name + ".log")
else:
LOG_DIR = log_path
LOG_FILE = os.path.join(LOG_DIR, log_file_name+".log")
elif is_linux_system() or is_macos_system():
        # On Linux: if no log_path is configured, use a folder under /tmp
if not log_path:
LOG_DIR = "/tmp/coin_trade/log/" + getuser() + "/default_log" #配置到统一 /tmp文件夹
LOG_FILE = LOG_DIR + "/" + log_file_name + ".log"
else:
LOG_DIR = log_path
LOG_FILE = os.path.join(LOG_DIR, log_file_name + ".log")
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
    # Choose the rotation strategy: by file size or by time
if file_size:
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=file_size, backupCount=60)
else:
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE, when='D', interval=1)
handler.suffix = '%Y-%m-%d.log'
fmt = '%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s ---- %(message)s'
    formatter = logging.Formatter(fmt)  # build the formatter
    handler.setFormatter(formatter)  # attach the formatter to the handler
    logger = logging.getLogger(log_file_name)  # get the logger named after the log file
    logger.addHandler(handler)  # attach the handler to the logger
#DEBUG 20180418
logger.setLevel(log_level)
# Prints logger info to terminal
ch = logging.StreamHandler()
#DEBUG 20180418
ch.setLevel(print_level)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
    # Open up permissions on the log file and folder
    os.chmod(LOG_FILE, 0o777)  # 0o prefix = octal; 777 = full read/write/execute
os.chmod(LOG_DIR, 0o777)
return logger
def setup_save_data_logger(file_name, level=logging.INFO, isPrint=False, path=None, file_size=None):
"""
Init LOG module here
"""
    # 1. Decide which folder the data logs go to
    # 2. Build the log folder path
if is_windows_system():
        # On Windows: if no path is configured, use the "log" folder next to this package
if not path:
LOG_DIR = os.path.join(get_common_parent_path(), "log")
LOG_FILE = os.path.join(LOG_DIR, file_name + ".dict")
else:
LOG_DIR = path
LOG_FILE = os.path.join(LOG_DIR, file_name + ".dict")
elif is_linux_system():
        # On Linux: if no path is configured, use a folder under /tmp
if not path:
json_dir = os.path.join(get_common_parent_path(), "global.json")
folder_name = get_json_config(file=json_dir, section="path", key="folder_name")
LOG_DIR = "/tmp/coin_trade/log/" + getuser() + "/" + folder_name #配置到统一 /tmp文件夹
LOG_FILE = LOG_DIR + "/" + file_name + ".dict"
else:
LOG_DIR = path
LOG_FILE = os.path.join(LOG_DIR, file_name + ".dict")
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
    # Choose the rotation strategy: by file size or by time
if file_size:
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=file_size, backupCount=60)
else:
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE, when='D', interval=1)
handler.suffix = '%Y-%m-%d.dict'
fmt = '%(message)s'
    formatter = logging.Formatter(fmt)  # build the formatter
    handler.setFormatter(formatter)  # attach the formatter to the handler
    logger = logging.getLogger(file_name)  # get the logger named after the data file
    logger.addHandler(handler)  # attach the handler to the logger
logger.setLevel(level)
# Prints logger info to terminal
if isPrint:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
    # Set log file permissions
    os.chmod(LOG_FILE, 0o777) # 0o marks octal; 777 grants read/write/execute to everyone
os.chmod(LOG_DIR, 0o777)
return logger
def print_error(actual_func):
"""decorator to print exception
打印错误的装饰器糖
"""
def my_decorate(*args, **keyargs):
try:
return actual_func(*args, **keyargs)
except:
print("Error execute: {}".format(actual_func.__name__))
traceback.print_exc()
return my_decorate
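# A minimal usage sketch of the print_error decorator (hypothetical function,
# never called here): the ZeroDivisionError is caught and printed instead of
# propagating.
def _demo_print_error():
    @print_error
    def risky():
        return 1 / 0
    risky()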
def call_timer(execute_method=None, args=[]):
"""
通过timer执行指定函数
:param execute_method:
:param args:
:return:
"""
if execute_method:
try:
timer = threading.Timer(constant.TIMER_INTERVAL_NOW, execute_method, args)
timer.start()
except:
print("Error: call_timer error.")
traceback.print_exc()
else:
print("Error: call_timer error,execute_method is None.")
def get_current_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
def format_timestamp_to_date(timestamp):
"""
时间戳(毫秒级、微秒级)转换成日期
:param timestamp:
:return:
"""
ts_str = str(timestamp)
return time.strftime("%Y-%m-%d", time.localtime(int(ts_str[:10])))
def get_yestoday(mytime, format="%Y-%m-%d"):
"""
获取当前日期的前一天
:param mytime:
:param format:
:return:
"""
myday = datetime.datetime.strptime(mytime, format)
my_yestoday = myday - datetime.timedelta(days=1)
return my_yestoday.strftime(format)
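# A minimal usage sketch of the date helpers above (never called here). The
# leap-day assertion is deterministic; the timestamp result depends on the
# local time zone, so it is only printed.
def _demo_date_helpers():
    assert get_yestoday("2020-03-01") == "2020-02-29"
    print(format_timestamp_to_date(1577836800000))  # millisecond timestamp near 2020-01-01 UTC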
def get_current_trade_day():
return get_current_time()[:len("2000-01-01")]
def get_port_from_address(address):
"""
    find the port (as a string) in an IP address
:param address:
:return:
"""
try:
index1 = address.find("://")+1
index2 = address.find(":", index1)+1
port = address[index2:]
return port
except:
traceback.print_exc()
return None
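# A minimal usage sketch of get_port_from_address (never called here); the
# address is a made-up ZeroMQ-style endpoint.
def _demo_get_port():
    assert get_port_from_address("tcp://127.0.0.1:5555") == "5555"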
def get_availabel_ip(is_tcp=False):
    """Read the JSON config to get all default available IPs.
    Arguments:
        is_tcp {bool} -- whether TCP mode is used; defaults to False (IPC mode)
    """
json_path = get_common_parent_path("global.json")
ip_section = "ip_section"
ip_dict = get_json_config(file=json_path, section=ip_section)
ip = "" #TODO
tcp_ips = ip_dict["tcp"]
ipc_ips = ip_dict["ipc"]
if is_tcp:
return tcp_ips
else:
return ipc_ips
def set_default_address(address, is_tcp=False):
"""设置json的ip配置项
Arguments:
address_list {list} -- 可用列表
is_tcp {bool} -- 是否是tcp模式 (default: {False})
"""
ip_section = 'ip_section'
key = "tcp" if is_tcp else "ipc"
json_path = get_common_parent_path("global.json")
colon_index_1 = address.find(":")
colon_index_2 = address.find(":",colon_index_1 + 1) + 1
port = int(address[colon_index_2: ])
set_json_config(file=json_path, section=ip_section, key=key, value=port)
# Time zone conversion: convert a UTC datetime to local time
def utc_local(utc_st):
now_stamp = time.time()
local_time = datetime.datetime.fromtimestamp(now_stamp)
utc_time = datetime.datetime.utcfromtimestamp(now_stamp)
offset = local_time - utc_time
local_st = utc_st + offset
return local_st
# def main():
# # mfile = get_common_parent_path("global.json")
# m = setup_logger(log_file_name="testname")
# # print(get_json_config(file=mfile, section="data_proxy",key="proxy_bind_server_request"))
# # set_json_config(file=mfile, section="te2st",key="test_key1",value="ddd")
# # print(JSON_DICT)
def current_milli_ts() -> str:
return str(int(time.time() * 1000))
def current_time_string() -> str:
return datetime.datetime.now().strftime("%Y%m%d%H%M%S")
def add(a, b) -> str:
return str(Decimal(a) + Decimal(b))
def sub(a, b) -> str:
return str(Decimal(a) - Decimal(b))
def mul(a, b) -> str:
return str(Decimal(a) * Decimal(b))
def div(a, b) -> str:
return str(Decimal(a) / Decimal(b))
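# A minimal usage sketch of the Decimal helpers above (never called here):
# string-based Decimal arithmetic avoids binary floating point artefacts such
# as 0.1 + 0.2 == 0.30000000000000004.
def _demo_decimal_math():
    assert add("0.1", "0.2") == "0.3"
    assert sub("1", "0.9") == "0.1"
    assert mul("1.1", "3") == "3.3"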
if __name__ == '__main__':
# get_one_availabel_addr()
pass
|
import pyfaidx
import argparse
from tqdm import tqdm
import pandas as pd
def parse_args():
parser=argparse.ArgumentParser(description="get gc content from a foreground bed file")
parser.add_argument("-i","--input_bed", help="bed file in narrow peak format - we will find gc content of these regions centered on the summit")
parser.add_argument("-c","--chrom_sizes",type=str, required=True, help="TSV file with chromosome name in first column and size in the second column")
parser.add_argument("-g", "--genome", help="reference genome fasta")
parser.add_argument("-op", "--output_prefix", help="output file prefix for storing gc-content values of given foreground bed")
parser.add_argument("-il","--inputlen",type=int,default=2114, help="inputlen to use to find gc-content")
return parser.parse_args()
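# Hypothetical invocation of this script (the script name and all file names
# below are placeholders for your own data):
#   python get_gc_content.py -i peaks.narrowPeak -c hg38.chrom.sizes \
#       -g hg38.fa -op foreground_gc -il 2114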
def main(args):
chrom_sizes_dict = {line.strip().split("\t")[0]:int(line.strip().split("\t")[1]) for line in open(args.chrom_sizes).readlines()}
ref=pyfaidx.Fasta(args.genome)
data=pd.read_csv(args.input_bed,header=None,sep='\t')
    assert(args.inputlen%2 == 0) # for symmetry
num_rows=str(data.shape[0])
print("num_rows:"+num_rows)
outf=open(args.output_prefix+".bed",'w')
filtered_points=0
for index,row in tqdm(data.iterrows()):
chrom=row[0]
start=row[1]
end=row[2]
summit=start+row[9]
start=summit-args.inputlen//2
end=summit+args.inputlen//2
if start < 0:
filtered_points+=1
continue
if end > chrom_sizes_dict[chrom]:
filtered_points+=1
continue
# calculate gc content when centered at summit
seq=str(ref[chrom][start:end]).upper()
g=seq.count('G')
c=seq.count('C')
gc=g+c
gc_fract=round(gc/len(seq),2)
outf.write(chrom+'\t'+str(start)+'\t'+str(end)+'\t'+str(gc_fract)+"\n")
outf.close()
print("Number of regions filtered because inputlen sequence cannot be constructed: " + str(filtered_points))
print("Percentage of regions filtered " + str(round(filtered_points*100.0/data.shape[0],3)) + "%" )
if round(filtered_points*100.0/data.shape[0],3) > 25:
print("WARNING: If percentage of regions filtered is high (>25%) - your genome is very small - consider using a reduced input/output length for your genome")
if __name__=="__main__":
args=parse_args()
main(args)
|
import pytest
@pytest.mark.asyncio
async def test_llen(redis):
length = await redis.llen('foo')
assert 0 == length
redis._redis.lpush('foo', 'bar')
length = await redis.llen('foo')
assert 1 == length
@pytest.mark.asyncio
async def test_lpush(redis):
ret = await redis.lpush('foo', 'bar')
assert 1 == ret
ret = await redis.lpush('foo', 'baz')
assert 2 == ret
ret = await redis.lpush('foo', 'blub', 'blargh')
assert 4 == ret
assert 4 == redis._redis.llen('foo')
assert [b'blargh', b'blub', b'baz', b'bar'] == redis._redis.lrange('foo', 0, -1)
@pytest.mark.asyncio
async def test_lpop(redis):
redis._redis.lpush('foo', 'bar', 'baz', 'blub', 'blargh')
ret = await redis.lpop('foo')
assert b'blargh' == ret
ret = await redis.lpop('foo', encoding='utf-8')
assert 'blub' == ret
@pytest.mark.asyncio
async def test_rpush(redis):
ret = await redis.rpush('foo', 'bar')
assert 1 == ret
ret = await redis.rpush('foo', 'baz')
assert 2 == ret
ret = await redis.rpush('foo', 'blub', 'blargh')
assert 4 == ret
assert 4 == redis._redis.llen('foo')
assert [b'bar', b'baz', b'blub', b'blargh'] == redis._redis.lrange('foo', 0, -1)
@pytest.mark.asyncio
async def test_rpop(redis):
redis._redis.lpush('foo', 'bar', 'baz', 'blub', 'blargh')
ret = await redis.rpop('foo')
assert b'bar' == ret
ret = await redis.rpop('foo', encoding='utf-8')
assert 'baz' == ret
|
"""
-------------------------------------------------------------------------------
| Copyright 2016 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
------------------------------------------------------------------------------
"""
# dlaService - functions that interact with ArcGIS Online/Portal Services - refactored from dla.py and dlaPublish.py in Dec 2016
# ----------------------------------------------------------------------------------------------------------------------
'''
'''
import arcpy,dla,xml.dom.minidom,os
import json, urllib
import http.client as client
import urllib.parse as parse
import urllib.request as request
_chunkSize = 100
def getOIDs(targetUrl,expr):
# get the list of oids.
ids = []
arcpy.SetProgressor("default","Querying Existing Rows")
arcpy.SetProgressorLabel("Querying Existing Rows")
url = targetUrl + '/query'
#dla.addMessage("Url:"+url)
token = getSigninToken()
if expr != '':
params = {'f': 'pjson', 'where': expr,'token':token,'returnIdsOnly':'true'}
else:
params = {'f': 'pjson', 'where': '1=1','token':token,'returnIdsOnly':'true'}
#dla.addMessage("Params:"+json.dumps(params))
result = sendRequest(url,params)
try:
if result['error'] != None:
retval = False
dla.addMessage("Query Rows from Service failed")
dla.addMessage(json.dumps(result))
error = True
except:
ids = result['objectIds']
lenFound = len(ids)
msg = str(lenFound) + " Rows found in existing Service"
print(msg)
dla.addMessage(msg)
retval = True
return ids
def deleteRows(source,targetUrl,expr):
# delete Rows using chunks of _chunkSize
retval = False
error = False
# delete section
ids = getOIDs(targetUrl,expr)
try:
lenDeleted = 100
#Chunk deletes using chunk size at a time
rowsProcessed = 0
numFeat = len(ids)
if numFeat == 0:
dla.addMessage("0 Rows to Delete, exiting")
return True # nothing to delete is OK
if numFeat > _chunkSize:
chunk = _chunkSize
else:
chunk = numFeat
arcpy.SetProgressor("default","Deleting Rows")
while rowsProcessed < numFeat and error == False:
#Chunk deletes using chunk size at a time
next = rowsProcessed + chunk
msg = "Deleting rows " + str(rowsProcessed) + ":" + str(next)
dla.addMessage(msg)
arcpy.SetProgressorLabel(msg)
oids = ",".join(str(e) for e in ids[rowsProcessed:next])
url = targetUrl + '/deleteFeatures'
token = getSigninToken()
params = {'f': 'pjson', 'objectIds': oids,'token':token}
result = sendRequest(url,params)
try:
if result['error'] != None:
retval = False
dla.addMessage("Delete rows from Service failed")
dla.addMessage(json.dumps(result))
error = True
except:
try:
lenDeleted = len(result['deleteResults'])
total = rowsProcessed + chunk
if total > numFeat:
total = numFeat
msg = str(lenDeleted) + " rows deleted, " + str(total) + "/" + str(numFeat)
print(msg)
dla.addMessage(msg)
retval = True
except:
retval = False
error = True
dla.showTraceback()
dla.addMessage("Delete rows from Service failed")
dla.addError(json.dumps(result))
rowsProcessed += chunk
except:
retval = False
error = True
dla.showTraceback()
dla.addMessage("Delete rows from Service failed")
pass
return retval
def addRows(source,targetUrl,expr):
# add rows using _chunkSize
retval = False
error = False
# add section
try:
arcpy.SetProgressor("default","Adding Rows")
arcpy.SetProgressorLabel("Adding Rows")
rowjs = rowsToJson(source)
url = targetUrl + '/addFeatures'
try:
numFeat = len(rowjs['features'])
except:
numFeat = 0
if numFeat == 0:
dla.addMessage("0 Rows to Add, exiting")
return True # nothing to add is OK
if numFeat > _chunkSize:
chunk = _chunkSize
else:
chunk = numFeat
rowsProcessed = 0
while rowsProcessed < numFeat and error == False:
next = rowsProcessed + chunk
rows = rowjs['features'][rowsProcessed:next]
msg = "Adding rows " + str(rowsProcessed) + ":" + str(next)
dla.addMessage(msg)
arcpy.SetProgressorLabel(msg)
token = getSigninToken()
params = {'rollbackonfailure': 'true','f':'json', 'token':token, 'features': json.dumps(rows)}
result = sendRequest(url,params)
try:
if result['error'] != None:
retval = False
dla.addMessage("Add rows to Service failed")
dla.addMessage(json.dumps(result))
error = True
except:
try:
lenAdded = len(result['addResults'])
total = rowsProcessed + chunk
if total > numFeat:
total = numFeat
msg = str(lenAdded) + " rows added, " + str(total) + "/" + str(numFeat)
print(msg)
dla.addMessage(msg)
retval = True
except:
retval = False
dla.addMessage("Add rows to Service failed. Unfortunately you will need to re-run this tool.")
#dla.showTraceback()
#dla.addError(json.dumps(result))
error = True
rowsProcessed += chunk
except:
retval = False
dla.addMessage("Add rows to Service failed")
dla.showTraceback()
error = True
pass
return retval
def doPublishHttp(source,targetUrl,expr,useReplaceSettings):
# logic for publishing to service registered on Portal or ArcGIS Online
retval = True
token = getSigninToken()
if token == None:
dla.addError("Unable to retrieve token, exiting")
return False
dla.setupProxy()
if expr != '' and useReplaceSettings == True:
arcpy.SetProgressor("default","Deleting Existing Rows")
arcpy.SetProgressorLabel("Deleting Existing Rows")
retval = deleteRows(source,targetUrl,expr)
if retval == True:
arcpy.SetProgressor("default","Appending Rows")
arcpy.SetProgressorLabel("Appending Rows")
#retval = dla.appendRows(source,targetUrl,expr) does this work from a local file gdb? I can't make it...
retval = addRows(source,targetUrl,expr)
return retval
def rowsToJson(dataset):
# converts a feature class/table to a json dictionary representation
try:
rows = arcpy.FeatureSet(dataset) # Load the feature layer into a feature set
except:
rows = arcpy.RecordSet(dataset) # Load the feature layer into a feature set
desc = arcpy.Describe(rows) # use the json property of the feature set
return json.loads(desc.json)
def sendRequest(url, qDict=None, headers=None):
"""Robust request maker - from github https://github.com/khibma/ArcGISProPythonAssignedLicensing/blob/master/ProLicense.py"""
#Need to handle chunked response / incomplete reads. 2 solutions here: http://stackoverflow.com/questions/14442222/how-to-handle-incompleteread-in-python
    #This function sends a request and handles incomplete reads. However it's found to be very slow; it adds 30 seconds to chunked
    #responses. Forcing the connection to HTTP/1.0 at the top, for some reason, makes it faster.
qData = parse.urlencode(qDict).encode('UTF-8') if qDict else None
reqObj = request.Request(url)
if headers != None:
for k, v in headers.items():
reqObj.add_header(k, v)
try:
if qDict == None: #GET
r = request.urlopen(reqObj)
else: #POST
r = request.urlopen(reqObj, qData)
responseJSON=""
while True:
try:
responseJSONpart = r.read()
except client.IncompleteRead as icread:
responseJSON = responseJSON + icread.partial.decode('utf-8')
continue
else:
responseJSON = responseJSON + responseJSONpart.decode('utf-8')
break
return (json.loads(responseJSON))
except Exception as RESTex:
print("Exception occurred making REST call: " + RESTex.__str__())
def openRequest(url,params):
"""
Open an http request, handles the difference between urllib and urllib2 implementations if the includes are
done correctly in the imports section of this file. Currently disabled.
"""
response = None
if uselib2 == True:
data = urllib.urlencode(params)
data = data.encode('utf8')
req = urllib2.Request(url,data)
response = urllib2.urlopen(req)
else:
data = parse.urlencode(params)
data = data.encode('utf8')
req = request.Request(url,data)
response = request.urlopen(req)
return response
def getSigninToken():
data = arcpy.GetSigninToken()
token = None
if data is not None:
token = data['token']
#expires = data['expires']
#referer = data['referer']
else:
arcpy.AddMessage("Error: No token - Please sign in to ArcGIS Online or your Portal to continue")
return token
## Added May 2016
def hasCapabilities(url,token,checkList):
hasit = False
if token != None and isFeatureLayerUrl(url):
params = {'f': 'pjson', 'token':token}
response = sendRequest(url,params)
if response != None:
try:
error = json.dumps(response['error'])
dla.addError('Unable to access service properties ' + error)
return False
except:
hasit = True
try:
capabilities = json.dumps(response['capabilities'])
dla.addMessage('Service REST capabilities: ' + capabilities)
for item in checkList:
if capabilities.find(item) == -1:
#dla.addMessage('Service does not support: ' + item)
hasit = False
else:
dla.addMessage('Service supports: ' + item)
except:
dla.addError('Unable to access service capabilities')
hasit = False
else:
dla.addError('Unable to access service')
hasit = False
return hasit
def getServiceName(url):
parts = url.split('/')
lngth = len(parts)
if len(parts) > 8:
dla.addMessage("Service Name: " + parts[len(parts)-3])
return parts[len(parts)-3]
else:
return None
def isFeatureLayerUrl(url):
# assume layer string has already had \ and GIS Servers or other characters switched to be a url
parts = url.split('/')
lngth = len(parts)
try:
# check for number at end
# last = int(parts[lngth-1])
if parts[lngth-2] == 'FeatureServer':
return True
except:
dla.addError("2nd last part of url != 'FeatureServer'")
return False
def checkLayerIsService(layerStr):
## moved here from dlaPublish
# Check if the layer string is a service
if layerStr.lower().startswith("http") == True or layerStr.lower().startswith("gis servers") == True:
return True
else:
return False
def checkServiceCapabilities(pth,checklist):
res = False
if pth == None:
dla.addMessage('Error: No path available for layer')
return False
if checkLayerIsService(pth):
url = pth
if isFeatureLayerUrl(url):
data = arcpy.GetSigninToken()
token = data['token']
name = getServiceName(url)
if name != None:
# checklist is a list like: ['Create','Delete']
res = hasCapabilities(url,token,checklist)
if res != True:
dla.addMessage('WARNING: ' + name + ' does not have ' + '+'.join(checklist) + ' privileges')
dla.addMessage('Verify the service properties for: ' + url)
dla.addMessage('This tool will not run until this is addressed')
return res
else:
dla.addMessage("Unable to retrieve Service name from the url")
return res
else:
dla.addMessage(pth + ' Does not appear to be a feature service layer, exiting. Check that you selected a layer not a service')
return False
else:
return None # if it's not a service return None
def validateSourceUrl(pth):
valid = checkServiceCapabilities(pth,['Query'])
return valid # can be None, True, False
def validateTargetUrl(pth):
valid = checkServiceCapabilities(pth,['Create','Delete'])
return valid # can be None, True, False
def validateTargetAppend(pth):
valid = checkServiceCapabilities(pth,['Create'])
return valid # can be None, True, False
def validateTargetReplace(pth):
valid = validateTargetUrl(pth)
return valid # can be None, True, False
|
import sys
import os
import shutil
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/families')
sys.path.insert(0, 'tools/trees')
sys.path.insert(0, 'tools/mappings')
import experiments as exp
import fam
from ete3 import Tree
from ete3 import SeqGroup
import get_dico
from read_tree import read_tree
from read_tree import read_trees_list
def get_species_to_keep(inputdir, species_to_prune):
species_tree = Tree(fam.get_species_tree(inputdir), format = 1)
all_species = set(species_tree.get_leaf_names())
return all_species - set(species_to_prune)
"""
detach the current node from its parent
return the pruned subtree (which needs to be reattached)
return None if the whole subtree should be pruned
"""
def prune_rec(node, to_keep):
node.detach()
children = node.get_children()
if (len(children) == 0):
#leaf
if (node.name in to_keep):
return node
else:
return None
ok_children = []
for child in children:
ok_child = prune_rec(child, to_keep)
if (ok_child != None):
ok_children.append(ok_child)
if (len(ok_children) == 0):
return None
if (len(ok_children) == 1):
return ok_children[0]
for child in ok_children:
node.add_child(child)
return node
def prune(tree, to_keep):
return prune_rec(tree, to_keep)
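# A minimal sketch of prune on a hand-written tree (hypothetical, never called
# here): only leaves A and C survive, and unary internal nodes collapse.
def _demo_prune():
    t = Tree("((A,B),(C,D));")
    kept = prune(t, {"A", "C"})
    return kept.write()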
def extract_species_tree(inputdir, outputdir, species_to_keep):
input_tree = fam.get_species_tree(inputdir)
output_tree = fam.get_species_tree(outputdir)
with open(output_tree, "w") as writer:
for tree in read_trees_list(input_tree):
tree = prune(tree, species_to_keep)
writer.write(tree.write())
writer.write("\n")
def extract_family(family, inputdir, outputdir, gene_method, subst_model, species_to_keep):
print(family)
#tree = Tree(fam.get_true_tree(inputdir, family), format = 1)
species_to_genes = get_dico.get_species_to_genes_family(inputdir, family)
new_species_to_genes = {}
genes_to_keep = set()
for species in species_to_genes:
if (species in species_to_keep):
new_species_to_genes[species] = species_to_genes[species]
genes_to_keep |= set(species_to_genes[species])
if (len(genes_to_keep) < 4):
return
fam.init_family_directories(outputdir, family)
fam.write_phyldog_mapping(new_species_to_genes, fam.get_mappings(outputdir, family))
input_gene_tree = fam.build_gene_tree_path(inputdir, subst_model, family, gene_method)
if (os.path.isfile(input_gene_tree)):
trees = read_trees_list(input_gene_tree)
output_gene_tree = fam.build_gene_tree_path(outputdir, subst_model, family, gene_method)
with open(output_gene_tree, "w") as writer:
for tree in trees:
tree = prune(tree, genes_to_keep)
writer.write(tree.write())
writer.write("\n")
input_ali = fam.get_alignment(inputdir, family)
if (os.path.isfile(input_ali)):
msa = SeqGroup(input_ali)
new_msa = SeqGroup()
for entry in msa:
if (entry[0] in genes_to_keep):
new_msa.set_seq(entry[0], entry[1])
new_msa.write(outfile = fam.get_alignment(outputdir, family))
def generate(inputdir, outputdir, gene_method, subst_model, keep, species_to_prune):
print("Species to keep/prune: " + " ".join(species_to_prune))
fam.init_top_directories(outputdir)
with open(os.path.join(fam.get_misc_dir(outputdir), "info.txt"), "w") as writer:
writer.write("Extracted from " + os.path.basename(inputdir))
if (keep):
writer.write(" by keeping the species:\n" + "\n".join(species_to_prune))
else:
writer.write(" by removing the species:\n" + "\n".join(species_to_prune))
species_to_keep = None
if (keep):
species_to_keep = species_to_prune
else:
species_to_keep = get_species_to_keep(inputdir, species_to_prune)
print("Species to keep: " + " ".join(species_to_keep))
extract_species_tree(inputdir, outputdir, species_to_keep)
families = fam.get_families_list(inputdir)
index = 0
for family in families:
print("treating " + family + " " + str(index) + "/" + str(len(families)))
extract_family(family, inputdir, outputdir, gene_method, subst_model, species_to_keep)
index += 1
fam.postprocess_datadir(outputdir)
print("Result datadir in " + outputdir)
print("Species to keep: " + " ".join(species_to_keep))
if (__name__ == "__main__"):
  if (len(sys.argv) < 8):
print("Syntax: python " + os.path.basename(__file__) + " input output gene_method subst_model keep use_species_tree species1 [species2 species3 ...]")
print("Set keep to 0 for pruning the species, and to 1 for keeping the species")
exit(1)
inputdir = sys.argv[1]
outputdir = sys.argv[2]
gene_method = sys.argv[3]
subst_model = sys.argv[4]
keep = int(sys.argv[5]) != 0
use_species_tree = int(sys.argv[6]) != 0
species_to_prune = sys.argv[7:]
if (use_species_tree):
species_tree = sys.argv[7]
species_to_prune = read_tree(species_tree).get_leaf_names()
generate(inputdir, outputdir, gene_method, subst_model, keep, species_to_prune)
|
import pandas as pd
import numpy as np
import os
import sys
def hotencode_train(data):
train = pd.DataFrame()
parents = pd.get_dummies(data[0], prefix="parents")
has_nurs = pd.get_dummies(data[1], prefix="has_nurs")
form = pd.get_dummies(data[2], prefix="form")
children = pd.get_dummies(data[3], prefix="children")
housing = pd.get_dummies(data[4], prefix="housing")
finance = pd.get_dummies(data[5], prefix="finance")
social = pd.get_dummies(data[6], prefix="social")
health = pd.get_dummies(data[7], prefix="health")
classDistribution = pd.get_dummies(data[8], prefix="class")
train = pd.concat([parents, has_nurs, form, children, housing,
finance, social, health, classDistribution], axis=1)
cols = ['parents_usual', 'parents_pretentious', 'parents_great_pret', 'has_nurs_proper', 'has_nurs_less_proper', 'has_nurs_improper',
'has_nurs_critical', 'has_nurs_very_crit',
'form_complete', 'form_completed', 'form_incomplete', 'form_foster',
'children_1', 'children_2', 'children_3', 'children_more',
'housing_convenient', 'housing_less_conv', 'housing_critical',
'finance_convenient', 'finance_inconv',
'social_nonprob', 'social_slightly_prob', 'social_problematic',
'health_recommended', 'health_priority', 'health_not_recom',
'class_not_recom', 'class_recommend', 'class_very_recom', 'class_priority', 'class_spec_prior']
return train[pd.Index(cols)]
def hotencode_test(data):
train = pd.DataFrame()
parents = pd.get_dummies(data[0], prefix="parents")
has_nurs = pd.get_dummies(data[1], prefix="has_nurs")
form = pd.get_dummies(data[2], prefix="form")
children = pd.get_dummies(data[3], prefix="children")
housing = pd.get_dummies(data[4], prefix="housing")
finance = pd.get_dummies(data[5], prefix="finance")
social = pd.get_dummies(data[6], prefix="social")
health = pd.get_dummies(data[7], prefix="health")
train = pd.concat([parents, has_nurs, form, children, housing,
finance, social, health], axis=1)
cols = ['parents_usual', 'parents_pretentious', 'parents_great_pret', 'has_nurs_proper', 'has_nurs_less_proper', 'has_nurs_improper',
'has_nurs_critical', 'has_nurs_very_crit',
'form_complete', 'form_completed', 'form_incomplete', 'form_foster',
'children_1', 'children_2', 'children_3', 'children_more',
'housing_convenient', 'housing_less_conv', 'housing_critical',
'finance_convenient', 'finance_inconv',
'social_nonprob', 'social_slightly_prob', 'social_problematic',
'health_recommended', 'health_priority', 'health_not_recom']
return train[pd.Index(cols)]
train_data = pd.read_csv(sys.argv[1], header=None)
test_data = pd.read_csv(sys.argv[2], header=None)
train = hotencode_train(train_data)
test = hotencode_test(test_data)
classes = train.iloc[:, -5:].values
features = train.iloc[:, :-5].values
features = np.c_[np.ones(len(train)), features]
# print(X_train.shape, Y_train.shape,X_test.shape)
def gradient(X,Y,W):
    # softmax class probabilities: P = exp(XW) normalized per row
    XW = np.exp(np.matmul(X, W))
    denom = np.sum(XW, axis=1)
    Y_predict = np.divide(XW, denom.reshape(X.shape[0], 1))
    # gradient of the average log-likelihood w.r.t. W (ascent direction): X^T (Y - P) / n
    return np.matmul(X.transpose(), Y - Y_predict)/X.shape[0]
def cost(W,X,Y):
    # average negative log-likelihood: mean_i [ log(sum_k exp(x_i . w_k)) - x_i . w_{y_i} ]
    XW = np.exp(np.matmul(X,W))
    logTerm = np.log(np.sum(XW,axis=1))
    weightedTerm = np.sum(np.multiply(np.matmul(Y,W.T),X),axis=1)
    error = np.sum(logTerm-weightedTerm)/X.shape[0]
    return error
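# A small shape sanity check for the functions above (hypothetical, never
# executed): with 3 samples, 28 features (bias included) and 5 classes, the
# gradient must have the same shape as W.
def _demo_gradient_shapes():
    X_demo = np.ones((3, 28))
    Y_demo = np.eye(5)[[0, 1, 2]]  # one-hot labels for 3 samples
    W_demo = np.zeros((28, 5))
    assert gradient(X_demo, Y_demo, W_demo).shape == (28, 5)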
hyperParam = [0.000001,0.00001]
# alpha = [0.1,.02]
f1= 0
k=10
trainLen = features.shape[0]
print(trainLen)
a = np.zeros((len(hyperParam),k))
for m in range(0,len(hyperParam)):
for i in range(0,k):
X_train = np.concatenate((np.copy(features[:int(i*trainLen/k)]),np.copy(features[int((i+1)*trainLen/k):])),axis=0)
Y_train = np.concatenate((np.copy(classes[:int(i*trainLen/k)]),np.copy(classes[int((i+1)*trainLen/k):])),axis=0)
X_test = np.copy(features[int(i*trainLen/k):int((i+1)*trainLen/k)])
Y_test = np.copy(classes[int(i*trainLen/k):int((i+1)*trainLen/k)])
w_initial = np.zeros(28*5).reshape(28, 5)
costAr = []
costAr.append(0)
l = X_train.shape[0]
cl = np.sum(Y_test,axis=0)
for j in range(1, 10000):
grad = gradient(X_train , Y_train, w_initial)
w_temp = w_initial+0.01*(grad-hyperParam[m]*w_initial)
c = cost(w_initial,X_train,Y_train)
print((j,c), end="\r", flush=True)
costAr.append(c)
if(costAr[j-1]-costAr[j]<.00001 and j>10):
print(costAr[j],costAr[j-1])
break
w_initial = w_temp
print(j)
WmulX = np.exp(np.matmul(X_test, w_initial))
denom = np.sum(WmulX, axis=1)
Y_predict = np.divide(WmulX, denom.reshape(len(X_test), 1))
b = np.zeros_like(Y_predict)
b[np.arange(len(Y_predict)), Y_predict.argmax(1)] = 1
Y_predict = b
accuracy = np.trace(np.matmul(Y_predict, Y_test.T))/len(X_test)
Y_predict[:,0] =1*Y_predict[:,0]
Y_predict[:,1] =2*Y_predict[:,1]
Y_predict[:,2] =3*Y_predict[:,2]
Y_predict[:,3] =4*Y_predict[:,3]
Y_predict[:,4] =5*Y_predict[:,4]
Y_predict = np.sum(Y_predict,axis=1).tolist()
Y_test[:,0] =1*Y_test[:,0]
Y_test[:,1] =2*Y_test[:,1]
Y_test[:,2] =3*Y_test[:,2]
Y_test[:,3] =4*Y_test[:,3]
Y_test[:,4] =5*Y_test[:,4]
Y_test = np.sum(Y_test,axis=1).tolist()
mat = np.zeros((5,5))
f1Score = []
for i in range(0,len(Y_predict)):
mat[int(Y_predict[i])-1][Y_test[i]-1]=mat[int(Y_predict[i])-1][Y_test[i]-1]+1
for i in range(len(mat)):
TP = mat[i,i]
FP = np.sum(mat[:,i])-mat[i,i]
FN = np.sum(mat[i,:])-mat[i,i]
if(TP==0 and FP==0 and FN==0):
precision=100
recall=100
F1_score=100
elif(TP==0 and (FP==0 or FN==0)):
precision=0
recall=0
F1_score=0
else:
precision = mat[i,i]/np.sum(mat[:,i])
recall = mat[i,i]/np.sum(mat[i,:])
F1_score = 2*precision*recall/(precision+recall)*100
f1Score.append([TP,FP,FN,precision,recall,F1_score])
F1Score = pd.DataFrame([],columns=["TP","FP","FN","precision","recall","F1_Score"])
F1Score.loc["class_not_recom"] = f1Score[0]
F1Score.loc["class_recommend"] = f1Score[1]
F1Score.loc["class_very_recom"] = f1Score[2]
F1Score.loc["class_priority"] = f1Score[3]
F1Score.loc["class_spec_prior"] = f1Score[4]
Weighted_F1_score = np.dot(cl,F1Score.iloc[:,5].values)/np.sum(cl)
Macro_F1_score = np.sum(F1Score.iloc[:,5].values)/len(cl)
print("micro_F1_score= ",Weighted_F1_score,Macro_F1_score,m,accuracy)
# if(f1<Weighted_F1_score and Weighted_F1_score==Weighted_F1_score):
# print("updated")
# f1= Weighted_F1_score
# lamIndex=n
# batchIndex = m
a[m][i]=Weighted_F1_score
print(a)
X_train = features[:, :]
Y_train = classes[:, :]
X_test = features[:, :]
Y_test = classes[:, :]
print(X_train.shape,Y_train.shape)
# lamIndex, batchIndex = np.where(a==np.max(a))[0][0], np.where(a==np.max(a))[1][0]
w_initial = np.zeros(28*5).reshape(28, 5)
costAr = []
k = 100
l = X_train.shape[0]
costAr.append(0)
for j in range(1, 10000):
for i in range(0,k):
grad = gradient(X_train[int((l/k)*i):int((l/k)*(i+1)),:] , Y_train[int((l/k)*i):int((l/k)*(i+1)),:], w_initial)
w_initial = w_initial+0.01*(grad-hyperParam[0]*w_initial)
c = cost(w_initial,X_train,Y_train)
print((j,c), end="\r", flush=True)
costAr.append(c)
if(costAr[j-1]-costAr[j]<.00001 and j>1):
print(costAr[j],costAr[j-1])
break
print(j)
features = test.iloc[:, :].values
features = np.c_[np.ones(len(test)), features]
X_test = features[:, :]
WmulX = np.exp(np.matmul(X_test, w_initial))
denom = np.sum(WmulX, axis=1)
Y_predict = np.divide(WmulX, denom.reshape(len(X_test), 1))
b = np.zeros_like(Y_predict)
b[np.arange(len(Y_predict)), Y_predict.argmax(1)] = 1
Y_predict = b
Y_predict[:, 0] = 1*Y_predict[:, 0]
Y_predict[:, 1] = 2*Y_predict[:, 1]
Y_predict[:, 2] = 3*Y_predict[:, 2]
Y_predict[:, 3] = 4*Y_predict[:, 3]
Y_predict[:, 4] = 5*Y_predict[:, 4]
Y_predict = np.sum(Y_predict, axis=1).tolist()
predict = []
for i in range(0, len(Y_predict)):
if(Y_predict[i] == 1):
predict.append("not_recom")
elif(Y_predict[i] == 2):
predict.append("recommend")
elif(Y_predict[i] == 3):
predict.append("very_recom")
elif(Y_predict[i] == 4):
predict.append("priority")
elif(Y_predict[i] == 5):
predict.append("spec_prior")
Y_predict = pd.DataFrame(predict)
Y_predict.to_csv(sys.argv[3], header=False, index=False)
w_initial = pd.DataFrame(w_initial)
w_initial.to_csv(sys.argv[4], header=False, index=False)
|
a = input().upper()
D = {}
for i in a:
try:
D[i] += 1
except KeyError:
D[i] = 1
d2 = list(D.items())
d2.sort(key=lambda x: x[1], reverse=True)
if len(d2) != 1:
if d2[0][1] != d2[1][1]:
print(d2[0][0])
else:
print("?")
else:
print(d2[0][0])
# Done
|
import numpy as np
import torch
from torch.autograd import Variable
class DataLoaderS(object):
# train and valid is the ratio of training set and validation set. test = 1 - train - valid
def __init__(self, cmip5, soda, godas, device, horizon, window, valid_split=0.1,
transfer=True, concat_cmip5_and_soda=False, **kwargs):
"""
n - length of time series (i.e. dataset size)
m - number of nodes/grid cells (105 if using exactly the ONI region)
:param file_name: Omitted if data is not None (e.g. if you use the datareader from ninolearn, as is enso_mtgnn.py)
:param train: fraction to use for training
:param valid: fraction to use for validation
:param device: which device to run on (e.g. "cpu" or "cuda:0", ...)
:param horizon: self.h - How many timesteps in advance to predict
:param window: self.P - How many timesteps to use for prediction
:param normalize: Valid: 0 (data is used as is), 1, 2,..,6, "EEMD" (will run node-wise EEMD, can be slow)
"""
self.window = window
self.h = horizon
self.device = device
self.T, self.channels, w, self.n_nodes = soda[0].shape # T=#time series, m=#nodes
assert w == window, f"Data shape {soda[0].shape} not consistent with argument window={window}"
self.normalize = -1
sodaX = np.array(soda[0]) if not isinstance(soda[0], np.ndarray) else soda[0]
cmip5X = np.array(cmip5[0]) if not isinstance(cmip5[0], np.ndarray) else cmip5[0]
godasX = np.array(godas[0]) if not isinstance(godas[0], np.ndarray) else godas[0]
if transfer:
self.pre_train = torch.tensor(cmip5X).float(), torch.tensor(cmip5[1]).float()
first_val = int(valid_split * len(soda[0]))
self.train = [torch.tensor(sodaX[:-first_val]).float(), torch.tensor(soda[1][:-first_val]).float()]
self.valid = torch.tensor(sodaX[-first_val:]).float(), torch.tensor(soda[1][-first_val:]).float()
self.test = torch.tensor(godasX).float(), torch.tensor(godas[1]).float()
self.transfer = transfer
self.is_train_concat = concat_cmip5_and_soda
if concat_cmip5_and_soda: # instead of transfer, concat the cmip5 and soda data
print("SODA AND CMIP5 for training")
self.merge_transfer_and_train(cmip5X, cmip5[1])
self.T = self.train[0].shape[0]
def __str__(self):
string = f"Pre-training set of {self.pre_train[0].shape[0]} samples, " if self.transfer else ""
string += f"Training, Validation, Test samples = {self.T}, {self.valid[0].shape[0]}, {self.test[0].shape[0]}, " \
f"#nodes = {self.n_nodes}, #channels = {self.channels}, " \
f"Predicting {self.h} time steps in advance using {self.window} time steps --- CNN DATA used"
return string
def merge_transfer_and_train(self, transfer_data, transfer_labels):
transfer_data, transfer_labels = torch.tensor(transfer_data).float(), torch.tensor(transfer_labels).float()
self.train[0] = torch.cat((transfer_data, self.train[0]), dim=0)
self.train[1] = torch.cat((transfer_labels, self.train[1]), dim=0)
def get_batches(self, inputs, targets, batch_size, shuffle=True):
length = len(inputs)
if shuffle:
index = torch.randperm(length)
else:
index = torch.LongTensor(range(length))
start_idx = 0
while start_idx < length:
end_idx = min(length, start_idx + batch_size)
excerpt = index[start_idx:end_idx]
X = inputs[excerpt]
Y = targets[excerpt]
X = X.to(self.device)
Y = Y.to(self.device)
yield Variable(X), Variable(Y)
start_idx += batch_size
class IndexLoader:
def __init__(self, args, start_date="1984-01", end_date="2020-08", data_dir="../data", transfer=True,
godas_data='GODAS.input.36mn.1980_2015.nc', # Input of GODAS data set
                 godas_label='GODAS.label.12mn_3mv.1982_2017.nc',  # Label of GODAS data set
verbose=False,
test_set="GODAS",
device=None,
ersstv5_to_cnn_format=False
):
from utils import read_ssta, get_index_mask, load_cnn_data, reformat_cnn_data
self.device = device or args.device
self.horizon = args.horizon
self.window = args.window
self.n_nodes = args.num_nodes
try:
cnn_mask = args.cnn_data_mask
GODAS_X, GODAS_Y = reformat_cnn_data(lead_months=args.horizon, window=args.window,
use_heat_content=args.use_heat_content, lon_min=args.lon_min,
lon_max=args.lon_max, lat_min=args.lat_min, lat_max=args.lat_max,
data_dir=data_dir + "GODAS/", sample_file=godas_data,
label_file=godas_label,
get_valid_nodes_mask=False, get_valid_coordinates=False)
GODAS = GODAS_X[24:, :, :, cnn_mask], GODAS_Y[24:] # start at 1984
except AttributeError:
_, _, GODAS = load_cnn_data(window=args.window, lead_months=args.horizon, lon_min=args.lon_min,
lon_max=args.lon_max, lat_min=args.lat_min, lat_max=args.lat_max,
data_dir=data_dir, use_heat_content=args.use_heat_content,
return_mask=False, truncate_GODAS=True)
transfer = False
print(GODAS.shape)
if args.use_heat_content or test_set == "GODAS":
self.dataset = "GODAS"
if verbose:
print("Testing on unseen GODAS data...")
self.test = torch.tensor(np.array(GODAS[0])).float(), torch.tensor(GODAS[1]).float()
self.semantic_time_steps = GODAS[0].attrs["time"][24:] # start in 1984
else:
self.dataset = "ERSSTv5"
if verbose:
print("Testing on unseen ERSSTv5 data...")
flattened_ssta = read_ssta(data_dir=data_dir, index=args.index,
resolution=args.resolution, stack_lon_lat=True,
start_date=start_date, end_date=end_date, # end date can be anything for eval.
lon_min=args.lon_min, lon_max=args.lon_max,
lat_min=args.lat_min, lat_max=args.lat_max)
self.semantic_time_steps = flattened_ssta.get_index("time")[self.window + self.horizon - 1:]
if transfer:
flattened_ssta = flattened_ssta[:, cnn_mask]
_, self.mask = get_index_mask(flattened_ssta, args.index, flattened_too=True, is_data_flattened=True)
if ersstv5_to_cnn_format:
self.test = self._batchify_index(np.array(flattened_ssta))
else:
self.test = self._batchify(np.array(flattened_ssta))
def _batchify_index(self, data):
Y_matrix = data[self.window + self.horizon - 1:] # horizon = #time steps predicted in advance
timesteps = Y_matrix.shape[0]
X = torch.zeros((timesteps, 1, self.window, self.n_nodes))
Y = torch.zeros((timesteps,))
for start, Y_i in enumerate(Y_matrix):
end = start + self.window
X[start, 0, :, :] = torch.from_numpy(data[start:end, :])
Y[start] = torch.tensor(np.mean(Y_i[self.mask]))
return [X, Y]
def _batchify(self, data):
Y_matrix = data[self.window + self.horizon - 1:, :] # horizon = #time steps predicted in advance
timesteps = Y_matrix.shape[0]
X = torch.zeros((timesteps, self.window, self.n_nodes))
Y = torch.zeros((timesteps, self.n_nodes))
for start, Y_i in enumerate(Y_matrix):
end = start + self.window
X[start, :, :] = torch.from_numpy(data[start:end, :])
Y[start, :] = torch.tensor(Y_i)
return [X, Y]
def get_batches(self, inputs, targets, batch_size, shuffle=True):
length = len(inputs)
if shuffle:
index = torch.randperm(length)
else:
index = torch.LongTensor(range(length))
start_idx = 0
while start_idx < length:
end_idx = min(length, start_idx + batch_size)
excerpt = index[start_idx:end_idx]
X = inputs[excerpt]
Y = targets[excerpt]
X = X.to(self.device)
Y = Y.to(self.device)
yield Variable(X), Variable(Y)
start_idx += batch_size
|
def multiply(solid_number):
    # product of the decimal digits of the number
    res = 1
    for digit in str(solid_number):
        res *= int(digit)
    return res
def persistence(n):
    # multiplicative persistence: how many times the digits must be multiplied
    # together before a single digit remains
    steps = 0
    b = str(n)
while len(b) != 1:
y = multiply(b)
steps += 1
b = str(y)
return steps
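# A minimal check of the two functions (hypothetical, never called here):
# 39 -> 27 -> 14 -> 4, so its multiplicative persistence is 3.
def _demo_persistence():
    assert multiply(39) == 27
    assert persistence(39) == 3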
|
from value_objects.util.compat import izip, unicode
from value_objects.util.once import once
class ObjectHelper( object ):
def __init__( self, object_class, field_names, field_values ):
self.object_class = object_class
self.field_names = field_names
self.field_values = field_values
@property
def field_pairs( self ):
return izip( self.field_names, self.field_values )
@once
def class_and_state( self ):
return ( self.object_class, self.field_values )
# ====================================
# strs
# ====================================
@once
def class_and_state_unicode( self ):
pairStrs = (
'%s=%s' % ( name, unicode( value ) ) for name, value in self.field_pairs
)
body = ', '.join( pairStrs )
return '%s{%s}' % ( self.object_class.__name__, body )
@once
def class_and_state_repr( self ):
pairStrs = (
'%s=%s' % ( name, repr( value ) ) for name, value in self.field_pairs
)
body = ', '.join( pairStrs )
return '%s{%s}' % ( self.object_class.__name__, body )
# ====================================
# equals
# ====================================
def class_and_state_equals( self, other_object ):
try:
other_helper = other_object.object_helper
except AttributeError:
return False
if not isinstance( other_helper, ObjectHelper ):
return False
return self.class_and_state == other_helper.class_and_state
# ====================================
# hash
# ====================================
@once
def class_and_state_hash( self ):
return hash( self.class_and_state )
|
text = input("Digite seu texto: ")
print(len(text))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 06:59:02 2019
@author: Sneha
"""
import numpy as np
import vrep
import sys
text_file = open("FinalPath.txt", "r")
lines = (text_file.read()).split(' ')
text_file.close()
final_path=[]
for i in lines:
if(i!=''):
k=i.split(',')
for j in range(5):
k[j]=float(k[j])
final_path.append(k)
vrep.simxFinish(-1) # just in case, close all opened connections
clientID=vrep.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to V-REP
goal=[final_path[len(final_path)-1][0],final_path[len(final_path)-1][1]]
if clientID!=-1:
print ('Connected to remote API server')
[_,left_wheel] = vrep.simxGetObjectHandle(clientID,'wheel_left_joint',vrep.simx_opmode_oneshot_wait);
[ _,right_wheel]=vrep.simxGetObjectHandle(clientID,'wheel_right_joint',vrep.simx_opmode_oneshot_wait);
[_,reference_frame]=vrep.simxGetObjectHandle(clientID,'Dummy',vrep.simx_opmode_oneshot_wait);
[_,turtlebot]=vrep.simxGetObjectHandle(clientID,'Turtlebot2',vrep.simx_opmode_oneshot_wait);
[_,start_position]=vrep.simxGetObjectPosition(clientID, turtlebot,-1,vrep.simx_opmode_oneshot_wait);
start = np.double([start_position[0],start_position[1]])
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,0,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,0,vrep.simx_opmode_oneshot_wait);
err,simtime = vrep.simxGetFloatSignal(clientID,'Turtlebot2_simulation_time',vrep.simx_opmode_streaming)
for i in range(0,len(final_path)-1):
[_,position]=vrep.simxGetObjectPosition(clientID, turtlebot,-1,vrep.simx_opmode_oneshot_wait);
[_,theta]=vrep.simxGetObjectOrientation(clientID, turtlebot,reference_frame,vrep.simx_opmode_oneshot_wait);
position = np.double(position);
theta_req = final_path[i][2]
if(np.abs(theta_req - final_path[i+1][2])>0.1 ):
while np.abs(theta[2] - theta_req)> 0.05:
if theta[2] < theta_req :
if(np.abs(theta[2] - theta_req)<1):
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,-1,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,1,vrep.simx_opmode_oneshot_wait);
else:
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,-2,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,2,vrep.simx_opmode_oneshot_wait);
else:
if(np.abs(theta[2] - theta_req)<1):
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,1,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,-1,vrep.simx_opmode_oneshot_wait);
else:
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,2,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,-2,vrep.simx_opmode_oneshot_wait);
[_,theta]=vrep.simxGetObjectOrientation(clientID, turtlebot,reference_frame,vrep.simx_opmode_oneshot_wait);
[_,position]=vrep.simxGetObjectPosition(clientID, turtlebot,-1,vrep.simx_opmode_oneshot_wait);
position = np.double(position);
[_,position]=vrep.simxGetObjectPosition(clientID, turtlebot,-1,vrep.simx_opmode_oneshot_wait);
position = np.double(position);
# print(np.abs(position[0] - goal[0]),' ',np.abs(position[1] - goal[1]))
if np.abs(position[0] - goal[0]) < 0.2 and np.abs(position[1] - goal[1]) < 0.2:
break;
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,final_path[i][3],vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,final_path[i][4],vrep.simx_opmode_oneshot_wait);
_,simtime1 = vrep.simxGetFloatSignal(clientID,'Turtlebot2_simulation_time',vrep.simx_opmode_buffer)
_,simtime2 = vrep.simxGetFloatSignal(clientID,'Turtlebot2_simulation_time',vrep.simx_opmode_buffer)
while (simtime2 - simtime1) < 1.14:
_,simtime2 = vrep.simxGetFloatSignal(clientID,'Turtlebot2_simulation_time',vrep.simx_opmode_buffer)
print("Location Reached");
s = vrep.simxSetJointTargetVelocity(clientID,left_wheel,0,vrep.simx_opmode_oneshot_wait);
s = vrep.simxSetJointTargetVelocity(clientID,right_wheel,0,vrep.simx_opmode_oneshot_wait);
vrep.simxFinish(-1);
vrep.simxFinish(clientID)
else:
print('Connection unsuccessful!')
sys.exit("Could not connect")
|
import os
PREFIX = 'data/'
path = '{{ .VARIABLE }}'
data_path = os.path.join(PREFIX, path)
full_path = os.path.abspath(data_path)
data = ''
with open(full_path, 'r') as f:
data = f.read()
print(data)
|
#!/usr/bin/env python
import netfilterqueue
#The purpose of this program is to drop or accept packets being sent from a victim's computer to the internet. It is intended
#to be used after already being the man in the middle via our arp_spoof program. To be able to access these packets,
#we must first put them in a queue using iptables. That command in the terminal is below:
# iptables -I FORWARD -j NFQUEUE --queue-num 0
#When we are done, be sure to run "iptables --flush" to get rid of the iptables rule we created.
def process_packet(packet):
print(packet)
#We use packet.drop() to drop packets in the queue to cut the internet connection on the machine, or
#alternatively packet.accept() to allow the victim pc to continue to the internet.
packet.drop()
#creating an instance of a netfilterqueue object and putting it in a variable called queue. We then bind our new
#variable to the queue number we set up previously with iptables, and add a callback function we created called
#process_packet
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
|
import sys
import os
from PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow, QPushButton, QFileDialog, QWidget, QCheckBox, QHBoxLayout
# User defined imports
from OCR_reader import OCRReader
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.image_path = None
self.display_bg = True
self.display_image = True
self.initUI()
def initUI(self):
self.setWindowTitle("OCR Reader")
bg_box = QCheckBox("Display Background")
bg_box.setChecked(True)
bg_box.stateChanged.connect(lambda: self.bg_state(bg_box))
image_box = QCheckBox("Display Image")
image_box.setChecked(True)
image_box.stateChanged.connect(lambda: self.image_state(image_box))
upload_button = QPushButton("Upload Image", self)
upload_button.clicked.connect(self.upload_image)
reconvert_button = QPushButton("Re-Convert", self)
reconvert_button.clicked.connect(self.convert_image_to_text)
widget = QWidget()
h_layout = QHBoxLayout()
h_layout.addStretch(1)
h_layout.addWidget(bg_box)
h_layout.addWidget(image_box)
v_layout = QVBoxLayout()
v_layout.addStretch(1)
v_layout.addLayout(h_layout)
v_layout.addWidget(upload_button)
v_layout.addWidget(reconvert_button)
widget.setLayout(v_layout)
self.setCentralWidget(widget)
def upload_image(self):
        image_path, _ = QFileDialog.getOpenFileName(self, "Select image", os.getenv("HOME"), "(*.png *.xpm *.jpg)")
if image_path != "":
self.image_path = image_path
self.convert_image_to_text()
def convert_image_to_text(self):
if self.image_path is not None:
OCRReader(self.image_path, self.display_bg, self.display_image)
else:
print("Please select an image file first!")
    def bg_state(self, button):
        self.display_bg = button.isChecked()
    def image_state(self, button):
        self.display_image = button.isChecked()
if __name__ == "__main__":
ocr_app = QApplication(sys.argv)
ocr_app.setStyle("Fusion")
gui = MainWindow()
gui.show()
sys.exit(ocr_app.exec_())
|
"""
Entrada: Um texto qualquer
Saida: No lugar de nomes (com letras maiúsculas), retorna M, se for preposição, retorna p,
se for numeral, retorna N, se for conjunção, retorna c ...
"""
import libplnbsi
#Codifica recebe um texto tokenizado
def codifica(pTexto):
preposicoes = ['a', 'ante', 'após', 'com', 'contra','de','do', 'desde','em','entre','para','per',
'perante','por','sem','sob','sobre','trás']
conjuncoes = ['e', 'nem', 'mas também', 'como também', 'bem como', 'mas ainda','mas', 'porém', 'todavia', 'contudo', 'antes']
artigos = ['o', 'a', 'os', 'as', 'um', 'uma', 'uns', 'umas']
mpTratamentos = ['Sr', 'Sra', 'Srta', 'Srs', 'Sras', 'Srª', 'Srº', 'Srªs', 'Ema']
pTratamentos = ['A','Ema','Emas','Revma','Ex.a','Mag.a','M.','M.I.','S.','Sra','O.']
strCodificada = ""
i = 0
for elem in pTexto:
if elem.isalpha():
if elem.lower() in preposicoes:
strCodificada += 'p'
elif elem.lower() in artigos:
strCodificada += 'a'
elif elem.lower() in conjuncoes:
strCodificada += 'c'
#elif elem in pTratamentos:
#strCodificada += 'T'
#strCodificada = strCodificada[: -2] + 'T'
elif elem in mpTratamentos:
strCodificada += 'T'
elif elem[0].isupper():
strCodificada += 'M'
            elif elem.islower():
                strCodificada += 'm'
        elif elem.isdigit():
            strCodificada += 'N'# numeral
        elif elem.isalnum():
            strCodificada += 'A' # alphanumeric
else:
strCodificada += elem
#
return strCodificada
#
def main():
#arquivo = open("")
texto = "Srª Cristine ... Tudo tem. O seu MP3 tempo determinado, e há tempo para todas V.Emª Karen coisas debaixo do céu. V.Sra está pronta? 55/55/55"
texto = libplnbsi.insereEspacos(texto)
tokens, pos = libplnbsi.tokenizador(texto)
print(tokens)
print(libplnbsi.extraiPadrao(tokens, ['V.TM', 'M', 'TM']))
#~ #print(tokens)
#~ #print(codifica(tokens))
#~ l = ['Cris','Al','Haha','Teste']
#~ print(l[:2])
return 0
if __name__ == '__main__':
main()
|
__author__ = 'Ben'
from helper import greeting
greeting("new file says hi")
greeting()
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
import builtins as _builtins, sys, typing as _typing
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
from google.protobuf.message import Message as _Message
__all__ = [
"IdentityProviderConfig",
"CreateIdentityProviderConfigRequest",
"CreateIdentityProviderConfigResponse",
"GetIdentityProviderConfigRequest",
"GetIdentityProviderConfigResponse",
"ListIdentityProviderConfigsRequest",
"ListIdentityProviderConfigsResponse",
"UpdateIdentityProviderConfigRequest",
"UpdateIdentityProviderConfigResponse",
"DeleteIdentityProviderConfigRequest",
"DeleteIdentityProviderConfigResponse",
]
class IdentityProviderConfig(_Message):
identity_provider_id: _builtins.str
is_deactivated: _builtins.bool
issuer: _builtins.str
jwks_url: _builtins.str
def __init__(self, *, identity_provider_id: _typing.Optional[_builtins.str] = ..., is_deactivated: _typing.Optional[_builtins.bool] = ..., issuer: _typing.Optional[_builtins.str] = ..., jwks_url: _typing.Optional[_builtins.str] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_id", "is_deactivated", "issuer", "jwks_url"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_id", "is_deactivated", "issuer", "jwks_url"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class CreateIdentityProviderConfigRequest(_Message):
@property
def identity_provider_config(self) -> IdentityProviderConfig: ...
def __init__(self, *, identity_provider_config: _typing.Optional[IdentityProviderConfig] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_config"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_config"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class CreateIdentityProviderConfigResponse(_Message):
@property
def identity_provider_config(self) -> IdentityProviderConfig: ...
def __init__(self, *, identity_provider_config: _typing.Optional[IdentityProviderConfig] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_config"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_config"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetIdentityProviderConfigRequest(_Message):
identity_provider_id: _builtins.str
def __init__(self, *, identity_provider_id: _typing.Optional[_builtins.str] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_id"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_id"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetIdentityProviderConfigResponse(_Message):
@property
def identity_provider_config(self) -> IdentityProviderConfig: ...
def __init__(self, *, identity_provider_config: _typing.Optional[IdentityProviderConfig] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_config"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_config"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class ListIdentityProviderConfigsRequest(_Message):
def __init__(self): ...
def HasField(self, field_name: _typing.NoReturn) -> _typing.NoReturn: ...
def ClearField(self, field_name: _typing.NoReturn) -> _typing.NoReturn: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class ListIdentityProviderConfigsResponse(_Message):
@property
def identity_provider_configs(self) -> RepeatedCompositeFieldContainer[IdentityProviderConfig]: ...
def __init__(self, *, identity_provider_configs: _typing.Optional[_typing.Iterable[IdentityProviderConfig]] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_configs"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_configs"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class UpdateIdentityProviderConfigRequest(_Message):
@property
def identity_provider_config(self) -> IdentityProviderConfig: ...
@property
def update_mask(self) -> FieldMask: ...
def __init__(self, *, identity_provider_config: _typing.Optional[IdentityProviderConfig] = ..., update_mask: _typing.Optional[FieldMask] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_config", "update_mask"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_config", "update_mask"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class UpdateIdentityProviderConfigResponse(_Message):
@property
def identity_provider_config(self) -> IdentityProviderConfig: ...
def __init__(self, *, identity_provider_config: _typing.Optional[IdentityProviderConfig] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_config"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_config"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class DeleteIdentityProviderConfigRequest(_Message):
identity_provider_id: _builtins.str
def __init__(self, *, identity_provider_id: _typing.Optional[_builtins.str] = ...): ...
def HasField(self, field_name: _typing.Literal["identity_provider_id"]) -> _builtins.bool: ...
def ClearField(self, field_name: _typing.Literal["identity_provider_id"]) -> None: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class DeleteIdentityProviderConfigResponse(_Message):
def __init__(self): ...
def HasField(self, field_name: _typing.NoReturn) -> _typing.NoReturn: ...
def ClearField(self, field_name: _typing.NoReturn) -> _typing.NoReturn: ...
def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
|
#coding:utf8
import pandas as pd
df=pd.read_excel("./corrDataSet.xlsx")
print df.corr()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 13:17:37 2019
@author: quiath
"""
import time
from collections import namedtuple
from PIL import Image, ImageEnhance
from net_image_cache import ImageCache
from config_util import read_config_json
"""
contents of config.json enabling a 128x128 LCD display
if "lcd" is not set, PIL built in Image.show() is used (e.g. for debugging)
{
"tiles_hor": 4,
"tiles_ver": 4,
"tiles_size": 128,
"check_every_n_frames": 120,
"win_w": 800,
"win_h": 600,
"lcd": 1,
"lcd_w": 128,
"lcd_h": 128,
"app_id": "YOUR_APP_ID",
"app_code": "YOUR_APP_CODE",
"origin": [13,4092,2723]
}
"""
config = read_config_json()
RPi_has_LCD = config.get("lcd", False)
if RPi_has_LCD:
import Adafruit_ILI9341 as TFT
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Raspberry Pi configuration.
DC = 18
RST = 23
SPI_PORT = 0
SPI_DEVICE = 0
FONTSIZE = 32
Evt = namedtuple("Evt", "type, key")
class MapDisplay:
def __init__(self, config, targetsurf, font):
self.mysurf = targetsurf
self.TW, self.TH = config["tiles_hor"], config["tiles_ver"]
self.TS = config["tiles_size"]
self.CHECK_EVERY_N_FRAMES = config["check_every_n_frames"]
self.origin = config["origin"]
self.refresh_s = config.get("refresh_s", 600)
self.view_ofs_x, self.view_ofs_y = 0, 0
self.last_refreshed = time.time()
self.map_screen_tile_to_name = {}
self.total_loops = 0
self.tiledsurf = Image.new("RGB", (self.TW * self.TS, self.TH * self.TS))
self.font = font
self.cache = ImageCache(self.TW, self.TH, config, tilesize = self.TS, refresh_s = self.refresh_s,
global_indexing = True)
        # set this at start so something is drawn while the initial update is still running
self.move_flag = True
# used for displaying the info about the update in progress
self.need_update_flag = False
# PIL!
self.mysurf_w, self.mysurf_h = self.mysurf.size
self.window_move = (self.mysurf_w < self.TS * self.TW)
self.src_x = 0
self.src_y = 0
def render_info(self):
lastm = int(time.time() - self.last_refreshed)
lasts = lastm % 60
lastm = lastm // 60
print("last refreshed {:02d}m{:02d}s ago ".format(lastm, lasts))
def process_event(self, evt):
pass
def process_joy(self, joy):
pass
def update(self):
if (self.total_loops % self.CHECK_EVERY_N_FRAMES == 0 or
self.move_flag) and not self.need_update_flag:
self.need_update_flag = True
elif self.need_update_flag:
self.need_update_flag = False
TW, TH, TS = self.TW, self.TH, self.TS
traffic_d = self.cache.get_tiles(self.origin, self.total_loops > 0)
if any(not x[1] for x in traffic_d.values()) or self.total_loops == 0:
self.last_refreshed = time.time()
for y in range(TH):
for x in range(TW):
tile_zxy = (self.origin[0], self.origin[1] + x, self.origin[2] + y)
if tile_zxy in traffic_d:
rfn, rdata = traffic_d[tile_zxy]
if rdata and rfn == self.map_screen_tile_to_name.get((x, y), ""):
continue
self.map_screen_tile_to_name[(x, y)] = rfn
try:
surf = Image.open(rfn)
self.tiledsurf.paste(surf, (x * TS, y * TS, (x + 1) * TS, (y + 1) * TS))
                        except Exception:
                            print("Error while parsing image", rfn)
self.total_loops += 1
self.move_flag = False
enh = ImageEnhance.Color(self.tiledsurf)
enhanced = enh.enhance(2.0)
result_img = enhanced
if RPi_has_LCD:
x, y = self.src_x, self.src_y
cropped = enhanced.transform((self.mysurf_w, self.mysurf_h),
Image.EXTENT,
data = (x, y, x + 1 * self.mysurf_w, y + 1 * self.mysurf_h), # TODO: scale down
resample = Image.BILINEAR)
cropped = cropped.transpose(Image.FLIP_LEFT_RIGHT)
cropped = cropped.transpose(Image.ROTATE_90)
result_img = cropped
if self.window_move:
HSTEP = 4
VSTEP = 16
VSTEP2 = 2 * VSTEP
delta_x = HSTEP if (self.src_y // VSTEP2) % 2 == 0 else -HSTEP
self.src_x += delta_x
if self.src_x + self.mysurf_w >= self.TW * self.TS:
self.src_x = self.TW * self.TS - self.mysurf_w - 1
self.src_y += VSTEP // 2
elif self.src_x < 0:
self.src_x = 0
self.src_y += VSTEP // 2
if self.src_y + self.mysurf_h >= self.TH * self.TS:
self.src_x, self.src_y = 0, 0
self.mysurf.paste(result_img,
(self.view_ofs_x,
self.view_ofs_y,
self.view_ofs_x + result_img.size[0],
self.view_ofs_y + result_img.size[1]))
def main():
if RPi_has_LCD:
WINW, WINH = config["lcd_w"], config["lcd_h"]
else:
WINW, WINH = config["win_w"], config["win_h"]
if RPi_has_LCD:
rpi_disp = TFT.ILI9341(DC, rst=RST, width = WINW, height = WINH, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=64000000))
rpi_disp.begin()
running = True
try:
font = None
mysurf = Image.new("RGB", (WINW, WINH))
map_disp = MapDisplay(config, mysurf, font)
while running:
map_disp.update()
map_disp.render_info()
if RPi_has_LCD:
rpi_disp.display(mysurf)
else:
mysurf.show()
time.sleep(5)
finally:
pass
if __name__ == "__main__":
main()
|
from fabric.api import sudo
from fabtools import require
from appconfig import APPS
from appconfig.config import App
def require_certbot():
require.deb.package('software-properties-common')
require.deb.ppa('ppa:certbot/certbot')
require.deb.package('python-certbot-nginx')
def require_cert(domain):
    # If an App instance is passed, we look up its domain attribute (and add the
    # www subdomain if configured); otherwise the argument is used as-is.
    if isinstance(domain, App):
        domains = domain.domain
        if domain.with_www_subdomain:
            domains += ',www.{0}'.format(domain.domain)
    else:
        domains = domain
    sudo('certbot --nginx -n -d {0} certonly --agree-tos --expand --email {1}'.format(
        domains, APPS.defaults['error_email']))
def delete(cert):
sudo('certbot delete --cert-name {0}'.format(cert))
def renew():
sudo('certbot --nginx -n renew')
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as py
import random as rd
import math as m
ylim = 200
xlim = 200
def create_instance(n):
inst = [(0, 0)]
route = [0]
for i in range(n):
x = rd.randint(-xlim, xlim)
y = rd.randint(-ylim, ylim)
inst.append((x, y))
route.append(i+1)
route.append(0)
return inst, route
def print_instance(inst):
dep = inst[0]
cust = inst[1:]
py.plot(dep[0], dep[1], color='blue', marker='o')
for i in cust:
py.plot(i[0], i[1], color='red', marker='o')
def print_route(route, inst):
x = []
y = []
for i in range(len(route)):
x.append(inst[route[i]][0])
y.append(inst[route[i]][1])
py.plot(x, y, color='black')
def print_costs(costs):
for i in range(len(costs)):
py.plot(i, costs[i], color="blue", marker='o')
def distance(p1, p2):
return m.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)
def cost_sol(route, inst):
c = 0
for r in range(len(route)-1):
p1 = inst[route[r]]
p2 = inst[route[r+1]]
c += distance(p1, p2)
return c
def DeuxOpt(route, inst):
l = len(route)-1
best_tuple = (0, 0)
best = 0
for i in range(l-1):
pi = inst[route[i]]
spi = inst[route[i+1]]
for j in range(i,l-1):
if j != i-1 and j != i and j != i+1:
pj = inst[route[j]]
spj = inst[route[j+1]]
d = (distance(pi, spi) + distance(pj, spj)) - \
distance(pi, pj)-distance(spi, spj)
if d > best:
best_tuple = (i, j)
best = d
if best_tuple[0] != best_tuple[1]:
cand = route.copy()
"""
print_instance(inst)
py.plot(inst[route[best_tuple[0]+1]][0],
inst[route[best_tuple[0]+1]][1], color="green", marker="o")
py.plot(inst[route[best_tuple[1]]][0],
inst[route[best_tuple[1]]][1], color="yellow", marker="o")
py.plot(inst[route[best_tuple[0]]][0],
inst[route[best_tuple[0]]][1], color="green", marker="o")
py.plot(inst[route[best_tuple[1]+1]][0],
inst[route[best_tuple[1]+1]][1], color="yellow", marker="o")
"""
        # 2-opt move: reverse the segment between the two removed edges so the
        # change applied to the route matches the computed gain d
        cand[best_tuple[0]+1:best_tuple[1]+1] = cand[best_tuple[0]+1:best_tuple[1]+1][::-1]
return cand
else:
return route
def LK(route, inst):
next_cand = DeuxOpt(route, inst)
while next_cand != route:
route = next_cand.copy()
print(cost_sol(route, inst))
next_cand = DeuxOpt(route, inst)
return next_cand
'''
print(possible_cand)
for i in range(k-1):
next_possible_cand=[]
for p in possible_cand:
next_possible_cand + = DeuxOpt(p,inst)
possible_cand = next_possible_cand.copy()
return (possible(cand))
'''
"""
def divided_lk(lim,routes,inst):
mini_routes = decoupe_route(route)
cand = []
for i in mini_routes:
cand = cand + LK(lim,i,inst)
cand = LK(lim,cand,inst)
return cand
"""
inst, route = create_instance(100)
print_instance(inst)
print_route(route, inst)
py.show()
opt_route = LK(route, inst)
print_instance(inst)
print_route(opt_route, inst)
py.show()
|
from flask_wtf import Form
from flask_wtf.file import FileRequired, FileAllowed
from wtforms import StringField, TextAreaField, FileField, SelectField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Optional
from app import photos
class NewAlbumForm(Form):
title = StringField(u'标题')
about = TextAreaField(u'介绍', render_kw={'rows': 8})
photo = FileField(u'图片', validators=[
FileRequired(u'你还没有选择图片!'),
FileAllowed(photos, u'只能上传图片!')
])
asc_order = SelectField(u'显示顺序',
                            choices=[('True', u'按上传时间正序排列'), ('False', u'按上传时间倒序排列')])
no_public = BooleanField(u'私密相册(勾选后相册仅自己可见)')
no_comment = BooleanField(u'禁止评论')
submit = SubmitField(u'提交')
def create_album(self):
pass
class CommentForm(Form):
body = TextAreaField(u'留言', validators=[DataRequired(u'内容不能为空!')], render_kw={'rows': 5})
submit = SubmitField(u'提交')
class EditAlbumForm(Form):
title = StringField(u'标题')
about = TextAreaField(u'介绍', render_kw={'rows': 8})
asc_order = SelectField(u'显示顺序',
                            choices=[("1", u'按上传时间正序排列'), ("0", u'按上传时间倒序排列')])
no_public = BooleanField(u'私密相册(右侧滑出信息提示:勾选后相册仅自己可见)')
no_comment = BooleanField(u'允许评论')
submit = SubmitField(u'提交')
class AddPhotoForm(Form):
photo = FileField(u'图片', validators=[
FileRequired(),
FileAllowed(photos, u'只能上传图片!')
])
submit = SubmitField(u'提交')
class SettingForm(Form):
username = StringField(u'姓名或昵称', validators=[Length(0, 64)])
status = StringField(u'签名档', validators=[Length(0, 64)])
location = StringField(u'城市', validators=[Length(0, 64)])
website = StringField(u'网站', validators=[Length(0, 64), Optional(),
], render_kw={"placeholder": "http://..."})
about_me = TextAreaField(u'关于我', render_kw={'rows': 8})
like_public = BooleanField(u'公开我的喜欢')
submit = SubmitField(u'提交')
def validate_website(self, field):
if field.data[:4] != "http":
field.data = "http://" + field.data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
from codecs import open
#get the readme file for the long description below--optional
with open('README.md', 'rb', encoding='utf-8') as f:
readme = f.read()
# see https://github.com/pypa/sampleproject/blob/master/setup.py for explanation of each parameter and links
setup(
name='multilevel_mesa',
version='0.0.1',
    description="Provides an extension module to Mesa to allow for hierarchies and modules of agents",
long_description=readme,
url='https://github.com/tpike3/ml_mesa',
author='Tom Pike',
author_email='tpike3@gmu.edu',
classifiers=[
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Life',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
],
    keywords='agent based modeling model ABM simulation multi-agent coalition game theory',
packages = ["multilevel_mesa"],
#for more elaborate projects with directories of files such as tests etc
install_requires=['networkx', "mesa"]
)
|
from .fetal_net import fetal_envelope_model
from .fetal_net_skip import fetal_origin_model
from .fetal_net_skip2 import fetal_origin2_model
from .fetal_net_skip3 import fetal_origin3_model
from .unet.unet import unet_model_2d
from .unet.isensee import isensee2017_model
from .unet3d.unet import unet_model_3d
from .unet3d.isensee2017 import isensee2017_model_3d
from .unet3d.vnet import vnet
from .norm.NormNet import norm_net_model
|
import configparser
import os.path
class CurtisConfig:
CONFIG_PATHS = [
'./curtis.ini',
'~/.curtis.ini',
]
DEFAULT_TIMEOUT = 10
SITE_PREFIX = 'site:'
def __init__(self, config_file=None, site=None):
self.parser = configparser.ConfigParser()
self.parser.add_section('curtis')
config_paths = [config_file] if config_file else self.CONFIG_PATHS
for file_path in config_paths:
file_path = os.path.expanduser(file_path)
if os.path.exists(file_path):
with open(file_path) as fp:
self.parser.read_file(fp)
break
self.site = site or self.default_site
@property
def sites(self):
return sorted([
name[len(self.SITE_PREFIX):]
for name in self.parser.sections()
if name.startswith(self.SITE_PREFIX)
])
@property
def default_site(self):
return self.parser['curtis'].get('default_site') or self.sites[0]
def _get_site_config(self, key, default=None):
return self.parser['{}{}'.format(self.SITE_PREFIX, self.site)].get(key, default)
@property
def url(self):
return self._get_site_config('url')
@property
def token(self):
return self._get_site_config('token')
@property
def timeout(self):
return self._get_site_config('timeout', self.DEFAULT_TIMEOUT)
def load_config(config_file, site):
return CurtisConfig(config_file, site)
|
ee=int(input())
fl=0
if ee>2:
    for i in range(2,ee//2+1):
if ee%i==0:
fl=1
print("no")
break
if fl==0 and ee>1:
print("yes")
|
from gensim.models import word2vec
import csv
#import pandas as pd
import os
# Load the model created by word2vec_kadai.py
model = word2vec.Word2Vec.load("./training.model")
# [For testing] show the top-10 most similar words for a given word
# results = model.wv.most_similar(positive=['AI'])
# for result in results:
# print(result)
# Put the 50 words specified in the assignment and the 5 target words into lists
with open('wordlist.txt', 'r', encoding='utf_8') as f_in:
wordlist = f_in.read().split()
target = ['岩手', 'サッカー', 'AI', 'パスタ', 'ねこ']
# [For testing] show the top-10 most similar words for each target word
# results = model.wv.most_similar(positive=[target[4]])
# for result in results:
# print(result)
# Compute similarities between all pairs of the 50 assignment words and the 5 target words, collect them in a list, and write to CSV
with open('sorted_result.csv', 'a', encoding='utf_8') as f_out:
writer = csv.writer(f_out, lineterminator='\n')
for i in range(0, 5):
t = []
writer.writerows(t)
for j in range(0, 50):
similarity = model.wv.similarity(w1=target[i], w2=wordlist[j])
t.append([wordlist[j], similarity])
writer.writerows(sorted(t, key=lambda x: x[1], reverse=True))
|
t = int(input())
while t:
t -= 1
a, b, n = [int(x) for x in input().split()]
if(a>=0 and b>= 0):
if(a>b):
print(1)
elif(b>a):
print(2)
else:
print(0)
else:
if(n%2 == 0):
if(abs(a)>abs(b)):
print(1)
elif(abs(a)<abs(b)):
print(2)
else:
print(0)
else:
if(a>b):
print(1)
elif(b>a):
print(2)
else:
print(0)
|
"""Create the input data pipeline using `tf.data`"""
import tensorflow as tf
import numpy as np
#
def _parse_function(data_index, label, alldata):
    """Look up the data row for the given index (for both training and validation).
    The row is gathered from `alldata`, so the dataset only needs to carry
    indices and labels through the pipeline.
    """
data = tf.gather(tf.constant(alldata), data_index)
return data, label
#
#
# def train_preprocess(image, label, use_random_flip):
# """Image preprocessing for training.
#
# Apply the following operations:
# - Horizontally flip the image with probability 1/2
# - Apply random brightness and saturation
# """
# if use_random_flip:
# image = tf.image.random_flip_left_right(image)
#
# image = tf.image.random_brightness(image, max_delta=32.0 / 255.0)
# image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
#
# # Make sure the image is still in [0, 1]
# image = tf.clip_by_value(image, 0.0, 1.0)
#
# return image, label
def input_fn(is_training, alldata, labels, params):
    """Input function serving batches from in-memory arrays.
    TODO: update description here
Args:
is_training: (bool) whether to use the train or test pipeline.
At training, we shuffle the data and have multiple epochs
        alldata: (np.ndarray) array of input samples, indexed row-wise
labels: (list) corresponding list of labels
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
num_samples = alldata.shape[0]
assert num_samples == labels.shape[0], "Data and labels should have same length"
data_index = np.arange(num_samples)
# Create a Dataset serving batches of data and labels
# We don't repeat for multiple epochs because we always train and evaluate for one epoch
parse_fn = lambda d, l: _parse_function(d, l, alldata)
if is_training:
dataset = (tf.data.Dataset.from_tensor_slices((tf.constant(data_index, shape=[num_samples, 1]), tf.constant(labels, shape=[num_samples, 1])))
.shuffle(num_samples) # whole dataset into the buffer ensures good shuffling
.map(parse_fn)
.batch(params.batch_size)
.prefetch(1) # make sure you always have one batch ready to serve
)
else:
dataset = (tf.data.Dataset.from_tensor_slices((tf.constant(data_index), tf.constant(labels)))
.map(parse_fn)
.batch(params.batch_size)
.prefetch(1) # make sure you always have one batch ready to serve
)
# Create reinitializable iterator from dataset
iterator = dataset.make_initializable_iterator()
data, labels = iterator.get_next()
iterator_init_op = iterator.initializer
inputs = {'data': data, 'labels': labels, 'iterator_init_op': iterator_init_op}
return inputs
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
        if head is None:
            return False
        p1 = head          # slow pointer, advances one node at a time
        p2 = head.next     # fast pointer, advances two nodes at a time
        # the pointers can only meet if the list contains a cycle
        while p1 != p2:
            # if the fast pointer runs off the end, there is no cycle
            if p2 is None or p2.next is None:
                return False
            p1 = p1.next
            p2 = p2.next.next
        return True
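# A minimal self-check sketch (the local ListNode below is a stand-in for the
# commented-out definition above; names and values are illustrative only):
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    a, b, c = ListNode(1), ListNode(2), ListNode(3)
    a.next, b.next, c.next = b, c, b      # 1 -> 2 -> 3 -> back to 2 (cycle)
    print(Solution().hasCycle(a))         # expected: True
    c.next = None                         # break the cycle
    print(Solution().hasCycle(a))         # expected: False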
|
import pandas as pd
import numpy as np
# ReadData class
# Receives the path of an already-clustered CSV file
# Returns the discretized database, the discretization info, and a frame for each group
def read_csv(path):
bd = pd.read_csv(path,sep=',',parse_dates=True)
return bd
# group_separator - method to split the database into a set of DataFrames according to the cluster attribute;
# it should only be called if the database has already been clustered / contains the "group" attribute.
# Receives a DataFrame (data). Returns a set of DataFrames [(cluster, frame)]
def group_separator(data, attr_name):
frames = []
for clt, grupo in data.groupby(attr_name) :
frames.append(grupo)
return frames
def num_instancias(data, attr_cluster):
data = data.drop([attr_cluster], axis=1)
num_instancias = []
for i in range(0, data.shape[1]):
values = data.loc[:,data.columns[i]].values
num_instancias.append(len(sorted(set(values))))
return num_instancias
|
from django.conf.urls import url
from django.urls import path
from apicontent import views
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('login', obtain_auth_token, name = "login"),
url(r'^file/savefile$', views.fileUpload)
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
import numpy as np
import matplotlib.pyplot as plt
def getvals(zmin):
folder = 'distributions'
name1 = 'KiDS_2017-05-26_deepspecz_photoz_10th_BLIND_specweight_1000_4_ZB'
name2 = '_rlim_percentile'
name3 = '_blindB_Nz.asc'
zmin = np.round(zmin,1)
zmax = zmin+0.2
if(zmin==0.9):
zmax = 1.2
inter = str(zmin).replace('.','p')+'t'+str(zmax).replace('.','p')
vals = []
for i in range(10):
fil = open(folder+'/'+name1+inter+name2+str(10*i)+'t'+str(10*(i+1))+name3)
values = fil.readlines()
fil.close()
l = len(values)
zs = np.zeros(l)
vs = np.zeros(l)
for j in range(l):
zs[j],vs[j]=values[j].split()
if(i==1):
redshifts = zs
vals.append(vs)
return redshifts,vals
def getredshift(values,redshifts,length):
valmax = values.sum()
valinter = np.zeros(length+1)
valtemp = 0
for i in range(length):
valtemp = valtemp+values[i]
valinter[i+1] = valtemp
r = np.random.uniform(0,valmax)
for i in range(length):
if(valinter[i]<r and r<=valinter[i+1]):
binnum = i
if(binnum != length-1):
return np.random.uniform(redshifts[binnum],redshifts[binnum+1])
else:
return redshifts[binnum]
|
import os
import json
import falcon
class HelloResource(object):
def on_get(self, req, res):
res.status = falcon.HTTP_200
res.body = json.dumps({
'message': "hello!"
})
class TestVariableResource(object):
def on_get(self, req, res):
res.status = falcon.HTTP_200
res.body = json.dumps({
'message': os.getenv('TEST_THING')
})
|
#!/usr/bin/python
print("Hola, buen dia hace")
|
#from flask_login import UserMixin
from db import get_db
#class Friend(UserMixin):
class Friend():
def __init__(self, id_, user_id, friend_user_id):
self.friend_id = id_
self.user_id = user_id
self.friend_user_id = friend_user_id
@staticmethod
def get(friend_id):
db = get_db()
friend = db.execute(
"SELECT * FROM friend WHERE friend_id = ?", (friend_id,)
).fetchone()
if not friend:
return None
friend = Friend(
id_=friend[0], user_id=friend[1], friend_user_id=friend[2]
)
return friend
@staticmethod
def create(id_, user_id, friend_user_id):
db = get_db()
db.execute(
"INSERT INTO friend (friend_id, user_id, friend_user_id) "
"VALUES (?, ?, ?)",
(id_, user_id, friend_user_id),
)
db.commit()
|
import sys
import json
scores = {}
'''
load sentiments file into dictionary
'''
def load_sentiment_file():
sent_file = open(sys.argv[1])
#'AFINN-111.txt')
for line in sent_file:
term, score = line.split("\t")
scores[term] = int(score)
'''
Derive the sentiment score for each 'new term' that isn't a known term (not in file in AFINN-111.txt).
The score is calculated by considering how many times the new terms occurs in all tweets (term frequency) /
the total sentiment score for the known terms that were found in the all of the same tweets as the new terms
'''
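# Worked example (illustrative numbers only): if an unknown term appears once in a
# tweet whose known terms sum to +3 and once in a tweet whose known terms sum to -1,
# its derived score is (3 + (-1)) / 2 = 1.0.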
def derive_tweet_sentiment_score():
json_data = {}
new_terms_master = {}
with open(sys.argv[2]) as json_file:
#with open('assignment_1_problem_3.json') as json_file:
for line in json_file:
json_data = json.loads(line)
text = json_data.get('text')
terms = []
terms = text.split()
total_score = 0
new_terms = []
for term in terms:
term_score = scores.get(term.lower())
if term_score is None:
new_terms.append(term)
else:
total_score += term_score
from collections import Counter
c = Counter(new_terms) #count occurrences of new terms
for key in c:
value = c.get(key)
if not new_terms_master.has_key(key):
new_terms_master[key] = value, total_score
else:
# increase existing term frequency and sentiment total_score
term_freq = new_terms_master[key][0]
term_freq += value
sentiment_total_score = new_terms_master[key][1]
sentiment_total_score += total_score
new_terms_master[key] = term_freq, sentiment_total_score
for key in new_terms_master:
#print str.format('New term {0}, frequency {1}, known terms score {2}', key.encode('utf-8'), float(new_terms_master[key][0]), float(new_terms_master[key][1]))
print key.encode('utf-8'), float(new_terms_master[key][1]) / float(new_terms_master[key][0])
def main():
load_sentiment_file()
derive_tweet_sentiment_score()
if __name__ == '__main__':
main()
#raw_input()
|
import json
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework import status, mixins
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework_simplejwt import serializers
from rest_framework_simplejwt.exceptions import TokenError
from rest_framework_simplejwt.views import TokenViewBase
from accounts.models import User
from .filters import InfoFilter
from .models import certPage
from .serializers import AIinfoSerialilzer, UserSerializer, UserPatternSerializer
# Display the data received through the API
class certPost(GenericViewSet, mixins.CreateModelMixin):
queryset = certPage.objects.all().order_by('created_at')
serializer_class = AIinfoSerialilzer
# http_method_names = ['GET', 'POST', 'PUT']
# permission_classes = []
    # TODO: permissions still need to be configured
def create(self, request, *args, **kwargs):
        # The proxy has no login, so resolve the User object directly from user_id
user_id = request.data['user']
        # Convert the prediction scores, which arrive as strings, to float
request.data['user'] = User.objects.get(username=user_id).pk
request.data['mouse_prediction'] = float(request.data['mouse_prediction'])
request.data['resource_prediction'] = float(request.data['resource_prediction'])
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
        print("끝났다")  # why does adding this print avoid the WinError 10054 error..?
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
# This one doesn't really need a serializer either
class certUpdate(GenericViewSet, mixins.UpdateModelMixin):
queryset = certPage.objects.all().order_by('created_at')
serializer_class = AIinfoSerialilzer
@method_decorator(login_required(login_url='login'))
@method_decorator(staff_member_required)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object() # certPage
user = instance.user # User
        # the label to apply
        new_label = self.request.POST.get('label_state')
        # if no label was selected
if new_label is None:
messages.info(request, 'label을 선택해주세요.')
return redirect('manager:certDetailView')
        # decide whether to penalize the user
if user.username != new_label:
if instance.type == 3:
                # add a penalty point
                user.penalty = user.penalty + 1
                # suspend the account once the penalty reaches the threshold
if user.penalty >= 2:
user.is_active = 0
user.penalty = 0
messages.info(request, '이상행위가 감지되어 해당 계정을 정지시켰습니다.')
            # suspension recommended
elif instance.type == 2:
user.is_active = 0
messages.info(request, '이상행위가 감지되어 계정이 정지시켰습니다.')
        instance.label = new_label  # update the label
        instance.done = 1  # mark as reviewed
        # update the certPage
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
        # update the User
serializer = UserSerializer(user, data={
'penalty': user.penalty,
'is_active': user.is_active,
}, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return redirect('manager:certDetailView')
# This one doesn't need a serializer either; refactor later
class certDetailView(GenericViewSet, mixins.ListModelMixin):
queryset = certPage.objects.all().order_by('created_at')
serializer_class = AIinfoSerialilzer
@method_decorator(login_required(login_url='login'))
@method_decorator(staff_member_required)
def list(self, request, *args, **kwargs):
# queryset = self.filter_queryset(self.get_queryset())
queryset = self.filter_queryset(certPage.objects.filter(Q(done=0)))
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
# tuple orderedlist to str
cnt_json = json.dumps(serializer.data)
# json to list
lists = json.loads(cnt_json)
# list to dictionary
context = {}
for list in lists:
context[list['id']] = list
user_list = queryset
info_filter = InfoFilter(request.GET, queryset=user_list)
context['filter'] = info_filter
        # fetch user_ids without duplicates
context['users'] = User.objects.all().values_list('username', flat=True).distinct()
return render(request, 'manager/main.html', context, status=status.HTTP_200_OK)
# When the user sends pattern data, forward it to the proxy -> once the client is split out later, the client will do this
class toProxyView(APIView):
def post(self, request, *args, **kwargs):
print(request.data)
return Response(status=status.HTTP_200_OK)
class TokenVerifyView(TokenViewBase):
"""
Takes a token and indicates if it is valid. This view provides no
information about a token's fitness for a particular use.
"""
serializer_class = serializers.TokenVerifySerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except TokenError as e:
return Response(status=status.HTTP_401_UNAUTHORIZED)
# raise InvalidToken(e.args[0])
return Response(serializer.validated_data, status=status.HTTP_200_OK)
|
n = 1
c = 0
for _ in range(5):
A = input()
if A.find('FBI') != -1:
c = 1
print(n, end=' ')
n += 1
if c == 0:
print("HE GOT AWAY!")
# Done
|
import sys
import os
import time
import datetime
import hashlib #import hashlib.md5() as md5()
import http.client
import urllib, urllib.request, time
from io import StringIO  # porting this to Python 3 was genuinely exhausting...
# the raise / except / print >> syntax all changed in Python 3.
# unichr -> chr
__version__ = '0.5.0'
__url__ = 'http://code.google.com/p/pydelicious/'  # this URL no longer opens, although the one below does.
DLCS_RSS = 'http://del.icio.us/rss/'
USER_AGENT = 'pydelicious.py/%s %s' % (__version__, __url__)
try:
from elementtree.ElementTree import parse as parse_xml
except ImportError:
from xml.etree.ElementTree import parse as parse_xml
class PyDeliciousException(Exception):
'''Std. pydelicious error'''
pass
import feedparser
def get_popular(tag = ""):
return getrss(tag = tag, popular = 1)
def get_userposts(user):
return getrss(user = user)
def get_urlposts(url):
return getrss(url = url)
def getrss(tag="", popular=0, url='', user=""):
return dlcs_rss_request(tag=tag, popular=popular, user=user, url=url)
def dlcs_rss_request(tag = "", popular = 0, user = "", url = ''):
tag = str2quote(tag)
user = str2quote(user)
    if url != '':
        # hash the bookmark URL itself; hashlib needs bytes in Python 3
        url = DLCS_RSS + '''url/%s''' % hashlib.md5(url.encode('utf-8')).hexdigest()
elif user != '' and tag != '':
url = DLCS_RSS + '''%(user)s/%(tag)s'''%dict(user=user, tag=tag)
elif user != '' and tag == '':
url = DLCS_RSS + '''%s'''%user
elif popular == 0 and tag == '':
url = DLCS_RSS
elif popular == 0 and tag != '':
url = DLCS_RSS + "tag/%s"%tag
elif popular == 1 and tag == '':
url = DLCS_RSS + '''popular/'''
elif popular == 1 and tag != '':
url = DLCS_RSS + '''popular/%s'''%tag
rss = http_request(url).read()
rss = feedparser.parse(rss)
l = posts()
for e in rss.entries:
        if "links" in e and e["links"] != [] and "href" in e["links"][0]:
            url = e["links"][0]["href"]
        elif "link" in e:
            url = e["link"]
        elif "id" in e:
            url = e["id"]
        else:
            url = ""
        if "title" in e:
            description = e['title']
        elif "title_detail" in e and "title" in e["title_detail"]:
            description = e["title_detail"]['value']
        else:
            description = ''
        try: tags = e['categories'][0][1]
        except:
            try: tags = e["category"]
            except: tags = ""
        if "modified" in e:
            dt = e['modified']
        else:
            dt = ""
        if "summary" in e:
            extended = e['summary']
        elif "summary_detail" in e:
            extended = e['summary_detail']["value"]
        else:
            extended = ""
        if "author" in e:
            user = e['author']
        else:
            user = ""
l.append(post(url = url, description = description, tags = tags, dt = dt, extended = extended, user = user))
return l
def str2quote(s):
    return urllib.parse.quote_plus(s)  # quote_plus UTF-8 encodes str input itself in Python 3
def http_request(url, user_agent=USER_AGENT, retry=4):
request = urllib.request.Request(url, headers={'User-Agent':user_agent})
e = None
tries = retry;
while tries:
try:
return urllib.request.urlopen(request)
except urllib.error.HTTPError as e: # protocol errors,
raise PyDeliciousException( "%s" % e)
except urllib.error.URLError as e:
print( "%s, %s tries left." % (e, tries),file = sys.stderr,)
Waiter()
tries = tries - 1
raise PyDeliciousException ("Unable to retrieve data at '%s', %s" % (url, e))
|
#!/usr/bin/env python
import gzip
from pipeline import *
import util
config = util.read_config('import')
log = util.config_logging(config)
table = 'area.area'
columns=('area', 'zip', 'po_name', 'geom')
filename = 'bayareadata.gz'
area = 'sfbayarea'
db = util.DB.from_config(config)
log.info('importing file %r to table %r' % (filename, table))
# compose import pipeline
cat(gzip.open(filename)) | skip(head=2, tail=2) | split(sep='|') |\
transform([lambda r: area, 0, 1, 2]) |\
join('\t') | load_data(db, table, columns=columns, clean=True)
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
def Round(a):
return int(a+.5)
def init():
glClearColor(1.0,1.0,1.0,0.0)
glColor3f(1.0,0.0,0.0)
glPointSize(3.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0,600.0,0.0,600.0)
def readinput():
global xc,yc,r,n
    xc=int(input("Enter Xcoordinate of center : "))
    yc=int(input("Enter Ycoordinate of center : "))
    r=int(input("Enter Radius of circle : "))
    n=int(input("Enter number of turn in circle : "))
def setpixel(x,y):
glBegin(GL_POINTS)
glVertex2i(x,y)
glEnd()
glFlush()
def circledraw(xc,yc,r,n):
section=1000 # we can vary section for more or less plotting points
theta=0
xs=1/float(section)
setpixel(Round(xc),Round(yc))
i=0
while(i<=(int(n/2))):
theta=0
while(0<=theta<180):
x=xc+r*math.cos(theta)
y=yc+r*math.sin(theta)
theta+=xs;
setpixel(Round(x),Round(y))
r=2*r
while(180<=theta<=360):
x=xc+r*math.cos(theta)
y=yc+r*math.sin(theta)
theta+=xs;
setpixel(Round(x),Round(y))
i+=1
r=2*r
def display():
glClear(GL_COLOR_BUFFER_BIT)
circledraw(xc,yc,r,n)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(100,100)
glutCreateWindow("Parametric Circle Drawing Method ")
readinput()
glutDisplayFunc(display)
init()
glutMainLoop()
main()
|
from app.models.models import db
|
#!/usr/bin/env python
"""
This program finds the location of each HELIX in a PDB file.
(For each helix, it prints out the starting and ending residue
using a format which is recognized by the "select_interval.py" script.
That script is typically used for extracting excerpts of a PDB file.)
This script is not really intended to be used on its own.
"""
import sys
def main():
for line in sys.stdin:
if (line[0:6] == "HELIX "):
initChainID = line[19:20]
initSeqNum = int(line[21:25])
initICode = line[25:26]
endChainID = line[31:32]
endSeqNum = int(line[33:37])
endICode = line[37:38]
sys.stdout.write("\""+initChainID+"\" "+str(initSeqNum)+" \""+initICode+
"\" \""+
endChainID+"\" "+str(endSeqNum)+" \""+endICode+"\"\n")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import random
from typing import Any
RULES = {'rock': 'scissors',
'paper': 'rock',
'scissors': 'paper'}
def relationship_builder(user_option: list, chosen_option: Any):
index_of_chosen = user_option.index(chosen_option)
if chosen_option != user_option[-1]:
new_list = user_option[index_of_chosen + 1:]
for preceding in user_option[:index_of_chosen]:
new_list.append(preceding)
else:
new_list = user_option[:index_of_chosen]
return new_list
def score_getter(player_name):
with open('rating.txt') as file:
all_records = file.read().splitlines()
user_score = list(filter(lambda x: x.split(' ')[0] == player_name, all_records))
if user_score:
return int(user_score[0].split(' ')[1])
else:
return 0
def game_player():
user_name = input('Enter your name: ')
print(f'Hello, {user_name}')
user_score = score_getter(user_name)
playing_options = input().split(',')
if playing_options[0] != '':
weapons = playing_options
else:
weapons = list(RULES.keys())
print("Okay, let's start")
while True:
user_weapon = input()
random_computer_weapon = random.choice(weapons)
if user_weapon == '!exit':
print('Bye')
break
if user_weapon == '!rating':
print(f'Your rating: {user_score}')
elif user_weapon not in weapons:
print('Invalid input')
else:
winning_weapons = relationship_builder(weapons, user_weapon)
if user_weapon == random_computer_weapon:
print(f'There is a draw ({user_weapon})')
user_score += 50
elif random_computer_weapon in winning_weapons[len(winning_weapons) // 2:]:
print(f'Well done. The computer chose {random_computer_weapon} and failed')
user_score += 100
else:
print(f'Sorry, but the computer chose {random_computer_weapon}')
if __name__ == '__main__':
game_player()
|
from scipy.integrate import ode,odeint
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
import inspect, os
#import progressbar
from progbar import *
pool_count=12
#params
Nrun1=2000 # total run count
stamp_versions() # stamps the versions of Scipy, Numpy, GCC, current file name and directory
def run(Nrun1):
global launch_time,c_launch_time
progress_bar(pool_count)
#import the times when the code first launched
launch_time=time.time()
c_launch_time=time.ctime()
#multiprocessing in queue (to use other module comment the line below)
q.put([Nrun1])
for i in range (Nrun1):
#your current process:
current = multiprocessing.current_process()
        #index of this worker process, used as the progress bar's x coordinate
current_xcoordinate=current._identity[0]
#current process id
current_pid=os.getpid()
        #prints '>' each time roughly Nrun1/25 of the job has completed
progress_estimator(i,current_xcoordinate,Nrun1)
#pops up the percentage, launch date of the code (including year month and day),
        #estimated execution time of the code (your computer will do the job in X.XXX hours)
percentage(current_pid, i, current_xcoordinate, Nrun1,launch_time,c_launch_time)
#do some complicated work
time.sleep(0.004)
#stuff for multi processing
from multiprocessing import Pool,Process,Queue
def mmap():
pool = Pool(pool_count)
result = pool.map(run, [Nrun1]*pool_count)
pool.close()
pool.join()
def mmap_asn():
pool=Pool(pool_count)
result = pool.map_async(run, [Nrun1]*pool_count)
pool.close()
pool.join()
def immap():
pool = Pool(pool_count)
result = pool.imap(run, [Nrun1]*pool_count)
pool.close()
pool.join()
def proc():
jobs = []
global q
for i in range(pool_count):
q = Queue()
p = Process(target=run, args=(Nrun1,))
jobs.append(p)
p.start()
q.get()
p.join()
proc()
#mmap()
#mmap_asn()
#immap()
|
# -*- coding: utf-8 -*-
"""
@author: Stephanie McCumsey
CIS 472 WINTER 2015
Naive Bayes
./nb <train> <test> <beta> <model>
the input I use to test : "run nb.py spambase-train.csv spambase-test.csv 1 nb.model"
"""
from __future__ import division
import sys
import pandas as pd
import numpy as np
import pdb
import math
def naiveBayes(trainingData, maxIter, beta):
    '''
    build the naive Bayes model from the training data:
    estimate per-feature log-odds weights and the base log odds
    '''
beta = float(beta)
trainingData = trainingData.as_matrix()
labels = trainingData[:,-1]
numRows = len(labels)
float(sum(labels)/numRows)
# pdb.set_trace()
px1data = []
for i in range(0, trainingData.shape[1]):
'P(x = 1)'
p = sum(trainingData[:,i])/numRows
px1data.append(p)
if i != trainingData.shape[1]-1 :
print p
px0data = []
for i in range(0, len(px1data)):
px0data.append(1-px1data[i])
prod = np.product(px0data)
print "product of all probabilities:", np.product(px1data)
'P(Y=1)'
py1 = px1data[-1]
'P(Y=0)'
py0 = 1 - py1
print "probability y = 1:", py1," \nprobability y = 0:", py0
    ''' compute base log odds '''
ratio = 0
for i in range(0,len(px1data)-1) :
'P( X[i] = 0 | Y = 1 )'
px0y1 = ((1-px1data[i])*py1)/py1
'P( X[i] = 0 | Y = 0 )'
px0y0 = ((1-px1data[i])*py0)/py0
'P( X[i] = 0 | Y = 1 ) / P( X[i] = 0 | Y = 0 )'
# pdb.set_trace()
ratio += math.log(px0y1/px0y0)
base = math.log(py1/py0) + ratio
# pdb.set_trace()
print base
''' compute w[i] '''
weights = []
for i in range(0, len(px1data)-1) :
'P( X[i] = 0 | Y = 1 )'
px0y1 = (1-px1data[i])/py1
'P( X[i] = 0 | Y = 0 )'
px0y0 = (1-px1data[i])/py0
'P( X[i] = 1 | Y = 1 )'
px1y1 = (px1data[i])/py1
'P( X[i] = 1 | Y = 0 )'
px1y0 = (px1data[i])/py0
w = math.log(px1y1/px1y0) - math.log(px0y1/px0y0)
weights.append(w)
return weights, base
def nbTest(w, b, data):
    '''make predictions with the learned naive Bayes weights
    input: weight vector, base log odds, test examples
    output: accuracy (percent of examples classified correctly)
    '''
    count = 0
    for row in data:
        p = 0
        example = row[:-1]
        for j, val in enumerate(example):
            if val == 1:
                p += w[j]
        p += b
        print p
        # a positive log-odds score predicts class 1; count correct predictions
        # (the original never updated count, so accuracy was always zero)
        if (p > 0) == (row[-1] == 1):
            count += 1
accuracy = count/len(data) * 100
return accuracy
def writeFile(file, w, b, attributes):
''' write base and weights to the input.model file'''
file.write(str(b) + "\n")
for idx, w_i in enumerate(w):
file.write(attributes[idx] + " " + str(w_i) + "\n")
pass
def read_csv(CSVfile):
'''read input .cvs file with pandas'''
dataTable = pd.read_csv((CSVfile))
return dataTable
def main (trainCSV, testCSV, beta, modelFile):
trainingData = read_csv(trainCSV) #get feature vectors
testData = read_csv(testCSV)
attributes = trainingData.columns[:-1].values
maxIter = 100
w, b = naiveBayes(trainingData, maxIter, beta)
# print w, b
trainingData2 = trainingData.as_matrix()
testData = testData.as_matrix()
trainAccuracy = nbTest(w, b, trainingData2)
testAccuracy = nbTest(w, b, testData)
# print "training accuracy :", trainAccuracy, "%"
# print "test accuracy :", testAccuracy, "%"
with open(modelFile, 'w+') as file :
writeFile(file, w, b, attributes)
return
if __name__ == "__main__":
args = sys.argv[1:]
main ( *args )
|
"""
N-gram model
"""
class NGramTrie:
def __init__(self, n_gram_size: int, encoded_text: tuple):
self.size = n_gram_size
self.encoded_text = encoded_text
self.n_grams = ()
self.n_gram_frequencies = {}
self.uni_grams = {}
self._fill_n_grams()
self._calculate_n_grams_frequencies()
def _fill_n_grams(self):
if not isinstance(self.encoded_text, tuple):
raise ValueError
n_grams = []
for i in range(len(self.encoded_text) - self.size + 1):
n_grams.append(tuple(self.encoded_text[i:i + self.size]))
self.n_grams = tuple(n_grams)
for word in self.encoded_text:
if (word, ) not in self.uni_grams:
self.uni_grams[(word, )] = self.encoded_text.count(word)
def _calculate_n_grams_frequencies(self):
for n_gram in self.n_grams:
if n_gram in self.n_gram_frequencies:
self.n_gram_frequencies[n_gram] += 1
else:
self.n_gram_frequencies[n_gram] = 1
def public_method(self):
pass
def another_public_method(self):
pass
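# A minimal usage sketch (the encoded corpus below is made up for illustration):
if __name__ == '__main__':
    demo_trie = NGramTrie(2, (1, 2, 3, 1, 2))
    print(demo_trie.n_grams)              # ((1, 2), (2, 3), (3, 1), (1, 2))
    print(demo_trie.n_gram_frequencies)   # {(1, 2): 2, (2, 3): 1, (3, 1): 1}
    print(demo_trie.uni_grams)            # {(1,): 2, (2,): 2, (3,): 1}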
|
import logging
import json
import os
import paho.mqtt.subscribe as subscribe
import paho.mqtt.publish as publish
import RPi.GPIO as gpio
switch_name = os.getenv('SWITCH_NAME', 'test01')
host = os.getenv('MQTT_HOST', 'nas.home')
pin = int(os.getenv('GPIO_PIN', '40'))
invert_logic = False if os.getenv('INVERT_LOGIC', 'False') == 'False' else True
entity_topic = 'home/switch/' + switch_name
command_topic = entity_topic + '/command'
config_topic = entity_topic + '/config'
state_topic = entity_topic + '/state'
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
logger.info('entity topic: %s', entity_topic)
def setup_rpi_gpio():
    logger.info('setting up rpi gpio for pin: %s', str(pin))
gpio.setmode(gpio.BOARD)
gpio.setup(pin, gpio.OUT)
def turn_on():
logger.info('turning %s on', switch_name)
gpio.output(pin, True if not invert_logic else False)
report_state()
def turn_off():
logger.info('turning %s off', switch_name)
gpio.output(pin, False if not invert_logic else True)
report_state()
def report_state():
pin_state = True if gpio.input(pin) else False
if invert_logic:
rst = not pin_state
else:
rst = pin_state
on_off = 'on' if rst else 'off'
logger.info('state: %s', on_off)
publish.single(state_topic, payload=on_off,
hostname=host, retain=True)
def turn_switch(cmd):
if cmd == 'on':
turn_on()
else:
turn_off()
def on_new_command(client, userdata, msg):
cmd = msg.payload.decode('utf-8')
logger.info('get new command: %s from topic: %s', cmd, msg.topic)
turn_switch(cmd)
if __name__ == '__main__':
setup_rpi_gpio()
conf = {"name": switch_name, "command_topic": command_topic,
"payload_on": "on", "payload_off": "off", "optimistic": False,
"retain": True}
publish.single(config_topic, payload=json.dumps(conf), hostname=host,
retain=True)
subscribe.callback(on_new_command, command_topic, hostname=host)
|
import os
from flask import Flask, request, jsonify, url_for, Blueprint, Response
# from flask_dotenv import DotEnv
# from flask_sqlalchemy import SQLAlchemy
# from flask_migrate import Migrate
from flask_swagger import swagger
from flask_cors import CORS
# from api.user.model import db,User
app = Flask(__name__)
# app.url_map.strict_slashes = False
# app.config.from_pyfile('settings.cfg')
# MIGRATE = Migrate(app, db)
# db.init_app(app)
# with app.app_context():
# db.create_all()
# db.session.commit()
# CORS(app)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return Response("<h1>Flask on Now</h1><p>You visited: /%s</p>" % (path), mimetype="text/html")
# @app.route('/api/user/', methods=["GET", "POST"])
# def user():
# if request.method == 'POST':
# request_json = request.get_json()
# name = request_json.get('username')
# email = request_json.get('email')
# new = User(username=name, email=email)
# db.session.add(new)
# db.session.commit()
# return 'User Added',200
# else:
# json_list = [i.serialize for i in User.query.all()]
# return jsonify(json_list), 200
|
# -*- coding: utf-8 -*-
while True:
year=int(input())
if year==-9999:
break
    elif (year%4==0 and year%100!=0) or year%400==0:
print(year,"is a leap year.")
else:
print(year,"is not a leap year.")
|
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print("you have %d cheeses!" % cheese_count)
    print("you have %d boxes of crackers!" % boxes_of_crackers)
print("aha!")
print("we can blabla:")
cheese_and_crackers(20,30)
print("\nwe can blabla:")
amount_of_cheese = 10
amount_of_crackers = 15
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print("\nwe can even do math!")
cheese_and_crackers(10+12, 5+4)
print("\nand we combine those:")
cheese_and_crackers(amount_of_cheese+100, amount_of_crackers+222)
|
import re, datetime, json, tempfile
from pathlib import Path
class Settings:
filename = None
settings = {}
def __init__(self, json_file:str=None) -> None:
if json_file is not None:
Settings.filename = json_file
self.load(json_file)
def __str__(self) -> str:
return json.dumps([Settings.filename, Settings.settings], indent=2)
def __repr__(self) -> str:
return json.dumps(Settings.settings)
def load_settings(self, json_string:str) -> None:
if json_string != "":
Settings.settings = json.loads(json_string)
    def load(self, json_file:str=None) -> None:
        """Reads the settings JSON file (allowing // line comments) into Settings.settings"""
        if json_file is None:
            json_file = Settings.filename
        if not json_file:
            raise Exception("Settings load: No settings file")
        Settings.filename = json_file
reg = re.compile(r" //.*$")
try:
with open(self.filename, "rt", encoding="utf8") as f:
settings_lines = f.readlines()
settings_str = "".join([ re.sub(reg, "", l) for l in settings_lines])
self.load_settings(settings_str)
Settings.settings["temp_dir"] = f"{str(Path(tempfile.gettempdir()).as_posix())}/sic_{str(int(datetime.datetime.today().timestamp()))}/"
except Exception as e:
print(e)
def edit(self, setting:str, value) -> None:
if setting in Settings.settings and value is not None:
Settings.settings[setting] = value
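# A minimal usage sketch (the file name and key below are hypothetical):
if __name__ == "__main__":
    cfg = Settings("settings.json")   # JSON file that may contain // line comments
    cfg.edit("log_level", "DEBUG")    # only updates keys that already exist
    print(cfg)                        # pretty-prints [filename, settings]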
|
"""Example for the Demo class.
This module demonstrates how to write docstrings."""
# Comment for the class.
class Demo:
    """Docstring for the class."""
    # Comment for __init__().
    def __init__(self, v1=11, v2=22):
        """ Docstring for __init__()."""
self.__a = v1
self.__b = v2
    # Comment for the method.
    def do_something(self):
        """Docstring for the method."""
return self.__a + self.__b
if __name__ == "__main__":
d = Demo(12, 34)
print(d.do_something())
# File: class_demo10.py
# Author: Kaiching Chang
# Date: May, 2018
|
# Implementation of mergesort
# Divide and conquer ~ O(n log n)
def mergesort(array):
if len(array) == 1:
return array
# split array into left and right
split = int(len(array)/2)
left = array[:split]
right = array[split:]
return merge(
mergesort(left),
mergesort(right)
)
def merge(left, right):
array = []
leftCount = 0
rightCount = 0
# iterate over each half, comparing the two
while leftCount < len(left) and rightCount < len(right):
if left[leftCount] < right[rightCount]:
array.append(left[leftCount])
leftCount += 1
elif right[rightCount] < left[leftCount]:
array.append(right[rightCount])
rightCount += 1
else:
array.append(left[leftCount])
leftCount += 1
# add leftover array
array += left[leftCount:]
array += right[rightCount:]
return array
numbers = [3,53,65,1,321,54,76,43,2,4,66]
print(mergesort(numbers))
|
#!/usr/bin/env python
import sys,os
from PIL import Image
image=Image.open(sys.argv[1])
if image.width&7: raise Exception("%s: Width must be multiple of 8 (have %d)"%(sys.argv[1],image.width))
data=image.getdata()
frames=[] # each is an array of bytes, byte is one 8-pixel row
def read_row(x,y):
"""Read 8 pixels starting at (x,y) and proceeding rightward. Returns as one byte."""
row=0
mask=0x80
for i in xrange(8):
pixel=data.getpixel((x+i,y))
if len(pixel)==4: a=pixel[3]
elif len(pixel)==3: a=(pixel[0]+pixel[1]+pixel[2]>=384)
elif len(pixel)==2: a=pixel[1]
elif type(pixel)==int: a=pixel
else: raise Exception("%s: Unexpected pixel format, eg %r"%(sys.argv[1],pixel))
if a: row|=mask
mask>>=1
return row
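# Example of read_row's packing (illustrative): if only the pixels at x and x+7
# are opaque, the returned byte is 0b10000001 == 0x81 (leftmost pixel -> high bit).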
for srcx in xrange(0,image.width,8):
frame=[]
for srcy in xrange(image.height):
row=read_row(srcx,srcy)
# skip leading empty rows -- there will be a lot of these
if row or len(frame): frame.append(row)
frames.append(frame)
# Result must have yielded exactly one frame of each height (1..framec) inclusive.
# Sort by length ascending (note this is reverse of the order I drew them).
def cmp_frames(a,b):
if len(a)<len(b): return -1
if len(a)>len(b): return 1
return 0
frames.sort(cmp_frames)
for i in xrange(len(frames)):
if len(frames[i])!=i+1: raise Exception("%s: Expected %d-pixel tall frame"%(sys.argv[1],i+1))
sys.stdout.write("#define ARC_HEIGHT_LIMIT %d\n"%(len(frames),))
sys.stdout.write("static uint8_t arc_raw_data[]={\n")
for frame in frames: sys.stdout.write(" %s,\n"%(','.join(map(str,frame))))
sys.stdout.write("};\n")
# The starting points could be calculated at runtime, but let's be clear.
# We'll add a dummy at the beginning, so it can be indexed by exact desired height instead of h-1.
sys.stdout.write("static uint8_t arc_starts[1+ARC_HEIGHT_LIMIT]={\n 0,")
p=0
for frame in frames:
sys.stdout.write("%d,"%(p,))
p+=len(frame)
sys.stdout.write("\n};\n")
|
# author:lyr time:2019-10-17
# from random import randint  # random number module
# print(randint(0,1))
class Tigter:
def rora(self):
print('父类t属性')
@staticmethod
def tell():
print('父类t静态属性')
class Tigter2:
def rora(self):
print('父类t2属性')
@staticmethod
def tell():
print('父类t2静态属性')
class Sheep:
def rora(self):
print('父类s属性')
@staticmethod
def tell():
print('父类s静态属性')
# child class inheriting from several parent classes
class Sou(Tigter2,Tigter,Sheep):
    # the child class's own method
def rora(self):
print('子类jc属性')
@staticmethod
def tell():
print('子类jc静态属性')
s1=Sou()  # create an instance of the child class
s1.rora()  # calls the child class's own method
# note: lookup follows the order in which the parent classes are inherited (the MRO)
super(Sou,s1).rora()  # super() resolves to the first inherited parent class's method
# passing a later class to super() shifts the lookup one step further along the MRO,
# e.g. super(Tigter, s1) resolves in the class that follows Tigter in the MRO
super(Tigter,s1).tell()
# super(Tigter2,s1).rora()
|
# -*- coding: utf-8 -*-
from discord import Member
from .converters import add_converter
from .converters import ConverterError
from .utils import get_member
# some converters for base types
# noinspection PyUnusedLocal
@add_converter(str)
def convert_str(arg, ctx):
return arg
# noinspection PyUnusedLocal
@add_converter(int)
def convert_int(arg, ctx):
try:
return int(arg)
except ValueError:
raise ConverterError("INT_RESOLVE_error") from None
# noinspection PyUnusedLocal
@add_converter(float)
def convert_float(arg, ctx):
try:
return float(arg)
except ValueError:
raise ConverterError("FLOAT_RESOLVE_error") from None
# noinspection PyUnusedLocal
@add_converter(None, type(None))
def convert_none(arg, ctx):
return None
@add_converter(Member)
async def member_converter(arg, ctx):
if ctx.guild is None:
raise RuntimeError("Cannot use Member resolver in DM")
member = await get_member(arg, ctx)
if member is None:
raise ConverterError("MEMBER_RESOLVE_not_found")
return member
|
vv=input()
print(len(vv))
|
import os
import sys
import unittest
import test_configmgr
import test_pluginmgr
import test_baseimager
import test_msger
import test_runner
import test_chroot
if os.getuid() != 0:
raise SystemExit("Root permission is needed")
suite = unittest.TestSuite()
suite.addTests(test_pluginmgr.suite())
suite.addTests(test_configmgr.suite())
suite.addTests(test_baseimager.suite())
suite.addTests(test_msger.suite())
suite.addTests(test_runner.suite())
suite.addTests(test_chroot.suite())
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
fname = input("Enter the file name: ")
fhandle = open(fname)
# creating a dictionary to count sender
sender = dict()
for line in fhandle:
line = line.strip()
# count the second word if the line starts with 'From'
if line.startswith('From '):
words = line.split()
        address = words[1]
        sender[address] = sender.get(address,0) + 1
#Loop through the dictionary to find the name and count
name = None
count = None
for key, value in sender.items():
if name == None or count < value:
name = key
count = value
print(name, count)
|
string = 'Monty Python'
print(string[0:5])
print(string[6:12])
fruit = 'banana'
print(fruit[:3])
print(fruit[3:])
print(fruit[3:3])
print(fruit[:])
|
"""toc builder.
toc builder has business logic.
"""
import typing
class TOCBuilder:
"""TOC Builder class.
TOCBuilder is a class for main process.
"""
HEADER_CHAR = "#"
ITEM_CHAR = "* "
ITEM_INDENT = " "
SECTION_JOINT = "-"
SECTION_PREFIX = "sec"
CODE_BLOCK_CHAR = "```"
def __init__(self, in_file: typing.TextIO, out_file: typing.TextIO,
level: int, to_embed: str, exclude_symbol: str) -> None:
"""Initialize TOCBuilder.
constructor.
"""
self.in_file = in_file
self.out_file = out_file
self.upper_level = level
self.to_embed = to_embed
self.exclude_symbol = exclude_symbol
self.is_code_block = False
self.section_counter_list = [0] * self.upper_level
self.toc_item_list = []
self.new_contents_list = []
self.toc = ""
self.new_contents = ""
def build(self) -> None:
"""Build Table of Contents.
build is a main function of this class to make TOC.
"""
for line in self.in_file:
if self._is_code_block(line):
self.new_contents_list.append(line)
continue
if not self._is_header(line):
self.new_contents_list.append(line)
continue
level = self._detect_level(line)
if level > self.upper_level:
self.new_contents_list.append(line)
continue
if self._has_exclude_comment(line):
self.new_contents_list.append(line)
continue
section = self._build_section(level)
title = self._extract_header_title(line, level)
self.new_contents_list.append(self._make_section_tag(section))
self.new_contents_list.append(line)
self._append_toc_row(title, section, level)
self.toc = "\n".join(self.toc_item_list) + "\n"
self.new_contents = "".join(self.new_contents_list)
self._embed_toc()
def write(self) -> None:
"""Write a markdown.
write a markdown file.
"""
self.out_file.write(self.new_contents)
def _is_code_block(self, line: str) -> bool:
if self.CODE_BLOCK_CHAR in line:
self.is_code_block = not self.is_code_block
return self.is_code_block
def _is_header(self, line: str) -> bool:
return line.startswith(self.HEADER_CHAR)
def _has_exclude_comment(self, line: str) -> bool:
return self.exclude_symbol in line
def _detect_level(self, line: str) -> int:
level = 0
for char in line:
if char != self.HEADER_CHAR:
break
level += 1
return level
def _append_toc_row(self, title: str, section: str, level: int) -> None:
indent = self.ITEM_INDENT * (level - 1)
title = f"[{title}]"
section = f"(#{section})"
self.toc_item_list.append(indent + self.ITEM_CHAR + title + section)
def _extract_header_title(self, line: str, level: int) -> str:
title = line[level:]
return title.strip()
def _build_section(self, level: int) -> str:
for i in range(len(self.section_counter_list)):
if i < level - 1:
continue
elif i == level - 1:
self.section_counter_list[i] += 1
else:
self.section_counter_list[i] = 0
section_num = [str(s) for s in self.section_counter_list]
return self.SECTION_PREFIX + self.SECTION_JOINT.join(section_num)
def _make_section_tag(self, section: str) -> str:
return f"<a id=\"{section}\"></a>\n"
def _embed_toc(self) -> None:
self.new_contents = self.new_contents.replace(self.to_embed, self.toc)
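# A minimal usage sketch with in-memory files (the placeholder token and the
# markdown content below are made up for illustration):
if __name__ == "__main__":
    import io
    src = io.StringIO("[TOC]\n# Intro\ntext\n## Details\nmore text\n")
    dst = io.StringIO()
    builder = TOCBuilder(src, dst, level=2, to_embed="[TOC]\n",
                         exclude_symbol="<!-- toc-skip -->")
    builder.build()
    builder.write()
    print(dst.getvalue())   # TOC list followed by the section-tagged headers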
|
import mysql.connector
import datos_db
conexion = mysql.connector.connect(**datos_db.dbConnect)
cursor = conexion.cursor()
sql = "update usuarios set clave = 'solgty780' where id = 27"
cursor.execute(sql)
'''n_id = int(input("Id: "))
clave = input("Clave usuario: ")
sql = "update usuarios set clave = %s where id = %s"
cursor.execute(sql, (clave, n_id,))'''
conexion.commit()
cursor.close()
conexion.close()
|
from .flow_interface import FlowInterface
from synonym_dict import SynonymSet
class Flow(FlowInterface):
"""
A partly-abstract class that implements the flow specification but not the entity specification.
"""
_context = ()
_context_set_level = 0
_filt = str.maketrans('\u00b4\u00a0\u2032', "' '", '') # filter name strings to pull out problematic unicode
def _catch_context(self, key, value):
"""
Add a hook to set context in __getitem__ or wherever is appropriate, to capture and automatically set context
according to the following precedence:
context > compartment > category > class | classification > cutoff (default)
:param key:
:param value:
:return:
"""
try:
level = {'none': 0,
'class': 1,
'classification': 1,
'classifications': 1,
'category': 2,
'categories': 2,
'compartment': 3,
'compartments': 3,
'context': 4}[key.lower()]
except KeyError:
return
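        # Illustrative precedence (hypothetical values): a 'Compartment' of ('air',)
        # sets the context; a later 'Category' of ('water',) is ignored because
        # compartment outranks category; a 'Context' value always overrides.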
if isinstance(value, str):
value = (value, )
if level > self._context_set_level:
self._context_set_level = min([level, 3]) # always allow context spec to override
self._context = tuple(filter(None, value))
def _add_synonym(self, term, set_name=False):
if set_name:
tm = term.translate(self._filt).strip() # have to put strip after because \u00a0 turns to space
self._flowable.add_term(tm)
self._flowable.set_name(tm)
self._flowable.add_term(term.strip())
def _catch_flowable(self, key, value):
"""
Returns True or None- allow to chain to avoid redundant _catch_context
:param key:
:param value:
:return:
"""
if key == 'name':
self._add_synonym(value, set_name=True)
return True
elif key == 'casnumber':
self._add_synonym(value)
return True
elif key == 'synonyms':
if isinstance(value, str):
self._add_synonym(value)
return True
else:
for v in value:
self._add_synonym(v)
return True
__flowable = None
@property
def _flowable(self):
if self.__flowable is None:
self.__flowable = SynonymSet()
return self.__flowable
@property
def name(self):
return self._flowable.name
@name.setter
def name(self, name):
"""
:param name:
:return:
"""
if name is None:
raise TypeError('None is not a valid name')
elif name in self.synonyms:
self._flowable.set_name(name)
else:
raise ValueError('name %s not a recognized synonym for %s' % (name, self.name))
@property
def synonyms(self):
for t in self._flowable.terms:
yield t
@property
def context(self):
"""
A flow's context is any hierarchical tuple of strings (generic, intermediate, ..., specific).
:return:
"""
return self._context
@context.setter
def context(self, value):
self._catch_context('Context', value)
def match(self, other):
"""
match if any synonyms match
:param other:
:return:
"""
'''
return (self.uuid == other.uuid or
self['Name'].lower() == other['Name'].lower() or
(trim_cas(self['CasNumber']) == trim_cas(other['CasNumber']) and len(self['CasNumber']) > 4) or
self.external_ref == other.external_ref) # not sure about this last one! we should check origin too
'''
if isinstance(other, str):
return other in self._flowable
return any([t in self._flowable for t in other.synonyms])
|
import curses
import random
import time
def update(win_col, k, player):
if k == ord("a") and player[2] >= 1:
player[2] -= 1
if k == ord("d") and player[2] <= win_col:
player[2] += 1
if k == ord("s"):
player[3] = True
return player
def draw_man(win, chances):
    # (row, col) positions of the body parts; one part stays hidden per remaining chance
    parts = [(3, 2), (3, 0), (1, 2), (1, 0), (0, 1)]
    man = [list(row) for row in (
        " O ",
        "/|\\",
        " | ",
        u"\u2143 L",
    )]
    for row, col in parts[:chances]:
        man[row][col] = " "
    for y, row in enumerate(man):
        win.addstr(y + 1, 1, "".join(row))
def main(stdscr):
curses.noecho()
curses.cbreak()
curses.curs_set(0)
chances = 5
game_win = curses.newwin(0, 0)
#game_win.nodelay(True)
    board_win = curses.newwin(game_win.getmaxyx()[0] // 2, game_win.getmaxyx()[1], 0, 0)  # newwin needs integer sizes
board_win.border()
#hud_win.nodelay(True)
# Player = [Char, Y, X, Shoot]
#player = ['^', game_win_max[0]-1, game_win_max[1]//2, False]
while True:
game_win.clear()
board_win.clear()
        k = game_win.getch()
        if k == ord("q"):  # allow quitting so the terminal settings below get restored
            break
game_win.refresh()
board_win.refresh()
time.sleep(0.1)
curses.nocbreak()
curses.echo()
if __name__ == "__main__":
curses.wrapper(main)
|
import numpy as np
def to_class_lables(score_list,number_of_classes):
lables = ['A','B','C','D','E','F','G','H','I','J','K']
matching_scores_to_lables = [[], [], [], [], [], [], [], [], [], [], []]
rounded_score_list = [round(x) for x in score_list]
class_labels=[]
if number_of_classes == 11:
all_possible_scores = range(-5,6)
score_label_dict = dict()
for i in range(number_of_classes):
score_label_dict[all_possible_scores[i]] = lables[i]
for i in range(len(score_list)):
label = score_label_dict[rounded_score_list[i]]
lab_index = lables.index(label)
matching_scores_to_lables[lab_index].append(score_list[i])
class_labels.append(label)
categories = lables
else:
start = 100/float(number_of_classes)
edges = [start*i for i in range(1,number_of_classes+1)]
percentiles = np.percentile(score_list,edges)
categories = lables[:number_of_classes]
        print 'PERCENTILES:', percentiles
for i in range(len(score_list)):
score = rounded_score_list[i]
actual_values_score = score_list[i]
for a in range(number_of_classes):
if a == 0:
if score < percentiles[a]:
label = lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
elif a >0 and a < number_of_classes-1:
b = a-1
if score >= percentiles[b] and score < percentiles[a]:
label=lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
elif a == number_of_classes-1:
b = a-1
if score>= percentiles[b] and score <= percentiles[a]:
label = lables[a]
matching_scores_to_lables[a].append(actual_values_score)
#print "score/label: ", str(score) +"/" + str(label)
class_labels.append(label)
return class_labels,categories,matching_scores_to_lables
def back_to_numbers(class_scores,scores_to_lables_lists,numclass):
from scipy.stats import mode
back_to_values_mean = np.zeros(len(class_scores))
back_to_values_mode_small = np.zeros(len(class_scores))
back_to_values_mode_larger = np.zeros(len(class_scores))
back_to_values_median = np.zeros(len(class_scores))
back_to_values_max = np.zeros(len(class_scores))
back_to_values_min = np.zeros(len(class_scores))
lables = ['A','B','C','D','E','F','G','H','I','J','K']
numbers_lables_dict = dict()
for j in range(0,11):
numbers_lables_dict[lables[j]] = j
for i in range(len(class_scores)):
cs = class_scores[i]
bin = numbers_lables_dict[cs]
back_to_values_mean[i] = np.array(scores_to_lables_lists[bin]).mean()
back_to_values_mode_small[i] = mode(scores_to_lables_lists[bin])[0][0]
back_to_values_mode_larger[i] = mode(scores_to_lables_lists[bin])[1][0]
back_to_values_median[i] = np.median(scores_to_lables_lists[bin])
back_to_values_max[i] = np.array(scores_to_lables_lists[bin]).max()
back_to_values_min[i] = np.array(scores_to_lables_lists[bin]).min()
return [back_to_values_mean,back_to_values_mode_small,back_to_values_mode_larger,back_to_values_median,back_to_values_max,back_to_values_min ]
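if __name__ == '__main__':
    # Hedged usage sketch (the scores below are made up): bin six raw scores into
    # 3 percentile-based classes, then map the class labels back to representative
    # numbers via the per-class score lists.
    demo_scores = [1.2, -0.5, 3.3, 0.1, 2.8, -1.9]
    demo_labels, demo_categories, demo_buckets = to_class_lables(demo_scores, 3)
    recovered = back_to_numbers(demo_labels, demo_buckets, 3)
    print(demo_labels)
    print(recovered[3])  # per-item median of the class each score fell into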
|
import numpy as np
import matplotlib.pyplot as plt
import sys
np.random.seed(42)
def init_config(m,init_type='zero',bc='nonperiodic'):
ret = np.zeros((m,m))
if init_type == 'zero':
pass
elif init_type == 'full':
if bc == 'nonperiodic':
ret[::2,::2] = np.ones((m/2,m/2))
ret[1::2,1::2] = np.ones((m/2,m/2))
elif bc == 'periodic':
ret[2::2,2::2] = np.ones((m/2-1,m/2-1))
ret[1:-2:2,1:-2:2] = np.ones((m/2-1,m/2-1))
else:
print 'Error: Unknown boundary condition requested'
else:
print 'Error: Unknown initial configuration requested'
return ret
def get_random_pos(m):
return np.random.randint(0,m),np.random.randint(0,m)
def is_valid_flip(i,j,m,occ_pos,bc='nonperiodic'):
ret = True
if occ_pos[i,j] == 1:
pass
else:
if bc == 'nonperiodic':
n1 = (i,j) if j == m-1 else (i,j+1)
n2 = (i,j) if j == 0 else (i,j-1)
n3 = (i,j) if i == m-1 else (i+1,j)
n4 = (i,j) if i == 0 else (i-1,j)
elif bc == 'periodic':
n1 = (i,(j+1)%m)
n2 = (i,(j-1)%m)
n3 = ((i+1)%m,j)
n4 = ((i-1)%m,j)
else:
print 'Error: Unknown boundary condition requested'
if occ_pos[n1]+occ_pos[n2]+occ_pos[n3]+occ_pos[n4]>0:
ret = False
return ret
def flip(i,j,occ_pos):
ret = occ_pos
ret[i,j] = 1 - ret[i,j]
return ret
def get_rel_occupancy(occ_pos):
return np.mean(occ_pos)
def MCMC(m,init_type,bc,burn_in,N):
occ_pos = init_config(m,init_type,bc)
for k in range(burn_in):
i,j = get_random_pos(m)
if is_valid_flip(i,j,m,occ_pos,bc):
occ_pos = flip(i,j,occ_pos)
rel_occupancy_list = np.zeros(N)
for k in range(N):
sys.stdout.write('Step {} of {}\r'.format(k,N))
sys.stdout.flush()
i,j = get_random_pos(m)
if is_valid_flip(i,j,m,occ_pos,bc):
occ_pos = flip(i,j,occ_pos)
rel_occupancy_list[k] = get_rel_occupancy(occ_pos)
print ' '
return rel_occupancy_list
def get_data(M,N,burn_in):
for m in M:
res = MCMC(m,'zero','nonperiodic',burn_in,N)
np.save('data/{}_zero_np.npy'.format(m),res)
res = MCMC(m,'full','nonperiodic',burn_in,N)
np.save('data/{}_full_np.npy'.format(m),res)
res = MCMC(m,'zero','periodic',burn_in,N)
np.save('data/{}_zero_p.npy'.format(m),res)
res = MCMC(m,'full','periodic',burn_in,N)
np.save('data/{}_full_p.npy'.format(m),res)
def plot_data(M,N):
bc = 'np'
plt.figure()
N_arr = np.linspace(1,N,N)
for m in M:
#~ print m
res1 = np.load('data/{}_zero_{}.npy'.format(m,bc))
res2 = np.load('data/{}_full_{}.npy'.format(m,bc))
mn1 = np.cumsum(res1)/N_arr
mn2 = np.cumsum(res2)/N_arr
#~ start = int(9e6)
#~ print np.mean(mn1[start:]), '+-', np.std(mn1[start:])
#~ print np.mean(mn2[start:]), '+-', np.std(mn2[start:])
p = plt.plot(N_arr[::4],mn1[::4],label=r'$m = {{{}}}$'.format(m))
plt.plot(N_arr[::4],mn2[::4],color=p[0].get_color())
plt.semilogx()
plt.legend(loc='best')
plt.xlabel(r'$n$')
plt.ylabel(r'$M_n(f)(\omega)$')
    plt.savefig('hardcore.pdf')  # save before show(), otherwise the saved figure may be empty
    plt.show()
def animate(m,N,interval,init_type,bc):
occ_pos = init_config(m,init_type,bc)
mn = 0.
fig,ax = plt.subplots(1,2)
ax[1].set_title('Mean occupancy fraction')
for k in range(N):
i,j = get_random_pos(m)
if is_valid_flip(i,j,m,occ_pos,bc):
occ_pos = flip(i,j,occ_pos)
mn += get_rel_occupancy(occ_pos)
        if k % interval == 0 and k > 0:  # skip k == 0 to avoid dividing by zero below
p = ax[0].imshow(occ_pos,cmap='plasma')
cb = plt.colorbar(p,ax=ax[0])
ax[0].set_title(r'Configuration at $N = {{{}}}$ steps'.format(k))
ax[1].scatter(k,mn/k,color='red')
plt.tight_layout()
plt.pause(0.01)
cb.remove()
ax[0].clear()
if __name__ == '__main__':
M = [8,16,32,64,128]
N = int(1e7)
burn_in = 0
#~ get_data(M,N,burn_in)
#~ plot_data(M,N)
animate(128,100000,500,'zero','periodic')
|
from py4j.java_gateway import JavaGateway
class Token:
def __init__(self, conll):
# CoNLL-U Format
# https://universaldependencies.org/format.html
# id, form, lemma, uPOSTag, xPOSTag, feats, head, depRel, deps, misc
conll = conll.split("\t")
self.id = conll[0]
self.form = conll[1]
self.lemma = conll[2]
self.uPOSTag = conll[3]
self.xPOSTag = conll[4]
self.feats = conll[5]
self.head = conll[6]
self.depRel = conll[7]
self.deps = conll[8]
self.misc = conll[9]
class BKParser:
def __init__(self, jar_file="BKParser-1.0.jar"):
try:
self.__model = self.get_bkparser(jar_file)
except:
raise RuntimeError("Can not init model. Check log file!")
def get_bkparser(self, jar_file):
gateway = JavaGateway.launch_gateway(classpath=jar_file, die_on_exit=True)
parser = gateway.jvm.vn.edu.hust.nlp.parser.BKParser()
return parser
def parse(self, text):
result = []
res = self.__model.parse(text)
for tokenList in res:
for tokenC in tokenList:
line = tokenC.toString()
if line == '':
continue
token = Token(line)
result.append(token)
return result
if __name__ == '__main__':
parser = BKParser()
text = 'Cuối cùng, đoàn quân của HLV Conte đã hoàn thành nhiệm vụ này khi vượt qua đối thủ với tỷ số 4-2'
print(parser.parse(text))
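    # Hedged illustration (the CoNLL-U line below is made up rather than real
    # parser output): Token simply splits the ten tab-separated columns.
    sample = "1\tConte\tConte\tPROPN\tNp\t_\t0\troot\t_\t_"
    tok = Token(sample)
    print(tok.form, tok.uPOSTag, tok.depRel)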
|
###############################################################################
######## Retrieval of candidate neologisms ending with -age from frWaC ########
###############################################################################
## Before running the script, we need a list extracted from NoSketchEngine : https://www.clarin.si/noske/run.cgi/first_form?corpname=frwac;align=
## We select 'CQL' and request [word=".{3,}ages?"]
## We want at least three characters (.{3,}) before -age and singular as well as plural words ending in -age (ages?).
## With the list that is generated we select 'Frequency > Node forms' at the left tab and click 'Save' at the top, with '100000' as the maximum of lines.
## We then download the results. All the script is based on this file, named here 'frwac_age.txt'.
## We also need to download the dictionary lists :
# 'Lexique.382.csv' (from http://www.lexique.org)
# 'lefff-3.4.elex' (from https://www.labri.fr/perso/clement/lefff/)
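## A small illustrative check (not part of the original extraction pipeline): the
## Python equivalent of the CQL pattern [word=".{3,}ages?"] applied to made-up forms.
import re
assert re.fullmatch(r'.{3,}ages?', 'pistonnage')
assert re.fullmatch(r'.{3,}ages?', 'covoiturages')
assert not re.fullmatch(r'.{3,}ages?', 'page')  # fewer than three characters before -age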
import re
from collections import Counter
##### Word extraction from frWaC ####
#####################################
words_frwac = Counter()
with open('frwac_age.txt', encoding='utf-8') as res_frwac:
for line in res_frwac:
line = line.rstrip()
# We search for a regex pattern such as : "pistonnage 9"
res_search = re.search('([^\t]+)\t([0-9]+)', line)
if res_search:
word, freq = res_search.groups()
freq = int(freq) # we convert the frequency from string to integer
word = word.lower() # we lowercase the words to reduce variation
if re.search(r"\W|[0-9]|([a-zA-Z])\1{2}|http|Message|Page|Image", word):
                # this regex eliminates all -age ending entries that are clearly not deverbal words (URLs, the same letter three times in a row, etc.)
continue
if word[-1] == 's':
word = word[:-1]
words_frwac[word] += freq
## We thus obtain data with words ending in -age and their frequencies
print("The first list of words ending in -age contains " + str(len(words_frwac.keys())) + " entries")
#### Comparison with Lexique.org and Lefff
#####################################
## We take a set() to keep only unique forms
set_words_lexique = set() # set of forms of Lexique
words_age_lexique = set() # set of -age ending forms of Lexique
with open('Lexique382.csv', encoding='utf-8') as lexique:
for line in lexique:
if not line.startswith('1_ortho'):
word = line.split(';')[0]
if word[-1] == 's':
word = word[:-1]
set_words_lexique.add(word)
if re.search('.{3,}ages?$', word):
words_age_lexique.add(word)
print("{} different forms in Lexique.org and {} ending in -age".format(len(set_words_lexique), len(words_age_lexique)))
## Now we compare the lists of frWaC and Lexique with the difference() function
set_words_frwac = set(words_frwac.keys())
diff = set_words_frwac.difference(set_words_lexique)
print("There is {} forms ending in -age not present in Lexique.org".format(len(diff)))
# We do the same with the file 'lefff-3.4.elex' as we did for 'Lexique382.csv'
set_words_lefff = set()
words_age_lefff = set()
with open('lefff-3.4.elex') as lexique:
for line in lexique:
word = line.split('\t')[0]
if word[-1] == 's':
word = word[:-1]
set_words_lefff.add(word)
        if re.search('.{3,}ages?$', word):
words_age_lefff.add(word)
print("{} different forms in Lefff and {} ending in -age".format(len(set_words_lefff), len(words_age_lefff)))
diff_lefff = set_words_frwac.difference(set_words_lefff)
print("There is {} forms ending in -age not present in Lefff".format(len(diff_lefff)))
diff_lefff_lexique = diff.difference(set_words_lefff)
print("There is {} forms ending in -age absent from both Lefff and Lexique.org".format(len(diff_lefff_lexique)))
new_words = sorted(diff, key=lambda x: words_frwac[x], reverse=True)
new_words_counts = [words_frwac[w] for w in new_words]
with open('candidate-age-frwac.txt', 'w') as f_diff:
for item in sorted(diff_lefff, key=lambda x: words_frwac[x], reverse=True):
print(item, words_frwac[item], file=f_diff)
#### Sampling of new_words list #####
#####################################
## As the list is too long we sample it following the method below
k = 20
N = len(new_words)
q = N // k  # floor division: size of each full sublist
r = N % k   # remainder left over after the k full sublists
sublists = [new_words[i*q:(i+1)*q] for i in range(k)]  # splits the first q*k words into k sublists
if r:
    sublists[-1] += new_words[-r:]  # add the remaining r words to the last sublist
from random import seed, sample
seed(0)
## We randomly draw 100 words per sublist and write them into a file
with open('sampled_words.txt', 'w') as file:
for sublist in sublists:
sampled = sample(sublist, 100)
for word in sorted(sampled, key=lambda w: -words_frwac[w]):
print(word, " ", words_frwac[word], file=file)
|
'''Select files to be edited in Vim from Git repository data'''
# Copyright (c) 2013-2016 Benjamin Althues <benjamin@babab.nl>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os.path
import subprocess
import sys
import pycommand
try:
from colors import green, red
except ImportError:
green = red = lambda x: x
__version__ = '0.1.0'
class Gim:
vim_executable = 'vim'
def __init__(self, flags):
self.flags = flags
def statuslist(self):
ls = subprocess.check_output(['git', 'status', '--short']) \
.decode('UTF-8', errors='strict').split('\n')[:-1]
ret = []
n = 0
for i in ls:
if 'M' in i[:2] or 'A' in i[:2]:
n += 1
ret.append((n, green(i[0]), red(i[1]), i[3:]))
elif 'UU' in i[:2]:
n += 1
ret.append((n, red(i[0]), red(i[1]), i[3:]))
elif 'R' in i[:2]:
n += 1
ret.append((n, green('R'), red(i[1]), i[3:].split()[2]))
elif '?' in i[:2] and not self.flags['staged']:
n += 1
ret.append((n, red(i[0]), red(i[1]), i[3:]))
return ret
def ls_files(self):
ls = subprocess.check_output(['git', 'ls-files']) \
.decode('UTF-8', errors='strict').split('\n')[:-1]
ret = []
n = 0
for i in ls:
n += 1
ret.append((n, i))
return ret
def last_commit(self):
files = subprocess.check_output(
['git', 'show', '--format=', '--name-only', '--no-show-signature']
).decode('UTF-8', errors='strict').split('\n')[:-1]
ret = []
n = 0
for i in files:
n += 1
ret.append((n, i))
return ret
def vimargs(self, files):
if self.flags.emacs:
ret = ['emacsclient', '-q', '-n']
elif self.flags.diff:
ret = ['vimdiff']
else:
if len(files) == 1:
ret = [self.vim_executable]
elif len(files) == 2:
cols = int(subprocess.check_output(['tput', 'cols'])
.decode('ascii', errors='strict'))
if cols < 80:
ret = [self.vim_executable, '-p']
elif cols < 191:
ret = [self.vim_executable, '-o']
else:
ret = [self.vim_executable, '-O']
else:
ret = [self.vim_executable, '-p']
args = ret + files
print(green(' '.join(args)))
return args
class ShellCommand(pycommand.CommandBase):
'''Select files to be edited in Vim from Git repository data'''
usagestr = 'usage: {0} [options]'.format(os.path.basename(sys.argv[0]))
description = __doc__
optionList = (
('help', ('h', False, 'show this help information')),
('staged', ('s', False, 'Do not include unstaged files')),
('unstaged', ('u', False, 'Include unstaged files [default]')),
('indexed', ('i', False, 'Show indexed files only (git ls-files)')),
('last', ('1', False, 'Show files of last commit')),
('diff', ('d', False, 'Open selected files with vimdiff')),
('emacs', ('e', False, 'Open selected files with Emacs')),
('version', ('', False, 'show version information')),
)
def run(self):
if self.flags['help']:
print(self.usage)
return 0
elif self.flags['version']:
print('Gim version: {0} / Python version: {1} '
.format(__version__, sys.version.split()[0]))
return 0
self.gim = Gim(self.flags)
print('Gim version {version}, run "gim --help" for more information'
.format(version=__version__))
if self.flags.indexed or self.flags.last:
try:
if self.flags.indexed:
gimFiles = self.gim.ls_files()
elif self.flags.last:
gimFiles = self.gim.last_commit()
except subprocess.CalledProcessError as e:
return 2
if not gimFiles:
return 0
padding = 2 if len(gimFiles) > 9 else 1
for i in gimFiles:
print('{0:{p}} {1}'
.format(i[0], i[1], p=padding))
inp = input('\nSelect files to edit [{0}]: '
.format(green(gimFiles[0][1])))
if not inp:
inp = '1'
files = []
for i in inp.split():
try:
files.append(gimFiles[int(i) - 1][1])
except (IndexError, ValueError):
pass
else:
try:
git_status = self.gim.statuslist()
except subprocess.CalledProcessError as e:
return 2
if not git_status:
return 0
padding = 2 if len(git_status) > 9 else 1
for i in git_status:
print('{0:{p}} {1}{2} {3}'
.format(i[0], i[1], i[2], i[3], p=padding))
inp = input('\nSelect files to edit [{0}]: '
.format(red(git_status[0][3])))
inp = inp or '1'
files = []
for i in inp.split():
try:
files.append(git_status[int(i) - 1][3])
except (IndexError, ValueError):
pass
if files:
try:
# Instead of `subprocess.call`, use `os.execvp` to
# execute `Gim.vimargs()`, replacing the python process
# of gim with the editor process.
args = self.gim.vimargs(files)
os.execvp(args[0], args)
except IOError as e:
print(e)
return e.errno
def main(argv=sys.argv[1:]):
"""Main function for the CLI to be used as script (entry point)."""
try:
cmd = ShellCommand(argv)
if cmd.error:
print('error: {0}'.format(cmd.error))
return 1
else:
return cmd.run()
except (KeyboardInterrupt, EOFError):
return 1
if __name__ == '__main__':
sys.exit(main())
|
from random import random, randint
from django.core.management import BaseCommand
from faker import Faker, providers
from blog.models import *
CATEGORIE = ['Art', 'Animals', 'Clothing', 'Dairy Products', 'Drinks', 'Emotions', 'Foods', 'Fruits','Furniture', 'Insects',
'Jobs', 'Kitchen', 'Tools', 'Meats', 'Musical Instruments', 'Music', 'Places', 'Shapes', 'Sports', 'Vegetables',
'Transportation', 'Colors', 'Holidays', 'Seasons', 'Christmas', 'Winter', 'Easter', 'Spring', 'Flag', 'Memorial',
'Halloween', 'New Year', 'Summer']
import os
class Providers(providers.BaseProvider):
def blog_categorie(self):
return self.random_element(CATEGORIE)
def categorie(self):
return self.random_element(Categories.objects.all())
def blogger(self):
return self.random_element(Blogger.objects.filter(is_staff=False, is_blogger=False))
def blog_blogger(self):
return self.random_element(Blogger.objects.filter(is_blogger=True))
def viewer(self):
return self.random_element(Blogger.objects.all())
def blog(self):
return self.random_element(Blog.objects.all())
class Command(BaseCommand):
help = 'command info'
def handle(self, *args, **kwargs):
img = 60
maxcom = 40
faker = Faker(['en'])
faker.add_provider(Providers)
## create categories 30
# for i in range(30):
# d = faker.unique.blog_categorie()
# cat = Categories.objects.create(title=d, description=d)
# print('Categories -',cat)
# cat.save()
## create viewer (user) 1000+300+30 ~~= 1400~1500
for i in range(1400):
data = faker.profile()
name = data['username']
user = User.objects.create(username=f'{name}{str(randint(0,9))}{str(randint(0,9))}', email=data['mail'], first_name=data['name'])
print('User -',user.username)
user.set_password('1234test')
user.groups.add(1)
bio=faker.text(max_nb_chars=200)
pic = randint(0,img)
Blogger.objects.create(user=user, bio=bio, website=data['website'][0],
user_pic=f'static/color/{pic}.jpg')
## create blogger 300
for i in range(300):
user = faker.unique.blogger()
user.is_blogger = True
user.save()
user.user.groups.add(2)
print('blogger -', user)
## create staff 30
for i in range(30):
user = faker.unique.blogger()
user.is_blogger = True
user.is_staff = True
user.user.groups.add(3)
user.save()
print('staff -',user)
## create blog 3000
for i in range(3000):
title = faker.text(max_nb_chars=15)
des = faker.text(max_nb_chars=1000)
des_s = faker.text(max_nb_chars=100)
owner = faker.blog_blogger()
pic = randint(0,img)
photo = f'static/color/{pic}.jpg'
pic = randint(1, 6)
cat = [faker.categorie() for i in range(pic)]
blog = Blog.objects.create(title=title,description_short=des_s, description=des,
owner=owner.user, photo=photo )
blog.categorie.set(cat)
blog.save()
print('blog -', blog.title)
for i in Blog.objects.all():
count = randint(5, maxcom)
for j in range(count):
user = faker.viewer()
text = faker.text(max_nb_chars=200)
Comment.objects.create(blog=i, owner=user.user, description=text )
i.count()
print('comment -',i)
|
import threading
import multiprocessing
import subprocess
import time
import inspect
from ha_engine import ha_parser
from ha_engine import ha_infra
import os
import signal
import sys
LOG = ha_infra.ha_logging(__name__)
class HAExecutor(object):
def __init__(self, parser):
"""
        Takes the resource from haparser as an input parameter and creates
        all the objects that are needed to run the executor.
"""
        # Resource from haparser
self.executor_threads = []
self.executor_data = parser.parsed_executor_config
self.plugin_to_class_map = parser.plugin_to_class_map
self.node_plugin_map = parser.node_plugin_map
self.sync_objects = {}
self.finish_execution_objects = {}
self.open_pipes = []
self.xterm_position = ["-0-0", "+50-0", "+0+0", "-0+0"]
if self.executor_data:
ha_infra.dump_on_console(self.executor_data, "Executor Data")
ha_infra.dump_on_console(self.plugin_to_class_map,
"Plugin to class map")
def run(self):
"""
Actual execution starts here
"""
# Exit if the executor is not defined.
execute = self.executor_data.get('executors', None)
if execute is None:
LOG.critical('Nothing to run')
ha_infra.ha_exit(0)
self.executor_threads = []
for executor_index, executor_block in enumerate(execute):
parallel = False
repeat_count = 1
LOG.info('Executing %s' % str(executor_index+1))
            # Check whether the executor block needs to be repeated
            # and process the repeat command
if not executor_block:
LOG.info("******** Completing the execution ******** ")
ha_infra.ha_exit(0)
if 'repeat' in executor_block:
repeat_count = executor_block.get('repeat', 1)
executor_block.pop('repeat')
LOG.info("Block will be repeated %s times", repeat_count)
            # Repeat the block the requested number of times
for i in range(repeat_count):
LOG.info("******** Block Execution Count %s ******** ",
str(i+1))
                # process the mode command
if 'mode' in executor_block:
                    # if mode is parallel, set the parallel flag
if executor_block['mode'].lower() == 'parallel':
LOG.info('starting thread')
parallel = True
elif executor_block['mode'].lower() == 'sequence':
LOG.info('sequential execution')
else:
LOG.critical('Unsupported mode , '
'must be either '
'"parallel" or "sequence"')
ha_infra.ha_exit(0)
executor_block.pop('mode')
# process the timer command
if 'timer' in executor_block:
LOG.info('Do timer related stuff..')
hatimer = True
executor_block.pop('timer')
try:
# Execute the command and the respective parameters
del self.executor_threads[:]
for step_action, nodes in executor_block.iteritems():
self.execute_the_block(executor_index,
nodes,
step_action,
executor_block,
parallel=parallel)
if self.executor_threads:
# start all the executor threads
[t.start() for t in self.executor_threads]
[t.join() for t in self.executor_threads]
except NotImplementedError as runerror:
                    LOG.critical('Unable to execute %s - %s'
                                 % (runerror, step_action))
ha_infra.ha_exit(0)
except ha_infra.NotifyNotImplemented as notifyerr:
                    LOG.critical('Notify is not implemented in %s'
                                 % notifyerr)
ha_infra.ha_exit(0)
except Exception as runerror:
LOG.critical('Unable to continue execution %s'
%str(runerror))
ha_infra.ha_exit(0)
LOG.info("******** Completing the executions ******** ")
# clean up all the pipes
for f in self.open_pipes:
os.unlink(f)
def execute_the_block(self, executor_index,
nodes, step_action, step_info, parallel=False):
node_list = []
if isinstance(nodes, list):
node_list = nodes
sync = None
finish_execution = None
if parallel:
if self.sync_objects.get(executor_index, None):
sync = self.sync_objects[executor_index]
else:
sync = threading.Event()
self.sync_objects[executor_index] = sync
if self.finish_execution_objects.get(executor_index, None):
finish_execution = self.finish_execution_objects[executor_index]
else:
finish_execution = threading.Event()
self.finish_execution_objects[executor_index] = finish_execution
for node in node_list:
# find the module and class object of each node
module_name = self.node_plugin_map.get(node, None)
            if module_name is None:
                LOG.critical("Cannot find a module for node %s when trying to execute",
                             node)
                continue
class_object = self.plugin_to_class_map[module_name.lower()]
plugin_commands = \
[member[0] for member in
inspect.getmembers(class_object,
predicate=inspect.ismethod)]
if step_action in ha_parser.REPORT_CMDS:
LOG.info('DISPLAYING REPORT')
ha_infra.display_report(class_object, step_action)
elif step_action in ha_parser.PUBLISHER_CMDS:
pass
elif step_action in ha_parser.SUBSCRIBER_CMDS:
ha_infra.add_subscribers_for_module(node, step_info)
elif step_action in plugin_commands:
if parallel:
print "Creating a thread for " + node
pipe_path = "/tmp/" + module_name
if not os.path.exists(pipe_path):
LOG.info("Creating a file path for " + pipe_path)
os.mkfifo(pipe_path)
self.open_pipes.append(pipe_path)
pos = self.get_xterm_position()
subprocess.Popen(['xterm', '-geometry', pos, '-e', 'tail', '-f', pipe_path])
t = threading.Thread(target=self.execute_the_command,
args=(class_object, step_action,
sync, finish_execution))
self.executor_threads.append(t)
else:
self.execute_the_command(class_object, step_action)
elif step_action in ha_parser.BUILT_IN_CMDS:
getattr(self, step_action)(node)
else:
LOG.critical('Unknown command: %s' % str(step_action))
ha_infra.ha_exit(0)
@staticmethod
def execute_the_command(class_object, cmd, sync=None,
finish_execution=None):
"""
Execute the command
"""
if class_object and cmd:
getattr(class_object, cmd)(sync=sync,
finish_execution=finish_execution)
@staticmethod
    def delay(val):
"""
built-in-method for delay
"""
LOG.info('Waiting for %d seconds' %(val))
time.sleep(val)
@staticmethod
    def timer(val):
LOG.info('Executing timer..')
def post(self, rsrc_obj):
pass
def remove_instance(self):
pass
def get_xterm_position(self):
return self.xterm_position.pop()
def signal_term_handler(signum, frame):
print "GOT SIGTERM ......"
sys.exit(0)
signal.signal(signal.SIGTERM, signal_term_handler)
|
def fib_digit(n):
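    # Returns the last decimal digit of the n-th Fibonacci number by keeping
    # the running values modulo 10.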
if n <= 2:
return 1
else:
a = 1
b = 1
res = 0
for i in range(3, n + 1):
res = (a + b) % 10
a = b
b = res
return res
print(fib_digit(317457))
|
'''
Errors are when YOU messed up:
incorrect use of the language's constructs.
An exception is when the mistake is NOT yours:
the code is written correctly, but it is
being used incorrectly.
if 2 + 3 = 5
print('DA') <- error: = instead of == and the : at the end of the condition is missing
a = int(input('Enter an integer: '))
5.6 <- exception
'''
x = int(input('Enter a number: '))
if x > 10:
    raise Exception('x must be less than 10')
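# A minimal follow-up sketch (not part of the original lesson): the same two
# failure modes handled with try/except instead of crashing the program.
try:
    y = int(input('Enter another number: '))
    if y > 10:
        raise ValueError('y must be less than 10')
except ValueError as err:
    print('Invalid input:', err)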
|
import requests
try:
import simplejson as json
except ImportError:
import json
import sys
import logging
log = logging.getLogger(__name__)
class BzAPI(object):
def __init__(self, api,
username=None, password=None):
self.api = api
self.username = username
self.password = password
def request(self, path, data=None, method=None, params=None, use_auth=False):
url = self.api + path
if data:
data = json.dumps(data)
if use_auth and self.username and self.password:
if not params:
params = {}
else:
params = params.copy()
params['username'] = self.username
params['password'] = self.password
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
method = method or "GET"
log.debug("%s %s %s", method, url, data)
req = requests.request(method, url, headers=headers, data=data, params=params)
try:
return json.loads(req.content)
except:
log.exception("couldn't load data from %s (%s):\n%s", req, url, req.content)
def check_request(self, *args, **kw):
result = self.request(*args, **kw)
assert not result.get('error'), result
return result
def get_bug(self, bug_num, use_auth=False):
try:
bug = self.request("/bug/%s" % bug_num, use_auth=use_auth)
if bug.get('error') == True:
return None
return bug
except KeyboardInterrupt:
raise
except:
log.exception("Error fetching bug %s" % bug_num)
return None
def get_comments(self, bug_num):
try:
comments = self.request("/bug/%s/comment" % bug_num)
return comments
except KeyboardInterrupt:
raise
except:
log.exception("Error fetching comments for bug %s" % bug_num)
return None
def add_comment(self, bug_num, message):
assert self.username and self.password
self.check_request("/bug/%s/comment" % bug_num,
{"text": message, "is_private": False}, "POST")
def create_bug(self, bug):
assert self.username and self.password
return self.check_request("/bug", bug, "POST", use_auth=True)
def save_bug(self, bug_id, params):
assert self.username and self.password
return self.check_request("/bug/%s" % bug_id, data=params, method="PUT", use_auth=True)
if __name__ == '__main__':
logging.basicConfig()
api = "https://api-dev.bugzilla.mozilla.org/1.0/"
api = "https://api-dev.bugzilla.mozilla.org/test/1.0/"
bz = BzAPI(api, username="catlee@mozilla.com", password="asdfkjsadf;laskjfd;salkdjf")
bug = bz.get_bug("reboots-scl1")
if not bug:
bug = {
#'product': 'mozilla.org',
'product': 'FoodReplicator',
#'component': 'Server Operations: RelEng',
'component': 'Salt',
'alias': 'reboots-scl1',
'summary': 'reboots-scl1',
'comment': 'reboot it!',
'op_sys': 'All',
'platform': 'All',
'version': '1.0',
}
r = bz.create_bug(bug)
print r
else:
bug_id = bug['id']
print bz.get_comments(bug_id)
|
from collections import defaultdict
junks = defaultdict(int)
key_materials = {
'shards': 0,
'fragments': 0,
'motes': 0
}
collected = ''
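# Read lines of "quantity material" pairs until one of the three key materials
# reaches 250; everything else is accumulated as junk.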
while collected == '':
data = input().lower().split()
for index in range(0, len(data), 2):
quantity = int(data[index])
material = data[index + 1]
if material in key_materials:
key_materials[material] += quantity
if key_materials[material] >= 250:
key_materials[material] -= 250
collected = material
break
else:
junks[material] += quantity
if collected == 'shards':
print('Shadowmourne obtained!')
elif collected == 'fragments':
print("Valanyr obtained!")
else:
print("Dragonwrath obtained!")
for key, value in sorted(key_materials.items(), key=lambda el: (-el[1], el[0])):
print(f"{key}: {value}")
for key, value in sorted(junks.items()):
print(f"{key}: {value}")
|
# Generated by Django 2.2 on 2020-11-21 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CoffeeShop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('description', models.TextField(max_length=360)),
],
),
]
|
# processes > threads > coroutines
# Creating a process
'''
First, import the Process class:
from multiprocessing import Process
process = Process(target=some_function, name=process_name, args=(arguments passed to the function,))
process is a Process object; the methods called on it are:
process.start()   starts the process and runs the task
process.run()     runs the task only, without starting a new process
process.terminate()   terminates the process
'''
from multiprocessing import Process
from time import sleep
import os
m = 1
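# Note: each child process gets its own copy of m (module globals are not shared
# between processes), so task1 and task2 increment independent counters.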
def task1(s):
global m
while True:
sleep(s)
m += 1
print("执行的是任务1。。。。。。。。。", m)
def task2(s):
global m
while True:
sleep(s)
m += 2
print("执行的是任务2。。。。。。。。。。。", m)
number = 1
if __name__ == '__main__':
    # child processes
    p1 = Process(target=task1, name='Task 1', args=(1,))  # child process
    p1.start()  # start the child process
    print(p1.name)  # main process; the main process runs first
    p2 = Process(target=task2, name='Task 2', args=(2,))  # child process
    p2.start()  # start the child process
    print(p2.name)  # main process
    print('************')  # main process
while True:
number += 1
sleep(0.2)
if number == 200:
p1.terminate()
p2.terminate()
break
else:
print(number)
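    # Hedged addition (not in the original): join() waits for the terminated
    # children to actually exit before the parent process finishes.
    p1.join()
    p2.join()
    print('all child processes have exited')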
|