text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import os
os.environ['KERAS_BACKEND'] = 'theano'
os.environ['THEANO_FLAGS']='mode=FAST_RUN,device=gpu0,floatX=float32,optimizer=fast_compile'
import pylab as pl
import matplotlib.cm as cm
import itertools
import numpy as np
import theano.tensor as T
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.layers.noise import GaussianNoise
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Merge, Permute
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.regularizers import ActivityRegularizer
from keras.utils.visualize_util import plot
from keras import backend as K
import cv2
import numpy as np
#path = './CamVid/'
#path = './tmm_dataset/'
# Dataset root; <path>/train.txt lists "<image> <label>" pairs, one per line.
path = './image_test/'
data_shape = 250*250  # flattened pixel count per image (h * w)
h,w = 250,250  # input image height / width in pixels
nlabels = 4  # number of segmentation classes
def normalized(rgb):
    """Histogram-equalize each of the three channels of a BGR image.

    Returns a float32 array of the same height/width; values stay in the
    0-255 range (the image is NOT rescaled to 0-1).
    """
    #return rgb/255.0
    equalized = np.zeros((rgb.shape[0], rgb.shape[1], 3), np.float32)
    for channel in range(3):
        equalized[:, :, channel] = cv2.equalizeHist(rgb[:, :, channel])
    return equalized
def binarylab(labels, height=None, width=None, num_labels=None):
    """One-hot encode a 2-D label map into a (height, width, num_labels) array.

    :param labels: 2-D integer label map, indexable as labels[i][j].
    :param height: map height; defaults to the module-level ``h``.
    :param width: map width; defaults to the module-level ``w``.
    :param num_labels: number of classes; defaults to the module-level ``nlabels``.
    :return: numpy array with a 1 at [i, j, labels[i][j]] and 0 elsewhere.
    """
    # Generalized: the original hard-wired the module globals h/w/nlabels;
    # keeping them as defaults preserves every existing call site.
    if height is None:
        height = h
    if width is None:
        width = w
    if num_labels is None:
        num_labels = nlabels
    x = np.zeros([height, width, num_labels])
    for i in range(height):
        for j in range(width):
            x[i, j, labels[i][j]] = 1
    return x
def prep_data():
    """Load the training set listed in <path>/train.txt.

    Each line is "<image_path> <label_path>"; images are per-channel
    histogram-equalized and rolled to channels-first (3, h, w), labels are
    one-hot encoded via binarylab.
    Returns (train_data, train_label) as numpy arrays.
    """
    train_data = []
    train_label = []
    import os
    with open(path+'train.txt') as f:
        txt = f.readlines()
        txt = [line.split(' ') for line in txt]
    print(str(len(txt))+'samples')
    for i in range(len(txt)):
        # np.rollaxis moves the channel axis first: (h, w, 3) -> (3, h, w).
        train_data.append(np.rollaxis(normalized(cv2.imread(txt[i][0])),2))
        # [:-1] strips the trailing newline from the label path.
        train_label.append(binarylab(cv2.imread(txt[i][1][:-1])[:,:,0]))
    print('Loading completed')
    return np.array(train_data), np.array(train_label)
train_data, train_label = prep_data()
# NOTE(review): the leading 4 hard-codes the number of training samples;
# presumably it must equal len(train_data) -- confirm against train.txt.
train_label = np.reshape(train_label,(4,data_shape,nlabels))
# Per-class loss weights (uniform here); alternatives below for other datasets.
class_weighting = [1,1,1,1]
#class_weighting = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
#class_weighting= [0.2595, 0.1826, 4.5640, 0.1417, 0.5051, 0.3826, 9.6446, 1.8418, 6.6823, 6.2478, 3.0, 7.3614]
class UnPooling2D(Layer):
    """A 2D Repeat layer: upsamples feature maps by repeating rows/columns.

    Inverse companion of MaxPooling2D, written against the old Keras
    Layer API (output_shape property / get_output / get_config hooks).
    """
    def __init__(self, poolsize=(2, 2)):
        super(UnPooling2D, self).__init__()
        self.poolsize = poolsize

    @property
    def output_shape(self):
        # (batch, channels, rows*s1, cols*s2) -- Theano channels-first layout.
        input_shape = self.input_shape
        return (input_shape[0], input_shape[1],
                self.poolsize[0] * input_shape[2],
                self.poolsize[1] * input_shape[3])

    def get_output(self, train):
        # Repeat every spatial element poolsize times along height and width.
        X = self.get_input(train)
        s1 = self.poolsize[0]
        s2 = self.poolsize[1]
        output = X.repeat(s1, axis=2).repeat(s2, axis=3)
        return output

    def get_config(self):
        # Serialization hook so the layer survives model save/reload.
        return {"name": self.__class__.__name__,
                "poolsize": self.poolsize}
def create_encoding_layers():
    """Build the SegNet encoder stack.

    Three pad/conv/BN/ReLU/max-pool groups with 64, 128 and 256 filters,
    followed by a final pad/conv/BN/ReLU group with 512 filters (no pool).
    Returns the layers as a flat list, in application order.
    """
    kernel_size = 3
    pad = 1
    pool = 2
    layers = []
    for n_filters in (64, 128, 256):
        layers += [
            ZeroPadding2D(padding=(pad, pad)),
            Convolution2D(n_filters, kernel_size, kernel_size, border_mode='valid'),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(pool_size=(pool, pool)),
        ]
    layers += [
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(512, kernel_size, kernel_size, border_mode='valid'),
        BatchNormalization(),
        Activation('relu'),
    ]
    return layers
def create_decoding_layers():
    """Build the SegNet decoder stack, mirroring the encoder.

    A pad/conv/BN group with 512 filters, then three upsample/pad/conv/BN
    groups with 256, 128 and 64 filters. Returns a flat list of layers in
    application order.
    """
    kernel_size = 3
    pad = 1
    pool = 2
    layers = [
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(512, kernel_size, kernel_size, border_mode='valid'),
        BatchNormalization(),
    ]
    for n_filters in (256, 128, 64):
        layers += [
            UpSampling2D(size=(pool, pool)),
            ZeroPadding2D(padding=(pad, pad)),
            Convolution2D(n_filters, kernel_size, kernel_size, border_mode='valid'),
            BatchNormalization(),
        ]
    return layers
# Assemble the SegNet-style segmentation autoencoder and train it.
autoencoder = models.Sequential()
# Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
autoencoder.add(Layer(input_shape=(3, h, w)))
#autoencoder.add(GaussianNoise(sigma=0.3))
autoencoder.encoding_layers = create_encoding_layers()
autoencoder.decoding_layers = create_decoding_layers()
for l in autoencoder.encoding_layers:
    autoencoder.add(l)
for l in autoencoder.decoding_layers:
    autoencoder.add(l)
# 1x1 convolution maps the 64 decoder channels onto per-pixel class scores.
autoencoder.add(Convolution2D(nlabels, 1, 1, border_mode='valid',))
#import ipdb; ipdb.set_trace()
# Extra padding layer so the spatial size matches (h, w) before the reshape.
autoencoder.add(ZeroPadding2D(padding=(1,1)))
autoencoder.add(Reshape((nlabels,data_shape), input_shape=(nlabels,h,w)))
# Permute to (pixels, classes) so the per-pixel softmax applies over classes.
autoencoder.add(Permute((2, 1)))
autoencoder.add(Activation('softmax'))
#from keras.optimizers import SGD
print('Compiling the model')
autoencoder.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=["accuracy"])
# prediction smoke test kept for reference: inspects the output shape
'''
output = autoencoder.predict(np.zeros((1,3,h,w)),batch_size=32)
print('output shape')
print(output.shape)
'''
current_dir = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(current_dir, "autoencoder.png")
#plot(model_path, to_file=model_path, show_shapes=True) #uses graphviz....
nb_epoch = 100
batch_size = 4
print('Starting Training')
history = autoencoder.fit(train_data, train_label, batch_size=batch_size, nb_epoch=nb_epoch,
                          verbose=1, class_weight=class_weighting )#, validation_data=(X_test, X_test))
# BUG FIX: the original called load_weights() on the checkpoint file right
# after training and then save_weights() to the same file, replacing the
# freshly trained weights with stale ones (or crashing when the file did
# not exist yet). Only save the trained weights.
autoencoder.save_weights('model_weight_ep100.hdf5')
|
import tensorflow as tf
from module.Backbone import Backbone
from module.Encoder import Encoder
from module.Decoder import Decoder
from tensorflow.contrib import slim
class SARModel(object):
    """Show-Attend-and-Read (SAR) scene-text recognizer.

    CNN backbone -> LSTM encoder over feature-map columns -> attentional
    LSTM decoder. Supports greedy decoding (training/inference) and beam
    search (inference only).
    """
    def __init__(self,
                 num_classes,
                 encoder_dim=512,
                 encoder_layer=2,
                 decoder_dim=512,
                 decoder_layer=2,
                 decoder_embed_dim=512,
                 seq_len=40,
                 beam_width=5,
                 is_training=True):
        self.num_classes = num_classes
        self.encoder_dim = encoder_dim
        self.encoder_layer = encoder_layer
        self.decoder_dim = decoder_dim
        self.decoder_layer = decoder_layer
        self.decoder_embed_dim = decoder_embed_dim
        self.seq_len = seq_len
        self.beam_width = beam_width
        self.is_training = is_training
        self.backbone = Backbone(is_training=self.is_training)
        self.encoder = Encoder(hidden_nums=self.encoder_dim, layer_nums=self.encoder_layer,
                               is_training=self.is_training)
        self.decoder = Decoder(output_classes=self.num_classes,
                               hidden_nums=self.decoder_dim,
                               layer_nums=self.decoder_layer,
                               embedding_dim=self.decoder_embed_dim,
                               seq_len=self.seq_len,
                               lstm_keep_prob=1.0,
                               att_keep_prob=0.5,
                               is_training=self.is_training)

    def __call__(self, input_images, input_labels, input_widths, reuse=False, decode_type='greed'):
        """Build the full graph; returns (decoder_logits, attention_weights, pred)."""
        with tf.variable_scope(name_or_scope="sar", reuse=reuse):
            # BUG FIX: the original passed an undefined name `batch_size` as a
            # third positional argument, and inference() accepts no such
            # parameter -- a NameError at graph-construction time.
            encoder_state, feature_map, mask_map = self.inference(input_images, input_widths)
            decoder_logits, attention_weights, pred = self.decode(encoder_state, feature_map,
                                                                  input_labels, mask_map,
                                                                  decode_type=decode_type)
        return decoder_logits, attention_weights, pred

    def inference(self, input_images, input_widths):
        """Run backbone + encoder and build the padding mask.

        :param input_images: N * H * W * C image batch.
        :param input_widths: per-image valid pixel widths (N,).
        :return: (encoder_state, feature_map with padded columns zeroed,
                  mask_map of shape N * H * W * 1 where 1 marks padding).
        """
        img_W = tf.cast(tf.shape(input_images)[2], tf.float32)
        feature_map = self.backbone(input_images)
        fea_W = tf.cast(tf.shape(feature_map)[2], tf.float32)
        # Rescale the per-image pixel widths to feature-map columns.
        input_widths = tf.cast(tf.math.floor(input_widths * (fea_W / img_W)), tf.int32)
        encoder_state = self.encoder(feature_map, input_widths)
        with tf.name_scope(name="fea_post_process"):
            # Construct the mask map: zeros over the valid width, ones over padding.
            input_widths_list = tf.unstack(input_widths, axis=0)
            mask_map = []
            for i, width in enumerate(input_widths_list):
                mask_slice = tf.pad(tf.zeros(dtype=tf.float32, shape=width),
                                    [[0, tf.shape(feature_map)[2]-width]], constant_values=1)
                mask_slice = tf.tile(tf.expand_dims(mask_slice, axis=0), [tf.shape(feature_map)[1], 1])
                mask_map.append(mask_slice)
            mask_map = tf.stack(mask_map, axis=0)
            mask_map = tf.expand_dims(mask_map, axis=3)  # N * H * W * 1
            # Zero the features in the padded region.
            reverse_mask_map = 1 - mask_map
            feature_map = feature_map * reverse_mask_map
        return encoder_state, feature_map, mask_map

    def loss(self, pred, input_labels, input_lengths_mask):
        """
        Masked cross-entropy loss, averaged over the batch.
        :param pred: Decoder outputs N * L * C
        :param input_labels: N * L
        :param input_lengths_mask: N * L (0 & 1 like indicating the real length of the label)
        :return: scalar loss tensor
        """
        with tf.name_scope(name="MaskCrossEntropyLoss"):
            input_labels = tf.one_hot(input_labels, self.num_classes, 1, 0)  # N * L * C
            # stop_gradient: softmax_cross_entropy_with_logits_v2 would
            # otherwise backpropagate into the labels as well.
            input_labels = tf.stop_gradient(input_labels)
            loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=input_labels, logits=pred, dim=-1)
            # Zero the loss on padded timesteps, then average per sample.
            mask_loss = loss * tf.cast(input_lengths_mask, tf.float32)
            loss = tf.reduce_sum(mask_loss) / tf.cast(tf.shape(pred)[0], tf.float32)
        return loss

    def decode(self, encoder_state, feature_map, input_labels, mask_map, decode_type='greed'):
        """Decode the encoded image.

        'greed' returns (logits, attention_weights, pred); 'beam' (inference
        only) returns (None, attention_weights, pred); 'lexicon' is not
        implemented and returns a bare None.
        """
        assert decode_type.lower() in ['greed', 'beam', 'lexicon'], "Not support decode type"
        if decode_type.lower() == "greed":
            decoder_outputs, attention_weights = self.decoder(encoder_state, feature_map, input_labels, mask_map)
            pred = tf.argmax(decoder_outputs, axis=2)
            return decoder_outputs, attention_weights, pred
        elif decode_type == "beam" and not self.is_training:
            pred, attention_weights = self.decoder.beam_search(encoder_state, feature_map, mask_map, self.beam_width)
            return None, attention_weights, pred
        elif decode_type == "lexicon":
            # NOTE(review): this single None does not match the 3-tuple shape
            # of the other branches -- callers that unpack will fail; confirm
            # intended behavior before lexicon decoding is implemented.
            return None
def test():
    """Smoke test: build the SAR graph on random data and run 10 Adam steps."""
    input_images = tf.placeholder(dtype=tf.float32, shape=[32, 48, 160, 3])
    input_labels = tf.placeholder(dtype=tf.int32, shape=[32, 40])
    input_lengths = tf.placeholder(dtype=tf.int32, shape=[32, 40])
    input_feature_map = tf.placeholder(dtype=tf.float32, shape=[32, 12, 20, 512])
    input_widths = tf.placeholder(dtype=tf.float32, shape=[32])
    sar_model = SARModel(95)
    # BUG FIX: inference() takes only (input_images, input_widths); the
    # original passed a batch_size argument it does not accept (TypeError).
    encoder_state, feature_map, mask_map = sar_model.inference(input_images, input_widths)
    logits, att_weights, pred = sar_model.decode(encoder_state, feature_map, input_labels, mask_map)
    loss = sar_model.loss(logits, input_labels, input_lengths)
    # NOTE(review): learning_rate=1.0 is very high for Adam -- confirm intended.
    optimizer = tf.train.AdamOptimizer(learning_rate=1.0).minimize(loss)
    import numpy as np
    _input_images = np.random.rand(32, 48, 160, 3)
    _input_labels = np.random.randint(0, 95, size=[32, 40])
    _input_lengths = np.random.randint(0, 2, size=[32, 40])
    _input_feature_map = np.random.rand(32, 12, 20, 512)
    _input_images_width = np.random.randint(10, 30, 32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10):
            _, loss_value = sess.run([optimizer, loss],
                                     feed_dict={input_images: _input_images,
                                                input_labels: _input_labels,
                                                input_lengths: _input_lengths,
                                                input_feature_map: _input_feature_map,
                                                input_widths: _input_images_width})
            print(loss_value)
if __name__ == "__main__":
    import os
    # Pin the process to GPU 1 before TensorFlow initializes CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = "1"
    test()
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import src.dataprocessing as dataproc
from src.evaluation import get_embeddings, inferred_variables_evaluation
import src.evaluation as evaluation
from src.flow_loss import ScaledFlowLoss, compute_simple_weighting
from src.utils import EMAMeter, TrainingConfig, HyperParamConfig, EvalConfig, InitConfig, LossConfig, \
BaselineHyperParamConfig
class TrainerBase:
    """Shared state for flow-prediction training.

    Holds device tensors for the train/validation edge sets, the scaled
    flow loss, per-edge loss weightings, and the normalized gradient /
    harmonic decomposition of the combined ground-truth flow (used as
    reference directions during evaluation).
    """
    def __init__(self, train_graph: dataproc.Graph, val_graph: dataproc.Graph,
                 device=torch.device('cpu'), loss_config: LossConfig = LossConfig()):
        self.device = device
        self.num_nodes = train_graph.num_vertices()
        self.train_graph = train_graph
        self.val_graph = val_graph
        # self._train_edges = train_graph.edges
        # self._val_edges = val_graph.edges
        # Edge flows and endpoint node ids as tensors on the target device.
        self.train_flow = torch.from_numpy(train_graph.flow).to(device)
        self.val_flow = torch.from_numpy(val_graph.flow).to(device)
        self.train_source_nodes = torch.from_numpy(train_graph.src_nodes).to(device)
        self.train_target_nodes = torch.from_numpy(train_graph.dst_nodes).to(device)
        self.val_source_nodes = torch.from_numpy(val_graph.src_nodes).to(device)
        self.val_target_nodes = torch.from_numpy(val_graph.dst_nodes).to(device)
        # 'auto' derives the loss scale nu from the median absolute train flow.
        nu = np.median(np.abs(train_graph.flow)) if loss_config.nu == 'auto' else loss_config.nu
        self.scaled_loss = ScaledFlowLoss(use_student_t_loss=loss_config.use_student_t_loss, nu=nu,
                                          use_squared_weighting=loss_config.use_squared_weighting)
        # Per-edge weights bound the influence of very small / very large flows.
        self.train_loss_weighting = compute_simple_weighting(self.train_flow,
                                                             min_flow_weight=loss_config.min_flow_weight,
                                                             max_flow_weight=loss_config.max_flow_weight).to(device)
        self.val_loss_weighting = compute_simple_weighting(self.val_flow, min_flow_weight=loss_config.min_flow_weight,
                                                           max_flow_weight=loss_config.max_flow_weight).to(device)
        self.return_history = False
        # Normalized gradient/harmonic components of the combined train+val
        # flow; compared against model output in eval_flow_decomposition.
        self.gt_norm_grad_flow, self.grad_norm, self.gt_norm_harmonic_flow, self.harmonic_norm = \
            dataproc.decompose_flow_normalized(
                source_nodes=np.concatenate((train_graph.src_nodes, val_graph.src_nodes), axis=0),
                target_nodes=np.concatenate((train_graph.dst_nodes, val_graph.dst_nodes), axis=0),
                flow=np.concatenate((train_graph.flow, val_graph.flow), axis=0),
                num_nodes=self.num_nodes
            )
        self.gt_norm_grad_flow = torch.from_numpy(self.gt_norm_grad_flow).to(device=device, dtype=torch.float)
        self.gt_norm_harmonic_flow = torch.from_numpy(self.gt_norm_harmonic_flow).to(device=device, dtype=torch.float)

    def train_val_graph(self):
        """Return one Graph combining the train and validation edges/flows."""
        edges = np.concatenate((self.train_graph.edges, self.val_graph.edges), axis=0)
        flow = np.concatenate((self.train_graph.flow, self.val_graph.flow), axis=0)
        return dataproc.Graph(num_nodes=self.num_nodes, edges=edges, flow=flow)

    @staticmethod
    def get_embeddings(model, subtract_mean=True):
        # Thin passthrough to src.evaluation.get_embeddings.
        return get_embeddings(model, subtract_mean=subtract_mean)

    @staticmethod
    def calc_convergence_crit(new_loss, current_loss):
        """Relative-change convergence criterion; inf while either loss is inf."""
        if np.isinf(new_loss) or np.isinf(current_loss):
            return np.inf
        return TrainerBase.stop_crit_rel_prev_value(new_loss, current_loss)

    @staticmethod
    def stop_crit_rel_prev_value(new_loss, current_loss):
        # |delta| relative to the new loss; 1e-4 guards against division by zero.
        return np.abs(new_loss - current_loss) / (new_loss + 1e-4)
class Trainer:
    """Iterative training loop around a TrainerBase.

    Steps the optimizer until a relative-change convergence criterion drops
    below tol or max_steps is reached, smooths the training loss with an
    EMA, evaluates on the validation split every iteration, and keeps the
    best validation results (optionally checkpointing the model weights).
    """
    def __init__(self, base: TrainerBase, model: nn.Module, optimizer: optim.Optimizer,
                 train_config: TrainingConfig = TrainingConfig(), eval_config: EvalConfig = EvalConfig()):
        self.base = base
        self.optimizer = optimizer
        self.model = model
        # NOTE(review): use_lbfgs is stored but train() always calls
        # train_step, never lbfgs_train_step -- confirm intended dispatch.
        self.use_lbfgs = train_config.use_lbfgs
        self.return_history = False
        self.use_bootstrap = train_config.use_bootstrap
        # fast eval runs every iteration; history eval only when history is kept.
        self.fast_eval_fun = self.eval_model_val
        self.history_eval_fun = self.eval_model
        self.model_chp_path = eval_config.model_chp_path
        self.train_loss_meter = EMAMeter()
        self.best_val_loss = np.inf
        self.best_val_results = {}
        # New val loss must beat best_val_eval_coeff * best to count as improvement.
        self.best_val_eval_coeff = eval_config.best_val_eval_coeff
        self.iter_ = 0

    def is_finished(self, current_loss, new_loss, iteration, max_steps, tol):
        """True when converged (criterion < tol) or the step budget is spent."""
        is_done = new_loss != float('inf') and self.convergence_criteria(new_loss, current_loss) < tol
        is_done = is_done or iteration >= max_steps
        if is_done:
            pass
        return is_done

    def convergence_criteria(self, new_loss, current_loss):
        # Delegates to the shared relative-change criterion on TrainerBase.
        return self.base.calc_convergence_crit(new_loss, current_loss)

    def _early_stopping(self, val_loss, iter_):
        # Record best-so-far validation results (keys suffixed with '*') and
        # optionally checkpoint the model weights.
        if val_loss < self.best_val_eval_coeff * self.best_val_loss:
            self.best_val_loss = val_loss
            self.best_val_results = {key + '*': val for key, val in self.eval_model().items()}
            self.best_val_results.update({"num_iter*": iter_})
            if self.model_chp_path:
                torch.save(self.model.state_dict(), self.model_chp_path)

    def train(self, tol=1e-4, max_steps=20000, verbosity=0, return_history=False):
        """Run the loop; returns (final eval results dict, per-step history list)."""
        self.return_history = return_history
        current_loss = np.inf
        new_loss = np.inf
        fast_eval_res = {}
        history = []
        while not self.is_finished(current_loss=current_loss, new_loss=new_loss, iteration=self.iter_,
                                   max_steps=max_steps, tol=tol):
            self.iter_ += 1
            current_loss = new_loss
            new_loss, _ = self.train_step(current_loss, self.iter_)
            if self.train_loss_meter is not None:
                # Convergence is judged on the EMA-smoothed loss, not the raw one.
                self.train_loss_meter.add(new_loss)
                new_loss = self.train_loss_meter.mean
            fast_eval_res = self.fast_eval_fun()
            fast_eval_res.update({'criterion': self.convergence_criteria(new_loss, current_loss)})
            self._early_stopping(fast_eval_res['val_loss'], self.iter_)
            history_entry = {'loss': new_loss, 'criterion': self.convergence_criteria(new_loss, current_loss)}
            if self.return_history:
                history_entry.update(self.history_eval_fun())
            history.append(history_entry)
            if verbosity > 1 and self.iter_ % max(max_steps // (verbosity + 1), 1) == 0:
                print(f"Iteration {self.iter_} | loss {new_loss:.5f}")
                print(" | ".join([f"{key} {item:.2f}" for key, item in fast_eval_res.items()]))
        if verbosity > 0:
            print(f"loss {new_loss:.5f} ")
            print(" | ".join([f"{key} {item:.2f}" for key, item in fast_eval_res.items()]))
        results = self.eval_model()
        results.update(self.best_val_results)
        results.update({"num_iter": self.iter_})
        return results, history

    def train_step(self, current_loss=None, iter_=None):
        """One standard optimizer step; returns (loss value, None)."""
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.forward_pass()
        loss.backward()
        self.optimizer.step()
        return loss.detach().item(), None

    def lbfgs_train_step(self, current_loss=None, iter_=None):
        """One L-BFGS step; the closure re-evaluates loss and gradients."""
        def closure():
            self.optimizer.zero_grad()
            loss_ = self.forward_pass()
            loss_.backward()
            return loss_
        loss = self.optimizer.step(closure)
        return loss, None

    def forward_pass(self):
        """Compute the weighted training loss, optionally on a bootstrap resample."""
        if self.use_bootstrap:
            # Sample edge indices with replacement for a bootstrap estimate.
            bootstrap_index = torch.randint(low=0, high=len(self.base.train_flow),
                                            size=(len(self.base.train_flow),), dtype=torch.long,
                                            device=self.base.device)
            output = self.model(source_nodes=self.base.train_source_nodes[bootstrap_index],
                                target_nodes=self.base.train_target_nodes[bootstrap_index])
            loss = self.base.scaled_loss(output, self.base.train_flow[bootstrap_index],
                                         self.base.train_loss_weighting[bootstrap_index])
        else:
            output = self.model(source_nodes=self.base.train_source_nodes, target_nodes=self.base.train_target_nodes)
            loss = self.base.scaled_loss(output, self.base.train_flow, self.base.train_loss_weighting)
        return loss

    def eval_model(self):
        """Full evaluation: validation + training metrics + flow decomposition."""
        res = {}
        res.update(self.eval_model_val())
        res.update(self.eval_model_train())
        res.update(self.eval_flow_decomposition())
        return res

    def eval_flow_decomposition(self):
        """Project the model's predicted flow onto the normalized ground-truth
        gradient and harmonic components; returns the two coefficients and
        the output norm."""
        self.model.eval()
        with torch.no_grad():
            source_nodes = torch.cat((self.base.train_source_nodes, self.base.val_source_nodes), dim=0)
            target_nodes = torch.cat((self.base.train_target_nodes, self.base.val_target_nodes), dim=0)
            output = self.model(source_nodes=source_nodes, target_nodes=target_nodes)
            output_norm = torch.norm(output)
            grad_coeff = torch.sum(self.base.gt_norm_grad_flow * output) / output_norm
            harmonic_coeff = torch.sum(self.base.gt_norm_harmonic_flow * output) / output_norm
            results = {'grad_coeff': grad_coeff.item(), 'harmonic_coeff': harmonic_coeff.item(),
                       'flow_output_norm': output_norm.item()}
        return results

    def eval_model_val(self):
        """Validation-split loss and flow-prediction metrics (keys prefixed 'val')."""
        self.model.eval()
        with torch.no_grad():
            output = self.model(source_nodes=self.base.val_source_nodes, target_nodes=self.base.val_target_nodes)
            loss = self.base.scaled_loss(output, self.base.val_flow, self.base.val_loss_weighting)
            output = output.detach().cpu().numpy()
            val_flow = self.base.val_flow.detach().cpu().numpy()
            results = evaluation.calc_flow_prediction_evaluation(output, val_flow, prefix="val")
            results['val_loss'] = loss.item()
        return results

    def eval_model_train(self):
        """Training-split loss and flow-prediction metrics (keys prefixed 'train')."""
        self.model.eval()
        with torch.no_grad():
            output = self.model(source_nodes=self.base.train_source_nodes, target_nodes=self.base.train_target_nodes)
            loss = self.base.scaled_loss(output, self.base.train_flow, self.base.train_loss_weighting)
            output = output.detach().cpu().numpy()
            train_flow = self.base.train_flow.detach().cpu().numpy()
            results = evaluation.calc_flow_prediction_evaluation(output, train_flow, prefix="train")
            results['train_loss'] = loss.item()
        return results

    def calc_val_loss(self):
        """Scalar validation loss only (no metrics)."""
        self.model.eval()
        with torch.no_grad():
            output = self.model(source_nodes=self.base.val_source_nodes, target_nodes=self.base.val_target_nodes)
            loss = self.base.scaled_loss(output, self.base.val_flow, self.base.val_loss_weighting)
        return loss.detach().item()
class SheafTrainer(Trainer):
    """Trainer for sheaf models with node embeddings and (optional) gates.

    Adds embedding/gate regularization to the loss, optional Gaussian noise
    injection into the parameters before each step, and evaluation against
    ground-truth embeddings/gates when those are provided.
    """
    def __init__(self, base: TrainerBase, model: nn.Module, optimizer: optim.Optimizer,
                 gt_embeddings=None, gt_gates=None,
                 train_config: TrainingConfig = TrainingConfig(), eval_config: EvalConfig = EvalConfig(),
                 hyperpara_config: HyperParamConfig = HyperParamConfig()
                 ):
        super().__init__(base=base, model=model, optimizer=optimizer, train_config=train_config,
                         eval_config=eval_config)
        self.emb_reg = hyperpara_config.embeddings_reg
        self.gates_reg = hyperpara_config.gates_reg
        # Ground-truth parameters (if known) -- used only for evaluation.
        self.gt_embeddings = gt_embeddings
        self.gt_gates = gt_gates
        self.gt_num_emb_modes = eval_config.gt_num_emb_modes
        self.gt_num_gate_modes = eval_config.gt_num_gate_modes
        # Noise-injection configs for embeddings and gates.
        self.emb_grad_noise = hyperpara_config.emb_grad_noise
        self.gates_grad_noise = hyperpara_config.gates_grad_noise
        self.use_proportional_noise = hyperpara_config.use_proportional_noise
        self.proportional_noise_cutoff = torch.tensor(hyperpara_config.proportional_noise_cutoff,
                                                      device=self.base.device)
        self.fast_eval_fun = self.eval_model_val
        self.history_eval_fun = self.eval_model

    def forward_pass(self):
        # Data loss plus embedding/gate regularization.
        loss = super().forward_pass() + self.embedding_regularization_loss()
        return loss

    def train_step(self, current_loss=None, iter_=None):
        """One optimizer step; noise is injected into the parameters *before*
        the forward pass (perturbed-parameter scheme)."""
        self.model.train()
        self.add_noise_to_embeddings(iter_)
        self.add_noise_to_gates(iter_)
        self.optimizer.zero_grad()
        loss = self.forward_pass()
        loss.backward()
        self.optimizer.step()
        return loss.detach().item(), None

    def add_noise_to_embeddings(self, iter_):
        """Every noise_interval iterations, add Gaussian noise to the node
        embeddings (optionally scaled by the parameter magnitude, floored at
        proportional_noise_cutoff)."""
        if self.emb_grad_noise.add_gradient_noise and iter_ % self.emb_grad_noise.noise_interval == 0:
            with torch.no_grad():
                # noise_std = self.emb_grad_noise.std / np.power(1 + 0.002 * iter_, 0.55)
                noise_std = self.emb_grad_noise.std
                noise = noise_std * torch.randn_like(self.model.node_embeddings.weight)
                if self.use_proportional_noise:
                    noise *= torch.maximum(torch.abs(self.model.node_embeddings.weight), self.proportional_noise_cutoff)
                self.model.node_embeddings.weight += noise

    def add_noise_to_gates(self, iter_):
        """Same noise scheme as add_noise_to_embeddings, applied to the gates."""
        if self.gates_grad_noise.add_gradient_noise and iter_ % self.gates_grad_noise.noise_interval == 0:
            with torch.no_grad():
                # noise_std = self.gates_grad_noise.std / np.power(1 + 0.002 *
                noise_std = self.gates_grad_noise.std
                noise = noise_std * torch.randn_like(self.model.gates.weight)
                if self.use_proportional_noise:
                    noise *= torch.maximum(torch.abs(self.model.gates.weight), self.proportional_noise_cutoff)
                self.model.gates.weight += noise

    def calc_full_loss(self, model_output):
        """Weighted train loss for a precomputed model output, plus regularization."""
        return self.base.scaled_loss(model_output, self.base.train_flow,
                                     self.base.train_loss_weighting) + self.embedding_regularization_loss()

    def eval_model(self):
        # Extend the base evaluation with learned-parameter comparisons.
        res = super(SheafTrainer, self).eval_model()
        res.update(self.eval_learned_parameters())
        return res

    def eval_learned_parameters(self):
        """Compare learned embeddings/gates against the ground truth, when given.
        Gate metric keys get a '_gates' suffix."""
        embedding_eval = {}
        self.model.eval()
        with torch.no_grad():
            if self.gt_embeddings is not None:
                embedding_eval = inferred_variables_evaluation(self.model.node_embeddings.weight.detach(),
                                                               self.gt_embeddings,
                                                               num_modes=self.gt_num_emb_modes)
            gate_eval = {}
            if self.gt_gates is not None:
                gate_eval = inferred_variables_evaluation(self.model.gates.weight.detach(), self.gt_gates,
                                                          num_modes=self.gt_num_gate_modes)
                gate_eval = {key + "_gates": value for key, value in gate_eval.items()}
            embedding_eval.update(gate_eval)
        return embedding_eval

    def embedding_regularization_loss(self):
        """Regularization pulling embeddings and gates toward zero; each term is
        active only when the model has the attribute and its weight is > 0."""
        loss = 0.
        if hasattr(self.model, 'node_embeddings') and self.emb_reg.weight > 0.:
            loss += self.emb_reg.weight * self.emb_reg.loss_fun(
                self.model.node_embeddings.weight, torch.zeros_like(self.model.node_embeddings.weight.detach())
            )
        if hasattr(self.model, 'gates') and self.gates_reg.weight > 0.:
            loss += self.gates_reg.weight * self.gates_reg.loss_fun(
                self.model.gates.weight, torch.zeros_like(self.model.gates.weight.detach())
            )
        return loss
class GatesInitTrainer(SheafTrainer):
    """Pre-trainer for the gates only: fits the mean gate activation per edge
    to a truncated, normalized |flow| target with a BCE loss."""
    def __init__(self, base: TrainerBase, model: nn.Module, optimizer: optim.Optimizer,
                 gt_embeddings=None, gt_gates=None,
                 train_config: TrainingConfig = TrainingConfig(), eval_config: EvalConfig = EvalConfig(),
                 hyperpara_config: HyperParamConfig = HyperParamConfig(), init_config: InitConfig = InitConfig()):
        super().__init__(base=base, model=model, optimizer=optimizer, gt_embeddings=gt_embeddings, gt_gates=gt_gates,
                         train_config=train_config, eval_config=eval_config, hyperpara_config=hyperpara_config)
        self.bce_loss = nn.BCELoss()
        # Clip the flow magnitudes, then normalize by their maximum so the
        # target lies in [0, 1] and can be used as a BCE label.
        truncated_flow_values = compute_simple_weighting(self.base.train_flow,
                                                         min_flow_weight=init_config.min_gates_auto_weight,
                                                         max_flow_weight=init_config.max_gates_auto_weight)
        flow_normalizer = torch.max(truncated_flow_values)
        self.flow_activation = torch.minimum(torch.abs(self.base.train_flow) / flow_normalizer,
                                             torch.tensor(1, device=self.base.train_flow.device))

    def forward_pass(self):
        # Mean gate activation per edge is matched against the flow target.
        output = self.model.gates_forward(source_nodes=self.base.train_source_nodes,
                                          target_nodes=self.base.train_target_nodes)
        loss = self.bce_loss(torch.mean(output, dim=1), self.flow_activation)
        return loss
class BaselineTrainer(Trainer):
    """Trainer for baseline models: adds uniform parameter regularization and
    optional Gaussian noise injection over *all* model parameters."""
    def __init__(self, base: TrainerBase, model: nn.Module, optimizer: optim.Optimizer,
                 train_config: TrainingConfig = TrainingConfig(), eval_config: EvalConfig = EvalConfig(),
                 baseline_hyperpara_config: BaselineHyperParamConfig = BaselineHyperParamConfig()
                 ):
        super().__init__(base=base, model=model, optimizer=optimizer, train_config=train_config,
                         eval_config=eval_config)
        self.reg = baseline_hyperpara_config.reg
        self.grad_noise = baseline_hyperpara_config.grad_noise
        self.use_proportional_noise = baseline_hyperpara_config.use_proportional_noise
        self.proportional_noise_cutoff = torch.tensor(baseline_hyperpara_config.proportional_noise_cutoff,
                                                      device=self.base.device)

    def forward_pass(self):
        # Data loss plus parameter regularization.
        loss = super().forward_pass() + self.parameter_regularization_loss()
        return loss

    def train_step(self, current_loss=None, iter_=None):
        """One optimizer step with noise injected before the forward pass."""
        self.model.train()
        self.add_noise_parameters(iter_)
        self.optimizer.zero_grad()
        loss = self.forward_pass()
        loss.backward()
        self.optimizer.step()
        return loss.detach().item(), None

    def add_noise_parameters(self, iter_):
        """Every noise_interval iterations, add Gaussian noise to every model
        parameter (optionally proportional to the parameter magnitude)."""
        if self.grad_noise.add_gradient_noise and iter_ % self.grad_noise.noise_interval == 0:
            with torch.no_grad():
                # noise_std = self.emb_grad_noise.std / np.power(1 + 0.002 * iter_, 0.55)
                noise_std = self.grad_noise.std
                for parameter in self.model.parameters():
                    noise = noise_std * torch.randn_like(parameter.detach())
                    if self.use_proportional_noise:
                        noise *= torch.maximum(torch.abs(parameter.detach()), self.proportional_noise_cutoff)
                    parameter += noise

    def parameter_regularization_loss(self):
        """Regularization pulling every model parameter toward zero."""
        loss = 0.
        if self.reg.weight > 0.:
            for parameter in self.model.parameters():
                loss += self.reg.weight * self.reg.loss_fun(parameter, torch.zeros_like(parameter.detach()))
        return loss
|
# ex29: What if
people = 20
cats = 30
dogs = 15

# An if statement creates a "branch" in the code.
# If the boolean expression is true then run the code, otherwise skip.
# The colon indicates a new block of code, and anything
# that is indented underneath is part of that block.

# BUG FIX: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3; for a single argument, `print("...")` prints
# identically on both Python 2 and Python 3.
if people < cats:
    print("Many cats! The world is saved!")

if people > cats:
    print("Not many cats! The world is doomed!")

if people < dogs:
    print("The world is drooled on!")

if people > dogs:
    print("The world is dry!")

dogs += 5

if people >= dogs:
    print("People are greater than or equal to dogs.")

if people <= dogs:
    print("People are less than or equal to dogs.")

if people == dogs:
    print("People are dogs.")

if (dogs < cats) and (people < cats):
    print("Cats are more than people and dogs. People are scared by cats!")

if (dogs < cats) and not (people < cats):
    print("Cats are more than dogs. Mice are living a hard life!")

if (dogs == cats) or (cats < 10):
    print("Cats are fighting against dogs! Mice are happy!")

if cats != 0:
    print("There are cats. Mice cannot be too complacent.")
|
# Read the 9-character board string (row-major) and draw the 3x3 grid.
x = input("Enter cells: ")
print("---------")
print("|", x[0], x[1], x[2], "|")
print("|", x[3], x[4], x[5], "|")
print("|", x[6], x[7], x[8], "|")
print("---------")
#O match detection
VertO = False
for n in range(3):  # n walks the three column starts
    if x[n] == "O" and x[n + 3] == "O" and x[n + 6] == "O": #Vertical "O" match detection
        VertO = True
HoriO = False
for n in range(0, 7, 3):  # n walks the three row starts (0, 3, 6)
    if x[n] == "O" and x[n + 1] == "O" and x[n + 2] == "O": #Horizontal "O" match detection
        HoriO = True
DiagO = False
if (x[0] == "O" and x[4] == "O" and x[8] == "O") or (x[2] == "O" and x[4] == "O" and x[6] == "O"): #Diagonal "O" match detection
    DiagO = True
VictO = False
if VertO or HoriO or DiagO: #O victory detection
    VictO = True
#X match detection
VertX = False
for n in range(3):
    if x[n] == "X" and x[n + 3] == "X" and x[n + 6] == "X": #Vertical "X" match detection
        VertX = True
HoriX = False
for n in range(0, 7, 3):
    if x[n] == "X" and x[n + 1] == "X" and x[n + 2] == "X": #Horizontal "X" match detection
        HoriX = True
DiagX = False
if (x[0] == "X" and x[4] == "X" and x[8] == "X") or (x[2] == "X" and x[4] == "X" and x[6] == "X"): #Diagonal "X" match detection
    DiagX = True
VictX = False
if VertX or HoriX or DiagX: #X victory detection
    VictX = True
#amount of X's and O's
QuantX = 0
QuantO = 0
for n in x:
    if n == "O":
        QuantO += 1
    elif n == "X":
        QuantX += 1
#is the game impossible: both sides "won", or the move counts differ by > 1
if (VictO and VictX) or QuantO > QuantX + 1 or QuantX > QuantO + 1:
    impossible = True
else:
    impossible = False
#empty space detection
if QuantO + QuantX < 9:
    space = True
else:
    space = False
#finish message
if impossible:
    print("Impossible")
elif VictX:
    print("X wins")
elif VictO:
    print("O wins")
elif space:
    print("Game not finished")
else:
    print("Draw")
#X move: read "row col" (1-based) from the user
y = input("Enter the coordinates: ").split()
#coordinate detection
if y[0] in ["1", "2", "3"] and y[1] in ["1", "2", "3"]:
    coordinate = True
else:
    coordinate = False
#number detection: only checked when the coordinate test failed
nums = True
if coordinate == False:
    for n in y[0]:
        if n not in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]:
            nums = False
    for n in y[1]:
        if n not in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]:
            nums = False
def index_finder(a, b):
    """Map a 1-based (row, column) pair onto a 0-based board index.

    Rows 1..3 map to offsets 0/3/6; any other row yields None, matching the
    original's implicit fall-through.
    """
    if a in (1, 2, 3):
        return (a - 1) * 3 + (int(b) - 1)
    return None
#position availability detection
# NOTE(review): this indexes the board before `coordinate`/`nums` are
# consulted -- malformed input can raise ValueError/IndexError here; confirm
# whether the validation above was meant to guard this lookup.
availability = True
if x[index_finder(int(y[0]), int(y[1]))] in ["O", "X"]:
    availability = False
#putting move on board: strings are immutable, so go via a list
if coordinate == True and availability == True:
    x = list(x)
    x[index_finder(int(y[0]), int(y[1]))] = "X"
    x = "".join(x)
#x move: re-prompt loop (currently accepts the first input unconditionally)
legal = 0
while legal == 0:
    y = input("Enter the coordinates: ").split()
    legal = 1
if legal == 1:
def num_detector(x):
if list(x) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
|
"""
Component of WMAgent that runs an alert Processor pipeline to forward
alerts to various other systems & monitoring.
"""
import logging
import signal
import traceback
from WMCore.Agent.Harness import Harness
from WMCore.Alerts.ZMQ.Processor import Processor
from WMCore.Alerts.ZMQ.Receiver import Receiver
class AlertProcessor(Harness):
    """WMAgent component that runs the alert Processor pipeline to forward
    alerts to other systems & monitoring.

    Owns a ZMQ Receiver that runs in the background and feeds the Processor.
    """
    def __init__(self, config):
        Harness.__init__(self, config)
        self.config = config
        # instance of processor
        self._processor = None
        # instance of Receiver which owns Processor (self._processor)
        # and runs on background
        self._receiver = None
        #3602 related:
        # Harness, nor the components, handle signal.SIGTERM which
        # is used by wmcoreD --shutdown, hence shutdown sequence is not called
        # this shall later be moved into (hopefully largely improved) Harness
        signal.signal(signal.SIGTERM, self._signalHandler)

    def _signalHandler(self, signalNumber, frame):
        # BUG FIX: pass signalNumber as a lazy logging argument instead of
        # eagerly %-formatting the message string.
        logging.info("Signal number %s caught.", signalNumber)
        self.prepareToStop()

    def preInitialization(self):
        """
        Start up the ZMQ Receiver + Processor.
        """
        logging.info("preInitialization ...")
        # something fishy (again) going on in Harness, wmcoreD
        # component may fail, still will be considered as running (#3602)
        # this is why #3320 is difficult to fix ... wmcoreD would happily
        # continue even after raising an exception even from this very method directly
        self._processor = Processor(self.config.AlertProcessor)
        # Receiver listens on work channel (address) and on control
        # channel (controlAddr)
        self._receiver = Receiver(self.config.AlertProcessor.address,
                                  self._processor,
                                  self.config.AlertProcessor.controlAddr)
        self._receiver.startReceiver()
        logging.info("preInitialization - finished.")

    def stopAlertProcessor(self):
        """
        Method to shutdown the AlertProcessor.
        """
        logging.info("stopAlertProcessor - stopping Receiver ...")
        self._receiver.shutdown()
        logging.info("stopAlertProcessor finished.")

    def prepareToStop(self, wait = False, stopPayload = ""):
        """
        Override prepareToStop to include call to stopAlertProcessor.
        Ugly, but seems no other way to do this...
        """
        logging.info("Shutting down the component - prepareToStop ...")
        self.stopAlertProcessor()
        Harness.prepareToStop(self, wait, stopPayload)
        logging.info("prepareToStop finished.")
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import warnings
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from tests.fixtures import warning_free
from tsfresh import extract_relevant_features
from tsfresh.feature_extraction.settings import MinimalFCParameters
from tsfresh.utilities import dataframe_functions
class RollingTestCase(TestCase):
    """Exercise dataframe_functions.roll_time_series: argument validation,
    forward/backward rolling, window-size limits (min/max_timeshift),
    stacked and dict-shaped inputs, and the multiprocessing path.

    Expected index tuples are (original_id, window_end_time); expected
    value lists spell out every window's contents in order.
    """

    def test_with_wrong_input(self):
        # A NaN in the sort column must be rejected.
        test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [np.NaN, np.NaN]})
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_sort="sort", column_kind="kind", rolling_direction=1, n_jobs=0)
        # An id column that does not exist raises AttributeError.
        test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [1, 1]})
        self.assertRaises(AttributeError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="strange_id", column_sort="sort", column_kind="kind", rolling_direction=1, n_jobs=0)
        # column_id may not be None for DataFrame input.
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id=None, column_sort="sort", column_kind="kind", rolling_direction=1, n_jobs=0)
        # For dict input, passing a kind column (the dict keys are the kinds)
        # or no id column is invalid.
        test_df = {"a": pd.DataFrame([{"id": 0}])}
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_sort=None, column_kind="kind", rolling_direction=1, n_jobs=0)
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id=None, column_sort=None, column_kind="kind", rolling_direction=1, n_jobs=0)
        # rolling_direction == 0 is invalid.
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_sort=None, column_kind=None, rolling_direction=0, n_jobs=0)
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id=None, column_sort=None, column_kind=None, rolling_direction=0, n_jobs=0)
        # Window limits: max_timeshift must be > 0, min_timeshift >= 0.
        test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [1, 1]})
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_kind="kind", column_sort="sort", max_timeshift=0, rolling_direction=1, n_jobs=0)
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_kind="kind", column_sort="sort", min_timeshift=-1, rolling_direction=1, n_jobs=0)

    def test_assert_single_row(self):
        # A single row whose id is NaN cannot be rolled.
        test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
        self.assertRaises(ValueError, dataframe_functions.roll_time_series, df_or_dict=test_df, column_id="id", column_sort="sort", column_kind="kind", rolling_direction=1, n_jobs=0)

    def test_positive_rolling(self):
        # Two series: id 1 with 4 points, id 2 with 2 points.
        first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
        second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
        first_class["id"] = 1
        second_class["id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        """ df_full is
        a b time id
        0 1 5 0 1
        1 2 6 1 1
        2 3 7 2 1
        3 4 8 3 1
        4 10 12 20 2
        5 11 13 21 2
        """
        # Unlimited forward rolling: window (id, t) holds all points up to t.
        correct_indices = [(1, 0), (1, 1), (1, 1), (1, 2), (1, 2), (1, 2), (1, 3), (1, 3), (1, 3), (1, 3), (2, 20), (2, 21), (2, 21)]
        correct_values_a = [1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 10.0, 10.0, 11.0]
        correct_values_b = [5.0, 5.0, 6.0, 5.0, 6.0, 7.0, 5.0, 6.0, 7.0, 8.0, 12.0, 12.0, 13.0]
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, n_jobs=0)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=4 covers the longest series, so nothing changes.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, max_timeshift=4, n_jobs=0)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=2 caps each window at 3 points.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, max_timeshift=2, n_jobs=0)
        correct_indices = [(1, 0), (1, 1), (1, 1), (1, 2), (1, 2), (1, 2), (1, 3), (1, 3), (1, 3), (2, 20), (2, 21), (2, 21)]
        correct_values_a = [1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 10.0, 10.0, 11.0]
        correct_values_b = [5.0, 5.0, 6.0, 5.0, 6.0, 7.0, 6.0, 7.0, 8.0, 12.0, 12.0, 13.0]
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # min_timeshift=2 additionally drops windows shorter than 3 points.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, max_timeshift=2, min_timeshift=2, n_jobs=0)
        correct_indices = [(1, 2), (1, 2), (1, 2), (1, 3), (1, 3), (1, 3)]
        correct_values_a = [1.0, 2.0, 3.0, 2.0, 3.0, 4.0]
        correct_values_b = [5.0, 6.0, 7.0, 6.0, 7.0, 8.0]
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)

    def test_negative_rolling(self):
        # Same data as test_positive_rolling, but windows extend forward
        # from (id, t): rolling_direction=-1.
        first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
        second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
        first_class["id"] = 1
        second_class["id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        """ df_full is
        a b time id
        0 1 5 0 1
        1 2 6 1 1
        2 3 7 2 1
        3 4 8 3 1
        4 10 12 20 2
        5 11 13 21 2
        """
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 0), (1, 1), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 20), (2, 20), (2, 21)]
        correct_values_a = [1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0]
        correct_values_b = [5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0]
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, n_jobs=0)
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=None is equivalent to unlimited.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, max_timeshift=None, n_jobs=0)
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=1: windows of at most 2 points.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, max_timeshift=1, n_jobs=0)
        correct_indices = [(1, 0), (1, 0), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 20), (2, 20), (2, 21)]
        correct_values_a = [1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0]
        correct_values_b = [5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0]
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=2: windows of at most 3 points.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, max_timeshift=2, n_jobs=0)
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 1), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 20), (2, 20), (2, 21)]
        correct_values_a = [1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0]
        correct_values_b = [5.0, 6.0, 7.0, 6.0, 7.0, 8.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0]
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # max_timeshift=4 again covers everything.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, max_timeshift=4, n_jobs=0)
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 0), (1, 1), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 20), (2, 20), (2, 21)]
        correct_values_a = [1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0]
        correct_values_b = [5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0]
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # min_timeshift=2 with max_timeshift=3 keeps only the long windows.
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-1, min_timeshift=2, max_timeshift=3, n_jobs=0)
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 0), (1, 1), (1, 1), (1, 1)]
        correct_values_a = [1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0]
        correct_values_b = [5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0]
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)

    def test_rolling_with_larger_shift(self):
        # |rolling_direction| > 1 advances the window end in strides.
        first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
        second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
        first_class["id"] = 1
        second_class["id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        """ df_full is
        a b time id
        0 1 5 0 1
        1 2 6 1 1
        2 3 7 2 1
        3 4 8 3 1
        4 10 12 20 2
        5 11 13 21 2
        """
        # Forward, stride 2: only every second window end survives.
        correct_indices = [(1, 1), (1, 1), (1, 3), (1, 3), (1, 3), (1, 3), (2, 21), (2, 21)]
        correct_values_a = [1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 10.0, 11.0]
        correct_values_b = [5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 12.0, 13.0]
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=2, n_jobs=0)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        # Backward, stride 2.
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 0), (1, 2), (1, 2), (2, 20), (2, 20)]
        correct_values_a = [1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 10.0, 11.0]
        correct_values_b = [5.0, 6.0, 7.0, 8.0, 7.0, 8.0, 12.0, 13.0]
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=-2, n_jobs=0)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)

    def test_stacked_rolling(self):
        # Long/stacked format: columns a and b become two "kind"s sharing
        # one _value column.
        first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
        second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
        first_class["id"] = 1
        second_class["id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        df_stacked = pd.concat([df_full[["time", "id", "a"]].rename(columns={"a": "_value"}), df_full[["time", "id", "b"]].rename(columns={"b": "_value"})], ignore_index=True)
        df_stacked["kind"] = ["a"] * 6 + ["b"] * 6
        """ df_stacked is
        time id _value kind
        0 0 1 1 a
        1 1 1 2 a
        2 2 1 3 a
        3 3 1 4 a
        4 20 2 10 a
        5 21 2 11 a
        6 0 1 5 b
        7 1 1 6 b
        8 2 1 7 b
        9 3 1 8 b
        10 20 2 12 b
        11 21 2 13 b
        """
        df = dataframe_functions.roll_time_series(df_stacked, column_id="id", column_sort="time", column_kind="kind", rolling_direction=-1, n_jobs=0)
        # Each window appears once per kind, hence the factor 2 per window.
        correct_indices = ([(1, 0)] * 2 * 4 + [(1, 1)] * 2 * 3 + [(1, 2)] * 2 * 2 + [(1, 3)] * 2 * 1 + [(2, 20)] * 2 * 2 + [(2, 21)] * 2 * 1)
        self.assertListEqual(list(df["id"].values), correct_indices)
        self.assertListEqual(list(df["kind"].values), ["a", "b"] * 13)
        self.assertListEqual(list(df["_value"].values), [1.0, 5.0, 2.0, 6.0, 3.0, 7.0, 4.0, 8.0, 2.0, 6.0, 3.0, 7.0, 4.0, 8.0, 3.0, 7.0, 4.0, 8.0, 4.0, 8.0, 10.0, 12.0, 11.0, 13.0, 11.0, 13.0])

    def test_dict_rolling(self):
        # Dict input: one DataFrame per kind, no sort column -> positional time.
        df_dict = {"a": pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}), "b": pd.DataFrame({"_value": [5, 6, 7, 8, 12, 13], "id": [1, 1, 1, 1, 2, 2]})}
        df = dataframe_functions.roll_time_series(df_dict, column_id="id", column_sort=None, column_kind=None, rolling_direction=-1, n_jobs=0)
        """ df is
        {a: _value id
        1.0 1
        2.0 1
        3.0 1
        4.0 1
        10.0 2
        11.0 2,
        b: _value id
        5.0 1
        6.0 1
        7.0 1
        8.0 1
        12.0 2
        13.0 2
        }
        """
        correct_indices = [(1, 0), (1, 0), (1, 0), (1, 0), (1, 1), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 0), (2, 0), (2, 1)]
        self.assertListEqual(list(df["a"]["id"].values), correct_indices)
        self.assertListEqual(list(df["b"]["id"].values), correct_indices)
        self.assertListEqual(list(df["a"]["_value"].values), [1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0])
        self.assertListEqual(list(df["b"]["_value"].values), [5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0])

    def test_dict_rolling_maxshift_1(self):
        # Same dict input, but capped at windows of 2 points.
        df_dict = {"a": pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}), "b": pd.DataFrame({"_value": [5, 6, 7, 8, 12, 13], "id": [1, 1, 1, 1, 2, 2]})}
        df = dataframe_functions.roll_time_series(df_dict, column_id="id", column_sort=None, column_kind=None, rolling_direction=-1, max_timeshift=1, n_jobs=0)
        """ df is
        {a: _value id
        1.0 1
        2.0 1
        3.0 1
        4.0 1
        10.0 2
        11.0 2,
        b: _value id
        5.0 1
        6.0 1
        7.0 1
        8.0 1
        12.0 2
        13.0 2
        }
        """
        correct_indices = [(1, 0), (1, 0), (1, 1), (1, 1), (1, 2), (1, 2), (1, 3), (2, 0), (2, 0), (2, 1)]
        self.assertListEqual(list(df["a"]["id"].values), correct_indices)
        self.assertListEqual(list(df["b"]["id"].values), correct_indices)
        self.assertListEqual(list(df["a"]["_value"].values), [1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 10.0, 11.0, 11.0])
        self.assertListEqual(list(df["b"]["_value"].values), [5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 12.0, 13.0, 13.0])

    def test_order_rolling(self):
        # Non-equidistant timestamps: windows end at the actual sort values.
        first_class = pd.DataFrame({"x": [1, 2, 3, 4], "time": [1, 15, 132, 145]})
        second_class = pd.DataFrame({"x": [5, 6, 7], "time": [16, 133, 146]})
        first_class["initial_id"] = 1
        second_class["initial_id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        # Do not show the warning on non-equidistant time
        with warning_free():
            window_size = 2
            df_rolled = dataframe_functions.roll_time_series(df_full, column_id="initial_id", column_sort="time", min_timeshift=window_size - 1, max_timeshift=window_size - 1)
        """ df is
        {x: _value id
        1.0 1
        2.0 1
        3.0 1
        4.0 1
        5.0 2
        6.0 2
        7.0 2,
        }
        """
        correct_indices = [(1, 15), (1, 15), (1, 132), (1, 132), (1, 145), (1, 145), (2, 133), (2, 133), (2, 146), (2, 146)]
        self.assertListEqual(list(df_rolled["id"]), correct_indices)

    def test_warning_on_non_uniform_time_steps(self):
        # Rolling over non-uniformly sampled time must emit a warning.
        with warnings.catch_warnings(record=True) as w:
            first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": [1, 2, 4, 5]})
            second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": list(range(20, 22))})
            first_class["id"] = 1
            second_class["id"] = 2
            df_full = pd.concat([first_class, second_class], ignore_index=True)
            dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, n_jobs=0)
            self.assertGreaterEqual(len(w), 1)
            self.assertIn("Your time stamps are not uniformly sampled, which makes rolling " "nonsensical in some domains.", [str(warning.message) for warning in w])

    def test_multicore_rolling(self):
        # Default n_jobs (multiprocessing) must match the serial result.
        first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
        second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
        first_class["id"] = 1
        second_class["id"] = 2
        df_full = pd.concat([first_class, second_class], ignore_index=True)
        """ df_full is
        a b time id
        0 1 5 0 1
        1 2 6 1 1
        2 3 7 2 1
        3 4 8 3 1
        4 10 12 20 2
        5 11 13 21 2
        """
        correct_indices = [(1, 0), (1, 1), (1, 1), (1, 2), (1, 2), (1, 2), (1, 3), (1, 3), (1, 3), (1, 3), (2, 20), (2, 21), (2, 21)]
        correct_values_a = [1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 10.0, 10.0, 11.0]
        correct_values_b = [5.0, 5.0, 6.0, 5.0, 6.0, 7.0, 5.0, 6.0, 7.0, 8.0, 12.0, 12.0, 13.0]
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
        df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time", column_kind=None, rolling_direction=1, n_jobs=0)
        self.assertListEqual(list(df["id"]), correct_indices)
        self.assertListEqual(list(df["a"].values), correct_values_a)
        self.assertListEqual(list(df["b"].values), correct_values_b)
class CheckForNanTestCase(TestCase):
    """Tests for dataframe_functions.check_for_nans_in_columns."""

    def test_all_columns(self):
        test_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1])
        # should not raise an exception
        dataframe_functions.check_for_nans_in_columns(test_df)
        # a NaN anywhere must raise when all columns are checked
        test_df = pd.DataFrame([[1, 2, 3], [4, np.NaN, 6]], index=[0, 1])
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df)

    def test_not_all_columns(self):
        # the NaN lives in column "b" only
        test_df = pd.DataFrame([[1, 2, 3], [4, np.NaN, 6]], index=[0, 1], columns=["a", "b", "c"])
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df)
        # any column selection containing "b" (list or single name) raises
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["a", "b"])
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["b"])
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, "b")
        self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["c", "b"])
        # selections avoiding "b" pass silently
        dataframe_functions.check_for_nans_in_columns(test_df, columns=["a", "c"])
        dataframe_functions.check_for_nans_in_columns(test_df, columns="a")
class ImputeTestCase(TestCase):
    """Tests for the zero-, range- and top-level impute helpers."""

    def test_impute_zero(self):
        # NaN, +inf and -inf are each replaced by 0 (in place).
        df = pd.DataFrame([{"value": np.NaN}])
        dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0])
        df = pd.DataFrame([{"value": np.PINF}])
        dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0])
        df = pd.DataFrame([{"value": np.NINF}])
        dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0])
        # mixed non-finite values; finite entries are untouched
        df = pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
        dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0, 0, 0, 1])
        # works for float64 and float32 dtypes alike
        df = pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
        df = df.astype(np.float64)
        df = dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0, 0, 0, 1])
        df = pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
        df = df.astype(np.float32)
        df = dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(list(df.value), [0, 0, 0, 1])
        # empty frame is a no-op
        df = pd.DataFrame([])
        dataframe_functions.impute_dataframe_zero(df)
        self.assertEqual(len(df), 0)

    def test_toplevel_impute(self):
        # Per column: NaN -> median, +inf -> max, -inf -> min.
        df = pd.DataFrame(np.transpose([[0, 1, 2, np.NaN], [1, np.PINF, 2, 3], [1, -3, np.NINF, 3]]), columns=["value_a", "value_b", "value_c"])
        dataframe_functions.impute(df)
        self.assertEqual(list(df.value_a), [0, 1, 2, 1])
        self.assertEqual(list(df.value_b), [1, 3, 2, 3])
        self.assertEqual(list(df.value_c), [1, -3, -3, 3])
        # float64 dtype, including columns with several non-finite values
        df = pd.DataFrame(np.transpose([[0, 1, 2, np.NaN], [1, np.PINF, 2, np.NaN], [np.NaN, -3, np.NINF, 3]]), columns=["value_a", "value_b", "value_c"])
        df = df.astype(np.float64)
        dataframe_functions.impute(df)
        self.assertEqual(list(df.value_a), [0, 1, 2, 1])
        self.assertEqual(list(df.value_b), [1, 2, 2, 1.5])
        self.assertEqual(list(df.value_c), [0, -3, -3, 3])
        # float32 dtype
        df = pd.DataFrame(np.transpose([[0, 1, 2, np.NaN], [1, np.PINF, 2, 3], [np.PINF, -3, np.NINF, 3]]), columns=["value_a", "value_b", "value_c"])
        df = df.astype(np.float32)
        dataframe_functions.impute(df)
        self.assertEqual(list(df.value_a), [0, 1, 2, 1])
        self.assertEqual(list(df.value_b), [1, 3, 2, 3])
        self.assertEqual(list(df.value_c), [3, -3, -3, 3])
        # empty frame is a no-op
        df = pd.DataFrame([])
        dataframe_functions.impute(df)
        self.assertEqual(len(df), 0)

    def test_impute_range(self):
        def get_df():
            # one NaN (value_a), one +inf (value_b), one -inf (value_c)
            return pd.DataFrame(np.transpose([[0, 1, 2, np.NaN], [1, np.PINF, 2, 3], [1, -3, np.NINF, 3]]), columns=["value_a", "value_b", "value_c"])

        # check if values are replaced correctly
        df = get_df()
        col_to_max = {"value_a": 200, "value_b": 200, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": -134, "value_c": -134}
        col_to_median = {"value_a": 55, "value_b": 55, "value_c": 55}
        dataframe_functions.impute_dataframe_range(df, col_to_max, col_to_min, col_to_median)
        self.assertEqual(list(df.value_a), [0, 1, 2, 55])
        self.assertEqual(list(df.value_b), [1, 200, 2, 3])
        self.assertEqual(list(df.value_c), [1, -3, -134, 3])
        # check for error if column key is missing
        df = get_df()
        col_to_max = {"value_a": 200, "value_b": 200, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": -134, "value_c": -134}
        col_to_median = {"value_a": 55, "value_c": 55}
        self.assertRaises(ValueError, dataframe_functions.impute_dataframe_range, df, col_to_max, col_to_min, col_to_median)
        # check for no error if column key is too much
        col_to_max = {"value_a": 200, "value_b": 200, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": -134, "value_c": -134}
        col_to_median = {"value_a": 55, "value_b": 55, "value_c": 55, "value_d": 55}
        dataframe_functions.impute_dataframe_range(df, col_to_max, col_to_min, col_to_median)
        # check for error if replacement value is not finite
        df = get_df()
        col_to_max = {"value_a": 200, "value_b": np.NaN, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": -134, "value_c": -134}
        col_to_median = {"value_a": 55, "value_b": 55, "value_c": 55}
        self.assertRaises(ValueError, dataframe_functions.impute_dataframe_range, df, col_to_max, col_to_min, col_to_median)
        df = get_df()
        col_to_max = {"value_a": 200, "value_b": 200, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": np.NINF, "value_c": -134}
        col_to_median = {"value_a": 55, "value_b": 55, "value_c": 55}
        self.assertRaises(ValueError, dataframe_functions.impute_dataframe_range, df, col_to_max, col_to_min, col_to_median)
        df = get_df()
        col_to_max = {"value_a": 200, "value_b": 200, "value_c": 200}
        col_to_min = {"value_a": -134, "value_b": -134, "value_c": -134}
        col_to_median = {"value_a": 55, "value_b": 55, "value_c": np.PINF}
        self.assertRaises(ValueError, dataframe_functions.impute_dataframe_range, df, col_to_max, col_to_min, col_to_median)
        # a frame without non-finite values stays unchanged
        df = pd.DataFrame([0, 1, 2, 3, 4], columns=["test"])
        col_dict = {"test": 0}
        dataframe_functions.impute_dataframe_range(df, col_dict, col_dict, col_dict)
        self.assertEqual(df.columns, ["test"])
        self.assertListEqual(list(df["test"].values), [0, 1, 2, 3, 4])
        # empty frame is a no-op
        df = pd.DataFrame([])
        dataframe_functions.impute_dataframe_range(df, {}, {}, {})
        self.assertEqual(len(df), 0)
class RestrictTestCase(TestCase):
    """Tests for dataframe_functions.restrict_input_to_index."""

    def test_restrict_dataframe(self):
        df = pd.DataFrame({"id": [1, 2, 3] * 2})
        # restricting to a subset keeps only matching rows
        df_restricted = dataframe_functions.restrict_input_to_index(df, "id", [2])
        self.assertEqual(list(df_restricted.id), [2, 2])
        # restricting to the full index set returns an equal frame
        df_restricted2 = dataframe_functions.restrict_input_to_index(df, "id", [1, 2, 3])
        self.assertTrue(df_restricted2.equals(df))

    def test_restrict_dict(self):
        # dict input: the restriction is applied to every entry
        kind_to_df = {"a": pd.DataFrame({"id": [1, 2, 3]}), "b": pd.DataFrame({"id": [3, 4, 5]})}
        kind_to_df_restricted = dataframe_functions.restrict_input_to_index(kind_to_df, "id", [3])
        self.assertEqual(list(kind_to_df_restricted["a"].id), [3])
        self.assertEqual(list(kind_to_df_restricted["b"].id), [3])
        kind_to_df_restricted2 = dataframe_functions.restrict_input_to_index(kind_to_df, "id", [1, 2, 3, 4, 5])
        self.assertTrue(kind_to_df_restricted2["a"].equals(kind_to_df["a"]))
        self.assertTrue(kind_to_df_restricted2["b"].equals(kind_to_df["b"]))

    def test_restrict_wrong(self):
        # anything that is neither DataFrame nor dict is rejected
        other_type = np.array([1, 2, 3])
        self.assertRaises(TypeError, dataframe_functions.restrict_input_to_index, other_type, "id", [1, 2, 3])
class GetRangeValuesPerColumnTestCase(TestCase):
    """Tests for dataframe_functions.get_range_values_per_column, which
    returns per-column max / min / median dicts over the finite values."""

    def test_ignores_non_finite_values(self):
        # NaN and +/-inf must not influence max, min or median
        df = pd.DataFrame([0, 1, 2, 3, np.NaN, np.PINF, np.NINF], columns=["value"])
        (col_to_max, col_to_min, col_to_median) = dataframe_functions.get_range_values_per_column(df)
        self.assertEqual(col_to_max, {"value": 3})
        self.assertEqual(col_to_min, {"value": 0})
        self.assertEqual(col_to_median, {"value": 1.5})

    def test_range_values_correct_with_even_length(self):
        # even count: median is the mean of the two middle values
        df = pd.DataFrame([0, 1, 2, 3], columns=["value"])
        (col_to_max, col_to_min, col_to_median) = dataframe_functions.get_range_values_per_column(df)
        self.assertEqual(col_to_max, {"value": 3})
        self.assertEqual(col_to_min, {"value": 0})
        self.assertEqual(col_to_median, {"value": 1.5})

    def test_range_values_correct_with_uneven_length(self):
        df = pd.DataFrame([0, 1, 2], columns=["value"])
        (col_to_max, col_to_min, col_to_median) = dataframe_functions.get_range_values_per_column(df)
        self.assertEqual(col_to_max, {"value": 2})
        self.assertEqual(col_to_min, {"value": 0})
        self.assertEqual(col_to_median, {"value": 1})

    def test_no_finite_values_yields_0(self):
        # an all-non-finite column falls back to 0 and warns once
        df = pd.DataFrame([np.NaN, np.PINF, np.NINF], columns=["value"])
        with warnings.catch_warnings(record=True) as w:
            (col_to_max, col_to_min, col_to_median) = dataframe_functions.get_range_values_per_column(df)
            self.assertEqual(len(w), 1)
            self.assertEqual(str(w[0].message), "The columns ['value'] did not have any finite values. Filling with zeros.")
        self.assertEqual(col_to_max, {"value": 0})
        self.assertEqual(col_to_min, {"value": 0})
        self.assertEqual(col_to_median, {"value": 0})
class MakeForecastingFrameTestCase(TestCase):
    """Tests for dataframe_functions.make_forecasting_frame: rolled feature
    frame plus one-step-ahead target series, for list/array/Series input."""

    def test_make_forecasting_frame_list(self):
        # plain range input; y is x shifted by one time step
        df, y = dataframe_functions.make_forecasting_frame(x=range(4), kind="test", max_timeshift=1, rolling_direction=1)
        expected_df = pd.DataFrame({"id": [("id", 1), ("id", 2), ("id", 3)], "kind": ["test"] * 3, "value": [0, 1, 2], "time": [0, 1, 2]})
        expected_y = pd.Series(data=[1, 2, 3], index=[("id", 1), ("id", 2), ("id", 3)], name="value")
        assert_frame_equal(df.sort_index(axis=1).reset_index(drop=True), expected_df.sort_index(axis=1))
        assert_series_equal(y, expected_y)

    def test_make_forecasting_frame_range(self):
        # numpy array input behaves the same as the list/range case
        df, y = dataframe_functions.make_forecasting_frame(x=np.arange(4), kind="test", max_timeshift=1, rolling_direction=1)
        expected_df = pd.DataFrame({"id": list(zip(["id"] * 3, np.arange(1, 4))), "kind": ["test"] * 3, "value": np.arange(3), "time": [0, 1, 2]})
        expected_y = pd.Series(data=[1, 2, 3], index=[("id", 1), ("id", 2), ("id", 3)], name="value")
        assert_frame_equal(df.sort_index(axis=1).reset_index(drop=True), expected_df.sort_index(axis=1))
        assert_series_equal(y, expected_y)

    def test_make_forecasting_frame_pdSeries(self):
        # datetime-indexed Series: ids and times carry the timestamps
        t_index = pd.date_range("1/1/2011", periods=4, freq="H")
        df, y = dataframe_functions.make_forecasting_frame(x=pd.Series(data=range(4), index=t_index), kind="test", max_timeshift=1, rolling_direction=1)
        time_shifts = pd.DatetimeIndex(["2011-01-01 01:00:00", "2011-01-01 02:00:00", "2011-01-01 03:00:00"], freq="H")
        expected_y = pd.Series(data=[1, 2, 3], index=zip(["id"] * 3, time_shifts), name="value")
        expected_df = pd.DataFrame({"id": list(zip(["id"] * 3, pd.DatetimeIndex(["2011-01-01 01:00:00", "2011-01-01 02:00:00", "2011-01-01 03:00:00"]))), "kind": ["test"] * 3, "value": [0, 1, 2], "time": pd.DatetimeIndex(["2011-01-01 00:00:00", "2011-01-01 01:00:00", "2011-01-01 02:00:00"])})
        assert_frame_equal(df.sort_index(axis=1).reset_index(drop=True), expected_df.sort_index(axis=1))
        assert_series_equal(y, expected_y)

    def test_make_forecasting_frame_feature_extraction(self):
        # smoke test: the produced frame feeds extract_relevant_features
        t_index = pd.date_range("1/1/2011", periods=4, freq="H")
        df, y = dataframe_functions.make_forecasting_frame(x=pd.Series(data=range(4), index=t_index), kind="test", max_timeshift=1, rolling_direction=1)
        extract_relevant_features(df, y, column_id="id", column_sort="time", column_value="value", default_fc_parameters=MinimalFCParameters())
class GetIDsTestCase(TestCase):
    """Tests for dataframe_functions.get_ids (set of distinct ids)."""

    def test_get_id__correct_DataFrame(self):
        df = pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]})
        self.assertEqual(dataframe_functions.get_ids(df, "id"), {1, 2})

    def test_get_id__correct_dict(self):
        # dict input: the union of ids over all entries is returned
        df_dict = {"a": pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}), "b": pd.DataFrame({"_value": [5, 6, 7, 8, 12, 13], "id": [4, 4, 3, 3, 2, 2]})}
        self.assertEqual(dataframe_functions.get_ids(df_dict, "id"), {1, 2, 3, 4})

    def test_get_id_wrong(self):
        # anything that is neither DataFrame nor dict is rejected
        other_type = np.array([1, 2, 3])
        self.assertRaises(TypeError, dataframe_functions.get_ids, other_type, "id")
class AddSubIdTestCase(TestCase):
def test_no_parameters(self):
dataframe = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
extended_dataframe = dataframe_functions.add_sub_time_series_index(dataframe, 2)
self.assertEqual(list(extended_dataframe["id"]), [0, 0, 1, 1, 2, 2, 3, 3, 4])
assert_series_equal(dataframe["value"], extended_dataframe["value"])
def test_id_parameters(self):
dataframe = pd.DataFrame(
{"value": [1, 2, 3, 4, 5, 6, 7, 8, 9], "id": [1, 1, 1, 1, 2, 2, 2, 2, 2]}
)
extended_dataframe = dataframe_functions.add_sub_time_series_index(
dataframe, 2, column_id="id"
)
self.assertEqual(
list(extended_dataframe["id"]),
[(0, 1), (0, 1), (1, 1), (1, 1), (0, 2), (0, 2), (1, 2), (1, 2), (2, 2)],
)
assert_series_equal(dataframe["value"], extended_dataframe["value"])
def test_kind_parameters(self):
dataframe = pd.DataFrame(
{
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"id": [1, 1, 1, 1, 2, 2, 2, 2, 2],
"kind": [0, 1, 0, 1, 0, 1, 0, 1, 0],
}
)
extended_dataframe = dataframe_functions.add_sub_time_series_index(
dataframe, 2, column_id="id", column_kind="kind"
).sort_index()
self.assertEqual(
list(extended_dataframe["id"]),
[(0, 1), (0, 1), (0, 1), (0, 1), (0, 2), (0, 2), (0, 2), (0, 2), (1, 2)],
)
assert_series_equal(dataframe["value"], extended_dataframe["value"])
assert_series_equal(dataframe["kind"], extended_dataframe["kind"])
def test_sort_parameters(self):
dataframe = pd.DataFrame(
{
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"id": [1, 1, 1, 1, 2, 2, 2, 2, 2],
"kind": [0, 1, 0, 1, 0, 1, 0, 1, 0],
"sort": [9, 8, 7, 6, 5, 4, 3, 2, 1],
}
)
extended_dataframe = dataframe_functions.add_sub_time_series_index(
dataframe, 2, column_id="id", column_kind="kind", column_sort="sort"
)
self.assertEqual(
list(extended_dataframe["id"]),
[(0, 2), (0, 2), (0, 2), (0, 2), (1, 2), (0, 1), (0, 1), (0, 1), (0, 1)],
)
self.assertEqual(list(extended_dataframe["value"]), [9, 8, 7, 6, 5, 4, 3, 2, 1])
self.assertEqual(list(extended_dataframe["kind"]), [0, 1, 0, 1, 0, 1, 0, 1, 0])
self.assertEqual(list(extended_dataframe["sort"]), [1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_dict_input(self):
    # A dict of DataFrames must be processed per entry and returned as a dict
    # with the same keys.
    df = pd.DataFrame(
        {"value": [1, 2, 3, 4, 5, 6, 7, 8, 9], "id": [1, 1, 1, 1, 2, 2, 2, 2, 2]}
    )
    result_map = dataframe_functions.add_sub_time_series_index(
        {"1": df}, 2, column_id="id"
    )
    self.assertIn("1", result_map)
    result = result_map["1"]
    expected_ids = [(0, 1), (0, 1), (1, 1), (1, 1),
                    (0, 2), (0, 2), (1, 2), (1, 2), (2, 2)]
    self.assertEqual(list(result["id"]), expected_ids)
    assert_series_equal(df["value"], result["value"])
|
#!/usr/bin/python
#\file joint_spring.py
#\brief Joint spring controller test
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Oct.29, 2015
'''
NOTE: run beforehand:
$ rosrun baxter_interface joint_trajectory_action_server.py
'''
from bxtr import *

if __name__=='__main__':
    # Start a ROS node and enable the Baxter robot before commanding it.
    rospy.init_node('baxter_test')
    EnableBaxter()
    robot= TRobotBaxter()
    robot.Init()
    #Joint springs mode
    # Virtual joint-spring control on both arms; stops when the joint error
    # exceeds 0.2 rad, with no time limit (stop_dt=None).
    robot.ActivateJointSprings(arms=(RIGHT,LEFT), stop_err=0.2, stop_dt=None)
    rospy.signal_shutdown('Done.')
|
# Demonstrate sorted() (returns a new list) vs list.sort() (in place).
li = [9, 1, 8, 7, 3, 6, 4, 2, 5]
s_li = sorted(li)
print('Sorted Variable:\t', s_li)
print('Original Variable:\t', li)
li.sort()
print('Sort Variable:\t', li)
li = [9, 1, 8, 7, 3, 6, 4, 2, 5]
s_li = sorted(li, reverse=True)
print('Reverse Sorted Variable:\t', s_li)
print('Original Variable:\t', li)
li.sort(reverse=True)
print('Reverse Sort Variable:\t', li)
# sorted() works on any iterable; tuples have no .sort() method.
tup = (9, 1, 8, 7, 3, 6, 4, 2, 5)
s_tup = sorted(tup)
print('Tuple Sorted Variable:\t', s_tup)
print('Original Variable:\t', tup)
# BUG FIX: the original sorted `li` here while printing it as the tuple
# result; sort the tuple, as the label says.
s_tup = sorted(tup, reverse=True)
print('Reverse Tuple Variable:\t', s_tup)
print('Original Variable:\t', tup)
# Sorting a dict iterates its keys, so this yields the keys in order.
di = {'name': 'Corey', 'job': 'programming', 'age': 47, 'os':'Mac'}
s_di = sorted(di)
print('Dict:\t', s_di)
li = [-6, -5, -4, 1, 2, 3]
s_li = sorted(li)
print(s_li)
# A key function changes the comparison; abs sorts by magnitude.
li = [-6, -5, -4, 1, 2, 3]
s_li = sorted(li, key=abs)
print(s_li)
class Employee:
    """Minimal employee record used by the key-function sorting demo."""

    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.salary = salary

    def __repr__(self):
        # Same textual form as the original: "(name, age, $salary)".
        return f'({self.name}, {self.age}, ${self.salary})'
# Sort Employee objects alphabetically by name via a key function.
e1 = Employee('Carl', 37, 70000)
e2 = Employee('Sarah', 29, 80000)
e3 = Employee('John', 43, 90000)
employees = [e1, e2, e3]
def e_sort(emp):
    # Key function: compare employees by their name.
    return emp.name
s_emp = sorted(employees, key=e_sort)
print(s_emp)
class Employee:
    """Employee record (re-declared for the reverse-sort example)."""

    def __init__(self, name, age, salary):
        # Tuple unpacking assigns all three attributes at once.
        (self.name, self.age, self.salary) = (name, age, salary)

    def __repr__(self):
        return '({0}, {1}, ${2})'.format(self.name, self.age, self.salary)
# Same key-function sort as above, but descending (reverse=True).
e1 = Employee('Carl', 37, 70000)
e2 = Employee('Sarah', 29, 80000)
e3 = Employee('John', 43, 90000)
employees = [e1, e2, e3]
def e_sort(emp):
    # Key function: compare employees by their name.
    return emp.name
s_emp = sorted(employees, key=e_sort, reverse=True)
print(s_emp)
from operator import attrgetter
class Employee:
    """Employee record (re-declared for the operator.attrgetter example)."""
    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.salary = salary
    def __repr__(self):
        return '({}, {}, ${})'.format(self.name, self.age, self.salary)
# attrgetter('age') replaces a hand-written key function for attribute sorts.
e1 = Employee('Carl', 37, 70000)
e2 = Employee('Sarah', 29, 80000)
e3 = Employee('John', 43, 90000)
employees = [e1, e2, e3]
s_emp = sorted(employees, key=attrgetter('age'))
print(s_emp)
# Conditional-expression (ternary) and short-string exercises.
value = 15
new_value = value / 2 if value < 100 else - value
print(new_value)
###############################
value = 500
new_value = 1 if value < 100 else 0
print(new_value)
################################
value = 10
# IDIOM FIX: `True if cond else False` is redundant — the comparison already
# yields a bool.
new_value = value < 100
print(new_value)
################################
my_str = "slOvo"
# NOTE(review): str.upper()/str.lower() return NEW strings; these results are
# discarded, so the next two statements have no observable effect.
my_str.upper()
################################
my_str = "slOvo"
my_str.lower()
################################
# Double short strings (< 5 chars); leave longer ones unchanged.
my_str = "qwer"
if len(my_str) < 5:
    new_str = my_str * 2
else:
    new_str = my_str
print(new_str)
################################
# Append the reversed string to short strings.
my_str = "qwer"
if len(my_str) < 5:
    back_str = my_str + my_str[::-1]
else:
    back_str = my_str
print(back_str)
################################
|
# -*- coding: utf-8 -*-
from os.path import exists, expanduser, isfile, join
from sys import exit
import rsa
import settings
from accessory import get_abs_path
from salt import get_salt
def getpassword(path):
    """Get password from an encoded file.

    Input:
        path -- source path.
    Output:
        passwd -- password (decrypted payload with the salt prefix removed).
    """
    # Prepare cipher abs path
    cipher_abs_path = get_abs_path(path)
    # Read cipher. BUG FIX: open in binary mode — rsa.decrypt() requires
    # bytes, and text mode would corrupt the ciphertext on Python 3.
    if exists(cipher_abs_path) and isfile(cipher_abs_path):
        with open(cipher_abs_path, 'rb') as f:
            cipher = f.read()
    else:
        print(settings.messages["_error_NoCipher"] % cipher_abs_path)
        exit(1)
    # Read private_key (PKCS#1 PEM; load_pkcs1 accepts bytes).
    keys_location_rel_path = settings.cfg.get("keys",
                                              "keys_location_rel_path")
    keys_location_abs_path = expanduser(keys_location_rel_path)
    private_key_file_name = settings.cfg.get("keys", "private_key_file_name")
    private_key_abs_path = join(keys_location_abs_path, private_key_file_name)
    if exists(private_key_abs_path) and isfile(private_key_abs_path):
        with open(private_key_abs_path, 'rb') as f:
            private_key_data = f.read()
        private_key = rsa.PrivateKey.load_pkcs1(private_key_data)
    else:
        print(settings.messages["_error_NoPrivateKey"] % private_key_abs_path)
        exit(1)
    # Read salt
    salt = get_salt()
    # Decode cipher
    try:
        data = rsa.decrypt(cipher, private_key)
    except Exception:
        print(settings.messages["_error_DecodeError"] % (cipher_abs_path,
                                                         private_key_abs_path))
        exit(1)
    else:
        # The plaintext is salt + password; strip the salt prefix.
        passwd = data[len(salt):]
        return passwd
|
import pandas as pd
from models.basic import PipeLine
from models.constants import TASK_DESEQ, TASK_MULTI_QC
CONFIG_FILE = "config/config.yaml"
configfile: CONFIG_FILE

# Sample sheet, output base directory and pipeline object shared by all rules.
SAMPLES_DF = pd.read_csv(config['samples'])
BASE = config['base']
PIPELINE = PipeLine(CONFIG_FILE)
def get_final_outputs(wildcards):
    """Collect every terminal output file requested by `rule all`."""
    files = []
    # Per-sample outputs, driven by the sample sheet.
    for _, row in SAMPLES_DF.iterrows():
        srr = row["run"]
        paired = row["is_paired"]
        files.extend(PIPELINE.output_files(srr, paired))
    # DESeq2 outputs are dataset-wide, not per-sample.
    if TASK_DESEQ in PIPELINE.tasks:
        files.extend(PIPELINE.get_deseq2_outputs())
    # Always keep MultiQC at the top of the input list
    if TASK_MULTI_QC in PIPELINE.tasks:
        files.insert(0, f"{PIPELINE.base}/quality.html")
    return files
def get_fastq_files(wildcards):
    # Resolve the FASTQ path(s) for the wildcard-captured SRR accession.
    return PIPELINE.fastq(wildcards.SRR_ID)
"""
Important: Rule order is important. Be careful with which rule you use before.
"""
rule all:
input: get_final_outputs
threads: config["threads"]
# Include all other rules files
# This order is also important as some of the functions are reused in other
# files.
include: "rules/ncbi.smk"
include: "rules/sortmerna.smk"
include: "rules/fastqc.smk"
include: "rules/star.smk"
include: "rules/stringtie.smk"
include: "rules/salmon.smk"
include: "rules/kallisto.smk"
include: "rules/counts.smk"
include: "rules/deseq2.smk"
include: "rules/multiqc.smk"
include: "rules/trinity.smk"
|
import pickle

# Load the previously pickled list of names. BUG FIX: the original never
# closed the file; the context manager closes it even if unpickling fails.
with open("lista_nombres", "rb") as fichero:  # read the binary file
    lista = pickle.load(fichero)
print(lista)
"""empty message
Revision ID: 9f17ec120c2b
Revises: 493466ec9210
Create Date: 2018-04-17 20:41:17.155314
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9f17ec120c2b'
down_revision = '493466ec9210'
branch_labels = None
depends_on = None
def upgrade():
    """Add created_on/updated_on timestamp columns (DB default now())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('bookmarks', sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
    op.add_column('bookmarks', sa.Column('updated_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop the two timestamp columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('bookmarks', 'updated_on')
    op.drop_column('bookmarks', 'created_on')
    # ### end Alembic commands ###
|
from sys import platform
class Hosts:
    """Add/remove an `ip domain` entry in the operating system's hosts file."""

    def __init__(self, ip, domain):
        self.ip = ip
        self.domain = domain

    # def __del__(self):
    #     self.remove_from_hosts()

    def get_hosts(self):
        """Return every line of the hosts file (with trailing newlines)."""
        # `with` guarantees the handle is closed (the original leaked it on error).
        with open(self.hosts_path, encoding='utf-8') as f:
            return f.readlines()

    def update_hosts(self, lines):
        """Overwrite the hosts file with the given lines."""
        with open(self.hosts_path, mode='w', encoding='utf-8') as f:
            f.writelines(lines)

    def add_to_hosts(self):
        """Append this instance's `ip domain` mapping to the hosts file."""
        self.check_http()
        lines = self.get_lines()
        lines.append(' '.join((self.ip, self.domain, '\n')))
        self.update_hosts(lines)

    def remove_from_hosts(self):
        """Remove this instance's exact mapping line, if present."""
        self.check_http()
        lines = self.get_hosts()
        s = ' '.join((self.ip, self.domain, '\n'))
        if s in lines:
            lines.remove(s)
            self.update_hosts(lines)

    def get_lines(self):
        """Return hosts lines excluding this domain and malformed entries.

        BUG FIX: the original removed items from the list it was iterating,
        which skips the element following each removal. Build a filtered copy.
        """
        return [line for line in self.get_hosts()
                if self.domain not in line and ' ' in line.strip()]

    def check_http(self):
        """Strip a leading http:// or https:// scheme from the domain."""
        if 'http' in self.domain:
            self.domain = self.domain.replace('http://', '').replace('https://', '')

    @property
    def hosts_path(self):
        # Linux vs. Windows location of the hosts file.
        if platform == 'linux' or platform == 'linux2':
            return '/etc/hosts'
        else:
            return 'C:\\Windows\\System32\\drivers\\etc\\hosts'
import requests
import pymysql
import re
# Connect to the MySQL database that stores the chat log.
db = pymysql.connect("localhost","root","root","search_news",charset='utf8')

# REPL: forward user input to the Tuling chatbot API, print and store the
# reply; typing "exit" dumps the stored log and quits.
while True:
    info = input("输入对话内容:")
    cur = db.cursor()
    if info == "exit":
        sql = "select * from news"
        try:
            cur.execute(sql)
            results = cur.fetchall()
            print("内容是:",results)
        except Exception as e:
            raise e
        exit()
    else:
        appkey = "e5ccc9c7c8834ec3b08940e290ff1559"
        url = "http://www.tuling123.com/openapi/api?key=%s&info=%s"%(appkey,info)
        # Extract the "text" field from the JSON-ish response body.
        search_str = '{"code":.*?,"text":"(.*?)"}'
        req = requests.get(url).text
        content = req
        search_content = re.compile(search_str)
        result_news = search_content.findall(content)
        str_result_news = "".join(result_news)
        print(str_result_news)
        # SECURITY FIX: the original interpolated the reply into the SQL
        # string via str.format (SQL injection); use a parameterized query.
        sql_insert = "insert into news (content) values (%s)"
        try:
            cur.execute(sql_insert, (str_result_news,))
            # commit the insert
            db.commit()
        except Exception:
            # roll back on error
            db.rollback()
|
from django.db.models import Q, Count
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from products.mixins import ObjectViewedMixin
# from .forms import DocProductFormSet
from products.forms import ProductModelForm
from .models import DocProduct
from products.models import Product, Category, ParentCategory
def is_valid_queryparam(param):
    """Return True when `param` is a usable query value (neither '' nor None)."""
    return param not in ('', None)
def filter(request):
    """Build a DocProduct queryset from the request's GET filter parameters.

    NOTE(review): this function shadows the builtin ``filter``; renaming it
    would require updating callers, so it is only flagged here.
    """
    qs = DocProduct.objects.all()
    categories = Category.objects.all()  # NOTE(review): unused local
    name_contains_query = request.GET.get('name_contains')
    category = request.GET.get('category')
    subcat = request.GET.get('subcat')
    active_order = request.GET.get('active_order')
    # Free-text search over the product's English title or name.
    if is_valid_queryparam(name_contains_query):
        qs = qs.filter(Q(product__title_e__icontains=name_contains_query)
                       | Q(product__name__icontains=name_contains_query)
                       ).distinct()
    # 'Choose...' is the placeholder option of the category select widgets.
    if is_valid_queryparam(category) and category != 'Choose...':
        qs = qs.filter(product__category_c__parent__name=category)
    if is_valid_queryparam(subcat) and subcat != 'Choose...':
        qs = qs.filter(product__category_c__name=subcat)
    if active_order == 'on':
        qs = qs.filter(product__active_order=True)
    # Unconditional restriction to the "Food" category.
    qs = qs.filter(Q(product__category_b__name__icontains="Food"))
    return qs
def DocProductListView(request):
    """Render the DocProduct list page filtered by the GET parameters."""
    template_name = "docproducts/list.html"
    qs = filter(request)  # the module-level filter() above, not the builtin
    context = {
        'categories': Category.objects.all(),
        'parentscat': ParentCategory.objects.all(),
        'queryset': qs
    }
    return render(request, template_name, context)
class DocProductCreateSlugView(ObjectViewedMixin, CreateView):
    """Create view for DocProduct rendered on the product detail template."""
    #queryset = DocProduct.objects.all()
    template_name = "docproducts/detail.html"
    form_class = ProductModelForm
    second_form_class = ProductModelForm
    success_url = '/products'

    def get_context_data(self, *args, **kwargs):
        # Expose every product so the template can list them next to the form.
        context = super(DocProductCreateSlugView,
                        self).get_context_data(*args, **kwargs)
        context['product_list'] = Product.objects.all()
        return context

    def get_object(self, *args, **kwargs):
        """Look up the DocProduct by the URL slug.

        NOTE(review): Http404 is not imported in this module — confirm
        ``from django.http import Http404`` exists at the top of the file.
        """
        #request = self.request
        slug = self.kwargs.get('slug')
        try:
            instance = DocProduct.objects.get(slug=slug)
        except DocProduct.DoesNotExist:
            raise Http404("Not found..")
        except:
            raise Http404("hummmz..")
        return instance

    def form_valid(self, form):
        # print(form.cleaned_data)
        return super().form_valid(form)
def update_post(request, slug):
    """Edit an existing DocProduct and its inline product formset.

    NOTE(review): DocProductModelForm, ProductFormSet and redirect are not
    defined or imported in this module — as written this view raises
    NameError; confirm the intended imports.
    """
    #slug = request.kwargs.get('slug')
    post = get_object_or_404(DocProduct, slug=slug)
    form = DocProductModelForm(request.POST or None, instance=post)
    formset = ProductFormSet(request.POST or None, files=request.FILES or None, instance=post)
    if request.method == 'POST' and form.is_valid() and formset.is_valid():
        form.save()
        formset.save()
        # Redisplay the edit page (translated from the Japanese comment).
        return redirect('docproducts:ProductList')
    context = {
        'form': form,
        'formset': formset
    }
    return render(request, 'app/post_form.html', context)
class DocProductDetailSlugView(ObjectViewedMixin, UpdateView):
    """Edit a DocProduct (looked up by slug) using two product forms."""
    #queryset = DocProduct.objects.all()
    template_name = "docproducts/detail.html"
    form_class = ProductModelForm
    second_form_class = ProductModelForm
    success_url = reverse_lazy('docproducts:ProductList')

    def get_object(self, *args, **kwargs):
        """Resolve the DocProduct from the URL slug.

        NOTE(review): Http404 is not imported in this module — confirm
        ``from django.http import Http404`` exists at the top of the file.
        """
        slug = self.kwargs.get('slug')
        try:
            instance = DocProduct.objects.get(slug=slug)
        except DocProduct.DoesNotExist:
            raise Http404("Not found..")
        except Exception:
            raise Http404("hummmz..")
        return instance

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['active_client'] = True
        # Bind the formset to POST data when present so validation errors are
        # redisplayed; otherwise render an empty formset.
        if self.request.POST:
            context['products'] = ProductFormSet(self.request.POST)
        else:
            context['products'] = ProductFormSet()
        return context

    def post(self, request, *args, **kwargs):
        """Validate and save both forms, then redirect to the product list."""
        self.object = self.get_object()
        form = self.form_class(request.POST)
        form2 = self.second_form_class(request.POST)
        if form.is_valid() and form2.is_valid():
            productdata = form.save(commit=False)
            productdata.save()
            docProductdata = form2.save(commit=False)
            docProductdata.save()
            return HttpResponseRedirect(self.get_success_url())
        return self.render_to_response(
            self.get_context_data(form=form, form2=form2))

    def get_success_url(self):
        # BUG FIX: the original declared get_success_url(self, form), but both
        # post() above and Django's FormMixin call it with NO arguments, so
        # every successful submit raised TypeError.
        return reverse_lazy('docproducts:ProductList')

    def form_valid(self, form):
        # BUG FIX: the original body referenced undefined names
        # (context['titles'], transaction, CollectionCreate) and ended with
        # unreachable code; delegate to the standard UpdateView behavior.
        return super().form_valid(form)
# External resource locations used across the app: MovieLens dataset archives,
# the project's Instagram page, and the feedback Google Form.
MINI_DATASET_URL = "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip"
FULL_DATASET_URL = "http://files.grouplens.org/datasets/movielens/ml-latest.zip"
IG_URL = "https://www.instagram.com/yame_movies/"
GOOGLE_FORM_URL = "https://docs.google.com/forms/d/e/1FAIpQLSf9bL0StMXnjjfSlhgekbMFJNw5okT2bpFUqfO-O8dAbPfKCw/viewform?usp=sf_link"
|
import numpy as np
import vegans.utils.loading.architectures as architectures
from vegans.utils.loading.MNISTLoader import MNISTLoader
from vegans.utils.loading.DatasetLoader import DatasetLoader, DatasetMetaData
class CIFAR10Loader(MNISTLoader):
    """Loader for pickled CIFAR-10 data; reuses the MNIST network builders."""

    def __init__(self, root=None):
        self.path_data = "cifar10_data.pickle"
        self.path_targets = "cifar10_targets.pickle"
        # MD5 checksums used to validate the downloaded pickles.
        m5hashes = {
            "data": "40e8e2ca6c43feaa1c7c78a9982b978e",
            "targets": "9a7e604de1826613e860e0bce5a6c1d0"
        }
        metadata = DatasetMetaData(directory="CIFAR10", m5hashes=m5hashes)
        DatasetLoader.__init__(self, metadata=metadata, root=root)

    @staticmethod
    def _preprocess(X_train, y_train, X_test, y_test):
        """Preprocess CIFAR-10: scale images to [0, 1], one-hot the labels."""
        # Normalize with the training maximum so train/test share one scale.
        max_number = X_train.max()
        X_train = X_train / max_number
        X_test = X_test / max_number
        if y_train is not None:
            y_train = np.eye(10)[y_train.reshape(-1)]
            y_test = np.eye(10)[y_test.reshape(-1)]
        return X_train, y_train, X_test, y_test

    def load_generator(self, x_dim=(3, 32, 32), z_dim=64, y_dim=10):
        return architectures.load_mnist_generator(x_dim=x_dim, z_dim=z_dim, y_dim=y_dim)

    def load_adversary(self, x_dim=(3, 32, 32), y_dim=10, adv_type="Discriminator"):
        return architectures.load_mnist_adversary(x_dim=x_dim, y_dim=y_dim, adv_type=adv_type)

    def load_encoder(self, x_dim=(3, 32, 32), z_dim=64, y_dim=10):
        # BUG FIX: the original passed self.x_dim (attribute not set anywhere
        # in this class) instead of the x_dim parameter, unlike every sibling.
        return architectures.load_mnist_encoder(x_dim=x_dim, z_dim=z_dim, y_dim=y_dim)

    def load_autoencoder(self, z_dim=64, y_dim=10):
        return architectures.load_mnist_autoencoder(z_dim=z_dim, y_dim=y_dim)

    def load_decoder(self, z_dim=64, y_dim=10):
        return architectures.load_mnist_decoder(z_dim=z_dim, y_dim=y_dim)
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
from __future__ import unicode_literals
import inspect
import sys
from django.db import models
from libs.logger import app_logger as log
class ExpressionPrimaryManager(models.Manager):
    """Manager with create/update helpers for ExpressionPrimary rows."""

    def create_expression(
        self,
        expression_owner_id,
        expression_content,
        expression_content_url=None,
        expression_imagefile=None,
        broadcast_parent_id=None,
        expression_weight=0,
        total_upvotes=0,
        total_collects=0,
        total_broadcasts=0,
        total_discussions=0,
    ):
        """Insert a new expression row and return its primary key."""
        log.debug('Expression create operation')
        expression = self.create(
            expression_owner_id=expression_owner_id,
            expression_content=expression_content,
            expression_content_url=expression_content_url,
            expression_imagefile=expression_imagefile,
            broadcast_parent_id=broadcast_parent_id,
            expression_weight=expression_weight,
            total_upvotes=total_upvotes,
            total_collects=total_collects,
            total_broadcasts=total_broadcasts,
            total_discussions=total_discussions,
        )
        return expression.id

    def update_expression(
        self,
        expression_owner_id,
        expression_content,
        expression_content_url=None,
        expression_imagefile=None,
        expression_weight=0,
        broadcast_parent_id=None,
        total_upvotes=0,
        total_collects=0,
        total_broadcasts=0,
        total_discussions=0,
    ):
        """Update-or-create an expression row and return its primary key."""
        log.debug('Expression update operation')
        # BUG FIX: update_or_create returns an (object, created) tuple, so the
        # original `expression.id` raised AttributeError. Unpack the tuple.
        # NOTE(review): without a `defaults=` argument every field is part of
        # the lookup, making this behave like get_or_create — confirm intent.
        expression, _created = self.update_or_create(
            expression_owner_id=expression_owner_id,
            expression_content=expression_content,
            expression_content_url=expression_content_url,
            expression_imagefile=expression_imagefile,
            broadcast_parent_id=broadcast_parent_id,
            expression_weight=expression_weight,
            total_upvotes=total_upvotes,
            total_collects=total_collects,
            total_broadcasts=total_broadcasts,
            total_discussions=total_discussions,
        )
        return expression.id
class ExpressionPrimary(models.Model):
    """A user 'expression' (post) plus denormalized engagement counters."""

    # Owner's user id, stored as a plain string (not a foreign key).
    expression_owner_id = models.CharField(
        max_length=12,
    )
    expression_content = models.CharField(
        max_length=10000,
    )
    expression_content_url = models.CharField(
        max_length=100,
        default=None,
        null=True,
    )
    # Path/name of an attached image, if any.
    expression_imagefile = models.CharField(
        max_length=100,
        default=None,
        null=True,
    )
    # Ranking weight used by feeds.
    expression_weight = models.DecimalField (
        default=0,
        max_digits=15,
        decimal_places=10
    )
    # Id of the expression this one re-broadcasts, if any.
    broadcast_parent_id = models.CharField(
        max_length=20,
        default=None,
        null=True,
    )
    total_upvotes = models.IntegerField(
        default=0,
    )
    total_broadcasts = models.IntegerField(
        default=0,
    )
    total_discussions = models.IntegerField(
        default=0,
    )
    total_collects = models.IntegerField(
        default=0,
    )
    # NOTE(review): auto_now_add freezes this at creation time; an "updated"
    # timestamp usually wants auto_now=True — confirm before changing, since a
    # migration would be required.
    expression_updated = models.DateTimeField(
        auto_now_add=True,
    )
    expression_created = models.DateTimeField(
        auto_now_add=True,
    )

    objects = ExpressionPrimaryManager()
from django.contrib import admin
# BUG FIX: the original line `from .models from User` is a SyntaxError and
# never imported Article, which is registered below.
from .models import Article

# Register your models here.
admin.site.register(Article)
|
"""my_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from two_factor.urls import urlpatterns as tf_urls
from two_factor.views import LoginView
from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('accounts.urls')),
    path('', TemplateView.as_view(template_name='home.html'), name='home'),
    path('password/', include('password.urls')),
    path('about/', TemplateView.as_view(template_name='about.html'), name='about'),
    path('api/v1/', include('api.urls')),
    # Declared before the two-factor urls so this login view takes precedence.
    path('accounts/login/', LoginView.as_view(), name='login'),
    path('', include(tf_urls)),
    # CONSISTENCY FIX: use path() like every other entry; url() is deprecated
    # (removed in Django 4.0) and url(r'', ...) matches the same empty prefix.
    path('', include(tf_twilio_urls)),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import SCDM.TD3_plus_demos.TD3 as TD3
import dexterous_gym
import gym
import numpy as np
import time
filename = "models/TD3_PenSpin-v0_0_beta_0_7_norm"
beta = 0.7
env_name = "PenSpin-v0"
env = gym.make("PenSpin-v0")
steps = 1000 #long run, "standard" episode is 250
def eval_policy(policy, env_name, seed, eval_episodes=1, render=True, delay=0.0):
    """Run `policy` for `eval_episodes` episodes and return the average
    (undiscounted) episode return.

    NOTE(review): the episode length comes from the module-global `steps`,
    not a parameter; `done` from the env is ignored on purpose (fixed-length
    rollouts).
    """
    eval_env = gym.make(env_name)
    # Offset the seed so evaluation never replays training episodes.
    eval_env.seed(seed + 100)
    avg_reward = 0.
    for _ in range(eval_episodes):
        state = eval_env.reset()
        if render:
            eval_env.render()
            time.sleep(delay)
        num_steps = 0
        # The policy conditions on the previous action; start from zeros.
        prev_action = np.zeros((eval_env.action_space.shape[0],))
        while num_steps < steps:
            action = policy.select_action(np.array(state), prev_action)
            state, reward, done, _ = eval_env.step(action)
            if render:
                eval_env.render()
                time.sleep(delay)
            prev_action = action.copy()
            avg_reward += reward
            num_steps += 1
        print(num_steps)
    avg_reward /= eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
# Build the policy with dimensions taken from the environment created above.
kwargs = {
    "state_dim": env.observation_space.shape[0],
    "action_dim": env.action_space.shape[0],
    "beta": beta,
    "max_action": 1.0
}
policy = TD3.TD3(**kwargs)
policy.load(filename)
# Render one 1000-step episode with a 30 ms delay between frames.
eval_policy(policy, env_name, seed=0, eval_episodes=1, render=True, delay=0.03)
import numpy as np
from IPython import embed
from matplotlib import pyplot as plt
from math import hypot
from skimage import draw
class MapEnvironment(object):
    """Occupancy-grid planning environment: 0 = free cell, nonzero = obstacle."""

    def __init__(self, mapfile, start, goal):
        # Obtain the boundary limits.
        self.goal = goal
        self.map = np.loadtxt(mapfile)
        self.xlimit = [1, np.shape(self.map)[0]]  # TODO (avk): Check if this needs to flip.
        self.ylimit = [1, np.shape(self.map)[1]]
        # Check if start and goal are within limits and collision free.
        # BUG FIX: the original followed `raise` with an unreachable exit(0)
        # and a stray trailing semicolon.
        if not self.state_validity_checker(start) or not self.state_validity_checker(goal):
            raise ValueError('Start and Goal state must be within the map limits')
        # Display the map
        plt.imshow(self.map, interpolation='nearest')

    def compute_distance(self, start_config, end_config):
        """Euclidean distance between two (row, col) configurations."""
        return hypot(start_config[0] - end_config[0], start_config[1] - end_config[1])

    def state_validity_checker(self, config):
        """Return True when `config` lies on a free (zero) cell."""
        return bool(self.map[tuple(config)] == 0)

    def edge_validity_checker(self, config1, config2):
        """Return True when the straight line between the configs is free."""
        line = draw.line(config1[0], config1[1],
                         config2[0], config2[1])
        blocked = np.where(self.map[line] >= 1)
        return bool(np.size(blocked) == 0)

    def compute_heuristic(self, config):
        """Heuristic for search: straight-line distance to the goal."""
        return self.compute_distance(config, self.goal)

    def visualize_plan(self, plan=None, visited=None, tree=None, title=None):
        '''
        Visualize the final path
        @param plan Sequence of states defining the plan.
        '''
        plt.imshow(self.map, interpolation='nearest', cmap='Greys')
        if visited is not None:
            plt.imshow(visited)
        elif tree is not None:
            nodes = tree.vertices
            # Draw each tree edge in green (note: plot takes (col, row)).
            for k, v in tree.edges.items():
                plt.plot([nodes[k][1], nodes[v][1]], [
                    nodes[k][0], nodes[v][0]], "-g")
        if plan is not None:
            for i in range(np.shape(plan)[0] - 1):
                x = [plan[i, 0], plan[i + 1, 0]]
                y = [plan[i, 1], plan[i + 1, 1]]
                plt.plot(y, x, 'r')
        if title: plt.title(title)
        plt.show()
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# source = cv2.imread("D:/Users/84460/Desktop/Oracle_Split/picture/003.png",0) # 读图片
def file_name(dir_path):
    """Return (stems, filenames) for the entries of `dir_path`.

    BUG FIX: the original printed os.listdir(dir_path) but never assigned it,
    so it always iterated an empty list and returned two empty lists.
    """
    f_name = []
    f_namelist = os.listdir(dir_path)
    print(f_namelist)
    for i in f_namelist:  # strip the extension
        # NOTE(review): rfind returns -1 for names without '.', which drops
        # the last character — confirm all inputs have extensions.
        index = i.rfind('.')
        f_name.append(i[:index])
    return f_name, f_namelist
save_path = "D:/Users/84460/Desktop/Oracle_Split/picture/"
f_name, f_namelist = file_name(save_path)
image_len = len(f_name)
peek_ranges = [(17, 53), (67, 98), (113, 144), (160, 191), (210, 216), (229, 236), (256, 262), (276, 282),(302,322)]
#对文字分割进行整理,去除上下结构分割错误
a_len = 10 #两字间距
a_len_e = 15 #例如“ 一二 ”的两字间距更大一点
b_len = 18 #单字高度最低限
#合并上下结构文字
peek_ranges_new = []
peek_ranges = np.array(peek_ranges)
peek_ranges = peek_ranges.flatten()
peek_ranges_new.append(peek_ranges[0])
flag=0
for i in range(1,len(peek_ranges) - 1):
if(flag==0):
if(peek_ranges[i+1] - peek_ranges[i] > a_len):
peek_ranges_new.append(peek_ranges[i])
peek_ranges_new.append(peek_ranges[i+1])
flag = 1
else:
flag -= 1
peek_ranges_new.append(peek_ranges[len(peek_ranges)-1])
#合并 “二”
peek_ranges_new1 = []
flag=0
end_flag=0
end_temp=0
for i in range(len(peek_ranges_new)):
print("i:", i,"\t",len(peek_ranges_new)," ",peek_ranges_new[i])
if(flag==0):
if(peek_ranges_new[i+1] - peek_ranges_new[i] > b_len):#判断是否是结构件(包括 “一” ) (>:否, <:是)
if(end_flag == 1):#标志位判断上一个是否是结构件,识别类似结构件高度的文字 (例如“一”)
print("111","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
end_flag = 0
end_temp = peek_ranges_new[i - 1]
peek_ranges_new1.append(end_temp)
print("222","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
peek_ranges_new1.append(peek_ranges_new[i])
peek_ranges_new1.append(peek_ranges_new[i+1])
else:
if(end_flag == 0): #识别两个以上结构件
print("333","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
end_flag = 1
peek_ranges_new1.append(peek_ranges_new[i])
end_temp = peek_ranges_new[i+1]
elif(end_flag == 1 and peek_ranges_new[i] - peek_ranges_new[i-1] < b_len): #判断间距,两个结构件
print("444","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
end_temp = peek_ranges_new[i+1]
elif(end_flag == 1 and peek_ranges_new[i] - peek_ranges_new[i-1] >= a_len_e): #判断间距,两个文字
print("555","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
peek_ranges_new1.append(end_temp)
end_flag = 0
peek_ranges_new1.append(peek_ranges_new[i])
end_temp = peek_ranges_new[i+1]
end_flag = 1
if(end_flag == 1 and i==(len(peek_ranges_new)-2)):
print("666","\t" ,i ,"\t",peek_ranges_new[i],"\t",end_temp)
peek_ranges_new1.append(end_temp)
flag = 1
else:
flag -=1
print(end_temp)
print(peek_ranges_new,type(peek_ranges_new))
print(peek_ranges_new1,type(peek_ranges_new1)) |
/Users/matthewpeterson/anaconda3/lib/python3.7/hmac.py |
from django.contrib import admin
from .models import (
Faculty,
Profile,
AppraiseeComment,
AppraiserAndAppraiseeAgreement,
Competence,
OverallPerformance,
Performance,
AppraiserComment,
VcComment,
Department
)
class AppraiserAndAppraiseeAgreementAdmin(admin.ModelAdmin):
    # Columns shown on the admin changelist for appraisal agreements.
    list_display=['competence', 'key_outputs', 'self_rating', 'supervisor_rating', 'agreed_rating']
    # list_filter=['self_rating', 'supervisor_rating']
# class CompetenceAdmin(admin.ModelAdmin):
# list_display=['profession_skill', 'overallp']
# list_filter=['profession_skill','planning']
# Register your models here.
# The agreement model gets its customized ModelAdmin; the rest use defaults.
admin.site.register(Faculty)
admin.site.register(Profile)
admin.site.register(AppraiseeComment)
admin.site.register(AppraiserAndAppraiseeAgreement, AppraiserAndAppraiseeAgreementAdmin)
admin.site.register(OverallPerformance)
admin.site.register(Performance)
admin.site.register(Competence)
admin.site.register(VcComment)
admin.site.register(Department)
admin.site.register(AppraiserComment)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 2 script: read n and a list, print the length of the longest
# strictly increasing contiguous run.
n=int(raw_input())
l=map(int,raw_input().split())
# chk = length of the current run; ans = best run seen so far.
ans=chk=1
tmp=-1
for i in l:
    if tmp==-1:
        # Sentinel for the first element: nothing to compare against yet.
        # NOTE(review): an actual input value of -1 also resets the
        # comparison — confirm inputs are non-negative.
        pass
    elif i>tmp:
        chk+=1
    else:
        chk=1
    tmp=i
    ans=max(ans,chk)
print ans
|
# coding:utf-8
import itchat
import math
import PIL.Image as Image
import os
# Log into WeChat, download every friend's avatar, tile them into a 640x640
# collage and send it to the file helper.
itchat.auto_login(hotReload=True)
friends = itchat.get_friends(update=True)[0:]
user = friends[0]["UserName"]  # NOTE(review): unused local
num = 0
# Download each avatar to D:/<num>.jpg.
for i in friends:
    img = itchat.get_head_img(userName=i["UserName"])
    fileImage = open('D:' + "/" + str(num) + ".jpg", 'wb')
    fileImage.write(img)
    fileImage.close()
    num += 1
ls = os.listdir('D:')
# Cell size so that len(ls) tiles roughly fill a 640x640 canvas.
each_size = int(math.sqrt(float(640 * 640) / len(ls)))
lines = int(640 / each_size)
image = Image.new('RGBA', (640, 640))
x = 0
y = 0
# NOTE(review): range(len(ls) + 1) overshoots by one index; the IOError
# handler silently skips any missing file.
for i in range(0, len(ls) + 1):
    try:
        img = Image.open('D:' + "/" + str(i) + ".jpg")
    except IOError:
        print("Error")
    else:
        img = img.resize((each_size, each_size), Image.ANTIALIAS)
        image.paste(img, (x * each_size, y * each_size))
        x += 1
        # Wrap to the next row once the current one is full.
        if x == lines:
            x = 0
            y += 1
image.save('D:' + "/" + "all.jpg")
itchat.send_image('D:' + "/" + "all.jpg", 'filehelper')
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test4.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import requests
import re
from pyquery import PyQuery as pq
from Pyquery的应用.百度百科API.baike_baidu import Baike_baidu as bk
class Ui_Frame(object):
    """pyuic5-generated UI: a search box, a button and a results browser."""

    def setupUi(self, Frame):
        # Build the static widget tree; geometry values come from test4.ui.
        Frame.setObjectName("Frame")
        Frame.resize(790, 607)
        self.textEdit = QtWidgets.QTextEdit(Frame)
        self.textEdit.setGeometry(QtCore.QRect(80, 40, 411, 31))
        self.textEdit.setObjectName("textEdit")
        self.pushButton = QtWidgets.QPushButton(Frame)
        self.pushButton.setGeometry(QtCore.QRect(550, 40, 93, 28))
        self.pushButton.setObjectName("pushButton")
        self.textBrowser = QtWidgets.QTextBrowser(Frame)
        self.textBrowser.setGeometry(QtCore.QRect(80, 120, 651, 421))
        self.textBrowser.setObjectName("textBrowser")
        self.retranslateUi(Frame)
        # NOTE(review): connecting windowIconTextChanged to the button's click
        # looks like a Designer artifact — confirm this signal is intended.
        self.textEdit.windowIconTextChanged['QString'].connect(self.pushButton.click)
        self.pushButton.clicked.connect(self.btn_click)
        QtCore.QMetaObject.connectSlotsByName(Frame)

    def retranslateUi(self, Frame):
        # Install the translatable UI strings.
        _translate = QtCore.QCoreApplication.translate
        Frame.setWindowTitle(_translate("Frame", "Frame"))
        self.pushButton.setText(_translate("Frame", "PushButton"))

    def btn_click(self):
        # Look up the entered keyword via the Baidu Baike helper and show
        # the summary text in the browser pane.
        key=self.textEdit.toPlainText()
        a=bk(key).search_news_Summary()
        self.textBrowser.insertPlainText(a)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
widget = QtWidgets.QWidget()
ui = Ui_Frame()
ui.setupUi(widget)
widget.show()
sys.exit(app.exec_()) |
# Generated by Django 3.0.5 on 2020-10-20 11:24
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `complete_at` column from `log` (reverts migration 0004)."""

    dependencies = [
        ('log', '0004_log_complete_at'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='log',
            name='complete_at',
        ),
    ]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: shoumuzyq@gmail.com
# https://shoumu.github.io
# Created on 2015/10/27 10:46
def win_nim(n):
    """Return True when the first player wins Nim with `n` stones.

    In the take-1-to-3 game, the first player loses exactly when n is a
    multiple of 4.

    BUG FIX: the original used `n % 4 is 0`, comparing ints by identity —
    that only works due to CPython's small-int cache and raises a
    SyntaxWarning on modern Python. Use a value comparison.
    """
    return n % 4 != 0
# Expected output: False (4 stones is a losing position), True (5 is winning).
print(win_nim(4))
print(win_nim(5))
|
import nltk
class Stemming:
    """Applies Portuguese RSLP stemming to the 'text' column of a DataFrame."""

    def execute(self, dataframe, execute):
        """Stem every word of each row's 'text' when `execute` is truthy.

        Returns the (mutated) dataframe either way.
        """
        if execute:
            print("Stemming words")
            nltk.download('rslp')
            stemmer = nltk.stem.RSLPStemmer()
            for index, row in dataframe.iterrows():
                text = ""
                for w in row['text'].split():
                    text += stemmer.stem(w) + " "
                # BUG FIX: iterrows() yields copies, so the original
                # `row['text'] = text` never updated the DataFrame; write
                # back through .at instead.
                dataframe.at[index, 'text'] = text
        return dataframe
import time
from datetime import datetime
from IPython.display import HTML, display
# Paths and filenames for saving models/output
# path = '/home/jupyter/CSE253_FinalProject/Logistic_Regression/'
# BUG FIX: the directory needs a trailing slash — without it every filename
# below was glued onto the directory name
# (e.g. ".../Logistic_Regressionmodel_output_...").
path = '/content/Logistic_Regression/'
dt = datetime.now().strftime("%m_%d_%H_%M")
output_fn = path + "model_output_" + dt + ".txt"
captions_fn = path + "model_captions_" + dt + ".txt"
best_model_fn = path + "best_model_" + dt + ".pt"
model_fn = path + "model_" + dt + ".pt"
def print_info(out_str):
    """Echo `out_str` to stdout and append it to the run's output file."""
    # `with` guarantees the handle is closed even if write() raises
    # (the original left the file open on an exception).
    with open(output_fn, "a") as f:
        print(out_str)
        f.write(out_str)
"""
check_dims
Checks that the batch is of dimensions Nx1x725
"""
def check_dims(batch):
if (batch.size(0) == 1): return batch
return batch.unsqueeze(1)
class ProgressMonitor(object):
    """
    Custom IPython progress bar for training
    """
    # HTML template re-rendered on every update; `value`/`length` drive the
    # <progress> element and `loss` is displayed above it.
    tmpl = """
<p>Loss: {loss:0.4f} {value} / {length}</p>
<progress value='{value}' max='{length}', style='width: 100%'>{value}</progress>
"""

    def __init__(self, length):
        # length: total number of items expected; count: items seen so far.
        self.length = length
        self.count = 0
        # display_id=True returns a handle that can be updated in place.
        self.display = display(self.html(0, 0), display_id=True)

    def html(self, count, loss):
        # Render the template into an IPython HTML object.
        return HTML(self.tmpl.format(length=self.length, value=count, loss=loss))

    def update(self, count, loss):
        # `count` is an increment (items processed since last call),
        # not an absolute position.
        self.count += count
        self.display.update(self.html(self.count, loss))
class AverageBase(object):
    """Base class for scalar running-statistic trackers.

    Subclasses keep `self.value` current; this class supplies the numeric
    and string protocol so trackers format naturally in logs.
    """

    def __init__(self, value=0):
        # `None` marks "no observations yet" (used by MovingAverage).
        self.value = float(value) if value is not None else None

    def __str__(self):
        return str(round(self.value, 4))

    def __repr__(self):
        # Bug fix: __repr__ must return a str; the original returned the
        # raw float, making repr() / interactive display raise TypeError.
        return repr(self.value)

    def __format__(self, fmt):
        return self.value.__format__(fmt)

    def __float__(self):
        return self.value
class RunningAverage(AverageBase):
    """
    Keeps track of a cumulative moving average (CMA).
    """

    def __init__(self, value=0, count=0):
        super(RunningAverage, self).__init__(value)
        self.count = count

    def update(self, value):
        # Same arithmetic as (old * n + x) / (n + 1), computed stepwise.
        total = self.value * self.count + float(value)
        self.count += 1
        self.value = total / self.count
        return self.value
class MovingAverage(AverageBase):
    """
    An exponentially decaying moving average (EMA).
    """

    def __init__(self, alpha=0.99):
        # Start with no value; the first update seeds the average directly.
        super(MovingAverage, self).__init__(None)
        self.alpha = alpha

    def update(self, value):
        sample = float(value)
        if self.value is None:
            self.value = sample
        else:
            self.value = self.alpha * self.value + (1 - self.alpha) * sample
        return self.value
def save_checkpoint(optimizer, model, epoch, filename):
    """Serialize optimizer/model state plus the epoch counter to `filename`."""
    state = {
        'optimizer': optimizer.state_dict(),
        'model': model.state_dict(),
        'epoch': epoch,
    }
    torch.save(state, filename)
def load_checkpoint(optimizer, model, filename):
    """Restore model (and, when given, optimizer) state from `filename`.

    Returns the epoch number stored in the checkpoint.
    """
    checkpoint_dict = torch.load(filename)
    epoch = checkpoint_dict['epoch']
    model.load_state_dict(checkpoint_dict['model'])
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    # Bug fix: the original returned the undefined name `epoc`, raising
    # NameError on every successful load.
    return epoch
|
from torch import tensor
from torch.nn.utils.rnn import pad_sequence
class TweetPadCollate:
    """Collate (sequence, label) pairs into a padded batch plus label tensor."""

    def __init__(self, pad_idx):
        # Index used to fill short sequences up to the batch maximum length.
        self.pad_idx = pad_idx

    def __call__(self, batch):
        sequences = [item[0] for item in batch]
        labels = [item[1] for item in batch]
        padded = pad_sequence(sequences, batch_first=True,
                              padding_value=self.pad_idx)
        return padded, tensor(labels)
import cv2
import glob
images=glob.glob("*.jpg")
for image in images:
img= cv2.imread(image,1)
img2 = cv2.resize(img,(100,100))
cv2.imshow("resized_image",img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite("resized_image"+image,img2)
|
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import threading
# Restrict XML-RPC to the /RPC2 path only, so other paths cannot be accessed.
class RequestHandler(SimpleXMLRPCRequestHandler):
    # Paths accepted for RPC requests; anything else is rejected.
    rpc_paths = ('/RPC2',)
# Create the server
with SimpleXMLRPCServer(("localhost", 8080),
                        requestHandler=RequestHandler) as server:
    server.register_introspection_functions()
    # Vote tallies, guarded by `lock` against concurrent request handling.
    candidate_dict = {'candidate_1': 0, 'candidate_2': 0}
    lock = threading.Lock()

    # Register a function under a different name
    def vote_candidate(x):
        # Record one vote for candidate `x`; returns an Indonesian status
        # message ("you voted for ..." / "candidate not in the list").
        lock.acquire()
        if candidate_dict.get(x) != None:
            candidate_dict[x] = candidate_dict[x] + 1
            pesan = "Anda telah memilih " + x
            lock.release()
            return pesan
        # Unknown candidate: release the lock before reporting the error.
        pesan = "Anda memilih kandidat "+x+" yang tidak ada dalam list"
        lock.release()
        return pesan
    server.register_function(vote_candidate, 'vote')

    def querry_result():
        # Report each candidate's share of the total vote as a percentage.
        lock.acquire()
        total = 0
        for i in candidate_dict:
            total = total + candidate_dict[i]
        if total == 0:
            # No votes cast yet; bail out early.
            lock.release()
            return "Anda memilih kandidat yang tidak terdaftar"
        pesan = ""
        for i in candidate_dict:
            hasil_vote = (candidate_dict[i]/total) * 100
            pesan = pesan + i + " memperoleh " + str(hasil_vote) + " %\n"
        lock.release()
        return pesan
    server.register_function(querry_result, 'querry')

    print("Server voting berjalan...")
    # Run the server's main loop
    server.serve_forever()
def bellNumber(n):
    """Return the n-th Bell number via the Bell triangle.

    Each row starts with the last entry of the previous row, and every
    other entry is the sum of the entry above and the entry to the left.
    """
    bell = [[0 for i in range(n+1)] for j in range(n+1)]
    bell[0][0] = 1
    for i in range(1, n+1):
        bell[i][0] = bell[i-1][i-1]
        for j in range(1, i+1):
            # Bug fix: the original assigned the whole previous row
            # (`bell[i-1]`) here instead of applying the triangle
            # recurrence, producing nonsense values.
            bell[i][j] = bell[i-1][j-1] + bell[i][j-1]
    return bell[n][0]


for n in range(4):
    print('Bell Number', n, 'is', bellNumber(n))
!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/elements1_20.txt -o elements1.txt
def get_names():
    """Prompt until `input_list` (module global) holds 5 unique element names.

    Blank entries are ignored; duplicates are rejected with a warning.
    Returns the filled list.
    """
    while len(input_list) < 5:
        entry = input("Enter the name of an element: ").strip().lower()
        if not entry:
            continue
        if entry in input_list:
            print(entry, "was already entered <--no duplicates allowed")
        else:
            input_list.append(entry)
    return input_list
# Quiz the user on element names (5 answers, 20% each) against elements1.txt.
fh = open('elements1.txt', 'r')
input_list = []
file_list = []
Found_list = []
Found_not_list = []
# Normalize each file line to lower case. (The original chained
# .upper().lower(), which is equivalent to .lower() alone.)
file_string = fh.readline().strip("\n").lower()
get_names()
# readline() returns '' at EOF -- never None -- so the plain truth test is
# the whole loop condition (the original's `is None` branch was dead code).
while file_string:
    file_list.append(file_string)
    file_string = fh.readline().strip("\n").lower()
fh.close()
# Partition the user's answers into found / not-found.
for input_line in range(len(input_list)):
    temp_comp = input_list[input_line]
    if temp_comp in file_list:
        Found_list.append(input_list[input_line])
    else:
        Found_not_list.append(input_list[input_line])
# len() is already an int; the original wrapped it in a redundant int().
correct_ans = len(Found_list) * 20
print(correct_ans, " %", " correct")
print("Found : ", ' '.join(Found_list).title())
print("Not Found: ", ' '.join(Found_not_list).title())
import numpy as np
import ast
file = open("evaluator_1_input_script_out.txt")
contents = file.read()
ackley = ast.literal_eval(contents)
print({"ackley": ackley})
|
# Prints a status line (Russian: "I am homework, running check 2").
print('Я домашка, делаю проверку 2')
|
# Read five judges' scores (expected 0-10) and print their average.
# The variable `i` acts as a loop flag: 2 = keep asking, 6 = done.
i=2
while i<4:
    one=int(input("Enter the judge #1's score:"))
    if one>10:
        print('Please enter a range from 0-10')
        i=2
    else:
        two=int(input("Enter the judge #2's score:"))
        if two>10:
            print('Please enter a range from 0-10')
            i=2
        else:
            three=int(input("Enter the judge #3's score:"))
            if three>10:
                print('Please enter a range from 0-10')
                i=2
            else:
                four=int(input("Enter the judge #4's score:"))
                if four>10:
                    print('Please enter a range from 0-10')
                    i=2
                else:
                    five=int(input("Enter the judge #5's score:"))
                    if five>10:
                        print('Please enter a range from 0-10')
                        i=2
                    else:
                        # NOTE(review): this re-check can never fire -- every
                        # score was already verified <= 10 above.
                        if one>10 or two>10 or three>10 or four>10 or five>10:
                            print('Please enter a range from 0-10')
                            i=2
                        # NOTE(review): the strict bounds 10>x>0 reject scores
                        # of exactly 0 or 10 although the prompt says "0-10";
                        # entering such a value silently restarts the loop.
                        # Confirm whether the bounds should be inclusive.
                        if (10>one>0) and (10>two>0) and (10>three>0) and (10>four>0) and (10>five>0):
                            avg=(one+two+three+four+five)/5
                            print('The average score is','%0.2f' %(avg))
                            i=6
# A list of numbers is given. If it has two adjacent
# elements of the same sign, print these numbers.
s = list(map(int, input().split()))  # parse one line of whitespace-separated integers
def Same(s):
    """Print the first pair of adjacent same-sign elements of `s`, if any.

    Only the first matching pair is reported (the loop breaks after it);
    nothing is printed when no such pair exists.
    """
    for i in range((len(s) - 1)):
        x1 = int(s[i])
        x2 = int(s[i + 1])
        # Bug fix: the original tested `x1 * x2 >= 0`, which also matches
        # pairs containing 0; zero has no sign, so a strictly positive
        # product is required for "same sign".
        if x1 * x2 > 0:
            print(x1, x2)
            break
Same(s)  # report the first adjacent same-sign pair of the input list
|
from pyplasm import *
from pyplasm import *
import scipy
from scipy import *
# Replicate the vertex set V once per value in `coords`, appending each
# coordinate as a new last component of every vertex.
# NOTE(review): uses Python 2 tuple-parameter syntax -- this module only
# runs under Python 2 (consistent with the pyplasm-era code below).
def VERTEXTRUDE((V,coords)):
    return CAT(AA(COMP([AA(AR),DISTR]))(DISTL([V,coords])))
def larExtrude(model, pattern):
    """Extrude a LAR `model` (V, FV) along `pattern`.

    `pattern` is a list of signed step sizes: positive entries generate
    cells, non-positive entries leave holes. Returns the extruded model.
    """
    V, FV = model
    d = len(FV[0])       # number of vertices per input cell
    offset = len(V)      # vertex-index stride between extruded layers
    m = len(pattern)
    outcells = []
    for cell in FV:
        # create the indices of vertices in the cell "tube"
        tube = [v + k*offset for k in range(m+1) for v in cell]
        # take groups of d+1 elements, via shifting by one
        rangelimit = len(tube)-d
        cellTube = [tube[k:k+d+1] for k in range(rangelimit)]
        outcells += [scipy.reshape(cellTube,newshape=(m,d,d+1)).tolist()]
    # Regroup cells by layer, then drop layers whose pattern entry is <= 0.
    outcells = AA(CAT)(TRANS(outcells))
    outcells = [group for k,group in enumerate(outcells) if pattern[k]>0 ]
    # Cumulative |pattern| values give the coordinates of the new layers.
    coords = list(cumsum([0]+(AA(ABS)(pattern))))
    outVerts = VERTEXTRUDE((V,coords))
    newModel = outVerts, CAT(outcells)
    return newModel
def GRID(args):
    """Build a len(args)-dimensional unit grid by repeated 1-step extrusion."""
    model = ([[]],[[0]])
    for k,steps in enumerate(args):
        model = larExtrude(model,steps*[1])
    V,cells = model
    # Normalize vertex coordinates into the unit hypercube.
    verts = AA(list)(scipy.array(V) / AA(float)(args))
    # MKPOL expects 1-based vertex indices, hence the +1 shift.
    return MKPOL([verts, AA(AA(lambda h:h+1))(cells), None])
# ---------------------------------------------------------------------------
# Chair model assembled from Bezier surfaces, part by part, and VIEWed at
# the end. The domains below parameterize all of the MAP'd surfaces.
# ---------------------------------------------------------------------------
Dom1D = INTERVALS(1)(10)
Dom2D= GRID([10,10])
Inverted_Dom2D = MAP([S2,S1])(GRID([10,10]))  # same grid with axes swapped
D1 = INTERVALS(1)(40);
D2 = INTERVALS(PI/2)(40);
rot_domain = PROD([D1,D2]);  # domain for the rotational surfaces
#BACK SUPPORT
p1 = [[2,0,2],[1.98,0,1.85],[1.78,0,1.85],[1.8,0,2]]
c1 = BEZIER(S1)(p1)
mapp1 = ROTATIONALSURFACE(c1)
s1 = MAP(mapp1)(rot_domain)
p2 = [[2,0,2],[1.98,0,2.15],[1.78,0,2.15],[1.8,0,2]]
c2 = BEZIER(S1)(p2)
mapp2 = ROTATIONALSURFACE(c2)
s2 = MAP(mapp2)(rot_domain)
sx_back_support = STRUCT([s1,s2])
dx_back_support = R([1,2])(PI/2)(sx_back_support)
#ARMRESTS
p3 = [[2,-1,2],[1.98,-1,1.85],[1.78,-1,1.85],[1.8,-1,2]]
c3 = BEZIER(S1)(p3)
p4 = [[2,-1,2],[1.98,-1,2.15],[1.78,-1,2.15],[1.8,-1,2]]
c4 = BEZIER(S1)(p4)
s13 = BEZIER(S2)([c1,c3])
surf13 = MAP(s13)(Dom2D)
s24 = BEZIER(S2)([c2,c4])
surf24 = MAP(s24)(Dom2D)
sx_armrest = STRUCT([surf13,surf24])
dx_arst = R([1,2])(PI)(sx_armrest)
dx_armrest = T([2])([-1])(dx_arst)
#SUPERIOR FRAME
superior_frame = STRUCT([sx_back_support,dx_back_support,sx_armrest,dx_armrest])
#FRONT TOP JUNCTIONS
p5 = [[2,-1.15,2],[2,-1,1.8],[1.8,-1,1.8],[1.8,-1.15,2]]
c5 = BEZIER(S1)(p5)
p6 = [[2,-1.15,2],[2,-1.3,2.1],[1.8,-1.3,2.1],[1.8,-1.15,2]]
c6 = BEZIER(S1)(p6)
p7 = [[2.02,-1.15,1.8],[2,-1,1.8],[1.8,-1,1.8],[1.78,-1.15,1.8]]
c7 = BEZIER(S1)(p7)
p8 = [[2.02,-1.15,1.8],[2,-1.3,1.8],[1.8,-1.3,1.8],[1.78,-1.15,1.8]]
c8 = BEZIER(S1)(p8)
s37 = BEZIER(S2)([c3,c5,c7])
surf37 = MAP(s37)(Dom2D)
s48 = BEZIER(S2)([c4,c6,c8])
surf48 = MAP(s48)(Dom2D)
front_top_sx_junction = STRUCT([surf37,surf48])
front_top_dx_junction = T([1])([-3.8])(front_top_sx_junction)
front_top_junctions = STRUCT([front_top_sx_junction,front_top_dx_junction])
#VERTICAL FRONT SUPPORTS
p9 = [[2.02,-1.15,-3.2],[2,-1,-3.2],[1.8,-1,-3.2],[1.78,-1.15,-3.2]]
c9 = BEZIER(S1)(p9)
p10 = [[2.02,-1.15,-3.2],[2,-1.3,-3.2],[1.8,-1.3,-3.2],[1.78,-1.15,-3.2]]
c10 = BEZIER(S1)(p10)
s79 = BEZIER(S2)([c7,c9])
surf79 = MAP(s79)(Dom2D)
s810 = BEZIER(S2)([c8,c10])
surf810 = MAP(s810)(Dom2D)
sx_front_vertical_support = STRUCT([surf79,surf810])
dx_front_vertical_support = T([1])([-3.8])(sx_front_vertical_support)
cfs = R([1,2])(-PI/2)(sx_armrest)
central_front_support1 = T([1,2,3])([-0.9,0.75,-2])(cfs)
central_front_support2 = T([1])([1])(central_front_support1)
central_front_support3 = T([1])([1])(central_front_support2)
central_front_support4 = T([1])([0.85])(central_front_support3)
central_front_support = T([3])([0.15])(STRUCT([central_front_support1,central_front_support2,central_front_support3,central_front_support4]))
front_vertical_supports = STRUCT([sx_front_vertical_support,dx_front_vertical_support,central_front_support])
#FRONT BOTTOM JUNCTIONS
fbsj = R([2,3])(PI/2)(front_top_sx_junction)
front_bottom_sx_junction = T([2,3])([0.85,-2.2])(fbsj)
front_bottom_dx_junction = T([1])([-3.8])(front_bottom_sx_junction)
front_bottom_junctions = STRUCT([front_bottom_sx_junction,front_bottom_dx_junction])
#FOOTS
sx_foot1 = T([3])([-5.35])(sx_armrest)
dx_foot1 = T([3])([-5.35])(dx_armrest)
sx_foot2 = T([2])([1])(sx_foot1)
dx_foot2 = T([2])([1])(dx_foot1)
sx_foot3 = T([2])([1])(sx_foot2)
dx_foot3 = T([2])([1])(dx_foot2)
sx_foot4 = T([2])([1])(sx_foot3)
dx_foot4 = T([2])([1])(dx_foot3)
foots = STRUCT([sx_foot1,dx_foot1,sx_foot2,dx_foot2,sx_foot3,dx_foot3,sx_foot4,dx_foot4])
#BACK BOTTOM JUNCTIONS
bbsj = R([2,3])(PI/2)(front_bottom_sx_junction)
back_bottom_sx_junction = T([2,3])([-0.25,-2.2])(bbsj)
back_bottom_dx_junction = T([1])([-3.8])(back_bottom_sx_junction)
back_bottom_junctions = STRUCT([back_bottom_sx_junction,back_bottom_dx_junction])
#BACK TOP JUNCTIONS
btsj = R([1,2])(-PI/2)(bbsj)
back_top_sx_junction_rotated = R([2,3])(PI)(btsj)
back_top_dx_junction_rotated = R([1,3])(PI)(btsj)
back_top_sx_junction = T([1,2,3])([-2.2,-0.24,-1.15])(back_top_sx_junction_rotated)
back_top_dx_junction = T([1,2,3])([2.2,3.55,-1.15])(back_top_dx_junction_rotated)
back_top_junctions = STRUCT([back_top_sx_junction,back_top_dx_junction])
#VERTICAL BACK SUPPORT
central_back_support_traslated = T([1,2])([-0.1,2.8])(STRUCT([central_front_support2,central_front_support3]))
central_back_support_rotated = R([2,3])(PI/8)(STRUCT([back_top_junctions,central_back_support_traslated]))
central_back_support = T([2,3])([0.15,-0.5])(central_back_support_rotated)
sbs = COLOR([0.8235,0.8235,0.8235])(CYLINDER([0.095,3.6])(60))
sbs_r = R([2,3])(PI/8)(sbs)
sbs_r2 = R([1,3])(PI/13)(sbs_r)
# NOTE(review): `sx_back_support` is rebound here; the earlier surface of the
# same name was already consumed when superior_frame was built, so this is
# safe but confusing -- consider a distinct name.
sx_back_support = T([1,2,3])([1.93,3.1,-3.22])(sbs_r2)
rbs_r = R([1,3])(-PI/13)(sbs_r)
rx_back_support = T([1,2,3])([-1.92,3.1,-3.2])(rbs_r)
vertical_back_support = STRUCT([central_back_support,sx_back_support,rx_back_support])
#SEANCE
seance1 = COLOR([0.588,0.294,0])(T([1,2,3])([-1.8,-1.2,0.1])(CUBOID([3.6,2,0.2])))
p11 = [[0,0,0],[0,1.3,0],[3.6,1.3,0],[3.6,0,0]]
c11 = BEZIER(S1)(p11)
p12 = [[0,0,0.2],[0,1.3,0.2],[3.6,1.3,0.2],[3.6,0,0.2]]
c12 = BEZIER(S1)(p12)
p13 = [[0,0,0],[0,0,0],[3.6,0,0],[3.6,0,0]]
c13 = BEZIER(S1)(p13)
p14 = [[0,0,0.2],[0,0,0.2],[3.6,0,0.2],[3.6,0,0.2]]
c14 = BEZIER(S1)(p14)
s1112 = BEZIER(S2)([c11,c12])
surf1112 = MAP(s1112)(Inverted_Dom2D)
# NOTE(review): `s2` is likewise rebound below (originally a back-support
# surface); the old value was already captured by superior_frame.
s2 = COLOR([0.588,0.294,0])(surf1112)
s1113 = BEZIER(S2)([c11,c13])
surf1113 = MAP(s1113)(Inverted_Dom2D)
s3 = COLOR([0.588,0.294,0])(surf1113)
s1214 = BEZIER(S2)([c12,c14])
surf1214 = MAP(s1214)(Inverted_Dom2D)
s4 = COLOR([0.588,0.294,0])(surf1214)
seance2 = T([1,2,3])([-1.8,0.8,0.1])(STRUCT([s2,s3,s4]))
seance = T([3])([0.14])(STRUCT([seance1,seance2]))
#BACKREST
p15 = [[-0.1,0,0],[0,1,0],[1.5,1,0],[1.6,0,0]]
c15 = BEZIER(S1)(p15)
p16 = [[-0.2,0,0],[-0.2,1.2,0],[1.6,1.2,0],[1.7,0,0]]
c16 = BEZIER(S1)(p16)
p17 = [[-0.1,0,-1],[0,1,-1],[1.5,1,-1],[1.6,0,-1]]
c17 = BEZIER(S1)(p17)
p18 = [[-0.2,0,-1],[-0.2,1.2,-1],[1.6,1.2,-1],[1.7,0,-1]]
c18 = BEZIER(S1)(p18)
s1516 = BEZIER(S2)([c15,c16])
surf1516 = MAP(s1516)(Dom2D)
s1718 = BEZIER(S2)([c17,c18])
surf1718 = MAP(s1718)(Inverted_Dom2D)
s1517 = BEZIER(S2)([c15,c17])
surf1517 = MAP(s1517)(Inverted_Dom2D)
s1618 = BEZIER(S2)([c16,c18])
surf1618 = MAP(s1618)(Dom2D)
p19 = [[-0.1,0,0],[-0.2,0,0]]
c19 = BEZIER(S1)(p19)
p20 = [[-0.1,0,-1],[-0.2,0,-1]]
c20 = BEZIER(S1)(p20)
s1920 = BEZIER(S2)([c19,c20])
surf1920 = MAP(s1920)(Dom2D)
p21 = [[1.6,0,0],[1.7,0,0]]
c21 = BEZIER(S1)(p21)
p22 = [[1.6,0,-1],[1.7,0,-1]]
c22 = BEZIER(S1)(p22)
s2122 = BEZIER(S2)([c21,c22])
surf2122 = MAP(s2122)(Inverted_Dom2D)
backrest_s = S([1])([1.5])(STRUCT([surf1516,surf1718,surf1517,surf1618,surf1920,surf2122]))
backrest = COLOR([0.588,0.294,0])(T([1,2,3])([-1.1,0.9,2.5])(backrest_s))
# Assemble every part and open the pyplasm viewer.
VIEW(STRUCT([superior_frame,front_top_junctions,front_vertical_supports,front_bottom_junctions,
foots,back_bottom_junctions,vertical_back_support,seance,backrest]))
msg = "hello"
print(msg.capitalize()) |
import logging
from twisted.internet import defer
from twisted.web.client import getPage
from scrapy import Request
from scrapy.http import HtmlResponse
from scrapy.utils.misc import arg_to_iter
from crochet import setup, wait_for, TimeoutError
setup()
class FetchError(Exception):
    """Raised when page fetching fails; carries the underlying errors.

    `errors` may contain twisted Failure objects or plain strings (the
    timeout path in Pages.fetch passes strings).
    """

    status = 400

    def __init__(self, errors):
        # (The original also printed vars(error) for each error here --
        # leftover debug output, removed.)
        self.errors = errors
        self.message = str(self)

    def __str__(self):
        # Bug fix: the original unconditionally called getErrorMessage(),
        # which raises AttributeError when a plain string is passed (as
        # Pages.fetch does on timeout), crashing at the raise site.
        return '\n'.join(
            e.getErrorMessage() if hasattr(e, 'getErrorMessage') else str(e)
            for e in self.errors
        )
def get_page(times, url):
    """Fetch `url`, retrying up to `times` attempts in total.

    Returns a Deferred that fires with (url, html) on success, or errbacks
    with (url, [failures]) once all attempts are exhausted.
    """
    errors = []
    deferred = defer.Deferred()

    def run():
        # One attempt; failures are routed to error() below.
        d = getPage(url)
        d.addCallbacks(lambda html: deferred.callback((url, html)), error)

    def error(error):
        errors.append(error)
        # Simplified from `True if ... else False`; retry while attempts remain.
        retry = len(errors) < times
        # logging.warn is deprecated -- use logging.warning.
        logging.warning('Failed to get %s %d times, %s', url, len(errors),
                        'retrying' if retry else 'stop')
        run() if retry else deferred.errback((url, errors))

    run()
    return deferred
def _load_urls(urls):
    """Start a retried fetch (3 attempts) for every url; gather the results."""
    fetches = [get_page(3, url.encode('utf-8')) for url in urls]
    return defer.DeferredList(fetches)
@wait_for(timeout=50)
def load_urls(urls):
    # crochet's wait_for blocks the calling (non-reactor) thread until the
    # DeferredList fires, raising TimeoutError after 50 seconds.
    return _load_urls(urls)
class Pages(object):
    """Fetch a batch of URLs via twisted and run a scrapy spider over them."""

    def __init__(self, urls, spider):
        # `urls` may be a mapping carrying an 'urls' key, a dict of
        # {page_key: url} pairs, or a plain iterable of url strings.
        if hasattr(urls, 'get'):
            urls = urls.get('urls', [])
        if isinstance(urls, dict):
            self.urls = urls.items()
        else:
            self.urls = urls
        self.spider = spider

    def fetch(self):
        """Download every url and return the processed responses.

        Raises FetchError when the batch times out or every url failed.
        """
        try:
            responses = load_urls(self.urls)
        except TimeoutError:
            raise FetchError(['Requests timed out, try loading fewer urls'])
        results, errors = [], []
        for success, result in responses:
            if not success:
                # `result` wraps (url, [failures]); keep the last failure.
                errors.append(result.value[1][-1])
            else:
                results.extend(arg_to_iter(self.process(*result)))
        # Partial success is tolerated; only a total failure raises.
        if errors and not results:
            raise FetchError(errors)
        return results

    def process(self, url, page):
        # Wrap the raw body in an HtmlResponse so spiders can parse it.
        return HtmlResponse(url, body=page, request=Request(url))

    def extract_items(self):
        """Run the spider over each response, collecting extracted items."""
        responses = self.fetch()
        items = []
        for response in responses:
            page_key = response.meta.get('page_key') or response.url
            item = {'key': page_key, 'items': None, 'templates': None}
            # Keep only scraped items, dropping follow-up Requests.
            extracted_items = [dict(i) for i in self.spider.parse(response)
                               if not isinstance(i, Request)]
            if extracted_items:
                item['items'] = extracted_items
                item['templates'] = [i['_template'] for i in extracted_items
                                     if i.get('_template')]
            items.append(item)
        return items
|
## Scrapes historical Rotogrinders projections by game for every active player in the MLB, and
## export each day's projections as a CSV file
##
## To run this file, pip install the packages below and install the chromedriver
## application onto your computer. Then, create a PATH variable to the chromedriver
## folder (this can be done in your computer's settings).
##
## Ming Ying, 2018.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.common.by import *
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as BS
import csv, time, os, datetime
def main():
    """Scrape DraftKings pitcher projections for Apr-Aug 2017 into CSV files."""
    # Start scraping from here (any player's page will do)
    url_pitchers = "https://rotogrinders.com/projected-stats/mlb-pitcher?site=draftkings&date="
    url_hitters = "https://rotogrinders.com/projected-stats/mlb-hitter?site=draftkings&date="
    # Write CSV files to this folder: change this
    out_hitters = "C:/Users/Ming/Documents/Fantasy_Models/Historical_Projections_MLB/Roto_Hitters_New"
    out_pitchers = "C:/Users/Ming/Documents/Fantasy_Models/Historical_Projections_MLB/Roto_Pitchers_New"
    year = 2017
    months = range(4, 9)
    # Calendar days per month for the scraped season.
    days = {4:30, 5:31, 6:30, 7:31, 8:31, 9:30}
    # NOTE(review): only the pitcher scrape is invoked; url_hitters /
    # out_hitters are unused here -- presumably a second select() call for
    # hitters was intended. Confirm before deleting them.
    select(url = url_pitchers, out = out_pitchers, year = year, months = months, days = days, player_type = "pitcher")
def select(url, out, year, months, days, player_type):
    """Scrape one projections table per day and write each to a CSV in `out`.

    url: base projections URL; the ISO date string is appended per day.
    player_type: names the output files and selects the row bounds.
    """
    # Initialize Chromedriver
    driver = webdriver.Chrome("C:/Users/Ming/ChromeDriver/chromedriver.exe")
    for month in months:
        num_days = days[month]
        for day in range(1, num_days + 1):
            try:
                # Get date of interest as a string
                t = datetime.datetime(year, month, day, 0, 0)
                t = t.strftime('%Y-%m-%d')
                # Append date string to URL, and visit that URL
                current_url = url + t
                driver.get(current_url)
                wait = WebDriverWait(driver, 100)
                start = 20
                end = 30
                if player_type == "pitcher":
                    start = 16
                    end = 34
                # Grab every stats column once the table is clickable.
                final_content = []
                wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@class = 'rgt-col']")))
                new_columns = driver.find_elements_by_xpath("//div[@class = 'rgt-col']")
                columns = []
                for column in new_columns:
                    columns.append([val.text.encode('utf8') for val in column.find_elements_by_xpath("./div")])
                final_content = columns
                # Transpose columns into rows for CSV output.
                rows = zip(*final_content)
                file_name = "{}.csv".format(player_type + "_" + t)
                os.chdir(out)
                # `with` already closes the file; the original also called
                # file.close() redundantly inside this block.
                with open(file_name, "ab") as file:
                    writer = csv.writer(file)
                    writer.writerows(row for row in rows if row)
            except Exception:
                # Best-effort: skip days with no slate or page errors.
                # Narrowed from a bare `except:` so Ctrl-C still interrupts.
                pass
if __name__ == '__main__':
    main()  # run the scraper when invoked as a script
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
import cv2
from cv_bridge import CvBridge, CvBridgeError
class KinectInterface:
    """Relay Kinect HD color frames onto a 'dsampled_image' topic at ~20 Hz."""

    def __init__(self):
        # Subscribe to the HD color stream and republish incoming frames.
        self._image_sub = rospy.Subscriber('/kinect2/hd/image_color', Image, self.callback)
        self._downsampling_pub = rospy.Publisher('dsampled_image', Image, queue_size=1)
        # Rate limiter used inside the callback to cap the republish rate.
        self.rate = rospy.Rate(20)

    def callback(self, data):
        # Forward the frame unchanged, then sleep to throttle throughput.
        self._downsampling_pub.publish(data)
        # self.display(data)
        self.rate.sleep()

    def display(self, img):
        # Debug helper: render the ROS image message with OpenCV.
        cv_img = CvBridge().imgmsg_to_cv2(img, 'bgr8')
        cv2.imshow('img', cv_img)
        cv2.waitKey(1)
if __name__ == "__main__":
rospy.init_node('kinect_interface',anonymous=True)
KI = KinectInterface()
rospy.spin() |
from onegov.core.utils import module_path
from onegov.foundation import BaseTheme
class WtfsTheme(BaseTheme):
    """Foundation theme for onegov.wtfs with project-specific style imports."""

    name = 'onegov.wtfs.foundation'

    @property
    def pre_imports(self) -> list[str]:
        # Stylesheets compiled before the framework's own imports.
        return ['font-newsgot', 'wtfs-foundation-mods']

    @property
    def post_imports(self) -> list[str]:
        # Project stylesheets appended after the framework's imports.
        wtfs_styles = [
            'mixin',
            'header',
            'footer',
            'form',
            'chosen',
            'table',
            'alert',
            'wtfs',
        ]
        return super().post_imports + wtfs_styles

    @property
    def extra_search_paths(self) -> list[str]:
        base_paths = super().extra_search_paths
        own_styles = module_path('onegov.wtfs.theme', 'styles')
        return [own_styles] + base_paths
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
import logging
import os
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class PreTrainedEncoderDecoder(nn.Module):
r"""
:class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be
instantiated as a transformer architecture with one of the base model
classes of the library as encoder and (optionally) another one as
decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
"""
def __init__(self, encoder, decoder):
    """Wrap pre-built `encoder` and `decoder` modules into one seq2seq model."""
    super().__init__()
    self.encoder = encoder
    self.decoder = decoder
    # manually set the self.config
    self.config = decoder.config
    # Mark the combined model so downstream code treats it as seq2seq.
    self.config.is_encoder_decoder = True
@classmethod
def from_pretrained(
    cls,
    encoder_pretrained_model_name_or_path=None,
    decoder_pretrained_model_name_or_path=None,
    *model_args,
    **kwargs,
):
    r"""Instantiate an encoder and a decoder from pre-trained checkpoints.

    Each path/name argument accepts anything ``AutoModel.from_pretrained``
    does: a shortcut name (``bert-base-uncased``), a user-uploaded S3
    identifier, a local directory produced by
    :func:`~transformers.PreTrainedModel.save_pretrained`, or a TensorFlow
    index checkpoint (with ``from_tf=True`` and an explicit ``config``).

    Keyword arguments prefixed with ``encoder_`` / ``decoder_`` are routed
    to the corresponding sub-model with the prefix stripped; unprefixed
    kwargs go to both, with the prefixed ones winning on conflict. A
    pre-built sub-model can be injected via ``encoder_model=...`` /
    ``decoder_model=...`` to skip loading entirely.

    The models are returned in evaluation mode by default; call
    ``model.train()`` before fine-tuning.

    Examples::

        # For example purposes. Not runnable.
        model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
    """
    # keyword arguments come in 3 flavors: encoder-specific (prefixed by
    # `encoder_`), decoder-specific (prefixed by `decoder_`) and those
    # that apply to the model as a whole.
    # We let the specific kwargs override the common ones in case of conflict.
    kwargs_common = {
        argument: value
        for argument, value in kwargs.items()
        if not argument.startswith("encoder_")
        and not argument.startswith("decoder_")
    }
    kwargs_decoder = kwargs_common.copy()
    kwargs_encoder = kwargs_common.copy()
    kwargs_encoder.update(
        {
            argument[len("encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("encoder_")
        }
    )
    kwargs_decoder.update(
        {
            argument[len("decoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("decoder_")
        }
    )
    # Load and initialize the encoder and decoder
    # The distinction between encoder and decoder at the model level is made
    # by the value of the flag `is_decoder` that we need to set correctly.
    encoder = kwargs_encoder.pop("model", None)
    if encoder is None:
        # NOTE(review): AutoModel / AutoModelWithLMHead are not imported in
        # this module as shown -- presumably imported elsewhere; confirm,
        # otherwise this path raises NameError.
        encoder = AutoModel.from_pretrained(
            encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
        )
    encoder.config.is_decoder = False
    decoder = kwargs_decoder.pop("model", None)
    if decoder is None:
        decoder = AutoModelWithLMHead.from_pretrained(
            decoder_pretrained_model_name_or_path, **kwargs_decoder
        )
    decoder.config.is_decoder = True
    model = cls(encoder, decoder)
    return model
def save_pretrained(self, save_directory):
    """Save a Seq2Seq model and its configuration file in a format such
    that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained`

    We save the encoder's and decoder's parameters in two separate
    subdirectories ("encoder" and "decoder") of `save_directory`, wiping
    any previously saved subdirectories first.
    """
    # If the root output directory does not exist, create it
    if not os.path.exists(save_directory):
        os.mkdir(save_directory)
    # Check whether the output directory is empty or not
    sub_directories = [
        directory
        for directory in os.listdir(save_directory)
        if os.path.isdir(os.path.join(save_directory, directory))
    ]
    if len(sub_directories) > 0:
        if "encoder" in sub_directories and "decoder" in sub_directories:
            print(
                "WARNING: there is an older version of encoder-decoder saved in"
                + " the output directory. The default behaviour is to overwrite them."
            )
        # Empty the output directory
        # NOTE(review): this cleanup assumes each subdirectory contains only
        # plain files -- os.remove would fail on nested directories; confirm.
        for directory_to_remove in sub_directories:
            # Remove all files into the subdirectory
            files_to_remove = os.listdir(
                os.path.join(save_directory, directory_to_remove)
            )
            for file_to_remove in files_to_remove:
                os.remove(
                    os.path.join(
                        save_directory, directory_to_remove, file_to_remove
                    )
                )
            # Remove the subdirectory itself
            os.rmdir(os.path.join(save_directory, directory_to_remove))
        assert len(os.listdir(save_directory)) == 0  # sanity check
    # Create the "encoder" directory inside the output directory and save the encoder into it
    if not os.path.exists(os.path.join(save_directory, "encoder")):
        os.mkdir(os.path.join(save_directory, "encoder"))
    self.encoder.save_pretrained(os.path.join(save_directory, "encoder"))
    # Create the "decoder" directory inside the output directory and save the decoder into it
    if not os.path.exists(os.path.join(save_directory, "decoder")):
        os.mkdir(os.path.join(save_directory, "decoder"))
    self.decoder.save_pretrained(os.path.join(save_directory, "decoder"))
@staticmethod
def prepare_model_kwargs(**kwargs):
"""Prepare the encoder and decoder's keyword arguments.
Keyword arguments come in 3 flavors:
- encoder-specific (prefixed by `encoder_`)
- decoder-specific (prefixed by `decoder_`)
- those that apply to the model as whole.
We let the specific kwargs override the common ones in case of
conflict.
"""
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_")
and not argument.startswith("decoder_")
}
decoder_kwargs = kwargs_common.copy()
encoder_kwargs = kwargs_common.copy()
encoder_kwargs.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
decoder_kwargs.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
decoder_kwargs["encoder_attention_mask"] = encoder_kwargs.get(
"attention_mask", None
)
return encoder_kwargs, decoder_kwargs
def forward(self, encoder_input_ids=None, decoder_input_ids=None, **kwargs):
"""The forward pass on a seq2eq depends what we are performing:
- During training we perform one forward pass through both the encoder
and decoder;
- During prediction, we perform one forward pass through the encoder,
and then perform several forward passes with the encoder's hidden
state through the decoder to decode a full sequence.
Therefore, we skip the forward pass on the encoder if an argument named
`encoder_hidden_state` is passed to this function.
Params:
encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of encoder input sequence tokens in the vocabulary.
decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of decoder input sequence tokens in the vocabulary.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
"""
kwargs_encoder, kwargs_decoder = self.prepare_model_kwargs(**kwargs)
# Encode if needed (training, first prediction pass)
encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
if encoder_hidden_states is None:
encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
encoder_hidden_states = encoder_outputs[0]
else:
encoder_outputs = ()
kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
return decoder_outputs + encoder_outputs
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step
if type(past) is tuple:
encoder_outputs = past
else:
encoder_outputs = (past,)
return {
"decoder_input_ids": input_ids,
"encoder_outputs": encoder_outputs,
"encoder_hidden_states": encoder_outputs[0],
"decoder_attention_mask": None,
}
    def prepare_scores_for_generation(self, scores, **kwargs):
        """Hook to adjust next-token scores during generation.

        The base implementation returns `scores` unchanged; subclasses may
        override it (the beam-search loop calls it with `cur_len` and
        `max_length` for encoder-decoder models).
        """
        return scores
def _do_output_past(self, outputs):
"""During generation, decide whether to pass the `past` variable to the next forward pass."""
has_output_past = getattr(self.config, "output_past", False)
mem_len = getattr(self.config, "mem_len", 0)
if len(outputs) <= 1:
return False
if mem_len > 0 or has_output_past:
return True
return False
def enforce_repetition_penalty_(
self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty
):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)."""
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
# if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
    def get_encoder(self):
        """Return the encoder half of the encoder-decoder pair."""
        return self.encoder
    def get_decoder(self):
        """Return the decoder half of the encoder-decoder pair."""
        return self.decoder
    def get_output_embeddings(self):
        """Return the decoder's output (LM head) embeddings; `None` means no LM head."""
        return self.decoder.get_output_embeddings()
    @torch.no_grad()
    def generate(
        self,
        input_ids=None,
        max_length=None,
        min_length=None,
        do_sample=None,
        early_stopping=None,
        num_beams=None,
        temperature=None,
        top_k=None,
        top_p=None,
        repetition_penalty=None,
        bad_words_ids=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        length_penalty=None,
        no_repeat_ngram_size=None,
        num_return_sequences=None,
        attention_mask=None,
        decoder_start_token_id=None,
    ):
        r"""Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
        Adapted in part from `Facebook's XLM beam search code`_.
        .. _`Facebook's XLM beam search code`:
           https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
        Parameters:
            input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
                The sequence used as a prompt for the generation. If `None` the method initializes
                it as an empty `torch.LongTensor` of shape `(1,)`.
            max_length: (`optional`) int
                The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
            min_length: (`optional`) int
                The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
            do_sample: (`optional`) bool
                If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
            early_stopping: (`optional`) bool
                if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
            num_beams: (`optional`) int
                Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
            temperature: (`optional`) float
                The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
            top_k: (`optional`) int
                The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
            top_p: (`optional`) float
                The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
            repetition_penalty: (`optional`) float
                The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
            pad_token_id: (`optional`) int
                Padding token. Default to the model-specific pad_token_id or None if it does not exist.
            bos_token_id: (`optional`) int
                BOS token. Defaults to `bos_token_id` as defined in the models config.
            eos_token_id: (`optional`) int
                EOS token. Defaults to `eos_token_id` as defined in the models config.
            length_penalty: (`optional`) float
                Exponential penalty to the length. Default to 1.
            no_repeat_ngram_size: (`optional`) int
                If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
            bad_words_ids: (`optional`) list of lists of int
                `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
            num_return_sequences: (`optional`) int
                The number of independently computed returned sequences for each element in the batch. Default to 1.
            attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
                Mask to avoid performing attention on padding token indices.
                Mask values selected in ``[0, 1]``:
                ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
                Defaults to `None`.
                `What are attention masks? <../glossary.html#attention-mask>`__
            decoder_start_token_id=None: (`optional`) int
                If an encoder-decoder model starts decoding with a different token than BOS.
                Defaults to `None` and is changed to `BOS` later.
        Return:
            output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
                sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
        Examples::
            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
            model = AutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
            outputs = model.generate(max_length=40)  # do greedy decoding
            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('openai-gpt')   # Initialize tokenizer
            model = AutoModelWithLMHead.from_pretrained('openai-gpt')    # Download model and configuration from S3 and cache.
            input_context = 'The dog'
            input_ids = tokenizer.encode(input_context, return_tensors='pt')  # encode input context
            outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5)  # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
            for i in range(3): #  3 output sequences were generated
                print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
            model = AutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
            input_context = 'The dog'
            input_ids = tokenizer.encode(input_context, return_tensors='pt')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3)  # 3 generate sequences using by sampling
            for i in range(3): #  3 output sequences were generated
                print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('ctrl')   # Initialize tokenizer
            model = AutoModelWithLMHead.from_pretrained('ctrl')    # Download model and configuration from S3 and cache.
            input_context = 'Legal My neighbor is'  # "Legal" is one of the control codes for ctrl
            input_ids = tokenizer.encode(input_context, return_tensors='pt')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2)  # generate sequences
            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('gpt2')   # Initialize tokenizer
            model = AutoModelWithLMHead.from_pretrained('gpt2')    # Download model and configuration from S3 and cache.
            input_context = 'My cute dog'  # "Legal" is one of the control codes for ctrl
            bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
            input_ids = tokenizer.encode(input_context, return_tensors='pt')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids)  # generate sequences without allowing bad_words to be generated
        """
        # We cannot generate if the model does not have a LM head
        if self.get_output_embeddings() is None:
            raise AttributeError(
                "You tried to generate sequences with a model that does not have a LM Head."
                "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
            )
        # Resolve every argument the caller left unset from the model config.
        max_length = max_length if max_length is not None else self.config.max_length
        min_length = min_length if min_length is not None else self.config.min_length
        do_sample = do_sample if do_sample is not None else self.config.do_sample
        early_stopping = (
            early_stopping if early_stopping is not None else self.config.early_stopping
        )
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        temperature = (
            temperature if temperature is not None else self.config.temperature
        )
        top_k = top_k if top_k is not None else self.config.top_k
        top_p = top_p if top_p is not None else self.config.top_p
        repetition_penalty = (
            repetition_penalty
            if repetition_penalty is not None
            else self.config.repetition_penalty
        )
        bos_token_id = (
            bos_token_id if bos_token_id is not None else self.config.bos_token_id
        )
        pad_token_id = (
            pad_token_id if pad_token_id is not None else self.config.pad_token_id
        )
        eos_token_id = (
            eos_token_id if eos_token_id is not None else self.config.eos_token_id
        )
        length_penalty = (
            length_penalty if length_penalty is not None else self.config.length_penalty
        )
        no_repeat_ngram_size = (
            no_repeat_ngram_size
            if no_repeat_ngram_size is not None
            else self.config.no_repeat_ngram_size
        )
        bad_words_ids = (
            bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
        )
        num_return_sequences = (
            num_return_sequences
            if num_return_sequences is not None
            else self.config.num_return_sequences
        )
        decoder_start_token_id = (
            decoder_start_token_id
            if decoder_start_token_id is not None
            else self.config.decoder_start_token_id
        )
        if input_ids is not None:
            batch_size = input_ids.shape[0]  # overriden by the input batch_size
        else:
            batch_size = 1
        # Validate the resolved arguments early, with explicit error messages.
        assert (
            isinstance(max_length, int) and max_length > 0
        ), "`max_length` should be a strictly positive integer."
        assert (
            isinstance(min_length, int) and min_length >= 0
        ), "`min_length` should be a positive integer."
        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
        assert (
            isinstance(num_beams, int) and num_beams > 0
        ), "`num_beams` should be a strictly positive integer."
        assert temperature > 0, "`temperature` should be strictly positive."
        assert (
            isinstance(top_k, int) and top_k >= 0
        ), "`top_k` should be a positive integer."
        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
        assert input_ids is not None or (
            isinstance(bos_token_id, int) and bos_token_id >= 0
        ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
        assert pad_token_id is None or (
            isinstance(pad_token_id, int) and (pad_token_id >= 0)
        ), "`pad_token_id` should be a positive integer."
        assert (eos_token_id is None) or (
            isinstance(eos_token_id, int) and (eos_token_id >= 0)
        ), "`eos_token_id` should be a positive integer."
        assert length_penalty > 0, "`length_penalty` should be strictly positive."
        assert (
            isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
        ), "`no_repeat_ngram_size` should be a positive integer."
        assert (
            isinstance(num_return_sequences, int) and num_return_sequences > 0
        ), "`num_return_sequences` should be a strictly positive integer."
        assert (
            bad_words_ids is None
            or isinstance(bad_words_ids, list)
            and isinstance(bad_words_ids[0], list)
        ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
        # Without a prompt, start generation from a single BOS token.
        if input_ids is None:
            assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
                "you should either supply a context to complete as `input_ids` input "
                "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
            )
            input_ids = torch.full(
                (batch_size, 1),
                bos_token_id,
                dtype=torch.long,
                device=next(self.parameters()).device,
            )
        else:
            assert (
                input_ids.dim() == 2
            ), "Input prompt should be of shape (batch_size, sequence length)."
        # not allow to duplicate outputs when greedy decoding
        if do_sample is False:
            if num_beams == 1:
                # no_beam_search greedy generation conditions
                assert (
                    num_return_sequences == 1
                ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
            else:
                # beam_search greedy generation conditions
                assert (
                    num_beams >= num_return_sequences
                ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
        # create attention mask if necessary
        # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
        if (
            (attention_mask is None)
            and (pad_token_id is not None)
            and (pad_token_id in input_ids)
        ):
            attention_mask = input_ids.ne(pad_token_id).long()
        elif attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)
        # set pad_token_id to eos_token_id if not set. Important that this is done after
        # attention_mask is created
        if pad_token_id is None and eos_token_id is not None:
            logger.warning(
                "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(
                    eos_token_id
                )
            )
            pad_token_id = eos_token_id
        # current position and vocab size
        vocab_size = self.config.vocab_size
        # set effective batch size and effective batch multiplier according to do_sample
        # (sampling draws num_return_sequences independent sequences per input;
        # greedy beam search instead returns the top hypotheses of one search)
        if do_sample:
            effective_batch_size = batch_size * num_return_sequences
            effective_batch_mult = num_return_sequences
        else:
            effective_batch_size = batch_size
            effective_batch_mult = 1
        if self.config.is_encoder_decoder:
            if decoder_start_token_id is None:
                decoder_start_token_id = bos_token_id
            assert (
                decoder_start_token_id is not None
            ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
            assert hasattr(
                self, "get_encoder"
            ), "{} should have a 'get_encoder' function defined".format(self)
            assert callable(self.get_encoder), "{} should be a method".format(
                self.get_encoder
            )
            # get encoder and store encoder outputs
            encoder = self.get_encoder()
            encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
        # Expand input ids if num_beams > 1 or num_return_sequences > 1
        if num_return_sequences > 1 or num_beams > 1:
            input_ids_len = input_ids.shape[-1]
            input_ids = input_ids.unsqueeze(1).expand(
                batch_size, effective_batch_mult * num_beams, input_ids_len
            )
            attention_mask = attention_mask.unsqueeze(1).expand(
                batch_size, effective_batch_mult * num_beams, input_ids_len
            )
            input_ids = input_ids.contiguous().view(
                effective_batch_size * num_beams, input_ids_len
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
            attention_mask = attention_mask.contiguous().view(
                effective_batch_size * num_beams, input_ids_len
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
        if self.config.is_encoder_decoder:
            # create empty decoder_input_ids
            input_ids = torch.full(
                (effective_batch_size * num_beams, 1),
                decoder_start_token_id,
                dtype=torch.long,
                device=next(self.parameters()).device,
            )
            cur_len = 1
            assert (
                batch_size == encoder_outputs[0].shape[0]
            ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
            # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
            expanded_batch_idxs = (
                torch.arange(batch_size)
                .view(-1, 1)
                .repeat(1, num_beams * effective_batch_mult)
                .view(-1)
                .to(input_ids.device)
            )
            # expand encoder_outputs
            encoder_outputs = (
                encoder_outputs[0].index_select(0, expanded_batch_idxs),
                *encoder_outputs[1:],
            )
        else:
            encoder_outputs = None
            cur_len = input_ids.shape[-1]
        # Dispatch to the beam-search or sampling/greedy decoding loop.
        if num_beams > 1:
            output = self._generate_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                early_stopping=early_stopping,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                decoder_start_token_id=decoder_start_token_id,
                eos_token_id=eos_token_id,
                batch_size=effective_batch_size,
                num_return_sequences=num_return_sequences,
                length_penalty=length_penalty,
                num_beams=num_beams,
                vocab_size=vocab_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
            )
        else:
            output = self._generate_no_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                decoder_start_token_id=decoder_start_token_id,
                eos_token_id=eos_token_id,
                batch_size=effective_batch_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
            )
        return output
    def _generate_no_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        min_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        no_repeat_ngram_size,
        bad_words_ids,
        bos_token_id,
        pad_token_id,
        eos_token_id,
        decoder_start_token_id,
        batch_size,
        encoder_outputs,
        attention_mask,
    ):
        """Generate sequences for each example without beam search (num_beams == 1).
        All returned sequences are generated independently.

        One token is produced per loop iteration for every sequence in the
        batch: logits for the last position are filtered (repetition /
        n-gram / bad-word penalties, min-length EOS masking), then either
        sampled (with temperature and top-k/top-p) or argmax-decoded.
        Finished sequences are padded until the whole batch is done.
        """
        # length of generated sentences / unfinished sentences
        # (unfinished_sents is a 0/1 mask; sent_lengths records final lengths)
        unfinished_sents = input_ids.new(batch_size).fill_(1)
        sent_lengths = input_ids.new(batch_size).fill_(max_length)
        past = encoder_outputs  # defined for encoder-decoder models, None for decoder-only models
        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(
                input_ids, past=past, attention_mask=attention_mask
            )
            outputs = self(**model_inputs)
            next_token_logits = outputs[0][:, -1, :]
            # if model has past, then set the past variable to speed up decoding
            if self._do_output_past(outputs):
                past = outputs[1]
            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                self.enforce_repetition_penalty_(
                    next_token_logits, batch_size, 1, input_ids, repetition_penalty
                )
            if no_repeat_ngram_size > 0:
                # calculate a list of banned tokens to prevent repetitively generating the same ngrams
                # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
                banned_tokens = calc_banned_ngram_tokens(
                    input_ids, batch_size, no_repeat_ngram_size, cur_len
                )
                for batch_idx in range(batch_size):
                    next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float(
                        "inf"
                    )
            if bad_words_ids is not None:
                # calculate a list of banned tokens according to bad words
                banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
                for batch_idx in range(batch_size):
                    next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float(
                        "inf"
                    )
            # set eos token prob to zero if min_length is not reached
            if eos_token_id is not None and cur_len < min_length:
                next_token_logits[:, eos_token_id] = -float("inf")
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    next_token_logits = next_token_logits / temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(
                    next_token_logits, top_k=top_k, top_p=top_p
                )
                # Sample
                probs = F.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)
            # update generations and finished sentences
            if eos_token_id is not None:
                # pad finished sentences if eos_token_id exist
                tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (
                    1 - unfinished_sents
                )
            else:
                tokens_to_add = next_token
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            if eos_token_id is not None:
                eos_in_sents = tokens_to_add == eos_token_id
                # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
                is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(
                    eos_in_sents.long()
                ).bool()
                sent_lengths.masked_fill_(
                    is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1
                )
                # unfinished_sents is set to zero if eos in sentence
                unfinished_sents.mul_((~eos_in_sents).long())
            # stop when there is a </s> in each sentence, or if we exceed the maximum length
            if unfinished_sents.max() == 0:
                break
            # extend attention_mask for new generated input if only decoder
            if self.config.is_encoder_decoder is False:
                attention_mask = torch.cat(
                    [
                        attention_mask,
                        attention_mask.new_ones((attention_mask.shape[0], 1)),
                    ],
                    dim=-1,
                )
            cur_len = cur_len + 1
        # if there are different sentences lengths in the batch, some batches have to be padded
        if sent_lengths.min().item() != sent_lengths.max().item():
            assert (
                pad_token_id is not None
            ), "`Pad_token_id` has to be defined if batches have different lengths"
            # finished sents are filled with pad_token
            decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(
                pad_token_id
            )
        else:
            decoded = input_ids
        # copy each hypothesis (up to its recorded length) into the output buffer
        for hypo_idx, hypo in enumerate(input_ids):
            decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]
        return decoded
    def _generate_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        min_length,
        do_sample,
        early_stopping,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        no_repeat_ngram_size,
        bad_words_ids,
        bos_token_id,
        pad_token_id,
        eos_token_id,
        decoder_start_token_id,
        batch_size,
        num_return_sequences,
        length_penalty,
        num_beams,
        vocab_size,
        encoder_outputs,
        attention_mask,
    ):
        """Generate sequences for each example with beam search.

        Maintains `num_beams` hypotheses per batch element. At every step the
        per-beam next-token scores are combined with the accumulated beam
        scores, the best (or sampled) 2*num_beams candidates are kept, and
        beams that emit EOS are moved into `generated_hyps`. Decoding stops
        when every batch element is done or `max_length` is reached.
        """
        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(
                num_beams, max_length, length_penalty, early_stopping=early_stopping
            )
            for _ in range(batch_size)
        ]
        # scores for each sentence in the beam
        beam_scores = torch.zeros(
            (batch_size, num_beams), dtype=torch.float, device=input_ids.device
        )
        # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
        if do_sample is False:
            beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)
        # cache compute states
        past = encoder_outputs  # defined for encoder-decoder models, None for decoder-only models
        # done sentences
        done = [False for _ in range(batch_size)]
        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(
                input_ids, past=past, attention_mask=attention_mask
            )
            outputs = self(
                **model_inputs
            )  # (batch_size * num_beams, cur_len, vocab_size)
            next_token_logits = outputs[0][
                :, -1, :
            ]  # (batch_size * num_beams, vocab_size)
            # if model has past, then set the past variable to speed up decoding
            if self._do_output_past(outputs):
                past = outputs[1]
            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                self.enforce_repetition_penalty_(
                    next_token_logits,
                    batch_size,
                    num_beams,
                    input_ids,
                    repetition_penalty,
                )
            if temperature != 1.0:
                next_token_logits = next_token_logits / temperature
            # work in log-probability space so beam scores can be summed
            scores = F.log_softmax(
                next_token_logits, dim=-1
            )  # (batch_size * num_beams, vocab_size)
            if self.config.is_encoder_decoder and do_sample is False:
                # TODO (PVP) still a bit hacky here - there might be a better solution
                scores = self.prepare_scores_for_generation(
                    scores, cur_len=cur_len, max_length=max_length
                )
            # set eos token prob to zero if min_length is not reached
            if eos_token_id is not None and cur_len < min_length:
                scores[:, eos_token_id] = -float("inf")
            if no_repeat_ngram_size > 0:
                # calculate a list of banned tokens to prevent repetitively generating the same ngrams
                num_batch_hypotheses = batch_size * num_beams
                # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
                banned_batch_tokens = calc_banned_ngram_tokens(
                    input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
                )
                for i, banned_tokens in enumerate(banned_batch_tokens):
                    scores[i, banned_tokens] = -float("inf")
            if bad_words_ids is not None:
                # calculate a list of banned tokens according to bad words
                banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
                for i, banned_tokens in enumerate(banned_tokens):
                    scores[i, banned_tokens] = -float("inf")
            assert scores.shape == (
                batch_size * num_beams,
                vocab_size,
            ), "Shapes of scores: {} != {}".format(
                scores.shape, (batch_size * num_beams, vocab_size)
            )
            if do_sample:
                _scores = scores + beam_scores[:, None].expand_as(
                    scores
                )  # (batch_size * num_beams, vocab_size)
                # Top-p/top-k filtering
                _scores = top_k_top_p_filtering(
                    _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
                )  # (batch_size * num_beams, vocab_size)
                # re-organize to group the beam together to sample from all beam_idxs
                _scores = _scores.contiguous().view(
                    batch_size, num_beams * vocab_size
                )  # (batch_size, num_beams * vocab_size)
                # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
                probs = F.softmax(_scores, dim=-1)
                next_tokens = torch.multinomial(
                    probs, num_samples=2 * num_beams
                )  # (batch_size, num_beams * 2)
                # Compute next scores
                next_scores = torch.gather(
                    _scores, -1, next_tokens
                )  # (batch_size, num_beams * 2)
                # sort the sampled vector to make sure that the first num_beams samples are the best
                next_scores, next_scores_indices = torch.sort(
                    next_scores, descending=True, dim=1
                )
                next_tokens = torch.gather(
                    next_tokens, -1, next_scores_indices
                )  # (batch_size, num_beams * 2)
            else:
                next_scores = scores + beam_scores[:, None].expand_as(
                    scores
                )  # (batch_size * num_beams, vocab_size)
                # re-organize to group the beam together (we are keeping top hypothesis accross beams)
                next_scores = next_scores.view(
                    batch_size, num_beams * vocab_size
                )  # (batch_size, num_beams * vocab_size)
                next_scores, next_tokens = torch.topk(
                    next_scores, 2 * num_beams, dim=1, largest=True, sorted=True
                )
            assert (
                next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
            )
            # next batch beam content
            next_batch_beam = []
            # for each sentence
            for batch_idx in range(batch_size):
                # if we are done with this sentence
                if done[batch_idx]:
                    assert (
                        len(generated_hyps[batch_idx]) >= num_beams
                    ), "Batch can only be done if at least {} beams have been generated".format(
                        num_beams
                    )
                    assert (
                        eos_token_id is not None and pad_token_id is not None
                    ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
                    next_batch_beam.extend(
                        [(0, pad_token_id, 0)] * num_beams
                    )  # pad the batch
                    continue
                # next sentence beam content
                next_sent_beam = []
                # next tokens for this sentence
                for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
                    zip(next_tokens[batch_idx], next_scores[batch_idx])
                ):
                    # get beam and token IDs
                    # (candidate ids are flattened over beams: id = beam * vocab_size + token)
                    beam_id = beam_token_id // vocab_size
                    token_id = beam_token_id % vocab_size
                    effective_beam_id = batch_idx * num_beams + beam_id
                    # add to generated hypotheses if end of sentence or last iteration
                    if (eos_token_id is not None) and (token_id.item() == eos_token_id):
                        # if beam_token does not belong to top num_beams tokens, it should not be added
                        is_beam_token_worse_than_top_num_beams = (
                            beam_token_rank >= num_beams
                        )
                        if is_beam_token_worse_than_top_num_beams:
                            continue
                        generated_hyps[batch_idx].add(
                            input_ids[effective_beam_id].clone(),
                            beam_token_score.item(),
                        )
                    else:
                        # add next predicted token if it is not eos_token
                        next_sent_beam.append(
                            (beam_token_score, token_id, effective_beam_id)
                        )
                    # the beam for next step is full
                    if len(next_sent_beam) == num_beams:
                        break
                # Check if were done so that we can save a pad step if all(done)
                done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
                    next_scores[batch_idx].max().item(), cur_len=cur_len
                )
                # update next beam content
                assert len(next_sent_beam) == num_beams, "Beam should always be full"
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == num_beams * (batch_idx + 1)
            # stop when we are done with each sentence
            if all(done):
                break
            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * num_beams
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
            beam_idx = input_ids.new([x[2] for x in next_batch_beam])
            # re-order batch
            input_ids = input_ids[beam_idx, :]
            input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
            # re-order internal states
            if past is not None:
                past = self._reorder_cache(past, beam_idx)
            # extend attention_mask for new generated input if only decoder
            if self.config.is_encoder_decoder is False:
                attention_mask = torch.cat(
                    [
                        attention_mask,
                        attention_mask.new_ones((attention_mask.shape[0], 1)),
                    ],
                    dim=-1,
                )
            # update current length
            cur_len = cur_len + 1
        # finalize all open beam hypotheses and end to generated hypotheses
        for batch_idx in range(batch_size):
            if done[batch_idx]:
                continue
            # test that beam scores match previously calculated scores if not eos and batch_idx not done
            if eos_token_id is not None and all(
                (token_id % vocab_size).item() is not eos_token_id
                for token_id in next_tokens[batch_idx]
            ):
                assert torch.all(
                    next_scores[batch_idx, :num_beams]
                    == beam_scores.view(batch_size, num_beams)[batch_idx]
                ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
                    next_scores[:, :num_beams][batch_idx],
                    beam_scores.view(batch_size, num_beams)[batch_idx],
                )
            # need to add best num_beams hypotheses to generated hyps
            for beam_id in range(num_beams):
                effective_beam_id = batch_idx * num_beams + beam_id
                final_score = beam_scores[effective_beam_id].item()
                final_tokens = input_ids[effective_beam_id]
                generated_hyps[batch_idx].add(final_tokens, final_score)
        # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
        output_batch_size = (
            batch_size if do_sample else batch_size * num_return_sequences
        )
        output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
        # select the best hypotheses
        sent_lengths = input_ids.new(output_batch_size)
        best = []
        # retrieve best hypotheses
        for i, hypotheses in enumerate(generated_hyps):
            sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
            for j in range(output_num_return_sequences_per_batch):
                effective_batch_idx = output_num_return_sequences_per_batch * i + j
                best_hyp = sorted_hyps.pop()[1]
                sent_lengths[effective_batch_idx] = len(best_hyp)
                best.append(best_hyp)
        # shorter batches are filled with pad_token
        if sent_lengths.min().item() != sent_lengths.max().item():
            assert pad_token_id is not None, "`Pad_token_id` has to be defined"
            sent_max_len = min(sent_lengths.max().item() + 1, max_length)
            decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
            # fill with hypothesis and eos_token_id if necessary
            for i, hypo in enumerate(best):
                decoded[i, : sent_lengths[i]] = hypo
                if sent_lengths[i] < max_length:
                    decoded[i, sent_lengths[i]] = eos_token_id
        else:
            # none of the hypotheses have an eos_token
            assert (len(hypo) == max_length for hypo in best)
            decoded = (
                torch.stack(best).type(torch.long).to(next(self.parameters()).device)
            )
        return decoded
# force one of token_ids to be generated by setting prob of all other tokens to 0.
def _force_token_ids_generation(self, scores, token_ids):
    """Restrict generation to ``token_ids`` by masking every other logit.

    ``scores`` must be 2-D ``[batch_size, vocab_size]``; all positions not
    in ``token_ids`` are set to ``-inf`` in place.
    """
    if isinstance(token_ids, int):
        token_ids = [token_ids]
    allowed = set(token_ids)
    # every vocabulary index that must be suppressed
    banned = [tok for tok in range(self.config.vocab_size) if tok not in allowed]
    all_but_token_ids_mask = torch.tensor(
        banned,
        dtype=torch.long,
        device=next(self.parameters()).device,
    )
    assert (
        len(scores.shape) == 2
    ), "scores should be of rank 2 with shape: [batch_size, vocab_size]"
    scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
    """Reorder each cached layer state along its beam dimension (dim 1).

    Entries are selected per ``beam_idx``, cloned/detached, and
    re-concatenated; shapes must be unchanged.
    """
    reordered = []
    for layer_past in past:
        # batch dim of `past` and `mems` is at the 2nd position, so pick
        # beam `i` at dim 1, keep it as a singleton slice, detach from graph
        slices = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]
        new_layer = torch.cat(slices, dim=1)
        # reordering must never change the cached shape
        assert new_layer.shape == layer_past.shape
        reordered.append(new_layer)
    return tuple(reordered)
def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len):
    """For each hypothesis, list tokens whose emission would repeat an n-gram.

    Copied from fairseq's no_repeat_ngram logic for beam search: an n-gram
    table is built per hypothesis, then the continuations of the n-gram
    currently being formed are returned as banned tokens.
    """
    if cur_len + 1 < no_repeat_ngram_size:
        # not enough tokens generated yet to complete a single n-gram
        return [[] for _ in range(num_hypos)]
    ngram_tables = [{} for _ in range(num_hypos)]
    for hypo_idx in range(num_hypos):
        tokens = prev_input_ids[hypo_idx].tolist()
        table = ngram_tables[hypo_idx]
        # slide an n-gram window over the generated tokens:
        # prefix (n-1 tokens) -> list of observed continuations
        for window in zip(*[tokens[offset:] for offset in range(no_repeat_ngram_size)]):
            prefix = tuple(window[:-1])
            table.setdefault(prefix, []).append(window[-1])

    def _current_bans(hypo_idx):
        # the (n-1)-token prefix that the next emitted token would extend
        start_idx = cur_len + 1 - no_repeat_ngram_size
        prefix = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
        return ngram_tables[hypo_idx].get(prefix, [])

    return [_current_bans(hypo_idx) for hypo_idx in range(num_hypos)]
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
    """Return, per hypothesis, tokens whose emission would complete a bad word.

    Args:
        prev_input_ids: 2-D tensor of generated token ids, one row per hypothesis.
        bad_words_ids: list of non-empty token-id sequences to forbid.

    Returns:
        One list of banned token ids per hypothesis.
    """
    banned_tokens = []

    def _tokens_match(prev_tokens, tokens):
        if len(tokens) == 0:
            # if bad word tokens is just one token always ban it
            return True
        # BUGFIX: compare against the hypothesis' own token count; the old
        # code used len(prev_input_ids), which is the number of hypotheses
        # (dim 0 of the batch), not the generated sequence length.
        if len(tokens) > len(prev_tokens):
            # bad-word prefix is longer than everything generated so far
            return False
        if prev_tokens[-len(tokens):] == tokens:
            # generated suffix matches the bad word's prefix
            return True
        else:
            return False

    for prev_input_ids_slice in prev_input_ids:
        banned_tokens_slice = []
        for banned_token_seq in bad_words_ids:
            assert (
                len(banned_token_seq) > 0
            ), "Banned words token sequences {} cannot have an empty list".format(
                bad_words_ids
            )
            if not _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]):
                # if tokens do not match continue
                continue
            banned_tokens_slice.append(banned_token_seq[-1])
        banned_tokens.append(banned_tokens_slice)
    return banned_tokens
def top_k_top_p_filtering(
    logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1
):
    """Filter a logits distribution with top-k and/or nucleus (top-p) filtering.

    Args:
        logits: logits of shape (batch size, vocabulary size), modified in place.
        top_k: if > 0, keep only the k highest-probability tokens.
        top_p: if < 1.0, keep the smallest set of tokens whose cumulative
            probability reaches top_p (Holtzman et al.,
            http://arxiv.org/abs/1904.09751).
        min_tokens_to_keep: lower bound on surviving tokens per example.
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        # clamp k into [min_tokens_to_keep, vocab size] as a safety check
        k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
        # everything strictly below the k-th best logit is filtered out
        kth_best = torch.topk(logits, k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
        # mark tokens past the nucleus threshold (tokens with prob 0 are kept)
        drop = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # guarantee min_tokens_to_keep survivors (the shift below adds one more)
            drop[..., :min_tokens_to_keep] = 0
        # shift right so the first token that crossed the threshold survives
        drop[..., 1:] = drop[..., :-1].clone()
        drop[..., 0] = 0
        # map the sorted-order mask back onto the original vocabulary order
        indices_to_remove = drop.scatter(1, sorted_indices, drop)
        logits[indices_to_remove] = filter_value
    return logits
class BeamHypotheses(object):
    """Fixed-capacity n-best list of hypotheses used during beam search."""

    def __init__(self, num_beams, max_length, length_penalty, early_stopping):
        """Initialize an empty n-best list of hypotheses."""
        self.max_length = max_length - 1  # ignoring bos_token
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.num_beams = num_beams
        self.beams = []
        self.worst_score = 1e9

    def __len__(self):
        """Number of hypotheses currently stored."""
        return len(self.beams)

    def add(self, hyp, sum_logprobs):
        """Insert ``hyp`` if it beats the current worst stored hypothesis."""
        # length-normalized score
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.num_beams and score <= self.worst_score:
            return  # not good enough to enter a full list
        self.beams.append((score, hyp))
        if len(self) > self.num_beams:
            # evict the lowest-scoring entry; the runner-up becomes the new worst
            ranked = sorted((s, idx) for idx, (s, _) in enumerate(self.beams))
            del self.beams[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs, cur_len=None):
        """True when no in-flight hypothesis can still beat the stored worst one.

        With ``early_stopping`` the list is done as soon as it is full.
        """
        if len(self) < self.num_beams:
            return False
        if self.early_stopping:
            return True
        if cur_len is None:
            cur_len = self.max_length
        best_possible = best_sum_logprobs / cur_len ** self.length_penalty
        return self.worst_score >= best_possible
|
from django.urls import path
from . import views
# URL namespace: reverse with e.g. reverse('subscriptions:products_api').
app_name = 'subscriptions'

urlpatterns = [
    # JSON list of purchasable products with their metadata
    path('api/active-products/', views.ProductWithMetadataAPI.as_view(), name='products_api'),
    # team-specific URLs
    # todo: it would be better if these matched the /a/team-slug/subscription pattern of other pages
    path('team/<slug:team_slug>/', views.team_subscription, name='team_subscription_details'),
    path('team/<slug:team_slug>/subscription_success/',
         views.team_subscription_success, name='team_subscription_success'),
    path('team/<slug:team_slug>/demo/',
         views.team_subscription_demo, name='team_subscription_demo'),
    path('team/<slug:team_slug>/subscription-gated-page/',
         views.team_subscription_gated_page, name='team_subscription_gated_page'),
    # Stripe billing-portal + customer creation, scoped to a team
    path('team/<slug:team_slug>/stripe-portal/', views.team_create_stripe_portal_session,
         name='team_create_stripe_portal_session'),
    path('team/<slug:team_slug>/api/create_customer/',
         views.team_create_customer, name='team_create_customer'),
    # Team admin subscription
    path('api/stripe-info/', views.stripe_info, name='stripe-info'),
    path('api/subscription-details/', views.subscription_details, name='subscription-details'),
    path('api/upgrade-subscription/', views.upgrade_subscription, name='upgrade-subscription'),
    path('api/create_customer/', views.create_customer, name='create_customer'),
    path('api/create_stripe_portal_session/', views.create_stripe_portal_session, name='create_stripe_portal_session'),
]
|
import sys
import time
from networktables import NetworkTables
# To see messages from networktables, you must setup logging
import logging
logging.basicConfig(level=logging.DEBUG)

# Usage: script.py <server-ip>
if len(sys.argv) != 2:
    print("Error: specify an IP to connect to!")
    # BUGFIX: exit with a non-zero status on a usage error (was exit(0),
    # which reported success); sys.exit is also safer than the site builtin.
    sys.exit(1)

ip = sys.argv[1]

NetworkTables.initialize(server=ip)
visionTable = NetworkTables.getTable("vision")
visionTable.putBoolean("vision_test", True)

# Publish an incrementing counter once per second so the robot side can
# verify the vision connection stays alive.
i = 0
while True:
    try:
        visionTable.putNumber('vision_test_counter', i)
    # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # can still stop the loop.
    except Exception:
        print("Some error occurred when incrementing the vision test counter")
    time.sleep(1)
    i += 1
|
def temperature(cel):
    """Convert a Celsius temperature to degrees Fahrenheit."""
    ratio = 9 / 5  # Fahrenheit degrees per Celsius degree
    return cel * ratio + 32
# Read a temperature from the user and print its Fahrenheit equivalent.
# GENERALIZED: float() instead of int() so fractional Celsius values
# (e.g. "36.6") are accepted; whole-number inputs behave as before.
c = float(input("enter temperature " ))
print(temperature(c))
|
from fastapi import FastAPI
from common.config import Config
from scripts.lambdas.initial_setup import setup
from views.schedules import router as schedules_router
def get_application() -> FastAPI:
    """Run initial setup, build the FastAPI app, and register its routes."""
    setup()
    app = FastAPI()
    app.include_router(schedules_router)

    @app.get("/")
    async def root():
        import boto3
        cfg = Config()
        clt = boto3.resource('ec2', aws_access_key_id=cfg.aws_access_key_id.get_secret_value(),
                             aws_secret_access_key=cfg.aws_secret_access_key.get_secret_value(), region_name='us-east-2')
        ids = ["i-0baefa4c6941972d2"]
        # BUGFIX: `ids` was built but never used and `.instances.all()` would
        # terminate EVERY instance in the region; restrict termination to the
        # instances actually listed.
        resp = clt.instances.filter(InstanceIds=ids).terminate()
        print(resp)
        # SECURITY(review): the response below embeds the raw AWS credentials;
        # kept byte-for-byte for interface compatibility, but this must be
        # removed before any real deployment.
        return {"message": "Hello World " + cfg.aws_access_key_id.get_secret_value() + " " + cfg.aws_secret_access_key.get_secret_value()}
    return app
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-30 17:29
from __future__ import unicode_literals
from django.db import migrations
from osf_oauth2_adapter.apps import OsfOauth2AdapterConfig
def create_human_group(apps, schema_editor):
    # Create the auth Group for humans (name comes from OsfOauth2AdapterConfig)
    # on whatever database this migration is being applied to.
    Group = apps.get_model('auth', 'Group')
    # db_manager(...) pins the create() to the migration's target database alias.
    Group.objects.db_manager(schema_editor.connection.alias).create(name=OsfOauth2AdapterConfig.humans_group_name)
class Migration(migrations.Migration):
    # Depends on Django's built-in auth migrations so the Group model exists.
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]

    # NOTE(review): no reverse_code is supplied, so this migration is
    # irreversible; consider migrations.RunPython.noop if rollback is needed.
    operations = [
        migrations.RunPython(create_human_group),
    ]
|
def loading():
    """Print a simple "loading..." progress message."""
    message = "loading..."
    print(message)
|
from pycalclib.Storage import Storage
from pycalclib.Data import Data
from pycalclib.Manager import Register
def test_storage_varaible_creation():
    """Creating a variable stores it, lists it, and preserves its value."""
    storage = Storage()
    created = storage.createVariable('myVar', Data(
        Register.getTypeByClassName('Integer'), 2))
    expected = Data(Register.getTypeByClassName('Integer'), 2)
    # the created variable is retrievable by name and enumerated
    assert created == storage.getVariable('myVar')
    assert created in storage.getAllVariables()
    # and compares equal to an independently built Data of the same value
    assert created == expected
|
from scripts.systems.digg_system import DiggSystem
from config.badger_config import digg_config
def deploy_digg_minimal(deployer, devProxyAdmin, daoProxyAdmin, owner=None):
    """Deploy a minimal DIGG system and return the DiggSystem handle.

    NOTE(review): `owner` is accepted but never used here — confirm whether
    an ownership transfer step was meant to happen in this helper.
    """
    digg = DiggSystem(digg_config, deployer, devProxyAdmin, daoProxyAdmin)
    # Deployment order: shared logic first, then token/policy/orchestrator,
    # then the oracles; later steps presumably depend on earlier addresses.
    digg.deploy_core_logic()
    digg.deploy_digg_token()
    digg.deploy_digg_policy()
    digg.deploy_orchestrator()
    digg.deploy_market_median_oracle()
    digg.deploy_cpi_median_oracle()
    digg.deploy_constant_oracle()
    digg.deploy_dynamic_oracle()
    return digg
|
import numpy as np
import numpy.linalg as LA
import itertools
import random
#seabornはimportしておくだけでもmatplotlibのグラフがきれいになる
#import seaborn as sns
#sns.set_style("darkgrid")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#from sklearn.neighbors import NearestNeighbors
"""
def AND(f1, f2):
return lambda x,y,z: f1(x,y,z) + f2(x,y,z) - np.sqrt(f1(x,y,z)**2 + f2(x,y,z)**2)
def OR(f1, f2):
return lambda x,y,z: f1(x,y,z) + f2(x,y,z) + np.sqrt(f1(x,y,z)**2 + f2(x,y,z)**2)
def NOT(f):
return lambda x,y,z: -f(x,y,z)
"""
def norm(normal):
    """Normalize a vector (1-D input) or an array of row vectors (2-D input).

    Zero vectors are returned unchanged instead of raising a division error.
    """
    # single vector
    if len(normal.shape) == 1:
        length = np.linalg.norm(normal)
        if length == 0:
            # zero normal: nothing sensible to normalize, return as-is
            return normal
        return normal / length
    # array of vectors: one L2 norm per row
    lengths = np.linalg.norm(normal, ord=2, axis=1)
    # replace zero norms with 1 so zero rows survive the division unchanged
    lengths = np.where(lengths == 0, 1, lengths)
    # GENERALIZED: broadcasting replaces the old np.full(3, ...) tiling,
    # so vectors of any dimension work, not only 3-D normals.
    return normal / lengths[:, np.newaxis]
#pointsからpのk近傍点のindexのリストを返す
def K_neighbor(points, p, k):
    """Return the indices of the k points in ``points`` nearest to ``p``."""
    # squared Euclidean distances (sqrt is unnecessary for ranking)
    sq_dists = np.square(points - p).sum(axis=1)
    # indices sorted by increasing distance; keep the closest k
    return np.argsort(sq_dists)[:k]
#def K_neighbor2(points, k):
# scikit-learnより全ての点群のk近傍のインデックスを受け取る
#nn = NearestNeighbors(n_neighbors=k+1)
#nn.fit(points)
#_, indices = nn.kneighbors(points)
# 自分もk近傍に含んじゃってるので自分を消す処理
#mask = indices != np.arange(indices.shape[0])[:,np.newaxis]
#mask[:,-1] &= np.logical_not(mask.all(axis=1))
#shape = (indices.shape[0], indices.shape[1] - 1)
#return indices[mask].reshape(shape)
#点群データなどをx, y, zに分解する
#[x1, y1, z1] [x1, x2, ..., xn]
# : -> [y1, y2, ..., yn]
#[xn, yn, zn] [z1, z2, ..., zn]
def Disassemble(XYZ):
    """Split an (N, 3) point array into separate X, Y and Z coordinate arrays."""
    columns = XYZ.T[:]
    # row 0/1/2 of the transpose are the x/y/z coordinates of every point
    return columns[0, :], columns[1, :], columns[2, :]
def line(a, b):
    """Sample 100 points on the segment between ``a`` and ``b``.

    Returns x, y, z arrays for t in [0, 1): a*t + b*(1-t).
    """
    t = np.arange(0, 1, 0.01)
    coords = [a[axis] * t + b[axis] * (1 - t) for axis in range(3)]
    return coords[0], coords[1], coords[2]
### Build an oriented bounding box (OBB) ###
def buildOBB(points):
    """Build an OBB for ``points`` from the PCA of their covariance matrix.

    Returns:
        max_xyz_point: (3, 3) array; row i is axis_i scaled by the max projection.
        min_xyz_point: (3, 3) array; row i is axis_i scaled by the min projection.
        l: length of the box diagonal.
    """
    # covariance matrix of the point cloud
    S = np.cov(points, rowvar=0, bias=1)
    # eigen-decomposition: principal axes of the cloud
    w, eigvecs = LA.eig(S)
    # BUGFIX: np.linalg.eig returns eigenvectors as COLUMNS (eigvecs[:, i]);
    # the previous code sorted/indexed ROWS, mixing up the axes. Sort the
    # columns by ascending eigenvalue, then transpose so row i is one axis.
    axes = eigvecs[:, np.argsort(w)].T
    # normalize each axis (eig output is unit length already; kept for safety)
    u = np.asarray([axes[i] / np.linalg.norm(axes[i]) for i in range(3)])
    # project every point on every axis:
    # P V^T = [[p1*v1, p1*v2, p1*v3], ..., [pN*v1, pN*v2, pN*v3]]
    inner_product = np.dot(points, u.T)
    # extremal projections along each axis (max_stu_point = [s_max, t_max, u_max])
    max_stu_point = np.amax(inner_product, axis=0)
    min_stu_point = np.amin(inner_product, axis=0)
    # back to xyz coordinates: unit axis scaled by the extremal coordinate
    max_xyz_point = np.asarray([u[i] * max_stu_point[i] for i in range(3)])
    min_xyz_point = np.asarray([u[i] * min_stu_point[i] for i in range(3)])
    # two opposite corners of the box; their distance is the diagonal length
    vert_max = min_xyz_point[0] + min_xyz_point[1] + max_xyz_point[2]
    vert_min = max_xyz_point[0] + max_xyz_point[1] + min_xyz_point[2]
    l = np.linalg.norm(vert_max - vert_min)
    return max_xyz_point, min_xyz_point, l
### Build an axis-aligned bounding box (AABB) ###
def buildAABB(points):
    """Return the AABB corners (max_p, min_p) and the diagonal length l."""
    # the per-axis extrema are exactly the two opposite box corners
    max_p = np.amax(points, axis=0)
    min_p = np.amin(points, axis=0)
    # Euclidean distance between the two corners = box diagonal
    dx, dy, dz = max_p[0] - min_p[0], max_p[1] - min_p[1], max_p[2] - min_p[2]
    l = np.sqrt(dx**2 + dy**2 + dz**2)
    return max_p, min_p, l
def MakePoints(fn, bbox=(-2.5,2.5), grid_step=50, down_rate = 0.5, epsilon=0.05):
    """Sample grid points that lie near the implicit surface fn(x, y, z) = 0.

    fn        : implicit function; grid points with |fn| <= epsilon are kept.
    bbox      : (min, max) range reused for all three axes.
    grid_step : number of samples per axis (grid is grid_step**3 points).
    down_rate : fraction of candidate points kept by random downsampling
                (uses the global `random` module state — not reproducible
                unless the caller seeds it).
    epsilon   : |fn| threshold for "near the surface".
    Returns an (N, 3) array of surface-adjacent points.
    """
    #import time
    #start = time.time()
    xmin, xmax, ymin, ymax, zmin, zmax = bbox*3
    # build the X, Y, Z sampling grid
    x = np.linspace(xmin, xmax, grid_step)
    y = np.linspace(ymin, ymax, grid_step)
    z = np.linspace(zmin, zmax, grid_step)
    X, Y, Z = np.meshgrid(x, y, z)
    # evaluate fn over every grid slice (X[i][j] etc. are 1-D slices)
    W = np.array([[fn(X[i][j], Y[i][j], Z[i][j]) for j in range(grid_step)] for i in range(grid_step)])
    # previous version:
    #W = fn(X, Y, Z)
    # indices where W is within epsilon of zero, i.e. near the surface
    index = np.where(np.abs(W)<=epsilon)
    index = [(index[0][i], index[1][i], index[2][i]) for i in range(len(index[0]))]
    #print(index)
    # random downsampling of the candidate indices
    index = random.sample(index, int(len(index)*down_rate//1))
    # gather the grid coordinates of the kept near-surface samples
    pointX = np.array([X[i] for i in index])
    pointY = np.array([Y[i] for i in index])
    pointZ = np.array([Z[i] for i in index])
    # assemble points as [[x1,y1,z1],[x2,y2,z2],...]
    points = np.stack([pointX, pointY, pointZ])
    points = points.T
    #end = time.time()
    #print("time:{}s".format(end-start))
    return points
def ViewerInit(points, X, Y, Z, normals=[]):
    """Create a 3-D figure, plot the point cloud and its OBB, return the axes.

    NOTE(review): `normals=[]` is a mutable default; it is only read in the
    disabled code below, but confirm no caller relies on mutating it.
    """
    # set up the figure and 3-D axes
    fig = plt.figure()
    ax = Axes3D(fig)
    # axis labels (optional)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    # draw the point cloud
    ax.plot(X,Y,Z,marker="o",linestyle='None',color="white")
    """
    if len(normals) != 0:
        #法線を描画
        U, V, W = Disassemble(normals)
        ax.quiver(X, Y, Z, U, V, W, length=0.1, normalize=True, color="blue")
    """
    # oriented bounding box of the cloud (diagonal length is discarded)
    max_p, min_p, _ = buildOBB(points)
    # draw the OBB edges/vertices on the same axes
    OBBViewer(ax, max_p, min_p)
    return ax
# Draw the OBB on `ax` given its per-axis extremal corner points.
def OBBViewer(ax, max_p, min_p):
    """Draw the edges and key vertices of an oriented bounding box."""
    # cartesian product [s_max, s_min] x [t_max, t_min] x [u_max, u_min]
    # <=> the 8 box vertices (each vertex = sum of one extreme per axis)
    s_axis = np.vstack((max_p[0], min_p[0]))
    t_axis = np.vstack((max_p[1], min_p[1]))
    u_axis = np.vstack((max_p[2], min_p[2]))
    products = np.asarray(list(itertools.product(s_axis, t_axis, u_axis)))
    vertices = np.sum(products, axis=1)
    # bit pattern recording which extreme each vertex uses per axis
    bit = np.asarray([1, -1])
    vertices_bit = np.asarray(list(itertools.product(bit, bit, bit)))
    # connect vertices whose bit patterns differ in exactly one axis
    # (Hamming distance 1): those pairs are the box edges
    for i, v1 in enumerate(vertices_bit):
        for j, v2 in enumerate(vertices_bit):
            if np.count_nonzero(v1-v2) == 1:
                x, y, z = line(vertices[i], vertices[j])
                ax.plot(x,y,z,marker=".",color="orange")
    # one pair of opposite corners of the OBB (the diagonal endpoints)
    vert_max = min_p[0] + min_p[1] + max_p[2]
    vert_min = max_p[0] + max_p[1] + min_p[2]
    # split the extremal axis points into coordinates for plotting
    Xmax, Ymax, Zmax = Disassemble(max_p)
    Xmin, Ymin, Zmin = Disassemble(min_p)
    # also draw the extremal points and the diagonal endpoints
    ax.plot(Xmax,Ymax,Zmax,marker="X",linestyle="None",color="red")
    ax.plot(Xmin,Ymin,Zmin,marker="X",linestyle="None",color="blue")
    ax.plot([vert_max[0], vert_min[0]],[vert_max[1], vert_min[1]],[vert_max[2], vert_min[2]],marker="o",linestyle="None",color="black")
# Draw an AABB on `ax` given its max/min corner coordinates.
def AABBViewer(ax, max_p, min_p):
    """Draw the twelve edges of an axis-aligned bounding box."""
    # cartesian product of the per-axis extrema <=> the 8 box vertices
    x_axis = [max_p[0], min_p[0]]
    y_axis = [max_p[1], min_p[1]]
    z_axis = [max_p[2], min_p[2]]
    vertices = np.asarray(list(itertools.product(x_axis, y_axis, z_axis)))
    # bit pattern recording which extreme each vertex uses per axis
    bit = np.asarray([1, -1])
    vertices_bit = np.asarray(list(itertools.product(bit, bit, bit)))
    # connect vertices whose bit patterns differ in exactly one axis
    # (Hamming distance 1): those pairs are the box edges
    for i, v1 in enumerate(vertices_bit):
        for j, v2 in enumerate(vertices_bit):
            if np.count_nonzero(v1-v2) == 1:
                x, y, z = line(vertices[i], vertices[j])
                ax.plot(x,y,z,marker=".",color="orange")
# Color-code points by label
def LabelViewer(ax, points, label_list, max_label):
    """Scatter-plot ``points`` colored per label; label 0 (unlabeled) is white.

    The cluster whose id equals ``max_label`` is emphasised with "o" markers.
    Also prints "<label>:<count>" for each labeled cluster.
    """
    colorlist = ['#1f77b4','#ff7f0e','#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf']
    # highest label id present in label_list
    label_num = np.max(label_list)
    # unlabeled points (label 0) drawn in white
    X, Y, Z = Disassemble(points[np.where(label_list == 0)])
    ax.plot(X, Y, Z, marker=".",linestyle="None",color="white")
    for i in range(1, label_num+1):
        # restrict to the points carrying label i
        same_label_points = points[np.where(label_list == i)]
        print("{}:{}".format(i, same_label_points.shape[0]))
        # plot; colors cycle through colorlist
        X, Y, Z = Disassemble(same_label_points)
        if i == max_label:
            # emphasize the selected cluster with larger "o" markers
            ax.plot(X, Y, Z, marker="o",linestyle="None",color=colorlist[i%len(colorlist)])
        else:
            ax.plot(X, Y, Z, marker=".",linestyle="None",color=colorlist[i%len(colorlist)])
# Plot an implicit surface fn(x, y, z) = 0 as stacked contour slices.
# fn        ... left-hand side of fn(x, y, z) = 0
# AABB_size ... each AABB edge is scaled by AABB_size
def plot_implicit(ax, fn, points=None, AABB_size=2, bbox=(-2.5,2.5), contourNum=30):
    """Draw fn(x, y, z) = 0 on ``ax`` by contouring slices along each axis.

    When ``points`` is given, the plotting range is their AABB enlarged by
    AABB_size; otherwise ``bbox`` is reused for all three axes.
    """
    if points is not None:
        # BUGFIX: buildAABB returns three values (max_p, min_p, diagonal);
        # the old two-value unpacking raised ValueError whenever points
        # was supplied.
        max_p, min_p, _ = buildAABB(points)
        xmax, ymax, zmax = max_p[0], max_p[1], max_p[2]
        xmin, ymin, zmin = min_p[0], min_p[1], min_p[2]
        # enlarge the box around the points
        # NOTE(review): each min uses the already-enlarged max (and vice
        # versa), so growth is slightly asymmetric — confirm if intended.
        xmax = xmax + (xmax - xmin)/2 * AABB_size
        xmin = xmin - (xmax - xmin)/2 * AABB_size
        ymax = ymax + (ymax - ymin)/2 * AABB_size
        ymin = ymin - (ymax - ymin)/2 * AABB_size
        zmax = zmax + (zmax - zmin)/2 * AABB_size
        zmin = zmin - (zmax - zmin)/2 * AABB_size
    else:
        xmin, xmax, ymin, ymax, zmin, zmax = bbox*3
    A_X = np.linspace(xmin, xmax, 100) # resolution of the contour
    A_Y = np.linspace(ymin, ymax, 100)
    A_Z = np.linspace(zmin, zmax, 100)
    B_X = np.linspace(xmin, xmax, 15) # number of slices
    B_Y = np.linspace(ymin, ymax, 15)
    B_Z = np.linspace(zmin, zmax, 15)
    for z in B_Z: # plot contours in the XY plane
        X, Y = np.meshgrid(A_X, A_Y)
        Z = fn(X, Y, z)
        # [z] defines the only level to plot for this contour at this height
        ax.contour(X, Y, Z+z, [z], zdir='z')
    for y in B_Y: # plot contours in the XZ plane
        X, Z = np.meshgrid(A_X, A_Z)
        Y = fn(X, y, Z)
        ax.contour(X, Y+y, Z, [y], zdir='y')
    for x in B_X: # plot contours in the YZ plane
        Y, Z = np.meshgrid(A_Y, A_Z)
        X = fn(x, Y, Z)
        ax.contour(X+x, Y, Z, [x], zdir='x')
    # clamp the axes to the (enlarged) box
    ax.set_zlim3d(zmin, zmax)
    ax.set_xlim3d(xmin, xmax)
    ax.set_ylim3d(ymin, ymax)
def plot_normal(ax, figure, X, Y, Z):
    """Draw the normals of ``figure`` at the points (X, Y, Z) as red arrows.

    NOTE(review): assumes `figure.normal(X, Y, Z)` returns an (N, 3) array
    of normal vectors — confirm against the figure class.
    """
    # (alternative: sample points from the figure's implicit representation)
    #points, X, Y, Z = MakePoints(figure.f_rep, epsilon=0.01)
    # normals from the figure's analytic normal function
    normals = figure.normal(X, Y, Z)
    U, V, W = Disassemble(normals)
    # draw the normals as short normalized arrows
    ax.quiver(X, Y, Z, U, V, W, length=0.1,color='red', normalize=True)
#!/usr/bin/env python3
"""Calculates the path of a particle in a magnetic field.
This code was written for Python 3 and tested using Python 3.5.0
(64-bit) with Anaconda 2.4.0 (64-bit) on a computer running Ubuntu 14.04
LTS (64-bit).
"""
__author__ = 'Kyle Capobianco-Hogan'
# BUGFIX: was `__copyright` (missing the trailing underscores), so the
# conventional dunder metadata name was never actually defined.
__copyright__ = 'Copyright 2016'
__credits__ = ['Kyle Capobianco-Hogan']
#__license__
__version__ = '0.0.0'
__maintainer__ = 'Kyle Capobianco-Hogan'
__email__ = 'kylech.git@gmail.com'
__status__ = 'Development'
# ======================================================================
# Import modules.
# ======================================================================
# Standard library modules.
# Third party modules.
#import numpy as np
#import scipy.constants as const
# Custom modules.
#from coordinate_converter import *
import mcps_UI as UI
import mcps_prompt as prompt
# ======================================================================
# Define main function.
# ======================================================================
def main():
    """Print the program banner, prompt for a UI choice, launch it; returns 0."""
    # Print script name, file name, and version.
    print('\n' + '='*80 + '\n'*2
          + 'Magnetic Cloak Particle Simulation'.center(80, ' ') + '\n'
          + __file__.center(80, ' ') + ('v ' + __version__).center(80, ' ')
          + '\n'*2 + '='*80 + '\n')
    # Prompt user for desired UI; keys of UI.dictionary map to UI callables.
    key_UI = int(prompt.prompt('Select user interface:', UI.dictionary))
    UI.dictionary[key_UI]()
    return 0
# ======================================================================
# Run main function.
# ======================================================================
# Guarded so that importing this module no longer launches the interactive UI.
if __name__ == '__main__':
    main()
|
import numpy as np
#import denemee as den
import thirdtry as thr
##***train edilecek neural network ile test edilecek dataset uzunlugu aynı olmalı(bias weightlerden dolayı)
#import denemee as dn
# Read the test data set; the same set as training is reused to demonstrate
# that the neural network works (translated from the original Turkish note).
dataarray = []
i = 0
# with-statement guarantees the file is closed even if parsing raises
with open(r'C:\Users\ASUS\Desktop\test-data\ann-test1.txt') as f:
    # NOTE(review): this readline() skips the file's first line — presumably
    # a header; confirm against the data format.
    line = f.readline()
    for line in f:
        dataarray.append(line)
        # print(dataarray[i])3771
        i += 1
# print(i)
####################################################
# Accumulators filled by inputarr()/inputlabel()/labelmatrix() below.
inputarray=[]
labelarray=[]
labelmat=[]
setlen =30  # length of the test data set
hiddenlayer=thr.hiddenlay  # hidden-layer width, taken from the trained network
#np.random.seed(42)
#weighthid = np.random.rand(21,5)
#weightout = np.random.rand(5,3)
#lamda=0.01 #learningrate
# Trained weights and biases exported by the training module `thr`.
testweight1=np.array(thr.nweighthid)
testweight2=np.array(thr.nwweightout)
bias1=np.array(thr.nwbiashid, order='C')  # resize biases to one row per sample
bias1.resize((setlen,hiddenlayer))
bias2=np.array(thr.nwbiasout, order='C')
print(bias2.shape)
bias2.resize((3,setlen))
print(bias2.shape)
#print(testweight2)
####################################################
# Pre-build a setlen x 3 matrix of zeros to hold the one-hot class labels
# (each row is a distinct list so later in-place writes stay independent).
labelrow = []
for _ in range(setlen):
    row = [0, 0, 0]
    labelrow.append(row)
def inputarr():
    """Parse the first 21 columns of the first `setlen` rows into inputarray."""
    for row_idx in range(setlen):
        # take the first 21 whitespace-separated cells as float features
        features = [float(cell) for cell in dataarray[row_idx].split()[:21]]
        inputarray.append(features)
    return inputarray
def inputlabel():
    """Collect column 22 (the class label) of the first `setlen` rows.

    Labels are stored as floats in the module-level `labelarray`
    (kept numeric for normalizing).
    """
    for row_idx in range(setlen):
        values = [float(cell) for cell in dataarray[row_idx].split()]
        labelarray.append(values[21])
    return labelarray
inputarr()
inputlabel()
#print("labelarray",labelarray)
#print("inputarray",inputarray)
def labelmatrix():
    # Turn the numeric labels (1/2/3) collected by inputlabel() into one-hot
    # rows of the shared `labelrow` matrix.
    x=0
    for i in range (len(labelarray)):
        if(round(labelarray[i])==1):
            labelrow[x][0]=1
        if(round(labelarray[i])==2):
            labelrow[x][1]=1
        if(round(labelarray[i])==3):
            labelrow[x][2]=1
        #print(labelrow[x])
        x+=1
    # NOTE(review): this appends the whole `labelrow` matrix (one object), so
    # labelmat == [labelrow]; confirm a per-row append was not intended.
    labelmat.append(labelrow)
    return labelmat
labelmatrix()
#######################################################
# Materialize the parsed features/labels as numpy arrays.
npinput=np.array(inputarray)
# NOTE(review): uses `labelrow` (the one-hot matrix) rather than `labelmat`;
# that works because labelmat merely wraps labelrow, but confirm the intent.
nplabel=np.array(labelrow)
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))
# Forward pass through the trained network: hidden layer ...
ZH = np.dot(testweight1,npinput.T)+bias1.T
zh=sigmoid(ZH)
#print(zh.shape)
# ... then the 3-unit output layer.
ZO=np.dot(testweight2,zh)+bias2
zo=sigmoid(ZO)
print("realclasses","\n",nplabel)
#print("predicted value",zo.T)
# NOTE(review): this compares the ROUNDED maximum activation against each
# label bit (0/1), so a class is only reported when the max rounds to that
# bit's value; an argmax comparison is the usual approach — confirm.
for i in range (setlen):
    if(np.round(zo.T.max(axis=1)[i])==nplabel[i][0]):
        print("predicted class1")
    if(np.round(zo.T.max(axis=1)[i])==nplabel[i][1]):
        print("predicted class2")
    if(np.round(zo.T.max(axis=1)[i])==nplabel[i][2]):
        print("predicted class3")
"""
for i in range (setlen):
    x=np.abs(np.subtract(1,zo[i][0]))
    y=np.abs(np.subtract(1,zo[i][1]))
    z=np.abs(np.subtract(1,zo[i][2]))
    m=min(x,y,z)
    #if(min(x,y,z)==x):
        #print("1")
    #if(min(x,y,z)==y):
        #print("2")
    #if(min(x,y,z)==z):
        #print("3")
"""
|
import flask_bcrypt as _fb
import flask_migrate as _fm
import flask_sqlalchemy as _fs
# Shared Flask extension instances; bound to the app in init_app().
db = _fs.SQLAlchemy()
migrate = _fm.Migrate(db=db)
bcrypt = _fb.Bcrypt()
def init_app(app, **kwargs):
    # Wire the extensions to the given Flask app.
    # NOTE(review): assigning db.app directly is the legacy Flask-SQLAlchemy
    # shortcut; db.init_app() below is the supported path — confirm both are
    # really needed. Also, `bcrypt` and `**kwargs` are never used here.
    db.app = app
    migrate.init_app(app)
    db.init_app(app)
from .user import User, Role
from .book import Book
from .category import Category, category_book_table
|
# from flask import Flask, jsonify, request, WebScraperAndFormatter
#
# app = Flask(__name__)
#
# @app.route('/<string:url>', methods=['GET'])
# def index(url):
# events = scrape_and_format(url) #zipped object of events
# csv = csv_generate(events, url)
# csv_filename = '{}.csv'.format(url)
# json = json_generate(csv_filename, url)
# return 'SUCCESSFUL DEPLOYMENT'
#
# # @app.route('/csv', methods=['GET'])
# # def index():
# # return 'SUCCESSFUL DEPLOYMENT'
# #
# # @app.route('/json', methods=['GET'])
# # def index():
# # return 'SUCCESSFUL DEPLOYMENT'
#
#
#
# if __name__ == "__main__":
# app.run(debug=True, port=33507)
|
"""Connect to Database and create visualizations"""
from sqlalchemy import create_engine
import pandas as pd
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.palettes import turbo
# import concurrent.futures
# SECURITY(review): database credentials are hard-coded in this connection
# URL; load them from environment variables or a secrets store instead.
call = 'mysql+mysqlconnector://mausolorio:ducinALTUM7!@localhost/s&p500'
engine = create_engine(call)
# Load the full "materials" sector table into a DataFrame.
sql_querry = 'SELECT * FROM materials'
data = pd.read_sql(sql_querry, engine)
# Strip the provider's numeric prefixes from the OHLCV column names.
data.rename(columns={"1. open": "open", "2. high": "high",
                     "3. low": "low", "4. close": "close",
                     "5. volume": "volume"}, inplace=True)
# Create a figure with x_axis_type="datetime":
p = figure(x_axis_type='datetime', x_axis_label='Date',
           y_axis_label='US Dollars', tools='pan,wheel_zoom,box_zoom,reset',
           sizing_mode='stretch_both', title='Sector: Materials')
# Make a list of the unique values from the symbol column: company_list
# company_list = data.Symbol.unique().tolist()
# n = len(company_list)
# For testing only
company_list = ['AMCR', 'APD', 'AVY', 'ALB', 'BLL', 'CF']
n = len(company_list)
# Plot date along the x axis and price along the y axis
def draw_plot(name, color):
    """Add one company's open-price line and markers to the shared figure ``p``."""
    dates = data['date'][data.Symbol == name]
    opens = data['open'][data.Symbol == name]
    p.line(dates, opens, color=color, legend_label=name)
    p.circle(dates, opens, color=color, legend_label=name)
    # Legend styling (re-applied on every call; the assignments are idempotent).
    p.legend.location = "top_left"
    p.legend.click_policy = "hide"
    p.legend.title = 'Ticker'
    p.legend.title_text_font_style = "bold"
    p.legend.title_text_font_size = "15pt"
    return p
# with concurrent.futures.ThreadPoolExecutor() as executor:
#     args = ((name, color) for name, color in zip(company_list, turbo(n)))
#     executor.map(lambda x: draw_plot(*x), args)
# One line+marker series per company, colored from the turbo palette.
for name, color in zip(company_list, turbo(n)):
    draw_plot(name, color)
# Specify the name of the output file and show the result
output_file('materials.html')
show(p)
|
class Solution:
    def combine(self, n, k):
        """Return all k-element combinations of the integers 1..n.

        :type n: int
        :type k: int
        :rtype: List[List[int]]
        """
        candidates = list(range(1, n + 1))
        combos = []

        def walk(last_idx, chosen):
            # a finished combination is recorded; otherwise extend it with
            # strictly larger candidates to avoid duplicates
            if len(chosen) == k:
                combos.append(chosen.copy())
                return
            for nxt in range(last_idx + 1, len(candidates)):
                chosen.append(candidates[nxt])
                walk(nxt, chosen)
                chosen.pop()  # backtrack

        walk(-1, [])
        return combos
# Demo: print all 5-element combinations of 1..6 (six lists of length 5).
print(Solution().combine(6,5))
from decimal import Decimal
from ..schema_fields import (
ArrayField,
BooleanField,
DynamicArrayField,
Field,
CharField,
DecimalField,
IntegerField,
TextField
)
from .utils import assert_dict_equal
class TestField:
    """Schema generation and data loading for the base Field."""

    def test_schema_basic(self):
        assert Field().schema == {'type': 'string', 'format': 'text'}

    def test_schema(self):
        title = "Test Field"
        initial = "Thing"
        field = Field(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'text',
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_load_data(self):
        sample = "A test"
        assert Field().Meta.python_type(sample) == sample
class TestCharField:
    """Schema generation, choices handling, and data loading for CharField."""

    def test_schema_basic(self):
        assert CharField().schema == {'type': 'string', 'format': 'text'}

    def test_schema(self):
        title = "Test Field"
        initial = "Thing"
        field = CharField(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'text',
                'minLength': 1,
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_schema_min_max_length(self):
        title = "Test Field"
        initial = "Thing"
        shortest = 3
        longest = 5
        field = CharField(
            label=title,
            default=initial,
            required=True,
            min_length=shortest,
            max_length=longest,
        )
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'text',
                'minLength': shortest,
                'maxLength': longest,
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_schema_choices(self):
        title = "Test Field"
        initial = "cat"
        options = [('cat', 'Cat'), ('dog', 'Dog'), ('fish', 'Fish')]
        field = CharField(
            label=title,
            choices=options,
            default=initial,
            required=True,
        )
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'text',
                'minLength': 1,
                'title': title,
                'default': initial,
                # choices surface in the plain schema as their raw values
                'enum': [value for value, _ in options],
            },
            actual=field.schema,
        )

    def test_editor_schema_choices(self):
        title = "Test Field"
        initial = "cat"
        options = [('cat', 'Cat'), ('dog', 'Dog'), ('fish', 'Fish')]
        field = CharField(
            label=title,
            choices=options,
            default=initial,
            required=True,
        )
        # the editor schema exposes choices as an enumSource value/title mapping
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'text',
                'minLength': 1,
                'title': title,
                'default': initial,
                'enumSource': [
                    {
                        'source': [
                            {'value': value, 'title': text}
                            for (value, text) in options
                        ],
                        'title': '{{item.title}}',
                        'value': '{{item.value}}',
                    }
                ],
            },
            actual=field.editor_schema,
        )

    def test_load_data(self):
        sample = "A test"
        assert CharField().Meta.python_type(sample) == sample
assert loaded_data == test_data
class TestTextField:
    """Schema generation and data loading for TextField."""

    def test_schema_basic(self):
        assert_dict_equal(
            expected={'type': 'string', 'format': 'textarea'},
            actual=TextField().schema,
        )

    def test_schema(self):
        title = "Test Field"
        initial = "Thing"
        field = TextField(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'string',
                'format': 'textarea',
                'minLength': 1,
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_load_data(self):
        sample = "A test"
        assert TextField().Meta.python_type(sample) == sample
class TestIntegerField:
    """Schema generation and data loading for IntegerField."""

    def test_schema_basic(self):
        assert_dict_equal(
            expected={'type': 'integer', 'format': 'number'},
            actual=IntegerField().schema,
        )

    def test_schema(self):
        title = "Test Field"
        initial = 5
        field = IntegerField(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'integer',
                'format': 'number',
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_load_data(self):
        sample = 8
        assert IntegerField().Meta.python_type(sample) == sample
class TestDecimalField:
    """Schema generation and data loading for DecimalField."""

    def test_schema_basic(self):
        assert_dict_equal(
            expected={'type': 'decimal', 'format': 'number'},
            actual=DecimalField().schema,
        )

    def test_schema(self):
        title = "Test Field"
        initial = Decimal('5.5')
        field = DecimalField(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'decimal',
                'format': 'number',
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_load_data(self):
        sample = Decimal('3.14')
        assert DecimalField().Meta.python_type(sample) == sample
class TestBooleanField:
    """Schema generation and data loading for BooleanField."""

    def test_schema_basic(self):
        assert_dict_equal(
            expected={'type': 'boolean', 'format': 'checkbox'},
            actual=BooleanField().schema,
        )

    def test_schema(self):
        title = "Test Field"
        initial = False
        field = BooleanField(label=title, default=initial, required=True)
        assert_dict_equal(
            expected={
                'type': 'boolean',
                'format': 'checkbox',
                'title': title,
                'default': initial,
            },
            actual=field.schema,
        )

    def test_load_data(self):
        sample = True
        assert BooleanField().Meta.python_type(sample) == sample
class TestObjectField:
    """Schema generation, nesting, and instance loading for ObjectField.

    All inputs come from pytest fixtures (dog_field, person_field, the
    *_schema dicts, and the scooby_doo/shaggy sample payloads).
    """
    def test_schema(self, dog_field, dog_schema):
        assert_dict_equal(
            expected=dog_schema,
            actual=dog_field().schema
        )
    def test_typed_schema(self, dog_field, typed_dog_schema):
        assert_dict_equal(
            expected=typed_dog_schema,
            actual=dog_field().typed_schema
        )
    def test_load_data(self, dog_field, scooby_doo):
        # Loading a dict produces an instance exposing the keys as attributes.
        loaded_data = dog_field().Meta.python_type(scooby_doo)
        assert loaded_data.name == scooby_doo['name']
        assert loaded_data.breed == scooby_doo['breed']
    def test_nested_schema(self, person_field, person_schema):
        assert_dict_equal(expected=person_schema, actual=person_field().schema)
    def test_nested_editor_schema(self, person_field, person_editor_schema):
        assert_dict_equal(
            expected=person_editor_schema,
            actual=person_field().editor_schema
        )
    def test_load_nested_data(self, person_field, scooby_doo, shaggy):
        # Nested object fields load recursively (shaggy embeds scooby_doo).
        loaded_data = person_field().Meta.python_type(shaggy)
        assert loaded_data.name == shaggy['name']
        assert loaded_data.favourite_dog.name == scooby_doo['name']
        assert loaded_data.favourite_dog.breed == scooby_doo['breed']
    def test_instances_copy_methods(self, dog_field, scooby_doo):
        """
        Methods declared on the field should be available to the instance
        """
        loaded_data = dog_field().Meta.python_type(scooby_doo)
        assert loaded_data.name == scooby_doo['name']
        assert loaded_data.short_name == scooby_doo['name'][:3]
class TestSubClassedObjectField:
    """Subclassed object fields keep schema generation and inherit methods."""
    def test_schema(self, parrot_field, parrot_schema):
        assert_dict_equal(
            expected=parrot_schema,
            actual=parrot_field().schema
        )
    def test_typed_schema(self, parrot_field, typed_parrot_schema):
        assert_dict_equal(
            expected=typed_parrot_schema,
            actual=parrot_field().typed_schema
        )
    def test_load_data(self, parrot_field):
        polly = {'name': 'Polly', 'talks': True}
        loaded_data = parrot_field().Meta.python_type(polly)
        assert loaded_data.name == polly['name']
        assert loaded_data.talks == polly['talks']
    def test_instances_copy_parent_methods(self, parrot_field):
        """
        Methods declared on the parent field should be copied to the instance
        """
        polly = {'name': 'Polly', 'talks': True}
        loaded_data = parrot_field().Meta.python_type(polly)
        # loud_name is defined on the parent field class, not the subclass.
        assert loaded_data.loud_name == polly['name'].upper()
class TestArrayField:
    """Homogeneous arrays wrap a single base field as their item schema."""
    def test_schema(self, dog_field, dog_schema):
        dog_list_field = ArrayField(base_field=dog_field())
        assert_dict_equal(
            expected={
                'type': 'array',
                'format': 'table',
                'title': 'Dog List',
                'items': dog_schema
            },
            actual=dog_list_field.schema
        )
    def test_load_data(self, dog_field, scooby_doo, snoopy):
        # Each list element is converted via the base field's python_type.
        dog_list_field = ArrayField(base_field=dog_field())
        dog_list = [scooby_doo, snoopy]
        loaded_data = dog_list_field.Meta.python_type(dog_list)
        assert len(loaded_data) == 2
        scooby_doo_instance = loaded_data[0]
        snoopy_instance = loaded_data[1]
        assert scooby_doo_instance.name == scooby_doo['name']
        assert scooby_doo_instance.breed == scooby_doo['breed']
        assert snoopy_instance.name == snoopy['name']
        assert snoopy_instance.breed == snoopy['breed']
class TestDynamicArrayField:
    """Heterogeneous ('oneOf') arrays mixing several allowed field types."""
    def test_schema(
        self, dog_field, fish_field, typed_dog_schema, typed_fish_schema
    ):
        item_label = "Pet"
        schema_name = "pet_list"
        unique_items = True
        min_items = 2
        max_items = 5
        pet_field = DynamicArrayField(
            schema_name=schema_name,
            item_label=item_label,
            allowed_fields=[dog_field(), fish_field()],
            unique_items=unique_items,
            min_items=min_items,
            max_items=max_items
        )
        assert_dict_equal(
            expected={
                'type': 'array',
                'format': 'tabs',
                'title': schema_name.replace("_", " ").title(),
                'uniqueItems': unique_items,
                'minItems': min_items,
                'maxItems': max_items,
                'items': {
                    # json-editor style header; {{i1}} is the 1-based row index
                    'headerTemplate': "{} {{{{i1}}}}.".format(item_label),
                    'oneOf': [typed_dog_schema, typed_fish_schema],
                    'title': item_label
                }
            },
            actual=pet_field.schema
        )
    def test_editor_schema(
        self, dog_field, fish_field, typed_dog_schema, typed_fish_editor_schema
    ):
        item_label = "Pet"
        schema_name = "pet_list"
        unique_items = True
        min_items = 2
        max_items = 5
        pet_field = DynamicArrayField(
            schema_name=schema_name,
            item_label=item_label,
            allowed_fields=[dog_field(), fish_field()],
            unique_items=unique_items,
            min_items=min_items,
            max_items=max_items
        )
        # editor_schema differs from schema only in the fish editor variant.
        assert_dict_equal(
            expected={
                'type': 'array',
                'format': 'tabs',
                'title': schema_name.replace("_", " ").title(),
                'uniqueItems': unique_items,
                'minItems': min_items,
                'maxItems': max_items,
                'items': {
                    'headerTemplate': "{} {{{{i1}}}}.".format(item_label),
                    'oneOf': [typed_dog_schema, typed_fish_editor_schema],
                    'title': item_label
                }
            },
            actual=pet_field.editor_schema
        )
    def test_auto_generated_schema_name(
        self, dog_field, fish_field, typed_dog_schema, typed_fish_schema
    ):
        """
        If no schema name is provided, it should be built from allowed fields
        """
        item_label = "Pet"
        pet_field = DynamicArrayField(
            item_label=item_label,
            allowed_fields=[dog_field(), fish_field()]
        )
        assert pet_field.Meta.schema_name == "one_of_dog_or_fish"
        assert pet_field.schema['title'] == "One Of Dog Or Fish"
    def test_provided_schema_name(
        self, dog_field, fish_field, typed_dog_schema, typed_fish_schema
    ):
        """
        If a schema name is provided, it should be available as class meta
        """
        item_label = "Pet"
        pet_field = DynamicArrayField(
            item_label=item_label,
            schema_name="pet",
            allowed_fields=[dog_field(), fish_field()]
        )
        assert pet_field.Meta.schema_name == "pet"
    def test_load_data(
        self,
        dog_field,
        fish_field,
        typed_dog_schema,
        typed_fish_schema,
        scooby_doo,
        nemo
    ):
        item_label = "Pet"
        pet_field = DynamicArrayField(
            item_label=item_label,
            allowed_fields=[dog_field(), fish_field()]
        )
        # Elements are tagged with schemaName so the loader can pick the
        # matching allowed field for each item.
        pet_list = [
            {
                "schemaName": "dog",
                "data": scooby_doo
            },
            {
                "schemaName": "fish",
                "data": nemo
            }
        ]
        loaded_data = pet_field.Meta.python_type(pet_list)
        assert len(loaded_data) == 2
        scooby_doo_instance = loaded_data[0]
        nemo_instance = loaded_data[1]
        assert scooby_doo_instance.name == scooby_doo['name']
        assert scooby_doo_instance.breed == scooby_doo['breed']
        assert nemo_instance.name == nemo['name']
        assert nemo_instance.salt_water == nemo['salt_water']
|
def solution(A):
    """Count the equi-leader split points of A (Codility EquiLeader).

    An index idx is an equi-leader if the dominator of A is also the
    dominator of both A[:idx+1] and A[idx+1:].
    """
    n = len(A)
    lead, total = leader(A, n)
    if lead == "NoDominator":
        return 0
    equi_leaders = 0
    prefix_count = 0
    for idx, item in enumerate(A):
        if item == lead:
            prefix_count += 1
        # The leader dominates a slice iff it fills strictly more than half.
        left_ok = prefix_count > (idx + 1) // 2
        right_ok = total - prefix_count > (n - idx - 1) // 2
        if left_ok and right_ok:
            equi_leaders += 1
    return equi_leaders
def leader(A, n):
    """Return (dominator, count) of A, or ("NoDominator", count) if none.

    Uses the Boyer-Moore majority-vote scan to pick a candidate in O(n) time
    and O(1) space, then a second pass to verify it. `count` is the number of
    occurrences of the candidate (0 when no candidate survived the vote).

    BUGFIX: the original used two strict comparisons (`count > n // 2` set
    `leader`, `count < n // 2` returned) so `count == n // 2` — e.g. the
    sentinel -1 genuinely appearing n/2 times, or an empty list — left
    `leader` unbound and raised UnboundLocalError.
    """
    size = 0
    value = None
    for i in range(n):
        if size == 0:
            size = 1
            value = A[i]
        elif value != A[i]:
            size -= 1
        else:
            size += 1
    # -1 keeps the original sentinel for "no surviving candidate".
    candidate = value if size > 0 else -1
    count = sum(1 for k in range(n) if A[k] == candidate)
    if count > n // 2:
        return candidate, count
    return "NoDominator", count
# name: P.U.B. smart calculator system ("P.U.B.智能计算系统")
# author: Thomas·P
# date: 2020.11.29
# version: 0.0.0
import math
import time
# Prompt asking the user to choose a computation type (1 or 2 + ENTER).
start_input = "请选择您需要的计算类型(按1或2并按下回车(ENTER)):"
print("欢迎来到P.U.B.智能计算系统!")
time.sleep(2)
# Menu: 1 = algebra, 2 = geometry.
print("以下是本系统支持的数学计算领域:\n1.代数\n2.几何")
time.sleep(2)
# Outer loop: keep offering the menu forever (there is no exit option).
while True:
    choose = input(start_input)
    # Restore the full prompt in case it was blanked after invalid input.
    start_input = "请选择您需要的计算类型(按1或2并按下回车(ENTER)):"
    # NOTE(review): both menu choices (1 = algebra, 2 = geometry) currently
    # run the same circle-area flow — confirm whether choice 1 should differ.
    if choose == '1' or choose == '2':
        input_content = "请输入圆的半径:"
        # Inner loop: re-prompt until a valid radius is entered.
        while True:
            a = input(input_content)
            input_content = "请输入圆的半径:"
            try:
                r = float(a)
                s = math.pi * r ** 2
                print("圆的面积是:", s)
                break
            except ValueError:
                # Invalid number: print the error without a newline and blank
                # the next prompt so the two messages read as one line.
                print("您输入的格式有误,请重新输入:", end='')
                input_content = ''
                continue
    else:
        # Invalid menu choice: same inline-error technique as above.
        print("您输入的格式有误,请重新输入:", end='')
        start_input = ''
        continue
|
import mosaic
import numpy as np
import prettypyplot as pplt
from matplotlib import pyplot as plt
pplt.use_style(colors='tab20c', figsize=2.4)
def main():
    """Grid-search the k-medoids cluster count on a sample trajectory and
    plot the cross-validated silhouette score (mean ± std) against k."""
    # Load trajectory from file
    # traj = np.loadtxt(filename)
    # Here we use some random sample data
    traj = create_traj()
    # specify parameters grid: every cluster count from 2 up to n_features-1
    n_clusters = np.arange(2, traj.shape[1])
    params = {'n_clusters': n_clusters}
    search = mosaic.GridSearchCV(
        similarity=mosaic.Similarity(),
        clustering=mosaic.Clustering(
            mode='kmedoids',
            n_clusters=2,  # any dummy value is good here; the grid overrides it
        ),
        param_grid=params,
    ).fit(traj)
    # plotting result: shaded band is ±1 std around the mean CV score,
    # drawn first so the mean line ends up on top.
    fig, ax = plt.subplots()
    mean_score = search.cv_results_['mean_test_score']
    std_score = search.cv_results_['std_test_score']
    ax.fill_between(
        n_clusters,
        mean_score + std_score,
        mean_score - std_score,
        color='C2',
    )
    ax.plot(n_clusters, mean_score + std_score, c='C1')
    ax.plot(n_clusters, mean_score - std_score, c='C1')
    ax.plot(n_clusters, mean_score, c='C0')
    ax.set_xlim([0, traj.shape[1]])
    ax.set_xlabel('$k$ no. of clusters')
    ax.set_ylabel('silhouette score')
    pplt.savefig('cv_silhouette.pdf')
def create_traj():
    """Build a deterministic (1000, 30) sample trajectory.

    Columns are 10 shifted sines, 10 shifted cosines and 10 flat features,
    all perturbed with Gaussian noise. The seed is reset on every call, so
    repeated calls return identical arrays.
    """
    np.random.seed(42)
    grid = np.linspace(0, 2 * np.pi, 1000)
    # Random phase offsets drawn once and shared by the sin/cos features.
    offsets = np.random.uniform(low=-np.pi / 6, high=np.pi / 6, size=10)
    features = (
        [np.sin(grid + phase) for phase in offsets]
        + [np.cos(grid + phase) for phase in offsets]
        + [np.zeros_like(grid) for _ in offsets]
    )
    clean = np.array(features).T
    return clean + np.random.normal(size=clean.shape, scale=.2)
if __name__ == '__main__':
main()
|
# from django.test import Client
# from django.urls import reverse
# from test_plus.test import TestCase
#
# from zhihu.qa.models import Question, Answer
# class QAViewsTest(TestCase):
# def setUp(self):
# self.user = self.make_user("user01")
# self.other_user = self.make_user("user02")
# self.client = Client()
# self.other_client = Client()
# self.client.login(username="user01", password="password")
# self.other_client.login(username="user02", password="password")
# self.question_one = Question.objects.create(
# user=self.user,
# title="问题1",
# content="问题1的内容",
# tags="测试1, 测试2"
# )
# self.question_two = Question.objects.create(
# user=self.user,
# title="问题2",
# content="问题2的内容",
# has_answer=True,
# tags="测试1, 测试2"
# )
# self.answer = Answer.objects.create(
# user=self.user,
# question=self.question_two,
# content="问题2被采纳的回答",
# is_answer=True
# )
#
# def test_index_questions(self):
# response = self.client.get(reverse("qa:all_q"))
# assert response.status_code == 200
# assert "问题1" in str(response.context["questions"])
#
# def test_create_question_view(self):
# current_count = Question.objects.count()
# response = self.client.post(reverse("qa:ask_question"),
# {"title": "问题标题",
# "content": "问题内容",
# "status": "O",
# "tags": "测试标签"})
# assert response.status_code == 302
# new_question = Question.objects.first()
# assert new_question.title == "问题标题"
# assert Question.objects.count() == current_count + 1
#
# def test_answered_questions(self):
# response = self.client.get(reverse("qa:answered_q"))
# self.assertEqual(response.status_code, 200)
# self.assertTrue("问题2" in str(response.context["questions"]))
#
# def test_unanswered_questions(self):
# response = self.client.get(reverse("qa:unanswered_q"))
# assert response.status_code == 200
# assert "问题1" in str(response.context["questions"])
#
# def test_answer_question(self):
# current_answer_count = Answer.objects.count()
# response = self.client.post(
# reverse("qa:propose_answer", kwargs={"question_id": self.question_one.id}), {"content": "问题1的回答"}
# )
# assert response.status_code == 302
# assert Answer.objects.count() == current_answer_count + 1
#
# def test_question_upvote(self):
# """赞同问题"""
# response_one = self.client.post(
# reverse("qa:question_vote"),
# {"value": "U", "question": self.question_one.id},
# HTTP_X_REQUESTED_WITH="XMLHttpRequest"
# )
# assert response_one.status_code == 200
#
# def test_question_downvote(self):
# """反对问题"""
# response_one = self.client.post(
# reverse("qa:question_vote"),
# {"value": "D", "question": self.question_one.id},
# HTTP_X_REQUESTED_WITH="XMLHttpRequest")
# assert response_one.status_code == 200
#
# def test_answer_upvote(self):
# """赞同回答"""
# response_one = self.client.post(
# reverse("qa:answer_vote"),
# {"value": "U", "answer": self.answer.uuid_id},
# HTTP_X_REQUESTED_WITH="XMLHttpRequest")
# assert response_one.status_code == 200
#
# def test_answer_downvote(self):
# """反对回答"""
# response_one = self.client.post(
# reverse("qa:answer_vote"),
# {"value": "D", "answer": self.answer.uuid_id},
# HTTP_X_REQUESTED_WITH="XMLHttpRequest")
# assert response_one.status_code == 200
#
# def test_accept_answer(self):
# """接受回答"""
# response_one = self.client.post(
# reverse("qa:accept_answer"),
# {"answer": self.answer.uuid_id},
# HTTP_X_REQUESTED_WITH="XMLHttpRequest")
# assert response_one.status_code == 200
|
#8-12 A simple class illustrating the danger of a mutable default value
class Bus:
    """A bus model haunted by ghost passengers.

    Deliberately buggy teaching example of a mutable default argument:
    ``passengers=[]`` is evaluated once at function-definition time, so every
    Bus created without an explicit list shares the same underlying list —
    passengers picked on one such bus "haunt" all the others. Do not "fix"
    this; the bug is the point of the example.
    """
    def __init__(self,passengers = []):
        # Aliases the (possibly shared) default list instead of copying it.
        self.passengers = passengers
    def pick(self,name):
        self.passengers.append(name)
    def drop(self,name):
        # list.remove: raises ValueError if `name` is not aboard.
        self.passengers.remove(name)
|
import os
import sys
from django.conf import settings
# Resolve paths relative to the current working directory, so the script must
# be launched from the directory containing templates/ and static/.
BASE_DIR = os.getcwd() # os.path.dirname(__file__)
print('Here', BASE_DIR, __file__)  # debug: show where we are running from
print(os.path.join(BASE_DIR, 'templates'))
# NOTE(review): raises FileNotFoundError at import time if templates/ is absent.
print(os.listdir(os.path.join(BASE_DIR, 'templates')))
# Configuration from the environment, with development-friendly defaults.
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
SECRET_KEY = os.environ.get('SECRET_KEY', 'django-insecure-)4c%#2fvie3kld4t$sv5^d_#35=!k*_gs4-u81m9#@!vov@g$&')
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
# Minimal in-module configuration (single-file Django app pattern).
settings.configure(
    DEBUG=DEBUG,
    SECRET_KEY=SECRET_KEY,
    ALLOWED_HOSTS=ALLOWED_HOSTS,
    ROOT_URLCONF=__name__,
    # BUGFIX: this file targets Django >= 2.0 (it uses re_path and f-strings),
    # where the old MIDDLEWARE_CLASSES setting is ignored — the middleware was
    # silently never installed. The modern setting is MIDDLEWARE.
    MIDDLEWARE=(
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    ),
    INSTALLED_APPS=(
        'django.contrib.staticfiles',
    ),
    TEMPLATES=(
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': (os.path.join(BASE_DIR, 'templates'), ),
        },
    ),
    STATICFILES_DIRS=(
        os.path.join(BASE_DIR, 'static'),
    ),
    STATIC_URL='/static/',
)
from django import forms
from django.core.cache import cache
class ImageForm(forms.Form):
    """Form to validate requested placeholder image."""
    # Dimensions are capped at 2000px to bound the work done per request.
    height = forms.IntegerField(min_value=1, max_value=2000)
    width = forms.IntegerField(min_value=1, max_value=2000)
    def generate(self):
        """Return the placeholder payload for the validated dimensions.

        Must only be called after is_valid(); the result is memoised in the
        cache for 60 seconds under a per-dimension key.
        """
        height = self.cleaned_data['height']
        width = self.cleaned_data['width']
        key = f'{height}.{width}'
        content = cache.get(key)
        if content is None:
            content = f'{height}:{width}'
            cache.set(key, content, 60)
        return content
from django.core.wsgi import get_wsgi_application
from django.http import HttpResponse, HttpResponseBadRequest
from django.urls.base import reverse
from django.shortcuts import render
import hashlib
from django.views.decorators.http import etag
def generate_etag(request, width, height):
    """Return a stable ETag derived only from the requested dimensions.

    The request argument is unused; the signature is dictated by django's
    @etag decorator, which passes (request, *view_kwargs).
    """
    key = 'Placeholder: {0} x {1}'.format(width, height)
    digest = hashlib.sha1(key.encode('utf-8'))
    return digest.hexdigest()
@etag(generate_etag)
def placeholder(request, width, height):
    """Serve the placeholder payload for /image/<width>x<height>/.

    Returns 400 when the dimensions fail ImageForm validation.

    BUGFIX: the first parameter was misnamed `requests` — django (and the
    @etag decorator) pass the HttpRequest positionally, so the name now
    matches the convention used by index() below. BUGFIX: 'string' is not a
    valid MIME type; the text payload is served as 'text/plain'.
    """
    form = ImageForm({'height': height, 'width': width})
    if not form.is_valid():
        return HttpResponseBadRequest('Invalid Image Requests')
    return HttpResponse(form.generate(), content_type='text/plain')
def index(request):
    """Render the homepage with an absolute URL for an example placeholder.

    Cleanup: removed the per-request debug print() calls and the dead
    commented-out HttpResponse — rendering behaviour is unchanged.
    """
    example = reverse('placeholder', kwargs={'width': 50, 'height': 50})
    context = {
        'example': request.build_absolute_uri(example)
    }
    return render(request, 'home.html', context)
# from django.conf.urls import url
from django.conf.urls import re_path
# URL routing: /image/<width>x<height>/ serves the placeholder, / the homepage.
urlpatterns = (
    re_path(r'^image/(?P<width>[0-9]+)x(?P<height>[0-9]+)/$', placeholder, name='placeholder'),
    re_path(r'^$', index),
)
# WSGI entry point for application servers; running the module directly hands
# control to django's management CLI (e.g. `python placeholder.py runserver`).
application = get_wsgi_application()
if __name__ == "__main__":
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
# https://github.com/lightweightdjango/examples/blob/chapter-2/placeholder/placeholder.py
# import hashlib
# import os
# import sys
# from io import BytesIO
# from PIL import Image, ImageDraw
# from django.conf import settings
# DEBUG = os.environ.get('DEBUG', 'on') == 'on'
# SECRET_KEY = os.environ.get('SECRET_KEY',
# '%jv_4#hoaqwig2gu!eg#^ozptd*a@88u(aasv7z!7xt^5(*i&k')
# ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
# BASE_DIR = os.path.dirname(__file__)
# settings.configure(
# DEBUG=DEBUG,
# SECRET_KEY=SECRET_KEY,
# ALLOWED_HOSTS=ALLOWED_HOSTS,
# ROOT_URLCONF=__name__,
# MIDDLEWARE_CLASSES=(
# 'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# ),
# INSTALLED_APPS=(
# 'django.contrib.staticfiles',
# ),
# TEMPLATES=(
# {
# 'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': (os.path.join(BASE_DIR, 'templates'), ),
# },
# ),
# STATICFILES_DIRS=(
# os.path.join(BASE_DIR, 'static'),
# ),
# STATIC_URL='/static/',
# )
# from django import forms
# from django.conf.urls import url
# from django.core.cache import cache
# from django.urls.base import reverse
# from django.core.wsgi import get_wsgi_application
# from django.http import HttpResponse, HttpResponseBadRequest
# from django.shortcuts import render
# from django.views.decorators.http import etag
# class ImageForm(forms.Form):
# """Form to validate requested placeholder image."""
# height = forms.IntegerField(min_value=1, max_value=2000)
# width = forms.IntegerField(min_value=1, max_value=2000)
# def generate(self, image_format='PNG'):
# """Generate an image of the given type and return as raw bytes."""
# height = self.cleaned_data['height']
# width = self.cleaned_data['width']
# key = '{}.{}.{}'.format(width, height, image_format)
# content = cache.get(key)
# if content is None:
# image = Image.new('RGB', (width, height))
# draw = ImageDraw.Draw(image)
# text = '{} X {}'.format(width, height)
# textwidth, textheight = draw.textsize(text)
# if textwidth < width and textheight < height:
# texttop = (height - textheight) // 2
# textleft = (width - textwidth) // 2
# draw.text((textleft, texttop), text, fill=(255, 255, 255))
# content = BytesIO()
# image.save(content, image_format)
# content.seek(0)
# cache.set(key, content, 60 * 60)
# return content
# def generate_etag(request, width, height):
# content = 'Placeholder: {0} x {1}'.format(width, height)
# return hashlib.sha1(content.encode('utf-8')).hexdigest()
# @etag(generate_etag)
# def placeholder(request, width, height):
# form = ImageForm({'height': height, 'width': width})
# if form.is_valid():
# image = form.generate()
# return HttpResponse(image, content_type='image/png')
# else:
# return HttpResponseBadRequest('Invalid Image Request')
# def index(request):
# example = reverse('placeholder', kwargs={'width': 50, 'height':50})
# context = {
# 'example': request.build_absolute_uri(example)
# }
# return render(request, 'home.html', context)
# urlpatterns = (
# url(r'^image/(?P<width>[0-9]+)x(?P<height>[0-9]+)/$', placeholder,
# name='placeholder'),
# url(r'^$', index, name='homepage'),
# )
# application = get_wsgi_application()
# if __name__ == "__main__":
# from django.core.management import execute_from_command_line
# execute_from_command_line(sys.argv) |
"""
File: convect.py
Author: Sean Blake
Date: December 2012 - January 2013
This code uses data from the 'Model S' solar data set. This can be found at: http://users-phys.au.dk/jcd/solar_models/
The code can be (roughly) separated into 3 parts.
1) Data extraction + definition of the functions used.
-lines 18-340
2) The 'For' loop used for the motion of the cell.
-lines 340-462
3) The various plots used for my project. These plots are all commented out. In order to use a particular plot, comment that plot back into the code, and run again.
-lines 462-930
"""
import pdb
import math
import scipy
from scipy import interpolate
import matplotlib.font_manager
# Added: the script uses np.loadtxt plus the bare names rc, pi, array,
# arange, interp and exp, but numpy/pylab were never imported.
import numpy as np
from pylab import *
from time import *
t0=clock()
rc('text', usetex=True) #Allows 'tex' in my graphs
rc('font',**{'family':'times','sans-serif':['times']}) #Uses times font in my plots.
############################################## Data Extraction ##############################################
# The GONG file interleaves five records per radial point, hence the [k::5]
# strides below; every array is then reversed to run from centre to surface.
a1, a2, a3, a4, a5 = np.loadtxt('bleh.txt', usecols = (0, 1, 2, 3, 4), unpack=True, skiprows = 0) #extracting data from main GONG file.
adgrad = a1[2::5]
opacity = a3[1::5]
opacity = opacity[::-1]
opacity= list(opacity)
del opacity[-1]  # drop the last sample so opacity lines up with the [1::] slices below
opacity = array(opacity)
t1 = a3[0::5] #temperature
t1 = t1[::-1]
t1 = t1[1::]
r1 = a1[0::5] #radius
r1 = r1[::-1]
r1 = ((r1/100)/6.96342E8)  # cm -> m, then divide by the solar radius -> fraction of R_sun
r1 = r1[1::]
m1 = a2[0::5] #mass
m1 = m1[::-1]
m1 = exp(m1)  # file stores ln(mass); presumably in units of M_sun — TODO confirm
m1 = m1  # no-op; kept as in the original
m1 = m1[1::]
rho1 = a5[0::5] #density
rho1 = rho1[::-1]
rho1 = rho1 * 1E3  # g/cm^3 -> kg/m^3
rho1 = rho1[1::]
P1 = a4[0::5] #Pressure
P1 = P1[::-1]
P1 = P1*(0.1)  # dyn/cm^2 -> Pa
P1 = P1[1::]
Cp = a3[3::5] #Specific heat capacity at constant pressure
Cp = Cp[::-1]
Cp = Cp[1::]
# Secondary data set (coarser grid, denoted by the '2' suffix).
gamma2 = np.loadtxt('zgamma.txt', unpack=True, skiprows=0)
r2 = np.loadtxt('zradius.txt', unpack=True, skiprows = 0)
t2 = np.loadtxt('ztemp.txt', unpack=True, skiprows = 0) #these are from the secondary data file.
rho2 = np.loadtxt('zrho.txt', unpack=True, skiprows = 0)
P2 = np.loadtxt('zpressure.txt', unpack=True, skiprows = 0)
m2 = np.loadtxt('zmass.txt', unpack=True, skiprows = 0)
gamm=[] #allows gamma (from secondary set of data) to be used with first set of data
for i in arange(0, len(r1), 1):
    y = interp(r1[i], r2, gamma2)  # interpolate gamma onto the primary radius grid
    gamm.append(y)
gamma1 = array(gamm)
P2 = P2 * 0.1 #Converting Dynes to Pa
rho2 = rho2 * 1E3 #Converting g/cm^3 to kg/m^3
############################################## Constants ##############################################
radiussol = 6.96342E8 #Radius of Sun (m)
kxc = 1.3806488E-23 #Boltzmanns constant
uu = 1.660538E-27 #Atomic mass constant (kg)
mu = 0.6 #average mean molecular weight used for the Sun
G = 6.67428E-11 #Gravitational Constant
gasconstant = 8.3144621 #Gas Constant
sbconst = 5.670400E-8 #Stefann-Boltzmann Constant
c = 3E8 # Speed of light
solarmass = 1.9891E30 #solar mass in kg
xx1 = r1 * radiussol #makes array for radius of sun in metres as opposed to fraction (as with r1)
xx2 = r2 * radiussol
"""----------------------------------------------------------------------------------------------------------------------"""
############################################## Adjustments of constants/user inputs ##############################################
#These are used if you want to particularly specify a cell's conditions.
# NOTE: raw_input/clock make this a Python 2 script; under Python 3 these
# would be input() and time.perf_counter().
m_cell = float(raw_input("Mass (Mkg): ")) #Enter mass of cell in mega-kilograms
r_initial1 = float(raw_input("Radial Fraction: ")) #Radial Fraction of cell
t_initial = float(raw_input("Temperature (MK): ")) #Temperature of cell
#m_cell = 1000
#t_initial = 2.5
#r_initial1 = 0.9
#r_initial2 = 0.8
m_cell = m_cell * 1E6 #puts mass of cell into mega-kgs.
t_initial = t_initial *1E6 #temperature of cell into mega-kelvin
n = (m_cell * 1000)/2.0158 #number of moles in cell (assumes molecular hydrogen, M = 2.0158 g/mol)
R = 8.3144621 #Gas constant (duplicate of gasconstant above)
rx1 = r_initial1 * 6.96342E8 #initial radius of cell in metres
"""----------------------------------------------------------------------------------------------------------------------"""
############################################## Various Functions ##############################################
def sunvol(xx1): #calculates the enclosed VOLUME at radius 'xx1' (the original comment said radius).
    return (4./3)*pi*(xx1)**3
def idealtemp(P1): #calculates the 'ideal' temperature of the Sun, given pressure and density by Model S.
    return ((P1 * mu * uu) / (rho1 * kxc)) #This is a measure of how much the Model S Sun acts like an ideal gas.
def idealdens(P1, t1): #calculates the 'ideal' density of the Sun, given pressure and temperature by Model S.
    return ((P1 * mu * uu) / (t1 * kxc)) #This is also a measure of how much the Model S Sun acts like an ideal gas.
def dens(P, temp): #Calculating density of cell for pressures 'P' and cell temperatures via ideal gas law.
    return ((P * mu * uu) / (temp * kxc))
def volbub(m_cell, P, temp): #Calculating volume of cell for all using the density calculated above.
    aaa = m_cell
    bbb = (P * mu * uu)
    ccc = (temp * kxc)
    ddd = bbb/ccc  # cell density via the ideal gas law (same as dens())
    eee = aaa/ddd  # volume = mass / density
    return eee
def radiuscell(m_cell, P, temp): #Radius of the cell calculated from volume above. Assumes spherical cell.
    aaa = 3./(4. * pi)
    bbb = aaa * volbub(m_cell, P, temp)
    ccc = bbb ** (1./3.)  # r = (3V / (4*pi))**(1/3)
    return ccc
def areacell(m_cell, P, temp): #Surface area of Cell. Calculated from cell radius above.
    return (4 * pi * radiuscell(m_cell, P, temp)**2)
def g(m, z): #Function for gravity at all positions within the Sun. 'm' is in solar masses (converted to kg here).
    return (G *m * 1.9891E30/ (z)**2)
def buoy(m, z, m_cell, P, t_initial, rho): #Calculating buoyancy of cell for all r. Function is broken down line by line as:
    aaa = (G *m * 1.9891E30/ (z)**2) #aaa = Gravity (acceleration) at position
    bbb = (m_cell / ((P * mu * uu) / (t_initial * kxc))) #bbb = volume at position
    ccc = rho #ccc = ambient density at position
    ddd = aaa * bbb * ccc #combining above for buoyancy force
    eee = (ddd)/m_cell #eee = buoyancy acceleration (divide by mass)
    return eee
def netacceleration(m, z, m_cell, P, t_initial, rho): #Buoyancy acceleration minus gravity (duplicates buoy(); refactor candidate). Broken down as follows:
    aaa = (G *m * 1.9891E30/ (z)**2) #aaa = Gravity (acceleration) at position
    bbb = (m_cell / ((P * mu * uu) / (t_initial * kxc))) #bbb = volume at position
    ccc = rho #ccc = ambient density at position
    ddd = aaa * bbb * ccc #combining above for buoyancy force
    eee = (ddd)/m_cell #eee = buoyancy acceleration
    fff = eee - aaa #fff = buoyancy acceleration -gravity acceleration = net acceleration
    return fff
def pheight(P, rho, m, x): #Pressure Scale Height calculated for each point: H = P / (rho * g).
    return (P/(rho * g(m, x)))
############################################## Calculating Differences Between top/bottom of Cell ##############################################
""" This was used for plot number (). It works like so:
The cell radius is found. This is added and taken away from the cells position (at cell's centre) to get position at top and bottom of cell.
The conditions at these two points (temperature, pressure, density) are found. The difference between these conditions are found, then divided by the value of the condition at the centre of the cell.
This gives a rough way of measuring how well the spherical cell model works as the cell rises."""
def rminus(z, m_cell, P, t_initial): #position of cell minus radius, or position of bottom of cell
    """Radial position of the BOTTOM edge of the cell.

    Cell volume comes from the ideal gas law (V = m / rho); the radius from
    r = (3V / (4*pi))**(1/3). BUGFIX: missing parentheses made the original
    compute (3V/4)*pi — multiplying by pi instead of dividing by 4*pi —
    disagreeing with the correct 3/(4*pi) factor used in radiuscell().
    """
    volume = m_cell / ((P * mu * uu) / (t_initial * kxc))
    return z - (3.0 * volume / (4.0 * pi)) ** (1.0 / 3.0)
def rplus(z, m_cell, P, t_initial): #position of cell plus radius, or position of top of cell
    """Radial position of the TOP edge of the cell (same fix as rminus)."""
    volume = m_cell / ((P * mu * uu) / (t_initial * kxc))
    return z + (3.0 * volume / (4.0 * pi)) ** (1.0 / 3.0)
def Pdifference(xx1, m_cell, P1, t_initial): #difference in pressure between these two points
    """Fractional pressure drop across the cell, relative to its centre value."""
    plus = rplus(xx1, m_cell, P1, t_initial)
    minus = rminus(xx1, m_cell, P1, t_initial)
    Pplus = interp(plus, xx1, P1) #pressure at top of cell
    Pminus = interp(minus, xx1, P1) #pressure at bottom of cell
    return ((Pminus-Pplus)/P1) #comparison to centre of cell
def rhodifference(xx1, m_cell, P1, t_initial): #This does for density what Pdifference(...) does for pressure.
    plus = rplus(xx1, m_cell, P1, t_initial)
    minus = rminus(xx1, m_cell, P1, t_initial)
    rhoplus = interp(plus, xx1, rho1)   # rho1 is read from module scope, not an argument
    rhominus = interp(minus, xx1, rho1)
    return ((rhominus-rhoplus)/rho1)
def tdifference(xx1, m_cell, P1, t_initial): #This does for temperature what Pdifference(...) does for pressure.
    plus = rplus(xx1, m_cell, P1, t_initial)
    minus = rminus(xx1, m_cell, P1, t_initial)
    tplus = interp(plus, xx1, t1)       # t1 is read from module scope
    tminus = interp(minus, xx1, t1)
    z = ((tminus-tplus)/t1)
    return z
############################################## Adiabatic Temperature Gradient ##############################################
""" There were a number of different ways used to calculate the adiabatic temperature gradient of a cell. They were numbered 1-4, and found using the following functions."""
def adbtempgrad1(gamma1, t1, P1): #first adiabatic method
    # NOTE(review): mixes the grid-1 arguments (gamma1, t1, P1) with the
    # module-level grid-2 arrays m2/xx2/rho2 — confirm the grids are
    # intentionally mixed here.
    a = (gamma1-1)/(gamma1)
    b = (t1/P1)
    c = -((g(m2, xx2)) * rho2)
    return a * b * c
def adbtempgrad2(m2, xx2, rho2, t2, P2): #second adiabatic method
    # NOTE(review): multiplies by Cp where adbtempgrad3 divides by it; the
    # physical form dT/dr = -g/Cp suggests a division — verify intent.
    a = -((g(m2, xx2)) * rho2)
    b = (t2/P2)
    c = Cp
    return a * b * c
def adbtempgrad3(m1, xx1, Cp): #third adiabatic method: dT/dr = -g/Cp
    a = -g(m1, xx1)
    b = Cp+1E-25 #The '1E-25' number was added, to prevent the code from thinking it was dividing by 0.
    return a/b
#The 4th adiabatic method required d(rho)/dR. This is found with finite differences.
# BUGFIX: the original special-cased `i == 1` with a dead `slo = 0` assignment
# (immediately overwritten by the following if/else) and let i == 0 fall into
# the central-difference branch, where zzz[i-1] wrapped around to the LAST
# radius point via the negative index; the first point now uses a forward
# difference instead.
slop = []  # d(rho)/dR at every radial grid point
for i in arange(0, len(r1), 1):
    if i == 0:
        # forward difference: no left neighbour at the first point
        x1 = r1[0] * radiussol
        x2 = r1[1] * radiussol
    elif i == len(r1) - 1:
        # last point: difference between the last and third-last points
        x1 = r1[i - 2] * radiussol
        x2 = r1[i] * radiussol
    else:
        # central difference everywhere else
        x1 = r1[i - 1] * radiussol
        x2 = r1[i + 1] * radiussol
    y1 = interp(x1, xx1, rho1)  # density at the two sample radii
    y2 = interp(x2, xx1, rho1)
    slop.append((y2 - y1) / (x2 - x1))
rhoslope = array(slop)  # d(rho)/dR as an array for vectorised use below
# The following loop finds the pressure gradient (dP/dR), much like the loop above.
# NOTE: Pslope is not used anywhere below; it is kept for parity with the
# original analysis and can be commented out to speed the script up.
# BUGFIX: same two fixes as the rhoslope loop — dead `i == 1` branch removed
# and i == 0 no longer wraps around to r1[-1].
slop = []
for i in arange(0, len(r1), 1):
    if i == 0:
        # forward difference for the first point
        x1 = r1[0] * radiussol
        x2 = r1[1] * radiussol
    elif i == len(r1) - 1:
        x1 = r1[i - 2] * radiussol
        x2 = r1[i] * radiussol
    else:
        x1 = r1[i - 1] * radiussol
        x2 = r1[i + 1] * radiussol
    y1 = interp(x1, xx1, P1)
    y2 = interp(x2, xx1, P1)
    slop.append((y2 - y1) / (x2 - x1))
Pslope = array(slop)  # dP/dR (currently unused)
def adbtempgrad4(gamma1, t1, P1, rho1): #4th adiabatic method, using the density gradient 'rhoslope'.
    """Fourth adiabatic temperature-gradient estimate, driven by the
    precomputed module-level density-gradient array `rhoslope`."""
    # Same left-to-right product as the original a * b * c * d.
    return (gamma1 - 1) * (t1 / P1) * (P1 / rho1) * rhoslope
############################################## Actual Temperature Gradient ##############################################
# This loop calculates the actual temperature gradient dT/dR.
# BUGFIX: same two fixes as the rhoslope loop — the dead `i == 1` branch is
# removed and i == 0 no longer wraps around to r1[-1] via a negative index.
slop = []
for i in arange(0, len(r1), 1):
    if i == 0:
        # forward difference for the first point
        x1 = r1[0] * radiussol
        x2 = r1[1] * radiussol
    elif i == len(r1) - 1:
        # last point: difference between the last and third-last points
        x1 = r1[i - 2] * radiussol
        x2 = r1[i] * radiussol
    else:
        # central difference everywhere else
        x1 = r1[i - 1] * radiussol
        x2 = r1[i + 1] * radiussol
    y1 = interp(x1, xx1, t1)  # temperature at the two sample radii
    y2 = interp(x2, xx1, t1)
    slop.append((y2 - y1) / (x2 - x1))
tslope = array(slop)  # dT/dR as an array
############################################## Adiabatic Temperature Change ##############################################
#Calculates the temperature of a cell or cells as they rise through the Sun.
rx1 = r_initial1 * radiussol #Position of cell's in metres (same value as the rx1 computed earlier).
tempfina, distanc=[], [] # Two dummy lists, to be populated later ('distanc' is unused in this section).
asdf = interp(r_initial1, r1, t1) #'asdf' is the temperature of the surroundings at the cell's position.
temperatures=(2*asdf, 1.25*asdf, 1.1*asdf, 1.01*asdf) #This gives a list of cell temperatures- 2, 1.25, 1.1 and 1.01 times the ambient temp.
for z in temperatures: #For every temperature listed above:
    tempfina=[]
    for i in (xx1): #For every position along the solar radius:
        if i< (r_initial1*radiussol): #If the position is less than the initial cell radius, the temperature is set negligibly small.
            temp2 = 0.01
        # NOTE(review): a grid point exactly equal to r_initial1*radiussol
        # matches neither branch and silently reuses the previous temp2 —
        # confirm this cannot occur for the grids in use.
        if i > (r_initial1*radiussol): #If the position is more than the initial cell radius,
            temp1 = z #The conditions at these points are found...
            PP1 = interp(rx1, xx1, P1)
            gammma1 = interp(rx1, xx1, gamma1)
            gammma2 = interp(i, xx1, gamma1)
            PP2 = interp(i, xx1, P1)
            # Adiabatic relation: T2 = (P1^(1-g1) * T1^g1 / P2^(1-g2))^(1/g2)
            aaa = (PP1)**(1-gammma1) #And plugged into an equation for ideal gas temperature.
            bbb = (temp1)**(gammma1)
            ccc = (PP2)**(1-gammma2)
            ddd = aaa * bbb
            eee = ddd / ccc
            temp2 = (eee)**(1/gammma2) #This is returned here.
        tempfina.append(temp2) #And plugged into the list 'tempfina'
    if z == 2*asdf: #These next 4 'ifs' put the temperatures into lists numbered 1-4.
        tempfinal1=array(tempfina) #This is so the temperature loss of cells for different initial temperatures can be compared.
    if z == 1.25*asdf:
        tempfinal2 = array(tempfina)
    if z == 1.1*asdf:
        tempfinal3 = array(tempfina)
    if z == 1.01*asdf:
        tempfinal4 = array(tempfina)
"""----------------------------------------------------------------------------------------------------------------------"""
"""----------------------------------------------------------------------------------------------------------------------"""
"""----------------------------------------------------------------------------------------------------------------------"""
"""=================================================================================================="""
############################################## BIG BAD FOR/WHILE LOOPS ##############################################
"""=================================================================================================="""
# Setup for the main cell-kinematics simulation below.
time = 0.1 #time step chosen- 0.1 seconds. NOTE: rebinds the name 'time' at module level.
u, d, s = 0, 0, 0 #initial speed 'u'=0, counter 'd'=0, and displacement 's'=0
timelist, distlist, speedlist, acclist = [0], [0], [0], [0] #lists for time, cell displacement, speed and acceleration.
rxx1 = (r_initial1 + 0.01) * 6.96342E8 #initial radius of cell in metres. 0.01 added, otherwise the cell begins erroneously sinking.
ghjk = 0 #spare counter (unused below)
kjhg = 0 #progress counter, incremented once per (position, temperature) pair
jj = [0.9] #Cell starting positions (in solar radii) to be tested. Works nicely with e.g. arange(0.7, 0.95, 0.01)
kk = (2, 1.25, 1.1, 1.01) #Temperature factors: multiples of the solar temperature at the initial cell radius.
average=[] #Accumulators across all (position, temperature) combinations:
posit =[] #starting positions,
temper=[] #starting temperatures,
finalspee =[] #and final (surface) speeds.
for j in jj: #For each cell position
for kj in kk: #For each temperature factor in 'kk'
lop = interp(j, r1, t1) #This finds the solar temp at initial cell radius.
k = kj * lop #Multiplies temperature factor by solar temperature.
kjhg = kjhg + 1 #counter for telling how complete the loop is.
tempdif = 0
tempfina, distanc=[], []
#The following finds the temperature loss (as above) for every cell used in the loop.
rx1 = j * radiussol
for i in (xx1):
if i< (j*radiussol):
temp2 = 0.01
if i > (j*radiussol):
temp1 = k
PP1 = interp(rx1, xx1, P1)
gammma1 = interp(rx1, xx1, gamma1)
gammma2 = interp(i, xx1, gamma1)
PP2 = interp(i, xx1, P1)
aaa = (PP1)**(1-gammma1)
bbb = (temp1)**(gammma1)
ccc = (PP2)**(1-gammma2)
ddd = aaa * bbb
eee = ddd / ccc
temp2 = (eee)**(1/gammma2)
tempfina.append(temp2)
tempfinal=array(tempfina)
#This temperature array 'tempfinal' is used in the array below.
rxx1 = (j+ 0.001) * 6.96342E8 #rxx1 is the changing position of the cell used in the code.
u, d, s = 0, 0, 0 #resets initial values for speed, counter and displacement.
templist, kineticlist, timelist, distlist, speedlist, acclist = [0], [0], [0], [0], [0], [0] #making the lists empty again.
while (rxx1/6.96342E8)< 1: #while the cell is beneath the solar surface (where r/solarradius=1)
#These find the the conditions at whatever position the cell is at
mm1 = interp(rxx1, xx1, m1) #mass of Sun
PP1 = interp(rxx1, xx1, P1) #pressure of Sun
blaergh = interp(rxx1, xx1, tempfinal) #Temperature of cell
rhorho1 = interp(rxx1, xx1, rho1) #density of Sun
a = netacceleration(mm1, rxx1, m_cell, PP1, blaergh, rhorho1) #This finds the net acceleration acting upon the cell.
interp(rxx1, xx1, netacceleration(m1, xx1, m_cell, P1, blaergh, rho1))
if u<0: #this condition stops the loop if the cell begins sinking (u= initial velocity)
average.append(0) #this adds a 0-value to the average cell velocity.
posit.append(j) #position
temper.append(k) #temperature
finalspee.append(0) #final cell speed
break
v = a * time + u # First equation of motion, where v=final velocity
s = u * time + 0.5 * a * time**2 # Second equation of motion, where s=displacement
d = d + 1 #counter
u = v #sets new initial velocity as the final velocity.
v = 0 # sets final velocity after step as initial velocity
rxx1 = rxx1 + s # sums total distance travelled so far, and changes rxx1 accordingly
zxc = (0.5)*(m_cell)*(u * u)
distlist.append(rxx1) #populates the distance, speed, acceleration, and time lists.
speedlist.append(u)
acclist.append(a)
timelist.append(d*time)
kineticlist.append(zxc)
tyu = sum(speedlist)/(d/10.) #finds average velocity for cell.
if speedlist[-1] > 0: #If the final cell velocity is NOT 0, the cell has reached the surface.
posit.append(j) #position, temperature, final speed and average speed are added to lists.
temper.append(k)
finalspee.append(speedlist[-1])
average.append(tyu)
if kj==(2):
kineticlist1, timelist1, acclist1, speedlist1, distlist1 = kineticlist, timelist, acclist, speedlist, distlist
kineticarray1, distarray1, speedarray1, accarray1, timearray1 = array(kineticlist1), array(distlist1), array(speedlist1), array(acclist1), array(timelist1)
templist, kineticlist, timelist, distlist, speedlist, acclist = [0], [0], [0], [0], [0], [0]
if kj==(1.25):
kineticlist2, timelist2, acclist2, speedlist2, distlist2 = kineticlist, timelist, acclist, speedlist, distlist
kineticarray2, distarray2, speedarray2, accarray2, timearray2 = array(kineticlist2), array(distlist2), array(speedlist2), array(acclist2), array(timelist2)
templist, kineticlist, timelist, distlist, speedlist, acclist = [0], [0], [0], [0], [0], [0]
if kj==(1.1):
kineticlist3, timelist3, acclist3, speedlist3, distlist3 = kineticlist, timelist, acclist, speedlist, distlist
kineticarray3, distarray3, speedarray3, accarray3, timearray3 = array(kineticlist3), array(distlist3), array(speedlist3), array(acclist3), array(timelist3)
if kj==(1.01):
kineticlist4, timelist4, acclist4, speedlist4, distlist4 = kineticlist, timelist, acclist, speedlist, distlist
kineticarray4, distarray4, speedarray4, accarray4, timearray4 = array(kineticlist4), array(distlist4), array(speedlist4), array(acclist4), array(timelist4)
print "Process Time: ", clock() -t0, "seconds" #prints time taken for code.
"""=================================================================================================="""
############################################## PLOTS AND SHITE ##############################################
"""=================================================================================================="""
"""
#######################################################################################
#PLOT NUMBER 1
# TEMPERATURE PRESSURE DENSITY MASS OF SUN (Surroundings)
close()
fig = figure(2)
bleh=dict(wspace= 0.25, hspace=0.2)
subplots_adjust(**bleh)
ax1 = subplot(221)
#setp( ax1.get_xticklabels(), visible=False)
title('(A)', fontsize=15)
plot(r1, t1, 'r', lw = 3.5)
xlabel(r'$r/R_\odot$', fontsize=17)
xlim([0, 1])
ylabel(r'Temperature ($K$)', fontsize = 15)
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
grid(True)
plt.show()
#####2
ax2 = subplot(222)
#setp( ax2.get_xticklabels(), visible=False)
title('(B)', fontsize=15)
plot(r1, P1, 'b', lw=3.5)
xlabel(r'$r/R_\odot$', fontsize=17)
xlim([0, 1])
ylabel(r'Pressure ($Pa$)', fontsize = 15)
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
grid(True)
####3
ax3 = subplot(223)
plot(r1, rho1, 'g', lw=3.5)
title('(C)', fontsize=15)
xlim([0, 1])
xlabel(r'$r/R_\odot$', fontsize=17)
ylabel(r'Density ($kgm^-^3$)', fontsize = 15)
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
ax = gca()
ax.ticklabel_format(style='sci', axis='y')
ax.yaxis.major.formatter.set_powerlimits((0,0))
grid(True)
####4
ax4 = subplot(224, sharex=ax2)
title('(D)', fontsize=15)
plot(r1, m1, 'c', lw=3.5)
xlim([0, 1])
ylim([0, 1])
xlabel(r'$r/R_\odot$', fontsize=17)
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
ylabel(r'Mass of Sun ($m/M_\odot$)', fontsize = 15)
grid(True)
show()
#######################################################################################
# PLOT NUMBER 2
# TEMPERATURE PRESSURE DENSITY MASS OF SUN (Convective Zone)
close()
fig = figure(2)
bleh= dict(wspace= 0.3, hspace=0.2)
subplots_adjust(**bleh)
ax1 = subplot(221)
#setp( ax1.get_xticklabels(), visible=False)
title('(E)', fontsize=15)
plot(r1, t1, 'r', lw = 3.5)
xlim([0.7, 1])
ylim([0, 2.5E6])
xlabel(r'$r/R_\odot$', fontsize=17)
ax1.ticklabel_format(style='sci', axis='y')
ax1.yaxis.major.formatter.set_powerlimits((0,0))
ylabel(r'Temperature ($K$)', fontsize = 15)
grid(True)
#####2
ax2 = subplot(222)
#setp( ax2.get_xticklabels(), visible=False)
title('(F)', fontsize=15)
xlabel(r'$r/R_\odot$', fontsize=17)
plot(r1, P1, 'b', lw=3.5)
xlim([0.7, 1])
ylim([0, 0.7E13])
ylabel(r'Pressure ($Pa$)', fontsize = 15)
grid(True)
####3
ax3 = subplot(223)
plot(r1, rho1, 'g', lw=3.5)
title('(G)', fontsize=15)
xlim([0.7, 1])
ylim([0, 2.5E2])
xlabel(r'$r/R_\odot$', fontsize=17)
ylabel(r'Density ($kgm^-^3$)', fontsize = 15)
ax = gca()
ax.ticklabel_format(style='sci', axis='y')
ax.yaxis.major.formatter.set_powerlimits((0,0))
grid(True)
####4
ax4 = subplot(224, sharex=ax2)
title('(H)', fontsize=15)
plot(r1, m1, 'c', lw=3.5)
xlim([0.7, 1])
ylim([0.97, 1])
xlabel(r'$r/R_\odot$', fontsize=17)
ylabel(r'Mass of Sun ($m/M_\odot$)', fontsize = 15)
grid(True)
show()
#######################################################################################
#PLOT NUMBER 3
# OPACITY OF SUN
close()
semilogy(r1, opacity, 'k', lw=2)
ylim([1E-2, 1E6])
xlim([0, 1.02])
grid()
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'Opacity ($m^2/kg$)', fontsize = 18)
from pylab import *
text(0.05, 316228, 'Core', rotation=00, fontsize=19)
text(0.339, 316228, 'Radiative Zone', rotation=00, fontsize=19)
text(0.77, 316228, 'Convective', rotation=00, fontsize=19)
text(0.82, 90851, 'Zone', rotation=00, fontsize=19)
text(0.65, 1100, 'Tachocline', rotation=90, fontsize=19)
show()
#######################################################################################
#PLOT NUMBER 4
# IDEAL GAS SUN
close()
ax1 = subplot(211)
plot(r1, t1, 'r')
plot(r1, idealtemp(P1), 'k')
ylabel(r'Temperature ($K$)', fontsize = 18)
legend((r'$T_{\odot}$', r'$T_{ideal}$'), loc='upper right')
setp( ax1.get_xticklabels(), visible=False)
xlim([0, 1])
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
grid()
ax2 = subplot(212)
plot(r1, (t1-idealtemp(P1))/idealtemp(P1)*100)
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'$\left(\frac{T_{\odot}-T_{ideal}}{T_{ideal}}\right)\%$', fontsize=20)
xlim([0, 1])
axvline(0.713, color='black', lw=2, linestyle='steps--')
axvline(0.2, color='black', lw=2, linestyle='steps--')
grid()
show()
#######################################################################################
# PLOT NUMBER 5
# PRESSURE SCALE HEIGHT WITH 'ERROR' BARS
xheight = np.linspace(0, 1, 56)
er =[]
for i in arange(0, len(xheight), 1):
err = interp(xheight[i], r1, pheight(P1, rho1, m1, xx1))
er.append(err)
yheight = array(er)
yheighter=[]
for i in arange(0, len(xheight), 1):
if i < 38:
yter = 0
else:
yter = yheight[i]
yheighter.append(yter)
yheighterr = array(yheighter)
zeroes = zeros(56)
close()
figure(1)
plot(r1, pheight(P1, rho1, m1, xx1,), 'b', lw=2)
plot(r1, pheight(P1, rho1, m1, xx1), 'k', lw=2)
plot(r1, pheight(P1, rho1, m1, xx1), 'r', lw=2)
errorbar(xheight, yheight, xerr=[zeroes, 3*(yheighterr/radiussol)], fmt='b.', lw=2)
errorbar(xheight, yheight, xerr=[zeroes, 2*(yheighterr/radiussol)], fmt='k.', lw=2)
errorbar(xheight, yheight, xerr=[zeroes, (yheighterr/radiussol)], fmt='r.', lw=2)
xlim([0.7, 1.02])
ylim([0, 0.6E8])
grid(True)
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'Pressure Scale Height ($m$)', fontsize=18)
legend((r'$\alpha$ = 3', r'$\alpha$ = 2', r'$\alpha$ = 1'), loc = 'bottom left')
show()
#######################################################################################
# PLOT NUMBER 6
# ADIABATIC/ACTUAL TEMPERATURE GRADIENTS
close()
bleh=dict(wspace= 0.4, hspace=0.2)
plot(r1, abs(tslope), 'k', lw=3)
plot(r1, abs(adbtempgrad1(gamma1, t1, P1)), 'r', lw=2)
#plot(r1, abs(adbtempgrad4(gamma1, t1, P1, rho1)), 'g')
xlim([0.5, 1])
ylim([0, 0.02])
axvline(0.713, color='black', lw=2, linestyle='steps--')
ylabel(r'$|\frac{dT}{dr}|$', fontsize = 22)
xlabel(r'$r/R_\odot$', fontsize=18)
legend((r'$|\frac{dT}{dr}|_{act}$', r'$|\frac{dT}{dr}|_{adb}=(\frac{\gamma-1}{\gamma})(\frac{T}{P})(-g\rho)$',), loc='lower left')
grid(True)
show()
#######################################################################################
# PLOT NUMBER 7
# TEMPERATURE DIFFERENCE OF CELL
close()
plot(r1, t1, 'r', lw=2)
plot( r1, tempfinal1, 'k', r1, tempfinal2, 'b')
legend(('Solar Temperature', r'$T_{initial} = 2 \times T_{0.9R_{\odot}}$', r'$T_{initial} = 1.25 \times T_{0.9R_{\odot}}$'), loc='upper center')
xlim([0.9, 1])
ylim([0, 1.2E6])
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'Temperature ($K$)', fontsize = 18)
ylabel
grid()
show()
#######################################################################################
# PLOT NUMBER 8
# CELL RADIUS/VOLUME
#NOTE: Mass of cell needs to be at least 1E14kg to be seen with the xlimits below.
close()
fig = figure(1)
m_cell=1E14
ax1 = subplot(211)
setp( ax1.get_xticklabels(), visible=False)
#title('Volume and Radius of Cell', fontsize = 22)
semilogy(r1, volbub(m_cell, P1, tempfinal1), 'k', r1, volbub(m_cell, P1, tempfinal2), 'b')
xlim([0.9, 1.01])
ylim([1E12, 1E19])
ylabel('Volume of Cell ($m^{3}$)', fontsize = 17)
legend((r'$T_{initial}=2\times T_{0.9R_{\odot}}$', r'$T_{initial}=1.25\times T_{0.9R_{\odot}}$'), loc='upper left')
#legend((r'Cell= $3\times10^6 K$', r'Cell= $2.6\times10^6 K$'), loc='upper center')
grid(True)
ax2 = subplot(212)
semilogy(r1, radiuscell(m_cell, P1, tempfinal1), 'k', r1, radiuscell(m_cell, P1, tempfinal2), 'b')
xlim([0.9, 1.01])
ylim([1E4, 1E6])
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel('Radius of Cell ($m$)', fontsize = 17)
legend((r'$T_{initial}=2\times T_{0.9R_{\odot}}$', r'$T_{initial}=1.25\times T_{0.9R_{\odot}}$'), loc='upper left')
#legend((r'Cell= $3\times10^6 K$', r'Cell= $2.6\times10^6 K$'), loc='upper center')
grid(True)
show()
#######################################################################################
# PLOT NUMBER 9
# APPLICABILITY OF 1-D SIM.
#Note: This also depends on the cell mass chosen.
close()
figure(1)
semilogy(r1, Pdifference(xx1, m_cell, P1, tempfinal1), 'r')
semilogy(r1, rhodifference(xx1, m_cell, P1, tempfinal1), 'k')
semilogy(r1, tdifference(xx1, m_cell, P1, tempfinal1), 'b')
xlim([0.9, 1.001])
ylim([1E-6, 1E0])
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'$\frac{\bigtriangleup x}{x}$', fontsize = 30)
legend(('$x $ = Pressure', '$x $ = Density', '$x $ = Temperature'), loc='upper center')
#axvline(0.9994407736403459, color='black')
grid(True)
show()
###########################
############################################################
# PLOT NUMBER 10
# KINEMATICS OF CELL
close()
F = pylab.gcf()
bleh=dict(wspace= 0.3, hspace=0.3)
subplots_adjust(**bleh)#left=0.0, right=1.0, bottom=0.0, top=1.0)
####1
ax1 = subplot(211)
plot(distarray1/radiussol, accarray1, 'k', distarray2/radiussol, accarray2, 'b', distarray3/radiussol, accarray3, 'g', distarray4/radiussol, accarray4, 'r')
setp( ax1.get_xticklabels(), visible=False)
#title(r'$T_{cell} = 2\times T_{ext}$', fontsize=18)
xlim([0.9, 1.002])
ylabel(r'Acceleration ($ms^{-2}$)', fontsize = 17)
grid(True)
####2
ax2 = subplot(212)
plot(distarray1/radiussol, speedarray1/1000, 'k', distarray2/radiussol, speedarray2/1000, 'b', distarray3/radiussol, speedarray3/1000, 'g', distarray4/radiussol, speedarray4/1000, 'r')
xlim([0.9, 1.002])
ylim([0, 200])
ylabel(r'Velocity ($kms^{-1}$)', fontsize=17)
xlabel(r'$r/R_\odot$', fontsize=18)
legend((r'$T_{initial}=2\times T_{0.9R_{\odot}}$', r'$T_{initial}=1.25\times T_{0.9R_{\odot}}$', r'$T_{initial}=1.1\times T_{0.9R_{\odot}}$', r'$T_{initial}=1.01\times T_{0.9R_{\odot}}$'), loc='upper left')
grid()
show()
#######################################################################################
# PLOT NUMBER 11
# GRAVITY/BUOYANCY ACTING UPON CELL INNIT
close()
figure(1)
plot(r1, netacceleration(m1, xx1, m_cell, P1, tempfinal1, rho1), 'g', r1, g(m1, xx1), 'b--', r1, buoy(m1, xx1, m_cell, P1, tempfinal1, rho1), 'r--')
grid(True)
xlim([0.9, 1])
ylim([-250, 700])
axhline(0, color='black', lw=2)
xlabel(r'$r/R_\odot$', fontsize=18)
ylabel(r'Acceleration ($ms^{-2}$)', fontsize = 18)
legend(('Net Acceleration', 'Gravity', ' Buoyancy'), loc = 'bottom left')
show()
#######################################################################################
# PLOT NUMBER 12
# SCATTER PLOT OF AVERAGE/FINAL VELOCITIES
#This requires a bit of fiddling around. See note on PDF
posits = array(posit) #This makes the lists arrays.
tempers = array(temper) # so they can be manipulated
finalspees = array(finalspee)
averages = array(average)
averages=averages/10000 #This changes the average velocity into km/s. The extra factor of 10 is to correct a mistake I had
tempers=tempers/1E6 # in calculating the average velocity above.
finalspees=finalspees/1000 # Puts final speed in km/s
close()
plt.scatter(posits, tempers, c=averages, marker='s', s=400, cmap = 'hot', edgecolors='none')
xlim([0.695, 0.9495])
ylim([0.5, 2.9])
ylabel(r'Initial Cell Temperature ($10^{6}K$)', fontsize = 19)
xlabel(r'Initial Cell Position $r/R_\odot$', fontsize=19)
cbar = colorbar(ticks=[0, 50, 100, 150, 200, 250, 300, 350], orientation='vertical')
plot(r1, t1/1E6, 'w', lw=3) #These add multiples of the solar temperature.
plot(r1, (1.25*t1)/1E6, 'w')#, lw=3)
plot(r1, (1.5*t1)/1E6, 'w')#, lw=3)
plot(r1, (1.75*t1)/1E6, 'w')#, lw=3)
show()
"""
|
from flask import *
home = Blueprint('home', __name__, url_prefix='/home', template_folder='home_templates', static_folder='home_static')


# BUG FIX: the view function was also named `home`, which rebound the module
# name and clobbered the Blueprint object immediately after registration, so
# e.g. `from <module> import home; app.register_blueprint(home)` received the
# view function instead of the Blueprint.  The view is renamed to `index`;
# endpoint='home' preserves the original endpoint name, keeping
# url_for('home.home') working.
@home.route('/', endpoint='home')
def index():
    """Render the home page."""
    return render_template('home.html')
|
import time
from model.nethandler.battle_net_handler import BattleNetHandler
def default_callback(*args):
    """No-op placeholder installed where no event callback has been registered.

    Accepts and ignores any positional arguments; returns None.
    """
    return None
class BattleNetClient(BattleNetHandler):
    """Client side of the networked battle card game.

    Outgoing actions are sent as text messages via ``send_msg``; incoming
    network events arrive through the ``ivy__*`` methods (presumably
    dispatched by name by BattleNetHandler -- TODO confirm).  User code
    reacts to events by assigning the ``on_*`` callback attributes; each
    callback's arguments are listed next to its declaration.
    """

    def __init__(self, name=None):
        """Create a client agent.

        name -- unique agent name; defaults to "client#<ms timestamp>".

        BUG FIX: the original default argument was evaluated once at class
        definition time, so every client constructed without an explicit
        name shared the exact same identifier.  The default is now computed
        per instantiation.
        """
        if name is None:
            name = "client#" + str(round(time.time()*1000))
        self.players_names = []  # names of the other players in our room (excludes ourselves)
        self.short_rule = True
        self.no_card_upside_down = False
        # Event callbacks; the trailing comment lists the arguments each receives.
        self.on_new_room = default_callback  # gamehost_name, room_name, short_rule, no_card_upside_down
        self.on_room_is_full = default_callback  # gamehost_name
        self.on_another_game_begin = default_callback  # gamehost_name
        self.on_room_connection_accepted = default_callback  #
        self.on_room_connection_failed = default_callback  #
        self.on_game_begin = default_callback  #
        self.on_game_new_turn = default_callback  #
        self.on_players_list_updated = default_callback  # players_names
        self.on_my_card_drawn = default_callback  # card_desc
        self.on_player_card_drawn = default_callback  # client_name, card_desc
        self.on_turn_battle_with = default_callback  # in_battle, others_battle_members
        self.on_turn_finished = default_callback  # winner_name
        self.on_player_picked_card = default_callback  # client_name, card_desc
        self.on_game_ended = default_callback  #
        self.on_game_won = default_callback  # winner_name
        self.on_game_par = default_callback  # winners_names
        self.__connecting_to_room = None  # gamehost we are currently asking to join
        self.__connected_to_room = None   # gamehost whose room we have joined
        BattleNetHandler.__init__(self, name)

    # -- Outgoing messages. --

    def find_rooms(self):
        """Ask neighbouring game hosts to advertise their rooms."""
        self.send_msg("find_rooms.")

    def connect_room(self, gamehost_name):
        """Request to join the room hosted by *gamehost_name*."""
        self.__connecting_to_room = gamehost_name
        self.send_msg("connect_room: " + gamehost_name + ".")

    def game_turn_draw_card(self):
        """Tell our host that we draw a card this turn."""
        self.send_msg("game_turn_draw_card: " + self.__connected_to_room + ".")

    def game_turn_card_pick(self, card_desc):
        """Tell our host which card we pick this turn."""
        self.send_msg("game_turn_card_pick: " + self.__connected_to_room + ", " + card_desc + ".")

    # -- Incoming messages. --

    def ivy__find_rooms(self, agent):
        pass  # Nothing to handle client-side: the client is the one asking which rooms exist.

    def ivy__room(self, agent, room_name, short_rule, no_card_upside_down):
        # Booleans arrive on the wire as the strings "true"/"false".
        self.on_new_room(agent.agent_name, room_name,
                         short_rule == "true", no_card_upside_down == "true")

    def ivy__connect_room(self, agent, gamehost_name):
        pass  # Nothing to handle client-side: the client sends room-connection requests, not receives them.

    def ivy__room_is_full(self, agent):
        if self.__connecting_to_room == agent.agent_name:
            # The room we were trying to join is full: our connection failed.
            self.on_room_connection_failed()
            self.__connecting_to_room = None
        self.on_room_is_full(agent.agent_name)

    def ivy__accept_player(self, agent, player_name):
        # Only react if WE are the accepted player and it is the room we asked for.
        if self.name == player_name and self.__connecting_to_room == agent.agent_name:
            self.__connecting_to_room = None
            self.__connected_to_room = agent.agent_name
            self.on_room_connection_accepted()

    def ivy__players_list(self, agent, players_names, _):
        if self.__connected_to_room == agent.agent_name:
            self.players_names.clear()
            # The list is serialized as "name1, name2, ..., " -- drop the empty tail.
            players_names = players_names.split(", ")[:-1]
            for player_name in players_names:
                if self.name != player_name:
                    self.players_names.append(player_name)
            self.on_players_list_updated(self.players_names)

    def ivy__game_begin(self, agent, short_rule, no_card_upside_down):
        if self.__connected_to_room == agent.agent_name:
            self.short_rule = short_rule == "true"
            self.no_card_upside_down = no_card_upside_down == "true"
            self.on_game_begin()
        else:
            # A game started in a room we are not part of.
            self.on_another_game_begin(agent.agent_name)

    def ivy__game_new_turn(self, agent):
        if self.__connected_to_room == agent.agent_name:
            self.on_game_new_turn()

    def ivy__game_turn_draw_card(self, agent, gamehost_name):
        pass  # Nothing to handle client-side: the client sends the draw notification, not receives it.

    def ivy__game_turn_card_drawn(self, agent, client_name, card_desc):
        if self.__connected_to_room == agent.agent_name:
            if self.name == client_name:
                self.on_my_card_drawn(card_desc)
            else:
                self.on_player_card_drawn(client_name, card_desc)

    def ivy__game_turn_battle(self, agent, battle_members_names, _):
        if self.__connected_to_room == agent.agent_name:
            others_member_names = []
            in_battle = False
            # Serialized as "name1, name2, ..., " -- drop the empty tail.
            battle_members_names = battle_members_names.split(", ")[:-1]
            for battle_member_name in battle_members_names:
                if self.name == battle_member_name:
                    in_battle = True
                else:
                    others_member_names.append(battle_member_name)
            self.on_turn_battle_with(in_battle, others_member_names)

    def ivy__game_turn_won(self, agent, winner_name):
        if self.__connected_to_room == agent.agent_name:
            self.on_turn_finished(winner_name)

    def ivy__game_turn_par(self, agent):
        # Turn ended in a draw: no winner.
        if self.__connected_to_room == agent.agent_name:
            self.on_turn_finished(None)

    def ivy__game_turn_card_pick(self, agent, gamehost_name, card_desc):
        # Here the sender is another CLIENT, so we match on the room name it targets.
        if self.__connected_to_room == gamehost_name:
            if agent.agent_name in self.players_names:
                self.on_player_picked_card(agent.agent_name, card_desc)

    def ivy__game_ended(self, agent, player_name):
        if self.__connected_to_room == agent.agent_name:
            if self.name == player_name:
                self.on_game_ended()

    def ivy__game_won(self, agent, winner_name):
        if self.__connected_to_room == agent.agent_name:
            self.on_game_won(winner_name)

    def ivy__game_par(self, agent, winners_names, _):
        # Game ended in a draw between several winners; serialized list, empty tail dropped.
        if self.__connected_to_room == agent.agent_name:
            self.on_game_par(winners_names.split(", ")[:-1])
|
from discord.ext import commands
from potato_bot.bot import Bot
from potato_bot.cog import Cog
class Democracy(Cog):
    """Automatic democracy tools"""

    # The class docstring above doubles as the cog's description in discord.py's
    # help output, so it is left unchanged.
    def __init__(self, bot: Bot):
        self.bot = bot  # reference to the running bot instance

    @commands.command()
    async def peng(self, ctx):
        # Simple liveness check command.
        # NOTE(review): the command is named "peng" but replies "pong!" --
        # presumably a deliberate pun on ping/pong; confirm before renaming.
        await ctx.send("pong!")
def setup(bot):
    """Extension entry point invoked by discord.py's ``load_extension``."""
    cog = Democracy(bot)
    bot.add_cog(cog)
|
from canoser import Struct, Uint8, bytes_to_int_list, hex_to_int_list
from libra.transaction.transaction_argument import TransactionArgument, normalize_public_key
from libra.bytecode import bytecodes
from libra.account_address import Address
class Script(Struct):
    """Canoser-serializable Move transaction script: raw bytecode plus arguments."""

    _fields = [
        ('code', [Uint8]),
        ('args', [TransactionArgument])
    ]

    @classmethod
    def gen_transfer_script(cls, receiver_address, micro_libra):
        """Build a peer-to-peer transfer of *micro_libra* to *receiver_address*.

        The address may be given as bytes, a hex string, or an int list.
        """
        if isinstance(receiver_address, bytes):
            receiver_address = bytes_to_int_list(receiver_address)
        elif isinstance(receiver_address, str):
            receiver_address = hex_to_int_list(receiver_address)
        transfer_args = [
            TransactionArgument('Address', receiver_address),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(bytecodes["peer_to_peer_transfer"], transfer_args)

    @classmethod
    def gen_mint_script(cls, receiver_address, micro_libra):
        """Build a mint of *micro_libra* to *receiver_address*."""
        target = Address.normalize_to_int_list(receiver_address)
        mint_args = [
            TransactionArgument('Address', target),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(bytecodes["mint"], mint_args)

    @classmethod
    def gen_create_account_script(cls, fresh_address):
        """Build an account-creation script for *fresh_address* with balance 0."""
        target = Address.normalize_to_int_list(fresh_address)
        create_args = [
            TransactionArgument('Address', target),
            TransactionArgument('U64', 0)
        ]
        return Script(bytecodes["create_account"], create_args)

    @classmethod
    def gen_rotate_auth_key_script(cls, public_key):
        """Build a script rotating the authentication key to *public_key*."""
        new_key = normalize_public_key(public_key)
        rotate_args = [
            TransactionArgument('ByteArray', new_key)
        ]
        return Script(bytecodes["rotate_authentication_key"], rotate_args)

    @staticmethod
    def get_script_bytecode(script_name):
        """Look up the raw bytecode registered under *script_name*."""
        return bytecodes[script_name]
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created: Fri Jan 6 16:47:09 2017
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # Auto-generated by pyuic5 from 'MainWindow.ui' (see file header).
    # Do not hand-edit widget geometry/logic here: regenerate from the .ui
    # file instead, as all changes will be lost on the next generation.
    def setupUi(self, Dialog):
        """Build the static widget tree for the quiz dialog: a question label,
        a group of four answer radio buttons, and a 'Next' push button."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(372, 284)
        # Answer group with four mutually-exclusive radio buttons.
        self.groupBox = QtWidgets.QGroupBox(Dialog)
        self.groupBox.setGeometry(QtCore.QRect(20, 50, 301, 161))
        self.groupBox.setObjectName("groupBox")
        self.radioButton = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton.setGeometry(QtCore.QRect(10, 30, 116, 22))
        self.radioButton.setObjectName("radioButton")
        self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_2.setGeometry(QtCore.QRect(10, 60, 116, 22))
        self.radioButton_2.setObjectName("radioButton_2")
        self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_3.setGeometry(QtCore.QRect(10, 90, 116, 22))
        self.radioButton_3.setObjectName("radioButton_3")
        self.radioButton_4 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_4.setGeometry(QtCore.QRect(10, 120, 116, 22))
        self.radioButton_4.setObjectName("radioButton_4")
        # Question / prompt label.
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(20, 20, 91, 17))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # 'Next' button in the bottom-right corner.
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(250, 240, 98, 27))
        self.pushButton.setObjectName("pushButton")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Install the (placeholder) display strings; kept separate so the UI
        can be re-translated at runtime without rebuilding widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "LangLearner"))
        self.groupBox.setTitle(_translate("Dialog", "GroupBox"))
        self.radioButton.setText(_translate("Dialog", "RadioButton"))
        self.radioButton_2.setText(_translate("Dialog", "RadioButton"))
        self.radioButton_3.setText(_translate("Dialog", "RadioButton"))
        self.radioButton_4.setText(_translate("Dialog", "RadioButton"))
        self.label.setText(_translate("Dialog", "TextLabel"))
        self.pushButton.setText(_translate("Dialog", "Next"))
|
import tornado
from tornado import web
import wtforms_json
from peewee_async import Manager
from MxForum.settings import settings, database
from MxForum.urls import urlpattern
if __name__ == '__main__':
    # Enable JSON support for wtforms before any form processing happens.
    wtforms_json.init()
    app = web.Application(urlpattern, debug=True, **settings)
    app.listen(8888)
    # peewee-async manager; forbid accidental synchronous DB access afterwards.
    objects = Manager(database)
    database.set_allow_sync(False)
    app.objects = objects
    # BUG FIX: IOLoop.start() blocks until the loop stops and returns None;
    # the original bound that None to `io_loop`, which was a misleading dead
    # assignment.  Just start the loop.
    # (tornado.ioloop is importable here because `from tornado import web`
    # pulls it in -- presumably; confirm, or add an explicit import.)
    tornado.ioloop.IOLoop.current().start()
|
import webapp2
import os
import jinja2
import json
import datetime
import time
import urllib
import urllib2
import soundcloud
import sys
import random
import math
from google.appengine.ext import db
from google.appengine.api import memcache
from secret import client_id, client_secret
# Module-level setup: template engine, SoundCloud API client, segment length.
template_dir = os.path.dirname(__file__)
# jinja2 environment rooted at this module's directory; autoescape guards
# rendered templates against HTML injection.
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
#handler for the jinja2 env. allows us to use templates! c/p this code all you want for other projects
#https://api-v2.soundcloud.com/explore/techno?limit=100&linked_partitioning=1
# stream url: https://api.soundcloud.com/tracks/189594894/stream?client_id=6ec16ffb5ed930fce00949be480f746b&allows_redirect=false#t=50
# comment url: https://api.soundcloud.com/tracks/189594894/comments?client_id=6ec16ffb5ed930fce00949be480f746b
# Authenticated SoundCloud API client; credentials come from the secret module.
client = soundcloud.Client(client_id=client_id, client_secret=client_secret)
segmentLen = 3  # length of a song segment -- units not evident here (seconds? comment buckets); TODO confirm
class Handler(webapp2.RequestHandler):
    """Base request handler adding jinja2 template-rendering helpers."""

    def write(self, *a, **kw):
        """Write arbitrary output to the HTTP response body."""
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        """Render *template* with *params* and return the resulting string."""
        tmpl = jinja_env.get_template(template)
        return tmpl.render(params)

    def render(self, template, **kw):
        """Render *template* with *kw* straight into the response."""
        rendered = self.render_str(template, **kw)
        self.write(rendered)
class SegmentHandler(Handler):
    """Stub endpoint; currently answers every GET with a fixed greeting."""

    def get(self):
        self.write('hello world')
class ReqJSON(db.Model):
    # Datastore cache of a filtered SoundCloud explore-API response for one
    # genre/page combination (used below to avoid re-fetching/re-filtering).
    genre = db.StringProperty(required = True)  # genre slug used in the explore URL
    json_str = db.TextProperty(required = True)  # filtered track list serialized as JSON
    created = db.DateTimeProperty(auto_now_add = True)  # set on insert; used for the 1-hour staleness check
    page = db.IntegerProperty(required = True)  # 1-based results page number
class RandomHandler(Handler):
    """Return a JSON list of filtered tracks for a genre.

    Each track dict is annotated with 'start_time' (start of the
    densest-commented segment) and a 'hotness' score. Caching tiers:
    memcache (1h TTL) in front of datastore ReqJSON rows (also expired
    after 1h) in front of the SoundCloud explore API.
    """
    def get(self, genre1):
        genre = urllib.quote(genre1) #to make sure it's url valid
        # URL path may be "<genre>/<sortOption>/<page>"; default otherwise.
        if '/' in genre:
            genre, sortOption, page = genre.split('/')
            page = int(page)
        else:
            sortOption = 'random' #hot by default
            page = 1 #page 1 by default
        if page == '': page = 1  # NOTE(review): dead guard -- page is an int by this point
        arr = []
        comments = []
        ####
        #requirements for a song to be considered
        #downloadable, minimum duration (2 min), minimum playbacks (1000), minimum likes (5), minimum comments (5)
        #hotness = plays / (time elapsed)^1.2
        #store song snippets on box
        url = 'https://api-v2.soundcloud.com/explore/' + genre + '?offset=' + str((page-1)*50) + "&tag=out-of-experiment&limit=50" #offset parameter for paging (e.g. offset = (n-1)*limit to get results for nth page)
        print url
        self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
        # mc_genre = memcache.get('genre')
        # Three memcache entries per (genre, page): raw API track list,
        # the filtered list, and the last-refresh timestamp.
        tracks = memcache.get('tracks_' + genre + "_" + str(page))
        tracks_filtered = memcache.get('tracks_filtered_' + genre + "_" + str(page)) #type string or None
        lastUpdated = memcache.get('lastUpdated_' + genre + "_" + str(page)) #type string or None
        if tracks:
            tracks = json.loads(tracks)
        filter_change_needed = False
        if lastUpdated is None or int(time.time()) - float(lastUpdated) > 3600: #if memcache needs to update bc too old
            #url = url + genre + "?limit=50"
            req = json.load(urllib2.urlopen(url))
            tracks = req.get('tracks')
            print req.get('next_href')
            memcache.set('tracks_'+genre+"_"+str(page), json.dumps(tracks))
            # memcache.set('genre', genre)
            memcache.set('lastUpdated_'+genre+"_"+str(page), int(time.time()))
            filter_change_needed = True
        if tracks_filtered and not filter_change_needed: #if the filtered tracks list exists in memcache and change isn't needed
            tracks_filtered = json.loads(tracks_filtered) #convert to list of track objects
        elif filter_change_needed: #if memcache needs to update (or not found in memcache)
            query = db.GqlQuery('SELECT * FROM ReqJSON') #query db to check if we already did this before
            query = list(query)
            print "DB QUERY"
            in_db = False
            tooOld = False #check if db needs to update as well
            for q in query:
                if q.genre == genre: #if found in db. USE THIS TO IMPLEMENT MULTIPLE GENRE FEATURE
                    in_db = True
                    if time.time() - time.mktime(q.created.timetuple()) > 3600: #if the db entry is more than an hour old, delete and refresh
                        # NOTE(review): q.get('__key__') looks suspicious for a
                        # db.Model instance -- the usual spelling is q.key();
                        # confirm this path has ever executed successfully.
                        db.get(q.get('__key__')).delete() #delete old entr
                        tooOld = True
                    else:
                        tracks_filtered = json.loads(q.json_str)
            if not in_db or tooOld: #if not in db or db needs to be updated(along with memcache), we send http requests, and then store to db
                tracks_filtered = [] #going to generate list of track objects
                for a in range(len(tracks)):
                    # Spam/applicability filter: streamable, 2-6 minutes long,
                    # commentable, and with minimum plays/comments/likes.
                    if tracks[a].get('streamable') == True and \
                       tracks[a].get('duration') > 120000 and \
                       tracks[a].get('duration') < 360000 and \
                       tracks[a].get('commentable') == True and \
                       tracks[a].get('playback_count') > 1000 and \
                       tracks[a].get('comment_count') > 5 and \
                       tracks[a].get('likes_count') > 50: #if this track isn't spam and can be applicable to the app
                        intrack = {}
                        startTime = 0
                        greatestSum = 0
                        #now we find best part based on comment density
                        #retrieve comments
                        #instantiate array with length = length of song in seconds
                        #parse through comments, increment index of list that it appears in
                        #parse through array, set starting index as startTime if sum is greater than greatestSum
                        link = tracks[a].get('uri') + "/comments?client_id=" + client_id
                        comments = json.load(urllib2.urlopen(link)) #retrieve comments
                        #are we retrieving comments correctly? sanity check
                        # for b in range(len(comments)):
                        #     arr.append(comments[b].get('timestamp'))
                        #okay this works
                        #calculating startTime based on comment density now
                        arr = [0] * (int(tracks[a].get('duration')/1000)+10)
                        for b in range(len(comments)):
                            if comments[b].get('timestamp') and comments[b].get('timestamp') < len(arr)*1000:
                                arr[int(comments[b].get('timestamp'))/1000] += 1
                        # Sliding window of segmentLen seconds; densest window wins.
                        for index in range(1,len(arr)-segmentLen):
                            tempsum = sum(arr[index:(index+segmentLen)])
                            if tempsum>greatestSum:
                                greatestSum = tempsum
                                startTime = index
                        # how about reddit's hot algorithm? include a hotness attr
                        # hotness value = log(num_likes * 20*num_comments) + time_elapsed/45000
                        if tracks[a].get('release_day'):
                            time_track = datetime.datetime(tracks[a].get('release_year'), tracks[a].get('release_month'), tracks[a].get('release_day'))
                        else:
                            time_track = datetime.datetime(2011,5,1)  # fallback release date when missing
                        time_obj = time_track - datetime.datetime(2007, 8, 1)
                        time_dif = time_obj.days*3600*24 + time_obj.seconds
                        hotness = math.log(20*len(comments) * tracks[a].get('likes_count'), 10) + time_dif/45000
                        intrack['hotness'] = hotness
                        # var title: String
                        # var id: Int
                        # var duration: Int
                        # var stream_url: String
                        # var start_time: Int
                        # var permalink_url: String
                        # // Optional Variables (could be nil if not there)
                        # var genre: String?
                        # var subtitle: String?
                        # var artwork_url: String?
                        #extracting only the necessary json parts
                        intrack['start_time'] = startTime*1000
                        attributes = ['id', 'duration', 'stream_url', 'permalink_url', 'genre', 'description', 'artwork_url', 'title', 'comment_count']
                        for attr in attributes:
                            if attr == 'artwork_url': #exception since we want the highest quality album art
                                intrack[attr] = str(tracks[a].get(attr)).replace('large', 't500x500')
                            else:
                                intrack[attr] = tracks[a].get(attr)
                        tracks_filtered.append(intrack)
                track = ReqJSON(genre = genre, json_str=json.dumps(tracks_filtered), page=page) #add to db
                track.put()
            memcache.set('tracks_filtered_'+genre+"_"+str(page), json.dumps(tracks_filtered))
        #now, to return json
        #just return tracks_filtered list of objects, each one with an additional start time for most popular segment
        #write random function
        #tracks = json.load(urllib2.urlopen(url)).get('tracks') #url is hardcoded for now...
        # self.write(tracks[random.randint(0,99)].get('id'))
        #sort randomly (shuffle)
        if tracks_filtered and sortOption == 'random':
            random.shuffle(tracks_filtered)
        #or sort based on reddit's hot algorithm?
        elif tracks_filtered:
            tracks_filtered.sort(key=lambda x: x.get('hotness'), reverse=True)
        self.write(json.dumps(tracks_filtered))
        #self.write("This should spit out a random song")
class APIHandler(Handler):
    # Debug/echo endpoint: writes the captured URL path segment back verbatim.
    def get(self, inp):
        self.write(inp)
#coding=utf-8
# Python 2 interactive script: reads a numeric range as a "x,y" tuple via
# input(), prints every prime in [x, y) (ten per row), then the twin primes.
x,y=input('请输入一个数值的范围:')
if x>y:
    # swap so that x <= y
    t=y
    y=x
    x=t
while x<2:
    # primes start at 2; re-prompt until the lower bound is valid
    x,y=input('输入错误,请重新输入一个数值范围:')
    if x>y:
        t=y
        y=x
        x=t
m=1  # counter used to wrap the output every 10 primes
s=[]  # collected primes
print '数值范围内所检索的所有素数为:'
# NOTE(review): range(x, y, 1) excludes y itself -- confirm whether the
# upper bound was meant to be inclusive.
for i in range(x,y,1):
    # trial division: the inner loop breaks at the first divisor, so i is
    # prime exactly when the break happened at j == i.
    for j in range(2,i+1,1):
        if i%j==0:
            break
    if i==j:
        s.append(i)
        print i,
        m+=1
        if m%10==0:
            print '\n'
print '\n数值范围内所有的双胞胎素数为:'
for l in s:
    # twin primes: both l and l + 2 are in the collected list
    if l+2 in s:
        print l,l+2
|
import os
import sys
import pytest
import subprocess
import time
import re
from webapp.app import create_app
from webapp.config import config_dict
from core.constants import SQL_TEST_CONNECTION_STRING, SQL_TEST_DBNAME
from core.db import create_database, connect_db, drop_db, session_open, session_close
from util_scripts import upload_synthetic_data
from core.utils import create_user
# Make the webapp package importable when pytest runs from the repo root.
sys.path.append("webapp")
# if we start a new docker container, store the ID so we can stop it later
DOCKER_CONTAINER_ID = None
@pytest.fixture()
def app():
    """Flask application built with the 'Test' configuration."""
    test_config = config_dict["Test"]
    flask_app = create_app(test_config)
    yield flask_app
@pytest.fixture()
def client(app):
    # HTTP test client bound to the `app` fixture.
    return app.test_client()
@pytest.fixture()
def runner(app):
    # CLI runner for exercising the app's click commands in tests.
    return app.test_cli_runner()
@pytest.fixture()
def session(app):
    """Yield a DB session against the test database; close it on teardown."""
    status, log, engine = connect_db(SQL_TEST_CONNECTION_STRING, SQL_TEST_DBNAME)
    session = session_open(engine)
    yield session
    # Fix: the session was previously never closed, leaking one connection
    # per test. session_close is already imported at the top of this file --
    # assumes it takes the session object; confirm its signature.
    session_close(session)
@pytest.fixture()
def testuser(app):
    """Create a throwaway user account inside the application context."""
    credentials = {"username": "testuser", "email": "test@test.com", "password": "test"}
    with app.app_context():
        create_user(**credentials)
def check_for_docker():
    """
    See if we have a postgres docker container already running.

    Returns
    =======
    container_id:str if container running,
    OR
    True if docker is running, but no postgres container
    OR
    False if docker is not running
    """
    p = subprocess.run(["docker", "ps"], capture_output=True)
    if p.returncode != 0:
        return False  # docker CLI unavailable or daemon not running
    output = p.stdout.decode("utf-8")
    # Fix: use a raw string -- the original non-raw pattern contained the
    # invalid escape sequences "\d" and "\s", which raise a
    # DeprecationWarning (and will eventually be a SyntaxError).
    m = re.search(r"([\da-f]+)[\s]+postgres", output)
    if not m:
        return True  # Docker is running, but no postgres container
    return m.group(1)  # return the container ID
def pytest_configure(config):
    """
    Allows plugins and conftest files to perform initial configuration.
    This hook is called for every plugin and initial conftest
    file after command line options have been parsed.
    """
    # see if docker is running, and if so, if postgres container exists
    docker_info = check_for_docker()
    if not docker_info: # docker desktop not running at all
        # NOTE(review): DOCKER_RUNNING is a plain local (no `global`), so this
        # assignment is invisible outside the function -- confirm intent.
        DOCKER_RUNNING = False
        print("Docker not found - will skip tests that use the database.")
        return
    DOCKER_RUNNING = True
    if isinstance(docker_info, bool):
        # docker is running, but no postgres container
        print("Starting postgres docker container")
        p = subprocess.run(
            [
                "docker",
                "run",
                "-e",
                "POSTGRES_DB=cropdb",
                "-e",
                "POSTGRES_USER=postgres",
                "-e",
                "POSTGRES_PASSWORD=postgres",
                "-d",
                "-p",
                "5432:5432",
                "postgres:11",
            ],
            capture_output=True,
        )
        if p.returncode != 0:
            print("Problem starting Docker container - is Docker running?")
            return
        else:
            # wait a while for the container to start up
            time.sleep(10)
        # save the docker container id so we can stop it later
        # NOTE(review): the decoded stdout keeps its trailing newline; see
        # pytest_unconfigure where the id is passed to the shell.
        global DOCKER_CONTAINER_ID
        DOCKER_CONTAINER_ID = p.stdout.decode("utf-8")
        print(f"Setting DOCKER_CONTAINER_ID to {DOCKER_CONTAINER_ID}")
    # move on with the rest of the setup
    print(
        "pytest_configure: start " + SQL_TEST_CONNECTION_STRING + " " + SQL_TEST_DBNAME
    )
    # create database so that we have tables ready
    create_database(SQL_TEST_CONNECTION_STRING, SQL_TEST_DBNAME)
    time.sleep(1)
    upload_synthetic_data.main(SQL_TEST_DBNAME)
    print("pytest_configure: end")
def pytest_unconfigure(config):
    """
    called before test process is exited.
    """
    print("pytest_unconfigure: start")
    # drops test db
    success, log = drop_db(SQL_TEST_CONNECTION_STRING, SQL_TEST_DBNAME)
    assert success, log
    # if we started a docker container in pytest_configure, kill it here.
    if DOCKER_CONTAINER_ID:
        print(f"Stopping docker container {DOCKER_CONTAINER_ID}")
        # NOTE(review): DOCKER_CONTAINER_ID ends with a newline (raw stdout
        # decode); the shell treats it as a command terminator, so this works.
        os.system("docker kill " + DOCKER_CONTAINER_ID)
    print("pytest_unconfigure: end")
|
import json
from .game_state import GameState
from .util import get_command, debug_write, BANNER_TEXT, send_command
class AlgoCore(object):
    """
    This class handles communication with the game engine. \n
    algo_strategy.py subclasses it.

    Attributes :
        * config (JSON): json object containing information about the game
    """
    def __init__(self):
        self.config = None

    def on_game_start(self, config):
        """
        This function is called once at the start of the game.
        By default, it just initializes the config. \n
        You can override it it in algo_strategy.py to perform start of game setup
        """
        self.config = config

    def on_turn(self, game_state):
        """
        This step function is called at the start of each turn.
        It is passed the current game state, which can be used to initiate a new GameState object.
        By default, it sends empty commands to the game engine. \n
        algo_strategy.py inherits from AlgoCore and overrides this on turn function.
        Adjusting the on_turn function in algo_strategy is the main way to adjust your algo's logic.
        """
        # NOTE(review): start() passes the raw JSON string here, not a
        # GameState instance -- overrides are expected to parse it themselves.
        # Two lines: one for the build phase, one for the deploy phase.
        send_command("[]")
        send_command("[]")

    def on_action_frame(self, action_frame_game_state):
        """
        After each deploy phase, the game engine will run the action phase of the round.
        The action phase is made up of a sequence of distinct frames.
        Each of these frames is sent to the algo in order.
        They can be handled in this function.
        """
        pass

    def start(self):
        """
        Start the parsing loop.
        After starting the algo, it will wait until it receives information from the game
        engine, process this information, and respond if needed to take it's turn.
        The algo continues this loop until it receives the "End" turn message from the game.
        """
        debug_write(BANNER_TEXT)
        while True:
            # Note: Python blocks and hangs on stdin. Can cause issues if connections aren't setup properly and may need to
            # manually kill this Python program.
            game_state_string = get_command()
            # Message type is sniffed from substrings before parsing the JSON.
            if "replaySave" in game_state_string:
                """
                This means this must be the config file. So, load in the config file as a json and add it to your AlgoStrategy class.
                """
                parsed_config = json.loads(game_state_string)
                self.on_game_start(parsed_config)
            elif "turnInfo" in game_state_string:
                state = json.loads(game_state_string)
                # turnInfo[0] encodes the frame type: 0 = turn, 1 = action
                # frame, 2 = end of game.
                stateType = int(state.get("turnInfo")[0])
                if stateType == 0:
                    """
                    This is the game turn game state message. Algo must now print to stdout 2 lines, one for build phase one for
                    deploy phase. Printing is handled by the provided functions.
                    """
                    self.on_turn(game_state_string)
                elif stateType == 1:
                    """
                    If stateType == 1, this game_state_string string represents a single frame of an action phase
                    """
                    self.on_action_frame(game_state_string)
                elif stateType == 2:
                    """
                    This is the end game message. This means the game is over so break and finish the program.
                    """
                    debug_write("Got end state, game over. Stopping algo.")
                    break
                else:
                    """
                    Something is wrong? Received an incorrect or improperly formatted string.
                    """
                    debug_write("Got unexpected string with turnInfo: {}".format(game_state_string))
            else:
                """
                Something is wrong? Received an incorrect or improperly formatted string.
                """
                debug_write("Got unexpected string : {}".format(game_state_string))
|
def NumberChooser(number):
    """Dispatch *number* to the converter matching its digit count."""
    if number == 0:
        return "Zero"
    converters = {
        1: Unit,
        2: Dozens,
        3: Hundreds,
        4: Thousands,
        5: TenOfThousands,
        6: HundredThousands,
        7: Million,
        8: TensOfMillions,
        9: HundredMillions,
        10: Billions,
        11: TensOfBillions,
        12: HundredBillions,
    }
    converter = converters.get(len(str(number)))
    if converter is None:
        # More than 12 digits: the original elif chain fell through (None).
        return None
    return converter(number)
def Unit(number):
    """Romanized Japanese reading for a single digit (0 maps to '')."""
    digit_names = {
        0: "",
        1: "Ichi",
        2: "Ni",
        3: "San",
        4: "Yon",
        5: "Go",
        6: "Roku",
        7: "Nana",
        8: "Hachi",
        9: "Kyuu",
    }
    # Out-of-range input returns None, matching the original elif chain.
    return digit_names.get(number)
def Dozens(number):
    """Reading for a two-digit number; delegates to Unit below 10."""
    if number < 10:
        return Unit(number)
    tens, ones = divmod(number, 10)
    if tens == 1:
        # 10-19: "Jyuu" takes no leading digit.
        return "Jyuu " + Unit(ones)
    return Unit(tens) + " Jyuu " + Unit(ones)
def Hundreds(number):
    """Reading for a three-digit number, with the irregular Byaku/Pyaku forms."""
    if number < 100:
        return Dozens(number)
    front, back = divmod(number, 100)
    # 300/600/800 use euphonic readings instead of "<digit> Hyaku".
    irregular = {3: "San Byaku ", 6: "Rop Pyaku ", 8: "Hap Pyaku "}
    if front == 1:
        return "Hyaku " + Dozens(back)
    if front in irregular:
        return irregular[front] + Dozens(back)
    return Unit(front) + " Hyaku " + Dozens(back)
def Thousands(number):
    """Reading for a four-digit number, with the irregular Zen/Sen forms."""
    if number < 1000:
        return Hundreds(number)
    front, back = divmod(number, 1000)
    if front == 1:
        return "Sen " + Hundreds(back)
    if front == 3:
        return "San Zen " + Hundreds(back)
    if front == 8:
        return "Has Sen " + Hundreds(back)
    return Unit(front) + " Sen " + Hundreds(back)
def TenOfThousands(number):
    """Reading for a five-digit number: single-digit Man (10^4) prefix."""
    if number < 10000:
        return Thousands(number)
    man_part, rest = divmod(number, 10000)
    return Unit(man_part) + " Man " + Thousands(rest)
def HundredThousands(number):
    """Reading for a six-digit number: two-digit Man (10^4) prefix."""
    if number < 100000:
        return TenOfThousands(number)
    man_part, rest = divmod(number, 10000)
    return Dozens(man_part) + " Man " + Thousands(rest)
#
def Million(number):
    """Reading for a seven-digit number: three-digit Man (10^4) prefix."""
    if number < 1000000:
        return HundredThousands(number)
    man_part, rest = divmod(number, 10000)
    return Hundreds(man_part) + " Man " + Thousands(rest)
def TensOfMillions(number):
    """Reading for an eight-digit number: four-digit Man (10^4) prefix."""
    if number < 10000000:
        return Million(number)
    man_part, rest = divmod(number, 10000)
    return Thousands(man_part) + " Man " + Thousands(rest)
def HundredMillions(number):
    """Reading for a nine-digit number: single-digit Oku (10^8) prefix."""
    if number < 100000000:
        return TensOfMillions(number)
    oku_part, rest = divmod(number, 100000000)
    return Unit(oku_part) + " Oku " + TensOfMillions(rest)
def Billions(number):
    """Reading for a ten-digit number: two-digit Oku (10^8) prefix."""
    # Guard kept identical to the original (below 10^8 falls through).
    if number < 100000000:
        return TensOfMillions(number)
    oku_part, rest = divmod(number, 100000000)
    return Dozens(oku_part) + " Oku " + TensOfMillions(rest)
def TensOfBillions(number):
    """Reading for an eleven-digit number: three-digit Oku (10^8) prefix."""
    # Guard kept identical to the original (below 10^8 falls through).
    if number < 100000000:
        return TensOfMillions(number)
    oku_part, rest = divmod(number, 100000000)
    return Hundreds(oku_part) + " Oku " + TensOfMillions(rest)
def HundredBillions(number):
    """Reading for a twelve-digit number: four-digit Oku (10^8) prefix."""
    # Guard kept identical to the original (below 10^8 falls through).
    if number < 100000000:
        return TensOfMillions(number)
    oku_part, rest = divmod(number, 100000000)
    return Thousands(oku_part) + " Oku " + TensOfMillions(rest)
# Interactive entry point: read an integer from stdin and print its
# romanized Japanese reading.
print("Enter number to convert: ")
integer = int(input())
print(NumberChooser(integer))
# for i in range(100000,1000000):
#     print(i, NumberChooser(i))
#!/usr/bin/env python
"""
Command line interface for Baltica
"""
import os
from pathlib import Path
import sys
import yaml
import logging
import subprocess
import tempfile
import snakemake
import click
from baltica.version import _program, __version__
baltica_path = Path(__file__)
# from https://stackoverflow.com/a/56944256/1694714
class CustomFormatter(logging.Formatter):
    """Logging Formatter that colors each message by its severity level."""

    # ANSI escape codes, one color per severity.
    grey = "\x1b[38;21m"
    green = "\x1b[32;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    format = "baltica:\t| %(message)s"

    # Pre-built colored format string per log level.
    FORMATS = {
        logging.DEBUG: grey + format + reset,
        logging.INFO: green + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset,
    }

    def format(self, record):
        """Render *record* using the color matching its level."""
        colored_fmt = self.FORMATS.get(record.levelno)
        delegate = logging.Formatter(colored_fmt)
        return delegate.format(record)
def avaiable_workflows(baltica_path):
    """Return the names of the *.smk workflow files next to *baltica_path*."""
    return [smk.stem for smk in baltica_path.parent.glob('*.smk')]
# Workflow names discovered next to this file; used as CLI choices below.
avaiable_workflows_ = avaiable_workflows(baltica_path)
# Module logger with a colored stream handler (see CustomFormatter above).
logger = logging.getLogger(__file__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
# https://click.palletsprojects.com/en/8.0.x/advanced/#forwarding-unknown-options
# unknown options are passed to snakemake
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.version_option(__version__, prog_name='baltica')
@click.argument("workflow", type=click.Choice(avaiable_workflows_, case_sensitive=False))
@click.argument("config_file", type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.argument('snakemake_args', nargs=-1, type=click.UNPROCESSED)
def cli(workflow, config_file, verbose, snakemake_args):
    """
    Baltica implements workflows for differential junction
    usage methods, and method integration and analysis. Visit
    https://github.com/dieterich-lab/Baltica for more information.

    Runs baltica <WORKFLOW> with <CONFIG_FILE> and <SNAKEMAKE_ARGS>
    """
    # TODO add link to baltica docs with important snakemake parameters
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.ERROR)

    # Error if older version of snakemake is installed
    min_snakemake_version = "6"
    try:
        snakemake.utils.min_version(min_snakemake_version)
    except snakemake.exceptions.WorkflowError:
        logger.error(
            f'{_program} requires Snakemake version >= {min_snakemake_version}:',
            exc_info=True)
        sys.exit(1)

    # Resolve the workflow name to its .smk file next to this script.
    snakefile = (baltica_path.parent / workflow).with_suffix(".smk")

    # Record absolute paths in the config so the rules can locate them,
    # then write the enriched config back to disk.
    with open(config_file) as fin:
        config = yaml.safe_load(fin)
    config['config_path'] = str(Path(config_file).resolve())
    config['baltica_path'] = str(baltica_path.resolve())
    logger.debug(f"Config file is {config['config_path']}")
    with open(config_file, 'w') as fou:
        yaml.dump(config, fou)

    # Make sure the output log directory exists.
    target = config["path"]
    try:
        os.makedirs(Path(target) / 'logs/')
    except FileExistsError:
        pass

    snakemake_args = list(snakemake_args)
    if verbose:
        snakemake_args.extend(['--printshellcmds', '--verbose', '--reason'])
    else:
        logger.warning(
            "Starting Baltica run in a quiet mode. Use --verbose "
            "to change this behavior.")

    # Default to a single job when the user gave no parallelism flag.
    if not any([x in snakemake_args for x in ['--cores', '-c', '--job', '-j', '--profile']]):
        logger.warning(
            "Snakemake invoked with a single-core, use --cores N or "
            "--jobs N, where N is the number of available cores to "
            "change this parameter.")
        snakemake_args.append('-j1')

    # Singularity support
    # here we set bindings three directories needed by singularity
    # the target path, where the output is written to
    # the sample path, which contains the input data
    # the baltica directory, which contains the analysis scripts
    if '--use-singularity' in snakemake_args and "--singularity-args" not in snakemake_args:
        relative_path = Path(baltica_path).parent.resolve()
        bound_path = [
            config["path"],
            str(config["sample_path"]),
            str(Path(config['ref']).parent),
            str(Path(config['ref_fa']).parent),
            str(Path(config['config_path']).parent),
            str(relative_path),
            tempfile.gettempdir()]
        if 'orthogonal_result' in config:
            bound_path.append(str(Path(config['orthogonal_result']).parent))
        if 'bind_paths' in config:
            if not isinstance(config['bind_paths'], list):
                # Fix: the original logged this error and then fell through
                # to extend() anyway, which would splice a string in
                # character by character. Only extend for a real list.
                logger.error("bind_paths must be a list")
            else:
                bound_path.extend(config['bind_paths'])
        bound_path = set(bound_path)
        # bind several paths that contain input data
        snakemake_args.extend(
            ['--singularity-args', '-B ' + ','.join(bound_path.difference('.'))])

    # Abort early if singularity was requested but is not installed.
    try:
        _ = subprocess.run(['singularity', '--version'],
                           stdout=subprocess.DEVNULL)
    except FileNotFoundError:
        if '--use-singularity' in snakemake_args:
            logger.critical(
                "Baltica requires Singularity, which was not found", exc_info=True)
            sys.exit(1)

    if '--use-singularity' in snakemake_args and '--singularity-prefix' not in snakemake_args:
        # set $HOME/.baltica/singularity/ as download directory for the containers
        snakemake_args.extend(
            ['--singularity-prefix',
             str(Path.home() / '.baltica/singularity/')]
        )

    if workflow == 'all':
        # append final rule name for end-to-end execution
        logger.info("Running baltica in the end-to-end mode.")
        snakemake_args.append('final')

    logger.info(
        f"""Starting baltica (v{__version__}) analysis with:
    WORKFLOW: {workflow} (from {snakefile})
    CONFIGURATION: {config_file}
    TARGET DIRECTORY: {target}
    SNAKEMAKE ARGUMENTS: {' '.join(snakemake_args)}
    """)
    cmd = [
        'snakemake',
        '--configfile', config_file,
        '--snakefile', str(snakefile),
        *snakemake_args]
    logger.debug('Start of snakemake logger:')
    result = subprocess.run(cmd)
    # Fix: the original returned the bound method `result.check_returncode`
    # without calling it, so a failing snakemake run was silently ignored.
    # Calling it raises CalledProcessError on a non-zero exit status.
    return result.check_returncode()
# Script entry point: hand control to the click command.
if __name__ == '__main__':
    cli()
|
# Uses python3
import sys
def get_change(m):
    """Minimum number of coins (denominations 10, 5, 1) summing to m.

    Greedy is optimal here because each denomination divides the next.
    """
    tens, remainder = divmod(m, 10)
    fives, ones = divmod(remainder, 5)
    return tens + fives + ones
if __name__ == '__main__':
    # Read the amount from stdin (consumes all input, not one line).
    m = int(sys.stdin.read())
    print(get_change(m))
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.11 on 2017-05-28 19:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11): relabel NewsSection verbose names,
    drop News.related_news, and make NewsSection.gallery an optional FK
    to core.Gallery. Do not hand-edit the operations."""

    dependencies = [
        ('press', '0002_auto_20170528_1624'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='newssection',
            options={'verbose_name': 'Секция новости', 'verbose_name_plural': 'Секция новости'},
        ),
        migrations.RemoveField(
            model_name='news',
            name='related_news',
        ),
        migrations.AlterField(
            model_name='newssection',
            name='gallery',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Gallery', verbose_name='Галерея фотографий'),
        ),
    ]
|
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import render_template, request, session, make_response, redirect, Response
from jinja2 import Template as jTemplate
from models.project import table, col
from models.orm import User, Project, Template
from zp_tools import rdm_code, hash_pwd, json_res, is_name, str2bool, str2val
from zp_web import app, get_db
from zp_tools import s_user, s_project, login_required, login_required_ajax
from zp_types import types_map
__author__ = 'cloudbeer'
@app.route("/")
def pg_index():
    """Render the landing page for the current session user."""
    current_user = s_user()
    return render_template("index.html", user=current_user)
@app.route("/projects/")
@login_required
def pg_projects():
    """List the logged-in user's projects, newest first."""
    user = s_user()
    db = get_db()
    # Fix: the original filtered on User.id == user.id while querying
    # Project, which pulls User into the FROM clause (implicit cross join)
    # instead of restricting by owner. Project rows are created with a
    # user_id (see pg_project_save_to_db), so filter on Project.user_id.
    projects = db.query(Project).filter(Project.user_id == user.id).order_by(Project.id.desc())
    return render_template("projects.html", projects=projects, user=s_user())
@app.route("/project/")
@login_required
def pg_project():
    """Render the editor for the project held in the session."""
    project = s_project()
    # project.db is the target database *kind* (key into types_map).
    db_kind = project.db
    sql_types = types_map[db_kind]['sql'] if db_kind in types_map else None
    return render_template("project.html", project=project, user=s_user(), types_map=types_map, sql_types=sql_types)
@app.route("/project/<int:id>/")
@login_required
def pg_my_project(id):
    """Load project *id* from the DB into the session and render the editor."""
    db = get_db()
    project = db.query(Project).filter(Project.id == id).first()
    if project is None:
        return "Error Project."
    import cPickle
    # SECURITY NOTE(review): cPickle.loads on DB-stored content executes
    # arbitrary code if the stored blob is ever attacker-controlled.
    content = project.content
    project = cPickle.loads(str(content))
    project.dbid = id  # remember the DB row so later saves merge instead of insert
    session['project'] = project
    db = project.db  # NOTE: rebinds `db` to the target database *kind*, not a connection
    sql_types = None
    if db in types_map:
        sql_types = types_map[db]['sql']
    return render_template("project.html", project=project, user=s_user(), types_map=types_map, sql_types=sql_types)
@app.route("/project/save/", methods=['post'])
@login_required_ajax
def pg_project_save():
    """Update the in-session project from the posted form fields."""
    form = request.form
    # Read all fields first so a missing key fails before any mutation.
    title, content, db_kind = form["title"], form["content"], form["db"]
    project = s_project()
    project.title = title
    project.content = content
    project.db = db_kind
    session['project'] = project
    return json_res(True).str()
@app.route("/project/save2db/", methods=['post'])
@login_required_ajax
def pg_project_save_to_db():
    """Persist the in-session project to the DB (insert or merge)."""
    project = s_project()
    import cPickle
    # Projects are stored pickled; see pg_my_project for the matching load.
    content = cPickle.dumps(project)
    user = s_user()
    m_project = Project(title=project.title, content=content, user_id=user.id, create_date=datetime.now())
    db = get_db()
    if project.dbid is not None and project.dbid > 0:
        # Existing row: keep the primary key and merge the new state.
        m_project.id = project.dbid
        db.merge(m_project)
    else:
        db.add(m_project)
    db.commit()
    project.dbid = m_project.id  # record the (possibly new) row id in the session copy
    session['project'] = project
    return json_res(True).str()
@app.route("/project/get_project/", methods=['post', 'get'])
@login_required_ajax
def pg_project_get():
    """Return the in-session project's title/content/db as JSON."""
    project = s_project()
    payload = json_res(True)
    payload.title = project.title
    payload.content = project.content
    payload.db = project.db
    return payload.str()
@app.route("/project/save_table/", methods=['post', 'get'])
@login_required_ajax
def pg_table_save():
    """Create ('create') or modify ('modi') a table in the in-session project."""
    name = request.form["name"]
    if not is_name(name):
        return json_res(False, message='Invalid name, must begin with letter and no blank.').str()
    ref_name = request.form["ref_name"]
    title = request.form["title"]
    content = request.form["content"]
    act = request.form['act']
    project = s_project()
    xtable = None
    if act == 'create':
        # Reject duplicate table names.
        yy_tables = [mmtable for mmtable in project.tables if mmtable.name == name]
        if yy_tables is not None and len(yy_tables) > 0:
            return json_res(False, message='Table name must be unique. Change it please.').str()
        xtable = table() #name=name, title=title, content=content)
        project.tables.append(xtable)
    elif act == 'modi':
        yy_tables = [mmtable for mmtable in project.tables if mmtable.name == ref_name]
        if yy_tables is not None and len(yy_tables) == 1:
            xtable = yy_tables[0]
    # Fix: previously an unknown 'act' value, or a 'modi' whose ref_name
    # matched no (or several) tables, left xtable as None and crashed below
    # with an AttributeError (HTTP 500). Report a JSON error instead.
    if xtable is None:
        return json_res(False, message='Table not found.').str()
    xtable.name = name
    xtable.title = title
    xtable.content = content
    session["project"] = project
    res = json_res(True)
    return res.str()
@app.route("/project/get_table/", methods=['post', 'get'])
@login_required_ajax
def pg_table_get():
    """Look up a table by name (posted as 'flag') in the in-session project."""
    flag = request.form["flag"]
    project = s_project()
    matches = [t for t in project.tables if t.name == flag]
    # Exactly one match required, same as the original len(...) == 1 check.
    if len(matches) != 1:
        return json_res(False, message='Table not found.').str()
    found = matches[0]
    res = json_res(True)
    res.name = found.name
    res.title = found.title
    res.content = found.content
    return res.str()
@app.route("/project/save_col/", methods=['post'])
@login_required_ajax
def pg_col_save():
    """Create ('create') or modify ('modi') a column of a table in the project."""
    name = request.form["name"]
    if not is_name(name):
        return json_res(False, message='Invalid name, must begin with letter and no blank.').str()
    table_name = request.form['table']
    sql_type = request.form['sql_type']
    is_pk = request.form['is_pk']
    is_null = request.form['is_null']
    is_unique = request.form['is_unique']
    is_index = request.form['is_index']
    auto_incres = request.form['auto_incres']
    init_val = request.form['init_val']
    ref_name = request.form["ref_name"]
    title = request.form["title"]
    content = request.form["content"]
    act = request.form['act']
    project = s_project()
    xtable = None
    yy_tables = [mmtable for mmtable in project.tables if mmtable.name == table_name]
    if yy_tables is not None and len(yy_tables) == 1:
        xtable = yy_tables[0]
    else:
        return json_res(False, message='Table not found.').str()
    xcol = None
    if act == 'create':
        # Reject duplicate column names within the table.
        yy_cols = [mmcol for mmcol in xtable.cols if mmcol.name == name]
        if yy_cols is not None and len(yy_cols) > 0:
            return json_res(False, message='Col name must be unique. Change it please.').str()
        xcol = col()
        xtable.cols.append(xcol)
    elif act == 'modi':
        yy_cols = [mmcol for mmcol in xtable.cols if mmcol.name == ref_name]
        if yy_cols is not None and len(yy_cols) == 1:
            xcol = yy_cols[0]
    # Fix: previously an unknown 'act' value, or a 'modi' whose ref_name
    # matched no (or several) columns, left xcol as None and crashed below
    # with an AttributeError (HTTP 500). Report a JSON error instead.
    if xcol is None:
        return json_res(False, message='Col not found.').str()
    xcol.name = name
    xcol.title = title
    xcol.content = content
    xcol.auto_incres = str2bool(auto_incres)
    xcol.is_index = str2bool(is_index)
    xcol.is_null = str2bool(is_null)
    xcol.is_pk = str2bool(is_pk)
    xcol.is_unique = str2bool(is_unique)
    xcol.sql_type = sql_type
    xcol.init_val = init_val
    session["project"] = project
    res = json_res(True)
    return res.str()
@app.route("/account/reg/")
def pg_reg():
    # Render the registration form.
    return render_template("reg.html")
@app.route("/account/save_reg/", methods=['post'])
def pg_reg_save():
    """Register a new user; rejects already-registered email addresses."""
    email = request.form["email"]
    if not check_email(email):
        return json_res(state=False, message="Email is registered, please change.").str()
    # Per-user random salt; only the salted hash is stored.
    salt = rdm_code(16)
    hashed = hash_pwd(request.form["password"], salt)
    new_user = User(email=email, nick=request.form["nick"], password=hashed, salt=salt)
    db = get_db()
    db.add(new_user)
    db.commit()
    return json_res(state=True).str()
@app.route("/account/valid_email/", methods=['post'])
def pg_reg_check_email():
    """AJAX validator: 'true' if the email is free, 'false' if taken."""
    email = request.form["email"]
    # Consistency fix: reuse check_email() below instead of duplicating the
    # same query; True from check_email means the address is not registered.
    return "true" if check_email(email) else "false"
def check_email(email):
    """Return True when no user is registered under *email*."""
    existing = get_db().query(User).filter(User.email == email).first()
    return existing is None
@app.route("/account/login/")
def pg_login():
    """Render the login form; 'back' query param is the post-login target."""
    back = request.args.get('back', '') or '/'
    return render_template("login.html", back=back)
@app.route("/account/logout/")
def pg_logout():
    """Clear the session user and auth cookies, then redirect home."""
    response = make_response(redirect("/"))
    session.pop('user', None)
    if 'email' in request.cookies:
        # Expire both remember-me cookies immediately.
        response.set_cookie("email", "", max_age=-1)
        response.set_cookie("emailtoken", "", max_age=-1)
    return response
@app.route("/account/login/", methods=["post"])
def pg_login_post():
    """Validate credentials; on success set the session user and auth cookies."""
    db = get_db()
    email = request.form["email"]
    m_user = db.query(User).filter(User.email == email).first()
    if m_user is None:
        return json_res(False, message="This email is not registered.").str()
    pwd = m_user.password
    salt = m_user.salt
    p_pwd = request.form["password"]
    if hash_pwd(p_pwd, salt) != pwd:
        return json_res(False, message="Password is wrong.").str()
    session['user'] = m_user
    response = make_response(json_res(True).str())
    max_age = 60 * 60 * 24 * 14  # remember-me cookies last two weeks
    response.set_cookie("email", email, max_age=max_age)
    # NOTE(review): this hashes the literal string 'email', not the user's
    # address (the per-user salt still makes it user-specific) -- verify this
    # matches whatever code validates the cookie on later requests.
    response.set_cookie("emailtoken", hash_pwd('email', salt), max_age=max_age)
    return response
@app.route("/project/sql_types/", methods=["post"])
def pg_sql_types():
    """Return the SQL type list for the posted db kind (no list if unknown)."""
    db_kind = request.form["db"]
    res = json_res(True)
    if db_kind in types_map:
        res.sql_types = types_map[db_kind]["sql"]
    return res.str()
@app.route("/produce/", methods=["get"])
def pg_produce():
    """Render code from template *tid* applied to project *pid* as plain text."""
    project_id = str2val(request.args.get('pid', ''), 0)
    template_id = str2val(request.args.get('tid', ''), 0)
    if project_id <= 0 or template_id <= 0:
        return "Must prefer template_id and project_id."
    db = get_db()
    m_project = db.query(Project).filter(Project.id == project_id).first()
    m_tempate = db.query(Template).filter(Template.id == template_id).first()
    if m_project is None or m_tempate is None:
        return "Tempalte or project must be right."
    import cPickle
    # SECURITY NOTE(review): unpickles DB-stored content; see pg_my_project.
    project = cPickle.loads(str(m_project.content))
    template = jTemplate(m_tempate.content)
    code = template.render(project=project)
    return Response(code, mimetype='text/plain')
@app.route("/t/")
def test():
    """Smoke-test endpoint; always answers 'test'."""
    return make_response('test')
from chapter3_case_study.Property import Property
from chapter3_case_study.helper_functions import get_valid_input
class Appartment(Property):
    """A rental apartment: a Property with laundry and balcony details."""

    # Accepted answers for the interactive prompts below.
    valid_laundries = ("coin", "ensuite", "none")
    valid_balconies = ("yes", "no", "solarium")

    def __init__(self, balcony = '', laundry = '', **kwargs):
        """Store balcony/laundry details; remaining kwargs go to Property."""
        super().__init__(**kwargs)
        self.balcony = balcony
        self.laundry = laundry

    def display(self):
        """Print the Property summary followed by apartment-specific details."""
        super().display()
        print("APPARTMENT DETAILS")
        print("================")
        print("laundry: %s" % self.laundry)
        print("has balcony: %s" % self.balcony)

    # Fix (idiom): use the @staticmethod decorator instead of the legacy
    # `prompt_init = staticmethod(prompt_init)` rebinding after the class
    # body -- behavior is identical.
    @staticmethod
    def prompt_init():
        """Interactively collect the kwargs needed to build an Appartment."""
        parent_init = Property.prompt_init()
        laundry = get_valid_input("What laundry facilities does the property have? ", Appartment.valid_laundries)
        balcony = get_valid_input("Does the property have a balcony? ", Appartment.valid_balconies)
        parent_init.update({
            "laundry" : laundry,
            "balcony" : balcony
        })
        return parent_init
|
"""
Script for loading IDN data into calibration targets and default.yml
"""
import json
import os
import pandas as pd
from autumn.settings import PROJECTS_PATH
from autumn.settings import INPUT_DATA_PATH
from autumn.models.covid_19.constants import COVID_BASE_DATETIME
# Use OWID csv for notification and death numbers.
COVID_IDN_OWID = os.path.join(INPUT_DATA_PATH, "owid", "owid-covid-data.csv")
# Province-level Indonesian case counts (appears unused in this script).
COVID_IDN_DATA = os.path.join(INPUT_DATA_PATH, "covid_idn", "cases_by_province.xlsx")
# Calibration-target JSON files this script rewrites in place.
COVID_IDN_TARGETS = os.path.join(
    PROJECTS_PATH, "covid_19", "indonesia", "indonesia", "timeseries.json"
)
COVID_BALI_DATA = os.path.join(INPUT_DATA_PATH, "covid_idn", "Bali Modelling.xlsx")
COVID_BALI_TARGETS = os.path.join(PROJECTS_PATH, "covid_19", "indonesia", "bali", "timeseries.json")
# Maps: target key in timeseries.json -> source column in the loaded dataframe.
TARGETS_IDN = {
    "notifications": "new_cases",
    "infection_deaths": "new_deaths",
}
TARGETS_BALI = {
    "notifications": "daily_confirmed",
    "infection_deaths": "death_daily",
}
def preprocess_idn_data():
    """Load the OWID CSV, keep Indonesia rows up to today, and add a
    'date_index' column counting days since COVID_BASE_DATETIME."""
    owid = pd.read_csv(COVID_IDN_OWID)
    owid = owid[owid.iso_code == "IDN"]
    owid["date"] = pd.to_datetime(
        owid["date"], errors="coerce", format="%Y-%m-%d", infer_datetime_format=False
    )
    owid["date_index"] = (owid["date"] - COVID_BASE_DATETIME).dt.days
    # Guard against rows stamped in the future.
    return owid[owid["date"] <= pd.to_datetime("today")]
def preprocess_bali_data():
    """Load the first sheet of the Bali workbook and add a 'date_index'
    column counting days since COVID_BASE_DATETIME."""
    bali = pd.read_excel(COVID_BALI_DATA, sheet_name=0)
    # Normalize headers: lower-case, trimmed, spaces -> underscores.
    bali.rename(columns=lambda c: c.lower().strip().replace(" ", "_"), inplace=True)
    bali["date"] = pd.to_datetime(
        bali["date"], errors="coerce", format="%Y-%m-%d", infer_datetime_format=False
    )
    bali["date_index"] = (bali["date"] - COVID_BASE_DATETIME).dt.days
    return bali
def update_timeseries(TARGETS, df, file_path):
    """Overwrite each target's times/values in the JSON file with data from df.

    TARGETS maps target keys in the JSON file to column names in df; rows
    where the data column is NaN are dropped before writing. The file at
    file_path is rewritten in place.
    """
    with open(file_path, mode="r") as f:
        targets = json.load(f)
    for key, val in TARGETS.items():
        # Drop the NaN value rows from df before writing data.
        # axis must be passed by keyword: the positional form dropna(0, ...)
        # was deprecated in pandas 1.5 and removed in pandas 2.0.
        temp_df = df[["date_index", val]].dropna(axis=0, subset=[val])
        targets[key]["times"] = list(temp_df["date_index"])
        targets[key]["values"] = list(temp_df[val])
    with open(file_path, "w") as f:
        json.dump(targets, f, indent=2)
# Refresh the Indonesia-wide targets, then the Bali-specific targets.
df = preprocess_idn_data()
update_timeseries(TARGETS_IDN, df, COVID_IDN_TARGETS)
df = preprocess_bali_data()
update_timeseries(TARGETS_BALI, df, COVID_BALI_TARGETS)
|
#!/usr/bin/env python
'''
Copyright (c) 2016, Juan Jimeno
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
# Added by Marielle ---------------------------------------------------------------
from geometry_msgs.msg import PoseWithCovarianceStamped
# End -----------------------------------------------------------------------------
import tf
import localization as lx
import serial
# Added by Marielle ---------------------------------------------------------------
# Globals
#create publisher for position
#topic is "uwbpos" and node name is "uwb_pos_pub"
#the publisher named "publisher" will write to the topic with a PoseWithCovarianceStamped msg
# NOTE(review): this Publisher is created at import time, before
# rospy.init_node() runs in __main__ below -- confirm rospy tolerates
# this ordering in the target ROS distribution.
publisher = rospy.Publisher('uwbpos', PoseWithCovarianceStamped, queue_size=50)
#rospy.init_node('uwb_pos_pub', anonymous=True)
# End -----------------------------------------------------------------------------
def get_transform(id):
    """Look up the latest /map -> ``id`` transform via the global TF listener.

    Returns the translation as an (x, y, z) sequence, or None (implicitly,
    by falling off the end) when the transform is unavailable.
    """
    try:
        (trans,rot) = listener.lookupTransform('/map', id, rospy.Time(0))
        return trans
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        # Lookup failures are swallowed; the caller receives None.
        # NOTE(review): that None is appended to the transforms list in the
        # main loop and later reaches P.add_anchor unchecked -- confirm
        # this is intended.
        pass
def get_tag_location(anchors, ranges, transforms):
    """Trilaterate the tag position from the first REQ_ANCHOR anchors.

    Uses a 3D least-squares solve; returns a dict with 'x', 'y' and 'z'.
    """
    project = lx.Project(mode="3D", solver="LSE")
    # Register each anchor's known map-frame location.
    for idx in range(REQ_ANCHOR):
        project.add_anchor(anchors[idx], transforms[idx])
    target, _label = project.add_target()
    # Attach the measured tag-to-anchor distances.
    for idx in range(REQ_ANCHOR):
        target.add_measure(anchors[idx], ranges[idx])
    project.solve()
    location = target.loc
    return {'x': location.x, 'y': location.y, 'z': location.z}
def is_listed(anchors, id):
    """Return True if ``id`` is already present in ``anchors``, else False.

    The original fell off the end (returning None) when the id was absent;
    an explicit boolean keeps the same falsy contract but is clearer.
    """
    return any(anchor == id for anchor in anchors)
def get_serial_data():
    """Read one '$'-framed message from the global serial port ``ser``.

    Expected frame (per the original comment): ``$<anchor_id>,<range in cm>,``
    terminated by CRLF. Returns the comma-split fields as a list when a '$'
    start byte is seen, otherwise None.
    """
    start = ser.read()
    # return ser.readline().strip('$\r\n').split(',')
    # expected data from the serial port is: $<anchor_id>,<range in cm>,\r\n
    if start == '$':
        # NOTE(review): assumes str-mode reads (Python 2); under Python 3
        # ser.read() returns bytes and this comparison would always be False.
        parsed_data = ser.readline().strip('\r\n').split(',')
        # anchor id is stored in index 0 - parsed_data[0]
        # range is stored in index 1 - parsed_data[1]
        return parsed_data
    else:
        return None
# Added by Marielle ---------------------------------------------------------------
def uwb_pos_pub(x, y):
    """Publish the (x, y) tag position on 'uwbpos' as PoseWithCovarianceStamped.

    Intended for fusion by an EKF/UKF: z and orientation are not measured by
    the UWB sensor, so their covariance diagonal entries are set very high
    so the filter effectively ignores them.
    """
    uwb_tag_pos = PoseWithCovarianceStamped()
    # Header: the position is expressed in the map frame, stamped now.
    uwb_tag_pos.header.frame_id = "map"
    uwb_tag_pos.header.stamp = rospy.Time.now()
    # Measured planar position; z is fixed at 0.
    uwb_tag_pos.pose.pose.position.x = x
    uwb_tag_pos.pose.pose.position.y = y
    uwb_tag_pos.pose.pose.position.z = 0.0
    # No orientation measurement: publish the identity quaternion.
    # w must be 1.0 -- the original all-zero quaternion (w=0.0) is not a
    # valid rotation and consumers such as robot_localization reject it.
    uwb_tag_pos.pose.pose.orientation.x = 0.0
    uwb_tag_pos.pose.pose.orientation.y = 0.0
    uwb_tag_pos.pose.pose.orientation.z = 0.0
    uwb_tag_pos.pose.pose.orientation.w = 1.0
    # 6x6 row-major covariance over (x, y, z, roll, pitch, yaw):
    # trust x and y (1.0); effectively ignore z/roll/pitch/yaw (99999).
    uwb_tag_pos.pose.covariance = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
                                   0.0, 0.0, 99999, 0.0, 0.0, 0.0,
                                   0.0, 0.0, 0.0, 99999, 0.0, 0.0,
                                   0.0, 0.0, 0.0, 0.0, 99999, 0.0,
                                   0.0, 0.0, 0.0, 0.0, 0.0, 99999]
    # Publish to the 'uwbpos' topic (global publisher created at import time).
    rospy.loginfo(uwb_tag_pos)
    publisher.publish(uwb_tag_pos)
# End -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Node setup: TF listener for anchor poses, parameters from the param server.
    rospy.init_node('ros_dwm1000')
    listener = tf.TransformListener()
    start_time = rospy.get_time()
    #create rosparameters
    MIN_RANGE = rospy.get_param('/ros_dwm1000/min_range', 0.5)
    MAX_RANGE = rospy.get_param('/ros_dwm1000/max_range', 12.0)
    REQ_ANCHOR = rospy.get_param('/ros_dwm1000/req_anchor', 3)
    FRAME_ID = rospy.get_param('/ros_dwm1000/frame_id', 'uwb_tag')
    SERIAL_PORT = rospy.get_param('/ros_dwm1000/serial_port', '/dev/charlieArduinoTagUWB')
    #rosparam logs just to make sure parameters kicked in
    rospy.loginfo("%s is %s", rospy.resolve_name('/ros_dwm1000/min_range'), MIN_RANGE)
    rospy.loginfo("%s is %s", rospy.resolve_name('/ros_dwm1000/max_range'), MAX_RANGE)
    rospy.loginfo("%s is %s", rospy.resolve_name('/ros_dwm1000/req_anchor'), REQ_ANCHOR)
    rospy.loginfo("%s is %s", rospy.resolve_name('/ros_dwm1000/frame_id'), FRAME_ID)
    rospy.loginfo("%s is %s", rospy.resolve_name('/ros_dwm1000/serial_port'), SERIAL_PORT)
    # Blocking serial connection to the DWM1000 tag (no read timeout).
    ser = serial.Serial(SERIAL_PORT, 115200)
    ser.timeout = None
    rospy.loginfo("Connected to %s", ser.portstr)
    #lists to store anchors found
    ranges = []
    anchors = []
    transforms = []
    anchors_found = 0
    while not rospy.is_shutdown():
        #get the stream of data from the tag through the serial port
        parsed_data = get_serial_data()
        # print parsed_data
        if None != parsed_data:
            #check if the current range is within specified distance
            if MIN_RANGE < float(parsed_data[1]) < MAX_RANGE:
                #append respective arrays of the anchor found
                #list of anchor IDs found
                # NOTE(review): duplicate anchor IDs are not filtered out
                # (is_listed is defined above but never called), so the same
                # anchor could fill several of the REQ_ANCHOR slots -- confirm.
                anchors.append(parsed_data[0])
                rospy.loginfo("Anchor found with ID %s", anchors)
                #list of distance between tag and anchors found
                # ranges keeps the raw string from serial; only the threshold
                # check above converts to float.
                ranges.append(parsed_data[1])
                rospy.loginfo("Distance from anchor with ID %s is %s", anchors, ranges)
                #list of static TFs of the anchors found.
                # NOTE(review): get_transform() yields None when the TF lookup
                # fails; that None would reach the solver unchecked.
                transforms.append(get_transform(parsed_data[0]))
                anchors_found += 1
                #perform trilateration once enough anchors have been found
                if anchors_found == REQ_ANCHOR:
                    #do trilateration
                    pos = get_tag_location(anchors,ranges,transforms)
                    #broadcast the transform
                    br = tf.TransformBroadcaster()
                    br.sendTransform((pos['x'], pos['y'], pos['z']),
                                     tf.transformations.quaternion_from_euler(0, 0, 0),
                                     rospy.Time.now(),
                                     FRAME_ID,
                                     "map")
                    #TODO: Publish pos as geometry_msgs/PoseWithCovarianceStamped for EKF and only broadcast TF as an option.
                    # Added by Marielle ---------------------------------------------------------------
                    uwb_pos_pub(pos['x'], pos['y'])
                    # End -----------------------------------------------------------------------------
                    # clear lists once trilateration is done for the next cycle
                    anchors_found = 0
                    ranges = []
                    transforms = []
                    anchors = []
|
import tkinter as tk
class MailList(tk.Frame):
    """Frame holding two right-aligned placeholder labels, packed into master."""

    def __init__(self, master):
        """Create the labels, grid them one per row, and pack the frame."""
        super().__init__(master)
        self.label_username = tk.Label(self, text="Test")
        self.label_password = tk.Label(self, text="TEST")
        # Right-align each label on its own row.
        for row, label in enumerate((self.label_username, self.label_password)):
            label.grid(row=row, sticky=tk.E)
        self.pack()
|
"""
Given a non-negative integer represented as a non-empty array of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
"""
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
self.calculateNum(digits, len(digits) - 1)
return digits
def calculateNum(self, digits, index):
if index == -1:
digits.insert(0, 0)
index = 0
if digits[index] <= 8:
digits[index] = digits[index] + 1
else:
digits[index] = 0
self.calculateNum(digits, index - 1)
return
if __name__ == '__main__':
    # Smoke tests: plain increment, single-digit rollover, multi-digit rollover.
    sol = Solution()
    exp = [1]
    assert (sol.plusOne([0]) == exp)
    exp = [1,0]
    assert(sol.plusOne([9]) == exp)
    exp = [1,0,0]
    assert (sol.plusOne([9,9]) == exp)
from django.db import models
class InsuranceCompany(models.Model):
    """Directory entry for an insurance company and its contact details."""
    name = models.CharField(max_length=40)
    # NOTE(review): storing phone numbers as integers drops leading zeros and
    # cannot hold '+' or extensions -- a CharField is the usual choice; changing
    # it would require a schema migration, so flagging rather than fixing here.
    contact_phone = models.PositiveIntegerField(blank=True, null=True)
    contact_email = models.EmailField(blank=True)
    website = models.URLField(blank=True)

    def __str__(self):
        # Display the company by name in admin listings and shell output.
        return self.name
import sys
from rosalind_utility import parse_fasta
# Cache of already-computed subsequence counts, shared across calls.
memo = {}

def modified_motzkin(seq):
    """Count noncrossing base-pair matchings of RNA string ``seq``, mod 1,000,000.

    Recurrence: the first base is either left unmatched, or paired with a
    complementary base at position i, which splits the string into two
    independent substrings.
    """
    if len(seq) < 2:
        # Empty or single-base strings admit only the empty matching.
        return 1
    if seq not in memo:
        complement = {"A": "U", "U": "A", "C": "G", "G": "C"}
        # Case 1: first base unmatched.
        count = modified_motzkin(seq[1:])
        # Case 2: first base paired with a complementary base at i.
        for i in range(1, len(seq)):
            if complement.get(seq[0]) == seq[i]:
                count += modified_motzkin(seq[1:i]) * modified_motzkin(seq[i + 1:])
        memo[seq] = count % 1000000
    return memo[seq]
if __name__ == "__main__":
    '''
    Given: An RNA string s of length at most 300 bp.
    Return: The total number of noncrossing matchings of basepair edges in the bonding graph of s, modulo 1,000,000.
    '''
    # Read one FASTA record from stdin and count its noncrossing matchings.
    input_lines = sys.stdin.read().splitlines()
    rna_seq = list(parse_fasta(input_lines).values())[0]
    print(modified_motzkin(rna_seq))
|
import pygame.display
import Functions
from Color import Color
from Maze import Maze
from Slides import SlideFunctions
from Text import Text
from Settings import screen as screen_settings
screen_size = screen_settings.screen_size
def backtracker_slide(screen, display_settings):
    """Draw the 'Backtracker Algorithm' title slide, then run its animation."""
    title = Text((screen_size[0] // 2, int(screen_size[1] * 0.1)), 'Backtracker Algorithm',
                 int(display_settings.title_size), display_settings.text_color)
    # White background, centered title, then the maze animation sub-slide.
    screen.fill(Color.white)
    title.show(screen)
    backtracker_slide1(screen)
def backtracker_slide1(screen):
    """
    Build an 8x8 square maze with the backtracker algorithm, animate its
    generation on the left half of the screen, then draw the finished maze.
    """
    maze_backtracker = Maze(8, 'square', 'backtracker')
    maze_backtracker.create((screen_size[0] // 4, screen_size[1] // 2), graph_bool=True)
    maze_backtracker.draw_grid(screen, graph_bool=True, cell_text_bool=True, cell_text_type=1)
    pygame.display.update()
    maze_backtracker.draw_frame(screen)
    # Step through the generation animation, pause briefly, then show the result.
    SlideFunctions.slide_animation(screen, maze_backtracker)
    Functions.update_delay(500)
    maze_backtracker.draw(screen, True, False)
# Generated by Django 3.1.1 on 2020-10-02 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: changes Comment.blog_title to an IntegerField.
    # NOTE(review): an IntegerField labelled 'Blog Title' looks like it may be
    # standing in for a ForeignKey to the blog post -- confirm against the
    # current model before building on this schema.
    dependencies = [
        ('joseph_blog', '0004_auto_20201002_1314'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='blog_title',
            field=models.IntegerField(verbose_name='Blog Title'),
        ),
    ]
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup; # for web scraping
import requests;
import urllib.request;
from urllib.request import urlopen;
import re;
import sys;
# sets the destination URL and returns parsed webpage as BeautifulSoup object
# to be used in other functions
def choose_vehicle_class(choice):
    """Fetch the Hitachi rail-vehicles page for ``choice`` and return it
    parsed as a BeautifulSoup object for use by the other scrapers."""
    url = "https://www.hitachi-rail.com/delivery/rail_vehicles/{}/index.html".format(choice)
    page = urlopen(url)
    return BeautifulSoup(page, 'html.parser')
# prints delivery speed record data scraped from Hitachi website
def print_speed_records(choice):
"""pulls train delivery speed records and prints based on user selection"""
soup = choose_vehicle_class(choice)
counter = 0 # counter to track when a table has been iterated over
train_tables = soup.find_all('table', {'class':'TableStyle3'}) # grab all tables of trains with style class TableStyle3...
for tr in train_tables: # ... for each <tr> train tables...
rows = tr.find_all('tr') # find all <tr> and assign to var rows
if counter == 0:
for td in rows: # ... for each <td> found in <tr>...
removed_tags = td.get_text() # ... format HTML tags out, print, and update counter
print(removed_tags)
counter = 1
else:
print("------------------------------------------") # ... else print table delimiter and update counter
counter = 0
def main():
    """Menu loop: prompt for a vehicle-class key and print its delivery records."""
    running = True
    while running:
        print(
            "TYPE: High Speed Trains KEY: 'high_speed'\nTYPE: Intercity Trains KEY: 'intercity'\nTYPE: Commuter/Metro Trains KEY: 'commuter'\nTYPE: High Speed Lightweight Bolsterless Bogie Trains KEY: 'bogie'\n")
        print("To exit program type '0'")
        user_choice = input("To see delivery records for desired train class please type key value and press Enter: ")
        # input() returns a string, so compare against "0": the original
        # compared to the integer 0, which never matched and left the exit
        # path unreachable.
        if user_choice == "0":
            running = False
            break
        else:
            print_speed_records(user_choice)
# Run the interactive menu only when executed as a script.
if __name__ == "__main__":
    main()
|
#coding:utf-8
import bobo, webob
from controller import Controller
@bobo.subroute('/user', scan=True)
class UserController(Controller):
    """Bobo subroute controller mounted at /user."""
    def __init__(self, request):
        # Keep the webob request for handlers on this controller.
        self.request = request
    @bobo.query('')
    def base(self):
        """GET /user -- plain greeting, usable as a liveness check."""
        return "Hello!"
|
from common.dto_dependency_loader import asinstanceof, asinstancesof, DtoDependencyLoader
from common.models.scenario_settings import ScenarioSettings
from common.models.static_analysis import StaticAnalysisResult
from common.models.vuln_type import VulnType
class Scenario:
    """One analysis scenario: its settings plus the APK and static-analysis
    result it applies to."""
    def __init__(self, scenario_settings, apk_filename, static_analysis_result, scenario_settings_id=None):
        # Load the settings by id when the full settings object was not supplied.
        self.scenario_settings = DtoDependencyLoader.load_if_none(
            scenario_settings,
            scenario_settings_id,
            ScenarioSettings)
        self.apk_filename = apk_filename
        # Enforced to be a StaticAnalysisResult instance.
        self.static_analysis_result = asinstanceof(static_analysis_result, StaticAnalysisResult)
    def __json__(self):
        """Serialize with only the settings id, avoiding embedding the full
        settings object in the payload."""
        return {
            'scenario_settings': None,
            'scenario_settings_id': self.scenario_settings.id,
            'apk_filename': self.apk_filename,
            'static_analysis_result': self.static_analysis_result}
# per scenario_settings, only in Scenario because of legacy code
class ScenariosData:
    """All scenarios for one APK, plus APK-wide facts from static analysis."""

    def __init__(self, scenario_list, apk_filename, package, min_sdk_version, target_sdk_version):
        # Enforced to be a list of Scenario instances.
        self.scenario_list = asinstancesof(scenario_list, Scenario)
        # general information from static analysis valid for all scenarios
        self.apk_filename = apk_filename
        self.package = package
        self.min_sdk_version = min_sdk_version
        self.target_sdk_version = target_sdk_version

    def __json__(self):
        """Plain-dict form for JSON serialization."""
        return {
            'scenario_list': self.scenario_list,
            'apk_filename': self.apk_filename,
            'package': self.package,
            'min_sdk_version': self.min_sdk_version,
            'target_sdk_version': self.target_sdk_version}

    def is_selected_activities(self):
        """True if any scenario's finding is of the selected-activities vuln type."""
        # any() short-circuits instead of materializing a filtered list just
        # to test its truthiness (the original bool([...]) form).
        return any(s.static_analysis_result.vuln_type == VulnType.selected_activities.value
                   for s in self.scenario_list)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.