# object identity vs. equality test
a = list()
b = list()
print(a is b)
print(a == b)
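# A minimal check of the behavior shown above (standard Python semantics):
assert list() is not list()  # two calls create distinct objects, so `is` is False
assert list() == list()      # both lists are empty, so `==` compares equal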
|
from __future__ import print_function
import numpy as np
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Dropout
from keras.layers import Convolution2D, Conv2DTranspose
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop, Adadelta
from keras.callbacks import Callback, ModelCheckpoint
from keras import backend as K
from keras import objectives
import warnings
from keras.utils import plot_model
def build_training_data():
X = np.zeros([1100000, 28, 28])
Y = np.zeros([1100000, 1, 1])
x_raw = X[0:500000]
print("Dataset dimension:", np.shape(x_raw))
# no of trajectory files and frames in each file
n_traj = 50
f_traj = 10000
# fraction of train, test and pred data separation
sep_train = 0.8
sep_test = 0.9
sep_pred = 1
# choice to flatten data: "0" for NO & "1" for YES
choice = 0
# row and column dimension for each frame
row = 28
col = 28
# padding: use this in case of dimension mismatch for the encoders
# pad_row and pad_col are rows or columns to be added
pad_row = 0
pad_col = 0
# end define parameters
# padding
row_dim_array = row + pad_row
col_dim_array = col + pad_col
# reshape data according to the choice of flattening
if choice == 0:
new_shape = (len(x_raw),row_dim_array,col_dim_array)
if choice == 1:
new_shape = (len(x_raw),row_dim_array*col_dim_array)
add_zero = np.zeros(new_shape,dtype = x_raw.dtype)
if choice == 0:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1],0:x_raw.shape[2]] = x_raw
if choice == 1:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1]] = x_raw
x_raw = add_zero
sep_1 = int(x_raw.shape[0]*sep_train)
sep_2 = int(x_raw.shape[0]*sep_test)
sep_3 = int(x_raw.shape[0]*sep_pred)
x_train_raw = x_raw[:sep_1]
x_test_raw = x_raw[sep_1:sep_2]
x_pred_raw = x_raw[sep_2:sep_3]
return x_train_raw, x_test_raw, x_pred_raw
'''
Convolutional variational autoencoder in Keras
Reference: "Auto-Encoding Variational Bayes" (https://arxiv.org/abs/1312.6114)
'''
class conv_variational_autoencoder(object):
'''
variational autoencoder class
parameters:
- image_size: tuple
height and width of images
- channels: int
number of channels in input images
- conv_layers: int
number of encoding/decoding convolutional layers
- feature_maps: list of ints
number of output feature maps for each convolutional layer
- filter_shapes: list of tuples
convolutional filter shape for each convolutional layer
- strides: list of tuples
convolutional stride for each convolutional layer
- dense_layers: int
number of encoding/decoding dense layers
- dense_neurons: list of ints
number of neurons for each dense layer
- dense_dropouts: list of float
fraction of neurons to drop in each dense layer (between 0 and 1)
- latent_dim: int
number of dimensions for latent embedding
- activation: string (default='relu')
activation function to use for layers
- eps_mean: float (default = 0.0)
mean to use for epsilon (target distribution for embedding)
- eps_std: float (default = 1.0)
standard dev to use for epsilon (target distribution for embedding)
methods:
- train(data,batch_size,epochs=1,checkpoint=False,filepath=None)
train network on given data
- save(filepath)
save the model weights to a file
- load(filepath)
load model weights from a file
- return_embeddings(data)
return the embeddings for given data
- generate(embedding)
return a generated output given a latent embedding
'''
def __init__(self,image_size,channels,conv_layers,feature_maps,filter_shapes,
strides,dense_layers,dense_neurons,dense_dropouts,latent_dim,
activation='relu',eps_mean=0.0,eps_std=1.0):
# check that arguments are proper length;
if len(filter_shapes)!=conv_layers:
raise Exception("number of convolutional layers must equal length of filter_shapes list")
if len(strides)!=conv_layers:
raise Exception("number of convolutional layers must equal length of strides list")
if len(feature_maps)!=conv_layers:
raise Exception("number of convolutional layers must equal length of feature_maps list")
if len(dense_neurons)!=dense_layers:
raise Exception("number of dense layers must equal length of dense_neurons list")
if len(dense_dropouts)!=dense_layers:
raise Exception("number of dense layers must equal length of dense_dropouts list")
# even shaped filters may cause problems in the Theano backend
even_filters = [f for pair in filter_shapes for f in pair if f % 2 == 0]
if K.image_dim_ordering() in ('th', 'channels_first') and len(even_filters) > 0:
warnings.warn('Even shaped filters may cause problems in Theano backend')
self.eps_mean = eps_mean
self.eps_std = eps_std
self.image_size = image_size
# define input layer
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
self.input = Input(shape=(channels,image_size[0],image_size[1]))
else:
self.input = Input(shape=(image_size[0],image_size[1],channels))
# define convolutional encoding layers
self.encode_conv = []
layer = Convolution2D(feature_maps[0],filter_shapes[0],padding='same',
activation=activation,strides=strides[0])(self.input)
self.encode_conv.append(layer)
for i in range(1,conv_layers):
layer = Convolution2D(feature_maps[i],filter_shapes[i],
padding='same',activation=activation,
strides=strides[i])(self.encode_conv[i-1])
self.encode_conv.append(layer)
# define dense encoding layers
self.flat = Flatten()(self.encode_conv[-1])
self.encode_dense = []
layer = Dense(dense_neurons[0],activation=activation) (Dropout(dense_dropouts[0])(self.flat))
self.encode_dense.append(layer)
for i in range(1,dense_layers):
layer = Dense(dense_neurons[i],activation=activation) (Dropout(dense_dropouts[i])(self.encode_dense[i-1]))
self.encode_dense.append(layer)
# define embedding layer
self.z_mean = Dense(latent_dim)(self.encode_dense[-1])
self.z_log_var = Dense(latent_dim)(self.encode_dense[-1])
self.z = Lambda(self._sampling, output_shape=(latent_dim,)) ([self.z_mean, self.z_log_var])
# save all decoding layers for generation model
self.all_decoding=[]
# define dense decoding layers
self.decode_dense = []
layer = Dense(dense_neurons[-1], activation=activation)
self.all_decoding.append(layer)
self.decode_dense.append(layer(self.z))
for i in range(1,dense_layers):
layer = Dense(dense_neurons[-i-1],activation=activation)
self.all_decoding.append(layer)
self.decode_dense.append(layer(self.decode_dense[i-1]))
# dummy model to get image size after encoding convolutions
self.decode_conv = []
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
dummy_input = np.ones((1,channels,image_size[0],image_size[1]))
else:
dummy_input = np.ones((1,image_size[0],image_size[1],channels))
dummy = Model(self.input, self.encode_conv[-1])
conv_size = dummy.predict(dummy_input).shape
layer = Dense(conv_size[1]*conv_size[2]*conv_size[3],activation=activation)
self.all_decoding.append(layer)
self.decode_dense.append(layer(self.decode_dense[-1]))
reshape = Reshape(conv_size[1:])
self.all_decoding.append(reshape)
self.decode_conv.append(reshape(self.decode_dense[-1]))
# define deconvolutional decoding layers
for i in range(1,conv_layers):
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
dummy_input = np.ones((1,channels,image_size[0],image_size[1]))
else:
dummy_input = np.ones((1,image_size[0],image_size[1],channels))
dummy = Model(self.input, self.encode_conv[-i-1])
conv_size = list(dummy.predict(dummy_input).shape)
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
conv_size[1] = feature_maps[-i]
else:
conv_size[3] = feature_maps[-i]
layer = Conv2DTranspose(feature_maps[-i-1],filter_shapes[-i],
padding='same',activation=activation,
strides=strides[-i])
self.all_decoding.append(layer)
self.decode_conv.append(layer(self.decode_conv[i-1]))
layer = Conv2DTranspose(channels,filter_shapes[0],padding='same',
activation='sigmoid',strides=strides[0])
self.all_decoding.append(layer)
self.output=layer(self.decode_conv[-1])
# build model
self.model = Model(self.input, self.output)
self.optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
self.model.compile(optimizer=self.optimizer, loss=self._vae_loss)
print("Model summary:")
self.model.summary()
#plot model
plot_model(self.model, show_shapes=True, show_layer_names=True,
to_file='./images/vae_conv.png')
# model for embeddings
self.embedder = Model(self.input, self.z_mean)
# model for generation
self.decoder_input = Input(shape=(latent_dim,))
self.generation = []
self.generation.append(self.all_decoding[0](self.decoder_input))
for i in range(1, len(self.all_decoding)):
self.generation.append(self.all_decoding[i](self.generation[i-1]))
self.generator = Model(self.decoder_input, self.generation[-1])
def _sampling(self,args):
'''
sampling function for embedding layer
'''
z_mean,z_log_var = args
epsilon = K.random_normal(shape=K.shape(z_mean), mean=self.eps_mean, stddev=self.eps_std)
return z_mean + K.exp(z_log_var) * epsilon
def _vae_loss(self,input,output):
'''
loss function for variational autoencoder
'''
input_flat = K.flatten(input)
output_flat = K.flatten(output)
xent_loss = self.image_size[0] * self.image_size[1] * objectives.binary_crossentropy(input_flat,output_flat)
kl_loss = - 0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var), axis=-1)
return xent_loss + kl_loss
def train(self,data,batch_size,epochs=1,validation_data=None,
checkpoint=False,filepath=None):
'''
train network on given data;
parameters:
- data: numpy array
input data
- batch_size: int
number of records per batch
- epochs: int (default: 1)
number of epochs to train for
- validation_data: tuple (optional)
tuple of numpy arrays (X,y) representing validation data
- checkpoint: boolean (default: False)
whether or not to save model after each epoch
- filepath: string (optional)
path to save model if checkpoint is set to True
outputs:
None
'''
if checkpoint and filepath is None:
raise Exception("Please enter a path to save the network")
callbacks = [ModelCheckpoint(filepath)] if checkpoint else []
self.model.fit(data,data,batch_size=batch_size,epochs=epochs,shuffle=True,
validation_data=validation_data,callbacks=callbacks)
def save(self, filepath):
'''
save the model weights to a file
parameters:
- filepath: string
path to save model weights
outputs:
None
'''
self.model.save_weights(filepath)
def load(self,filepath):
'''
load model weights from a file
parameters:
- filepath : string
path from which to load model weights
outputs:
None
'''
self.model.load_weights(filepath)
def decode(self,data):
'''
return the decodings for given data
parameters:
- data: numpy array
input data
outputs:
numpy array of decodings for input data
'''
return self.model.predict(data)
def return_embeddings(self,data):
'''
return the embeddings for given data
parameters:
- data: numpy array
input data
outputs:
numpy array of embeddings for input data
'''
return self.embedder.predict(data)
def generate(self,embedding):
'''
return a generated output given a latent embedding
parameters:
- data: numpy array
latent embedding
outputs:
numpy array of generated output
'''
return self.generator.predict(embedding)
if __name__ == "__main__":
import sys
import os
import gzip
from six.moves import cPickle
import matplotlib.pyplot as plt
from scipy.stats import norm
# define parameters
channels = 1
batch_size = 1000
conv_layers = 4
feature_maps = [64,64,64,64]
filter_shapes = [(3,3),(3,3),(3,3),(3,3)]
strides = [(1,1),(2,2),(1,1),(1,1)]
dense_layers = 1
dense_neurons = [128]
dense_dropouts = [0]
latent_dim = 3
epochs = 1
nb_start = 0
nb_end = 80
# create directories
#path_1 = "./fig"
#path_2 = "./imgs"
#path_3 = "./hist"
#path_4 = "./model"
#if not os.path.exists(path_1):
# os.mkdir(path_1, 0755)
#if not os.path.exists(path_2):
# os.mkdir(path_2, 0755)
#if not os.path.exists(path_3):
# os.mkdir(path_3, 0755)
#if not os.path.exists(path_4):
# os.mkdir(path_4, 0755)
#print("Completed directories creation or if already exist - then checked")
# load data
print("Loading data")
# normalizing input image matrix
X_train = x_train_raw.astype('float32') / np.amax(x_train_raw)
X_test = x_test_raw.astype('float32') / np.amax(x_test_raw)
X_pred = x_pred_raw.astype('float32') / np.amax(x_pred_raw)
print("Shape of data loaded:", "train:", np.shape(X_train), "test:", np.shape(X_test))
# reshape to 4d tensors
image_size = X_train.shape[-2:]
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
tensor_shape = (1,image_size[0],image_size[1])
else:
tensor_shape = (image_size[0],image_size[1],1)
X_train = X_train.reshape((X_train.shape[0],) + tensor_shape)
X_test = X_test.reshape((X_test.shape[0],) + tensor_shape)
print("Reshaped data:", "train:", np.shape(X_train), "test:", np.shape(X_test))
# build autoencoder
print("Building variational autoencoder")
autoencoder = conv_variational_autoencoder(image_size,channels,conv_layers,feature_maps,
filter_shapes,strides,dense_layers,dense_neurons,dense_dropouts,latent_dim)
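# A minimal, hedged usage sketch of the methods documented in the class docstring
# above, run on the placeholder data prepared earlier (hyperparameters as defined above):
autoencoder.train(X_train, batch_size, epochs=epochs, validation_data=(X_test, X_test))
embeddings = autoencoder.return_embeddings(X_test)  # latent coordinates, shape (N, latent_dim)
decoded = autoencoder.decode(X_test)                # reconstructed images
generated = autoencoder.generate(embeddings)        # images generated from the embeddings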
|
from pandas import DataFrame, read_csv
import pandas as pd
import matplotlib.pyplot as plt
import math
import re
import numpy as np
import random
file = 'train_test.csv'
file_evaluate = 'evaluate.csv'
Data = pd.read_csv(file)
poems = Data['text']
statuses = Data['label']
numberOfpoems = len(poems)
train_index = random.sample(range(0,numberOfpoems), int(.8 * numberOfpoems))
test_index = list(set([item for item in range(0, numberOfpoems)]) - set(train_index))
hafez_poems_in_train_data = []
saadi_poems_in_train_data = []
total_vocab_in_train_data = {}
hafez_vocab_in_train_data = {}
saadi_vocab_in_train_data = {}
train_index.sort()
test_index.sort()
for index in train_index:
poem = poems[index]
words = poem.split(' ')
if statuses[index] == 'hafez' :
hafez_poems_in_train_data.append(poems[index])
elif statuses[index] == 'saadi' :
saadi_poems_in_train_data.append(poems[index])
for word in words:
if word not in total_vocab_in_train_data:
total_vocab_in_train_data[word] = 1
else:
total_vocab_in_train_data[word]+=1
probability_hafez = (len(hafez_poems_in_train_data) / float(len(train_index)) )
probability_saadi = (len(saadi_poems_in_train_data) / float(len(train_index)) )
total_vocab_size_unique_word = len(total_vocab_in_train_data)
num_total_word_vocab_not_unique = 0
for key in total_vocab_in_train_data :
num_total_word_vocab_not_unique+=total_vocab_in_train_data[key]
def recognizer(poems_td, vocab_td):
for poem in poems_td:
words = poem.split(' ')
for word in words:
if word not in vocab_td:
vocab_td[word] = 1
else:
vocab_td[word]+=1
return vocab_td
hafez_vocab_in_train_data = recognizer(hafez_poems_in_train_data,hafez_vocab_in_train_data)
saadi_vocab_in_train_data = recognizer(saadi_poems_in_train_data,saadi_vocab_in_train_data)
num_total_word_saadi_unique = len(saadi_vocab_in_train_data)
num_total_word_saadi_not_unique = 0
for key in saadi_vocab_in_train_data :
num_total_word_saadi_not_unique += saadi_vocab_in_train_data[key]
num_total_word_hafez_unique = len(hafez_vocab_in_train_data)
num_total_word_hafez_not_unique = 0
for key in hafez_vocab_in_train_data :
num_total_word_hafez_not_unique+=hafez_vocab_in_train_data[key]
print("Total Number Of Poems in train_test:",numberOfpoems)
print("hafez poems count in 80% train data:",len(hafez_poems_in_train_data))
print("saadi poems count in 80% train data:",len(saadi_poems_in_train_data))
print("probability hafez:" , probability_hafez)
print("probability saadi:" , probability_saadi)
print("num_total_word_vocab_not_unique",num_total_word_vocab_not_unique)
print("total_vocab_size_unique_word",len(total_vocab_in_train_data))
print("num poem in train data",len(train_index))
print("num_total_word_hafez_not_unique:",num_total_word_hafez_not_unique)
print("num_total_word_hafez_unique",num_total_word_hafez_unique)
print("num_total_word_saadi_not_unique:",num_total_word_saadi_not_unique )
print("num_total_word_saadi_unique",num_total_word_saadi_unique)
unique_words = 0
unique_words = len(total_vocab_in_train_data)
def calculate_probability_conditional(word,saadi_vocab_in_train_data,total_vocab_in_train_data,
num_total_word_saadi_not_unique ,unique_words, condition):
if(condition == "Laplace"):
num_word_in_saadi = 0
if word in saadi_vocab_in_train_data:
num_word_in_saadi = saadi_vocab_in_train_data[word]
x = num_word_in_saadi + 1
y = float( num_total_word_saadi_not_unique + len(total_vocab_in_train_data))
return x/y
elif(condition == "Simple"):
num_word_in_saadi = 0
if word in saadi_vocab_in_train_data:
num_word_in_saadi = saadi_vocab_in_train_data[word]
x = num_word_in_saadi + .3
y = float(num_total_word_saadi_not_unique + len(total_vocab_in_train_data))
return x/y
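# A small worked example of the Laplace estimate above (toy counts, not the real corpus):
# a word seen 4 times for a poet, 1000 poet words in total, and a 2-word shared vocabulary
# gives (4 + 1) / (1000 + 2).
_toy_vocab = {'gol': 4}
_toy_total_vocab = {'gol': 9, 'del': 6}
print(calculate_probability_conditional('gol', _toy_vocab, _toy_total_vocab, 1000, len(_toy_total_vocab), "Laplace"))
# -> 5 / 1002 ≈ 0.00499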
def Unit_Test(test_index,
statuses,
poems,
probability_hafez,
probability_saadi,
total_vocab_in_train_data,
unique_words,
num_total_word_vocab_not_unique,
hafez_vocab_in_train_data,
saadi_vocab_in_train_data,
num_total_word_hafez_unique,
num_total_word_saadi_unique,
num_total_word_hafez_not_unique,
num_total_word_saadi_not_unique):
hafez_poems_in_test_data = []
saadi_poems_in_test_data = []
num_total_word_vocab_not_unique = 0
total_vocab_in_test_data = {}
hafez_vocab_in_test_data = {}
saadi_vocab_in_test_data = {}
correct_detected_hafezes = 0
detected_hafezes = 0
correct_detected = 0
select_op = 0
num_total_word_hafez_not_unique = 0
num_total_word_saadi_not_unique = 0
for index in test_index :
if statuses[index] == 'hafez' :
hafez_poems_in_test_data.append(poems[index])
else:
saadi_poems_in_test_data.append(poems[index])
hafez_numberOfpoems_in_test_data = len(hafez_poems_in_test_data)
saadi_numberOfpoems_in_test_data = len(saadi_poems_in_test_data)
for index in test_index:
poem = poems[index]
words = poem.split(' ')
for word in words:
if word not in total_vocab_in_test_data:
total_vocab_in_test_data[word] = 1
else:
total_vocab_in_test_data[word]+=1
unique_words = len(total_vocab_in_test_data)
for key in total_vocab_in_test_data :
num_total_word_vocab_not_unique += total_vocab_in_test_data[key]
hafez_vocab_in_test_data = recognizer(hafez_poems_in_test_data,hafez_vocab_in_test_data)
saadi_vocab_in_test_data = recognizer(saadi_poems_in_test_data,saadi_vocab_in_test_data)
num_total_word_hafez_unique = len(hafez_vocab_in_test_data)
for key in hafez_vocab_in_test_data :
num_total_word_hafez_not_unique += hafez_vocab_in_test_data[key]
num_total_word_saadi_unique = len(saadi_vocab_in_test_data)
for key in saadi_vocab_in_test_data :
num_total_word_saadi_not_unique += saadi_vocab_in_test_data[key]
print("\n\n Unit Test Initialization OutPut :")
print("hafez poems count in 20% test data:",hafez_numberOfpoems_in_test_data)
print("saadi poems count in 20% test data:",saadi_numberOfpoems_in_test_data)
print("num_total_word_vocab_not_unique in 20% test",num_total_word_vocab_not_unique)
print("total_vocab_size_unique_word in 20 % test",len(total_vocab_in_train_data))
print("num poem in train data in 20% test",len(test_index))
print("num_total_word_hafez_not_unique in 20% test:",num_total_word_hafez_not_unique)
print("num_total_word_hafez_unique in 20% test: ",num_total_word_hafez_unique)
print("num_total_word_saadi_not_unique in 20% test:",num_total_word_saadi_not_unique )
print("num_total_word_saadi_unique in 20 % test",num_total_word_saadi_unique)
print("Unit Test Initialization OutPuts Ends \n")
all_hafezes = len(hafez_poems_in_test_data)
total = len(test_index)
select_op = int(input("\n\n\n '1'.... Laplace \n '2' ....Simple \n\n\n"))
for index in test_index:
poem = poems[index]
words = poem.split(' ')
p_hafez = probability_hafez
p_saadi = probability_saadi
for word in words:
if(select_op == 2):
p_hafez *= calculate_probability_conditional(word,hafez_vocab_in_train_data,
total_vocab_in_train_data,
num_total_word_hafez_not_unique ,
len(total_vocab_in_train_data),"Simple")
p_saadi *= calculate_probability_conditional(word,saadi_vocab_in_train_data,
total_vocab_in_train_data,
num_total_word_saadi_not_unique ,
len(total_vocab_in_train_data),"Simple" )
if(select_op == 1):
p_hafez *= calculate_probability_conditional(word,hafez_vocab_in_train_data,
total_vocab_in_train_data,
num_total_word_hafez_not_unique ,
len(total_vocab_in_train_data),"Laplace")
p_saadi *= calculate_probability_conditional(word,saadi_vocab_in_train_data,total_vocab_in_train_data,
num_total_word_saadi_not_unique , len(total_vocab_in_train_data),"Laplace")
if p_hafez > p_saadi:
detected_hafezes += 1
if statuses[index] == 'hafez':
correct_detected_hafezes += 1
correct_detected += 1
# model decides saadi
elif p_hafez < p_saadi:
if statuses[index] =='saadi':
correct_detected+=1
recall = correct_detected_hafezes/float(all_hafezes)
precision = correct_detected_hafezes/ float(detected_hafezes)
accuracy = correct_detected / float(total)
if(select_op == 2):
print("Recall in simple:",recall)
print("Precision in simple:",precision)
print("Accuracy in simple:",accuracy)
if (select_op == 1):
print("Recall in Laplace:",recall)
print("Precision in Laplace:",precision)
print("Accuracy in Laplace:",accuracy)
Unit_Test(test_index,
statuses,
poems,
probability_hafez,
probability_saadi,
total_vocab_in_train_data,
len(total_vocab_in_train_data),
num_total_word_vocab_not_unique,
hafez_vocab_in_train_data,
saadi_vocab_in_train_data,
num_total_word_hafez_unique,
num_total_word_saadi_unique,
num_total_word_hafez_not_unique,
num_total_word_saadi_not_unique)
# def make_evalute_file( evalute_filename ,
# probability_hafez,
# probability_saadi,
# total_vocab_in_train_data,
# unique_words,
# num_total_word_vocab_not_unique,
# hafez_vocab_in_train_data,
# saadi_vocab_in_train_data,
# num_total_word_hafez_unique,
# num_total_word_saadi_unique,
# num_total_word_hafez_not_unique,
# num_total_word_saadi_not_unique):
# Data = pd.read_csv(evalute_filename)
# index = Data['id']
# poems = Data['text']
# statuses = []
# for poem in poems:
# words = poem.split(' ')
# p_hafez = probability_hafez
# p_saadi = probability_saadi
# for word in words:
# p_hafez *= calculate_probability_conditional(word,hafez_vocab_in_train_data,
# total_vocab_in_train_data,
# num_total_word_hafez_not_unique ,
# len(total_vocab_in_train_data),"Laplace")
# p_saadi *= calculate_probability_conditional(word,saadi_vocab_in_train_data,total_vocab_in_train_data,
# num_total_word_saadi_not_unique , len(total_vocab_in_train_data),"Laplace")
# if p_hafez > p_saadi:
# statuses.append('hafez')
# elif p_hafez < p_saadi:
# statuses.append('saadi')
# Data.insert(2,'label',statuses)
# print(Data.head(20))
# Data.to_csv("output.csv")
# make_evalute_file(file_evaluate ,
# probability_hafez,
# probability_saadi,
# total_vocab_in_train_data,
# len(total_vocab_in_train_data),
# num_total_word_vocab_not_unique,
# hafez_vocab_in_train_data,
# saadi_vocab_in_train_data,
# num_total_word_hafez_unique,
# num_total_word_saadi_unique,
# num_total_word_hafez_not_unique,
# num_total_word_saadi_not_unique)
|
from itertools import chain, combinations
from aimacode.planning import Action
from aimacode.utils import expr
from layers import BaseActionLayer, BaseLiteralLayer, makeNoOp, make_node
class ActionLayer(BaseActionLayer):
def _inconsistent_effects(self, actionA, actionB):
""" Return True if an effect of one action negates an effect of the other
See Also
--------
layers.ActionNode
"""
"""
Inconsistent effects: one action negates an effect of the other. For example Eat(Cake)
and the persistence of Have(Cake) have inconsistent effects because they disagree on
the effect Have(Cake).
"""
# TODO: implement this function
#raise NotImplementedError
for effectA in actionA.effects:
for effectB in actionB.effects:
if effectA == ~effectB:
return True
return False
def _interference(self, actionA, actionB):
""" Return True if the effects of either action negate the preconditions of the other
See Also
--------
layers.ActionNode
"""
"""
Interference: one of the effects of one action is the negation of a precondition of the
other. For example Eat(Cake) interferes with the persistence of Have(Cake) by negating
its precondition.
"""
# TODO: implement this function
#raise NotImplementedError
for effect in actionA.effects:
for precondition in actionB.preconditions:
if effect == ~precondition:
return True
for effect in actionB.effects:
for precondition in actionA.preconditions:
if effect == ~precondition:
return True
return False
def _competing_needs(self, actionA, actionB):
""" Return True if the preconditions of the actions are all pairwise mutex in the
parent layer
See Also
--------
layers.ActionNode
layers.BaseLayer.parent_layer
"""
"""
Competing needs: one of the preconditions of one action is mutually exclusive with a
precondition of the other. For example, Bake(Cake) and Eat(Cake) are mutex because
they compete on the value of the Have(Cake) precondition.
"""
for preconditionA in actionA.preconditions:
for preconditionB in actionB.preconditions:
if self.parent_layer.is_mutex(preconditionA, preconditionB):
return True
return False
class LiteralLayer(BaseLiteralLayer):
def _inconsistent_support(self, literalA, literalB):
""" Return True if all ways to achieve both literals are pairwise mutex in the parent
layer
See Also
--------
layers.BaseLayer.parent_layer
"""
"""
Inconsistent support: At(Spare, Axle) is mutex with At(Flat, Axle) in S2 because the
only way of achieving At(Spare, Axle) is by PutOn(Spare, Axle), and that is mutex
with the persistence action that is the only way of achieving At(Flat, Axle). Thus, the
mutex relations detect the immediate conflict that arises from trying to put two objects
in the same place at the same time.
A mutex relation holds between two literals at the same level if one is the negation of
the other
or if each possible pair of actions that could achieve the two literals is mutually
exclusive.
This condition is called inconsistent support. For example, Have(Cake) and Eaten(Cake)
are mutex in S1 because the only way of achieving Have(Cake), the persistence action, is
mutex with the only way of achieving Eaten(Cake), namely Eat(Cake). In S2 the two
literals are not mutex because there are new ways of achieving them, such as Bake(Cake)
and the persistence of Eaten(Cake), that are not mutex.
"""
literalsA = self.parents[literalA]
literalsB = self.parents[literalB]
# TODO: implement this function
#raise NotImplementedError
for literalA in literalsA:
for literalB in literalsB:
if not self.parent_layer.is_mutex(literalA, literalB):
return False
return True
def _negation(self, literalA, literalB):
""" Return True if two literals are negations of each other """
# TODO: implement this function
#raise NotImplementedError
#return ((literalA == ~literalB) and (literalB == ~literalA))
return literalA == ~literalB
class PlanningGraph:
def __init__(self, problem, state, serialize=True, ignore_mutexes=False):
"""
Parameters
----------
problem : PlanningProblem
An instance of the PlanningProblem class
state : tuple(bool)
An ordered sequence of True/False values indicating the literal value
of the corresponding fluent in problem.state_map
serialize : bool
Flag indicating whether to serialize non-persistence actions. Actions
should NOT be serialized for regression search (e.g., GraphPlan), and
_should_ be serialized if the planning graph is being used to estimate
a heuristic
"""
self._serialize = serialize
self._is_leveled = False
self._ignore_mutexes = ignore_mutexes
self.goal = set(problem.goal)
# make no-op actions that persist every literal to the next layer
no_ops = [make_node(n, no_op=True) for n in chain(*(makeNoOp(s) for s in problem.state_map))]
self._actionNodes = no_ops + [make_node(a) for a in problem.actions_list]
# initialize the planning graph by finding the literals that are in the
# first layer and finding the actions they should be connected to
literals = [s if f else ~s for f, s in zip(state, problem.state_map)]
layer = LiteralLayer(literals, ActionLayer(), self._ignore_mutexes)
layer.update_mutexes()
self.literal_layers = [layer]
self.action_layers = []
def LevelCost(self, literal_layers, goal):
"""
function LevelCost(graph, goal) returns a value
inputs:
graph, a leveled planning graph
goal, a literal that is a goal in the planning graph
for each layeri in graph.literalLayers do
if goal in layeri then return i
"""
levelCost = 0
i = 0
for layer in literal_layers:
GoalFound = False
if goal in layer:
GoalFound = True
levelCost = i
else:
i += 1
if GoalFound:
break
return levelCost
def h_levelsum(self):
""" Calculate the level sum heuristic for the planning graph
The level sum heuristic, following the subgoal independence assumption,
returns the sum of the level costs of the goals; this heuristic is inadmissible
(it may overestimate the true cost), but works very well in practice for problems
that are largely decomposable.
It is much more accurate than the number-of-unsatisfied-goals heuristic
from Section 11.2. For our problem, the heuristic estimate for the conjunctive
goal Have(Cake)∧Eaten(Cake) will be 0+1 = 1, whereas the correct answer is 2.
Moreover, if we eliminated the Bake(Cake) action, the estimate would
still be 1, but the conjunctive goal would be impossible
The level sum is the sum of the level costs of all the goal literals
combined. The "level cost" to achieve any single goal literal is the
level at which the literal first appears in the planning graph. Note
that the level cost is **NOT** the minimum number of actions to
achieve a single goal literal.
For example, if Goal1 first appears in level 0 of the graph (i.e.,
it is satisfied at the root of the planning graph) and Goal2 first
appears in level 3, then the levelsum is 0 + 3 = 3.
Hint: expand the graph one level at a time and accumulate the level
cost of each goal.
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
"""
"""
function LevelSum(graph) returns a value
inputs:
graph, an initialized (unleveled) planning graph
costs = []
graph.fill() /* fill the planning graph until it levels off */
for each goal in graph.goalLiterals do
costs.append(LevelCost(graph, goal))
return sum(costs)
"""
levelCost = 0
self.fill()
for goal in self.goal:
levelCost += self.LevelCost(self.literal_layers, goal)
return levelCost
def h_maxlevel(self):
""" Calculate the max level heuristic for the planning graph
To estimate the cost of a conjunction of goals, there are three simple approaches.
The MAX-LEVEL max-level heuristic simply takes the maximum level cost of any of
the goals; this is admissible, but not necessarily very accurate.
The max level is the largest level cost of any single goal fluent.
The "level cost" to achieve any single goal literal is the level at
which the literal first appears in the planning graph. Note that
the level cost is **NOT** the minimum number of actions to achieve
a single goal literal.
For example, if Goal1 first appears in level 1 of the graph and
Goal2 first appears in level 3, then the max level is max(1, 3) = 3.
Hint: expand the graph one level at a time until all goals are met.
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
Notes
-----
WARNING: you should expect long runtimes using this heuristic with A*
"""
"""
function MaxLevel(graph) returns a value
inputs:
graph, an initialized (unleveled) planning graph
costs = []
graph.fill() /* fill the planning graph until it levels off */
for each goal in graph.goalLiterals do
costs.append(LevelCost(graph, goal))
return max(costs)
"""
maxLevelCost = 0
self.fill()
for goal in self.goal:
levelCost = self.LevelCost(self.literal_layers, goal)
if maxLevelCost < levelCost:
maxLevelCost = levelCost
return maxLevelCost
def h_setlevel(self):
""" Calculate the set level heuristic for the planning graph
the set-level heuristic finds the level at which all the
literals in the conjunctive goal appear in the planning graph without any pair
of them being mutually exclusive. This heuristic gives the correct values of 2 for
our original problem and infinity for the problem without Bake(Cake). It dominates
the max-level heuristic and works extremely well on tasks in which there is a good
deal of interaction among subplans
The set-level heuristic finds the level at which all the literals in the conjunctive goal
appear in the planning graph without any pair of them being mutually exclusive.
The set level of a planning graph is the first level where all goals
appear such that no pair of goal literals are mutex in the last
layer of the planning graph.
Hint: expand the graph one level at a time until you find the set level
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
Notes
-----
WARNING: you should expect long runtimes using this heuristic on complex problems
"""
"""
function SetLevel(graph) returns a value
inputs:
graph, an initialized (unleveled) planning graph
graph.fill() /* fill the planning graph until it levels off */
for layeri in graph.literalLayers do
allGoalsMet <- true
for each goal in graph.goalLiterals do
if goal not in layeri then allGoalsMet <- false
if not allGoalsMet then continue
goalsAreMutex <- false
for each goalA in graph.goalLiterals do
for each goalB in graph.goalLiterals do
if layeri.isMutex(goalA, goalB) then goalsAreMutex <- true
if not goalsAreMutex then return i
"""
#"""
self.fill()
i = -1
for layer in self.literal_layers:
i += 1
allGoalsMet = True
for goal in self.goal:
if goal not in layer:
allGoalsMet = False
if not allGoalsMet:
continue
goalsAreMutex = False
for goalA, goalB in combinations(self.goal, 2):
if layer.is_mutex(goalA, goalB):
goalsAreMutex = True
if not goalsAreMutex:
return i
##############################################################################
# DO NOT MODIFY CODE BELOW THIS LINE #
##############################################################################
def fill(self, maxlevels=-1):
""" Extend the planning graph until it is leveled, or until a specified number of
levels have been added
Parameters
----------
maxlevels : int
The maximum number of levels to extend before breaking the loop. (Starting with
a negative value will never interrupt the loop.)
Notes
-----
YOU SHOULD NOT NEED THIS FUNCTION TO COMPLETE THE PROJECT, BUT IT MAY BE USEFUL FOR TESTING
"""
while not self._is_leveled:
if maxlevels == 0: break
self._extend()
maxlevels -= 1
return self
def _extend(self):
""" Extend the planning graph by adding both a new action layer and a new literal layer
The new action layer contains all actions that could be taken given the positive AND
negative literals in the leaf nodes of the parent literal level.
The new literal layer contains all literals that could result from taking each possible
action in the NEW action layer.
"""
if self._is_leveled: return
parent_literals = self.literal_layers[-1]
parent_actions = parent_literals.parent_layer
action_layer = ActionLayer(parent_actions, parent_literals, self._serialize, self._ignore_mutexes)
literal_layer = LiteralLayer(parent_literals, action_layer, self._ignore_mutexes)
for action in self._actionNodes:
# actions in the parent layer are skipped because they are added monotonically to planning graphs,
# which is performed automatically in the ActionLayer and LiteralLayer constructors
if action not in parent_actions and action.preconditions <= parent_literals:
action_layer.add(action)
literal_layer |= action.effects
# add two-way edges in the graph connecting the parent layer with the new action
parent_literals.add_outbound_edges(action, action.preconditions)
action_layer.add_inbound_edges(action, action.preconditions)
# add two-way edges in the graph connecting the new literal layer with the new action
action_layer.add_outbound_edges(action, action.effects)
literal_layer.add_inbound_edges(action, action.effects)
action_layer.update_mutexes()
literal_layer.update_mutexes()
self.action_layers.append(action_layer)
self.literal_layers.append(literal_layer)
self._is_leveled = literal_layer == action_layer.parent_layer
|
import pandas as pd
x = {'one':[1,2,3], 'two':[4,5,6]}
df = pd.DataFrame(x)
df.index = ['a', 'b', 'c']  # set_index expects column names; assign the row labels directly
print(df)
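# Equivalent one-step construction (a small sketch of the same result):
df2 = pd.DataFrame(x, index=['a', 'b', 'c'])
print(df2.equals(df))  # True: both frames carry the row labels a, b, c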
|
def getTotal(costs, items, tax):
output = 0
for x in items:
output += costs.get(x, 0)
return round((output * tax) + output,2)
'''
How much will you spend?
Given a dictionary of items and their costs and an array specifying the items
bought, calculate the total cost of the items plus a given tax.
If an item doesn't exist in the given costs, it is ignored.
Output should be rounded to two decimal places.
Python:
costs = {'socks':5, 'shoes':60, 'sweater':30}
getTotal(costs, ['socks', 'shoes'], 0.09)
#-> 5+60 = 65
#-> 65 + 0.09 of 65 = 70.85
#-> Output: 70.85
'''
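# Usage matching the docstring example above:
costs = {'socks': 5, 'shoes': 60, 'sweater': 30}
print(getTotal(costs, ['socks', 'shoes'], 0.09))  # -> 70.85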
|
from django.urls import path
from . import views
import django.conf.urls
from django.views.generic import TemplateView
urlpatterns=[
path('',views.home,name='doctor first page'),
path('signup/',views.signup,name="doctor signup"),
path('ajaxlogin/',views.ajaxlogin,name="ajaxlogin"),
path('signup/ajaxsignup/',views.ajaxsignup,name="ajaxsignup"),
path('doctorhomepage/',views.doctorhomepage,name="doctorhomepage"),
path('addpatient/',views.addpatient,name="addpatient"),
path('addpatient/ajaxaddpatient/',views.ajaxaddpatient,name="ajaxaddpatient"),
path('viewpatient/',views.viewpatient,name="viewpatient"),
path('viewpatient/ajaxviewpatient/',views.ajaxviewpatient,name="ajaxviewpatient"),
path('searchpatient/',views.searchpatient,name="searchpatient"),
path('searchpatient/ajaxview/',views.ajaxview,name="ajaxview"),
path('searchpatient/ajaxseachpatient/',views.ajaxseachpatient,name="ajaxseachpatient"),
path('addmulcondition/',views.addmulcondition,name="addmulcondition"),
path('addmulcondition/ajaxpaview/',views.ajaxpaview,name="ajaxpaview"),
path('addmulcondition/ajaxaddserach/',views.ajaxaddserach,name="ajaxaddserach"),
path('addmulcondition/ajaxupdate/',views.ajaxupdate,name="ajaxupdate"),
path('doctorlogout/',views.doctorlogout,name="doctorlogout"),
]
|
# image process
import os
import cv2
DEBUG = False
class Image:
scale_percent = 10 # percent of original size
def __init__(self, path, camera_i, time_i, ori_img=True):
self.ori_path = path
self.resize_path = None
self.camera_i = camera_i
self.time_i = time_i
self.image_name = "Camera:{camera} Time:{time}".format(camera=camera_i, time=time_i)
self._img = None
self.ORIGINAL_FLAG = ori_img
def _read_image(self):
if self.ORIGINAL_FLAG and os.path.isfile(self.ori_path):
self._img = cv2.imread(self.ori_path, cv2.IMREAD_UNCHANGED)
elif not self.ORIGINAL_FLAG and os.path.isfile(self.resize_path):
self._img = cv2.imread(self.resize_path, cv2.IMREAD_UNCHANGED)
else:
print("image {image_name} path is wrong".format(image_name=self.image_name ))
def _resize(self):
width = int(self._img.shape[1] * self.scale_percent / 100)
height = int(self._img.shape[0] * self.scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(self._img, dim, interpolation = cv2.INTER_AREA)
return resized
def _show_image(self):
if self._img is not None:
show_image = self._img
if self._img.shape[1] > 1000 or self._img.shape[0] > 800:
show_image = self._resize()
cv2.imshow(self.image_name, show_image)
cv2.waitKey(0) % 256
elif self.ori_path is not None or self.resize_path is not None:
self._read_image()
show_image = self._img
if self._img.shape[1] > 1000 or self._img.shape[0] > 800:
show_image = self._resize()
cv2.imshow(self.image_name, show_image)
cv2.waitKey(0) % 256
else:
print("image is not loaded")
class ImageLoader:
def __init__(self, path, num_camera, num_time):
if os.path.isdir(path):
self.path = path
else:
print("wrong root path")
return
if isinstance(num_camera, int) and isinstance(num_time, int):
self.num_camera = num_camera
self.num_time = num_time
else:
self.num_camera = 0
self.num_time = 0
print("initial with wrong args, cameras, times set to 0")
def get_image_path(self):
path_list = []
for camera in range(self.num_camera):
for time in range(self.num_time):
image_path = "{root}/{camera}/{time}.{ext}". \
format(root=self.path, camera=camera, time=time, ext="jpg")
if os.path.isfile(image_path):
path_list.append(image_path)
else:
print("{path} is empty or not a file".format(path = image_path))
if DEBUG:
print("ImageLoader->get_image_path:")
print(path_list)
return path_list
class ImageProcessor:
def __init__(self, path, camera_i, time_i):
assert len(path) == camera_i * time_i, "Error: wrong image numbers"
self.project_images = []
count = 0
for camera in range(camera_i):
camera_images = []
for time in range(time_i):
camera_images.append(Image(path[count], camera, time))
count += 1
self.project_images.append(camera_images)
if __name__ == "__main__":
image_root = "../test_data/image_root"
num_camera = 2
num_time = 1
ld = ImageLoader(image_root, num_camera, num_time)
images_path = ld.get_image_path()
ip = ImageProcessor(images_path, num_camera, num_time)
image = ip.project_images[0][0]
image._show_image()
pass
# while program is running
# if cur_time == pre_time + d_t:
# if buffer is empty:
# pre_time = cur_time
# output the p_last
# else:
# pre_time = cur_time
# pop the first p in the buffer
# else:
# wait until cur_time == pre_time + d_t
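# A minimal sketch of the timing loop described in the comments above; `buffer` is
# assumed to support popleft() (e.g. collections.deque) and d_t is the output period
# in seconds (all names are illustrative, not part of the project code above):
import time

def output_loop(buffer, d_t, p_last=None, run_for=5.0):
    """Emit one item every d_t seconds; repeat the last item when the buffer is empty."""
    start = pre_time = time.monotonic()
    while time.monotonic() - start < run_for:
        cur_time = time.monotonic()
        if cur_time >= pre_time + d_t:
            pre_time = cur_time
            if buffer:
                p_last = buffer.popleft()  # pop the first p in the buffer
            print(p_last)                  # output the newest (or repeated) result
        else:
            time.sleep(0.001)              # wait until cur_time reaches pre_time + d_t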
|
#!/usr/bin/env python
def fib(x):
''' assumes x is an int and >= 0
returns Fibonacci of x'''
assert type(x) == int and x >= 0
if x == 0 or x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
print(fib(7))   # -> 21 with this definition (fib(0) == fib(1) == 1)
print(fib(-2))  # raises AssertionError: x must be an int >= 0
print(fib(t))   # raises NameError: t is not defined
|
from django.forms import ModelForm
from index.models import Moment
class MomentForm(ModelForm):
class Meta:
model = Moment
fields='__all__'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from web_backend.nvlserver.module import nvl_meta
from sqlalchemy import BigInteger, String, Column, Boolean, DateTime, Table, ForeignKey, func
from geoalchemy2.types import Geometry
from sqlalchemy import Numeric
from sqlalchemy.dialects.postgresql import JSONB
nvl_linestring = Table(
'nvl_linestring',
nvl_meta,
Column('id', BigInteger, primary_key=True),
Column('geom', Geometry('LINESTRING', srid=4326)),
Column('label', String(length=255), nullable=False),
Column('color', String(length=32), nullable=False),
Column('location_id', BigInteger, ForeignKey('location.id'), nullable=True),
Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True),
Column('meta_information', JSONB, default=lambda: {}, nullable=False),
Column('active', Boolean, nullable=False),
Column('deleted', Boolean, nullable=False),
Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
nvl_point = Table(
'nvl_point',
nvl_meta,
Column('id', BigInteger, primary_key=True),
Column('geom', Geometry('POINT', srid=4326)),
Column('label', String(length=255), nullable=False),
Column('color', String(length=32), nullable=False),
Column('icon', String(length=32), nullable=False),
Column('location_id', BigInteger, ForeignKey('location.id'), nullable=True),
Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True),
Column('meta_information', JSONB, default=lambda: {}, nullable=False),
Column('active', Boolean, nullable=False),
Column('deleted', Boolean, nullable=False),
Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
nvl_circle = Table(
'nvl_circle',
nvl_meta,
Column('id', BigInteger, primary_key=True),
Column('geom', Geometry('POINT', srid=4326)),
Column('radius', Numeric, nullable=False),
Column('label', String(length=255), nullable=False),
Column('color', String(length=32), nullable=False),
Column('location_id', BigInteger, ForeignKey('location.id'), nullable=True),
Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True),
Column('meta_information', JSONB, default=lambda: {}, nullable=False),
Column('active', Boolean, nullable=False),
Column('deleted', Boolean, nullable=False),
Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
nvl_polygon = Table(
'nvl_polygon',
nvl_meta,
Column('id', BigInteger, primary_key=True),
Column('geom', Geometry('POLYGON', srid=4326)),
Column('label', String(length=255), nullable=False),
Column('color', String(length=32), nullable=False),
Column('location_id', BigInteger, ForeignKey('location.id'), nullable=True),
Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True),
Column('meta_information', JSONB, default=lambda: {}, nullable=False),
Column('active', Boolean, nullable=False),
Column('deleted', Boolean, nullable=False),
Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
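# A minimal query sketch against the tables above (SQLAlchemy 1.x core style is
# assumed; executing the statement would require an engine configured elsewhere):
from sqlalchemy import select

active_points_query = select([nvl_point.c.id, nvl_point.c.label, nvl_point.c.geom]).where(
    nvl_point.c.active.is_(True)
)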
|
import os
import webapp2
import jinja2
from new import Story
from google.appengine.ext import db
class ReviewHandler(webapp2.RequestHandler):
def post(self):
ratingValue = self.request.get('review.reviewRating.ratingValue')
reviewBody = self.request.get('review.reviewBody')
id = self.request.get('id')
if not ratingValue or not id:
self.error(400)
return
review = Review(storyId=id, ratingValue=int(ratingValue), reviewBody=reviewBody)
review.put()
return
def get(self):
stories = Story.all().fetch(1000)
results = []
for s in stories:
total = 0
q = Review.all()
q.filter("storyId =", str(s.key().id()))
reviews = q.fetch(100)
for r in reviews:
total += r.ratingValue
if len(reviews):
score = round(float(total) / len(reviews), 2)
else:
score = 0
result = {'score': score, 'title': s.title, 'desc': s.desc, 'tag': s.tag, 'votes': len(reviews), 'author': s.author, 'date': s.date}
results.append(result)
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
template = jinja_environment.get_template('home.html')
self.response.write(template.render({'stories': results}))
class Review(db.Model):
storyId = db.StringProperty(required=True)
ratingValue = db.IntegerProperty(required=True)
reviewBody = db.TextProperty(required=False)
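# A minimal WSGI wiring sketch for the handler above (the '/review' route is
# illustrative; the real app configuration may differ):
app = webapp2.WSGIApplication([
    ('/review', ReviewHandler),
], debug=True)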
|
import sys
import numpy as np
from copy import copy as copy
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from scipy.sparse import csr_matrix
from numpy.polynomial import Legendre
def normal_eqn_vects(X, Y, W, var):
overlap = np.einsum('bai,bi,bci->bac', X, W, X, optimize='greedy')
if np.any(np.linalg.det(overlap) == 0.0):
print("Overlap matrix is singular, should figure out why")
return np.ones((Y.shape[0], X.shape[0]))*np.nan, np.ones((Y.shape[0], X.shape[0], X.shape[0]))*np.nan
# Fit
denom = np.linalg.inv(overlap)
numer = np.einsum('bai,bi,bi->ba', X, W, Y, optimize='greedy')
# Covariance
cov = np.einsum('bai,bij,bj,bj,bj,bkj,bkc->bac', denom, X, W, var, W, X, denom, optimize='greedy')
return np.einsum('abi,ai->ab', denom, numer, optimize='greedy'), cov
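# A quick self-contained check of normal_eqn_vects on synthetic data (illustrative only):
# fit y = 2 + 3*t for a batch of two identical "images" with unit weights and unit variance.
if __name__ == '__main__':
    t = np.linspace(-1.0, 1.0, 50)
    X_demo = np.tile(np.stack([np.ones_like(t), t]), (2, 1, 1))  # (batch=2, basis=2, points=50)
    Y_demo = np.tile(2.0 + 3.0 * t, (2, 1))                      # (batch=2, points=50)
    W_demo = np.ones_like(Y_demo)
    var_demo = np.ones_like(Y_demo)
    fit_demo, cov_demo = normal_eqn_vects(X_demo, Y_demo, W_demo, var_demo)
    print(fit_demo)  # each row should be approximately [2., 3.]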
def fit_legendres_images(images, centers, lg_inds, rad_inds, maxPixel, rotate=0,
image_stds=None, image_counts=None, image_nanMaps=None, image_weights=None,
chiSq_fit=False, rad_range=None):
"""
Fits legendre polynomials to an array of single images (3d) or a list/array of
an array of scan images, possible dimensionality:
1) [NtimeSteps, image_rows, image_cols]
2) [NtimeSteps (list), Nscans, image_rows, image_cols]
"""
if image_counts is None:
image_counts = []
for im in range(len(images)):
image_counts.append(np.ones_like(images[im]))
image_counts[im][np.isnan(images[im])] = 0
if chiSq_fit and (image_stds is None):
print("If using the chiSq fit you must supply image_stds")
return None
if image_stds is None:
image_stds = []
for im in range(len(images)):
image_stds.append(np.ones_like(images[im]))
image_stds[im][np.isnan(images[im])] = 0
with_scans = len(images[0].shape)+1 >= 4
img_fits = [[] for x in range(len(images))]
img_covs = [[] for x in range(len(images))]
for rad in range(maxPixel):
if rad_range is not None:
if rad < rad_range[0] or rad >= rad_range[1]:
continue
if rad % 25 == 0:
print("Fitting radius {}".format(rad))
pixels, nans, angles = [], [], []
all_angles = np.arctan2(rad_inds[rad][1].astype(float), rad_inds[rad][0].astype(float))
all_angles[all_angles<0] += 2*np.pi
all_angles = np.mod(all_angles + rotate, 2*np.pi)
all_angles[all_angles > np.pi] -= 2*np.pi
if np.sum(np.mod(lg_inds, 2)) == 0:
all_angles[np.abs(all_angles) > np.pi/2.] -= np.pi*np.sign(all_angles[np.abs(all_angles) > np.pi/2.])
angles = np.unique(np.abs(all_angles))
ang_sort_inds = np.argsort(angles)
angles = angles[ang_sort_inds]
Nangles = angles.shape[0]
if len(angles) == len(all_angles):
do_merge = False
else:
do_merge = True
mi_rows, mi_cols, mi_data = [], [], []
pr,pc,pv = [],[],[]
for ia,ang in enumerate(angles):
inds = np.where(np.abs(all_angles) == ang)[0]
mi_rows.append(np.ones_like(inds)*ia)
mi_cols.append(inds)
mi_rows, mi_cols = np.concatenate(mi_rows), np.concatenate(mi_cols)
merge_indices = csr_matrix((np.ones_like(mi_rows), (mi_rows, mi_cols)),
shape=(len(angles), len(all_angles)))
for im in range(len(images)):
if with_scans:
angs_tile = np.tile(angles, (images[im].shape[0], 1))
scn_inds, row_inds, col_inds = [], [], []
for isc in range(images[im].shape[0]):
scn_inds.append(np.ones(rad_inds[rad][0].shape[0], dtype=int)*isc)
row_inds.append(rad_inds[rad][0]+centers[im][isc,0])
col_inds.append(rad_inds[rad][1]+centers[im][isc,1])
scn_inds = np.concatenate(scn_inds)
row_inds = np.concatenate(row_inds)
col_inds = np.concatenate(col_inds)
img_pixels = np.reshape(copy(images[im][scn_inds,row_inds,col_inds]), (images[im].shape[0], -1))
img_counts = np.reshape(copy(image_counts[im][scn_inds,row_inds,col_inds]), (images[im].shape[0], -1))
img_stds = np.reshape(copy(image_stds[im][scn_inds,row_inds,col_inds]), (images[im].shape[0], -1))
if image_nanMaps is not None:
img_pixels[np.reshape(image_nanMaps[im][scn_inds,row_inds,col_inds], (images[im].shape[0], -1)).astype(bool)] = np.nan
img_counts[np.reshape(image_nanMaps[im][scn_inds,row_inds,col_inds], (images[im].shape[0], -1)).astype(bool)] = 0
if image_weights is not None:
img_weights = np.reshape(copy(image_weights[im][scn_inds,row_inds,col_inds]), (images[im].shape[0], -1))
else:
angs_tile = np.expand_dims(angles, 0)
row_inds = rad_inds[rad][0]+centers[im,0]
col_inds = rad_inds[rad][1]+centers[im,1]
img_pixels = np.reshape(copy(images[im][row_inds,col_inds]), (1, -1))
img_counts = np.reshape(copy(image_counts[im][row_inds,col_inds]), (1, -1))
img_stds = np.reshape(copy(image_stds[im][row_inds,col_inds]), (1, -1))
if image_nanMaps is not None:
img_pixels[np.reshape(image_nanMaps[im][row_inds,col_inds], (1, -1)).astype(bool)] = np.nan
img_counts[np.reshape(image_nanMaps[im][row_inds,col_inds], (1, -1)).astype(bool)] = 0
if image_weights is not None:
img_weights = np.reshape(copy(image_weights[im][row_inds,col_inds]), (1, -1))
img_pix = img_pixels*img_counts
img_var = img_counts*(img_stds**2)
img_pix[np.isnan(img_pixels)] = 0
img_var[np.isnan(img_pixels)] = 0
if do_merge:
img_pixels[np.isnan(img_pixels)] = 0
img_pix = np.transpose(merge_indices.dot(np.transpose(img_pix)))
img_var = np.transpose(merge_indices.dot(np.transpose(img_var)))
img_counts = np.transpose(merge_indices.dot(np.transpose(img_counts)))
if image_weights is not None:
print("Must fill this in, don't forget std option")
sys.exit(0)
else:
img_pix = img_pix[:,ang_sort_inds]
img_var = img_var[:,ang_sort_inds]
img_counts = img_counts[:,ang_sort_inds]
img_pix /= img_counts
img_var /= img_counts
Nnans = np.sum(np.isnan(img_pix), axis=-1)
ang_inds = np.where(img_counts > 0)
arr_inds = np.concatenate([np.arange(Nangles-Nn) for Nn in Nnans])
img_pixels = np.zeros_like(img_pix)
img_vars = np.zeros_like(img_var)
img_angs = np.zeros_like(img_pix)
img_dang = np.zeros_like(img_pix)
img_pixels[ang_inds[0][:-1], arr_inds[:-1]] =\
(img_pix[ang_inds[0][:-1], ang_inds[1][:-1]] + img_pix[ang_inds[0][1:], ang_inds[1][1:]])/2.
img_vars[ang_inds[0][:-1], arr_inds[:-1]] =\
(img_var[ang_inds[0][:-1], ang_inds[1][:-1]] + img_var[ang_inds[0][1:], ang_inds[1][1:]])/2.
img_angs[ang_inds[0][:-1], arr_inds[:-1]] =\
(angs_tile[ang_inds[0][:-1], ang_inds[1][:-1]] + angs_tile[ang_inds[0][1:], ang_inds[1][1:]])/2.
img_dang[ang_inds[0][:-1], arr_inds[:-1]] =\
(angs_tile[ang_inds[0][1:], ang_inds[1][1:]] - angs_tile[ang_inds[0][:-1], ang_inds[1][:-1]])
for isc in range(Nnans.shape[0]):
# Using angle midpoint => one less angle => Nnans[isc]+1
img_pixels[isc,-1*(Nnans[isc]+1):] = 0
img_vars[isc,-1*(Nnans[isc]+1):] = 0
img_angs[isc,-1*(Nnans[isc]+1):] = 0
img_dang[isc,-1*(Nnans[isc]+1):] = 0
if image_weights is not None:
print("Must fill this in and check below")
sys.exit(0)
elif chiSq_fit:
img_weights = 1./img_vars
img_weights[img_vars==0] = 0
else:
img_weights = np.ones_like(img_pixels)
img_weights *= np.sin(img_angs)*img_dang
lgndrs = []
for lg in lg_inds:
lgndrs.append(Legendre.basis(lg)(np.cos(img_angs)))
lgndrs = np.transpose(np.array(lgndrs), (1,0,2))
empty_scan = np.sum(img_weights.astype(bool), -1) < 2
overlap = np.einsum('bai,bi,bci->bac',
lgndrs[np.invert(empty_scan)],
img_weights[np.invert(empty_scan)],
lgndrs[np.invert(empty_scan)],
optimize='greedy')
empty_scan[np.invert(empty_scan)] = (np.linalg.det(overlap) == 0.0)
if np.any(empty_scan):
fit = np.ones((img_pixels.shape[0], len(lg_inds)))*np.nan
cov = np.ones((img_pixels.shape[0], len(lg_inds), len(lg_inds)))*np.nan
if np.any(np.invert(empty_scan)):
img_pixels = img_pixels[np.invert(empty_scan)]
img_weights = img_weights[np.invert(empty_scan)]
img_vars = img_vars[np.invert(empty_scan)]
lgndrs = lgndrs[np.invert(empty_scan)]
fit[np.invert(empty_scan)], cov[np.invert(empty_scan)] =\
normal_eqn_vects(lgndrs, img_pixels, img_weights, img_vars)
else:
fit, cov = normal_eqn_vects(lgndrs, img_pixels, img_weights, img_vars)
img_fits[im].append(np.expand_dims(fit, 1))
img_covs[im].append(np.expand_dims(cov, 1))
Nscans = None
for im in range(len(img_fits)):
img_fits[im] = np.concatenate(img_fits[im], 1)
img_covs[im] = np.concatenate(img_covs[im], 1)
if Nscans is None:
Nscans = img_fits[im].shape[0]
elif Nscans != img_fits[im].shape[0]:
Nscans = -1
if Nscans > 0:
img_fits = np.array(img_fits)
img_covs = np.array(img_covs)
if with_scans:
return img_fits, img_covs
else:
return img_fits[:,0,:,:], img_covs[:,0,:,:]
|
# test 1
# getting user input
# message = input("Please input some sent\n")
# message = int(message)  # explicit type conversion
# print(message+1)
'''
Everything returned by input() is treated as a string; use the appropriate
type-conversion function to convert it explicitly.
'''
# test 2
# while loop
num = 0
sum = 0
while num < 5:
sum = sum + num
num = num + 1
print(sum)
# test 3
# let the user choose when to quit
print("===========================================================================================")
mess = ""
while mess != "quit":
mess = input("Please input something1:\n")
if mess != "quit":
print(mess)
# test 4
# using a flag
print("===========================================================================================")
active = True
while active:
mess = input("Please input something2:\n")
if mess == "quit":
active = False
else:
print(mess)
|
import boto3
import random
import string
from typing import Dict, List
# prefix for objects created in test
ID_PREFIX = "fake-"
# us-east-1 gets bleeding-edge features first
DEV_REGION = "us-east-1"
def id_generator(size=8, chars=string.ascii_lowercase + string.digits):
random_string = ''.join(random.choice(chars) for _ in range(size))
return ID_PREFIX + random_string
def create_bucket_with_client(bucket_id, tag_set):
dev_session = boto3.Session(region_name=DEV_REGION, profile_name='personal')
s3_resource = dev_session.resource('s3')
response = s3_resource.create_bucket(Bucket=bucket_id)
bucket_tagging = s3_resource.BucketTagging(bucket_id)
bucket_tagging.put(Tagging={'TagSet': tag_set})
return response
def destroy_bucket_with_client(bucket_name):
"""Destroy bucket by bucket name
:param str bucket_name: name of bucket to delete
"""
dev_session = boto3.Session(region_name=DEV_REGION, profile_name='personal')
s3_resource = dev_session.resource('s3')
for bucket in s3_resource.buckets.all():
if bucket.name == bucket_name:
for key in bucket.objects.all():
key.delete()
bucket.delete()
break
def bucket_name_exists(bucket_name):
"""Find bucket by name
:param str bucket_name: name of the bucket to look for
:rtype: bool
"""
dev_session = boto3.Session(region_name=DEV_REGION, profile_name='personal')
s3_resource = dev_session.resource('s3')
all_buckets = [bucket.name for bucket in s3_resource.buckets.all()]
return bucket_name in all_buckets
def simple_get_bucket_by_tags(tag_filter, type_filter):
"""The summary line for a method docstring should fit on one line.
Example tag filter
TagFilters=[
{
'Key': 'shape',
'Values': ['round']
}
]
:param List[Dict[str, str or List[str]]] tag_filter: tag filter
Type filter options:
https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces
"""
dev_session = boto3.Session(region_name=DEV_REGION, profile_name='personal')
client = dev_session.client('resourcegroupstaggingapi')
def lookup_for_tags(token):
response = client.get_resources(
PaginationToken=token,
TagFilters=tag_filter,
ResourcesPerPage=50,
ResourceTypeFilters=[
type_filter,
]
)
return response
results = []
response = lookup_for_tags("")
page_token = ""
while True:
results += response["ResourceTagMappingList"]
page_token = response["PaginationToken"]
if page_token == "":
break
response = lookup_for_tags(page_token)
    for r in results:
        print(r["ResourceARN"])
return response
def list_bucket_arns_by_tags(tag_filter, type_filter):
"""The summary line for a method docstring should fit on one line.
Example tag filter
TagFilters=[
{
'Key': 'shape',
'Values': ['round']
}
]
:param List[Dict[str, str or List[str]]] tag_filter: tag filter
:param str type_filter: see options for string below
Type filter options:
https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces
:rtype: List[str]
"""
res = []
response = simple_get_bucket_by_tags(tag_filter, type_filter)
for arn in response['ResourceTagMappingList']:
res.append(arn['ResourceARN'])
return res
def list_cfn_by_tags(tag_filter, type_filter="cloudformation"):
"""The summary line for a method docstring should fit on one line.
Example tag filter
TagFilters=[
{
'Key': 'shape',
'Values': ['round']
}
]
:param List[Dict[str, str or List[str]]] tag_filter: tag filter
:param str type_filter: see options for string below
Type filter options:
https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces
:rtype: List[str]
"""
res = []
response = simple_get_bucket_by_tags(tag_filter, type_filter)
for arn in response['ResourceTagMappingList']:
res.append(arn['ResourceARN'])
return res
def get_file_path(file_name, path=None):
"""Fild path of file_name in search_dir subtree
Given a file name (file_name) walk the subtrees under search_dir and return the full path of the first found
instance of the file name. If no search_dir is provided, use cwd
:param str file_name: name of a file
:param str path: absolute path to a directory to be searched. default to cwd
:rtype: str
"""
import os
for root, dirs, files in os.walk(path):
if file_name in files:
return os.path.join(root, file_name)
def string_from_file(file_name):
"""Read contents of a resource file into a string
    Locates the file anywhere in the project tree via get_file_path and returns
    its contents as a single string.
    :param str file_name: name of a resource file in the project tree
:rtype: str
"""
file_path = get_file_path(file_name)
with open(file_path, "r") as myfile:
data = myfile.read()
return data
def create_stack(body, tags, parameters, disable_rollback=False, capabilities=None):
"""Create a stack from a given body string
    Not yet implemented; intended to create a CloudFormation stack from the
    given template body.
    :param str body: CloudFormation template body
    :param tags: tags to apply to the stack
    :param parameters: stack parameters
    :param bool disable_rollback: whether to disable rollback on failure
    :param capabilities: IAM capabilities to acknowledge, if any
    :rtype: dict[str, str]
"""
pass
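# Minimal usage sketch for the tag-based lookup helpers above. The tag key/value
# and the 's3' resource-type filter are illustrative only, and the call assumes
# the 'personal' AWS profile used throughout this module is configured locally.
if __name__ == '__main__':
    example_tag_filter = [
        {
            'Key': 'shape',
            'Values': ['round']
        }
    ]
    for bucket_arn in list_bucket_arns_by_tags(example_tag_filter, 's3'):
        print(bucket_arn)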
|
import pkg_resources
libsgutils2 = pkg_resources.resource_filename(__name__, 'libsgutils2-2.dll')
libc = 'msvcrt'
|
from GUI import gaGUI
# 2*exp( - (x**2) - (y**2) ) + 5 * exp ( - (( x - 3 )**2) - (( y - 3)**2) )  -> max; save all files; clearly visible here!!!!
# (x ** 1/2 - 5 * y ) / (x ** 2 + y ** 2 - 2 * x + 10)  -> example function for testing
# sin(x) + cos(y)  -> min; after entering it, go to the results directory for the generated gif; save all files
# exp(-(x-3)**2-(y-3)**2)  -> max; save all files
import numpy as np
if __name__ == '__main__':
gui = gaGUI()
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class GimletUser(AbstractUser):
ROLE_OWNER = 'owner'
ROLE_MANAGER = 'manager'
ROLE_EMPLOYEE = 'employee'
ROLE_CHOICES = (
(ROLE_OWNER, 'Owner'),
(ROLE_MANAGER, 'Manager'),
(ROLE_EMPLOYEE, 'Employee'),
)
role = models.CharField(max_length=40, choices=ROLE_CHOICES)
|
from psana import dgram
from psana.event import Event
from psana.psexp import PacketFooter, TransitionId, PrometheusManager
import numpy as np
import os
import time
import logging
logger = logging.getLogger(__name__)
s_bd_just_read = PrometheusManager.get_metric('psana_bd_just_read')
s_bd_gen_smd_batch = PrometheusManager.get_metric('psana_bd_gen_smd_batch')
s_bd_gen_evt = PrometheusManager.get_metric('psana_bd_gen_evt')
class EventManager(object):
""" Return an event from the received smalldata memoryview (view)
1) If dm is empty (no bigdata), yield this smd event
2) If dm is not empty,
- with filter fn, fetch one bigdata and yield it.
- w/o filter fn, fetch one big chunk of bigdata and
replace smalldata view with the read out bigdata.
Yield one bigdata event.
"""
def __init__(self, view, smd_configs, dm, esm,
filter_fn=0, prometheus_counter=None,
max_retries=0, use_smds=[]):
if view:
pf = PacketFooter(view=view)
self.n_events = pf.n_packets
else:
self.n_events = 0
self.smd_configs = smd_configs
self.dm = dm
self.esm = esm
self.n_smd_files = len(self.smd_configs)
self.filter_fn = filter_fn
self.prometheus_counter = prometheus_counter
self.max_retries = max_retries
self.use_smds = use_smds
self.smd_view = view
self.i_evt = 0
# Each chunk must fit in BD_CHUNKSIZE and we only fill bd buffers
# when bd_offset reaches the size of buffer.
self.BD_CHUNKSIZE = int(os.environ.get('PS_BD_CHUNKSIZE', 0x1000000))
self._get_offset_and_size()
if self.dm.n_files > 0:
self._init_bd_chunks()
def __iter__(self):
return self
def _inc_prometheus_counter(self, unit, value=1):
if self.prometheus_counter:
self.prometheus_counter.labels(unit,'None').inc(value)
@s_bd_gen_evt.time()
def __next__(self):
if self.i_evt == self.n_events:
raise StopIteration
evt = self._get_next_evt()
# Update EnvStore - this is the earliest we know if this event is a Transition
# make sure we update the envstore now rather than later.
if evt.service() != TransitionId.L1Accept:
self.esm.update_by_event(evt)
return evt
def _get_bd_offset_and_size(self, d, current_bd_offsets, current_bd_chunk_sizes, i_evt, i_smd, i_first_L1):
if self.use_smds[i_smd]: return
# Get offset and size of bd buffer
# For transitions, this is the current bd offset
if hasattr(d, "smdinfo"):
self.bd_offset_array[i_evt, i_smd] = d.smdinfo[0].offsetAlg.intOffset
self.bd_size_array[i_evt, i_smd] = d.smdinfo[0].offsetAlg.intDgramSize
else:
self.bd_offset_array[i_evt, i_smd] = current_bd_offsets[i_smd]
self.bd_size_array[i_evt, i_smd] = d._size
# Check continuous chunk
if current_bd_chunk_sizes[i_smd] + self.bd_size_array[i_evt, i_smd] < self.BD_CHUNKSIZE:
if i_evt > i_first_L1 and \
current_bd_offsets[i_smd] == self.bd_offset_array[i_evt, i_smd]:
self.cutoff_flag_array[i_evt, i_smd] = 0
current_bd_chunk_sizes[i_smd] += self.bd_size_array[i_evt, i_smd]
if self.cutoff_flag_array[i_evt, i_smd] == 1:
current_bd_chunk_sizes[i_smd] = self.bd_size_array[i_evt, i_smd]
"""
if current_bd_offsets[i_smd] == self.bd_offset_array[i_evt, i_smd] \
and i_evt != i_first_L1 \
and current_bd_chunk_sizes[i_smd] + self.bd_size_array[i_evt, i_smd] < self.BD_CHUNKSIZE:
self.cutoff_flag_array[i_evt, i_smd] = 0
current_bd_chunk_sizes[i_smd] += self.bd_size_array[i_evt, i_smd]
else:
current_bd_chunk_sizes[i_smd] = self.bd_size_array[i_evt, i_smd]
"""
current_bd_offsets[i_smd] = self.bd_offset_array[i_evt, i_smd] + self.bd_size_array[i_evt, i_smd]
print(f'i_smd:{i_smd} i_evt:{i_evt} i_first_L1:{i_first_L1} current_bd_chunk_size={current_bd_chunk_sizes[i_smd]} current_bd_offset:{current_bd_offsets[i_smd]} bd_offset:{self.bd_offset_array[i_evt, i_smd]} bd_size:{self.bd_size_array[i_evt, i_smd]}')
@s_bd_gen_smd_batch.time()
def _get_offset_and_size(self):
"""
Use fast step-through to read off offset and size from smd_view.
Format of smd_view
[
[[d_bytes][d_bytes]....[evt_footer]] <-- 1 event
[[d_bytes][d_bytes]....[evt_footer]]
[chunk_footer]]
"""
offset = 0
i_smd = 0
smd_chunk_pf = PacketFooter(view=self.smd_view)
dtype = np.int64
# Row - events, col = smd files
self.bd_offset_array = np.zeros((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.bd_size_array = np.zeros((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.smd_offset_array = np.zeros((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.smd_size_array = np.zeros((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.new_chunk_id_array = np.zeros((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.cutoff_flag_array = np.ones((smd_chunk_pf.n_packets, self.n_smd_files), dtype=dtype)
self.services = np.zeros(smd_chunk_pf.n_packets, dtype=dtype)
smd_aux_sizes = np.zeros(self.n_smd_files, dtype=dtype)
self.i_first_L1s = np.zeros(self.n_smd_files, dtype=dtype) + 0xffffff
# For comparing if the next dgram should be in the same read
current_bd_offsets = np.zeros(self.n_smd_files, dtype=dtype)
# Current chunk size (gets reset at boundary)
current_bd_chunk_sizes = np.zeros(self.n_smd_files, dtype=dtype)
i_evt = 0
while offset < memoryview(self.smd_view).nbytes - memoryview(smd_chunk_pf.footer).nbytes:
if i_smd == 0:
smd_evt_size = smd_chunk_pf.get_size(i_evt)
smd_evt_pf = PacketFooter(view=self.smd_view[offset: offset+smd_evt_size])
smd_aux_sizes[:] = [smd_evt_pf.get_size(i) for i in range(smd_evt_pf.n_packets)]
# Only get offset and size of non-missing dgram
# TODO: further optimization by looking for the first L1 and read in a big chunk
# anything that comes after. Right now, all transitions mark the cutoff points.
if smd_aux_sizes[i_smd] == 0:
self.cutoff_flag_array[i_evt, i_smd] = 0
else:
d = dgram.Dgram(config=self.smd_configs[i_smd], view=self.smd_view, offset=offset)
self.smd_offset_array[i_evt, i_smd] = offset
self.smd_size_array[i_evt, i_smd] = d._size
self.services[i_evt] = d.service()
# Any dgrams after the first L1 (as long as they fit in the chunk size)
# will be read in together.
if self.dm.n_files > 0:
if d.service() == TransitionId.L1Accept:
if self.i_first_L1s[i_smd] == 0xffffff:
self.i_first_L1s[i_smd] = i_evt
print(f'i_smd={i_smd} i_first_L1={self.i_first_L1s[i_smd]}')
# For SlowUpdate, we need to check if the next dgram gets cutoff
elif d.service() == TransitionId.SlowUpdate and hasattr(d, 'chunkinfo'):
stream_id = self.dm.get_stream_id(i_smd)
_chunk_ids = [getattr(d.chunkinfo[seg_id].chunkinfo, 'chunkid') for seg_id in d.chunkinfo]
# There must be only one unique epics var
if _chunk_ids: self.new_chunk_id_array[i_evt, i_smd] = _chunk_ids[0]
self._get_bd_offset_and_size(d, current_bd_offsets, current_bd_chunk_sizes,
i_evt, i_smd, self.i_first_L1s[i_smd])
"""
# For L1 with bigdata files, store offset and size found in smd dgrams.
# For SlowUpdate, store new chunk id (if found). TODO: check if
# we need to always check for epics for SlowUpdate.
if d.service() == TransitionId.L1Accept and self.dm.n_files > 0:
if i_first_L1 == -1:
i_first_L1 = i_evt
print(f'i_smd={i_smd} i_first_L1={i_first_L1}')
self._get_bd_offset_and_size(d, current_bd_offsets, current_bd_chunk_sizes, i_evt, i_smd, i_first_L1)
elif d.service() == TransitionId.SlowUpdate and hasattr(d, 'chunkinfo'):
# We only support chunking on bigdata
if self.dm.n_files > 0:
stream_id = self.dm.get_stream_id(i_smd)
_chunk_ids = [getattr(d.chunkinfo[seg_id].chunkinfo, 'chunkid') for seg_id in d.chunkinfo]
# There must be only one unique epics var
if _chunk_ids: self.new_chunk_id_array[i_evt, i_smd] = _chunk_ids[0]
"""
offset += smd_aux_sizes[i_smd]
i_smd += 1
if i_smd == self.n_smd_files:
offset += PacketFooter.n_bytes * (self.n_smd_files + 1) # skip the footer
i_smd = 0 # reset to the first smd file
i_evt += 1 # done with this smd event
# end while offset
# Precalculate cutoff indices
self.cutoff_indices = []
self.chunk_indices = np.zeros(self.n_smd_files, dtype=dtype)
for i_smd in range(self.n_smd_files):
self.cutoff_indices.append(np.where(self.cutoff_flag_array[:, i_smd] == 1)[0])
print(f'i_smd={i_smd} cutoff_index={self.cutoff_indices[i_smd]} services={self.services[self.cutoff_indices[i_smd]]}')
def _open_new_bd_file(self, i_smd, new_chunk_id):
os.close(self.dm.fds[i_smd])
xtc_dir = os.path.dirname(self.dm.xtc_files[i_smd])
filename = os.path.basename(self.dm.xtc_files[i_smd])
found = filename.find('-c')
new_filename = filename.replace(filename[found:found+4], '-c'+str(new_chunk_id).zfill(2))
fd = os.open(os.path.join(xtc_dir, new_filename), os.O_RDONLY)
self.dm.fds[i_smd] = fd
self.dm.xtc_files[i_smd] = new_filename
@s_bd_just_read.time()
def _read(self, fd, size, offset):
st = time.monotonic()
chunk = bytearray()
for i_retry in range(self.max_retries+1):
chunk.extend(os.pread(fd, size, offset))
got = memoryview(chunk).nbytes
if got == size:
break
offset += got
size -= got
found_xtc2_flags = self.dm.found_xtc2('bd')
if got == 0 and all(found_xtc2_flags):
                print(f'bigdata got 0 bytes and .xtc2 files found on disk. stop reading this .inprogress file')
break
print(f'bigdata read retry#{i_retry} - waiting for {size/1e6} MB, max_retries: {self.max_retries} (PS_R_MAX_RETRIES), sleeping 1 second...')
time.sleep(1)
en = time.monotonic()
sum_read_nbytes = memoryview(chunk).nbytes # for prometheus counter
rate = 0
if sum_read_nbytes > 0:
rate = (sum_read_nbytes/1e6)/(en-st)
logger.debug(f"bd reads chunk {sum_read_nbytes/1e6:.5f} MB took {en-st:.2f} s (Rate: {rate:.2f} MB/s)")
self._inc_prometheus_counter('MB', sum_read_nbytes/1e6)
self._inc_prometheus_counter('seconds', en-st)
return chunk
def _init_bd_chunks(self):
self.bd_bufs = [bytearray() for i in range(self.n_smd_files)]
self.bd_buf_offsets = np.zeros(self.n_smd_files, dtype=np.int64)
def _fill_bd_chunk(self, i_smd):
"""
Fill self.bigdatas for this given stream id
        Skip filling when there are no bigdata files or
        when this stream doesn't have at least one dgram.
Detail:
- Ignore all transitions
- Read the next chunk
From cutoff_flag_array[:, i_smd], we get cutoff_indices as
[0, 1, 2, 12, 13, 18, 19] a list of index to where each read
or copy will happen.
"""
# Check no filling
if self.use_smds[i_smd]: return
# Reset buffer offset with new filling
self.bd_buf_offsets[i_smd] = 0
cutoff_indices = self.cutoff_indices[i_smd]
i_evt_cutoff = cutoff_indices[self.chunk_indices[i_smd]]
begin_chunk_offset = self.bd_offset_array[i_evt_cutoff, i_smd]
# Calculate read size:
# For last chunk, read size is the sum of all bd dgrams all the
# way to the end of the array. Otherwise, only sum to the next chunk.
if self.chunk_indices[i_smd] == cutoff_indices.shape[0] - 1:
read_size = np.sum(self.bd_size_array[i_evt_cutoff:, i_smd])
else:
i_next_evt_cutoff = cutoff_indices[self.chunk_indices[i_smd] + 1]
read_size = np.sum(self.bd_size_array[i_evt_cutoff:i_next_evt_cutoff, i_smd])
self.bd_bufs[i_smd] = self._read(self.dm.fds[i_smd], read_size, begin_chunk_offset)
def _get_next_evt(self):
""" Generate bd evt for different cases:
1) No bigdata or is a Transition Event prior to i_first_L1
create dgrams from smd_view
2) L1Accept event
create dgrams from bd_bufs
3) L1Accept with some smd files replaced by bigdata files
create dgram from smd_view if use_smds[i_smd] is set
otherwise create dgram from bd_bufs
"""
dgrams = [None] * self.n_smd_files
for i_smd in range(self.n_smd_files):
# Check in case we need to switch to the next bigdata chunk file
if self.services[self.i_evt] != TransitionId.L1Accept:
if self.new_chunk_id_array[self.i_evt, i_smd] != 0:
print(f'open_new_bd_file i_smd={i_smd} chunk_id={self.new_chunk_id_array[self.i_evt, i_smd]}')
self._open_new_bd_file(i_smd,
self.new_chunk_id_array[self.i_evt, i_smd])
view, offset, size = (bytearray(),0,0)
# Try to create dgram from smd view
if self.dm.n_files == 0 or self.use_smds[i_smd] \
or self.i_evt < self.i_first_L1s[i_smd]:
view = self.smd_view
offset = self.smd_offset_array[self.i_evt, i_smd]
size = self.smd_size_array[self.i_evt, i_smd]
                # Non-L1 dgrams prior to i_first_L1 are counted as a new "chunk"
                # because their cutoff flag is set (data comes from the smd view
                # instead of a bd chunk). We'll need to update the chunk index for
                # this smd when we see a non-L1 dgram.
self.chunk_indices[i_smd] += 1
else:
# Check if we need to fill bd buf if this dgram doesn't fit in the current view
if self.bd_buf_offsets[i_smd] + self.bd_size_array[self.i_evt, i_smd] \
> memoryview(self.bd_bufs[i_smd]).nbytes:
self._fill_bd_chunk(i_smd)
self.chunk_indices[i_smd] += 1
# This is the offset of bd buffer! and not what stored in smd dgram,
# which in contrast points to the location of disk.
offset = self.bd_buf_offsets[i_smd]
size = self.bd_size_array[self.i_evt, i_smd]
view = self.bd_bufs[i_smd]
self.bd_buf_offsets[i_smd] += size
if size: # handles missing dgram
dgrams[i_smd] = dgram.Dgram(config=self.dm.configs[i_smd], view=view, offset=offset)
self.i_evt += 1
self._inc_prometheus_counter('evts')
evt = Event(dgrams=dgrams, run=self.dm.get_run())
print(f'YIELD ts={evt.timestamp} service={evt.service()}')
return evt
|
from django import forms
from .models import Contact
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Div, Submit
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('name', 'number', 'email', 'message')
def clean(self):
cleaned_data = super(ContactForm, self).clean()
name = cleaned_data.get('name')
phone = cleaned_data.get('number')
email = cleaned_data.get('email')
message = cleaned_data.get('message')
if not name and not phone and not email and not message:
raise forms.ValidationError('You have to write something!')
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.layout = Layout(
Field('name', css_class="form-group col-md-4 offset-md-4"),
Field('number', css_class="form-group col-md-4 offset-md-4"),
Field('email', css_class="form-group col-md-4 offset-md-4"),
Field('message', css_class="form-group"),
Submit('submit', 'Submit', css_class="btn btn-success"),
)
|
import sys
from functools import wraps
import logging
import os
import random
import time
from contextlib import contextmanager
from typing import Union
from pathlib import Path
import numpy as np
import torch
from hydra.experimental import compose, initialize
from hydra._internal.hydra import Hydra as BaseHydra
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
@contextmanager
def timer(name: str, logger: Union[logging.Logger, None] = None):
t0 = time.time()
yield
msg = f'[{name}] done in {time.time()-t0:.3f} s'
if logger:
logger.info(msg)
else:
print(msg)
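# Usage sketch for the timer context manager above (the block label 'sleep'
# is just an illustrative name; output shown is approximate):
#
#     with timer('sleep'):
#         time.sleep(0.5)   # prints "[sleep] done in 0.5xx s"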
def tail_recursive(func):
self_func = [func]
self_firstcall = [True]
self_CONTINUE = [object()]
self_argskwd = [None]
@wraps(func)
def _tail_recursive(*args, **kwd):
if self_firstcall[0] == True:
func = self_func[0]
CONTINUE = self_CONTINUE
self_firstcall[0] = False
try:
while True:
result = func(*args, **kwd)
if result is CONTINUE: # update arguments
args, kwd = self_argskwd[0]
else: # last call
return result
finally:
self_firstcall[0] = True
else: # return the arguments of the tail call
self_argskwd[0] = args, kwd
return self_CONTINUE
return _tail_recursive
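# Usage sketch for the tail_recursive decorator above. factorial is an
# illustrative example, not part of the original module; the tail call returns
# the CONTINUE sentinel to the trampoline loop, so large n does not hit
# Python's recursion limit.
#
#     @tail_recursive
#     def factorial(n, acc=1):
#         if n == 0:
#             return acc
#         return factorial(n - 1, acc * n)
#
#     factorial(10000)  # works without RecursionError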
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def load_conf(path):
    # change config_path from being relative to os.getcwd() to being relative to __file__
config_path = Path(path).parent
config_name = Path(path).stem
    # TODO: handle the case below
# (os.getcwd() / Path(path).parent).resolve() == (Path(__file__).parent / config_path).resolve()
target = (os.getcwd() / Path(path).parent).resolve()
config_path = '..' / config_path
with initialize(config_path=config_path, job_name=None):
cfg = compose(config_name=config_name, overrides=[arg for arg in sys.argv[1:] if '=' in arg])
return cfg
|
# Declares the variable "car" to equal 100
cars = 100
# Declares the variable "space_in_a_car" to equal 4
space_in_a_car = 4
# Declares the variable "driver" to equal 30
drivers = 30
# Declare the variable "passegers" to equal 90
passengers = 90
# Declares the variable "cars_not_driven" to equal the sum of cars minus drivers
cars_not_driven = cars - drivers
# declares the variable "cars_driven" to equal "drivers"
cars_driven = drivers
# declares the variable "carpool_capacity" to equal the sum of cars_driven multiplied by space_in_a_car
carpool_capacity = cars_driven * space_in_a_car
# declares the variable "average_passengers_per_car" to equal the sum of passengers divided by cars_driven
average_passengers_per_car = passengers / cars_driven
# prints "There are" 100 "Cars available."
print "There are", cars, "Cars available."
# prints "there are only" 30 "Drivers available."
print "There are only", drivers, "Drivers available."
# prints "There will be" 70 "empty cars today."
print "There will be", cars_not_driven, "empty cars today."
# prints "We can transport" 120 "people today."
print "We can transport", carpool_capacity, "people today."
# prints "We have" 90 "to carpool today."
print "We have", passengers, "to carpool today."
# prints "We need to put about" 3 "in each car."
print "We need to put about", average_passengers_per_car, "in each car."
# When he first wrote the program it spit out an error car_pool_compacity is
# not defined, this is because when he calls for it in line 13 it's carpool_capicity,
# not car_pool_compacity. Removing the "_" from car_pool will fix this
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
def index(request):
return render(request, "store/index.html")
def checkout(request):
return render(request, "store/checkout.html")
def delete(request):
del request.session["purchases"]
return redirect("/")
def buy(request):
try:
request.session["purchases"]
except KeyError:
request.session["purchases"] = True
request.session["total_quantity"] = 0
request.session["total_spent"] = 0
product = int(request.POST["product_id"])
quantity = int(request.POST["quantity"])
total_price = 0
if product == 1:
total_price = quantity * 19.99
elif product == 2:
total_price = quantity * 30.00
elif product == 3:
total_price = quantity * 4.99
else:
total_price = quantity * 10.00
request.session["total_quantity"] += quantity
request.session["total_spent"] += total_price
request.session["purchases"] = {
"total_price": total_price,
"total_items": request.session["total_quantity"],
"total_spent": request.session["total_spent"],
}
return redirect("/checkout")
|
import xml.etree.ElementTree as ET
import csv
#import xml.dom.minidom as md
#
def main():
tree = ET.parse("D:\PPTV\Collins\DMC-31218-A-25-20-43-01000-941A-D_002-00_EN-US.XML")
root=tree.getroot()
#open a file for writing
    Output_file = open(r'D:\PPTV\Collins\out.csv', 'w', newline='')  # newline='' avoids blank rows with csv.writer
#create csv writer object
csvwriter=csv.writer(Output_file)
for con in root.findall('content'):
for ref in con.findall('refs'):
for dmref in ref.findall('dmRef'):
for dmrefI in dmref.findall('dmRefIdent'):
for dmcod in dmrefI.findall('dmCode'):
print("InfoCode=",dmcod.get('infoCode'));
print("infoCodeVariant=",dmcod.get('infoCodeVariant'));
print("itemLocationCode=",dmcod.get('itemLocationCode'));
# for con in root.findall('content'):
# for illpartcat in con.findall('illustratedPartsCatalog'):
# for catseq in illpartcat.findall('catalogSeqNumber'):
# for itemseq in catseq.findall('itemSeqNumber'):
# print("PartRef=",itemseq.attrib);
# print("QuantityperNextHigherAssy=",itemseq.find('quantityPerNextHigherAssy').text);
# for p in itemseq.iter('descrForPart'):
# print("DescForPart=",p.text);
# print("DescforPart=",itemid.find('descrForPart').text);
print("**********************");
    # CSV header: row index plus part description, matching the two-column rows written below
    out_head = ['Index', 'DescforPart']
    csvwriter.writerow(out_head)
    count = 1
for con in root.findall('content'):
for d in con.iter('descrForPart'):
print("DescForPart=",d.text);
out_det=[]
out_det.append(count);
out_det.append(d.text);
csvwriter.writerow(out_det);
count=count+1;
Output_file.close();
# doc=md.parse("D:\PPTV\Collins\DMC-31218-A-25-20-43-01000-941A-D_002-00_EN-US.XML");
# print(doc.nodeName)
# print(doc.firstChild)
# print(doc.firstChild.TagName)
if __name__== "__main__": main();
|
import json
import time
import sys
from cleanser import df_header
from downloader import vinculo_str_d, month_list
def parser(month, year, file_list, df_struct):
json_data = json_encoder(month, year, file_list, df_struct)
print(json_data)
def json_encoder(month, year, file_list, employee_struct):
    # Build a list of dictionaries (LofD) of employees
employee_lofd = []
for employee_tuple in employee_struct:
vinculo = employee_tuple[0]
df = employee_tuple[1]
for employee in df.index:
employee_lofd.append(gen_employee_dict(vinculo,
df.loc[employee]))
    # procinfo was removed from this structure because it was unclear how to include stdout in the output without creating a recursion
crawling_result = {'aid':'MPF',
'month':month_list[month],
'year':str(year),
'crawler':{'id':None,
'version':None,
},
'files':file_list,
'employees':employee_lofd,
'timestamp':time.ctime(),
}
return json.dumps(crawling_result, indent=4)
def gen_employee_dict(vinculo, df_slice):
reg = int(df_slice[df_header[0]])
name = str(df_slice[df_header[1]])
role = str(df_slice[df_header[2]])
workplace = str(df_slice[df_header[3]])
income_dict = {key:float(df_slice[key]) for key in df_header[4:13]}
discount_dict = {key:float(df_slice[key]) for key in df_header[13:17]}
    # Because the structure of earnings and payouts for incomes, discounts,
    # funds and perks was not well understood, this was not implemented as
    # proposed, since it was unclear how to map the table cells to the
    # variables of the data structures proposed in the API.
employee_dict = {'reg':reg,
'name':name,
'role':role,
'type':vinculo,
'workplace':workplace,
'active': activity_test(vinculo),
'income':income_dict,
'discounts':discount_dict,
}
return employee_dict
def activity_test(vinculo):
return (vinculo == "Membro Ativo"
or vinculo == "Servidor Ativo"
or vinculo == "Colaborador")
|
import hashlib
from time import time
class BlockChain:
"""
区块链结构体
chain:包含的区块列表 索引从1开始向后计数
current_transactions:存储每次需要打包的交易
transactions:存储所有交易记录
"""
def __init__(self):
self.chain = []
self.current_transactions = []
self.transactions = []
# Create the genesis block
self.minetoblock_c(proof=100, previous_hash="")
@staticmethod
def hash(block):
"""
给一个区块生成 SHA-256 值
:param block:
:return:
"""
block_string = str(block).encode()
return hashlib.sha256(block_string).hexdigest()
def minetoblock_c(self, proof, previous_hash):
"""
创建一个新的区块加入区块链
:param proof: <int> 由工作证明算法生成的证明
:param previous_hash: (Optional) <str> 前一个区块的 hash 值
:return: <dict> 新区块
"""
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash,
}
        # Reset the current transaction list
self.current_transactions = []
self.chain.append(block)
return block
def new_transaction(self, sender, data, flagMoneyOrData):
"""
transactions相当于block_B
创建一笔新的交易到下一个被挖掘的区块中
:param sender: <str> 发送人的地址
:param data: <int> 上传的数据
:param flagMoneyOrData: <bool> data指的是下注金额/用户定义的数据
:return: <int> 持有本次交易的区块索引
"""
self.current_transactions.append({
'sender': sender,
'data': data,
'flagMoneyOrData': flagMoneyOrData,
})
self.transactions.append({
'sender': sender,
'data': data,
'flagMoneyOrData': flagMoneyOrData,
})
def last_block(self):
return self.chain[-1]
def proof_of_work(self):
"""
Simple Proof of Work Algorithm:
- Find a number p' such that hash(pp') contains leading 4 zeroes, where p is the previous p'
- p is the previous proof, and p' is the new proof
:param last_proof: <int>
:return: <int>
"""
last_block = self.last_block()
last_proof = last_block['proof']
proof = 0
while self.valid_proof(last_proof, proof) is False:
proof += 1
return proof
@staticmethod
def valid_proof(last_proof, proof):
"""
Validates the Proof: Does hash(last_proof, proof) contain 4 leading zeroes?
:param last_proof: <int> Previous Proof
:param proof: <int> Current Proof
:return: <bool> True if correct, False if not.
"""
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
|
import re
# Memory Module
class Partition:
process = "A"
start_unit = 0
size = 2
def __init__(self, process, start, size):
self.process = process
self.start_unit = start
self.size = size
class Memory:
def allocate(self, process, size):
start = self.alloc_start
if self.type == "nf": # handle next fit
rep = self.representation()
i = self.alloc_start
found = -1
while (i+size < self.size and found == -1):
if rep[i:i+size] == "."*size:
found=i
i += 1
i = 0
while (i+size < self.alloc_start and found == -1):
if rep[i:i+size] == "."*size:
found=i
i += 1
if found == -1:
return False
start = found
self.alloc_start = start+size
elif self.type == "bf": # handle best fit
m = self.representation()
found = False
for i in range(size, m.count(".")+1):
who = "^[.]{"+str(i)+"}$"
be="^[.]{"+str(i)+"}[A-Z]"
#Don't forget to add an extra character for these (ignore start character).
mid="[A-Z][.]{"+str(i)+"}[A-Z]"
end="[A-Z][.]{"+str(i)+"}$"
for rg in [ who, mid, be, end ]:
place = re.search(rg, m)
#Wohoo we have a match! :D
if (place):
found = True
start = place.start()+1 if rg in [mid, end] else 0
break
if (not found):
return False
else: # handle first fit/default
for i in range(len(self.partitions)):
p = self.partitions[i]
# handle if on first partition
if p == self.partitions[0]: # compare remaining space
if p.start_unit >= size:
start = 0
break
# handle case if on last partition
if p == self.partitions[-1]: # compare remaining space
if self.size-(p.start_unit + p.size) >= size:
start = p.start_unit+p.size
break
else:
return False
else:
p_next = self.partitions[i+1]
# handle all other case where there are two partitions
if p_next.start_unit - (p.start_unit + p.size) >= size:
start = p.start_unit + p.size
break
self.partitions.append(Partition(process, start, size))
self.partitions.sort(key=lambda p: p.start_unit)
return True
# assumes process has one associated memory block
def deallocate(self, process):
for p in self.partitions:
if p.process == process:
self.partitions.remove(p)
return True
return False
def defragment(self):
units_moved = 0
current_unit = 0
for p in self.partitions:
if p.start_unit != current_unit:
p.start_unit = current_unit
current_unit += p.size
units_moved += p.size
else:
current_unit += p.size
time_elapsed = units_moved*self.t_memmove
return time_elapsed, units_moved # return time elapsed
# assumes partitions is sorted by start_unit, and start_unit is unique
def representation(self):
representation = []
current_unit = 0
for p in self.partitions:
if p.start_unit > current_unit:
representation.append("."*(p.start_unit-current_unit))
representation.append(p.process*p.size)
else:
representation.append(p.process*p.size)
current_unit = p.start_unit+p.size
if len("".join(representation)) < self.size:
representation.append("."*(self.size-len("".join(representation))))
return "".join(representation)
def __str__(self):
# output memory visualization
rep = self.representation() # get string of memory
out_rep = []
        for i in range(len(rep) // self.split):
out_rep.append(rep[i*self.split:(i+1)*self.split]) # split into nice looking chunks
if self.size % self.split != 0:
out_rep.append(rep[(i+1)*self.split:]) #ensure even printing
return "="*self.split + "\n" + "\n".join(out_rep) + "\n" + "="*self.split
def __init__(self, type="ff", size=256, t_memmove=10, split=32):
# the type of allocation ("ff", "nf", "bf")
self.type = type
# the number of memory units
self.size = size
# size of split for output, default 32
self.split = split
# mem_move time
self.t_memmove = t_memmove
# as per instructions, a list containing process identifier,
# starting unit, and size of allocation
self.partitions = []
# where the next allocation should begin
self.alloc_start = 0
if __name__ == "__main__":
# # first fit tests
# print("Testing first fit:\n")
# test_mem = Memory("ff")
# test_mem.allocate("A", 22)
# test_mem.allocate("B", 15)
# test_mem.allocate("C", 15)
# print test_mem
# test_mem.deallocate("B")
# test_mem.allocate("D", 15)
# print test_mem
# print("\nTesting next fit:\n")
# test_mem2 = Memory("nf", 32)
# print test_mem2.alloc_start
# test_mem2.allocate("A", 22)
# print test_mem2.alloc_start
# print test_mem2.allocate("B", 2)
# print test_mem2.allocate("C", 15)
# print test_mem2.alloc_start
# print test_mem2
# print test_mem2.allocate("D", 15)
# test_mem2.deallocate("A")
# print test_mem2.alloc_start
# test_mem2.allocate("E", 2)
# print test_mem2.alloc_start
# print test_mem2
# test defrag, following example 3
print("\nDefragmentation test:\n")
test_def = Memory("ff")
# allocate nodes
test_def.allocate("H", 16)
test_def.allocate("X", 14)
test_def.allocate("J", 59)
test_def.allocate("Y", 2)
test_def.allocate("K", 35)
test_def.allocate("L", 24)
test_def.allocate("N", 8)
test_def.allocate("Z", 2)
test_def.allocate("M", 17)
test_def.allocate("W", 20)
test_def.allocate("S", 24)
# remove dummy nodes
test_def.deallocate("W")
test_def.deallocate("X")
test_def.deallocate("Y")
test_def.deallocate("Z")
# defragulate
print("starting defrag\n")
print(test_def)
time_elapsed, units_moved = test_def.defragment()
print "defrag done, time elapsed: ", time_elapsed, " and units moved: ", units_moved
print(test_def)
print("\nDefrag #2:")
test_def2 = Memory("ff")
test_def2.allocate("X", 81)
test_def2.allocate("B", 4)
test_def2.allocate("R", 42)
test_def2.allocate("E", 65)
test_def2.allocate("G", 7)
test_def2.allocate("D", 3)
test_def2.allocate("H", 40)
time_elapsed, units_moved = test_def2.defragment()
print "defrag done, time elapsed: ", time_elapsed, " and units moved: ", units_moved
test_def2.allocate("O", 46)
print(test_def2)
|
import unittest
from katas.kyu_7.return_a_sorted_list_of_objects import sort_list
class SortListTestCase(unittest.TestCase):
def test_equals_1(self):
self.assertEqual(sort_list('x', []), [])
def test_equals_2(self):
self.assertEqual(sort_list(
'b', [{'a': 2, 'b': 2}, {'a': 3, 'b': 40}, {'a': 1, 'b': 12}]
), [{'a': 3, 'b': 40}, {'a': 1, 'b': 12}, {'a': 2, 'b': 2}])
def test_equals_3(self):
self.assertEqual(sort_list(
'a', [{'a': 4, 'b': 3}, {'a': 2, 'b': 2}, {'a': 3, 'b': 40},
{'a': 1, 'b': 12}]
), [{'a': 4, 'b': 3}, {'a': 3, 'b': 40}, {'a': 2, 'b': 2},
{'a': 1, 'b': 12}])
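# The kata implementation itself lives in katas.kyu_7 and is not shown here; a
# minimal sort_list consistent with the expectations above (sort a list of
# dicts in descending order by the given key) might look like:
#
#     def sort_list(key, lst):
#         return sorted(lst, key=lambda d: d[key], reverse=True)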
|
"""
13. Longest Palindromic Substring
Question:
Given a string S, find the longest palindromic substring in S. You may assume that the
maximum length of S is 1000, and there exists one unique longest palindromic substring.
Hint:
First, make sure you understand what a palindrome means. A palindrome is a string
which reads the same in both directions. For example, “aba” is a palindrome, “abc” is not.
A common mistake:
Some people will be tempted to come up with a quick solution, which is unfortunately
flawed (however can be corrected easily):
Reverse S and become S’. Find the longest common substring between S and S’,
which must also be the longest palindromic substring.
This seemed to work, let’s see some examples below.
For example, S = “caba”, S’ = “abac”.
The longest common substring between S and S’ is “aba”, which is the answer.
Let’s try another example: S = “abacdfgdcaba”, S’ = “abacdgfdcaba”.
The longest common substring between S and S’ is “abacd”. Clearly, this is not a valid
palindrome.
We could see that the longest common substring method fails when there exists a
reversed copy of a non-palindromic substring in some other part of S. To rectify this,
each time we find a longest common substring candidate, we check if the substring’s
indices are the same as the reversed substring’s original indices. If it is, then we attempt
to update the longest palindrome found so far; if not, we skip this and find the next
candidate.
"""
"""
ALGO :
for each character, expand outward from the middle while the substring is still a palindrome : ispalindrome() does this
record and compare against the previously largest palindromic substring : res stores this
return the largest at the end : res
"""
class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
res = ""
for i in range(len(s)):
# for odd cases such as a, aba, ababa
l = self.ispalindrome(s, i, i)
# print("odd : "+l)
if len(l) > len(res):
res = l
#for even cases such as aa, abba, abbaabba
l = self.ispalindrome(s, i, i+1)
# print("even : "+l)
if len(l) > len(res):
res = l
return res
def ispalindrome(self, s, l, r):
while l >= 0 and r < len(s) and s[l] == s[r]:
# print(str(l)+" : "+str(r))
# print(s[l], s[r])
l -= 1
r += 1
return s[l+1:r]
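# Quick usage check for the Solution above ("babad" is an illustrative input;
# this implementation returns "bab", though "aba" would be equally valid).
if __name__ == '__main__':
    print(Solution().longestPalindrome("babad"))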
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class BeerAdvocateItem(scrapy.Item):
#ip_address = scrapy.Field()
pass
class RatingSummaryPageItem(scrapy.Item):
beer_name = scrapy.Field()
beer_url = scrapy.Field()
brewery_name = scrapy.Field()
brewery_url = scrapy.Field()
abv = scrapy.Field()
num_ratings = scrapy.Field()
score = scrapy.Field()
class BreweryDetailsItem(scrapy.Item):
brewery_id = scrapy.Field()
brewery_name = scrapy.Field()
brewery_url = scrapy.Field()
ba_score = scrapy.Field()
ba_score_desc = scrapy.Field()
ba_num_ratings = scrapy.Field()
place_type = scrapy.Field()
place_address1 = scrapy.Field()
place_address2 = scrapy.Field()
place_city = scrapy.Field()
place_state = scrapy.Field()
place_postal_code = scrapy.Field()
place_country = scrapy.Field()
bs_num_beers = scrapy.Field()
bs_num_reviews = scrapy.Field()
bs_num_ratings = scrapy.Field()
ps_ba_score = scrapy.Field()
ps_num_reviews = scrapy.Field()
ps_num_ratings = scrapy.Field()
ps_pDev = scrapy.Field()
class BeerRatingItem(scrapy.Item):
brewery_name = scrapy.Field()
brewery_id = scrapy.Field()
beer_name = scrapy.Field()
beer_id = scrapy.Field()
user_name = scrapy.Field()
user_url = scrapy.Field()
rating_agg = scrapy.Field()
rating_look = scrapy.Field()
rating_smell = scrapy.Field()
rating_taste = scrapy.Field()
rating_feel = scrapy.Field()
rating_overall = scrapy.Field()
review = scrapy.Field()
|
from itertools import cycle, islice
import tensorflow as tf
class FullyConnectedWTA:
"""Fully-connected winner-take-all autoencoder.
This model is deterministic.
"""
def __init__(self,
input_dim,
batch_size,
sparsity=0.05,
hidden_units=24,
encode_layers=3,
optimizer=tf.train.AdamOptimizer,
learning_rate=1e-2,
tie_weights=True,
weight_initializer=tf.random_normal_initializer(0, 0.01, seed=1),
bias_initializer=tf.constant_initializer(0.01),
name='FCWTA'):
"""Create the model.
Args:
input_dim: the dimensionality of the input data.
batch_size: the batch size to be used.
sparsity: the lifetime sparsity constraint to enforce.
hidden_units: the number of units in each ReLU (encode) layer, and
also the dimensionality of the encoded data.
encode_layers: the number ReLU (encode) layers.
optimizer: a TensorFlow optimizer op that takes only a learning rate.
learning_rate: the learning rate to train with.
tie_weights: whether to use the same weight matrix for the decode
layer and final encode layer.
weight_initializer: initializer to use for matrices of weights.
bias_initializer: initializer to use for matrices of biases.
name: the name of the variable scope to use.
"""
self.input_dim = input_dim
self.batch_size = batch_size
self.sparsity = sparsity
self.hidden_units = hidden_units
self.encode_layers = encode_layers
self.optimizer = optimizer
self.learning_rate = learning_rate
self.tie_weights = tie_weights
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self.name = name
self._initialize_vars()
def _initialize_vars(self):
"""Sets up the training graph."""
with tf.variable_scope(self.name) as scope:
self.global_step = tf.get_variable(
'global_step',
shape=[],
initializer=tf.zeros_initializer())
self.input = tf.placeholder(tf.float32, shape=[None, self.input_dim])
current = self.input
for i in range(self.encode_layers - 1):
current = self._relu_layer(current, self.input_dim, self.input_dim, i)
self.encoded = self._relu_layer(current, self.input_dim, self.hidden_units, self.encode_layers - 1)
# Make batch size the last dimension (for use with tf.nn.top_k)
encoded_t = tf.transpose(self.encoded)
# Compute the indices corresponding to the top k activations for each
# neuron in the final encoder layer
k = int(self.sparsity * self.batch_size)
_, top_indices = tf.nn.top_k(encoded_t, k=k, sorted=False)
# Transform top_indices, which contains rows of column indices, into
# indices, a list of [row, column] pairs (for use with tf.scatter_nd)
top_k_unstacked = tf.unstack(top_indices, axis=1)
row_indices = [tf.range(self.hidden_units) for _ in range(k)]
combined_columns = tf.transpose(tf.stack(_interleave(row_indices, top_k_unstacked)))
indices = tf.reshape(combined_columns, [-1, 2])
# Apply sparsity constraint
updates = tf.ones(self.hidden_units * k)
shape = tf.constant([self.hidden_units, self.batch_size])
mask = tf.scatter_nd(indices, updates, shape)
sparse_encoded = self.encoded * tf.transpose(mask)
self.decoded = self._decode_layer(sparse_encoded)
self.loss = tf.reduce_sum(tf.square(self.decoded - self.input))
self.optimizer_op = self.optimizer(self.learning_rate).minimize(
self.loss, self.global_step)
self.saver = tf.train.Saver(tf.global_variables())
def _relu_layer(self, input, input_dim, output_dim, layer_num):
with tf.variable_scope(self.name) as scope:
return tf.nn.relu_layer(
input,
tf.get_variable('encode_W_{}'.format(layer_num),
shape=[input_dim, output_dim],
initializer=self.weight_initializer),
tf.get_variable('encode_b_{}'.format(layer_num),
shape=[output_dim],
initializer=self.bias_initializer),
'encode_layer_{}'.format(layer_num))
def _decode_layer(self, input, reuse=False):
with tf.variable_scope(self.name, reuse=reuse) as scope:
decode_b = tf.get_variable('decode_b',
shape=[self.input_dim],
initializer=self.bias_initializer)
if self.tie_weights:
scope.reuse_variables()
decode_W = tf.transpose(tf.get_variable(
self._get_last_encode_layer_name(),
shape=[self.input_dim, self.hidden_units]))
else:
decode_W = tf.get_variable(
'decode_W',
shape=[self.hidden_units, self.input_dim],
initializer=self.weight_initializer)
return tf.matmul(input, decode_W) + decode_b
def _get_last_encode_layer_name(self):
return 'encode_W_{}'.format(self.encode_layers - 1)
def step(self, session, input, forward_only=False):
"""Run a step of the model, feeding the given inputs.
Args:
session: TensorFlow session to use.
input: NumPy array to feed as input.
forward_only: whether to do the backward step or only forward.
Returns:
A tuple containing the reconstruction and the (summed) squared loss.
Raises:
ValueError: if batch size (resp. dimensionality) of input does not
agree with the batch_size (resp. input_dim) provided in the
constructor.
"""
if input.shape[0] != self.batch_size:
raise ValueError('Input batch size must equal the batch_size '
'provided in the constructor, {} != {}.'.format(
input.shape[0], self.batch_size))
if input.shape[1] != self.input_dim:
raise ValueError('Dimensionality of input must equal the input_dim '
'provided in the constructor, {} != {}.'.format(
input.shape[1], self.input_dim))
if forward_only:
decoded, loss = session.run(
[self.decoded, self.loss],
feed_dict={self.input: input})
else:
decoded, loss, _ = session.run(
[self.decoded, self.loss, self.optimizer_op],
feed_dict={self.input: input})
return decoded, loss
def encode(self, session, input):
"""Encode the given inputs.
Args:
session: TensorFlow session to use.
input: NumPy array to feed as input.
Returns:
            The encoded data, with shape (input.shape[0], hidden_units).
Raises:
ValueError: if dimensionality of input disagrees with the input_dim
provided in the constructor.
"""
if input.shape[1] != self.input_dim:
            raise ValueError('Dimensionality of input must equal the input_dim '
'provided in the constructor, {} != {}.'.format(
input.shape[1], self.input_dim))
return session.run(self.encoded, feed_dict={self.input: input})
def get_dictionary(self, session):
"""Fetch (approximately) the learned code dictionary.
Args:
session: TensorFlow session to use.
Returns:
The code dictionary, with shape (hidden_units, input_dim).
"""
fake_input = 1e15 * tf.eye(self.hidden_units)
return session.run(self._decode_layer(fake_input, reuse=True))
def _interleave(xs, ys):
"""Interleaves the two given lists (assumed to be of equal length)."""
return [val for pair in zip(xs, ys) for val in pair]
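# Minimal usage sketch, assuming TensorFlow 1.x. The dimensions and the random
# data below are illustrative and not taken from the original project.
if __name__ == '__main__':
    import numpy as np
    fcwta = FullyConnectedWTA(input_dim=784, batch_size=64)
    data = np.random.rand(64, 784).astype(np.float32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10):
            decoded, loss = fcwta.step(sess, data)
            print('step {}: loss {:.3f}'.format(i, loss))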
|
from django.db import models
from django.contrib.auth.models import User
class Project(models.Model):
STATUS_CHOICES = [
('LATE', 'Late'),
('ON TRACK', 'On Track'),
        ('NEAR COMPLETION', 'Near Completion'),
]
description = models.CharField(max_length=30)
status = models.CharField(choices = STATUS_CHOICES, default='ON TRACK', max_length=20)
owner = models.ForeignKey(User, blank=True, null=True, on_delete = models.SET_NULL)
class Milestone(models.Model):
start = models.DateField()
end = models.DateField()
project = models.ForeignKey(Project, on_delete=models.CASCADE)
class Action(models.Model):
description = models.CharField(max_length=300)
status = models.BooleanField(default=False)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
|
import common
import random
def run(nick, message, cmd_prefix):
arg = common.get_plugin_argument(message, cmd_prefix, 'choose')
if arg is None:
return
if not arg:
return '{}: nothing to choose from'.format(nick)
result = random.choice(arg.strip().split(' | ')).strip()
return '{}: {}'.format(nick, result)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from pathlib import PurePath
from typing import Any
from pants.backend.docker.target_types import AllDockerImageTargets
from pants.backend.docker.target_types import rules as docker_target_types_rules
from pants.backend.helm.subsystems import k8s_parser
from pants.backend.helm.subsystems.k8s_parser import ParsedKubeManifest, ParseKubeManifestRequest
from pants.backend.helm.target_types import HelmDeploymentFieldSet
from pants.backend.helm.target_types import rules as helm_target_types_rules
from pants.backend.helm.util_rules import renderer
from pants.backend.helm.util_rules.renderer import (
HelmDeploymentCmd,
HelmDeploymentRequest,
RenderedHelmFiles,
)
from pants.backend.helm.utils.yaml import FrozenYamlIndex, MutableYamlIndex
from pants.build_graph.address import MaybeAddress
from pants.engine.addresses import Address
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import Digest, DigestEntries, FileEntry
from pants.engine.internals.native_engine import AddressInput, AddressParseException
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
ExplicitlyProvidedDependencies,
InferDependenciesRequest,
InferredDependencies,
)
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class AnalyseHelmDeploymentRequest(EngineAwareParameter):
field_set: HelmDeploymentFieldSet
def debug_hint(self) -> str | None:
return self.field_set.address.spec
@dataclass(frozen=True)
class HelmDeploymentReport(EngineAwareReturnType):
address: Address
image_refs: FrozenYamlIndex[str]
@property
def all_image_refs(self) -> FrozenOrderedSet[str]:
return FrozenOrderedSet(self.image_refs.values())
def level(self) -> LogLevel | None:
return LogLevel.DEBUG
def metadata(self) -> dict[str, Any] | None:
return {"address": self.address, "image_refs": self.image_refs}
@rule(desc="Analyse Helm deployment", level=LogLevel.DEBUG)
async def analyse_deployment(request: AnalyseHelmDeploymentRequest) -> HelmDeploymentReport:
rendered_deployment = await Get(
RenderedHelmFiles,
HelmDeploymentRequest(
cmd=HelmDeploymentCmd.RENDER,
field_set=request.field_set,
description=f"Rendering Helm deployment {request.field_set.address}",
),
)
rendered_entries = await Get(DigestEntries, Digest, rendered_deployment.snapshot.digest)
parsed_manifests = await MultiGet(
Get(
ParsedKubeManifest,
ParseKubeManifestRequest(file=entry),
)
for entry in rendered_entries
if isinstance(entry, FileEntry)
)
    # Build YAML index of Docker image refs for future processing during dependency inference or post-rendering.
image_refs_index: MutableYamlIndex[str] = MutableYamlIndex()
for manifest in parsed_manifests:
for entry in manifest.found_image_refs:
image_refs_index.insert(
file_path=PurePath(manifest.filename),
document_index=entry.document_index,
yaml_path=entry.path,
item=entry.unparsed_image_ref,
)
return HelmDeploymentReport(
address=request.field_set.address, image_refs=image_refs_index.frozen()
)
@dataclass(frozen=True)
class FirstPartyHelmDeploymentMappingRequest(EngineAwareParameter):
field_set: HelmDeploymentFieldSet
def debug_hint(self) -> str | None:
return self.field_set.address.spec
@dataclass(frozen=True)
class FirstPartyHelmDeploymentMapping:
"""A mapping between `helm_deployment` target addresses and tuples made up of a Docker image
reference and a `docker_image` target address.
The tuples of Docker image references and addresses are stored in a YAML index so we can track
the locations in which the Docker image refs appear in the deployment files.
"""
address: Address
indexed_docker_addresses: FrozenYamlIndex[tuple[str, Address]]
@rule
async def first_party_helm_deployment_mapping(
request: FirstPartyHelmDeploymentMappingRequest, docker_targets: AllDockerImageTargets
) -> FirstPartyHelmDeploymentMapping:
deployment_report = await Get(
HelmDeploymentReport, AnalyseHelmDeploymentRequest(request.field_set)
)
def image_ref_to_address_input(image_ref: str) -> tuple[str, AddressInput] | None:
try:
return image_ref, AddressInput.parse(
image_ref,
description_of_origin=f"the helm_deployment at {request.field_set.address}",
relative_to=request.field_set.address.spec_path,
)
except AddressParseException:
return None
indexed_address_inputs = deployment_report.image_refs.transform_values(
image_ref_to_address_input
)
maybe_addresses = await MultiGet(
Get(MaybeAddress, AddressInput, ai) for _, ai in indexed_address_inputs.values()
)
docker_target_addresses = {tgt.address for tgt in docker_targets}
maybe_addresses_by_ref = {
ref: maybe_addr
for ((ref, _), maybe_addr) in zip(indexed_address_inputs.values(), maybe_addresses)
}
def image_ref_to_actual_address(
image_ref_ai: tuple[str, AddressInput]
) -> tuple[str, Address] | None:
image_ref, _ = image_ref_ai
maybe_addr = maybe_addresses_by_ref.get(image_ref)
if not maybe_addr:
return None
if not isinstance(maybe_addr.val, Address):
return None
if maybe_addr.val not in docker_target_addresses:
return None
return image_ref, maybe_addr.val
return FirstPartyHelmDeploymentMapping(
address=request.field_set.address,
indexed_docker_addresses=indexed_address_inputs.transform_values(
image_ref_to_actual_address
),
)
class InferHelmDeploymentDependenciesRequest(InferDependenciesRequest):
infer_from = HelmDeploymentFieldSet
@rule(desc="Find the dependencies needed by a Helm deployment")
async def inject_deployment_dependencies(
request: InferHelmDeploymentDependenciesRequest,
) -> InferredDependencies:
chart_address = None
chart_address_input = request.field_set.chart.to_address_input()
if chart_address_input:
chart_address = await Get(Address, AddressInput, chart_address_input)
explicitly_provided_deps, mapping = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(request.field_set.dependencies)),
Get(
FirstPartyHelmDeploymentMapping,
FirstPartyHelmDeploymentMappingRequest(request.field_set),
),
)
dependencies: OrderedSet[Address] = OrderedSet()
if chart_address:
dependencies.add(chart_address)
    for image_ref, candidate_address in mapping.indexed_docker_addresses.values():
matches = frozenset([candidate_address]).difference(explicitly_provided_deps.includes)
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
matches,
request.field_set.address,
context=softwrap(
f"""
The Helm deployment {request.field_set.address} declares
                {image_ref} as Docker image reference
"""
),
import_reference="manifest",
)
maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)
if maybe_disambiguated:
dependencies.add(maybe_disambiguated)
    logger.debug(
f"Found {pluralize(len(dependencies), 'dependency')} for target {request.field_set.address}"
)
return InferredDependencies(dependencies)
def rules():
return [
*collect_rules(),
*renderer.rules(),
*k8s_parser.rules(),
*helm_target_types_rules(),
*docker_target_types_rules(),
UnionRule(InferDependenciesRequest, InferHelmDeploymentDependenciesRequest),
]
|
# Given a string s consisting only of upper/lower-case letters and the space
# character ' ', return the length of its last word. If the string is displayed
# scrolling from left to right, the last word is the last one to appear.
#
# If there is no last word, return 0.
#
# Note: a word is a maximal substring made up of letters only, containing no
# space characters.
#
#
#
# Example:
#
# Input: "Hello World"
# Output: 5
#
# Related Topics: String
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def lengthOfLastWord(self, s: str) -> int:
if not s: return 0
count = 0
for i in s[::-1]:
if i != " ":
count += 1
elif count == 0:
continue
else:
break
return count
# leetcode submit region end(Prohibit modification and deletion)
count = Solution().lengthOfLastWord("a ")
print(count)
|
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Date: 2017.07.03
# Author: Luis Cardoso
# Description: Create a list using the header information of all scripts and
# export to a CSV file.
# Modified by: Luis Cardoso
# Version: 0.1
import os
import sys
import time
import csv
################################################################################
# Global Constants
################################################################################
TIME_DAY_IN_SECONDS = 24*60*60 # 1d => 24h
TIME_HOUR_IN_SECONDS = 60*60 # 1h => 60m
TIME_MINUTE_IN_SECONDS = 60 # 1m => 60s
################################################################################
# Definitions
################################################################################
def printMessage(title='Dummy Title', message='Dummy message!', error_status=0):
"""Print general messages to console, and if used as error parser, this will
terminate after printing."""
print '\n%s:' % title.title()
print message
# Check error status and exit if the error is greater than 0
# Obs.: 0 means success
if error_status > 0:
sys.exit(error_status)
print 80*'-'
def convertSeconds2Time(seconds=0):
"""Convert a value in seconds to a tuple with values for days, hours,
minutes and seconds. This can be used to write the time spent on some task,
without the need to make extra manual calculation."""
seconds = int(seconds)
processed = 0
days = (seconds - processed) / TIME_DAY_IN_SECONDS
processed = days * TIME_DAY_IN_SECONDS
hours = (seconds - processed) / TIME_HOUR_IN_SECONDS
processed += hours * TIME_HOUR_IN_SECONDS
minutes = (seconds - processed) / TIME_MINUTE_IN_SECONDS
processed += minutes * TIME_MINUTE_IN_SECONDS
seconds = seconds - processed
return (days, hours, minutes, seconds)
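# Quick sanity check (illustrative): 90061 seconds is 1 day, 1 hour, 1 minute
# and 1 second.
#   print convertSeconds2Time(90061)   # -> (1, 1, 1, 1)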
def main():
"""Collect the data from the list of files and stores on CSV file."""
result_row = []
argv_len = len(sys.argv)
main_csv_path = ""
main_start_time = time.time()
main_start_time_tuple = time.localtime(main_start_time)
main_start_time_string = time.strftime("%Y-%m-%d_%H-%M-%S", main_start_time_tuple)
main_stop_time = 0
if argv_len > 0:
# Grab the absolute path from this file
main_csv_path = os.path.abspath(sys.argv[0])
# Filter the absolute to retain only the directory path
main_csv_path = os.path.dirname(main_csv_path)
# Join the CSV filename to the previous directory path
# Note: The filename will have the date and time.
main_csv_path = os.path.join(main_csv_path, "py_list_%s.csv" % main_start_time_string)
print "The results will be present in:\n\"%s\"" % main_csv_path
# Try to open the CSV file in Write Binary format
try:
csvfile = open(main_csv_path, 'wb')
except:
e = sys.exc_info()
printMessage('File IO', '\n'.join(e), 0x11)
# Create an handler with the specified properties
main_csv_writer = csv.writer(csvfile, delimiter = ',', quotechar = '"')
        # Write the first row as a header for the values below
#result_row = ['Scenario', 'Auteur', 'Description', 'Modified by', 'Version', 'Requirement', 'Table']
result_row = ['Scenario', 'Auteur', 'Description', 'Modified by', 'Version', 'Path']
main_csv_writer.writerow(result_row)
    # Check the files passed as arguments to this script
for argv_param in range(1, argv_len):
filename = sys.argv[argv_param]
commentaire_id = 0
# Grab the absolute path from this file
filename_absolute = os.path.abspath(filename)
path_absolute = filename_absolute
# If the given path point to a file, filter its directory path
if os.path.isfile(filename_absolute):
path_absolute = os.path.dirname(filename_absolute)
# Setting the patterns to be used to detect the appropriate files
f_pattern_1 = 'scenario_'
f_pat_len_1 = len(f_pattern_1)
f_pattern_2 = '.py'
f_pat_len_2 = len(f_pattern_2)
# Recursively walk every directory and collect all files to be used
# as module, in order to get the required information
for rl, dl, fl in os.walk(path_absolute):
# Add the actual path as Python Lib path, in order to be able to
# import the Python file as module
sys.path.append(rl)
for f in fl:
f_len = 0
f_pos_1 = 0
f_pos_2 = 0
f_len = len(f)
# Check if the file has the required patterns, storing the
# position found (remember, -1 means not found)
f_pos_1 = f.lower().find(f_pattern_1)
f_pos_2 = f.lower().find(f_pattern_2)
# Check if the positions found are in the required positions
if f_pos_1 == 0 and f_pos_2 > 0 and (f_len - f_pat_len_2 == f_pos_2):
# Grab the filename without the extension
f_module = f[0:f_pos_2]
# Import the module and grab the required constants and
# stores the data as a new row on CSV file
#_temp = __import__(f_module, globals(), locals(), ['Auteur', 'Description', 'Modifiedby', 'Version', 'Requirement', 'Table'])
#result_row = [f_module, _temp.Auteur, _temp.Description, _temp.Modifiedby, _temp.Version, _temp.Requirement, _temp.Table]
_temp = __import__(f_module, globals(), locals(), ['Auteur', 'Description', 'Modifiedby', 'Version'])
result_row = [f_module, _temp.Auteur, _temp.Description, _temp.Modifiedby, _temp.Version, rl]
main_csv_writer.writerow(result_row)
# Close the file and checks if it is really closed
csvfile.close()
if not csvfile.closed:
printMessage('File IO', 'I can\'t close "%s".' % main_csv_path, 0x12)
main_stop_time = time.time()
print 80*'#'
print "# Total time spent: %dd %dh %dm %ds" % (convertSeconds2Time(main_stop_time - main_start_time))
print 80*'#'
if __name__ == '__main__':
main()
|
# -*- coding: utf8 -*-
import cx_Oracle
import requests
import json
import os
from backend import syn_sign,syn_student
#Without this line, Chinese character encoding problems occur!!!
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
def a(date):
host="xjtudlc_kq/it516kqdlc8tj@uc.xjtu.edu.cn/orcl.uc"
connection=cx_Oracle.connect(host)
cursor = connection.cursor()
sql = """select *
from SIGNIN.CARD_SIGNIN
where node_name in ('西一楼','东二楼开发科','9楼')
and sign_date > to_date('%s','yyyy-mm-dd hh24:mi:ss') """% (date)
# sql = """ select count(*) from SIGNIN.CARD_SIGNIN where sign_date > to_date('%s','yyyy-mm-dd hh24:mi:ss') """% (date)
# sql = u""" select table_name from all_tables """
# sql = sql.decode('utf8').encode('gbk')
# sql = sql.encode('gbk').decode('utf8')
# sql = sql.encode('utf8').decode('gbk')
# sql = sql.encode('gbk')
# sql = sql.encode('utf8')
cursor.execute(sql)
res = [dict(person_name=row[0],id_num=row[1],node_name=row[2],sign_date=row[3].strftime('%Y-%m-%d %H:%M:%S')) for row in cursor]
toReturn = dict(rs=json.dumps(res), date=date,password='xueshubu2016')
print "finished!"
cursor.close()
connection.close()
return toReturn
latest = requests.get('http://checkin.9lou.org/get/new').json()
print latest['date']
post_data = a(str(latest['date']))
# post_data = a('2017-05-01 19:55:36')
r = requests.post("http://checkin.9lou.org/put/bulk",data=post_data)
print r.text
|
#coding=utf8
#########################################################################
# Copyright (C) 2016 All rights reserved.
#
# File name:   RomeAnnotation.py
# Created by:  unicodeproject
# Created on:  2016-11-29
# Description:
#
# Notes: convert Japanese kana readings into romaji.
#        Segment the kana sequence with shortest-path word segmentation, e.g. シャギ can be split into シ、ャ、ギ or シャ、ギ.
#        Take care with the handling of the sokuon (っ/ッ).
#########################################################################
#!/usr/bin/python
# please add your code here!
__Author__="finallyly"
import sys;
reload(sys);
sys.setdefaultencoding('utf8');
class RomeAnnotator:
def __init__(self):
self.wordict={};
def loadWordList(self,pathname):
fid = open(pathname,"r");
while True:
line = fid.readline();
if (len(line)==0):
break;
line = line.replace("\n","");
col = line.split("\t");
            col[0] = col[0].strip();
            col[1] = col[1].strip();
            col[2] = col[2].strip();
if self.wordict.has_key(col[0]):
self.wordict[col[0]].append((col[1],int(col[2])));
else:
self.wordict[col[0]]=[];
self.wordict[col[0]].append((col[1],int(col[2])));
for m in self.wordict.keys():
sum = 0;
for i in range(0,len(self.wordict[m])):
sum+=float(self.wordict[m][i][1]);
for i in range(0,len(self.wordict[m])):
val=self.wordict[m][i][1]/sum;
self.wordict[m][i]=(self.wordict[m][i][0],val);
#for m in self.wordict.keys():
#for i in range(0,len(self.wordict[m])):
#print m, self.wordict[m][i][0],self.wordict[m][i][1];
def DynamicShortestPathSegment(self,sentence):
usentence = unicode(sentence,"UTF-8");
        #build the word lattice from the dictionary
uindex = [];
for i in range(0,len(usentence)+1):
uindex.append([]);
pos = 0;
while pos<=len(usentence)-1:
j=pos+1;
while j <=len(usentence):
upiece=usentence[pos:j];
utf8counterpart=upiece.encode("UTF-8");
                #single unicode characters are always present in the word lattice
if j-pos == 1:
uindex[pos].append(j);
else:
if self.wordict.has_key(utf8counterpart):
uindex[pos].append(j);
j+=1;
pos+=1;
uindex[pos].append(-1);
        #use the uindex 2D array and dynamic programming to find the shortest path
cost = 1;
max_cost = 10000;
pos = len(usentence);
        #store the minimum cost of the lattice;
        #store the backtracking path corresponding to the minimum cost
ulen=[];
ucost=[];
for i in range(0,len(usentence)+1):
ucost.append(max_cost);
ulen.append(i+1);
while pos>=0:
ucost[pos] = max_cost;
for m in uindex[pos]:
temp = cost+ucost[m];
if (temp < ucost[pos]):
ucost[pos] = temp;
ulen[pos] = m;
if (m==-1):
                    #tail node
ucost[pos]=0;
ulen[pos]=-1;
pos-=1;
pos = 0;
seg=[];
while pos<len(usentence):
temp=usentence[pos:ulen[pos]].encode("UTF-8");
seg.append(temp);
pos=ulen[pos];
return seg;
def AnnotateByWordSegmentor(self,sentence):
seg = self.DynamicShortestPathSegment(sentence);
length=[];
now=[];
eflag = 0;
for i in range(0,len(seg)):
if (not self.wordict.has_key(seg[i])):
eflag = 1;
break;
else:
count=len(self.wordict[seg[i]]);
length.append(count);
now.append(-1);
if eflag == 1:
return None;
pos = 0;
now[pos]=-1;
finalresult = [];
while pos != -1:
now[pos]+=1;
if now[pos] < length[pos]:
if pos==len(seg)-1:
score = 1.0;
mystr="";
                    #for the sokuon, keep a second candidate that swallows the 'xtu' reading,
                    #i.e. きっさてん can be annotated as either ki'xtu'ssa'te'nn or ki'ssa'te'nn
mystr2="";
                    #use flag to mark the sokuon
for i in range(0,len(seg)):
flag=0;
if i >0 and seg[i]!="ー" and (seg[i-1]=="っ"or seg[i-1]=="ッ"):
flag=1;
if flag == 0:
if mystr=="":
mystr+=self.wordict[seg[i]][now[i]][0];
else:
mystr+="'";
mystr+=self.wordict[seg[i]][now[i]][0];
if mystr2!="":
mystr2+="'";
mystr2+=self.wordict[seg[i]][now[i]][0];
else:
if mystr=="":
mystr+=(self.wordict[seg[i]][now[i]][0][0:1]+self.wordict[seg[i]][now[i]][0]);
else:
mystr2=mystr;
subcol=mystr2.split("'");
mystr2="'".join(subcol[:-1]);
mystr+="'";
mystr2+="'";
mystr2+=(self.wordict[seg[i]][now[i]][0][0:1]+self.wordict[seg[i]][now[i]][0]);
mystr+=(self.wordict[seg[i]][now[i]][0][0:1]+self.wordict[seg[i]][now[i]][0]);
score*=self.wordict[seg[i]][now[i]][1];
finalresult.append((mystr,score));
if mystr2!="":
finalresult.append((mystr2,score));
else:
pos+=1;
else:
now[pos]=-1;
pos-=1;
return finalresult;
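# Illustrative usage sketch (the word-list file name and its tab-separated
# <kana>\t<romaji>\t<weight> layout are assumptions inferred from loadWordList,
# not part of the original file):
# annotator = RomeAnnotator();
# annotator.loadWordList("kana2rome.txt");
# print annotator.AnnotateByWordSegmentor("きっさてん");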
|
from flask import Flask, render_template # Import Flask to allow us to create our app
app = Flask(__name__) # Create a new instance of the Flask class called "app"
@app.route('/') # The "@" decorator associates this route with the function immediately following
def index():
return "Hello World!"
@app.route('/dojo')
def dojo():
return "Dojo!"
@app.route('/hello/<string:name>') # for a route '/hello/____' anything after '/hello/' gets passed as a variable 'name'
def hi(name):
return "Hi, " + str(name)
@app.route('/repeat/<int:num>/<string:word>')
def number_and_words(num, word):
stuff = ''
for x in range(0, int(num)):
stuff += str(word) + ' '
return stuff
@app.errorhandler(404)
def page_not_found(e):
# note that we set the 404 status explicitly
    return 'Sorry! No response. Try again.', 404
if __name__=="__main__": # Ensure this file is being run directly and not from a different module
app.run(debug=True) # Run the app in debug mode.
|
distancia = float(input('What distance was traveled, in km: '))
dias = float(input('For how many days was the car rented: '))
valordias = 60.00 * dias
valordistancia = 0.15 * distancia
total = valordias + valordistancia
print(f'R${total:.2f}')
|
import os
import re
import requests
# directory to walk
dir_path = '../_posts/'
# regular expressions matching image lines and flickr image URLs
md_img_pattern = re.compile(r'!\[.*?\]\((.*?)\)')
html_image_pattern = re.compile(r'<img.*?src="(.*?farm8\.staticflickr\.com.*?)".*?>')
flickr_pattern = re.compile(r'staticflickr\.com')
# file_type = '.html'
file_type = '.md'
if file_type == '.html':
img_pattern = html_image_pattern
else:
img_pattern = md_img_pattern
def get_count(count, all_image_url):
if len(all_image_url) <= 9:
return count
elif len(all_image_url) <= 99:
return f"0{count}" if count < 10 else count
else:
raise Exception("图片数量超过99张")
def handle_file(file: str, root: str):
    # counter used to generate file names
count = 1
file_path = os.path.join(root, file)
    # read the file content
with open(file_path, 'r') as f:
content = f.read()
    # find image lines and download the flickr images
all_image_url = img_pattern.findall(content)
for img_url in all_image_url:
if flickr_pattern.search(img_url):
response = requests.get(img_url)
            # generate a new file name
new_name = f'{os.path.splitext(file)[0]}_{get_count(count, all_image_url)}{os.path.splitext(img_url)[1]}'
            # save the image and replace the original URL
with open(os.path.join(root, new_name), 'wb') as f:
f.write(response.content)
content = content.replace(img_url, new_name)
            # update the counter
count += 1
print(f"处理文件{file_path}中,已下载{count - 1}张图片")
# 将修改后的内容写入文件
with open(file_path, 'w') as f:
f.write(content)
if count > 1:
print(f"处理文件{file_path}成功,共替换{count - 1}张图片")
return count
def handle_all():
    # walk all markdown files under the directory
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith(file_type):
count = handle_file(file, root)
# if count > 1:
# exit()
'''
[x] Recursively walk the directory, find all md files and their ![]() style images
[x] Download into the image directory
[x] Rename: prefix (file name, url, time), suffix, auto-increment?
[x] Replace the url so it works both for local preview and on github
[x] a/b test: try it on a single file first
[x] Local format replacement: /images/qiniu-trail-151205hangzhou-25 replaced with ../images/mac_finder_sort_photos_by_taken_time_1
[x] Support the html format
[x] Support the md format
'''
if __name__ == '__main__':
handle_all()
# handle_file('2014-04-10-my_favourite_tshirt.md', '../_posts/')
|
from collections import defaultdict
import numpy as np
import networkx as nx
import sys
from attrdict import AttrDict
from logging import getLogger
from genice_svg import hooks, render_svg
from countrings import countrings_nx as cr
v1 = np.array([0.0, 0.0, 0.0])
r1 = 0.75
v2 = np.array([1.0, 1.0, 1.0])
r2 = 0.5
rb = 0.25
prims = []
prims.append(hooks.clip_cyl(v1, r1, v2, r2, rb) + [rb, {}])
prims.append([v1, "C", r1, {}])
prims.append([v2, "C", r2, {}])
render_svg.Render(prims, r1)
|
from tek import cli, Config
@cli(parse_cli=False)
def cli_test(data):
data.append(Config['sec1'].key1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging as log
import yaml
import os
def get_settings_yml_file():
yml_file = None
config_file = "configs/settings.yaml"
try:
with open(config_file, 'r') as yml:
yml_file = yaml.load(yml, Loader=yaml.SafeLoader)
    except OSError:
        log.error("Couldn't find %s", config_file)
exit()
return yml_file
def config_path():
yml_file = get_settings_yml_file()
try:
return yml_file['config']['path']
except KeyError:
log.error("No config path in settings file")
return "Missing key!"
def repo_bin():
yml_file = get_settings_yml_file()
try:
return yml_file['repo']['bin']
except KeyError:
log.error("No repo bin in settings file")
return "Missing key!"
def repo_reference():
yml_file = get_settings_yml_file()
try:
return yml_file['repo']['reference']
except KeyError:
log.error("No repo reference in settings file")
return "Missing key!"
def aarch32_toolchain_path():
yml_file = get_settings_yml_file()
try:
return yml_file['toolchain']['aarch32_path']
except KeyError:
log.error("No aarch32 toolchain in settings file")
return "Missing key!"
def aarch64_toolchain_path():
yml_file = get_settings_yml_file()
try:
return yml_file['toolchain']['aarch64_path']
except KeyError:
log.error("No aarch64 toolchain in settings file")
return "Missing key!"
def aarch32_prefix():
yml_file = get_settings_yml_file()
try:
return yml_file['toolchain']['aarch32_prefix']
except KeyError:
log.error("No aarch32 prefix in settings file")
return "Missing key!"
def aarch64_prefix():
yml_file = get_settings_yml_file()
try:
return yml_file['toolchain']['aarch64_prefix']
except KeyError:
log.error("No aarch64 prefix in settings file")
return "Missing key!"
def workspace_path():
yml_file = get_settings_yml_file()
try:
return yml_file['workspace']['path']
except KeyError:
log.error("No workspace path in settings file")
return "Missing key!"
def log_dir():
try:
if os.environ['IBART_LOG_DIR']:
return os.environ['IBART_LOG_DIR']
except KeyError:
pass
yml_file = get_settings_yml_file()
try:
return yml_file['log']['dir']
except KeyError:
log.error("No log dir in settings file")
return "Missing key!"
def log_file():
try:
if os.environ['IBART_CORE_LOG']:
return os.environ['IBART_CORE_LOG']
except KeyError:
pass
yml_file = get_settings_yml_file()
try:
return yml_file['log']['file']
except KeyError:
log.error("No log file specified in settings file or env")
return "Missing key!"
def db_file():
try:
if os.environ['IBART_DB_FILE']:
return os.environ['IBART_DB_FILE']
except KeyError:
pass
yml_file = get_settings_yml_file()
try:
return yml_file['db']['file']
except KeyError:
log.error("No db file specified in settings file or env")
return "Missing key!"
def jobdefs_path():
try:
if os.environ['IBART_JOBDEFS']:
return os.environ['IBART_JOBDEFS']
except KeyError:
pass
yml_file = get_settings_yml_file()
try:
return yml_file['jobs']['path']
except KeyError:
log.error("No jobdefs folder specified in settings file or env")
return "Missing key!"
def remote_jobs():
yml_file = get_settings_yml_file()
my_jobs = []
try:
yml_iter = yml_file['jobs']['remotedefs']
for i in yml_iter:
my_jobs.append("{}".format(i))
except KeyError:
log.error("No remote jobdefs in settings file")
return "Missing key!"
return my_jobs
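# For reference, a configs/settings.yaml consistent with the getters above could
# look like the following illustrative sketch (all paths and values here are
# placeholders, not shipped defaults):
# config:    {path: /path/to/configs}
# repo:      {bin: /usr/bin/repo, reference: /srv/repo-mirror}
# toolchain: {aarch32_path: /opt/gcc-arm, aarch64_path: /opt/gcc-aarch64,
#             aarch32_prefix: arm-linux-gnueabihf-, aarch64_prefix: aarch64-linux-gnu-}
# workspace: {path: /srv/workspace}
# log:       {dir: /var/log/ibart, file: core.log}
# db:        {file: ibart.db}
# jobs:      {path: jobdefs, remotedefs: [https://example.com/jobdefs.git]}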
###############################################################################
# Everything below this line is just for debugging this
###############################################################################
def foo():
yml_file = get_settings_yml_file()
try:
return yml_file['foo']['aarch64_path']
except KeyError:
return "Missing key!"
def initialize():
log.info("Configure settings")
log.debug("config: {}".format(config_path()))
log.debug("repo binary: {}".format(repo_bin()))
log.debug("repo reference: {}".format(repo_reference()))
log.debug("aarch32_toolchain_path: {}".format(aarch32_toolchain_path()))
log.debug("aarch64_toolchain_path: {}".format(aarch64_toolchain_path()))
log.debug("aarch32_prefix: {}".format(aarch32_prefix()))
log.debug("aarch64_prefix: {}".format(aarch64_prefix()))
log.debug("workspace_path: {}".format(workspace_path()))
log.debug("log_dir: {}".format(log_dir()))
log.debug("log_file: {}".format(log_file()))
log.debug("db_file: {}".format(db_file()))
log.debug("config_path: {}".format(config_path()))
log.debug("remote_jobs: {}".format(remote_jobs()))
def initialize_logger():
LOG_FMT = ("[%(levelname)s] %(funcName)s():%(lineno)d %(message)s")
log.basicConfig(
# filename="core.log",
level=log.DEBUG,
format=LOG_FMT,
filemode='w')
if __name__ == "__main__":
initialize_logger()
initialize()
foo()
|
"""
CCT modeling and optimization code
Author: 赵润晓
Date: 2021-05-21
"""
from cctpy import *
from hust_sc_gantry import HUST_SC_GANTRY
import time
import numpy as np
# tunable parameters
# momentum dispersions
momentum_dispersions = [-0.05, -0.0167, 0.0167, 0.05]
# number of particles per plane per momentum dispersion
particle_number_per_plane_per_dp = 12
# number of particles per gantry (beamline)
particle_number_per_gantry = len(
momentum_dispersions) * particle_number_per_plane_per_dp * 2
ga32 = GPU_ACCELERATOR()
def create_gantry_beamline(param=[]):
qs1_gradient = param[0] if len(param) > 0 else 0
qs2_gradient = param[1] if len(param) > 0 else 0
qs1_second_gradient = param[2] if len(param) > 0 else 0
qs2_second_gradient = param[3] if len(param) > 0 else 0
dicct_tilt_1 = param[4] if len(param) > 0 else 90
dicct_tilt_2 = param[5] if len(param) > 0 else 90
dicct_tilt_3 = param[6] if len(param) > 0 else 90
agcct_tilt_0 = param[7] if len(param) > 0 else 90
agcct_tilt_2 = param[8] if len(param) > 0 else 90
agcct_tilt_3 = param[9] if len(param) > 0 else 90
dicct12_current = param[10] if len(param) > 0 else 10000
agcct12_current = param[11] if len(param) > 0 else 10000
agcct1_wn = int(param[12]) if len(param) > 0 else 20
agcct2_wn = int(param[13]) if len(param) > 0 else 20
q1 = param[14] if len(param) > 0 else 0
q2 = param[15] if len(param) > 0 else 0
####################################
DL1 = 1.592
GAP1 = 0.5
GAP2 = 0.5
qs1_length = 0.27
qs2_length = 0.27
DL2 = 2.5
qs1_aperture_radius = 60 * MM
qs2_aperture_radius = 60 * MM
dicct12_tilt_angles = [30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3]
agcct12_tilt_angles = [agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3]
agcct1_winding_number = agcct1_wn
agcct2_winding_number = agcct2_wn
dicct12_winding_number = 42
agcct1_bending_angle = 22.5 * (agcct1_wn / (agcct1_wn + agcct2_wn))
agcct2_bending_angle = 22.5 * (agcct2_wn / (agcct1_wn + agcct2_wn))
agcct12_inner_small_r = 92.5 * MM - 20 * MM # 92.5
agcct12_outer_small_r = 108.5 * MM - 20 * MM # 83+15
dicct12_inner_small_r = 124.5 * MM - 20 * MM # 83+30+1
dicct12_outer_small_r = 140.5 * MM - 20 * MM # 83+45 +2
dicct345_tilt_angles = [30, 88.773, 98.139, 91.748]
agcct345_tilt_angles = [101.792, 30, 62.677, 89.705]
dicct345_current = 9409.261
agcct345_current = -7107.359
agcct3_winding_number = 25
agcct4_winding_number = 40
agcct5_winding_number = 34
agcct3_bending_angle = -67.5 * (25 / (25 + 40 + 34))
agcct4_bending_angle = -67.5 * (40 / (25 + 40 + 34))
agcct5_bending_angle = -67.5 * (34 / (25 + 40 + 34))
agcct345_inner_small_r = 92.5 * MM + 0.1*MM # 92.5
agcct345_outer_small_r = 108.5 * MM + 0.1*MM # 83+15
dicct345_inner_small_r = 124.5 * MM + 0.1*MM # 83+30+1
dicct345_outer_small_r = 140.5 * MM + 0.1*MM # 83+45 +2
dicct345_winding_number = 128
part_per_winding = 60
cct345_big_r = 0.95
cct12_big_r = 0.95
    return (Beamline.set_start_point(P2.origin())  # set the start point of the beamline
            # first drift section of the beamline (a beamline must start with a drift)
.first_drift(direct=P2.x_direct(), length=DL1)
            .append_agcct(  # append an AGCCT
        big_r=cct12_big_r,  # bending radius
        # apertures of the dipole CCT and the quadrupole CCT
small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,
agcct12_outer_small_r, agcct12_inner_small_r],
bending_angles=[agcct1_bending_angle,
                        agcct2_bending_angle],  # bending angle of each AGCCT segment
tilt_angles=[dicct12_tilt_angles,
                     agcct12_tilt_angles],  # tilt angles of the dipole and quadrupole CCTs
winding_numbers=[[dicct12_winding_number], [
            agcct1_winding_number, agcct2_winding_number]],  # winding numbers of the dipole and quadrupole CCTs
        # currents of the dipole and quadrupole CCTs
currents=[dicct12_current, agcct12_current],
        disperse_number_per_winding=part_per_winding  # number of segments per winding
)
            .append_drift(GAP1)  # append a drift section
            .append_qs(  # append a QS magnet
length=qs1_length,
gradient=qs1_gradient,
second_gradient=qs1_second_gradient,
aperture_radius=qs1_aperture_radius
)
.append_drift(GAP1)
.append_agcct(
big_r=cct12_big_r,
small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,
agcct12_outer_small_r, agcct12_inner_small_r],
bending_angles=[agcct2_bending_angle,
agcct1_bending_angle],
tilt_angles=[dicct12_tilt_angles,
agcct12_tilt_angles],
winding_numbers=[[dicct12_winding_number], [
agcct2_winding_number, agcct1_winding_number]],
currents=[dicct12_current, agcct12_current],
disperse_number_per_winding=part_per_winding
)
.append_drift((DL1+DL2-1-0.27*2)/2)
.append_qs(0.27,q1,0,60*MM)
.append_drift(length=1*M)
.append_qs(0.27,q2,0,60*MM)
.append_drift((DL1+DL2-1-0.27*2)/2)
# .append_drift(DL1)
# .append_drift(DL2)
.append_agcct(
big_r=cct345_big_r,
small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,
agcct345_outer_small_r, agcct345_inner_small_r],
bending_angles=[agcct3_bending_angle,
agcct4_bending_angle, agcct5_bending_angle],
tilt_angles=[dicct345_tilt_angles,
agcct345_tilt_angles],
winding_numbers=[[dicct345_winding_number], [
agcct3_winding_number, agcct4_winding_number, agcct5_winding_number]],
currents=[dicct345_current, agcct345_current],
disperse_number_per_winding=part_per_winding
)
.append_drift(GAP2)
.append_qs(
length=qs2_length,
gradient=qs2_gradient,
second_gradient=qs2_second_gradient,
aperture_radius=qs2_aperture_radius
)
.append_drift(GAP2)
.append_agcct(
big_r=cct345_big_r,
small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,
agcct345_outer_small_r, agcct345_inner_small_r],
bending_angles=[agcct5_bending_angle,
agcct4_bending_angle, agcct3_bending_angle],
tilt_angles=[dicct345_tilt_angles,
agcct345_tilt_angles],
winding_numbers=[[dicct345_winding_number], [
agcct5_winding_number, agcct4_winding_number, agcct3_winding_number]],
currents=[dicct345_current, agcct345_current],
disperse_number_per_winding=part_per_winding
)
.append_drift(DL2)
)
total_beamline = create_gantry_beamline()
total_beamline_length = total_beamline.get_length()
# ideal particle at the start point
ip_start = ParticleFactory.create_proton_along(
    trajectory=total_beamline,
    s=0.0,
    kinetic_MeV=215
)
# ideal particle at the end point
ip_end = ParticleFactory.create_proton_along(
trajectory=total_beamline,
s=total_beamline_length,
kinetic_MeV=215
)
# particles on the phase-space ellipses
pps = []
for dp in momentum_dispersions:
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5 * MM, xpMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
yMax=3.5 * MM, ypMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
# iteration counter
times = 1
# all parameters/variables and objectives
params_and_objs = []
# run with the given variables
def run(params: np.ndarray):
global times
start_time = time.time()
gantry_number = params.shape[0]
print(f"机架数目{gantry_number}")
# 创建机架
beamlines = create_beamlines(gantry_number, params)
print(f"制作机架用时{time.time() - start_time}")
# 将相空间相椭圆粒子转为实际粒子
ps = ParticleFactory.create_from_phase_space_particles(
ip_start, ip_start.get_natural_coordinate_system(), pps
)
print(f"粒子总数{len(ps) * gantry_number}")
# 核心,调用 GPU 运行
ps_end_list_list = ga32.track_multi_particle_beamline_for_magnet_with_multi_qs(
bls=beamlines,
ps=ps,
distance=total_beamline_length,
footstep=50 * MM
)
    # statistics accumulators
    statistic_x = BaseUtils.Statistic()
    statistic_y = BaseUtils.Statistic()
    statistic_beam_sizes = BaseUtils.Statistic()
    # objectives of all gantries
    objs: List[List[float]] = []
    # for each gantry
for gid in range(gantry_number): # ~120
#
ps_end_list_each_gantry: List[RunningParticle] = ps_end_list_list[gid]
        # for unknown reasons, some particles' scalar speed and their velocity magnitude differ greatly
for p in ps_end_list_each_gantry:
p.speed = p.velocity.length()
pps_end_each_gantry: List[PhaseSpaceParticle] = PhaseSpaceParticle.create_from_running_particles(
ip_end, ip_end.get_natural_coordinate_system(), ps_end_list_each_gantry
)
        # objectives of a single gantry
        obj: List[float] = []
        # for all particles
        for pid in range(0, len(pps_end_each_gantry), particle_number_per_plane_per_dp):
            # every 12 particles (one plane, one momentum dispersion)
            # grouped in sets of particle_number_per_plane_per_dp
            for pp in pps_end_each_gantry[pid:pid + particle_number_per_plane_per_dp]:
                # accumulate x and y
                statistic_x.add(pp.x / MM)
                statistic_y.add(pp.y / MM) # mm
            # compute the beam spot size in each direction
beam_size_x = (statistic_x.max() - statistic_x.min()) / 2
beam_size_y = (statistic_y.max() - statistic_y.min()) / 2
statistic_x.clear()
statistic_y.clear()
            # only the larger of x and y is needed
            beam_size = max(beam_size_x, beam_size_y)
            statistic_beam_sizes.add(beam_size) # used to compute the average
            obj.append(beam_size) # record each beam spot size
        # average
beam_size_avg = statistic_beam_sizes.average()
objs.append([abs(bs - beam_size_avg) for bs in obj] + [beam_size_avg])
statistic_beam_sizes.clear()
objs_np = np.array(objs)
for gid in range(gantry_number):
param = params[gid]
obj = objs_np[gid]
params_and_objs.append(np.concatenate((param, obj)))
np.savetxt(fname='./record/' + str(times) + '.txt', X=params_and_objs)
times += 1
print(f"用时{time.time() - start_time} s")
return objs_np
def create_beamlines(gantry_number, params):
return BaseUtils.submit_process_task(
task=create_gantry_beamline,
param_list=[[params[i]] for i in range(gantry_number)]
)
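# Illustrative invocation sketch (not part of the original script): each row of
# the `params` array passed to run() carries the 16 tunable values in the order
# unpacked by create_gantry_beamline(), i.e.
# [qs1_gradient, qs2_gradient, qs1_second_gradient, qs2_second_gradient,
#  dicct_tilt_1, dicct_tilt_2, dicct_tilt_3, agcct_tilt_0, agcct_tilt_2, agcct_tilt_3,
#  dicct12_current, agcct12_current, agcct1_wn, agcct2_wn, q1, q2].
# The numbers below are placeholders that only show the expected shape (2 gantries x 16):
# candidates = np.array([
#     [0, 0, 0, 0, 90, 90, 90, 90, 90, 90, 10000, 10000, 20, 20, 0, 0],
#     [5, -5, 0, 0, 85, 95, 88, 92, 91, 89, 9500, -9500, 21, 19, 2, -2],
# ])
# objectives = run(candidates)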
|
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
primenum = 10001
i=0
testnum=2
while i < primenum:
checkprime = True
for j in range(2,testnum):
if testnum%j==0:
checkprime=False
if checkprime==True:
i+=1
currentprime=testnum
# print("prime number",i,"is",currentprime)
testnum+=1
print(currentprime)
# This code takes a while to run (10m4s)
# We could speed it up by doing things like skipping numbers divisible by 2 and 3 when checking
primenum = 10001
i=1 # we're skipping evens, so we now need to add 1 to our count for 2
testnum=3
while i < primenum:
checkprime = True
for j in range(3,testnum):
if testnum%j==0:
checkprime=False
if checkprime==True:
i+=1
currentprime=testnum
# print("prime number",i,"is",currentprime)
testnum+=2
print(currentprime)
# This code is faster (twice as fast @ 5m6s), but there's still room for improvement:
primenum = 10001
i=3 # including 2,3,5 that we skip
testnum=7
while i < primenum:
checkprime = True
for j in range(7,testnum):
if testnum%j==0:
checkprime=False
if checkprime==True:
i+=1
currentprime=testnum
# print("prime number",i,"is",currentprime)
testnum+=2
if testnum%3==0:
testnum+=2
if testnum%5==0:
testnum+=2
print(currentprime)
# We're now down to 3m3s, which is over 3 times as fast as the original code
# There are more complicated math tricks involving prime numbers, but this speeds up the code a lot without going crazy.
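
# A faster sketch, not part of the original exploration above: a sieve of
# Eratosthenes trades a pre-chosen upper bound (120000 here, comfortably above
# the 10,001st prime) for a run time well under a second.
def nth_prime_by_sieve(n, limit=120000):
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # mark every multiple of p from p*p upwards as composite
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    primes = [i for i, flag in enumerate(is_prime) if flag]
    return primes[n - 1]

print(nth_prime_by_sieve(10001))  # 104743, matching the loops above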
|
def is_matched(expression):
stack = []
pair_lookup = {'{': '}', '(': ')', '[': ']'}
    # for each opening bracket, push its closing counterpart onto the stack
for ch in expression:
if ch in pair_lookup:
stack.append(pair_lookup[ch])
        # when we encounter a closing character, check that the stack is not empty and that the
        # top of the stack matches it
        # otherwise, we know that the expression is not balanced
else:
if not stack or stack[-1] != ch:
return False
else:
stack.pop()
# stack should be empty after going through all the characters in expression
return len(stack) == 0
# expr = '{[(])}'
# is_matched(expr)
# expr = '{[(])}'
# stack = []
# pair_lookup = {'{': '}', '(': ')', '[': ']'}
t = int(input().strip())
for a0 in range(t):
expression = input().strip()
if is_matched(expression):
print("YES")
else:
print("NO")
import bisect
|
numbers=[2,3,1,6,4,8,9]
numbers.insert(0,22) # the insert method adds an item into the list at the given index
print(numbers)
|
#! /usr/bin/env python
import sys
import os
import argparse
from array import *
import numpy as np
import ROOT
import yaml
from pyjetty.alice_analysis.analysis.user.james import run_analysis_james_base
from pyjetty.alice_analysis.analysis.user.james import plotting_utils_subjet_z
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
################################################################
class RunAnalysisSubjetZ(run_analysis_james_base.RunAnalysisJamesBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, config_file='', **kwargs):
super(RunAnalysisSubjetZ, self).__init__(config_file, **kwargs)
print(self)
#---------------------------------------------------------------
# This function is called once for each subconfiguration
#---------------------------------------------------------------
def plot_single_result(self, jetR, obs_label, obs_setting, grooming_setting):
print('Plotting each individual result...')
# Plot final result for each 1D substructure distribution (with PYTHIA)
self.plot_final_result(jetR, obs_label, obs_setting, grooming_setting)
#----------------------------------------------------------------------
def plot_final_result(self, jetR, obs_label, obs_setting, grooming_setting):
print('Plot final results for {}: R = {}, {}'.format(self.observable, jetR, obs_label))
self.utils.set_plotting_options()
ROOT.gROOT.ForceStyle()
# Loop through pt slices, and plot final result for each 1D theta_g distribution
for bin in range(0, len(self.pt_bins_reported) - 1):
min_pt_truth = self.pt_bins_reported[bin]
max_pt_truth = self.pt_bins_reported[bin+1]
self.plot_observable(jetR, obs_label, obs_setting, grooming_setting, min_pt_truth, max_pt_truth, plot_pythia=True)
#---------------------------------------------------------------
# This function is called once after all subconfigurations have been looped over, for each R
#---------------------------------------------------------------
def plot_all_results(self, jetR):
print('Plotting overlay of all results...')
for i_config, overlay_list in enumerate(self.plot_overlay_list):
if len(overlay_list) > 1:
self.plot_final_result_overlay(i_config, jetR, overlay_list)
#----------------------------------------------------------------------
# This function is called once after all subconfigurations and jetR have been looped over
#----------------------------------------------------------------------
def plot_performance(self):
if not self.do_plot_performance:
return
print('Plotting performance plots...')
# Initialize performance plotting class, and plot
if self.is_pp:
self.plotting_utils = plotting_utils_subjet_z.PlottingUtils(self.output_dir_performance, self.config_file)
self.plot_single_performance(self.output_dir_performance)
else:
# Plot for each R_max
for R_max in self.max_distance:
output_dir_performance = os.path.join(self.output_dir_performance, 'Rmax{}'.format(R_max))
self.plotting_utils = plotting_utils_subjet_z.PlottingUtils(output_dir_performance, self.config_file, R_max = R_max)
self.plot_single_performance(output_dir_performance, R_max)
# Plot for thermal model
if self.do_thermal_closure and R_max == self.R_max:
output_dir_performance = os.path.join(self.output_dir_performance, 'thermal')
self.plotting_utils = plotting_utils_subjet_z.PlottingUtils(output_dir_performance, self.config_file, R_max = R_max, thermal = True)
self.plot_single_performance(output_dir_performance, R_max)
#----------------------------------------------------------------------
# This function is called once after all subconfigurations and jetR have been looped over
#----------------------------------------------------------------------
def plot_single_performance(self, output_dir_performance, R_max = None):
if R_max:
suffix = '_Rmax{}'.format(R_max)
else:
suffix = ''
# Create output subdirectories
self.create_output_subdir(output_dir_performance, 'jet')
self.create_output_subdir(output_dir_performance, 'resolution')
self.create_output_subdir(output_dir_performance, 'residual_pt')
self.create_output_subdir(output_dir_performance, 'residual_obs')
self.create_output_subdir(output_dir_performance, 'mc_projections_det')
self.create_output_subdir(output_dir_performance, 'mc_projections_truth')
self.create_output_subdir(output_dir_performance, 'truth')
self.create_output_subdir(output_dir_performance, 'data')
if 'leading' in self.observable:
self.create_output_subdir(output_dir_performance, 'z1_crosscheck')
if self.is_pp and 'inclusive' in self.observable:
self.create_output_subdir(output_dir_performance, 'subjet_matching_pp')
if not self.is_pp:
self.create_output_subdir(output_dir_performance, 'delta_pt')
# Generate performance plots
for jetR in self.jetR_list:
# Plot some subobservable-independent performance plots
self.plotting_utils.plot_DeltaR(jetR, self.jet_matching_distance)
self.plotting_utils.plot_JES(jetR)
self.plotting_utils.plot_JES_proj(jetR, self.pt_bins_reported)
self.plotting_utils.plotJER(jetR, self.utils.obs_label(self.obs_settings[0], self.grooming_settings[0]))
self.plotting_utils.plot_jet_reco_efficiency(jetR, self.utils.obs_label(self.obs_settings[0], self.grooming_settings[0]))
if not self.is_pp:
self.plotting_utils.plot_delta_pt(jetR, self.pt_bins_reported)
# Plot subobservable-dependent performance plots
for i, _ in enumerate(self.obs_subconfig_list):
obs_setting = self.obs_settings[i]
grooming_setting = self.grooming_settings[i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
if (jetR - obs_setting) < 1e-3:
continue
self.plotting_utils.plot_subjet_DeltaR(jetR, obs_label, self.jet_matching_distance)
self.plotting_utils.plot_obs_resolution(jetR, obs_label, self.xtitle, self.pt_bins_reported)
self.plotting_utils.plot_obs_residual_pt(jetR, obs_label, self.xtitle, self.pt_bins_reported)
self.plotting_utils.plot_obs_residual_obs(jetR, obs_label, self.xtitle)
self.plotting_utils.plot_obs_projections(jetR, obs_label, obs_setting, grooming_setting, self.xtitle, self.pt_bins_reported)
self.plotting_utils.plot_obs_truth(jetR, obs_label, obs_setting, grooming_setting, self.xtitle, self.pt_bins_reported)
if 'leading' in self.observable:
self.plotting_utils.plot_z1_crosscheck(jetR, obs_label, obs_setting, grooming_setting, self.xtitle, self.pt_bins_reported)
if self.is_pp and 'inclusive' in self.observable:
self.plotting_utils.plot_subjet_matching_pp(jetR, obs_label, obs_setting, grooming_setting, self.xtitle, self.pt_bins_reported)
if not self.is_pp:
# Plot subjet matched pt histograms
self.prong_match_threshold = 0.5
min_pt = 80.
max_pt = 100.
for i, overlay_list in enumerate(self.plot_overlay_list):
self.create_output_subdir(output_dir_performance, 'matched_pt_fraction_pt')
hname = 'h_{}_matched_pt_JetPt_R{}'.format(self.observable, jetR)
self.plotting_utils.plot_subjet_matching(i, jetR, hname, self.obs_subconfig_list, self.obs_settings, self.grooming_settings, overlay_list, self.prong_match_threshold, thermal=True)
self.create_output_subdir(output_dir_performance, 'prong_matching_deltaR')
self.create_output_subdir(output_dir_performance, 'prong_matching_deltaZ')
name_prefix = f'h_{self.observable}_matched_pt_deltaZ_JetPt_R{jetR}'
self.plotting_utils.plot_prong_matching_delta(i, jetR, name_prefix, self.obs_subconfig_list, self.obs_settings, self.grooming_settings, overlay_list, self.prong_match_threshold, min_pt, max_pt, plot_deltaz=True, plot_matched=True)
self.plotting_utils.plot_prong_matching_delta(i, jetR, name_prefix, self.obs_subconfig_list, self.obs_settings, self.grooming_settings, overlay_list, self.prong_match_threshold, min_pt, max_pt, plot_deltaz=True, plot_matched=False)
name_prefix = f'h_{self.observable}_matched_pt_deltaR_JetPt_R{jetR}'
self.plotting_utils.plot_prong_matching_delta(i, jetR, name_prefix, self.obs_subconfig_list, self.obs_settings, self.grooming_settings, overlay_list, self.prong_match_threshold, min_pt, max_pt, plot_deltaz=False, plot_matched=True)
self.plotting_utils.plot_prong_matching_delta(i, jetR, name_prefix, self.obs_subconfig_list, self.obs_settings, self.grooming_settings, overlay_list, self.prong_match_threshold, min_pt, max_pt, plot_deltaz=False, plot_matched=False)
for i, _ in enumerate(self.obs_subconfig_list):
obs_setting = self.obs_settings[i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
if (jetR - obs_setting) < 1e-3:
continue
output_dir_money = os.path.join(output_dir_performance, 'matched_pt_money')
self.create_output_subdir(output_dir_money, os.path.join(str(jetR), str(obs_setting)))
self.plotting_utils.plot_subjet_money_plot(self.observable, jetR, R_max, self.prong_match_threshold,
obs_setting, self.pt_bins_reported,
output_dir_money, self.ytitle, thermal=False)
#----------------------------------------------------------------------
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Jet substructure analysis')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='analysis_config.yaml',
help='Path of config file for analysis')
# Parse the arguments
args = parser.parse_args()
print('Configuring...')
print('configFile: \'{0}\''.format(args.configFile))
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
analysis = RunAnalysisSubjetZ(config_file = args.configFile)
analysis.run_analysis()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.jvm.resolve.jvm_tool import JvmToolBase
from pants.option.option_types import SkipOption
class ScalafmtSubsystem(JvmToolBase):
options_scope = "scalafmt"
name = "scalafmt"
help = "scalafmt (https://scalameta.org/scalafmt/)"
default_version = "3.2.1"
default_artifacts = ("org.scalameta:scalafmt-cli_2.13:{version}",)
default_lockfile_resource = (
"pants.backend.scala.lint.scalafmt",
"scalafmt.default.lockfile.txt",
)
skip = SkipOption("fmt", "lint")
|
# -*- coding: UTF-8 -*-
li = ["a", "b", "mpilgrim", "z", "example"]
print li
print li[1]
li.append("new")
print li
li.insert(2, "new")
print li
li.extend(["two", "elements"])
print li
print li.index("example")
print "c" in li
print "example" in li
li.remove("a")
print li
li.remove("new") # 删除首次出现的一个值
print li
print li.pop() # pop does two things: it removes the last element of the list and returns its value.
print li
li = li + ['example', 'new']
print li
li += ['two']
print li
li = [1, 2] * 3
print li
params = {"server": "mpilgrim", "database": "master", "uid": "sa", "pwd": "secret"}
print ["%s=%s" % (k, v) for k, v in params.items()]
print ";<-->;".join(["%s=%s" % (k, v) for k, v in params.items()])
li = ['server=mpilgrim', 'uid=sa', 'database=master', 'pwd=secret']
print li
s = ";".join(li)
print s
print s.split(";")
print s.split(";", 1)
li = [1, 9, 8, 4]
li.sort()
print li
print [elem * 2 for elem in li]
li = [elem * 2 for elem in li]
print li
params = {"server": "mpilgrim", "database": "master", "uid": "sa", "pwd": "secret"}
print params.keys()
print params.values()
print params.items()
print [k for k, v in params.items()]
print [v for k, v in params.items()]
print ['%s?%s' %(k, v) for k, v in params.items()]
print ["%s=%s" % (k, v) for k, v in params.items()]
# filtering a list
li = ["a", "mpilgrim", "foo", "b", "c", "b", "d", "d"]
print li
print [elem for elem in li if len(elem) > 1]
print [elem for elem in li if len(elem) == 1]
print [elem for elem in li if elem != "b"]
print [elem for elem in li if li.count(elem) == 1]
|
import unittest
from knowledge_graph.Mind import Mind
class TestLoadingOntology(unittest.TestCase):
def test_valid_ontology_source(self):
onto = Mind()
self.assertIsNotNone(onto.get_ontology())
|
import unittest
from look_and_say import *
# http://dojopuzzles.com/problemas/exibe/sequencia-look-and-say/
class LookAndSayTest(unittest.TestCase):
def test_menor_numero(self):
self.assertEquals(11, look_and_say(1))
def test_segundo_menor_numero(self):
self.assertEquals(12, look_and_say(2))
def test_menor_dezena(self):
self.assertEquals(1110, look_and_say(10))
def test_com_dezena(self):
self.assertEquals(1112, look_and_say(12))
def test_com_segunda_dezena(self):
self.assertEquals(1210, look_and_say(20))
def test_com_outra_terceira_dezena(self):
self.assertEquals(1310, look_and_say(30))
def test_dezena_com_unidade_diferente_de_zero(self):
self.assertEquals(1915, look_and_say(95))
def test_dois_algarismos_iguais(self):
self.assertEquals(21, look_and_say(11))
def test_centena_sem_repetir(self):
self.assertEquals(111213, look_and_say(123))
def test_centena_repetindo_unidade_e_dezena(self):
self.assertEquals(1122, look_and_say(122))
def test_centena_repetindo_centena_e_dezena(self):
self.assertEquals(2112, look_and_say(112))
def test_centena_repetindo_tudo(self):
self.assertEquals(31, look_and_say(111))
unittest.main()
|
#!/usr/bin/env python
import argparse
from braindecode.experiments.parse import (
create_experiment_yaml_strings_from_files, create_config_strings, create_config_objects,
create_templates_variants_from_config_objects,
process_parameters_by_templates, process_templates)
import numbers
def parse_command_line_arguments():
parser = argparse.ArgumentParser(
description="""Print results stored in a folder.
Example: ./scripts/create_hyperopt_files.py configs/experiments/bci_competition/combined/raw_net_150_fs.yaml """
)
parser.add_argument('experiments_file_name', action='store',
choices=None,
help='Yaml experiment file to base hyperopt config on.')
args = parser.parse_args()
return args
def create_hyperopt_files(experiments_file_name):
## Get templates and Variants
config_strings = create_config_strings(experiments_file_name)
config_objects = create_config_objects(config_strings)
templates, variants = create_templates_variants_from_config_objects(config_objects)
## Create template string and save to file
template_str = "{\n templates: {\n"
for key in templates:
template_str += " {:s}: {:s},\n".format(key, templates[key])
template_str += "}}\n\n"
with open('hyperopt_template.yaml', 'w') as template_file:
template_file.write(template_str)
## Create parameter ranges and save to .pcs-file
# Fold param variants into one dict param -> list of possibilities
parameter_ranges = dict()
for param_dict in variants:
for key in param_dict:
if key in parameter_ranges and (param_dict[key] not in parameter_ranges[key]):
parameter_ranges[key].append(param_dict[key])
if key not in parameter_ranges:
parameter_ranges[key] = [param_dict[key]]
# Delete unnecessary stuff, add template name reminder
parameter_ranges.pop('dataset_filename')
parameter_ranges.pop('save_path')
parameter_ranges['template_name'] = ['!ADD_TEMPLATE_FILE_NAME!']
# Build string
hyperopt_param_string = ""
for key, values in parameter_ranges.iteritems():
# take middle value as default value
default_str = "[{:s}]".format(str(values[len(values) // 2]))
if len(values) == 1:
val_str = "{{{:s}}}".format(str(values[0]))
else:
is_integer = False
if all(isinstance(val, numbers.Number) for val in values):
val_str = "[{:s}, {:s}]".format(str(values[0]), str(values[-1]))
                is_integer = all(float(val).is_integer() for val in values)
if (is_integer):
default_str += 'i'
else:
val_str = str(values).replace('(', '{').replace(')', '}')
line = "{:30s} {:30s} {:s}\n".format(str(key), val_str, default_str)
line = line.replace("$", "**")
# correct indentation
line = line.replace(" [**", "[**")
hyperopt_param_string += line
with open('hyperopt_params.pcs', 'w') as param_file:
param_file.write(hyperopt_param_string)
if __name__ == "__main__":
args = parse_command_line_arguments()
create_hyperopt_files(args.experiments_file_name)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
get_user_hw_action_list_query = """
SELECT hwa.id AS id,
uhwa.user_id AS user_id,
uhwa.value as value,
hwa.name AS action_name,
hwa.hw_action_type_id AS hw_action_type_id,
hwat.name AS hw_action_type,
hwa.min_value AS action_min_value,
hwa.max_value AS action_max_value,
hwa.active AS active,
hwa.deleted AS deleted
FROM public.user_hw_action AS uhwa
LEFT OUTER JOIN public.hw_action AS hwa ON hwa.id = uhwa.hw_action_id
LEFT OUTER JOIN public.hw_action_type AS hwat ON hwat.id = hwa.hw_action_type_id
WHERE uhwa.deleted is FALSE
AND ($1::BIGINT is NULL OR uhwa.user_id = $1::BIGINT)
AND (
$2::VARCHAR is NULL OR
hwa.name ILIKE $2::VARCHAR || '%' OR
hwa.name ILIKE '%' || $2::VARCHAR || '%' OR
hwa.name ILIKE $2::VARCHAR || '%')
"""
get_user_hw_action_list_count_query = """
SELECT count(*) AS user_hw_action_count
FROM public.user_hw_action AS uhwa
LEFT OUTER JOIN public.hw_action AS hwa ON hwa.id = uhwa.hw_action_id
LEFT OUTER JOIN public.hw_action_type AS hwat ON hwat.id = hwa.hw_action_type_id
WHERE uhwa.deleted is FALSE
AND ($1::BIGINT is NULL OR uhwa.user_id = $1::BIGINT)
AND (
$2::VARCHAR is NULL OR
hwa.name ILIKE $2::VARCHAR || '%' OR
hwa.name ILIKE '%' || $2::VARCHAR || '%' OR
hwa.name ILIKE $2::VARCHAR || '%')
"""
get_user_hw_action_element_query = """
SELECT hwa.id AS id,
uhwa.user_id AS user_id,
uhwa.value as value,
hwa.name AS action_name,
hwa.hw_action_type_id AS hw_action_type_id,
hwat.name AS hw_action_type,
hwa.min_value AS action_min_value,
hwa.max_value AS action_max_value,
hwa.active AS active,
hwa.deleted AS deleted
FROM public.user_hw_action AS uhwa
LEFT OUTER JOIN public.hw_action AS hwa ON hwa.id = uhwa.hw_action_id
LEFT OUTER JOIN public.hw_action_type AS hwat ON hwat.id = hwa.hw_action_type_id
WHERE uhwa.deleted is FALSE
AND uhwa.user_id = $1::BIGINT
AND uhwa.id = $2::BIGINT
"""
get_user_hw_action_times_by_location_id_query = """
SELECT coalesce(
to_char(min(date_from), 'YYYY-MM-DD"T"HH24:MI:SS"Z"'), NULL ) AS min_date,
coalesce(to_char(max(date_to), 'YYYY-MM-DD"T"HH24:MI:SS"Z"'), NULL) AS max_date FROM public.user_hw_action AS uha
LEFT OUTER JOIN public.user_hw_action_location_association AS uhala ON uha.id = uhala.user_hw_action_id
WHERE uhala.location_id = $1;"""
|
__author__ = "Narwhale"
animals = ['dog','lion','leopard']
for i in animals:
print(i)
#----------------------------------
for i in animals:
print('A %s will run'%i)
    print('Any of these animals will run')
|
"""
Serializers for consuming job submissions for running models and generating reports
"""
import json
from django.conf import settings
from rest_framework import serializers
from landscapesim.async.tasks import run_model
from landscapesim.models import Library, Project, Scenario, RunScenarioModel
from landscapesim.serializers import imports
STSIM_MULTIPLIER_DIR = getattr(settings, 'STSIM_MULTIPLIER_DIR')
# Need to know the library_name, and the inner project and scenario ids for any job
BASIC_JOB_INPUTS = ['library_name', 'pid', 'sid']
class AsyncJobSerializerMixin(object):
"""
A base mixin for serializing the inputs and outputs, and validating that the minimum job info is provided.
"""
status = serializers.CharField(read_only=True)
inputs = serializers.JSONField(allow_null=True)
outputs = serializers.JSONField(read_only=True)
job_inputs = BASIC_JOB_INPUTS
def validate_inputs(self, value):
if value:
try:
value = json.loads(value)
if all(x in value.keys() for x in self.job_inputs):
return value
else:
raise serializers.ValidationError('Missing one of {}'.format(self.job_inputs))
except ValueError:
raise serializers.ValidationError('Invalid input JSON')
return {}
class RunModelReadOnlySerializer(AsyncJobSerializerMixin, serializers.ModelSerializer):
model_status = serializers.CharField(read_only=True)
class Meta:
model = RunScenarioModel
fields = ('uuid', 'created', 'status', 'model_status', 'progress', 'outputs', 'parent_scenario', 'result_scenario')
read_only_fields = ('uuid', 'created', 'status', 'outputs', 'parent_scenario', 'result_scenario')
class RunModelCreateSerializer(AsyncJobSerializerMixin, serializers.ModelSerializer):
""" Initial model run validation """
model_status = serializers.CharField(read_only=True)
class Meta:
model = RunScenarioModel
fields = ('uuid', 'created', 'status', 'model_status', 'progress', 'inputs', 'outputs', 'parent_scenario', 'result_scenario')
read_only_fields = ('uuid', 'created', 'status', 'outputs', 'parent_scenario', 'result_scenario')
def validate_inputs(self, value):
value = super(RunModelCreateSerializer, self).validate_inputs(value)
if value:
try:
config = value['config']
                # Ensure that all configuration keys are supplied. Validation of these inputs is handled asynchronously
# within the Celery.run_model task.
if not all(x[0] in config.keys() for x in imports.CONFIG_INPUTS + imports.VALUE_INPUTS):
raise serializers.ValidationError(
'Missing configurations within {}. Got the following configuration keys: {}'.format(
imports.CONFIG_INPUTS, list(config.keys())
)
)
# Return the value unchanged
return value
except ValueError:
raise serializers.ValidationError('Malformed configuration')
return {}
def create(self, validated_data):
library_name = validated_data['inputs']['library_name']
pid = validated_data['inputs']['pid']
sid = validated_data['inputs']['sid']
lib = Library.objects.get(name__exact=library_name)
proj = Project.objects.get(library=lib, pid=int(pid))
parent_scenario = Scenario.objects.get(project=proj, sid=int(sid))
result = run_model.delay(library_name, sid)
return RunScenarioModel.objects.create(
parent_scenario=parent_scenario,
celery_id=result.id,
inputs=json.dumps(validated_data['inputs']),
model_status='waiting'
)
|
import matplotlib.pyplot as plt
import fast_cppn
import quad_tree
import queue
import random
def get_pattern(cppn, x1, y1, x2_range=[-1, 1], y2_range=[-1, 1],
step=0.05, threshold=None):
import numpy as np
output = []
i = 0
for y in np.arange(y2_range[0], y2_range[1], step):
output.append([])
for x in np.arange(x2_range[0], x2_range[1], step):
temp = cppn.run(x1,y1,x,y)[0]
if threshold is not None:
if abs(temp) > threshold:
output[i].append(temp)
else:
output[i].append(0)
else:
output[i].append(temp)
cppn.restart_network()
i += 1
return np.array(output)
def show_cppn(output, colormap="BuGn"):
import matplotlib.pyplot as plt
cs = plt.contourf(output, 100, cmap=colormap,
origin='lower',extent=[-1, 1, -1, 1])
plt.colorbar()
ax = plt.gca() # gca stands for 'get current axis'
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
return ax
def overlay_tree(pattern, quad_tree, var_thr,colormap="autumn"):
ax = show_cppn(pattern, colormap)
q = queue.Queue()
q.put(quad_tree)
while not q.empty():
p = q.get()
if p.variance > var_thr:
for child in p.children:
q.put(child)
else:
ax.add_patch(plt.Rectangle((p.x-p.width,p.y-p.width), p.width*2, p.width*2, alpha = 1, fill = None))
ax.add_patch(plt.Circle((p.x,p.y), 0.01, fc = 'r'))
q.task_done()
plt.show()
return
def plot_quadtree(quad_tree, var_thr):
ax = plt.gca()
q = queue.Queue()
q.put(quad_tree)
while not q.empty():
p = q.get()
if p.variance > var_thr:
for child in p.children:
q.put(child)
else:
if p.weight < 0.5:
ax.add_patch(plt.Rectangle((p.x-p.width,p.y-p.width), p.width*2, p.width*2, alpha = 0.5, color = 'k'))
ax.add_patch(plt.Circle((p.x,p.y), 0.01, fc = 'r'))
q.task_done()
plt.axis([-1, 1, -1, 1])
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
plt.show()
return
def get_rectangle(node, color):
x, y = offset_center(node)
return plt.Rectangle((x, y), node.width, node.width, fc=color)
test_net = fast_cppn.fast_network(4, 1, node_count=5)
test_net.add_connection(0, 4, 5.)
test_net.add_connection(1, 4, 5.)
test_net.add_connection(2, 4, 5.)
test_net.add_connection(3, 4, 5.)
test_net.set_activation_function(0, 0)
test_net.set_activation_function(1, 0)
test_net.set_activation_function(2, 0)
test_net.set_activation_function(3, 0)
test_net.set_activation_function(4, 3)
div_thr = 0.003
var_thr = 0.003
band_thr = 0.003
pattern = get_pattern(test_net,0,0)
qtr = quad_tree.Quadtree()
#a, b, cppn, div_thr = 0.03 , outgoing = False, initialDepth =4, maxDepth = 4
qtr.division_initialization(0, 0, test_net)
print(qtr.prune_extract(0,0,qtr,test_net))
overlay_tree(pattern, qtr, var_thr, "bone")
|
#-*- coding:utf-8 -*-
"""
if __name__ == '__main__':
tlist = []
head, mid, rear = 1, 0, 1
for i in range(input()):
tlist.append(head)
mid = int(head+rear)
head = rear
rear = mid
print tlist[i]
"""
tlist = []
head, mid, rear = 1, 2, 2
for i in range(1, input()):
tlist.append(head)
mid = int(head+rear)
head = rear
rear = mid
print tlist[i-1]
|
"""
Logic of the mongoDB interaction.
If you wish to use another database, modify this file
and this file only.
"""
import pymongo
from pymongo.errors import PyMongoError
import config as cf
class DBError(PyMongoError):
""" Base class for Database errors, inheriting from
the base class for all pymongo errors.
"""
def connect(host=cf.HOST, port=cf.PORT):
""" Connect to mongoDB using the parameters
specified in the config.py module.
"""
return pymongo.Connection(host, port)
def twitter_collection(db_name=cf.TW_DB_NAME, collection_name=cf.TW_COLLECTION_NAME):
""" Return the twitter collection specified in the
config.py module.
"""
return connect()[db_name][collection_name]
def insert(item, collection):
""" Insert the argument item in the argument mongoDB collection.
If the item already is in the collection, the insertion will
fail silently (default behaviour of mongoDB).
"""
try:
collection.insert(item)
except PyMongoError, error:
raise DBError(error)
def size(collection):
""" Returns the size of the argument mongoDB collection. """
return collection.find().count()
def name(collection):
""" Returns the full name (db_name.collection_name) of a
mongoDB collection.
"""
return collection.full_name
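# Minimal usage sketch (assumes a mongod instance reachable with the host/port
# from config.py; not part of the original module):
# collection = twitter_collection()
# insert({"text": "hello world"}, collection)
# print size(collection), name(collection)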
|
ct = 1
mtx = []
lenMtx = int(input("Enter values to create a matrix : "))
for i in range(lenMtx) :
for x in range(1, lenMtx + 1) :
if x != ct :
mtx.append(0)
else :
mtx.append(1)
ct += 1
print(mtx)
|
# I'm using horizontal bars as I think they're more space efficient than vertical ones
# It also seems easier to draw vertical comparisons between bars
import numpy as np
import matplotlib.pyplot as plt
import pandas
from textwrap import wrap
from matplotlib.ticker import FuncFormatter
import locale
locale.setlocale(locale.LC_ALL, 'id_ID.UTF8')
berkasData = r'D:\path\ke\direktori\profil_kesehatan\bab_05\bab_05_14_dataKN1KN3.csv'
judulDiagram = 'Kunjungan Neonatal'
sumbuX = 'Cakupan'
sumbuY = 'Puskesmas'
berkasSimpan = r'D:\path\ke\direktori\profil_kesehatan\bab_05\bab_05_14_KN1KN3.pdf'
# read data file
colnames = ['puskesmas','KN1','KN3']
data = pandas.read_csv(berkasData, names=colnames, sep=';')
puskesmas = data.puskesmas.tolist()
bar1 = data.KN1.tolist()
bar2 = data.KN3.tolist()
ind = np.arange(len(puskesmas)) # the x locations for the groups
width = 0.35 # the width of the bars
# make bars
fig, ax = plt.subplots()
rects1 = ax.barh(ind, bar1, width, color='steelblue', label='KN1')
rects2 = ax.barh(ind + width, bar2, width, color='orangered', label = 'KN3')
# add some text for labels, title and axes ticks
ax.set_title(judulDiagram)
ax.set_xlim(0,110,20)
ax.set_xlabel(sumbuX)
ax.set_ylabel(sumbuY)
formatter = FuncFormatter(lambda x, pos: "{:n}%".format(x))
ax.xaxis.set_major_formatter(formatter)
ax.set_yticks(ind+0.5*width)
ax.set_yticklabels(list([ '\n'.join(wrap(l, 10)) for l in puskesmas ]))
ax.invert_yaxis()
ax.tick_params(axis='both', which='major', labelsize='small')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# make legend box
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
ax.legend(fontsize='x-small', loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=2)
# add data label
for i, v in enumerate(bar1):
ax.text(v+0.5, i, '{:n}'.format(v), ha='left', va='center', fontsize='x-small')
for i, v in enumerate(bar2):
ax.text(v+0.5, i+0.3, '{:n}'.format(v), ha='left', va='center', fontsize='x-small')
# finishing
pyrfig = plt.figure(1)
pyrfig.set_figwidth(8)
pyrfig.set_figheight(5)
fig.savefig(berkasSimpan, bbox_inches='tight')
plt.close(pyrfig)
# plt.show()
|
def longpal(s):
n = len(s)
maxpal = ''
for k in range(1,n+1):
for i in range(0,n+1-k):
j=0
            while s[i+j] == s[i+k-1-j] and j < k//2:
                j+=1
            if j == k//2:
maxpal = s[i:i+k]
return maxpal
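# Illustrative check (not part of the original snippet): with the integer
# division above, longpal("babad") scans lengths 1..5 and returns "aba",
# the last of the longest palindromic substrings it finds.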
|
print"hello
print "aniket
|
# -*- coding: utf-8 -*-
from typing import Optional, Callable, Awaitable, Tuple, Union, List, TypeVar
from discord import Member, Embed
from commands.base.client import Ratelimit
from . import bot, Command, CommandError, authorise, Context
from . import get_alias, toggle_alias
from . import language
from . import toggle_botban, get_guild_botbans
from . import toggle, is_toggled, get_guild_toggles, CommandToggle
from . import Lister, BasePager
from . import database
from .authority import bot_mod, bot_admin, pm, no_pm, owner
from .converters import Required
# =====================
# === Alias Command ===
# =====================
@authorise(no_pm)
@bot.command()
async def alias(ctx, *, new_alias=None):
if new_alias is None:
aliases = (await get_alias(ctx.guild_id)).copy()
# this uses the fact that the ping invokers will
# always be at the start of the list, unless the main
# invoker is present somewhere
if bot.invoker in aliases:
aliases.remove(bot.invoker)
aliases = [f"<@{bot.user.id}>", bot.invoker] + aliases[2:]
else:
aliases = [f"<@{bot.user.id}>"] + aliases[2:]
await ctx.post_line("alias_list", str(aliases)[1:-1])
else:
if "@" in new_alias:
raise CommandError("no_pings")
new_alias = new_alias.strip()
added = await toggle_alias(ctx.guild_id, new_alias)
if added:
await ctx.post_line("add_alias", new_alias)
else:
await ctx.post_line("remove_alias", new_alias)
# ========================
# === Language Command ===
# ========================
async def language_auth(ctx):
return (await pm(ctx)) or (await bot_admin(ctx))
@authorise(language_auth, "bot_admin")
@bot.command("language")
async def language_command(ctx, newlang=None, set_guild=None):
if newlang is None:
line = ctx.get_raw_output("lang_list_entry")
lang_entries = "\n".join(
line.format(name=name, id=lang)
for lang, name in language.get_language_names().items()
if not lang.startswith("testing")
)
await ctx.post_line("lang_list", lang_entries)
return
newlang = newlang.lower()
if newlang not in language.LanguageManager.data:
raise CommandError("invalid_lang")
if set_guild in ["server", "guild"] and not ctx.is_private:
await language.set_guild_lang(ctx.guild_id, newlang)
ctx._lang = newlang
await ctx.post_line("success_guild")
else:
await language.set_channel_lang(ctx.channel_id, newlang)
ctx._lang = newlang
await ctx.post_line("success_channel")
# ======================
# === Botban Command ===
# ======================
async def list_botbans(ctx: Context):
users = []
for uid in await get_guild_botbans(ctx.guild_id):
user = ctx.guild.get_member(uid)
if user:
users.append(user.name)
if not users:
raise CommandError("no_botbanned_users")
await Lister(ctx, *users).start()
@authorise(no_pm)
@authorise(bot_mod)
@bot.command("botban")
async def botban_command(ctx: Context, target: Optional[Member] = None):
if target is None:
return await list_botbans(ctx)
# Cannot botban guild owner, guild admin, or bot coowner
if ctx.guild.owner_id == target.id:
raise CommandError("target_owner")
if target.id in ctx.bot.coowners:
raise CommandError("target_bot_owner")
if target.guild_permissions.administrator:
raise CommandError("target_admin")
is_botbanned = await toggle_botban(target.id, ctx.guild_id)
message = "success_ban" if is_botbanned else "success_unban"
await ctx.post_line(message)
# ====================
# === Help Command ===
# ====================
# Define some decorators inside a Help "module" to make importing cleaner
class Help:
_interjections = []
_failures = []
@classmethod
def command_help_interjections(cls, func: Callable[[Command, Context,
Embed], None]):
cls._interjections.append(func)
return func
@classmethod
def run_interjections(cls, cmd: Command, ctx: Context, embed: Embed):
for func in cls._interjections:
func(cmd, ctx, embed)
@classmethod
def on_command_help_fail(cls, func: Callable[[Context, Tuple[str, ...]],
Awaitable[bool]]):
cls._failures.append(func)
return func
@classmethod
async def run_external_help(cls, ctx: Context, path: Tuple[str, ...]):
for func in cls._failures:
if await func(ctx, path):
return
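# Illustrative hook (a sketch, not part of the original command set): other
# modules can decorate a function to inject extra fields into the help embed.
# The command id checked below is hypothetical.
@Help.command_help_interjections
def _example_interjection(cmd: Command, ctx: Context, embed: Embed):
    if cmd.id == "example_beta_command":
        embed.add_field(name="Beta", value="This command is still in testing.")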
def ratelimit_to_line(invocations: int, time: int, ctx: Context) -> str:
if time > 180 or time % 60 == 0:
period = ctx.get_output("rl_minutes", time//60, numerical_ref=time//60)
else:
period = ctx.get_output("rl_seconds", time, numerical_ref=time)
calls = ctx.get_output("rl_invocations", invocations,
numerical_ref=invocations)
return f"{calls} {period}"
async def add_permissions(target: Command, ctx: Context, embed: Embed) -> None:
perms = []
for auth, name in target.auth.items():
if isinstance(auth, Ratelimit):
            # Handle ratelimits separately
line = ratelimit_to_line(auth.invocations, auth.cooldown, ctx)
header = ctx.get_output("rl_header")
embed.add_field(name=header, value=line)
elif isinstance(auth, CommandToggle) and \
(await is_toggled(ctx.guild_id, target.qualified_id)):
            # Handle toggles separately
header = ctx.get_output("toggle_header")
body = ctx.get_output("toggle_body")
embed.add_field(name=header, value=body)
elif name in ["toggle", "bot_banned"]:
# Do not show toggle or bot_banned on root
pass
else:
perms.append(name.replace("_", " ") \
.title() \
.replace("Pm", "PM")) # Special case for "No PM"
if perms:
line = ", ".join(perms)
header = ctx.get_output("auth_header")
embed.add_field(name=header, value=line)
Help.run_interjections(target, ctx, embed)
@bot.command("help")
async def help_command(ctx: Context, *path: str):
# Allow access to root since we are inside the framework anyway.
target: Command = ctx.bot.root_command
# Get the command or subcommand we want to access.
successes = []
for element in path:
found = target.subcommands.get(f"{ctx.lang}_{element}",
target.subcommands.get(element))
if found is None:
if target.parent is None:
return await Help.run_external_help(ctx, path)
else:
raise CommandError("command_not_found",
ctx.invoker + " ".join(successes))
successes.append(element)
target = found
# Build the messages. Page 1 will have a description and usage
# Pages 2 onwards will have different subcommands.
# Get all the relevant information first.
if not path:
name = language.get_output(ctx.command, "bot_help_title", ctx)
else:
name = ctx.invoker + " ".join(path)
uri = language.get_command_link(target, ctx)
desc = language.get_command_description(target, ctx)
usages = language.get_command_help_lines(target, ctx)
subcommands = []
for sub in set(target.subcommands.values()):
if await sub.validate_auth(ctx) is None:
try:
line = language.get_command_parent_help(sub, ctx)
subcommands.append(line)
except language.LanguageError:
pass
pages = []
# Set up first page
header = ctx.get_output("link_header")
embed = Embed(description=desc)
embed.set_author(name=f"{name} | {header}", url=uri)
await add_permissions(target, ctx, embed)
pages.append(embed)
if usages:
header = ctx.get_output("usage_header")
embed = Embed()
embed.set_author(name=f"{name} | {header}", url=uri)
for title, value in usages:
embed.add_field(name=title, value=value, inline=False)
pages.append(embed)
# Chunk up subcommands
chunks = [subcommands[i:i+6] for i in range(0, len(subcommands), 6)]
header = ctx.get_output("subcommand_header")
for chunk in chunks:
embed = Embed()
embed.set_author(name=f"{name} | {header}", url=uri)
for line in chunk:
embed.add_field(name=line[0], value=line[1], inline=False)
pages.append(embed)
await BasePager(ctx, *pages).start()
# ======================
# === Toggle Command ===
# ======================
T = TypeVar("T")
@bot.converter()
def toggle_path(arg: str, ctx: Context) -> Union[Command, List[Command]]:
target = ctx.bot.root_command
path = arg.split(".")
while path:
element = path.pop(0)
if element == "*":
return list(set(target.subcommands.values()))
target = target.subcommands.get(f"{ctx.lang}_{element}",
target.subcommands.get(element))
if target is None:
raise CommandError("TOGGLE_PATH_error", path)
return target
def flatten(*elements: Union[T, List[T], Tuple[T]]) -> List[T]:
output = []
for element in elements:
if isinstance(element, (list, tuple)):
output.extend(element)
else:
output.append(element)
return output
async def toggle_shared_function(ctx: Context, *commands: Command):
prefix = ctx.command.id
# Throw if prefix is not one of "enable", "disable" or "toggle"
updater = getattr(toggle, f"{prefix}_elements")
paths = [cmd.qualified_id for cmd in commands]
await updater(ctx.guild_id, *paths)
await ctx.post_line("success", ", ".join(paths), numerical_ref=len(commands))
@authorise(bot_admin)
@bot.command("toggle")
async def toggle_command(ctx: Context, *commands: Required[toggle_path]):
await toggle_shared_function(ctx, *flatten(*commands))
@toggle_command.subcommand("disable")
async def toggle_disable(ctx: Context, *commands: Required[toggle_path]):
await toggle_shared_function(ctx, *flatten(*commands))
@toggle_command.subcommand("enable")
async def toggle_enable(ctx: Context, *commands: Required[toggle_path]):
await toggle_shared_function(ctx, *flatten(*commands))
@toggle_command.subcommand("list")
async def toggle_list(ctx: Context):
toggles = sorted(await get_guild_toggles(ctx.guild_id))
if len(toggles) == 0:
raise CommandError("no_toggles")
await ctx.post_line("toggle_list", ", ".join(toggles))
# ====================
# === Quit Command ===
# ====================
@authorise(owner)
@bot.command("quit")
async def quit_command(ctx: Context):
await bot._bot.logout()
|
"""This module contains pipeline definitions"""
import logging
import numpy as np
import sys
# set up a logger, at least for the ImportError
model_logr = logging.getLogger(__name__)
model_logr.setLevel(logging.DEBUG)
model_sh = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
model_sh.setFormatter(formatter)
model_logr.addHandler(model_sh)
# model imports
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
experiment_dict = \
{
# Note: keys are of the form expt_*, which are used to execute the
# associated values of 'pl' keys
#
# experiments to build pipeline ################################################
'expt_1': {
'note': 'random guessing (maintains class distributions)',
'name': 'Crash Test Dummies',
'pl': Pipeline([ ('dummy_clf', DummyClassifier()) ])
},
'expt_2': {
        'note': 'KNeighbours',
'name': 'K Nearest Neighbours',
'pl': Pipeline([ ('K_Neighbours', KNeighborsClassifier() )])
},
'expt_3': {
'note': 'Using linear kernel and C=0.025',
'name': 'SVC Classifier',
'pl': Pipeline([ ('dummy_clf', SVC(kernel="linear")) ])
}
} # end of experiment dict
# classifiers = [
# KNeighborsClassifier(3),
# SVC(kernel="linear", C=0.025),
# SVC(gamma=2, C=1),
# GaussianProcessClassifier(1.0 * RBF(1.0)),
# DecisionTreeClassifier(max_depth=5),
# RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
# MLPClassifier(alpha=1),
# AdaBoostClassifier(),
# GaussianNB(),
# QuadraticDiscriminantAnalysis()]
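# Illustrative runner (a sketch, not part of the original module): executes one
# of the expt_* entries above; the train/test arrays are assumed to be supplied
# by the calling project.
def run_experiment(key, X_train, y_train, X_test, y_test):
    expt = experiment_dict[key]
    model_logr.info("Running %s: %s", expt['name'], expt['note'])
    pipeline = expt['pl']
    pipeline.fit(X_train, y_train)
    accuracy = pipeline.score(X_test, y_test)
    model_logr.info("%s accuracy: %.3f", key, accuracy)
    return accuracy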
|
import string
def stringsplitter(string1):
    arr2 = []
    # strip punctuation, then split on any whitespace (spaces, tabs, newlines)
    string1 = ''.join(c for c in string1 if c not in string.punctuation)
    arr = string1.split()
    for word in arr:
        if len(word) > 0:
            arr2.append(word)
    print(arr2)
    return arr2
def zipfslaw1(arr):
d = {}
for i in range(len(arr)):
word = arr[i]
char1 = word[0].lower()
if char1 in d:
d[char1] += 1
else:
d[char1] = 1
print(d)
with open("text1.txt", "r") as text_file:
    words = stringsplitter(text_file.read())
zipfslaw1(words)
|
# Create SQLite table and populate it
import sqlite3
with sqlite3.connect("blog.db") as connection:
c = connection.cursor()
c.execute("""CREATE TABLE posts (title TEXT, post TEXT)""")
c.execute('INSERT INTO posts VALUES ("Well","I am well, thanks.")')
c.execute('INSERT INTO posts VALUES ("Good","I am good, thanks.")')
c.execute('INSERT INTO posts VALUES ("Okay","I am okay, thanks.")')
c.execute('INSERT INTO posts VALUES ("Excellent","I am excellent!")')
|
# Generated by Django 2.1.2 on 2018-12-11 12:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0013_auto_20181128_1358'),
]
operations = [
migrations.CreateModel(
name='Cities',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_of_city', models.CharField(blank=True, db_index=True, default='Тернопіль', help_text='Якщо не вказувати, по замовчуванню: це місто', max_length=128, null=True, verbose_name='Назва міста')),
],
),
migrations.CreateModel(
name='Streets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_of_street', models.CharField(db_index=True, help_text='Назва вулиці', max_length=128, verbose_name='Вулиця')),
('city', models.ForeignKey(help_text='Якщо не вказувати, по замовчуванню: це місто', on_delete=django.db.models.deletion.CASCADE, to='main.Cities', verbose_name='Місто')),
],
),
migrations.AlterModelOptions(
name='addresses',
options={'verbose_name': 'Адреса', 'verbose_name_plural': 'Адреси'},
),
migrations.RemoveField(
model_name='addresses',
name='city',
),
migrations.AlterField(
model_name='addresses',
name='apartment',
field=models.CharField(blank=True, db_index=True, help_text='Номер квартири (якщо є)', max_length=6, null=True, verbose_name='Квартира'),
),
migrations.AlterField(
model_name='addresses',
name='street',
field=models.ForeignKey(help_text='Назва вулиці', max_length=128, on_delete=django.db.models.deletion.CASCADE, to='main.Streets', verbose_name='Вулиця'),
),
]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 6/4/19
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
overlapres = 50
overlapbox = np.mgrid[:float(overlapres), :float(overlapres)]
overlapbox += .5
overlapbox *= 2./overlapres
overlapbox -= 1
overlapbox = overlapbox.transpose((1,2,0))
def soMetricIoU(boxa, boxb):
relx = boxa[0]-boxb[0]
rely = boxa[1]-boxb[1]
ca, sa = np.cos(boxa[2]), np.sin(boxa[2])
cb, sb = np.cos(boxb[2]), np.sin(boxb[2])
la,wa = boxa[3:5]
lb,wb = boxb[3:5]
R = np.array([[la/lb*(ca*cb+sa*sb), wa/lb*(ca*sb-cb*sa)],
[la/wb*(cb*sa-ca*sb), wa/wb*(ca*cb+sa*sb)]])
t = np.array([(cb*relx + sb*rely)/lb, (cb*rely - sb*relx)/wb])
grid = np.einsum(R, [0,1], overlapbox, [2,3,1], [2,3,0]) + t
intersection = np.sum(np.all(abs(grid) < 1, axis=2))
ioa = float(intersection) / overlapres**2
return ioa / (1 - ioa + lb*wb/la/wa)
def soMetricEuc(boxa, boxb):
eucdist = np.hypot(boxa[0]-boxb[0],boxa[1]-boxb[1])
angledist = abs((boxa[2]-boxb[2] + np.pi/2)%np.pi - np.pi/2)
return eucdist + angledist/2 < 3.
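# Quick sanity check (illustrative, not part of the original evaluation code):
# a box compared with itself should give an IoU-style score of ~1, and two
# distant boxes should fail the Euclidean gate. Box layout: x, y, heading,
# length, width.
def _sanity_check_metrics():
    box = np.array([0., 0., 0., 4., 2.])
    far_box = np.array([10., 10., 0., 4., 2.])
    assert np.isclose(soMetricIoU(box, box), 1.0)
    assert not soMetricEuc(box, far_box)
    return True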
class MetricMine2():
def __init__(self):
self.dets = []
self.switchscores = []
self.nmissed = 0
self.previousids = {}
self.previousscores = {}
self.newscene = True
def newScene(self):
self.previousids = {}
self.previousscores = {}
def okMetric(self, boxa, boxb):
return soMetricIoU(boxa, boxb) > .3
def goodMetric(self, boxa, boxb):
return soMetricIoU(boxa, boxb) > .7
def add(self, gt, gtscored, gtdifficulty, gtids, ests, scores, estids):
ngt = gt.shape[0]
assert gtscored.shape[0] == ngt
assert gtdifficulty.shape[0] == ngt
nests = ests.shape[0]
assert scores.shape[0] == nests
gtscored = gtscored & (gtdifficulty < 3)
estorder = np.argsort(scores)[::-1]
gtopen = np.ones(ngt, dtype=bool)
currentids = {}
currentscores = {}
for estidx in estorder:
bestgtGood = False
bestgtScored = False
bestgtidx = None
for gtidx in range(ngt):
if gtopen[gtidx] and self.okMetric(gt[gtidx], ests[estidx]):
keep = False
swap = bestgtidx is None
goodfit = self.goodMetric(gt[gtidx], ests[estidx])
isscored = gtscored[gtidx]
if not swap:
keep = bestgtGood and not goodfit
swap = bestgtGood and goodfit
if not keep and not swap:
swap = not bestgtScored and isscored
if swap:
bestgtidx = gtidx
bestgtGood = goodfit
bestgtScored = isscored
if bestgtidx is None:
self.dets.append((scores[estidx], False, False))
else:
gtopen[bestgtidx] = False
if bestgtScored:
self.dets.append((scores[estidx], True, bestgtGood))
# search for id swap
gtid = gtids[bestgtidx]
switch = (gtid in self.previousids and
self.previousids[gtid] != estids[estidx])
if switch:
switchscore = min(self.previousscores[gtid], scores[estidx])
self.switchscores.append(switchscore)
currentids[gtid] = estids[estidx]
currentscores[gtid] = scores[estidx]
self.nmissed += sum(gtopen & gtscored)
self.previousids = currentids
self.previousscores = currentscores
def calc(self):
dets = np.array(sorted(self.dets)[::-1])
switchscores = -np.array(sorted(self.switchscores)[::-1])
ndets = len(dets)
nt = sum(dets[:,1]) + self.nmissed
tps = np.cumsum(dets[:,1])
checkpts = np.append(np.where(np.diff(dets[:,0]))[0], ndets-1)
rec = tps[checkpts] / nt
prec = tps[checkpts] / (checkpts+1)
goodtpr = (np.cumsum(dets[:,2]))[checkpts] / nt
switches = np.searchsorted(switchscores, -dets[checkpts,0])
#mota = (2*tps[checkpts] - checkpts-1 - switches) / float(nt)
rec = np.concatenate(([0.], rec, [rec[-1]]))
prec = np.concatenate(([1.], prec, [0.]))
goodtpr = np.concatenate(([0.], goodtpr, [goodtpr[-1]]))
switches = np.concatenate(([switches[0]], switches, [switches[-1]]))
return np.array((rec, prec, goodtpr, switches)).T
def calcMOTA(self):
dets = np.array(sorted(self.dets)[::-1])
switchscores = -np.array(sorted(self.switchscores)[::-1])
ndets = len(dets)
nt = sum(dets[:,1]) + self.nmissed
tps = np.cumsum(dets[:,1])
checkpts = np.append(np.where(np.diff(dets[:,0]))[0], ndets-1)
switches = np.searchsorted(switchscores, -dets[checkpts,0])
mota = (2*tps[checkpts] - checkpts-1 - switches) / float(nt)
return max(mota)
" counts = 210, 1063, 369, 268, 679, 854, 378, 1843, 522, 2121"
""
if __name__ == '__main__':
"""
runs a single accuracy metric across multiple scenes
formatForKittiScore gets rid of things kitti didn't annotate
"""
from calibs import calib_extrinsics, calib_projections, view_by_day
from trackinginfo import sceneranges
from trackinginfo import calib_map_training as calib_map
from analyzeGT import readGroundTruthFileTracking, formatForKittiScoreTracking
from imageio import imread
scenes = [0,1,2,3,4,5,6,7,8,9]
nframesahead = 0
tests = [('trMGL3', 'MGL', 'b'),
('trMGLnofake3', 'w/o genuity', 'g--'),
('trackingresultsMGR', 'camera', 'k-.'),
('trMGLnodet3', 'w/o detectability', 'r:')]
gt_files = '/home/m2/Data/kitti/tracking_gt/{:04d}.txt'
estfiles = '/home/m2/Data/kitti/estimates/{:s}/{:02d}f{:04d}.npy'
img_files = '/home/m2/Data/kitti/tracking_image/training/{:04d}/000000.png'
ground_plane_files = '/home/m2/Data/kitti/tracking_ground/training/{:02d}f{:06d}.npy'
results = []
motas = []
for testfolder, testname, testcolor in tests:
metric = MetricMine2()
for scene_idx in scenes:
# run some performance metrics on numpy-stored results
startfile, endfile = sceneranges[scene_idx]
#startfile = 200
#endfile = 40
startfile += nframesahead
calib_idx = calib_map[scene_idx]
calib_extrinsic = calib_extrinsics[calib_idx].copy()
calib_extrinsic[2,3] += 1.65
view_angle = view_by_day[calib_idx]
calib_projection = calib_projections[calib_idx]
calib_projection = calib_projection.dot(np.linalg.inv(calib_extrinsic))
imgshape = imread(img_files.format(scene_idx)).shape[:2]
with open(gt_files.format(scene_idx), 'r') as fd: gtfilestr = fd.read()
gt_all, gtdontcares = readGroundTruthFileTracking(gtfilestr,('Car','Van'))
metric.newScene()
for fileidx in range(startfile, endfile):
ground = np.load(ground_plane_files.format(scene_idx, fileidx))
ests = np.load(estfiles.format(testfolder, scene_idx, fileidx))
estids = ests[:,6].astype(int)
scores = ests[:,5]
ests = ests[:,:5]
rede = formatForKittiScoreTracking(ests, estids, scores, fileidx,
ground, calib_projection, imgshape, gtdontcares)
ests = np.array([redd[0] for redd in rede])
scores = np.array([redd[2] for redd in rede])
estids = np.array([redd[1] for redd in rede])
gthere = gt_all[fileidx]
gtboxes = np.array([gtobj['box'] for gtobj in gthere])
gtscores = np.array([gtobj['scored'] for gtobj in gthere],dtype=bool)
gtdifficulty = np.array([gtobj['difficulty'] for gtobj in gthere],
dtype=int)
gtids = np.array([gtobj['id'] for gtobj in gthere],dtype=int)
gtdontcareshere = gtdontcares[fileidx]
metric.add(gtboxes, gtscores, gtdifficulty, gtids,
ests, scores, estids)
restest = metric.calc()
results.append((testname, restest, testcolor))
motas.append(metric.calcMOTA())
# nodet is currently nofakelogic
fig, axeses = plt.subplots(1, 3, figsize=(12., 3.))
plt1, plt2, plt3 = axeses.flat
plt1.set_xlim((0.5, 1.))
plt2.set_xlim((0.5, 1.))
plt3.set_xlim((0.5, 1.))
plt1.set_ylim((0.5, 1.))
plt2.set_ylim((0., 1.))
plt1.set_title('Precision vs Recall')
plt2.set_title('Close fit recall vs Recall')
plt3.set_title('# identity swaps vs Recall')
maxswaps = int(max(np.max(result[1][:,3]) for result in results))+1
plt3.set_yticks(list(range(0, maxswaps, maxswaps//5+1)))
for testname, result, color in results:
plt1.plot(result[:,0], result[:,1], color, label=testname)
plt2.plot(result[:,0], result[:,2], color, label=testname)
plt3.plot(result[:,0], result[:,3], color, label=testname)
#plt3.legend(loc='center right')
plt3.legend(bbox_to_anchor = (1.04, 1), loc="upper left")
#plt1.legend(bbox_to_anchor = (0., -0.05), loc="upper left", ncol=4)
plt.show()
|
import maya.cmds as cmds
selection = cmds.ls(sl=True)
ws = cmds.workspace(q = True, fullName = True)
wsp = ws + "/" + "images"
cmds.sysFile(wsp, makeDir=True)
for i in range(0,50):
cmds.xform('Lamborginhi_Aventador', ws =True, relative=True, rotation=(45, 45, 45) )
cmds.saveImage( currentView=True )
imageSnapshot = wsp + "/" + "image" + str(i) +".jpg"
cmds.refresh(cv=True, fe = "jpg", fn = imageSnapshot)
|
# Generated by Django 3.0.5 on 2020-05-04 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0004_clientprofile_branch'),
]
operations = [
migrations.AddField(
model_name='clientprofile',
name='contact1email',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='clientprofile',
name='contact2email',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
import re
class Verify:
def is_empty(self, items):
for item in items:
if bool(item) is False:
return True
return False
def is_whitespace(self, items):
for item in items:
if item.isspace() is True:
return True
return False
    def payload(self, items, length, keys):
        items = items.keys()
        if len(items) == length:
            for item in items:
                if item not in keys:
                    return False
            return True
        return False
def is_signup_payload(self, items):
res = self.payload(items, 3, ['email_address', 'username', 'password'])
return res
@staticmethod
def is_valid_email(email_address):
if re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email_address) is None:
res = True
else:
res = False
return res
@staticmethod
def is_valid_password(password):
if (len(password)<6) is True:
res = True
else:
res = False
return res
def is_login_payload(self, items):
res = self.payload(items, 2, ['username', 'password'])
return res
    @staticmethod
    def list_iterator(items):
        # returns False if any element is None or falsy, True otherwise
        for i in items:
            if i is None or not i:
                return False
        return True
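# Illustrative usage (hypothetical payloads), assuming the class validates
# request bodies before they reach the database layer:
if __name__ == "__main__":
    v = Verify()
    signup = {'email_address': 'user@example.com', 'username': 'user', 'password': 'secret1'}
    print(v.is_signup_payload(signup))            # True: exactly the three expected keys
    print(Verify.is_valid_email('not-an-email'))  # True here means the address failed the regex
    print(Verify.is_valid_password('abc'))        # True here means the password is too short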
|
from django.urls import include, path
from . import views
urlpatterns = [
# return render templates
path('', views.index),
path('dogs', views.display_all_dogs),
path('dogs/new', views.new_dog),
path('dogs/<int:single_dog_id>', views.display_single_dog),
path('dogs/<int:single_dog_id>/edit', views.edit_single_dog),
# processing routes
# return redirect to a path/url
path('dogs/create', views.create_dog),
path('dogs/<int:single_dog_id>/toggle_status', views.toggle_status),
# TODO
# register - validations, session
path('register', views.register),
# login - validations, session
path('login', views.login),
# logout - session
path('logout', views.logout),
]
|
import numpy as np
import random
from copy import deepcopy
def compute_ranks(x):
"""
    Returns ranks in [0, len(x)).
    Note: this differs from scipy.stats.rankdata, which returns ranks in [1, len(x)].
(https://github.com/openai/evolution-strategies-starter/blob/master/es_distributed/es.py)
"""
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
def compute_centered_ranks(x):
"""
https://github.com/openai/evolution-strategies-starter/blob/master/es_distributed/es.py
"""
y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)
y /= (x.size - 1)
y -= .5
return y
def compute_weight_decay(weight_decay, model_param_list):
model_param_grid = np.array(model_param_list)
return -weight_decay * np.mean(model_param_grid * model_param_grid, axis=1)
class GA:
"""
Basic population based genetic algorithm
"""
def __init__(self, num_params,
pop_size=100,
elite_frac=0.1,
mut_rate=0.9,
mut_amp=0.1,
generator=None):
# misc
self.num_params = num_params
self.pop_size = pop_size
self.n_elites = int(self.pop_size * elite_frac)
# individuals
if generator is None:
            # one row of parameters per individual (pop_size x num_params)
            self.individuals = np.random.normal(
                scale=0.1, size=(pop_size, num_params))
else:
self.individuals = np.array([generator() for i in range(pop_size)])
self.new_individuals = deepcopy(self.individuals)
self.fitness = np.zeros(pop_size)
        self.order = np.zeros(self.pop_size, dtype=int)
self.to_add = None
self.to_add_fitness = 0
# mutations
self.mut_amp = mut_amp
self.mut_rate = mut_rate
def best_actor(self):
"""
Returns the best set of parameters
"""
return deepcopy(self.individuals[self.order[-1]])
def best_fitness(self):
"""
Returns the best score
"""
return self.fitness[self.order[-1]]
def add_ind(self, parameters, fitness):
"""
Replaces the parameters of the worst individual
"""
self.to_add = deepcopy(parameters)
self.to_add_fitness = fitness
def set_new_params(self, new_params):
"""
Replaces the current new_population with the
given population of parameters
"""
self.new_individuals = deepcopy(np.array(new_params))
def ask(self):
"""
Returns the newly created individual(s)
"""
return deepcopy(self.new_individuals)
def tell(self, scores):
"""
Updates the population
"""
assert(len(scores) == len(self.new_individuals)
), "Inconsistent reward_table size reported."
# add new fitness evaluations
self.fitness = [s for s in scores]
# sort by fitness
self.order = np.argsort(self.fitness)
# replace individuals with new batch
self.individuals = deepcopy(self.new_individuals)
# replace worst ind with ind to add
if self.to_add is not None:
self.individuals[self.order[0]] = deepcopy(self.to_add)
self.fitness[self.order[0]] = self.to_add_fitness
self.order = np.argsort(self.fitness)
self.to_add = None
# tournament selection
tmp_individuals = []
while len(tmp_individuals) < (self.pop_size - self.n_elites):
k, l = np.random.choice(range(self.pop_size), 2, replace=True)
if self.fitness[k] > self.fitness[l]:
tmp_individuals.append(deepcopy(self.individuals[k]))
else:
tmp_individuals.append(deepcopy(self.individuals[l]))
# mutation
tmp_individuals = np.array(tmp_individuals)
for ind in range(tmp_individuals.shape[0]):
u = np.random.rand(self.num_params)
params = tmp_individuals[ind]
noise = np.random.normal(
loc=1, scale=self.mut_amp * (u < self.mut_rate))
params *= noise
# new population
self.new_individuals[self.order[:self.pop_size -
self.n_elites]] = np.array(tmp_individuals)
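# Minimal ask/tell usage sketch (illustrative only, not part of the original
# module): maximise a toy fitness, the negative squared norm, whose optimum is
# the zero vector.
if __name__ == "__main__":
    ga = GA(num_params=5, pop_size=20)
    for generation in range(20):
        population = ga.ask()
        fitnesses = [-np.sum(ind ** 2) for ind in population]
        ga.tell(fitnesses)
    print("best fitness:", ga.best_fitness())
    print("best parameters:", ga.best_actor())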
|
def my_parse_int(string):
try:
return int(string)
except ValueError:
return 'NaN'
'''
JavaScript provides a built-in parseInt method.
It can be used like this:
parseInt("10") returns 10
parseInt("10 apples") also returns 10
We would like it to return "NaN" (as a string) for the second case because the
input string is not a valid number.
You are asked to write a myParseInt method with the following rules:
It should make the conversion if the given string only contains a single
integer value (and eventually spaces - including tabs, line feeds... - at both ends)
For all other strings (including the ones representing float values),
it should return NaN
It should assume that all numbers are not signed and written in base 10
'''
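# A few quick checks (illustrative inputs) of the behaviour described above:
assert my_parse_int("10") == 10
assert my_parse_int("  10 \n") == 10
assert my_parse_int("10 apples") == 'NaN'
assert my_parse_int("1.5") == 'NaN'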
|
import polars as pl
def test_horizontal_agg(fruits_cars: pl.DataFrame) -> None:
df = fruits_cars
out = df.select(pl.max([pl.col("A"), pl.col("B")])) # type: ignore
assert out[:, 0].to_list() == [5, 4, 3, 4, 5]
out = df.select(pl.min([pl.col("A"), pl.col("B")])) # type: ignore
assert out[:, 0].to_list() == [1, 2, 3, 2, 1]
|
# Generated by Django 3.1.5 on 2021-01-22 19:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_aggregator', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='assignment',
name='excused',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='assignment',
name='first_quartile',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='assignment',
name='max_score',
field=models.DecimalField(decimal_places=2, max_digits=7, null=True),
),
migrations.AddField(
model_name='assignment',
name='median',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='assignment',
name='min_score',
field=models.DecimalField(decimal_places=2, max_digits=7, null=True),
),
migrations.AddField(
model_name='assignment',
name='muted',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='assignment',
name='non_digital_submission',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='assignment',
name='posted_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='assignment',
name='submitted_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='assignment',
name='third_quartile',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='assignment',
name='title',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='assignment',
name='unlock_at',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='due_at',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='points_possible',
field=models.DecimalField(decimal_places=2, max_digits=7, null=True),
),
migrations.AlterField(
model_name='assignment',
name='score',
field=models.DecimalField(decimal_places=2, max_digits=7, null=True),
),
migrations.AlterField(
model_name='assignment',
name='status',
field=models.TextField(null=True),
),
]
|
import os
from pprint import pprint
import cv2
import requests
imageName = "car"
imgType = ".jpg"
def handle_plate_identifier(croppedFrame, accessToken, objectID, score=0, regions=[]):
currentImageName = imageName + str(objectID) + imgType
cv2.imwrite(currentImageName, croppedFrame)
licenseNumber = str()
with open(currentImageName, 'rb') as fp:
response = requests.post(
'https://api.platerecognizer.com/v1/plate-reader/',
data=dict(regions=regions), # Optional
files=dict(upload=fp),
headers={'Authorization': 'Token {}'.format(accessToken)})
responseJson = response.json()
if "results" in responseJson:
if responseJson["results"]:
licenseNumber = responseJson["results"][0]['plate'] if responseJson["results"][0]['score'] >= score else str()
os.remove(currentImageName)
return licenseNumber
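# Illustrative call (a sketch): the image path and token below are hypothetical;
# in the original pipeline croppedFrame comes from an upstream detection stage.
if __name__ == "__main__":
    frame = cv2.imread("example_car.jpg")  # hypothetical test image
    plate = handle_plate_identifier(frame, accessToken="YOUR_API_TOKEN", objectID=1)
    pprint({"plate": plate})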
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('baxter_rr_bridge')
import rospy
import baxter_interface
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
import time
import sys, argparse
import struct
import time
import RobotRaconteur as RR
import thread
import threading
import numpy
import traceback
import cv2
import cv2.aruco as aruco
baxter_servicedef="""
#Service to provide simple interface to Baxter
service BaxterCamera_interface
option version 0.4
struct BaxterImage
field int32 width
field int32 height
field int32 step
field uint8[] data
end struct
struct CameraIntrinsics
field double[] K
field double[] D
end struct
struct ImageHeader
field int32 width
field int32 height
field int32 step
end struct
struct ARtagInfo
field double[] tmats
field int32[] ids
end struct
object BaxterCamera
property uint8 camera_open
# camera control functions
function void openCamera()
function void closeCamera()
function void setExposure(int16 exposure)
function void setGain(int16 gain)
function void setWhiteBalance(int16 red, int16 green, int16 blue)
function void setFPS(double fps)
function void setCameraIntrinsics(CameraIntrinsics data)
function void setMarkerSize(double markerSize)
# functions to acquire data on the image
function BaxterImage getCurrentImage()
function ImageHeader getImageHeader()
function CameraIntrinsics getCameraIntrinsics()
function double getMarkerSize()
function ARtagInfo ARtag_Detection()
# pipe to stream images through
pipe BaxterImage ImageStream
end object
"""
class BaxterCamera_impl(object):
def __init__(self, camera_name, mode, half_res):
print "Initializing ROS Node"
rospy.init_node('baxter_cameras', anonymous = True)
# Lock for multithreading
self._lock = threading.RLock()
# for image pipe
self._imagestream = None
self._imagestream_endpoints = dict()
self._imagestream_endpoints_lock = threading.RLock()
# get access to camera controls from RSDK
self._camera = baxter_interface.CameraController(camera_name)
self._camera_name = camera_name;
# automatically close camera at start
# self._camera.close()
self._camera_open = False
# set constant ImageHeader structure
self.setResolution(mode,half_res)
self._image_header = RR.RobotRaconteurNode.s.NewStructure(
"BaxterCamera_interface.ImageHeader" )
self._image_header.width = int(self._camera.resolution[0])
self._image_header.height = int(self._camera.resolution[1])
self._image_header.step = int(4)
self._camera_intrinsics = None
# set exposure, gain, white_balance to auto
self._camera.exposure = self._camera.CONTROL_AUTO
self._camera.gain = self._camera.CONTROL_AUTO
self._camera.white_balance_red = self._camera.CONTROL_AUTO
self._camera.white_balance_green = self._camera.CONTROL_AUTO
self._camera.white_balance_blue = self._camera.CONTROL_AUTO
# set BaxterImage struct
self._image = RR.RobotRaconteurNode.s.NewStructure("BaxterCamera_interface.BaxterImage")
self._image.width = self._image_header.width
self._image.height = self._image_header.height
self._image.step = self._image_header.step
# Initialize ARtag detection
self._aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
self._arucoParams = aruco.DetectorParameters_create()
self._markerSize = 0.085
# open camera
def openCamera(self):
if self._camera_open:
return
# start camera and subscription
try:
self._camera.open()
except (OSError):
print "Could not open camera. Perhaps the other cameras are already open?"
return
# get camera intrinsic values and fill Robot Raconteur struct
self._caminfo_sub = rospy.Subscriber("cameras/" + self._camera_name + "/camera_info",
CameraInfo, self.set_CameraIntrinsics)
        # Subscriber to camera image
print "Subscribing to", self._camera_name
self._image_sub = rospy.Subscriber("cameras/" + self._camera_name + "/image", Image, self.set_imagedata)
self._camera_open = True
def closeCamera(self):
if not self._camera_open:
return
if self._image_sub:
self._image_sub.unregister()
self._camera.close()
self._camera_open = False
@property
def camera_open(self):
if self._camera_open:
return 1
else:
return 0
# subscriber function for camera image
def set_imagedata(self,camdata):
with self._lock:
if camdata.data:
self._image.data = numpy.frombuffer(camdata.data,dtype="u1")
with self._imagestream_endpoints_lock:
# Send to pipe endpoints
for ep in self._imagestream_endpoints:
dict_ep = self._imagestream_endpoints[ep]
# Loop through indices in nested dict
for ind in dict_ep:
# Attempt to send frame to connected endpoint
try:
pipe_ep=dict_ep[ind]
pipe_ep.SendPacket(self._image)
except:
# on error, assume pipe has been closed
self.ImageStream_pipeclosed(pipe_ep)
def set_CameraIntrinsics(self, data):
if (self._camera_intrinsics is None):
print "Setting Camera Intrinsic Data"
self._camera_intrinsics = RR.RobotRaconteurNode.s.NewStructure(
"BaxterCamera_interface.CameraIntrinsics" )
K = list(data.K)
K[2] -= data.roi.x_offset;
K[5] -= data.roi.y_offset;
self._camera_intrinsics.K = tuple(K)
self._camera_intrinsics.D = tuple(data.D)
self._caminfo_sub.unregister()
# The following function is to set camera parameters manually
def setCameraIntrinsics(self, data):
if (self._camera_intrinsics is None):
print "Setting Camera Intrinsic Data"
else:
print "Setting already exists. Overwriting now..."
K = list(data.K)
self._camera_intrinsics.K = tuple(K)
self._camera_intrinsics.D = tuple(data.D)
self._caminfo_sub.unregister()
def getCurrentImage(self):
with self._lock:
return self._image
def getImageHeader(self):
return self._image_header
def getCameraIntrinsics(self):
return self._camera_intrinsics
''' This is meant to only be called once at the initialization of the program'''
def setResolution(self, mode, half_res):
self._camera.resolution = self._camera.MODES[mode]
# half resolution not always possible
if (mode in [0,1,4] and half_res == 1):
print 'Cannot do half-resolution at (1280,800), (960, 600), or (384,240)'
half_res = 0
self._camera.half_resolution = (half_res != 0)
print 'Resolution set to: ', self._camera.resolution
if (self._camera.half_resolution):
print '**Displaying at half-resolution'
def setExposure(self, exposure):
        if ((exposure < 0 or exposure > 100) and exposure != self._camera.CONTROL_AUTO):
print 'Exposure must be in [0, 100]'
return
self._camera.exposure = exposure
def setGain(self, gain):
        if ((gain < 0 or gain > 79) and gain != self._camera.CONTROL_AUTO):
print 'Gain must be in [0, 79]'
return
self._camera.gain = gain
def setWhiteBalance(self, red, green, blue):
        if ((red < 0 or red > 4095) and red != self._camera.CONTROL_AUTO):
            print 'White Balance values must be in [0, 4095]'
            return
        self._camera.white_balance_red = red
        if ((green < 0 or green > 4095) and green != self._camera.CONTROL_AUTO):
            print 'White Balance values must be in [0, 4095]'
            return
        self._camera.white_balance_green = green
        if ((blue < 0 or blue > 4095) and blue != self._camera.CONTROL_AUTO):
print 'White Balance values must be in [0, 4095]'
return
self._camera.white_balance_blue = blue
def setFPS(self, fps):
if (fps <= 0 or fps > 30):
print 'fps must be positive and cannot exceed 30'
return
self._camera.fps = fps
# Functions related to AR tags
# Marker size
def setMarkerSize(self, markerSize):
with self._lock:
self._markerSize = markerSize
def getMarkerSize(self):
with self._lock:
markerSize = self._markerSize
return markerSize
def ARtag_Detection(self):
if not self.camera_open:
self.openCamera()
print "Detecting AR tags..."
currentImage = self.getCurrentImage()
imageData = currentImage.data
imageData = numpy.reshape(imageData, (800, 1280, 4))
gray = cv2.cvtColor(imageData, cv2.COLOR_BGRA2GRAY)
corners, ids, rejected = aruco.detectMarkers(gray, self._aruco_dict, parameters=self._arucoParams)
if ids is not None:
Tmat = []
IDS = []
detectioninfo = RR.RobotRaconteurNode.s.NewStructure("BaxterCamera_interface.ARtagInfo")
for anid in ids:
IDS.append(anid[0])
for corner in corners:
pc, Rc = self.getObjectPose(corner)
Tmat.extend([ Rc[0][0], Rc[1][0], Rc[2][0], 0.0,
Rc[0][1], Rc[1][1], Rc[2][1], 0.0,
Rc[0][2], Rc[1][2], Rc[2][2], 0.0,
pc[0], pc[1], pc[2], 1.0])
detectioninfo.tmats = Tmat
detectioninfo.ids = IDS
return detectioninfo
# function that AR tag detection uses
def getObjectPose(self, corners):
with self._lock:
camMatrix = numpy.reshape(self._camera_intrinsics.K, (3, 3))
distCoeff = numpy.zeros((1, 5), dtype=numpy.float64)
distCoeff[0][0] = self._camera_intrinsics.D[0]
distCoeff[0][1] = self._camera_intrinsics.D[1]
distCoeff[0][2] = self._camera_intrinsics.D[2]
distCoeff[0][3] = self._camera_intrinsics.D[3]
distCoeff[0][4] = self._camera_intrinsics.D[4]
# print "cameramatrix: ", camMatrix
# print "distortion coefficient: ", distCoeff
# AR Tag Dimensions in object frame
objPoints = numpy.zeros((4, 3), dtype=numpy.float64)
# (-1, +1, 0)
objPoints[0,0] = -1*self._markerSize/2.0 # -1
objPoints[0,1] = 1*self._markerSize/2.0 # +1
objPoints[0,2] = 0.0
# (+1, +1, 0)
objPoints[1,0] = self._markerSize/2.0 # +1
objPoints[1,1] = self._markerSize/2.0 # +1
objPoints[1,2] = 0.0
# (+1, -1, 0)
objPoints[2,0] = self._markerSize/2.0 # +1
objPoints[2,1] = -1*self._markerSize/2.0 # -1
objPoints[2,2] = 0.0
# (-1, -1, 0)
objPoints[3,0] = -1*self._markerSize/2.0 # -1
objPoints[3,1] = -1*self._markerSize/2.0 # -1
objPoints[3,2] = 0.0
# Get each corner of the tags
imgPoints = numpy.zeros((4, 2), dtype=numpy.float64)
for i in range(4):
imgPoints[i, :] = corners[0, i, :]
# SolvePnP
retVal, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, camMatrix, distCoeff)
Rca, b = cv2.Rodrigues(rvec)
Pca = tvec
# print "pca, rca: ", Pca, Rca
return [Pca, Rca]
######################################
# pipe functions
@property
def ImageStream(self):
return self._imagestream
@ImageStream.setter
def ImageStream(self, value):
self._imagestream = value
        # Set the PipeConnectCallback to ImageStream_pipeconnect, which will
        # be called when a PipeEndpoint connects
value.PipeConnectCallback = self.ImageStream_pipeconnect
def ImageStream_pipeconnect(self, pipe_ep):
        # Lock the _imagestream_endpoints dictionary and place the pipe_ep in
        # a nested dict that is indexed by the endpoint of the client and the
        # index of the pipe
with self._imagestream_endpoints_lock:
            # if there is not an entry for this client endpoint, add it
if (not pipe_ep.Endpoint in self._imagestream_endpoints):
self._imagestream_endpoints[pipe_ep.Endpoint] = dict()
# Add pipe_ep to the correct dictionary given the endpoint + index
dict_ep = self._imagestream_endpoints[pipe_ep.Endpoint]
dict_ep[pipe_ep.Index] = pipe_ep
pipe_ep.PipeEndpointClosedCallback = self.ImageStream_pipeclosed
def ImageStream_pipeclosed(self, pipe_ep):
with self._imagestream_endpoints_lock:
try:
dict_ep = self._imagestream_endpoints[pipe_ep.Endpoint]
del(dict_ep[pipe_ep.Index])
except:
traceback.print_exc()
def main(argv):
# parse command line arguments
parser = argparse.ArgumentParser(description='Initialize Baxter Camera.')
parser.add_argument('camera_name', metavar='camera_name',
choices=['left_hand_camera', 'right_hand_camera', 'head_camera'],
help='name of the camera to connect to')
parser.add_argument('--mode', type=int, default = 5,
choices=range(0,6),
help='mode of camera resolution')
parser.add_argument('--half_res', type=int, default = 0,
choices=range(0,2),
help='Show in half resolution [0 / 1]')
parser.add_argument('--port', type=int, default = 0,
help='TCP port to host service on (will auto-generate if not specified)')
args = parser.parse_args(argv)
#Enable numpy
RR.RobotRaconteurNode.s.UseNumPy=True
#Set the RobotRaconteur Node name
RR.RobotRaconteurNode.s.NodeName="BaxterCameraServer"
#Create transport, register it, and start the server
print "Registering Transport"
t = RR.TcpTransport()
t.EnableNodeAnnounce(RR.IPNodeDiscoveryFlags_NODE_LOCAL |
RR.IPNodeDiscoveryFlags_LINK_LOCAL | RR.IPNodeDiscoveryFlags_SITE_LOCAL)
RR.RobotRaconteurNode.s.RegisterTransport(t)
t.StartServer(args.port)
port = args.port
if (port == 0):
port = t.GetListenPort()
#Register the service type and the service
print "Starting Service"
RR.RobotRaconteurNode.s.RegisterServiceType(baxter_servicedef)
#Initialize object
baxter_obj = BaxterCamera_impl(args.camera_name, args.mode, args.half_res)
RR.RobotRaconteurNode.s.RegisterService(args.camera_name,
"BaxterCamera_interface.BaxterCamera", baxter_obj)
print "Service started, connect via"
print "tcp://localhost:" + str(port) + "/BaxterCameraServer/" + args.camera_name
raw_input("press enter to quit...\r\n")
baxter_obj.closeCamera()
# This must be here to prevent segfault
RR.RobotRaconteurNode.s.Shutdown()
if __name__ == '__main__':
main(sys.argv[1:])
|
from assets import art
from resource import Resource
from cashier import Cashier
class Printer:
"""This class act like a printer which the users interact with
"""
print(art.logo)
print('Welcome to automated printer')
@classmethod
def printer_machine(cls):
resource = Resource()
while True:
user_input = input("\nWhat format would you like 'coloured' or 'greyscale'?\n")
while user_input.lower() not in ['coloured', 'greyscale', 'report', 'off']:
user_input = input("Invalid!!!! Please put in 'coloured' or 'greyscale'\n")
while user_input == 'report':
print(resource.report())
user_input = input("\nWhat format would you like 'coloured' or 'greyscale'?\n")
            # This switches off the printer if 'off' is typed
if user_input == 'off':
return 'Thanks for using our service Bye.....'
number_of_pages = input("How many pages?\n")
while number_of_pages.isnumeric() is not True:
print("You've entered an invalid number")
number_of_pages = input("How many pages?\n")
            # This checks if there are enough resources to print the pages the user wants.
            # If not, the printer tells the user there are not enough resources; if yes,
            # the money (currencies) is requested from the user
ink_and_price = resource.check_resource(user_input, number_of_pages)
if ink_and_price != True:
print(ink_and_price)
else:
print(f'Your price is ₦{resource.calculate_bill()}')
print("Please insert Monies.")
# This catches an error if the user type in a string instead of a number
try:
biyar = int(input('How many Biyar: '))
faiba = int(input('How many Faiba: '))
muri = int(input('How many Muri: '))
wazobia = int(input('How many Wazobia: '))
                except ValueError:
                    print("You've entered an invalid input")
                    continue  # re-prompt instead of recursing with undefined payment values
                # This adds all the currencies paid by the user
calculate_money = Cashier(biyar, faiba, muri, wazobia)
calculate_money.get_total()
                # This confirms whether the payment is less than or greater than the actual cost
print(calculate_money.confirming_payment(resource))
                # This updates the resources after printing the pages
resource.deduct_resource()
print('Thanks for using our Printing service. Hope you enjoyed it!!')
print("===================================================================")
print(Printer.printer_machine())
|
from tensorflow import keras
from tensorflow.keras import layers, utils
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from original_approach import dataset
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import Adam
from skopt.space import Real, Categorical, Integer
X = dataset.X
y = dataset.y
def build_model(
learning_rate,
blocks,
kernel_size,
initial_filters,
dropout
):
input_shape = (*X[0].shape, 1)
model = keras.Sequential()
model.add(keras.Input(shape=(input_shape)))
for block in range(blocks):
filters = initial_filters*(block+1)
model.add(layers.Conv2D(filters=filters, kernel_size=kernel_size, activation="relu",
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPool2D())
model.add(layers.Flatten())
model.add(layers.Dropout(dropout))
model.add(layers.Dense(units=dataset.num_outputs, activation='softmax'))
model.compile(
optimizer=Adam(learning_rate=learning_rate),
loss='categorical_crossentropy',
)
print(input_shape)
model.summary()
return model
def reshape_3d(x):
return x.reshape(*x.shape, 1)
estimator = Pipeline([
('reshape', FunctionTransformer(func=reshape_3d)),
('model', KerasClassifier(build_fn=build_model, verbose=0)),
])
param_distributions_original = dict(
model__batch_size=[50, 100, 200],
model__epochs=[20, 50],
model__learning_rate=[0.001, 0.005],
model__blocks=[1, 2],
model__kernel_size=[2, 3],
model__initial_filters=[30, 60],
model__dropout=[0.2, 0.4],
)
param_distributions = dict(
model__batch_size=[50, 100, 200],
model__epochs=[20, 40, 60, 90],
model__learning_rate=[0.001, 0.003, 0.005],
model__blocks=[1, 2, 3, 4],
model__kernel_size=[2, 3, 4],
model__initial_filters=[200, 300, 500],
model__dropout=[0.2, 0.3, 0.4, 0.5, 0.6],
)
search_spaces = dict(
model__batch_size=Integer(10, 300),
model__epochs=Integer(5, 50),
model__learning_rate=Real(0.0001, 0.01, prior='log-uniform'),
model__blocks=Integer(1, 3),
model__kernel_size=Integer(2, 5),
model__initial_filters=Integer(10, 500),
model__dropout=Real(0.2, 0.4),
)
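# A sketch (assumed usage, not part of the original module) of how the
# search_spaces above could drive a Bayesian hyper-parameter search with
# scikit-optimize; the n_iter and cv values are illustrative only.
from skopt import BayesSearchCV

if __name__ == "__main__":
    search = BayesSearchCV(
        estimator=estimator,
        search_spaces=search_spaces,
        n_iter=25,
        cv=3,
    )
    search.fit(X, y)
    print(search.best_params_)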
|
import boto3
import json
# Document
documentName = "7_screen.png"
documentName = "test2.png"
# Read document content
with open(documentName, 'rb') as document:
imageBytes = bytearray(document.read())
# Amazon Textract client
textract = boto3.client('textract')
# Call Amazon Textract
response = textract.detect_document_text(Document={'Bytes': imageBytes})
print(json.dumps(response, indent=4))
# Print detected text
for item in response["Blocks"]:
if item["BlockType"] in ["LINE", "WORD"]:
print(item["Text"])
|
__author__ = "Vincent"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def clean_df():
global df
df= pd.read_csv("DOHMH_New_York_City_Restaurant_Inspection_Results.csv")
df = df.dropna()
#Remove invalid grades (P & Z = grade pending)
mask = df.GRADE.isin(['P','Z','Not Yet Graded'])
df = df[~mask]
return df
def test_grades(grade_list):
    '''This function states whether grades are improving overall.
    To do this, we check whether the most recent grade is better than the average grade.
    input = list of grades ordered from most to least recent
    output = 1 if the grade improved, 0 if status quo, -1 if it decreased'''
improving = 0
# convert letter grades into numerical grades
    numerical_grade_list = list(grade_list)  # copy so the caller's list is not mutated
for index, grade in enumerate(grade_list):
if grade == 'A':
numerical_grade_list[index] = 3
elif grade == 'B':
numerical_grade_list[index] = 2
elif grade == 'C':
numerical_grade_list[index] = 1
average_grade = sum(numerical_grade_list) / float(len(numerical_grade_list))
    if numerical_grade_list[0] > average_grade:
        improving = 1
    elif numerical_grade_list[0] == average_grade:
        improving = 0
    else:
        improving = -1
return improving
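# Illustrative check (hypothetical grade history): a restaurant whose latest
# grade is an A but whose older grades include a B and a C counts as improving.
assert test_grades(['A', 'B', 'C']) == 1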
def test_restaurant_grades(camis_id):
grade_list = (((df[df['CAMIS']==camis_id]).sort(columns='GRADE DATE'))['GRADE']).tolist()
return test_grades(grade_list)
def borough_evolution():
for boro in df['BORO'].unique().tolist():
camis_ids = ((df[df['BORO']==boro])['CAMIS']).unique().tolist()
sum = 0
for camis_id in camis_ids:
sum = sum + test_restaurant_grades(camis_id)
print "The evolution of borough {0} is {1}".format(boro,sum)
def grade_counter(df):
grades = ['A','B','C']
grouped_grade = {}
grade_count = {}
for grade in grades:
grades_df = df[df["GRADE"] == grade]
grades_df['GRADE DATE'] = pd.to_datetime(grades_df["GRADE DATE"])
grades_df = grades_df.sort(columns = 'GRADE DATE', ascending = True )
grouped_grade[grade] = grades_df.groupby(grades_df['GRADE DATE'].map(lambda x: x.year)).count()
    for grade in grouped_grade:
count = grouped_grade[grade]['GRADE'].tolist()
if len(count)==4:
count.insert(0, 0)
grade_count[grade] = count
return grade_count
def plot():
#Plot for nyc
grade_count = grade_counter(df)
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['A'], label="A")
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['B'], label="B")
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['C'], label="C")
plt.title("Evolution of number of restaurant per grades - nyc")
plt.xlabel("Year")
plt.xticks([2011, 2012, 2013, 2014, 2015])
plt.legend(loc = 2)
plt.savefig("grade_improvement_nyc.pdf")
plt.close()
# plot for the different boroughs
for boro in df['BORO'].unique().tolist():
grade_count = grade_counter(df[df['BORO']==boro])
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['A'], label="A")
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['B'], label="B")
plt.plot([2011, 2012, 2013, 2014, 2015], grade_count['C'], label="C")
plt.title("Evolution of number of restaurant per grades - {0}".format(boro))
plt.xlabel("Year")
plt.xticks([2011, 2012, 2013, 2014, 2015])
plt.legend(loc = 2)
plt.savefig("grade_improvement_{0}.pdf".format(str(boro).lower()))
plt.close()
def main():
borough_evolution()
plot()
if __name__ == '__main__':
clean_df()
main()
|
from django.urls import path
from django.urls.resolvers import URLPattern
from . import views
urlpatterns = [
path("", views.index)
]
|
import utils
import time
def url_name(url):
# the web page opens up
chrome_driver.get(url)
    # pause for 4 seconds so the page finishes loading and its elements
    # are present before we try to interact with them
time.sleep(4)
def first_picture():
# finds the first picture
pic = chrome_driver.find_element_by_class_name("_9AhH0")
pic.click() # clicks on the first picture
def like_pic():
time.sleep(4)
like = chrome_driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/article/div[3]/section[1]/span[1]/button/div/span')
time.sleep(2)
like.click() # clicking the like button
def next_picture():
    time.sleep(2)
    # finds the button which gives the next picture;
    # returns False when no such button exists so continue_liking() can stop
    # nex = chrome_driver.find_element_by_class_name("HBoOv")
    try:
        nex = chrome_driver.find_element_by_xpath("//a[contains(.,'Next')]")
    except Exception:
        return False
    time.sleep(1)
    return nex
def continue_liking():
while (True):
next_el = next_picture()
# if next button is there then
if next_el != False:
# click the next button
next_el.click()
time.sleep(2)
# like the picture
like_pic()
time.sleep(2)
else:
print("not found")
break
chrome_driver = utils.get_driver()
time.sleep(1)
url_name(utils.URL)
utils.login(chrome_driver, utils.USERNAME, utils.PASSWORD)
first_picture()
like_pic()
continue_liking()
|
import time
import sys
import ibmiotf.application
import ibmiotf.device
import random
import requests
#Provide your IBM Watson Device Credentials
organization = "985bj1"
deviceType = "ibmiot"
deviceId = "1001"
authMethod = "token"
authToken = "1234567890"
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
deviceCli.connect()
while True:
length1=random.randint(0, 100)
length2=random.randint(0, 100)
length3=random.randint(0, 100)
leakage=random.randint(0,30)
data = { 'Jar1' : length1, 'Jar2': length2, 'Jar3': length3, 'Leakage': leakage }
#notification alerts-----------------------------------------------------------
if length1<=25:
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"W6zXBDk7wCu90PghITyr15YnHlKpaLbo3txQ8JVvR2djqeNmAc1ogy0ztbx76P8uTCDfrakjnshpGNcK","sender_id":"FSTSMS","message":"Jar1 is empty","language":"english","route":"p","numbers":"7993778964"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
if length2<=25:
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"W6zXBDk7wCu90PghITyr15YnHlKpaLbo3txQ8JVvR2djqeNmAc1ogy0ztbx76P8uTCDfrakjnshpGNcK","sender_id":"FSTSMS","message":"Jar2 is empty","language":"english","route":"p","numbers":"7993778964"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
if length3<=25:
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"W6zXBDk7wCu90PghITyr15YnHlKpaLbo3txQ8JVvR2djqeNmAc1ogy0ztbx76P8uTCDfrakjnshpGNcK","sender_id":"FSTSMS","message":"Jar3 is empty","language":"english","route":"p","numbers":"7993778964"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
Exhaust_fan=0
if leakage>=15:
Exhaust_fan = 1
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"W6zXBDk7wCu90PghITyr15YnHlKpaLbo3txQ8JVvR2djqeNmAc1ogy0ztbx76P8uTCDfrakjnshpGNcK","sender_id":"FSTSMS","message":"Cylinder is leaking and Exhaust fan is ON","language":"english","route":"p","numbers":"7993778964"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
#------------------------------------------------------------------------------
def myOnPublishCallback():
print ("Jar1 = %s m" % length1, "and Jar2 = %s m" % length2, "and Jar3 = %s m" % length3, "and Leakage = %s" % leakage )
success = deviceCli.publishEvent("Parking", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(2)
deviceCli.commandCallback = myCommandCallback
deviceCli.disconnect()
|
# ---------------------------------------------------------------------------
# extract_basin_nlcd_grid_count.py
# Created on: 2014-07-22 18:31:19.00000 (generated by ArcGIS/ModelBuilder)
# Description: extract NLCD gridded data using basin shapefiles and write
# a land cover count summary to csv file
# ---------------------------------------------------------------------------
################### User Input #####################
RFC = 'LMRFC'
basins_folder = r'P:\NWS\GIS\LMRFC\Shapefiles\basins_calib'
# if you only want to run specific basins -> list them below
# otherwise set it equal to empty list (basins_overwrite = [])
basins_overwrite = []
output_dir = 'P:\\NWS\\GIS\\' + RFC + '\\NLCD\\data_files\\' # this must contain a folder for each basin (eg. FONN7)
################# End User Input ######################
# location of PRISM Raster (CONUS)
NLCD_Dataset = r'Q:\\GISLibrary\\NLCD\\nlcd_2011_landcover_2011_edition_2014_03_31.img'
# Import arcpy module
import arcpy
import os
import csv
arcpy.env.overwriteOutput = True
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
# Set Geoprocessing environments
arcpy.env.scratchWorkspace = "P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output.gdb" # temporary file storage directory
arcpy.env.parallelProcessingFactor = "50"
print 'ok so far...'
#################################################################################
# find all basins in RFC or only run the specified basin list
basin_files = os.listdir(basins_folder) # list all basin shapefiles in the above specified directory
if len(basins_overwrite) != 0:
    basin_files = basins_overwrite # use the basins_overwrite variable to run only specified basins instead of all RFC basins
basins = []
check_dir = os.listdir(output_dir) # list all folders in output_dir
for each in basin_files:
if each[:5] not in basins:
basins.append(each[:5])
print 'Identified ' + str(len(basins)) + ' basins in ' + RFC + ' input directory...'
# loop through basins
for basin in basins:
## Script arguments
Basin_Boundary = basins_folder + '\\' + basin + '.shp'
print basin
## Local variables:
Basin_Raster = 'P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output\\' + basin
Basin_Points = 'P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output\\' + basin + '_points'
NLCD = 'P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output\\'
# Process: Extract by Mask
print 'Extracting by mask...'
#arcpy.gp.ExtractByMask_sa(NLCD_Dataset, Basin_Boundary, Basin_Raster)
arcpy.Clip_management(NLCD_Dataset, "#", Basin_Raster, Basin_Boundary, "0", "ClippingGeometry")
# Process: Build Raster Attribute Table
print 'Building basin raster nlcd attribute table...'
arcpy.BuildRasterAttributeTable_management(Basin_Raster, "Overwrite")
arcpy.TableToTable_conversion(NLCD_Dataset, NLCD, 'nlcd_table.dbf')
# Process: Join Field
print 'Joining field with land cover name...'
arcpy.JoinField_management(Basin_Raster, "Value", NLCD + 'nlcd_table.dbf', "VALUE", "Land_Cover")
# Process: Table to Table
print 'Converting to .dbf table...'
arcpy.TableToTable_conversion(Basin_Raster, NLCD, basin + '_NLCD.dbf')#, "", "Value \"Value\" false false false 4 Long 0 0 ,First,#,P:\\NWS\\GIS\\Models\\Model_Output.gdb\\Extract_img1\\Band_1,Value,-1,-1;Count \"Count\" false false false 8 Double 0 0 ,First,#,P:\\NWS\\GIS\\Models\\Model_Output.gdb\\Extract_img1\\Band_1,Count,-1,-1;Land_Cover \"Land_Cover\" true false false 254 Text 0 0 ,First,#,P:\\NWS\\GIS\\Models\\Model_Output.gdb\\Extract_img1\\Band_1,Land_Cover,-1,-1", "")
# Process: output csv file
print 'Creating '+ basin + '_NLCD.csv file...'
rows = arcpy.SearchCursor(NLCD + basin + '_NLCD.dbf')
nlcd_csv = open(output_dir + basin + '_NLCD.csv', 'wb')
csvFile = csv.writer(nlcd_csv) #output csv
fieldnames = [f.name for f in arcpy.ListFields(NLCD + basin + '_NLCD.dbf')]
allRows = []
for row in rows:
rowlist = []
for field in fieldnames:
rowlist.append(row.getValue(field))
allRows.append(rowlist)
csvFile.writerow(fieldnames)
for row in allRows:
csvFile.writerow(row)
row = None
rows = None
nlcd_csv.close()
print 'Script completed!!'
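# A compact alternative sketch for the csv export above (not part of the original
# script): arcpy.da.SearchCursor (available in ArcGIS 10.1+) yields plain tuples,
# which avoids the per-field getValue() calls.
def dbf_to_csv(dbf_path, csv_path):
    field_names = [f.name for f in arcpy.ListFields(dbf_path)]
    with open(csv_path, 'wb') as out_file:  # 'wb' matches the Python 2 csv usage above
        writer = csv.writer(out_file)
        writer.writerow(field_names)
        with arcpy.da.SearchCursor(dbf_path, field_names) as cursor:
            for row in cursor:
                writer.writerow(row)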
|
import cv2
import numpy as np
# Called every time a trackbar changes; we don't need to react here.
def emptyFunction(value):
    pass
windowName = 'BGR color palette'
cv2.namedWindow(windowName)
img1 = np.zeros((512, 512, 3), np.uint8)
cv2.createTrackbar('B', windowName, 0, 255, emptyFunction)  # name, window, initial value, max value, callback
cv2.createTrackbar('G', windowName, 0, 255, emptyFunction)
cv2.createTrackbar('R', windowName, 0, 255, emptyFunction)
# create switch for ON/OFF functionality
switch = '0: Normal \n1 : Invert'
cv2.createTrackbar(switch, windowName, 0, 1, emptyFunction)
while True:
cv2.imshow(windowName, img1)
if cv2.waitKey(1) == 27: # ESC to exit
break
blue = cv2.getTrackbarPos('B', windowName)
green = cv2.getTrackbarPos('G', windowName)
red = cv2.getTrackbarPos('R', windowName)
if cv2.getTrackbarPos(switch, windowName):
img1[:] = abs(255 - np.array([blue, green, red]))
else:
img1[:] = [blue, green, red]
cv2.destroyAllWindows()
# HSV palette
windowName = 'HSV color palette'
cv2.namedWindow(windowName)
img1 = np.zeros((512, 512, 3), np.uint8) # Window Size: 512x512
cv2.createTrackbar('H', windowName, 0, 180, emptyFunction)  # name, window, initial value, max value (hue uses 0-180 in OpenCV), callback
cv2.createTrackbar('S', windowName, 0, 255, emptyFunction)
cv2.createTrackbar('V', windowName, 0, 255, emptyFunction)
# create switch for ON/OFF functionality
switch = '0: Normal \n1 : Invert'
cv2.createTrackbar(switch, windowName, 0, 1, emptyFunction)
while True:
cv2.imshow(windowName, cv2.cvtColor(img1, cv2.COLOR_HSV2BGR))
if cv2.waitKey(1) == 27: # ESC to exit
break
hue = cv2.getTrackbarPos('H', windowName)
saturation = cv2.getTrackbarPos('S', windowName)
value = cv2.getTrackbarPos('V', windowName)
if cv2.getTrackbarPos(switch, windowName):
img1[:, :, 1:] = abs(255 - np.array([saturation, value]))
img1[:, :, 0] = abs(180 - np.array([hue]))
else:
img1[:] = [hue, saturation, value]
cv2.destroyAllWindows()
|
from collections import defaultdict
def pentagonal(n):
return int((3 * n ** 2 - n) / 2)
pentagon_numbers = defaultdict(bool)
pentagonal_list = []
# list for iterating over numbers, dict for checking existence
for i in range(1, 10000):
pentagonal_list.append(pentagonal(i))
for i in pentagonal_list:
pentagon_numbers[i] = True
D = 1000000000 # arbitrarily chosen upper bound
for index, j in enumerate(pentagonal_list):
for k in pentagonal_list[index:]:
diff = k - j
        if diff > D:  # break the inner loop; later differences only grow
break
if pentagon_numbers[diff] and pentagon_numbers[k + j] and D > diff:
D = diff
print("j", j, "k", k)
print(D)
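# An optional closed-form check (not in the original): x is pentagonal exactly
# when (1 + sqrt(24*x + 1)) / 6 is a positive integer, which avoids building the
# lookup dict above. math.isqrt requires Python 3.8+.
import math

def is_pentagonal(x):
    if x < 1:
        return False
    root = math.isqrt(24 * x + 1)
    return root * root == 24 * x + 1 and (1 + root) % 6 == 0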
|
# -*- coding: utf-8 -*-
import random
class Baraja(object):
def __init__(self):
self.palos = ["Espadas", "Corazones", "Tréboles", "Diamantes"]
self.rangos = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King", "As"]
self.maso = []
for palo in self.palos:
for rango in self.rangos:
self.maso.append(rango + " de " + palo)
def barajear(self):
random.shuffle(self.maso)
def repartir(self):
print(self.maso.pop())
# self.maso.pop(0)
"""
Usando (instanciando) el objeto
"""
baraja = Baraja()
baraja.barajear()
baraja.repartir()
baraja.repartir()
baraja.repartir()
baraja.repartir()
baraja.repartir()
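# A tiny optional extension (a sketch, not part of the original): dealing a whole
# hand at once by popping n cards from the already shuffled deck.
def repartir_mano(baraja, n=5):
    return [baraja.maso.pop() for _ in range(n)]

# Example: print(repartir_mano(baraja))  # deals 5 more cards from the same deck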
|
import math
# Qt
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QLine
from PyQt5.QtCore import QPoint
from PyQt5.QtGui import QPolygon
class Arrow:
def __init__(self, points, color=Qt.black, fill=True, tipLength=5, orientation="right"):
"""
Creates an Arrow object
following given path (point list)
"""
self.setColor(color)
self.setFill(fill)
        # create line objects from consecutive point pairs
        i = 0
        self.lines = []
        while i < len(points) // 2:
            self.lines.append(QLine(points[2 * i], points[2 * i + 1]))
            i += 1
        # create arrow head (legs at 45 degrees from the tip; tan expects radians,
        # and QPoint needs integer coordinates)
        offset = int(round(math.tan(math.radians(45)) * 5))
        tip = points[-1]
        if orientation == "right":
            p1 = QPoint(tip.x() - 5, tip.y() + offset)
            p2 = QPoint(tip.x() - 5, tip.y() - offset)
        elif orientation == "left":
            tip = QPoint(tip.x() - 5, tip.y())
            p1 = QPoint(tip.x() + 5, tip.y() - offset)
            p2 = QPoint(tip.x() + 5, tip.y() + offset)
        elif orientation == "up":
            tip = QPoint(tip.x(), tip.y() + 5)
            p1 = QPoint(tip.x() - offset, tip.y() - 10)
            p2 = QPoint(tip.x() + offset, tip.y() - 10)
        elif orientation == "down":
            tip = QPoint(tip.x(), tip.y() + 5)
            p1 = QPoint(tip.x() - offset, tip.y() - 5)
            p2 = QPoint(tip.x() + offset, tip.y() - 5)
self.arrowHead = QPolygon([p1,tip,p2])
def getLines(self):
"""
Returns line objects between two
pipeline objects
"""
return self.lines
def getArrowHead(self):
"""
Returns arrow head
at the tip of connection
"""
return self.arrowHead
def getArrow(self):
"""
Returns line + arrow head
"""
return [self.getLines(),self.getArrowHead()]
def getColor(self):
"""
Returns line & arrow head color
"""
return self.color
def setColor(self, color):
"""
Sets line & arrow head color
"""
self.color = color
def setFill(self, fill):
"""
        Sets whether the arrow head should
be filled or not
"""
self.fill = fill
def getFill(self):
"""
        Returns True if the arrow head should
be filled
"""
return self.fill
def draw(self, painter):
"""
Draws pipeline connection
        i.e., draws both the lines & arrow head
"""
[lines, arrowh] = self.getArrow()
painter.setPen(self.getColor())
if (self.getFill()):
painter.setBrush(self.getColor())
for line in lines:
painter.drawLine(line) # draw line
painter.drawPolygon(arrowh) # draw arrow head
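# A minimal usage sketch (not part of the original module): drawing one Arrow
# inside a QWidget's paintEvent. The demo widget and point values are illustrative.
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QPainter

class ArrowDemo(QWidget):
    def paintEvent(self, event):
        painter = QPainter(self)
        arrow = Arrow([QPoint(20, 60), QPoint(180, 60)], color=Qt.darkBlue,
                      orientation="right")
        arrow.draw(painter)
        painter.end()

if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    demo = ArrowDemo()
    demo.show()
    sys.exit(app.exec_())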
|
from unittest import TestCase
from app import app
from models import User, db, Post, Tag, PostTag
# Perform tests on a Test database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly_test'
app.config['SQLALCHEMY_ECHO'] = False
# Raise real Python errors instead of Flask's HTML error pages during tests
app.config['TESTING'] = True
db.drop_all()
db.create_all()
class UserViewsTestCase(TestCase):
def setUp(self):
# Clear any leftover entries in DB
Post.query.delete()
User.query.delete()
# Create a sample entry
user = User(first_name="John", last_name="Doe")
db.session.add(user)
db.session.commit()
# Save sample entry's id
self.user_id = user.id
def tearDown(self):
# Clear any tainted DB transactions
db.session.rollback()
def test_directory_view(self):
with app.test_client() as client:
res = client.get('/users')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('John Doe', html)
def test_user_add_view(self):
with app.test_client() as client:
res = client.get('/users/new')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('<form action="/users/new" method="POST"', html)
def test_user_add_post(self):
with app.test_client() as client:
res = client.post('/users/new', data={'first-name':'Jimmy', 'last-name':'Dean', 'image-url': ''}, follow_redirects=True )
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('Jimmy Dean', html)
def test_user_details(self):
with app.test_client() as client:
res = client.get(f'/users/{self.user_id}')
html = res.get_data(as_text=True)
user = User.query.get(self.user_id)
self.assertEqual(res.status_code, 200)
self.assertIn('John Doe', html)
self.assertIn(f'<img src="{user.image_url}"', html)
def test_user_edit_post(self):
with app.test_client() as client:
data = {'first-name':'James', 'last-name':'Doe', 'image-url':''}
res = client.post(f'/users/{self.user_id}/edit', data=data, follow_redirects=True)
html = res.get_data(as_text=True)
user = User.query.get(self.user_id)
self.assertEqual(res.status_code, 200)
self.assertIn('James Doe', html)
self.assertEqual(user.first_name, 'James')
def test_user_delete_post(self):
with app.test_client() as client:
res = client.post(f'/users/{self.user_id}/delete', follow_redirects=True)
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertNotIn('John Doe', html)
self.assertEqual(User.get_all(), [])
class PostViewsTestCase(TestCase):
def setUp(self):
# Clear leftover entries
Tag.query.delete()
Post.query.delete()
User.query.delete()
# New sample entries
user = User(first_name="John", last_name="Doe")
db.session.add(user)
db.session.commit()
post = Post(title="Day at the Zoo", content="Lorem ipsum dolor", poster_id = user.id)
db.session.add(post)
db.session.commit()
# Cache IDs
self.post_id = post.id
self.user_id = user.id
def tearDown(self):
db.session.rollback()
def test_new_post_form_view(self):
with app.test_client() as client:
res = client.get(f'/users/{self.user_id}/posts/new')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn(f'form action="/users/{self.user_id}/posts/new" method="POST"', html)
self.assertIn('Add Post for John Doe', html)
def test_new_post_submission(self):
with app.test_client() as client:
data = {'post-title': 'My Newest Post', 'post-content': 'Ladee-da-dee-dah', 'post-tags': 'beatles&, lyrics '}
res = client.post(f'/users/{self.user_id}/posts/new', data=data, follow_redirects=True)
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('My Newest Post', html)
self.assertIn('John Doe', html)
self.assertIn('beatles', html)
self.assertIn('lyrics', html)
posts = Post.query.filter_by(title='My Newest Post').all()
self.assertNotEqual(posts, [])
tags = Tag.query.all()
self.assertEqual(len(tags), 2)
    def test_failed_new_post_submission(self):
with app.test_client() as client:
data = {'post-title': '', 'post-content': 'oainrgoairnhae', 'post-tags': ''}
res = client.post(f'/users/{self.user_id}/posts/new', data=data, follow_redirects=True)
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('alert', html)
def test_post_view(self):
with app.test_client() as client:
res = client.get(f'/posts/{self.post_id}')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('Day at the Zoo', html)
self.assertIn('Lorem ipsum dolor', html)
self.assertIn('John Doe', html)
def test_deleted_user_post_view(self):
with app.test_client() as client:
User.query.filter_by(id=self.user_id).delete()
res=client.get(f'/posts/{self.post_id}')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('Day at the Zoo', html)
self.assertIn('Lorem ipsum dolor', html)
self.assertIn('deleted user', html)
def test_post_edit_form_view(self):
with app.test_client() as client:
res = client.get(f'/posts/{self.post_id}/edit')
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn(f'<form action="/posts/{self.post_id}/edit" method="POST"', html)
def test_edit_post_submission(self):
with app.test_client() as client:
data = {'post-title': 'New Title', 'post-content': 'New Content', 'post-tags': 'la-dee-da and, things'}
res = client.post(f'/posts/{self.post_id}/edit', data=data, follow_redirects=True)
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('New Title', html)
self.assertIn('New Content', html)
self.assertIn('ladeeda', html)
self.assertIn('things', html)
post = Post.query.get(self.post_id)
self.assertEqual(post.title, 'New Title')
self.assertEqual(post.content, 'New Content')
def test_failed_edit_submission(self):
with app.test_client() as client:
data = {'post-title': '', 'post-content':'', 'post-tags': ''}
res = client.post(f'/posts/{self.post_id}/edit', data=data, follow_redirects=True)
html = res.get_data(as_text=True)
self.assertEqual(res.status_code, 200)
self.assertIn('Day at the Zoo', html)
self.assertIn('Lorem ipsum dolor', html)
post = Post.query.get(self.post_id)
self.assertEqual(post.title, 'Day at the Zoo')
self.assertEqual(post.content, 'Lorem ipsum dolor')
def test_deletion_submission(self):
with app.test_client() as client:
res = client.post(f'/posts/{self.post_id}/delete')
self.assertEqual(res.status_code, 302)
post = Post.query.get(self.post_id)
self.assertIsNone(post)
|
__author__ = 'Justin'
import networkx as nx
from random import choice
from geopy.distance import vincenty as latlondist
def randomnodes(G,distancelimit,print_=False):
lons = nx.get_node_attributes(G,'lon')
lats = nx.get_node_attributes(G,'lat')
nodesdist = 0
connected = False
while(nodesdist < distancelimit or not(connected)):
        randomnodes = [choice(list(G.nodes())), choice(list(G.nodes()))]
origin = randomnodes[0]
destination = randomnodes[1]
nodesdist = latlondist([lats[origin],lons[origin]],[lats[destination],lons[destination]]).miles
if nx.has_path(G,origin,destination):
connected = True
else:
connected = False
if(print_):
print('Source:',[lats[origin],lons[origin]])
print('Destination',[lats[destination],lons[destination]])
return origin,destination
def randompairs(G,numpairs,distancelimit):
pairs = []
for index in range(1,numpairs+1,1):
origin,destination = randomnodes(G,distancelimit)
pair = [origin,destination]
pairs.append(pair)
return pairs
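# A tiny usage sketch (not part of the original module): a toy graph with
# 'lat'/'lon' node attributes, from which two well-separated connected pairs are drawn.
if __name__ == '__main__':
    G = nx.Graph()
    G.add_node('A', lat=40.71, lon=-74.00)   # New York
    G.add_node('B', lat=41.88, lon=-87.63)   # Chicago
    G.add_node('C', lat=34.05, lon=-118.24)  # Los Angeles
    G.add_edges_from([('A', 'B'), ('B', 'C')])
    print(randompairs(G, numpairs=2, distancelimit=500))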
|
print("*"*30,"捕鱼达人","*"*30)
username = input("输入参与者用户名:")
password = input("输入密码:")
print("%s 请充值才能加入游戏!" % username)
coins = int(input("您充值的金额为:"))
print("%s 元充值成功!当前游戏币是:%d" % (username,coins))
|
from classes.DBOperations import *
import unidecode
import requests
from collections import OrderedDict
postalcodedict = {}
class weather:
def __init__(self, dbOperations=None):
self.dbOperations = dbOperations
# simple function that returns a key for a given value from a dict
def get_key(self, val):
for key, value in postalcodedict.items():
if val in value:
return key
return "key doesn't exist"
# this function interacts with the weather API and updates a single line according to the postalcode given
def insertWeatherData(self, city, pcLocation):
if self.dbOperations is None:
self.dbOperations = DBOperations().getDB()
self.dbOperations.getConnection()
while True:
# takes away all accents in a word to avoid errors
city = unidecode.unidecode(city)
# giving the city name to the API
r = requests.get(
'http://api.worldweatheronline.com/premium/v1/weather.ashx?key=54e2762b58214c3e833145855190705&q=' + city + ',Switzerland&format=json')
json_data = r.json()
# extracting targeted data on json file (location found by API, max temp and min temp per month)
location = json_data['data']['request'][0]['query']
            months = json_data['data']['ClimateAverages'][0]['month']
            max_temps = [m['absMaxTemp'] for m in months]   # Jan .. Dec
            min_temps = [m['avgMinTemp'] for m in months]   # Jan .. Dec
# when the weather API doesn't recognise the location in Switzerland,
# this part redirects the city to the city right before in the postalcodedict and goes back to the beginning of the loop
if "Switzerland" not in location or "USA" in location:
print(city)
ordered = OrderedDict(postalcodedict)
keys = list(ordered.keys())
index = keys.index(city)-1
print(ordered[keys[index]])
closest_pc = int(ordered[keys[index]][0])
print(closest_pc)
                city = self.get_key(closest_pc)
print("Closest city: "+ city)
# when the weather API does recognise the location in Switzerland, this part updates the database
# once updated, the while loop breaks
else:
print("Successful")
                with self.dbOperations.connection.cursor() as cursor:
sql = "UPDATE `weatherData` SET `maxtempJan`= %s, `maxtempFeb`= %s, `maxtempMar`= %s, " \
"`maxtempApr`= %s, `maxtempMai`= %s, `maxtempJun`= %s, `maxtempJul`= %s, `maxtempAug`= %s, " \
"`maxtempSep`= %s, `maxtempOct`= %s, `maxtempNov`= %s, `maxtempDec`= %s, `mintempJan`= %s, " \
"`mintempFeb`= %s, `mintempMar`= %s, `mintempApr`= %s, `mintempMai`= %s, `mintempJun`= %s, " \
"`mintempJul`= %s, `mintempAug`= %s, `mintempSep`= %s, `mintempOct`= %s, `mintempNov`= %s, " \
"`mintempDec`= %s WHERE `postalCode` = %s;"
                    cursor.execute(sql, max_temps + min_temps + [pcLocation])
break
self.dbOperations.connection.commit()
# this function updates the whole table with weather data
def updatewholetable(self):
if self.dbOperations is None:
self.dbOperations = DBOperations().getDB()
self.dbOperations.getConnection()
# this part selects all the lines in the table
        with self.dbOperations.connection.cursor() as cursor:
sql = "SELECT * FROM `weatherData`"
cursor.execute(sql)
codes = cursor.fetchall()
x = 0
# after selecting all lines in the database, each line is filled with the weather data here
for code in codes:
city = code["city"]
pc = code["postalCode"]
                self.insertWeatherData(city, pc)
x += 1
print(x)
cursor.close()
postalCodes = postalCodes(DBOperations("kezenihi_srmidb3"))
postalCodes.getPostalCodesDict()
weather = weather(DBOperations("kezenihi_srmidb3"))
weather.updatewholetable()
|
import gc
gc.enable()
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class FeatureSelection(object):
def __init__(self, feature_score_name):
self.feature_score_name = feature_score_name
pass
def load_data(self):
pass
def get_feature_score(self):
pass
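# A minimal, hypothetical sketch of how the skeleton above might be filled in;
# the class name, scoring rule and stand-in data below are illustrative assumptions,
# not part of the original code.
import numpy as np

class VarianceFeatureSelection(FeatureSelection):
    def __init__(self, feature_score_name="variance"):
        super(VarianceFeatureSelection, self).__init__(feature_score_name)
        self.X = None

    def load_data(self):
        # stand-in data; a real implementation would read a dataset from disk
        self.X = np.random.rand(100, 5)

    def get_feature_score(self):
        # score each column (feature) by its variance
        return self.X.var(axis=0)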
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/12/3 7:13 PM
# @Author : zs
# @Site :
# @File : test.py
# @Software: PyCharm
import os
import shutil
def main(counts):
    """Return the two largest values in counts (as ints), largest first."""
    counts.sort()
    a = max(int(counts[0]), int(counts[1]))
    b = min(int(counts[0]), int(counts[1]))
    for i in range(2, len(counts)):
        if int(counts[i]) > a:
            b = a
            a = int(counts[i])
        elif int(counts[i]) > b:
            b = int(counts[i])
    return a, b
def onefile(counts):
    """Return the only value when there is a single entry."""
    counts.sort()
    return counts[0]
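# An alternative sketch (not part of the original): heapq.nlargest can replace the
# manual two-largest scan in main() above. It raises IndexError for a single-element
# list, matching the behaviour the caller's except-clause relies on.
import heapq

def two_largest(counts):
    top = heapq.nlargest(2, (int(c) for c in counts))
    if len(top) < 2:
        raise IndexError("need at least two counts")
    return top[0], top[1]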
if __name__ == '__main__':
oldpath = '/home/zs/face_rec/test_out'
# new_path = '/home/zs/face_rec/test'
for i in os.listdir(oldpath):
new_path = '/home/zs/face_rec/two_file'
new_path = os.path.join(new_path, i)
i = os.path.join(oldpath, i)
dict = {}
for j in os.listdir(i):
j = os.path.join(i, j)
dict.update({len(os.listdir(j)): j})
print(dict)
# dict = {'12': '/home/test1/', '3': '/home/test2/', '9': '/home/test3/', '1': '/home/test4/', '91': '/home/test5/'}
filelist = list(dict.keys())
try:
x, y = main(filelist)
print(x, y)
path1, path2 = dict.get(x), dict.get(y)
print(path1, path2)
# print(dict.get(str(y)))
# os.makedirs(new_path, exist_ok=True)
if not os.path.exists(os.path.join(new_path, '1')):
shutil.copytree(path1, os.path.join(new_path, '1'))
# os.makedirs(new_path, exist_ok=True)
shutil.copytree(path2, os.path.join(new_path, '2'))
else:
                print('Target data files already exist!')
except IndexError:
x = onefile(filelist)
print(x)
path1 = dict.get(x)
print(path1)
# os.makedirs(new_path, exist_ok=True)
if not os.path.exists(os.path.join(new_path, '1')):
shutil.copytree(path1, os.path.join(new_path, '1'))
else:
                print('Target data files already exist!')
|
# Generated by Django 2.1.2 on 2018-12-11 05:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ann', '0015_datasetdetailreview_masa_studi'),
]
operations = [
migrations.RenameField(
model_name='hiddenlayer',
old_name='neuralnetwork',
new_name='neural_network',
),
migrations.RenameField(
model_name='training',
old_name='neuralnetwork',
new_name='neural_network',
),
]
|
#--*-- coding:utf-8 --*--
class Computer:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
def execute(self):
return 'execute a program'
class Synthesizer:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
def speak(self):
        return 'is playing an electronic song'
class Human:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
def speak(self):
return 'Say Hello'
class Adapter:
def __init__(self,obj,adapted_methods):
self.obj=obj
self.__dict__.update(adapted_methods)
def __str__(self):
return str(self.obj)
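# A small optional extension (a sketch, not part of the original): delegate
# unknown attribute lookups to the wrapped object so that attributes that were
# not explicitly adapted (e.g. synth.name) still resolve through the adapter.
class DelegatingAdapter(Adapter):
    def __getattr__(self, attr):
        # only called when normal lookup fails, so self.obj is found in
        # __dict__ first and there is no recursion
        return getattr(self.obj, attr)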
def main():
objects=[Computer('Auss')]
synth=Synthesizer('moog')
objects.append(Adapter(synth,dict(execute=synth.speak)))
human=Human('Bob')
objects.append(Adapter(human,dict(execute=human.speak)))
    for i in objects:
        print('{} {}'.format(str(i), i.execute()))

if __name__ == '__main__':
    main()
|