repo_name | path | text
|---|---|---|
krzem5/Python-Ascii_Img_Converter
|
src/main.py
|
<filename>src/main.py
from PIL import Image
import numpy as np
gscale1 = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
gscale2 = '@%#*+=-:. '
def getAverageL(image):
im = np.array(image)
w,h = im.shape
return np.average(im.reshape(w*h))
def convertImageToAscii(fileName, cols, scale, moreLevels):
global gscale1, gscale2
image = Image.open(fileName).convert('L')
W, H = image.size[0], image.size[1]
print("input image dims: %d x %d" % (W, H))
w = W/cols
h = w/scale
rows = int(H/h)
print("cols: %d, rows: %d" % (cols, rows))
print("tile dims: %d x %d" % (w, h))
if cols > W or rows > H:
print("Image too small for specified cols!")
exit(0)
aimg = []
for j in range(rows):
y1 = int(j*h)
y2 = int((j+1)*h)
        if j == rows-1:
            y2 = H
aimg.append("")
for i in range(cols):
x1 = int(i*w)
x2 = int((i+1)*w)
            if i == cols-1:
                x2 = W
img = image.crop((x1, y1, x2, y2))
avg = int(getAverageL(img))
            if moreLevels:
                gsval = gscale1[int((avg*69)/255)]
            else:
                gsval = gscale2[int((avg*9)/255)]
aimg[j] += gsval
return aimg
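# Illustrative note (not part of the original script): the average tile
# luminance in [0, 255] is mapped onto a ramp index. For the 10-character ramp
# gscale2, an average of 128 picks gscale2[int((128 * 9) / 255)] == gscale2[4],
# i.e. the character '+'.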
def main():
# imgFile=input('>FILE>')
imgFile="img.jpg"
outFile = 'out.txt'
scale = 0.43
cols = 100
print('generating ASCII art...')
    aimg = convertImageToAscii(imgFile, cols, scale, False)
with open(outFile, 'w') as f:
        for row in aimg:
            f.write(row + '\n')
print("ASCII art written to %s" % outFile)
if __name__ == '__main__':
main()
|
pjcasas29/wavenet_vocoder_cqt
|
datasets/wavallinn.py
|
<reponame>pjcasas29/wavenet_vocoder_cqt
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
from nnmnkwii import preprocessing as P
from hparams import hparams
from os.path import exists, basename, splitext
import librosa
from glob import glob
from os.path import join
from skimage import io
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw
def scale_minmax(X, min=0.0, max=1.0):
X_std = (X - X.min()) / (X.max() - X.min())
X_scaled = X_std * (max - min) + min
return X_scaled
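# Usage sketch (assumption; scale_minmax is not called elsewhere in this file):
# it could rescale a spectrogram into image range before writing it to disk,
# e.g. img = scale_minmax(mel_spectrogram, 0, 255).astype(np.uint8)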
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
src_files = sorted(glob(join(in_dir, "*.wav")))
for wav_path in src_files:
futures.append(executor.submit(
partial(_process_utterance, out_dir, index, wav_path, "dummy")))
index += 1
return [future.result() for future in tqdm(futures)]
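# Usage sketch (illustrative; the actual entry point lives elsewhere in the repo):
# metadata = build_from_path('wavs', 'training_data', num_workers=4)
# Each element is the tuple returned by _process_utterance:
# (audio_filename, mel_filename, N, text).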
def _process_utterance(out_dir, index, wav_path, text):
# Load the audio to a numpy array:
wav = audio.load_wav(wav_path)
# Trim begin/end silences
# NOTE: the threshold was chosen for clean signals
#wav, _ = librosa.effects.trim(wav, top_db=60, frame_length=2048, hop_length=512)
#if hparams.highpass_cutoff > 0.0:
# wav = audio.low_cut_filter(wav, hparams.sample_rate, hparams.highpass_cutoff)
# Mu-law quantize
if is_mulaw_quantize(hparams.input_type):
        # Trim silences in the mu-law quantized domain
silence_threshold = 0
#if silence_threshold > 0:
# [0, quantize_channels)
# out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
# start, end = audio.start_and_end_indices(out, silence_threshold)
# wav = wav[start:end]
constant_values = P.mulaw_quantize(0, hparams.quantize_channels - 1)
out_dtype = np.int16
elif is_mulaw(hparams.input_type):
# [-1, 1]
constant_values = P.mulaw(0.0, hparams.quantize_channels - 1)
out_dtype = np.float32
else:
# [-1, 1]
constant_values = 0.0
out_dtype = np.float32
# Compute a mel-scale spectrogram from the trimmed wav:
# (N, D)
mel_spectrogram = audio.logmelspectrogram(wav).astype(np.float32).T
if hparams.global_gain_scale > 0:
wav *= hparams.global_gain_scale
# Time domain preprocessing
if hparams.preprocess is not None and hparams.preprocess not in ["", "none"]:
f = getattr(audio, hparams.preprocess)
wav = f(wav)
# Clip
if np.abs(wav).max() > 1.0:
print("""Warning: abs max value exceeds 1.0: {}""".format(np.abs(wav).max()))
# ignore this sample
return ("dummy", "dummy", -1, "dummy")
wav = np.clip(wav, -1.0, 1.0)
# Set waveform target (out)
if is_mulaw_quantize(hparams.input_type):
out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
elif is_mulaw(hparams.input_type):
out = P.mulaw(wav, hparams.quantize_channels - 1)
else:
out = wav
#print(len(wav))
# zero pad
# this is needed to adjust time resolution between audio and mel-spectrogram
l, r = audio.pad_lr(out, hparams.fft_size, audio.get_hop_size())
if l > 0 or r > 0:
out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
N = mel_spectrogram.shape[0]
assert len(out) >= N * audio.get_hop_size()
# time resolution adjustment
# ensure length of raw audio is multiple of hop_size so that we can use
# transposed convolution to upsample
out = out[:N * audio.get_hop_size()]
assert len(out) % audio.get_hop_size() == 0
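    # Worked example (illustrative values, assuming fft_size=1024 and hop_size=256):
    # pad_lr pads `out` on the left/right so that len(out) >= N * 256, and the
    # slice above then truncates it to exactly N * 256 samples, i.e. one hop of
    # audio per mel frame.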
# Write the spectrograms to disk:
name = splitext(basename(wav_path))[0]
audio_filename = '%s-wave.npy' % (name)
mel_filename = '%s-feats.npy' % (name)
spectrogram = '%s-img.png' % (name)
from PIL import Image
np.save(os.path.join(out_dir, audio_filename),
out.astype(out_dtype), allow_pickle=False)
np.save(os.path.join(out_dir, mel_filename),
mel_spectrogram.astype(np.float32), allow_pickle=False)
print("mel_max: " + str(np.max(mel_spectrogram.astype(np.float32))))
print("mel_min: " + str(np.min(mel_spectrogram.astype(np.float32))))
print("mel_shape: " + str(mel_spectrogram.astype(np.float32).shape))
#Save as image
img = audio.mel2png(mel_spectrogram.astype(np.float32))
#print("Shape of img before save : " + str(img.shape))
spec_path = os.path.join(out_dir, spectrogram)
# save as PNG
io.imsave(spec_path, img, check_contrast=False)
#Image.fromarray(img).save(os.path.join(out_dir, spectrogram))
# Return a tuple describing this training example:
mel_back = audio.png2mel(io.imread(spec_path))
#print("Shape of image after save: " + str(mel_back.shape))
#print("Subtraction: " + str(mel_back - mel_spectrogram))
return (audio_filename, mel_filename, N, text)
|
olganaumova2007/memory-card
|
my_memory_card.py
|
<filename>my_memory_card.py<gh_stars>0
# create an application for memorising information
from PyQt5.QtCore import Qt  # import the libraries
from PyQt5.QtWidgets import QApplication,QWidget,QPushButton,QLabel,QVBoxLayout,QHBoxLayout,QRadioButton,QGroupBox,QButtonGroup  # import the libraries and classes
from random import shuffle  # import the modules
programma = QApplication([])  # create the windowed application
glavnoe_okno = QWidget()  # create the main window
glavnoe_okno.setWindowTitle('Тест')
glavnoe_okno.move(900,70)
glavnoe_okno.resize(400,200)  # set the width and height
glavnoe_okno.cur_question=-1
question_list=[]
class Question():  # a class that stores a question, its correct answer and the wrong options
def __init__(self,vopros,variant1,variant2,variant3,variant4):
self.question=vopros
self.right_answer=variant1
self.wrong1=variant2
self.wrong2=variant3
self.wrong3=variant4
def ask(q: Question):  # show the given question in the interface
vopros.setText(q.question)
    shuffle(answer)  # the answers are shuffled here
answer[0].setText(q.right_answer)
answer[1].setText(q.wrong1)
answer[2].setText(q.wrong2)
answer[3].setText(q.wrong3)
pravil.setText(q.right_answer)
    show_question()  # call the function
def next_question():
glavnoe_okno.cur_question+=1
    if glavnoe_okno.cur_question >= len(question_list):
glavnoe_okno.cur_question=0
q=question_list[glavnoe_okno.cur_question]
ask(q)
def check_answer():  # check the selected answer
    '''if some answer option is selected, check it and show the answer panel'''
    # if the answer[0] button is selected, set the text 'Правильно!' in the resltat widget
if answer[0].isChecked():
resltat.setText('Правильно')
    # otherwise, set 'Неверно!'
else:
resltat.setText('Не правильно')
    show_answer()  # call the show_answer function
def show_question():  # reset the buttons and show the question panel
buttonGroup.setExclusive(False)
variant1.setChecked(False)
variant2.setChecked(False)
variant3.setChecked(False)
variant4.setChecked(False)
buttonGroup.setExclusive(True)
    gruppavopros.show()  # show the question
    gruppaotvet.hide()  # hide the answer
knopka.setText('Ответить')
def show_answer():  # show the correct answer
    gruppavopros.hide()  # hide the question
    gruppaotvet.show()  # show the answer
knopka.setText('Следующий вопрос')
def start():  # handle a click on the button
if knopka.text()=='Ответить':
        check_answer()  # call the answer-checking function
else:
next_question()
vopros=QLabel('Какой национальности не существует')  # create the question widget
gruppavopros=QGroupBox('Вариант ответа')  # create the group widget for the answer options
variant1=QRadioButton('Энцы')
variant2=QRadioButton('Смурфы')
variant3=QRadioButton('Чулымцы')
variant4=QRadioButton('Алеуты')
answer = [variant1,variant2,variant3,variant4]
naprav_gruppa=QVBoxLayout()
naprav_gruppa.addWidget(variant1)
naprav_gruppa.addWidget(variant2)
naprav_gruppa.addWidget(variant3)
naprav_gruppa.addWidget(variant4)
gruppavopros.setLayout(naprav_gruppa)
buttonGroup=QButtonGroup()
buttonGroup.addButton(variant1)
buttonGroup.addButton(variant2)
buttonGroup.addButton(variant3)
buttonGroup.addButton(variant4)
knopka=QPushButton('Ответить')
gruppaotvet=QGroupBox('Результат')
resltat=QLabel('Правильно')
pravil=QLabel('тут будет правильный ответ')
gruppanapravotvet=QVBoxLayout()
gruppanapravotvet.addWidget(resltat)
gruppanapravotvet.addWidget(pravil)
gruppaotvet.setLayout(gruppanapravotvet)  # attach to the layout
gruppaotvet.hide()  # hide the answer group until the answer is shown
glavnapr = QVBoxLayout()
glavnapr.addWidget(vopros)
glavnapr.addWidget(gruppavopros)
glavnapr.addWidget(gruppaotvet)
glavnapr.addWidget(knopka)
glavnoe_okno.setLayout(glavnapr)
knopka.clicked.connect(start)
q1=Question('Какой национальности не существует?','Энцы','Смурфы','Чулымцы','Алеуты')
q2=Question('2+5?','7','2','5','9')
q3=Question('5+5?','10','4','676','0')
q4=Question('567+3?','570','345','5689','569')
question_list.append(q1)
question_list.append(q2)
question_list.append(q3)
question_list.append(q4)
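# Illustrative note: more cards can be added in exactly the same way, e.g.
# question_list.append(Question('3*3?', '9', '6', '12', '8'))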
next_question()
glavnoe_okno.show()
programma.exec_()
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
main.py
|
<reponame>cq615/Joint-Motion-Estimation-and-Segmentation<filename>main.py
import os, time, h5py, sys
from models.seg_network import build_FCN_triple_branch
from dataio.dataset import *
from utils.metrics import *
from dataio.data_generator import *
from utils.visualise import *
import lasagne.layers as L
import lasagne
import theano
import theano.tensor as T
if __name__ == '__main__':
shift = 10
rotate = 10
scale = 0.1
intensity = 0.1
flip = False
base_path = 'seg_flow'
data_path = os.path.join(base_path, 'data')
model_path = os.path.join(base_path, 'model')
size = 192
n_class = 4
# Prepare theano variables
image_var = T.tensor4('image')
image_pred_var = T.tensor4('image_pred')
label_var = T.itensor4('label')
image_seg_var = T.tensor4('seg')
    # Build the model (CNN variant; for the RNN variant, compile build_FCN_triple_branch_rnn and feed sequence data)
net = build_FCN_triple_branch(image_var, image_pred_var, image_seg_var, shape=(None, 1, size, size))
model_file = os.path.join(model_path, 'FCN_VGG16_sz192_joint_learn_ft_tmp.npz')
with np.load(model_file) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
L.set_all_param_values([net['out'], net['outs']], param_values)
learning_rate = T.scalar('learning_rate')
prediction = L.get_output(net['fr_st'])
loc = L.get_output(net['out'])
flow_loss = lasagne.objectives.squared_error(prediction, image_pred_var)
flow_loss = flow_loss.mean() + 0.001 * huber_loss(loc)
prediction_seg = L.get_output(net['outs'])
loss_seg = categorical_crossentropy(prediction_seg, label_var)
loss = flow_loss + 0.01 * loss_seg.mean()
params = L.get_all_params(net['out'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08)
# Compile a function performing a training step on a mini-batch and returning the corresponding training loss
train_fn_flow = theano.function([image_var, image_pred_var, image_seg_var, label_var, learning_rate],
[loss, flow_loss, loss_seg.mean()], updates=updates, on_unused_input='ignore')
params = lasagne.layers.get_all_params(net['outs'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08)
train_fn_seg = theano.function([image_var, image_pred_var, image_seg_var, label_var, learning_rate], [loss,flow_loss, loss_seg.mean()], updates=updates,on_unused_input='ignore')
model_name = 'FCN_VGG16_sz{0}'.format(size)
test_prediction = lasagne.layers.get_output(net['outs'])
test_loss = categorical_crossentropy(test_prediction, label_var)
test_acc = categorical_accuracy(test_prediction, label_var)
test_dice_lv = categorical_dice(test_prediction, label_var, 1)
test_dice_myo = categorical_dice(test_prediction, label_var, 2)
test_dice_rv = categorical_dice(test_prediction, label_var, 3)
# Compile a second function computing the testing loss and accuracy
test_fn = theano.function([image_seg_var, label_var], [test_acc, test_dice_lv, test_dice_myo, test_dice_rv],
on_unused_input='ignore')
# Launch the training loop
print("Starting training...")
np.random.seed(100)
start_time = time.time()
table = []
num_epoch = 100
max_iter = 100
batch_size = 64
start = 1
lr = 1e-4
log_every = 100
data_train_path = 'train'
test_image, test_label = load_dataset(os.path.join(data_path, 'test_UKBB2964_sz{0}_n100.h5'.format(size)))
for epoch in range(start, start + num_epoch):
# In each epoch, we do a full pass over the training data:
start_time_epoch = time.time()
train_loss = 0
train_loss_image = 0
train_loss_seg = 0
train_batches = 10
for t in range(max_iter):
image, label = generate_batch(data_train_path, batch_size=6, img_size=192)
image2, label2 = data_augmenter(image, label, shift=shift, rotate=rotate, scale=scale, intensity=intensity,
flip=flip)
label2_1hot = convert_to_1hot(label2, n_class)
# Train motion estimation and segmentation iteratively. After pretraining, the network can be trained jointly.
loss, loss_flow, loss_seg = train_fn_flow(image2[:, 0:1], image2[:, 1:], image2[:, 1:], label2_1hot, lr)
loss, loss_flow, loss_seg = train_fn_seg(image2[:,0:1], image2[:,1:],image2[:,1:], label2_1hot, lr)
# add warped segmentation loss
#loss, loss_flow, loss_seg = train_fn_seg_warped(image2[:, 0:1], image2[:, 1:], image2[:, 0:1], label2_1hot,1e-6)
train_loss += loss
train_loss_image += loss_flow
train_loss_seg += loss_seg
train_batches += 1
if train_batches % log_every == 0:
#
# And a full pass over the testing data:
test_loss = 0
test_acc = 0
test_dice_lv = 0
test_dice_myo = 0
test_dice_rv = 0
test_batches = 0
for image_test, label_test in iterate_minibatches(test_image, test_label, batch_size):
label_1hot = convert_to_1hot(label_test, n_class)
acc, dice_lv, dice_myo, dice_rv = test_fn(image_test, label_1hot)
test_acc += acc
test_dice_lv += dice_lv
test_dice_myo += dice_myo
test_dice_rv += dice_rv
test_batches += 1
test_acc /= test_batches
test_dice_lv /= test_batches
test_dice_myo /= test_batches
test_dice_rv /= test_batches
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(epoch, start + num_epoch - 1, time.time() - start_time_epoch))
# print(' learning rate:\t\t{:.8f}'.format(float(learning_rate.get_value())))
print(" training loss:\t\t{:.6f}".format(train_loss / train_batches))
print(" training loss flow: \t\t{:.6f}".format(train_loss_image / train_batches))
print(" training loss seg: \t\t{:.6f}".format(train_loss_seg / train_batches))
print(" testing accuracy:\t\t{:.2f} %".format(test_acc * 100))
print(" testing Dice LV:\t\t{:.6f}".format(test_dice_lv))
print(" testing Dice Myo:\t\t{:.6f}".format(test_dice_myo))
print(" testing Dice RV:\t\t{:.6f}".format(test_dice_rv))
np.savez(os.path.join(model_path, '{0}_joint_learn_ft_tmp.npz'.format(model_name, epoch)),
*lasagne.layers.get_all_param_values([net['out'], net['outs']]))
np.savez(os.path.join(model_path, '{0}_joint_learn_ft1_epoch{1:03d}.npz'.format(model_name, epoch)),
*lasagne.layers.get_all_param_values([net['out'], net['outs']]))
print("Training took {:.3f}s in total.".format(time.time() - start_time))
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
dataio/dataset.py
|
<gh_stars>10-100
import os, time, h5py, sys
import nibabel as nib
import cv2
import numpy as np
from scipy import ndimage
def load_dataset(filename):
f = h5py.File(filename, 'r')
image = np.array(f['image'])
label = np.array(f['label'])
return image, label
def convert_to_1hot(label, n_class):
# Convert a label map (N x 1 x H x W) into a one-hot representation (N x C x H x W)
label_swap = label.swapaxes(1, 3)
label_flat = label_swap.flatten()
n_data = len(label_flat)
label_1hot = np.zeros((n_data, n_class), dtype='int16')
label_1hot[range(n_data), label_flat] = 1
label_1hot = label_1hot.reshape((label_swap.shape[0], label_swap.shape[1], label_swap.shape[2], n_class))
label_1hot = label_1hot.swapaxes(1, 3)
return label_1hot
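# Minimal shape sketch (illustrative, assuming n_class=4):
# label = np.zeros((2, 1, 192, 192), dtype='int16')
# convert_to_1hot(label, 4).shape  # -> (2, 4, 192, 192)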
def convert_to_1hot_3d(label, n_class):
    # Convert a label map (N x 1 x X x Y x Z) into a one-hot representation (N x C x X x Y x Z)
label_swap = label.swapaxes(1, 4)
label_flat = label_swap.flatten()
n_data = len(label_flat)
label_1hot = np.zeros((n_data, n_class), dtype='int16')
label_1hot[range(n_data), label_flat] = 1
label_1hot = label_1hot.reshape((label_swap.shape[0], label_swap.shape[1], label_swap.shape[2], label_swap.shape[3], n_class))
label_1hot = label_1hot.swapaxes(1, 4)
return label_1hot
def data_augmenter(image, label, shift=0.0, rotate=0.0, scale=0.0, intensity=0.0, flip=False):
# Perform affine transformation on image and label, which are 4D tensors of dimension (N, C, X, Y).
image2 = np.zeros(image.shape, dtype='float32')
label2 = np.zeros(label.shape, dtype='int16')
for i in range(image.shape[0]):
# Random affine transformation using normal distributions
shift_var = [np.clip(np.random.normal(), -3, 3) * shift, np.clip(np.random.normal(), -3, 3) * shift]
rotate_var = np.clip(np.random.normal(), -3, 3) * rotate
scale_var = 1 + np.clip(np.random.normal(), -3, 3) * scale
intensity_var = 1 + np.clip(np.random.normal(), -3, 3) * intensity
# Apply affine transformation (rotation + scale + shift) to training images
row, col = image.shape[2:]
M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_var, 1.0 / scale_var)
M[:, 2] += shift_var
for c in range(image.shape[1]):
image2[i, c] = ndimage.interpolation.affine_transform(image[i, c], M[:, :2], M[:, 2], order=1)
label2[i, 0] = ndimage.interpolation.affine_transform(label[i, 0], M[:, :2], M[:, 2], order=0)
# Apply intensity variation
image2[i, :] *= intensity_var
# Apply random horizontal or vertical flipping
if flip:
if np.random.uniform() >= 0.5:
image2[i, :] = image2[i, :, ::-1, :]
label2[i, 0] = label2[i, 0, ::-1, :]
else:
image2[i, :] = image2[i, :, :, ::-1]
label2[i, 0] = label2[i, 0, :, ::-1]
return image2, label2
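# Usage sketch (mirrors the call in main.py):
# image_aug, label_aug = data_augmenter(image, label, shift=10, rotate=10,
#                                        scale=0.1, intensity=0.1, flip=False)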
def data_augmenter_3d(image, label, shift=0.0, rotate=0.0, scale=0.0, intensity=0.0, flip=False):
    # Perform affine transformation on image and label, which are 5D tensors of dimension (N, C, X, Y, Z).
image2 = np.zeros(image.shape, dtype='float32')
label2 = np.zeros(label.shape, dtype='int16')
for i in range(image.shape[0]):
# Random affine transformation using normal distributions
shift_var = [np.clip(np.random.normal(), -3, 3) * shift, np.clip(np.random.normal(), -3, 3) * shift]
rotate_var = np.clip(np.random.normal(), -3, 3) * rotate
scale_var = 1 + np.clip(np.random.normal(), -3, 3) * scale
intensity_var = 1 + np.clip(np.random.normal(), -3, 3) * intensity
# Apply affine transformation (rotation + scale + shift) to training images
row, col = image.shape[2:4]
M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_var, 1.0 / scale_var)
M[:, 2] += shift_var
for z in range(image.shape[4]):
image2[i, 0, :, :, z] = ndimage.interpolation.affine_transform(image[i, 0, :, :, z], M[:, :2], M[:, 2], order=1)
label2[i, 0, :, :, z] = ndimage.interpolation.affine_transform(label[i, 0, :, :, z], M[:, :2], M[:, 2], order=0)
# Apply intensity variation
image2[i] *= intensity_var
# Apply random horizontal or vertical flipping
if flip:
if np.random.uniform() >= 0.5:
image2[i] = image2[i, :, ::-1, :]
label2[i] = label2[i, :, ::-1, :]
else:
image2[i] = image2[i, :, :, ::-1]
label2[i] = label2[i, :, :, ::-1]
return image2, label2
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
models/network.py
|
<filename>models/network.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 16:44:08 2018
@author: cq615
"""
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn
import lasagne
import lasagne.layers as L
from lasagne.layers.base import Layer
from lasagne.utils import as_tuple
from lasagne.layers.pool import pool_output_length
import numpy as np
from layers import *
def Conv(incoming, num_filters, filter_size=3,
stride=(1, 1), pad='same', W=lasagne.init.HeNormal(),
b=None, nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
"""
Overrides the default parameters for ConvLayer
"""
ensure_set_name('conv', kwargs)
return L.Conv2DLayer(incoming, num_filters, filter_size, stride, pad, W=W, b=b,
nonlinearity=nonlinearity, **kwargs)
# Calculate the memory required for a network
def memory_requirement(layer):
# Data blobs
sum = 0
for l in L.get_all_layers(layer):
sum += np.prod(l.output_shape)
float_len = 4
sum = (sum * float_len) / pow(2, 30)
print('Memory for data blobs = {0:.3f}GB'.format(sum))
# Parameters
sum = 0
for l in L.get_all_layers(layer):
for p in l.get_params():
sum += np.prod(p.get_value().shape)
print('Number of parameters = {0}'.format(sum))
sum = (sum * float_len) / pow(2, 30)
print('Memory for parameters = {0:.3f}GB'.format(sum))
def bilinear_1d(sz):
if sz % 2 == 0:
raise NotImplementedError('`Bilinear kernel` requires odd filter size.')
c = (sz + 1) / 2
h = np.array(range(1, c + 1) + range(c - 1, 0, -1))
h = h / float(c)
return h
def bilinear_2d(sz):
W = np.ones((sz, sz))
h = bilinear_1d(sz)
for i in range(sz):
W[i, :] *= h
for j in range(sz):
W[:, j] *= h
return W
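# Illustrative example (Python 2 semantics): bilinear_1d(3) returns the triangle
# filter [0.5, 1.0, 0.5], and bilinear_2d(3) its separable 2D counterpart, which
# set_conv_bilinear_weights uses to initialise upsampling filters as bilinear
# interpolation.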
def set_conv_bilinear_weights(params, num_filters, filter_size):
# Set weights
[W, b] = params
W_val = np.zeros((num_filters, num_filters, filter_size, filter_size), dtype=np.float32)
for c in range(num_filters):
W_val[c, c, :, :] = bilinear_2d(filter_size)
b_val = np.zeros((num_filters,), dtype=np.float32)
W.set_value(W_val)
b.set_value(b_val)
class BilinearUpsamplingLayer(Layer):
"""
2D bilinear upsampling layer
Performs 2D bilinear upsampling over the two trailing axes of a 4D or 5D input tensor.
Parameters
----------
incoming : a :class:`Layer` instance or tuple
The layer feeding into this layer, or the expected input shape.
scale_factor : integer or iterable
The scale factor in each dimension. If an integer, it is promoted to
a square scale factor region. If an iterable, it should have two
elements.
**kwargs
Any additional keyword arguments are passed to the :class:`Layer`
superclass.
"""
def __init__(self, incoming, scale_factor, **kwargs):
super(BilinearUpsamplingLayer, self).__init__(incoming, **kwargs)
self.scale_factor = scale_factor
if self.scale_factor < 1:
raise ValueError('Scale factor must be >= 1, not {0}'.format(self.scale_factor))
def get_output_shape_for(self, input_shape):
output_shape = list(input_shape) # copy / convert to mutable list
if output_shape[2] is not None:
output_shape[2] *= self.scale_factor
if output_shape[3] is not None:
output_shape[3] *= self.scale_factor
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
upscaled = input
if self.scale_factor > 1:
if input.ndim == 4:
upscaled = T.nnet.abstract_conv.bilinear_upsampling(input=input, ratio=self.scale_factor)
elif input.ndim == 5:
# Swap dimension order from bcxyz to bczyx
input_swap = input.swapaxes(2, 4)
shape = input_swap.shape
# Squeeze the first two dimensions so it becomes a 4D tensor
# and 2D bilinear_upsampling can be applied.
input_reshape = input_swap.reshape((shape[0] * shape[1], shape[2], shape[3], shape[4]))
upscaled = T.nnet.abstract_conv.bilinear_upsampling(input=input_reshape, ratio=self.scale_factor)
# Recover the 5D tensor shape
upscaled_reshape = upscaled.reshape((shape[0], shape[1], shape[2], \
shape[3] * self.scale_factor, shape[4] * self.scale_factor))
upscaled = upscaled_reshape.swapaxes(2, 4)
return upscaled
class Conv3DDNNLayer(lasagne.layers.conv.BaseConvLayer):
"""
lasagne.layers.Conv3DDNNLayer(incoming, num_filters, filter_size,
stride=(1, 1, 1), pad=0, untie_biases=False,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
**kwargs)
3D convolutional layer
Performs a 3D convolution on its input and optionally adds a bias and
applies an elementwise nonlinearity. This implementation uses
``theano.sandbox.cuda.dnn.dnn_conv3d`` directly.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape. The
output of this layer should be a 5D tensor, with shape ``(batch_size,
num_input_channels, input_rows, input_columns, input_depth)``.
num_filters : int
The number of learnable convolutional filters this layer has.
filter_size : int or iterable of int
An integer or a 3-element tuple specifying the size of the filters.
stride : int or iterable of int
An integer or a 3-element tuple specifying the stride of the
convolution operation.
pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
By default, the convolution is only computed where the input and the
filter fully overlap (a valid convolution). When ``stride=1``, this
yields an output that is smaller than the input by ``filter_size - 1``.
The `pad` argument allows you to implicitly pad the input with zeros,
extending the output size.
A single integer results in symmetric zero-padding of the given size on
all borders, a tuple of three integers allows different symmetric
padding per dimension.
``'full'`` pads with one less than the filter size on both sides. This
is equivalent to computing the convolution wherever the input and the
filter overlap by at least one position.
``'same'`` pads with half the filter size (rounded down) on both sides.
When ``stride=1`` this results in an output size equal to the input
size. Even filter size is not supported.
``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
Note that ``'full'`` and ``'same'`` can be faster than equivalent
integer values due to optimizations by Theano.
untie_biases : bool (default: False)
If ``False``, the layer will have a bias parameter for each channel,
which is shared across all positions in this channel. As a result, the
`b` attribute will be a vector (1D).
If True, the layer will have separate bias parameters for each
position in each channel. As a result, the `b` attribute will be a
4D tensor.
W : Theano shared variable, expression, numpy array or callable
Initial value, expression or initializer for the weights.
These should be a 5D tensor with shape ``(num_filters,
num_input_channels, filter_rows, filter_columns, filter_depth)``.
See :func:`lasagne.utils.create_param` for more information.
b : Theano shared variable, expression, numpy array, callable or ``None``
Initial value, expression or initializer for the biases. If set to
``None``, the layer will have no biases. Otherwise, biases should be
a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
``False``. If it is set to ``True``, its shape should be
``(num_filters, output_rows, output_columns, output_depth)`` instead.
See :func:`lasagne.utils.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
flip_filters : bool (default: False)
Whether to flip the filters and perform a convolution, or not to flip
them and perform a correlation. Flipping adds a bit of overhead, so it
is disabled by default. In most cases this does not make a difference
anyway because the filters are learned, but if you want to compute
predictions with pre-trained weights, take care if they need flipping.
**kwargs
Any additional keyword arguments are passed to the `Layer` superclass.
Attributes
----------
W : Theano shared variable or expression
Variable or expression representing the filter weights.
b : Theano shared variable or expression
Variable or expression representing the biases.
"""
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1, 1),
pad=0, untie_biases=False,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
convolution=dnn.dnn_conv3d,
**kwargs):
super(Conv3DDNNLayer, self).__init__(incoming, num_filters, filter_size,
stride, pad, untie_biases, W, b,
nonlinearity, flip_filters, n=3,
**kwargs)
self.convolution = convolution
def convolve(self, input, **kwargs):
# by default we assume 'cross', consistent with corrmm.
conv_mode = 'conv' if self.flip_filters else 'cross'
border_mode = self.pad
if border_mode == 'same':
border_mode = tuple(s // 2 for s in self.filter_size)
conved = self.convolution(img=input, kerns=self.W,
border_mode=border_mode,
subsample=self.stride,
conv_mode=conv_mode)
return conved
class Pool3DDNNLayer(Layer):
"""
3D pooling layer
Performs 3D mean- or max-pooling over the 3 trailing axes of a 5D input
tensor. This is an alternative implementation which uses
``theano.sandbox.cuda.dnn.dnn_pool`` directly.
Parameters
----------
incoming : a :class:`Layer` instance or tuple
The layer feeding into this layer, or the expected input shape.
pool_size : integer or iterable
        The length of the pooling region in each dimension. If an integer, it
        is promoted to a cubic pooling region. If an iterable, it should have
        three elements.
stride : integer, iterable or ``None``
        The strides between successive pooling regions in each dimension.
If ``None`` then ``stride = pool_size``.
pad : integer or iterable
Number of elements to be added on each side of the input
in each dimension. Each value must be less than
the corresponding stride.
ignore_border : bool (default: True)
This implementation never includes partial pooling regions, so this
argument must always be set to True. It exists only to make sure the
interface is compatible with :class:`lasagne.layers.MaxPool2DLayer`.
mode : string
Pooling mode, one of 'max', 'average_inc_pad' or 'average_exc_pad'.
Defaults to 'max'.
**kwargs
Any additional keyword arguments are passed to the :class:`Layer`
superclass.
Notes
-----
The value used to pad the input is chosen to be less than
the minimum of the input, so that the output of each pooling region
always corresponds to some element in the unpadded input region.
"""
def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
ignore_border=True, mode='max', **kwargs):
super(Pool3DDNNLayer, self).__init__(incoming, **kwargs)
self.pool_size = as_tuple(pool_size, 3)
if len(self.input_shape) != 5:
raise ValueError("Tried to create a 3D pooling layer with "
"input shape %r. Expected 5 input dimensions "
"(batchsize, channels, 3 spatial dimensions)."
% (self.input_shape,))
if stride is None:
self.stride = self.pool_size
else:
self.stride = as_tuple(stride, 3)
self.pad = as_tuple(pad, 3)
self.mode = mode
# The ignore_border argument is for compatibility with MaxPool2DLayer.
# ignore_border=False is not supported. Borders are always ignored.
self.ignore_border = ignore_border
if not self.ignore_border:
raise NotImplementedError("Pool3DDNNLayer does not support "
"ignore_border=False.")
def get_output_shape_for(self, input_shape):
output_shape = list(input_shape) # copy / convert to mutable list
output_shape[2] = pool_output_length(input_shape[2],
pool_size=self.pool_size[0],
stride=self.stride[0],
pad=self.pad[0],
ignore_border=self.ignore_border,
)
output_shape[3] = pool_output_length(input_shape[3],
pool_size=self.pool_size[1],
stride=self.stride[1],
pad=self.pad[1],
ignore_border=self.ignore_border,
)
output_shape[4] = pool_output_length(input_shape[4],
pool_size=self.pool_size[2],
stride=self.stride[2],
pad=self.pad[2],
ignore_border=self.ignore_border,
)
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
pooled = dnn.dnn_pool(input, self.pool_size, self.stride,
self.mode, self.pad)
return pooled
class MaxPool3DDNNLayer(Pool3DDNNLayer):
"""
3D max-pooling layer
Performs 3D max-pooling over the three trailing axes of a 5D input tensor.
"""
def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
ignore_border=True, **kwargs):
super(MaxPool3DDNNLayer, self).__init__(incoming, pool_size, stride,
pad, ignore_border, mode='max',
**kwargs)
def softmax_4dtensor(x):
e_x = T.exp(x - x.max(axis=1, keepdims=True))
e_x = e_x / e_x.sum(axis=1, keepdims=True)
return e_x
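# Descriptive note: softmax_4dtensor applies a numerically stable softmax over
# the channel axis (axis=1) of an (N, C, H, W) tensor, so the class scores at
# each pixel sum to one.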
def build_FCN(image_var, shape=(None, 1, None, None), n_class=1, load_vgg=False):
    # Build a fully convolutional network (FCN) for semantic segmentation only
net = {}
net['in'] = L.InputLayer(shape, image_var)
net['conv1_1'] = L.batch_norm(L.Conv2DLayer(net['in'], filter_size=3, num_filters=64, pad='same'))
net['conv1_2'] = L.batch_norm(L.Conv2DLayer(net['conv1_1'], filter_size=3, num_filters=64, pad='same'))
net['conv2_1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2'] = L.batch_norm(L.Conv2DLayer(net['conv2_1'], filter_size=3, num_filters=128, pad='same'))
net['conv3_1'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2'] = L.batch_norm(L.Conv2DLayer(net['conv3_1'], filter_size=3, num_filters=256, pad='same'))
net['conv3_3'] = L.batch_norm(L.Conv2DLayer(net['conv3_2'], filter_size=3, num_filters=256, pad='same'))
net['conv4_1'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2'] = L.batch_norm(L.Conv2DLayer(net['conv4_1'], filter_size=3, num_filters=512, pad='same'))
net['conv4_3'] = L.batch_norm(L.Conv2DLayer(net['conv4_2'], filter_size=3, num_filters=512, pad='same'))
net['conv5_1'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2'] = L.batch_norm(L.Conv2DLayer(net['conv5_1'], filter_size=3, num_filters=512, pad='same'))
net['conv5_3'] = L.batch_norm(L.Conv2DLayer(net['conv5_2'], filter_size=3, num_filters=512, pad='same'))
net['out1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], filter_size=3, num_filters=64, pad='same'))
net['out2'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], filter_size=3, num_filters=64, pad='same'))
net['out3'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], filter_size=3, num_filters=64, pad='same'))
net['out4'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], filter_size=3, num_filters=64, pad='same'))
net['out5'] = L.batch_norm(L.Conv2DLayer(net['conv5_3'], filter_size=3, num_filters=64, pad='same'))
net['out2_up'] = BilinearUpsamplingLayer(net['out2'], scale_factor=2)
net['out3_up'] = BilinearUpsamplingLayer(net['out3'], scale_factor=4)
net['out4_up'] = BilinearUpsamplingLayer(net['out4'], scale_factor=8)
net['out5_up'] = BilinearUpsamplingLayer(net['out5'], scale_factor=16)
net['concat'] = L.ConcatLayer([net['out1'],
net['out2_up'],
net['out3_up'],
net['out4_up'],
net['out5_up']])
net['comb_1'] = L.Conv2DLayer(net['concat'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2'] = L.batch_norm(L.Conv2DLayer(net['comb_1'], filter_size=1, num_filters=64, pad='same'))
net['out'] = L.Conv2DLayer(net['comb_2'], filter_size=1, num_filters=n_class, pad='same', nonlinearity=softmax_4dtensor)
# Initialise the weights for the combination layer so that concatenation is initially equivalent to summation
print('Initialise the combination weights ...')
W = np.zeros(net['comb_1'].get_params()[0].get_value().shape, dtype='float32')
b = np.zeros(net['comb_1'].get_params()[1].get_value().shape, dtype='float32')
for i in range(64):
W[i, i::64] = 1.0
b[i] = 0.0
net['comb_1'].get_params()[0].set_value(W)
net['comb_1'].get_params()[1].set_value(b)
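    # Note (illustrative): with this initialisation, output channel i of comb_1
    # sums channel i of each of the five concatenated 64-channel feature maps
    # (input channels i, i+64, ..., i+256), so the 1x1 convolution initially
    # behaves like an element-wise summation of the multi-scale features.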
if load_vgg:
# Initialise the convolutional layers using VGG16 weights
print('Initialise the convolutional layers using VGG16 weights ...')
with np.load('/vol/biomedic/users/wbai/data/deep_learning/VGG/VGG_ILSVRC_16_layers.npz') as f:
vgg = f['vgg'][()]
for layer_name in ['conv1_1', 'conv1_2',
'conv2_1', 'conv2_2',
'conv3_1', 'conv3_2', 'conv3_3',
'conv4_1', 'conv4_2', 'conv4_3',
'conv5_1', 'conv5_2', 'conv5_3']:
# Since we apply batch_norm to the convolutional layer, each layer becomes Conv + BN + ReLU.
# We need to find the original Conv layer by using .input_layer twice.
# Also, batch_norm will remove the bias parameter b. Only W is kept.
if layer_name == 'conv1_1':
W_mean = np.mean(vgg[layer_name]['W'], axis=1, keepdims=True)
net[layer_name].input_layer.input_layer.get_params()[0].set_value(np.repeat(W_mean, shape[1], axis=1))
else:
net[layer_name].input_layer.input_layer.get_params()[0].set_value(vgg[layer_name]['W'])
return net
def build_FCN_triple_branch(image_var, image_pred_var, image_seg_var, shape=(None, 1, None, None), n_class=1, load_vgg=False):
    # Build a fully convolutional network (FCN) for joint motion estimation and semantic segmentation
net = {}
    # Siamese-style motion estimation branch
net['in'] = L.InputLayer(shape, image_var)
net['conv1_1'] = L.batch_norm(L.Conv2DLayer(net['in'], filter_size=3, num_filters=64, pad='same'))
net['conv1_2'] = L.batch_norm(L.Conv2DLayer(net['conv1_1'], filter_size=3, num_filters=64, pad='same'))
net['conv2_1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2'] = L.batch_norm(L.Conv2DLayer(net['conv2_1'], filter_size=3, num_filters=128, pad='same'))
net['conv3_1'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2'] = L.batch_norm(L.Conv2DLayer(net['conv3_1'], filter_size=3, num_filters=256, pad='same'))
net['conv3_3'] = L.batch_norm(L.Conv2DLayer(net['conv3_2'], filter_size=3, num_filters=256, pad='same'))
net['conv4_1'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2'] = L.batch_norm(L.Conv2DLayer(net['conv4_1'], filter_size=3, num_filters=512, pad='same'))
net['conv4_3'] = L.batch_norm(L.Conv2DLayer(net['conv4_2'], filter_size=3, num_filters=512, pad='same'))
net['conv5_1'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2'] = L.batch_norm(L.Conv2DLayer(net['conv5_1'], filter_size=3, num_filters=512, pad='same'))
net['conv5_3'] = L.batch_norm(L.Conv2DLayer(net['conv5_2'], filter_size=3, num_filters=512, pad='same'))
net['in_pred'] = L.InputLayer(shape, image_pred_var)
net['conv1_1s'] = L.batch_norm(L.Conv2DLayer(net['in_pred'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2s'] = L.batch_norm(L.Conv2DLayer(net['conv1_1s'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2s'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_1s'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2s'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2s'] = L.batch_norm(L.Conv2DLayer(net['conv3_1s'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_2s'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3s'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2s'] = L.batch_norm(L.Conv2DLayer(net['conv4_1s'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3s'] = L.batch_norm(L.Conv2DLayer(net['conv4_2s'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3s'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2s'] = L.batch_norm(L.Conv2DLayer(net['conv5_1s'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3s'] = L.batch_norm(L.Conv2DLayer(net['conv5_2s'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['concat1'] = L.ConcatLayer([net['conv1_2'], net['conv1_2s']])
net['concat2'] = L.ConcatLayer([net['conv2_2'], net['conv2_2s']])
net['concat3'] = L.ConcatLayer([net['conv3_3'], net['conv3_3s']])
net['concat4'] = L.ConcatLayer([net['conv4_3'], net['conv4_3s']])
net['concat5'] = L.ConcatLayer([net['conv5_3'], net['conv5_3s']])
net['out1'] = L.batch_norm(L.Conv2DLayer(net['concat1'], filter_size=3, num_filters=64, pad='same'))
net['out2'] = L.batch_norm(L.Conv2DLayer(net['concat2'], filter_size=3, num_filters=64, pad='same'))
net['out3'] = L.batch_norm(L.Conv2DLayer(net['concat3'], filter_size=3, num_filters=64, pad='same'))
net['out4'] = L.batch_norm(L.Conv2DLayer(net['concat4'], filter_size=3, num_filters=64, pad='same'))
net['out5'] = L.batch_norm(L.Conv2DLayer(net['concat5'], filter_size=3, num_filters=64, pad='same'))
net['out2_up'] = BilinearUpsamplingLayer(net['out2'], scale_factor=2)
net['out3_up'] = BilinearUpsamplingLayer(net['out3'], scale_factor=4)
net['out4_up'] = BilinearUpsamplingLayer(net['out4'], scale_factor=8)
net['out5_up'] = BilinearUpsamplingLayer(net['out5'], scale_factor=16)
net['concat'] = L.ConcatLayer([net['out1'],
net['out2_up'],
net['out3_up'],
net['out4_up'],
net['out5_up']])
net['comb_1'] = L.Conv2DLayer(net['concat'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2'] = L.batch_norm(L.Conv2DLayer(net['comb_1'], filter_size=1, num_filters=64, pad='same'))
net['out'] = L.Conv2DLayer(net['comb_2'], filter_size=1, num_filters=2, pad='same', nonlinearity=lasagne.nonlinearities.tanh)
# Spatial Transformation (source to target)
net['fr_st'] = OFLayer(net['in'],net['out'], name='fr_st')
# Segmentation branch
net['in_seg'] = L.InputLayer(shape, image_seg_var)
net['conv1_1ss'] = L.batch_norm(L.Conv2DLayer(net['in_seg'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv1_1ss'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv1_2ss'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv2_1ss'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv2_2ss'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_1ss'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_2ss'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_3ss'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_1ss'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_2ss'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_3ss'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv5_1ss'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv5_2ss'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['out1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2ss'], filter_size=3, num_filters=64, pad='same'))
net['out2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2ss'], filter_size=3, num_filters=64, pad='same'))
net['out3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out4s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out5s'] = L.batch_norm(L.Conv2DLayer(net['conv5_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out2s_up'] = BilinearUpsamplingLayer(net['out2s'], scale_factor=2)
net['out3s_up'] = BilinearUpsamplingLayer(net['out3s'], scale_factor=4)
net['out4s_up'] = BilinearUpsamplingLayer(net['out4s'], scale_factor=8)
net['out5s_up'] = BilinearUpsamplingLayer(net['out5s'], scale_factor=16)
net['concats'] = L.ConcatLayer([net['out1s'],
net['out2s_up'],
net['out3s_up'],
net['out4s_up'],
net['out5s_up']])
net['comb_1s'] = L.Conv2DLayer(net['concats'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2s'] = L.batch_norm(L.Conv2DLayer(net['comb_1s'], filter_size=1, num_filters=64, pad='same'))
net['outs'] = L.Conv2DLayer(net['comb_2s'], filter_size=1, num_filters=4, pad='same', nonlinearity=softmax_4dtensor)
# warp source segmentation to target
net['warped_outs'] = OFLayer(net['outs'],net['out'], name='fr_st')
return net
def build_FCN_triple_branch_rnn(image_var, image_pred_var, image_seg_var, shape=(None, 1, None, None, None), shape_seg = (None, 1, None, None), n_class=1, load_vgg=False):
# Build network for joint motion estimation and segmentation with RNN
net = {}
net['in'] = L.InputLayer(shape, image_var)
net['in'] = L.DimshuffleLayer(net['in'],(0,4,1,2,3))
shape = L.get_output_shape(net['in']) #shape=[batch_size, seq_size, num_channel, width, height]
n_channel = shape[2]
batchsize = shape[0]
seqlen = shape[1]
width = shape[3]
height = shape[4]
# Reshape sequence input to batches dimension for easy extracting features
net['in'] = L.ReshapeLayer(net['in'], (-1, n_channel, width, height))
net['conv1_1'] = L.batch_norm(L.Conv2DLayer(net['in'], filter_size=3, num_filters=64, pad='same'))
net['conv1_2'] = L.batch_norm(L.Conv2DLayer(net['conv1_1'], filter_size=3, num_filters=64, pad='same'))
net['conv2_1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2'] = L.batch_norm(L.Conv2DLayer(net['conv2_1'], filter_size=3, num_filters=128, pad='same'))
net['conv3_1'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2'] = L.batch_norm(L.Conv2DLayer(net['conv3_1'], filter_size=3, num_filters=256, pad='same'))
net['conv3_3'] = L.batch_norm(L.Conv2DLayer(net['conv3_2'], filter_size=3, num_filters=256, pad='same'))
net['conv4_1'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2'] = L.batch_norm(L.Conv2DLayer(net['conv4_1'], filter_size=3, num_filters=512, pad='same'))
net['conv4_3'] = L.batch_norm(L.Conv2DLayer(net['conv4_2'], filter_size=3, num_filters=512, pad='same'))
net['conv5_1'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2'] = L.batch_norm(L.Conv2DLayer(net['conv5_1'], filter_size=3, num_filters=512, pad='same'))
net['conv5_3'] = L.batch_norm(L.Conv2DLayer(net['conv5_2'], filter_size=3, num_filters=512, pad='same'))
# somehow redundant, can be improved for efficiency
net['in_pred'] = L.InputLayer(shape, image_pred_var)
net['in_pred'] = L.DimshuffleLayer(net['in_pred'],(0,4,1,2,3))
net['in_pred'] = L.ReshapeLayer(net['in_pred'], (-1, n_channel, width, height))
net['conv1_1s'] = L.batch_norm(L.Conv2DLayer(net['in_pred'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2s'] = L.batch_norm(L.Conv2DLayer(net['conv1_1s'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2s'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_1s'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2s'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2s'] = L.batch_norm(L.Conv2DLayer(net['conv3_1s'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_2s'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3s'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2s'] = L.batch_norm(L.Conv2DLayer(net['conv4_1s'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3s'] = L.batch_norm(L.Conv2DLayer(net['conv4_2s'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3s'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2s'] = L.batch_norm(L.Conv2DLayer(net['conv5_1s'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3s'] = L.batch_norm(L.Conv2DLayer(net['conv5_2s'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['concat1'] = L.ConcatLayer([net['conv1_2'], net['conv1_2s']])
net['concat2'] = L.ConcatLayer([net['conv2_2'], net['conv2_2s']])
net['concat3'] = L.ConcatLayer([net['conv3_3'], net['conv3_3s']])
net['concat4'] = L.ConcatLayer([net['conv4_3'], net['conv4_3s']])
net['concat5'] = L.ConcatLayer([net['conv5_3'], net['conv5_3s']])
net['out1'] = L.batch_norm(L.Conv2DLayer(net['concat1'], filter_size=3, num_filters=64, pad='same'))
net['out2'] = L.batch_norm(L.Conv2DLayer(net['concat2'], filter_size=3, num_filters=64, pad='same'))
net['out3'] = L.batch_norm(L.Conv2DLayer(net['concat3'], filter_size=3, num_filters=64, pad='same'))
net['out4'] = L.batch_norm(L.Conv2DLayer(net['concat4'], filter_size=3, num_filters=64, pad='same'))
net['out5'] = L.batch_norm(L.Conv2DLayer(net['concat5'], filter_size=3, num_filters=64, pad='same'))
net['out2_up'] = BilinearUpsamplingLayer(net['out2'], scale_factor=2)
net['out3_up'] = BilinearUpsamplingLayer(net['out3'], scale_factor=4)
net['out4_up'] = BilinearUpsamplingLayer(net['out4'], scale_factor=8)
net['out5_up'] = BilinearUpsamplingLayer(net['out5'], scale_factor=16)
net['concat'] = L.ConcatLayer([net['out1'],
net['out2_up'],
net['out3_up'],
net['out4_up'],
net['out5_up']])
net['comb_1'] = L.Conv2DLayer(net['concat'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2'] = L.batch_norm(L.Conv2DLayer(net['comb_1'], filter_size=1, num_filters=64, pad='same'))
net['comb_2_rshp'] = L.ReshapeLayer(net['comb_2'],(-1, seqlen, 64, width, height))
net['in_to_hid'] = L.Conv2DLayer(L.InputLayer((None, 64, width, height)), num_filters=2, filter_size=1, nonlinearity=None, name ='in_to_hid' )
net['hid_to_hid'] = L.Conv2DLayer(L.InputLayer(net['in_to_hid'].output_shape), num_filters=2, filter_size=1, nonlinearity=None, name = 'hid_to_hid')
net['rec'] = L.CustomRecurrentLayer(net['comb_2_rshp'], net['in_to_hid'], net['hid_to_hid'],nonlinearity=lasagne.nonlinearities.tanh, name = 'rec')
net['out'] = L.ReshapeLayer(net['rec'], (-1, 2, width, height))
#net['out'] = L.Conv2DLayer(net['comb_2'], filter_size=1, num_filters=2, pad='same', nonlinearity=lasagne.nonlinearities.tanh)
net['fr_st'] = OFLayer(net['in'],net['out'], name='fr_st')
net['fr_st'] = L.ReshapeLayer(net['fr_st'],(-1, seqlen, n_channel, width, height))
net['fr_st'] = L.DimshuffleLayer(net['fr_st'],(0,2,3,4,1))
net['in_seg'] = L.InputLayer(shape_seg, image_seg_var)
# net['in_seg'] = L.DimshuffleLayer(net['in_seg'],(0,4,1,2,3))
# net['in_seg'] = L.ReshapeLayer(net['in_seg'], (-1, n_channel, width, height))
net['conv1_1ss'] = L.batch_norm(L.Conv2DLayer(net['in_seg'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv1_1ss'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv1_2ss'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv2_1ss'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv2_2ss'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_1ss'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_2ss'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv3_3ss'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_1ss'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_2ss'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1ss'] = L.batch_norm(L.Conv2DLayer(net['conv4_3ss'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2ss'] = L.batch_norm(L.Conv2DLayer(net['conv5_1ss'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3ss'] = L.batch_norm(L.Conv2DLayer(net['conv5_2ss'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['out1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2ss'], filter_size=3, num_filters=64, pad='same'))
net['out2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2ss'], filter_size=3, num_filters=64, pad='same'))
net['out3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out4s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out5s'] = L.batch_norm(L.Conv2DLayer(net['conv5_3ss'], filter_size=3, num_filters=64, pad='same'))
net['out2s_up'] = BilinearUpsamplingLayer(net['out2s'], scale_factor=2)
net['out3s_up'] = BilinearUpsamplingLayer(net['out3s'], scale_factor=4)
net['out4s_up'] = BilinearUpsamplingLayer(net['out4s'], scale_factor=8)
net['out5s_up'] = BilinearUpsamplingLayer(net['out5s'], scale_factor=16)
net['concats'] = L.ConcatLayer([net['out1s'],
net['out2s_up'],
net['out3s_up'],
net['out4s_up'],
net['out5s_up']])
net['comb_1s'] = L.Conv2DLayer(net['concats'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2s'] = L.batch_norm(L.Conv2DLayer(net['comb_1s'], filter_size=1, num_filters=64, pad='same'))
net['outs'] = L.Conv2DLayer(net['comb_2s'], filter_size=1, num_filters=4, pad='same', nonlinearity=softmax_4dtensor)
net['warped_outs'] = OFLayer(net['outs'],net['out'], name='fr_st')
#net['warped_outs'] = L.ReshapeLayer(net['warped_outs'],(-1, seqlen, n_channel, width, height))
#net['warped_outs'] = L.DimshuffleLayer(net['warped_outs'],(0,2,3,4,1))
return net
def build_FCN_Siemese_flow(image_var, image_pred_var, shape=(None, 1, None, None), n_class=1, load_vgg=False):
    # Build a fully convolutional network (FCN) for motion estimation only
net = {}
net['in'] = L.InputLayer(shape, image_var)
net['conv1_1'] = L.batch_norm(L.Conv2DLayer(net['in'], filter_size=3, num_filters=64, pad='same'))
net['conv1_2'] = L.batch_norm(L.Conv2DLayer(net['conv1_1'], filter_size=3, num_filters=64, pad='same'))
net['conv2_1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2'] = L.batch_norm(L.Conv2DLayer(net['conv2_1'], filter_size=3, num_filters=128, pad='same'))
net['conv3_1'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2'] = L.batch_norm(L.Conv2DLayer(net['conv3_1'], filter_size=3, num_filters=256, pad='same'))
net['conv3_3'] = L.batch_norm(L.Conv2DLayer(net['conv3_2'], filter_size=3, num_filters=256, pad='same'))
net['conv4_1'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2'] = L.batch_norm(L.Conv2DLayer(net['conv4_1'], filter_size=3, num_filters=512, pad='same'))
net['conv4_3'] = L.batch_norm(L.Conv2DLayer(net['conv4_2'], filter_size=3, num_filters=512, pad='same'))
net['conv5_1'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2'] = L.batch_norm(L.Conv2DLayer(net['conv5_1'], filter_size=3, num_filters=512, pad='same'))
net['conv5_3'] = L.batch_norm(L.Conv2DLayer(net['conv5_2'], filter_size=3, num_filters=512, pad='same'))
net['in_pred'] = L.InputLayer(shape, image_pred_var)
net['conv1_1s'] = L.batch_norm(L.Conv2DLayer(net['in_pred'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2s'] = L.batch_norm(L.Conv2DLayer(net['conv1_1s'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2s'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_1s'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2s'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2s'] = L.batch_norm(L.Conv2DLayer(net['conv3_1s'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_2s'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3s'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2s'] = L.batch_norm(L.Conv2DLayer(net['conv4_1s'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3s'] = L.batch_norm(L.Conv2DLayer(net['conv4_2s'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3s'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2s'] = L.batch_norm(L.Conv2DLayer(net['conv5_1s'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3s'] = L.batch_norm(L.Conv2DLayer(net['conv5_2s'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['concat1'] = L.ConcatLayer([net['conv1_2'], net['conv1_2s']])
net['concat2'] = L.ConcatLayer([net['conv2_2'], net['conv2_2s']])
net['concat3'] = L.ConcatLayer([net['conv3_3'], net['conv3_3s']])
net['concat4'] = L.ConcatLayer([net['conv4_3'], net['conv4_3s']])
net['concat5'] = L.ConcatLayer([net['conv5_3'], net['conv5_3s']])
net['out1'] = L.batch_norm(L.Conv2DLayer(net['concat1'], filter_size=3, num_filters=64, pad='same'))
net['out2'] = L.batch_norm(L.Conv2DLayer(net['concat2'], filter_size=3, num_filters=64, pad='same'))
net['out3'] = L.batch_norm(L.Conv2DLayer(net['concat3'], filter_size=3, num_filters=64, pad='same'))
net['out4'] = L.batch_norm(L.Conv2DLayer(net['concat4'], filter_size=3, num_filters=64, pad='same'))
net['out5'] = L.batch_norm(L.Conv2DLayer(net['concat5'], filter_size=3, num_filters=64, pad='same'))
net['out2_up'] = BilinearUpsamplingLayer(net['out2'], scale_factor=2)
net['out3_up'] = BilinearUpsamplingLayer(net['out3'], scale_factor=4)
net['out4_up'] = BilinearUpsamplingLayer(net['out4'], scale_factor=8)
net['out5_up'] = BilinearUpsamplingLayer(net['out5'], scale_factor=16)
net['concat'] = L.ConcatLayer([net['out1'],
net['out2_up'],
net['out3_up'],
net['out4_up'],
net['out5_up']])
net['comb_1'] = L.Conv2DLayer(net['concat'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2'] = L.batch_norm(L.Conv2DLayer(net['comb_1'], filter_size=1, num_filters=64, pad='same'))
net['out'] = L.Conv2DLayer(net['comb_2'], filter_size=1, num_filters=2, pad='same', nonlinearity=lasagne.nonlinearities.tanh)
net['fr_st'] = OFLayer(net['in'],net['out'], name='fr_st')
# Initialise the weights for the combination layer so that concatenation is initially equivalent to summation
print('Initialise the combination weights ...')
W = np.zeros(net['comb_1'].get_params()[0].get_value().shape, dtype='float32')
b = np.zeros(net['comb_1'].get_params()[1].get_value().shape, dtype='float32')
for i in range(64):
W[i, i::64] = 1.0
b[i] = 0.0
net['comb_1'].get_params()[0].set_value(W)
net['comb_1'].get_params()[1].set_value(b)
if load_vgg:
# Initialise the convolutional layers using VGG16 weights
print('Initialise the convolutional layers using VGG16 weights ...')
with np.load('/vol/biomedic/users/wbai/data/deep_learning/VGG/VGG_ILSVRC_16_layers.npz') as f:
vgg = f['vgg'][()]
for layer_name in ['conv1_1', 'conv1_2',
'conv2_1', 'conv2_2',
'conv3_1', 'conv3_2', 'conv3_3',
'conv4_1', 'conv4_2', 'conv4_3',
'conv5_1', 'conv5_2', 'conv5_3']:
# Since we apply batch_norm to the convolutional layer, each layer becomes Conv + BN + ReLU.
# We need to find the original Conv layer by using .input_layer twice.
# Also, batch_norm will remove the bias parameter b. Only W is kept.
if layer_name == 'conv1_1':
W_mean = np.mean(vgg[layer_name]['W'], axis=1, keepdims=True)
net[layer_name].input_layer.input_layer.get_params()[0].set_value(np.repeat(W_mean, shape[1], axis=1))
else:
net[layer_name].input_layer.input_layer.get_params()[0].set_value(vgg[layer_name]['W'])
return net
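# Usage sketch (hypothetical symbolic variables and image size; Theano/Lasagne assumed,
# not part of the original file): the returned dict exposes the dense flow in net['out']
# and the warped source image in net['fr_st'], which can be compiled as usual, e.g.
#     image_var = T.tensor4('image'); image_pred_var = T.tensor4('image_pred')
#     net = build_FCN_Siemese_flow(image_var, image_pred_var, shape=(None, 1, 192, 192))
#     flow, warped = L.get_output([net['out'], net['fr_st']])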
def build_FCN_Siemese_flow_rnn(image_var, image_pred_var, shape=(None, 1, None, None, None), n_class=1, load_vgg=False):
# Build a fully convolutional network (FCN) for motion estimation, with a recurrent component
net = {}
net['in'] = L.InputLayer(shape, image_var)
net['in'] = L.DimshuffleLayer(net['in'],(0,4,1,2,3))
shape = L.get_output_shape(net['in']) #shape=[batch_size, seq_size, num_channel, width, height]
n_channel = shape[2]
batchsize = shape[0]
seqlen = shape[1]
width = shape[3]
height = shape[4]
net['in'] = L.ReshapeLayer(net['in'], (-1, n_channel, width, height))
net['conv1_1'] = L.batch_norm(L.Conv2DLayer(net['in'], filter_size=3, num_filters=64, pad='same'))
net['conv1_2'] = L.batch_norm(L.Conv2DLayer(net['conv1_1'], filter_size=3, num_filters=64, pad='same'))
net['conv2_1'] = L.batch_norm(L.Conv2DLayer(net['conv1_2'], stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2'] = L.batch_norm(L.Conv2DLayer(net['conv2_1'], filter_size=3, num_filters=128, pad='same'))
net['conv3_1'] = L.batch_norm(L.Conv2DLayer(net['conv2_2'], stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2'] = L.batch_norm(L.Conv2DLayer(net['conv3_1'], filter_size=3, num_filters=256, pad='same'))
net['conv3_3'] = L.batch_norm(L.Conv2DLayer(net['conv3_2'], filter_size=3, num_filters=256, pad='same'))
net['conv4_1'] = L.batch_norm(L.Conv2DLayer(net['conv3_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2'] = L.batch_norm(L.Conv2DLayer(net['conv4_1'], filter_size=3, num_filters=512, pad='same'))
net['conv4_3'] = L.batch_norm(L.Conv2DLayer(net['conv4_2'], filter_size=3, num_filters=512, pad='same'))
net['conv5_1'] = L.batch_norm(L.Conv2DLayer(net['conv4_3'], stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2'] = L.batch_norm(L.Conv2DLayer(net['conv5_1'], filter_size=3, num_filters=512, pad='same'))
net['conv5_3'] = L.batch_norm(L.Conv2DLayer(net['conv5_2'], filter_size=3, num_filters=512, pad='same'))
net['in_pred'] = L.InputLayer(shape, image_pred_var)
net['in_pred'] = L.DimshuffleLayer(net['in_pred'],(0,4,1,2,3))
net['in_pred'] = L.ReshapeLayer(net['in_pred'], (-1, n_channel, width, height))
net['conv1_1s'] = L.batch_norm(L.Conv2DLayer(net['in_pred'], W = net['conv1_1'].input_layer.input_layer.W, filter_size=3, num_filters=64, pad='same'))
net['conv1_2s'] = L.batch_norm(L.Conv2DLayer(net['conv1_1s'], W = net['conv1_2'].input_layer.input_layer.W,filter_size=3, num_filters=64, pad='same'))
net['conv2_1s'] = L.batch_norm(L.Conv2DLayer(net['conv1_2s'], W = net['conv2_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=128, pad='same'))
net['conv2_2s'] = L.batch_norm(L.Conv2DLayer(net['conv2_1s'], W = net['conv2_2'].input_layer.input_layer.W,filter_size=3, num_filters=128, pad='same'))
net['conv3_1s'] = L.batch_norm(L.Conv2DLayer(net['conv2_2s'], W = net['conv3_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=256, pad='same'))
net['conv3_2s'] = L.batch_norm(L.Conv2DLayer(net['conv3_1s'], W = net['conv3_2'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv3_3s'] = L.batch_norm(L.Conv2DLayer(net['conv3_2s'], W = net['conv3_3'].input_layer.input_layer.W,filter_size=3, num_filters=256, pad='same'))
net['conv4_1s'] = L.batch_norm(L.Conv2DLayer(net['conv3_3s'], W = net['conv4_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv4_2s'] = L.batch_norm(L.Conv2DLayer(net['conv4_1s'], W = net['conv4_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv4_3s'] = L.batch_norm(L.Conv2DLayer(net['conv4_2s'], W = net['conv4_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_1s'] = L.batch_norm(L.Conv2DLayer(net['conv4_3s'], W = net['conv5_1'].input_layer.input_layer.W,stride=2, filter_size=3, num_filters=512, pad='same'))
net['conv5_2s'] = L.batch_norm(L.Conv2DLayer(net['conv5_1s'], W = net['conv5_2'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['conv5_3s'] = L.batch_norm(L.Conv2DLayer(net['conv5_2s'], W = net['conv5_3'].input_layer.input_layer.W,filter_size=3, num_filters=512, pad='same'))
net['concat1'] = L.ConcatLayer([net['conv1_2'], net['conv1_2s']])
net['concat2'] = L.ConcatLayer([net['conv2_2'], net['conv2_2s']])
net['concat3'] = L.ConcatLayer([net['conv3_3'], net['conv3_3s']])
net['concat4'] = L.ConcatLayer([net['conv4_3'], net['conv4_3s']])
net['concat5'] = L.ConcatLayer([net['conv5_3'], net['conv5_3s']])
net['out1'] = L.batch_norm(L.Conv2DLayer(net['concat1'], filter_size=3, num_filters=64, pad='same'))
net['out2'] = L.batch_norm(L.Conv2DLayer(net['concat2'], filter_size=3, num_filters=64, pad='same'))
net['out3'] = L.batch_norm(L.Conv2DLayer(net['concat3'], filter_size=3, num_filters=64, pad='same'))
net['out4'] = L.batch_norm(L.Conv2DLayer(net['concat4'], filter_size=3, num_filters=64, pad='same'))
net['out5'] = L.batch_norm(L.Conv2DLayer(net['concat5'], filter_size=3, num_filters=64, pad='same'))
net['out2_up'] = BilinearUpsamplingLayer(net['out2'], scale_factor=2)
net['out3_up'] = BilinearUpsamplingLayer(net['out3'], scale_factor=4)
net['out4_up'] = BilinearUpsamplingLayer(net['out4'], scale_factor=8)
net['out5_up'] = BilinearUpsamplingLayer(net['out5'], scale_factor=16)
net['concat'] = L.ConcatLayer([net['out1'],
net['out2_up'],
net['out3_up'],
net['out4_up'],
net['out5_up']])
net['comb_1'] = L.Conv2DLayer(net['concat'], filter_size=1, num_filters=64, pad='same', nonlinearity=None)
net['comb_2'] = L.batch_norm(L.Conv2DLayer(net['comb_1'], filter_size=1, num_filters=64, pad='same'))
net['comb_2_rshp'] = L.ReshapeLayer(net['comb_2'],(-1, seqlen, 64, width, height))
net['in_to_hid'] = L.Conv2DLayer(L.InputLayer((None, 64, width, height)), num_filters=2, filter_size=1, nonlinearity=None, name ='in_to_hid' )
net['hid_to_hid'] = L.Conv2DLayer(L.InputLayer(net['in_to_hid'].output_shape), num_filters=2, filter_size=1, nonlinearity=None, name = 'hid_to_hid')
net['rec'] = L.CustomRecurrentLayer(net['comb_2_rshp'], net['in_to_hid'], net['hid_to_hid'],nonlinearity=lasagne.nonlinearities.tanh, name = 'rec')
net['out'] = L.ReshapeLayer(net['rec'], (-1, 2, width, height))
#net['out'] = L.Conv2DLayer(net['comb_2'], filter_size=1, num_filters=2, pad='same', nonlinearity=lasagne.nonlinearities.tanh)
net['fr_st'] = OFLayer(net['in'],net['out'], name='fr_st')
net['fr_st'] = L.ReshapeLayer(net['fr_st'],(-1, seqlen, n_channel, width, height))
net['fr_st'] = L.DimshuffleLayer(net['fr_st'],(0,2,3,4,1))
return net
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
pytorch_version/main_motion.py
|
import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from network import *
from dataset import *
from util import *
import time
import scipy.io
import os
import matplotlib.pyplot as plt
import pdb
def get_to_cuda(cuda):
def to_cuda(tensor):
return tensor.cuda() if cuda else tensor
return to_cuda
def convert_to_1hot(label, n_class):
# Convert a label map (N x 1 x H x W) into a one-hot representation (N x C x H x W)
label_swap = label.swapaxes(1, 3)
label_flat = label_swap.flatten()
n_data = len(label_flat)
label_1hot = np.zeros((n_data, n_class), dtype='int16')
label_1hot[range(n_data), label_flat] = 1
label_1hot = label_1hot.reshape((label_swap.shape[0], label_swap.shape[1], label_swap.shape[2], n_class))
label_1hot = label_1hot.swapaxes(1, 3)
return label_1hot
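# Usage sketch (hypothetical shapes, not part of the original script):
#     label = np.random.randint(0, 4, size=(2, 1, 192, 192))   # N x 1 x H x W label map
#     onehot = convert_to_1hot(label, n_class=4)                # -> N x 4 x H x W
#     assert onehot.sum(axis=1).max() == 1                      # exactly one class per pixel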
def categorical_dice(prediction, truth, k):
# Dice overlap metric for label value k
A = (np.argmax(prediction, axis=1) == k)
B = (np.argmax(truth, axis=1) == k)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B)+0.001)
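# Worked example (hypothetical one-hot inputs, not part of the original script):
#     onehot = convert_to_1hot(label, n_class=4).astype('float32')
#     categorical_dice(onehot, onehot, k=1)   # -> ~1.0; the +0.001 avoids division by zero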
def huber_loss(x):
bsize, csize, height, width = x.size()
d_x = torch.index_select(x, 3, torch.arange(1, width).cuda()) - torch.index_select(x, 3, torch.arange(width-1).cuda())
d_y = torch.index_select(x, 2, torch.arange(1, height).cuda()) - torch.index_select(x, 2, torch.arange(height-1).cuda())
err = torch.sum(torch.mul(d_x, d_x))/height + torch.sum(torch.mul(d_y, d_y))/width
err /= bsize
tv_err = torch.sqrt(0.01+err)
return tv_err
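# Usage sketch (hypothetical flow tensor; a CUDA device is assumed because the function
# indexes with torch.arange(...).cuda()): the loss is a smoothed total-variation penalty
# on the predicted flow, so a perfectly smooth (constant) field gives sqrt(0.01) = 0.1.
#     flow = torch.zeros(4, 2, 96, 96).cuda()
#     huber_loss(flow)   # -> tensor(0.1000)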
def freeze_layer(layer):
for param in layer.parameters():
param.requires_grad = False
def plot_grid(gridx, gridy, **kwargs):
""" plot deformation grid """
for i in range(gridx.shape[0]):
plt.plot(gridx[i,:], gridy[i,:], **kwargs)
for i in range(gridx.shape[1]):
plt.plot(gridx[:,i], gridy[:,i], **kwargs)
def save_flow(x, pred, x_pred, flow):
#print(flow.shape)
x = x.data.cpu().numpy()
pred = pred.data.cpu().numpy()
x_pred = x_pred.data.cpu().numpy()
flow = flow.data.cpu().numpy() * 96
flow = flow[:,:, 60:140, 40:120]
X, Y = np.meshgrid(np.arange(0, 80, 2), np.arange(0, 80, 2))
plt.subplots(figsize=(6, 6))
plt.subplot(221)
plt.imshow(x[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(222)
plt.imshow(pred[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(223)
plt.imshow(x_pred[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(224)
plt.imshow(x_pred[5, 0, 60:140, 40:120], cmap='gray')
plt.quiver(X, Y, flow[5, 0, ::2, ::2], -flow[5, 1, ::2, ::2], scale_units='xy', scale=1, color='r')
# plot_grid(X - flow[5, 0, ::6, ::6],
#           Y - flow[5, 1, ::6, ::6],
#           color='r', linewidth=0.5)
plt.axis('off')
plt.savefig('./models/flow_map.png')
plt.close()
lr = 1e-4
n_worker = 4
bs = 10
n_epoch = 100
model_save_path = './models/model_flow_tmp.pth'
model = Registration_Net()
print(model)
# model.load_state_dict(torch.load(model_save_path))
model = model.cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=lr)
flow_criterion = nn.MSELoss()
Tensor = torch.cuda.FloatTensor
def train(epoch):
model.train()
epoch_loss = []
for batch_idx, batch in tqdm(enumerate(training_data_loader, 1),
total=len(training_data_loader)):
x, x_pred, x_gnd = batch
x_c = Variable(x.type(Tensor))
x_predc = Variable(x_pred.type(Tensor))
optimizer.zero_grad()
net = model(x_c, x_predc, x_c)
flow_loss = flow_criterion(net['fr_st'], x_predc) + 0.01 * huber_loss(net['out'])
flow_loss.backward()
optimizer.step()
epoch_loss.append(flow_loss.item())
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(training_data_loader.dataset),
100. * batch_idx / len(training_data_loader), np.mean(epoch_loss)))
save_flow(x_c, x_predc, net['fr_st'], net['out'])
# scipy.io.savemat(os.path.join('./models/flow_test.mat'),
# mdict={'flow': net['out'].data.cpu().numpy()})
torch.save(model.state_dict(), model_save_path)
print("Checkpoint saved to {}".format(model_save_path))
data_path = '../test'
train_set = TrainDataset(data_path, transform=data_augment)
# loading the data
training_data_loader = DataLoader(dataset=train_set, num_workers=n_worker,
batch_size=bs, shuffle=True)
for epoch in range(0, n_epoch + 1):
print('Epoch {}'.format(epoch))
start = time.time()
train(epoch)
end = time.time()
print("training took {:.8f}".format(end-start))
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
utils/visualise.py
|
import numpy as np
import matplotlib.pyplot as plt
import os, sys
def mask_color_img(img, mask):
alpha = 0.5
rows, cols = img.shape
# Construct a colour image to superimpose
color_mask = np.zeros((rows, cols, 3))
color_mask[mask == 1] = [1, 0, 0] # Red block
color_mask[mask == 2] = [0, 1, 0] # Green block
color_mask[mask == 3] = [0, 0, 1] # Blue block
# Construct RGB version of grey-level image
img_color = np.dstack((img, img, img))
img_masked = img_color * 0.8 + np.double(color_mask) * 0.3
return img_masked
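# Usage sketch (hypothetical inputs, not part of the original file): overlay a 3-class
# mask (labels 1/2/3 -> red/green/blue) on a grey-level image and display it.
#     overlay = mask_color_img(img_slice, mask_slice)   # img_slice, mask_slice: H x W arrays
#     plt.imshow(overlay); plt.axis('off')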
def create_prediction_video(save_dir, img, pred, loc, seq_num):
# create a video with joint prediction of ROI
mask = np.argmax(pred, axis=1)
img_mask_bank = []
for t in range(seq_num):
img_mask = mask_color_img(img[0, t], mask[t])
img_mask_bank.append(img_mask)
mask[mask == 1] = 0
mask[mask == 3] = 0
mask[mask == 2] = 1
mask = np.tile(mask[:, np.newaxis], (1, 2, 1, 1))
loc = loc * mask
img_mask_bank = np.array(img_mask_bank)
flow = loc[:, :, 60:140, 40:120] * 96
X, Y = np.meshgrid(np.arange(0, 80, 2), np.arange(0, 80, 2))
for t in range(seq_num):
# meanu = np.mean(flow[t, 0])
# meanv = np.mean(flow[t, 1])
plt.imshow(img_mask_bank[t, 60:140, 40:120])
# mean_scale = np.sqrt(meanu ** 2 + meanv ** 2) * 200
plt.quiver(X, Y, -flow[t, 0, ::2, ::2], flow[t, 1, ::2, ::2], scale_units='xy', scale=1, color='y')
plt.axis('off')
plt.savefig(os.path.join(save_dir, 'test_%d.png'%t))
plt.close()
image_dir = os.path.join(save_dir, 'test_%d.png')
video_dir = os.path.join(save_dir, 'video.avi')
#os.system('ffmpeg -f image2 -i {0} -vcodec mpeg4 -b 800k {1}'.format(image_dir, video_dir))
print("Done: images saved in {}".format(image_dir))
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
test_prediction.py
|
"""
Created on Mon Feb 12 17:52:19 2018
@author: cq615
"""
import os, time, h5py, sys
from models.seg_network import build_FCN_triple_branch_rnn
from dataio.dataset import *
from utils.metrics import *
from dataio.data_generator import *
from utils.visualise import *
import lasagne.layers as L
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
def tensor5(name=None, dtype=None):
if dtype is None:
dtype = theano.config.floatX
type = T.TensorType(dtype, (False, )*5)
return type(name)
if __name__ == '__main__':
data_test_path = 'test'
save_dir = 'visualisation'
seq_num = 50
n_class = 4
size = 192
# Build the network
image_var = tensor5('image')
image_pred_var = tensor5('image_pred')
label_var = T.itensor4('label')
image_seg_var = T.tensor4('image_seg')
net = build_FCN_triple_branch_rnn(image_var, image_pred_var, image_seg_var, shape = (None,1,size, size, seq_num), shape_seg = (None, 1, size, size))
#model_file = 'model/FCN_VGG16_sz192_flow_simese_rnn_shared.npz'
model_file = 'model/FCN_VGG16_sz192_triple_3d_rnn_warped_tmp.npz'
with np.load(model_file) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
L.set_all_param_values([net['out'],net['outs']], param_values)
test_prediction = L.get_output(net['outs'])
test_loc = L.get_output(net['out'], deterministic = True)
test_fn = theano.function([image_var, image_pred_var, image_seg_var], [test_prediction, test_loc], on_unused_input='ignore')
filename = [f for f in sorted(os.listdir(data_test_path))]
for f_id in filename[1:2]:
# Load the dataset
print("Loading data...")
img, seg = load_UKBB_test_data_3d(data_test_path, f_id, size)
print("Starting testing...")
# reshape input
input_data_seg = np.reshape(img,(-1,size,size))
input_data_seg = input_data_seg[:,np.newaxis]
input_data = np.transpose(img,(0,2,3,1))
input_data = np.expand_dims(input_data, axis=1)
input_data_pred = np.tile(input_data[...,0:1],(1,1,1,1,input_data.shape[4]))
start_time = time.time()
pred, loc = test_fn(input_data_pred, input_data, input_data_seg)
print("Multi-task prediction time:\t{:.2f}s".format(time.time()-start_time))
print("Creating a video for joint prediction...")
create_prediction_video(save_dir, img, pred, loc, seq_num)
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
pytorch_version/network.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable, grad
import numpy as np
import math
def relu():
return nn.ReLU(inplace=True)
def conv(in_channels, out_channels, kernel_size=3, stride=1, padding = 1, nonlinearity = relu):
conv_layer = nn.Conv2d(in_channels = in_channels, out_channels= out_channels, kernel_size = kernel_size, stride = stride, padding = padding, bias = False)
nn.init.xavier_uniform_(conv_layer.weight, gain=np.sqrt(2.0))
nll_layer = nonlinearity()
bn_layer = nn.BatchNorm2d(out_channels)
# nn.init.constant_(bn_layer.weight, 1)
# nn.init.constant_(bn_layer.bias, 0)
layers = [conv_layer, bn_layer, nll_layer]
return nn.Sequential(*layers)
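# Usage sketch (hypothetical sizes, not part of the original file): each call returns a
# Conv2d -> BatchNorm2d -> ReLU block, e.g.
#     block = conv(1, 64)                              # 3x3 conv, stride 1, padding 1
#     y = block(torch.randn(2, 1, 192, 192))           # -> shape (2, 64, 192, 192)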
def conv_blocks_2(in_channels, out_channels, strides=1):
conv1 = conv(in_channels, out_channels, stride = strides)
conv2 = conv(out_channels, out_channels, stride=1)
layers = [conv1, conv2]
return nn.Sequential(*layers)
def conv_blocks_3(in_channels, out_channels, strides=1):
conv1 = conv(in_channels, out_channels, stride = strides)
conv2 = conv(out_channels, out_channels, stride=1)
conv3 = conv(out_channels, out_channels, stride=1)
layers = [conv1, conv2, conv3]
return nn.Sequential(*layers)
def generate_grid(x, offset):
x_shape = x.size()
grid_w, grid_h = torch.meshgrid([torch.linspace(-1, 1, x_shape[2]), torch.linspace(-1, 1, x_shape[3])]) # (h, w)
grid_w = grid_w.cuda().float()
grid_h = grid_h.cuda().float()
grid_w = nn.Parameter(grid_w, requires_grad=False)
grid_h = nn.Parameter(grid_h, requires_grad=False)
offset_h, offset_w = torch.split(offset, 1, 1)
offset_w = offset_w.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])) # (b*c, h, w)
offset_h = offset_h.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])) # (b*c, h, w)
offset_w = grid_w + offset_w
offset_h = grid_h + offset_h
offsets = torch.stack((offset_h, offset_w), 3)
return offsets
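# Usage sketch (hypothetical shapes; CUDA assumed because the base grid is moved to .cuda()):
# the predicted offset is added to an identity meshgrid in [-1, 1] to form the sampling grid
# expected by F.grid_sample, so a zero offset warps the image (approximately) onto itself.
#     x = torch.randn(2, 1, 96, 96).cuda()
#     offset = torch.zeros(2, 2, 96, 96).cuda()
#     grid = generate_grid(x, offset)                  # -> shape (2, 96, 96, 2)
#     warped = F.grid_sample(x, grid)                  # ~= x for a zero offset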
class Registration_Net(nn.Module):
"""Deformable registration network with input from image space """
def __init__(self, n_ch=1):
super(Registration_Net, self).__init__()
self.conv_blocks = [conv_blocks_2(n_ch, 64), conv_blocks_2(64, 128, 2), conv_blocks_3(128, 256, 2), conv_blocks_3(256, 512, 2), conv_blocks_3(512, 512, 2)]
self.conv = []
for in_filters in [128, 256, 512, 1024, 1024]:
self.conv += [conv(in_filters, 64)]
self.conv_blocks = nn.Sequential(*self.conv_blocks)
self.conv = nn.Sequential(*self.conv)
self.conv6 = nn.Conv2d(64 * 5, 64, 1)
self.conv7 = conv(64, 64, 1, 1, 0)
self.conv8 = nn.Conv2d(64, 2, 1)
def forward(self, x, x_pred, x_img):
# x: source image; x_pred: target image; x_img: source image or segmentation map
net = {}
net['conv0'] = x
net['conv0s'] = x_pred
for i in range(5):
net['conv%d'% (i+1)] = self.conv_blocks[i](net['conv%d'%i])
net['conv%ds' % (i + 1)] = self.conv_blocks[i](net['conv%ds' % i])
net['concat%d'%(i+1)] = torch.cat((net['conv%d'% (i+1)], net['conv%ds' % (i + 1)]), 1)
net['out%d'%(i+1)] = self.conv[i](net['concat%d'%(i+1)])
if i > 0:
net['out%d_up'%(i+1)] = F.interpolate(net['out%d'%(i+1)], scale_factor=2**i, mode='bilinear', align_corners=True)
net['concat'] = torch.cat((net['out1'], net['out2_up'], net['out3_up'], net['out4_up'], net['out5_up']), 1)
net['comb_1'] = self.conv6(net['concat'])
net['comb_2'] = self.conv7(net['comb_1'])
net['out'] = torch.tanh(self.conv8(net['comb_2']))
net['grid'] = generate_grid(x_img, net['out'])
net['fr_st'] = F.grid_sample(x_img, net['grid'])
return net
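# Usage sketch (hypothetical inputs; model and tensors on the GPU, not part of the original file):
#     model = Registration_Net().cuda()
#     src = torch.randn(2, 1, 192, 192).cuda()         # source frame
#     tgt = torch.randn(2, 1, 192, 192).cuda()         # target frame
#     out = model(src, tgt, src)
#     out['out'].shape                                 # (2, 2, 192, 192) dense motion field in [-1, 1]
#     out['fr_st'].shape                                # (2, 1, 192, 192) source warped towards the target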
class Seg_Motion_Net(nn.Module):
"""Joint motion estimation and segmentation """
def __init__(self, n_ch=1):
super(Seg_Motion_Net, self).__init__()
self.conv_blocks = [conv_blocks_2(n_ch, 64), conv_blocks_2(64, 128, 2), conv_blocks_3(128, 256, 2), conv_blocks_3(256, 512, 2), conv_blocks_3(512, 512, 2)]
self.conv = []
for in_filters in [128, 256, 512, 1024, 1024]:
self.conv += [conv(in_filters, 64)]
self.conv_blocks = nn.Sequential(*self.conv_blocks)
self.conv = nn.Sequential(*self.conv)
self.conv6 = nn.Conv2d(64 * 5, 64, 1)
self.conv7 = conv(64, 64, 1, 1, 0)
self.conv8 = nn.Conv2d(64, 2, 1)
self.convs = []
for in_filters in [64, 128, 256, 512, 512]:
self.convs += [conv(in_filters, 64)]
self.convs = nn.Sequential(*self.convs)
self.conv6s = nn.Conv2d(64*5,64,1)
self.conv7s = conv(64,64,1,1,0)
self.conv8s = nn.Conv2d(64,4,1)
def forward(self, x, x_pred, x_img):
# x: source image; x_pred: target image; x_img: image to be segmented
# motion estimation branch
net = {}
net['conv0'] = x
net['conv0s'] = x_pred
for i in range(5):
net['conv%d'% (i+1)] = self.conv_blocks[i](net['conv%d'%i])
net['conv%ds' % (i + 1)] = self.conv_blocks[i](net['conv%ds' % i])
net['concat%d'%(i+1)] = torch.cat((net['conv%d'% (i+1)], net['conv%ds' % (i + 1)]), 1)
net['out%d'%(i+1)] = self.conv[i](net['concat%d'%(i+1)])
if i > 0:
net['out%d_up'%(i+1)] = F.interpolate(net['out%d'%(i+1)], scale_factor=2**i, mode='bilinear', align_corners=True)
net['concat'] = torch.cat((net['out1'], net['out2_up'], net['out3_up'], net['out4_up'], net['out5_up']), 1)
net['comb_1'] = self.conv6(net['concat'])
net['comb_2'] = self.conv7(net['comb_1'])
net['out'] = torch.tanh(self.conv8(net['comb_2']))
net['grid'] = generate_grid(x_img, net['out'])
net['fr_st'] = F.grid_sample(x_img, net['grid'])
# segmentation branch
net['conv0ss'] = x_img
for i in range(5):
net['conv%dss' % (i + 1)] = self.conv_blocks[i](net['conv%dss' % i])
net['out%ds' % (i+1)] = self.convs[i](net['conv%dss' % (i+1)])
if i > 0:
net['out%ds_up'%(i+1)] = F.interpolate(net['out%ds'%(i+1)], scale_factor=2**i, mode='bilinear', align_corners=True)
net['concats'] = torch.cat((net['out1s'],
net['out2s_up'],
net['out3s_up'],
net['out4s_up'],
net['out5s_up']), 1)
net['comb_1s'] = self.conv6s(net['concats'])
net['comb_2s'] = self.conv7s(net['comb_1s'])
net['outs'] = self.conv8s(net['comb_2s'])
net['outs_softmax'] = F.softmax(net['outs'], dim=1)
# net['warped_outs'] = F.grid_sample(net['outs_softmax'], net['grid'], padding_mode='border')
return net
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
pytorch_version/dataset.py
|
import torch.utils.data as data
import numpy as np
import datetime
from os import listdir
from os.path import join
from util import *
class TrainDataset(data.Dataset):
def __init__(self, data_path, transform=None):
super(TrainDataset, self).__init__()
self.data_path = data_path
self.filename = [f for f in sorted(listdir(self.data_path))]
# data augmentation
self.transform = transform
def __getitem__(self, index):
# update the seed to avoid workers sampling the same augmentation parameters
np.random.seed(datetime.datetime.now().second + datetime.datetime.now().microsecond)
# load the nifti images
input, target = load_data_3d(self.data_path, self.filename[index], size = 192)
if self.transform:
input, target = self.transform(input, target)
image = input[0,:1]
image_pred = input[0,1:]
return image, image_pred, target[0,0]
def __len__(self):
return len(self.filename)
class TestDataset(data.Dataset):
def __init__(self, data_path, frame, transform=None):
super(TestDataset, self).__init__()
self.data_path = data_path
self.filename = [f for f in sorted(listdir(self.data_path))]
# data augmentation
self.transform = transform
self.frame = frame
def __getitem__(self, index):
# update the seed to avoid workers sampling the same augmentation parameters
#np.random.seed(datetime.datetime.now().second + datetime.datetime.now().microsecond)
# load the nifti images
input, target, dx = load_test_data(self.data_path, self.filename[index], self.frame, size = 192)
if self.transform:
input, target = self.transform(input, target)
return input, target
def __len__(self):
return len(self.filename)
class TestDataset_flow(data.Dataset):
def __init__(self, data_path, transform=None):
super(TestDataset_flow, self).__init__()
self.data_path = data_path
self.filename = [f for f in sorted(listdir(self.data_path))]
# data augmentation
self.transform = transform
def __getitem__(self, index):
# update the seed to avoid workers sampling the same augmentation parameters
#np.random.seed(datetime.datetime.now().second + datetime.datetime.now().microsecond)
# load the nifti images
input_ed, target_ed, dx = load_test_data(self.data_path, self.filename[index], 'ED', size=192)
input_es, target_es, dx = load_test_data(self.data_path, self.filename[index], 'ES', size=192)
# if self.transform:
# input, target = self.transform(input, target)
return input_ed, target_ed, input_es, target_es, dx
def __len__(self):
return len(self.filename)
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
utils/seg_metrics.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:53:58 2018
@author: cq615
"""
import numpy as np, cv2
def np_categorical_dice(pred, truth, k):
# Dice overlap metric for label value k
A = (pred == k).astype(np.float32)
B = (truth == k).astype(np.float32)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B))
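# Worked example (hypothetical label maps, not part of the original file): identical
# integer label maps give a Dice of 1.0 for any label value that is present.
#     a = np.array([[0, 1], [1, 2]])
#     np_categorical_dice(a, a, 1)   # -> 1.0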
def distance_metric(seg_A, seg_B, dx, k):
# Measure the distance errors between the contours of two segmentations
# The manual contours are drawn on 2D slices.
# We calculate contour to contour distance for each slice.
table_md = []
table_hd = []
K, X, Y, Z = seg_A.shape
for z in range(Z):
# Binary mask at this slice
slice_A = seg_A[k, :, :, z].astype(np.uint8)
slice_B = seg_B[k, :, :, z].astype(np.uint8)
# The distance is defined only when both contours exist on this slice
if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
# Find contours and retrieve all the points
contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pts_A = contours[0]
for i in range(1, len(contours)):
pts_A = np.vstack((pts_A, contours[i]))
contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pts_B = contours[0]
for i in range(1, len(contours)):
pts_B = np.vstack((pts_B, contours[i]))
# Distance matrix between point sets
M = np.zeros((len(pts_A), len(pts_B)))
for i in range(len(pts_A)):
for j in range(len(pts_B)):
M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])
# Mean distance and hausdorff distance
md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
table_md += [md]
table_hd += [hd]
# Return the mean distance and Hausdorff distance across 2D slices
mean_md = np.mean(table_md) if table_md else None
mean_hd = np.mean(table_hd) if table_hd else None
return mean_md, mean_hd
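# Usage sketch (hypothetical segmentations of shape K x X x Y x Z and pixel spacing dx in mm,
# not part of the original file): returns the mean contour distance and Hausdorff distance in mm
# for label k, averaged over the slices where both contours exist (None if no such slice).
#     md, hd = distance_metric(seg_auto, seg_manual, dx=1.8, k=1)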
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
utils/metrics.py
|
import theano.tensor as T
def categorical_crossentropy(prediction, truth):
# prediction: label probability prediction, 4D tensor
# truth: ground truth label map, 1-hot representation, 4D tensor
return - T.mean(T.sum(truth * T.log(prediction), axis=1))
def weighted_categorical_crossentropy(prediction, truth, weight):
# prediction: label probability prediction, 4D tensor
# truth: ground truth label map, 1-hot representation, 4D tensor
return - T.mean(T.sum((truth * T.log(prediction)) * weight, axis=1))
def categorical_accuracy(prediction, truth):
A = T.argmax(prediction, axis=1, keepdims=True)
B = T.argmax(truth, axis=1, keepdims=True)
return T.mean(T.eq(A, B))
def categorical_dice(prediction, truth, k):
# Dice overlap metric for label value k
A = T.eq(T.argmax(prediction, axis=1, keepdims=True), k)
B = T.eq(T.argmax(truth, axis=1, keepdims=True), k)
return 2 * T.sum(A * B) / T.cast(T.sum(A) + T.sum(B), 'float32')
def huber_loss(x):
d_x = x[:,:,:,1:]-x[:,:,:,:-1]
d_y = x[:,:,1:,:]-x[:,:,:-1,:]
err = (d_x**2).sum()+(d_y**2).sum()
err /= 20.0
tv_err = T.sqrt(0.01+err)
return tv_err
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
dataio/data_generator.py
|
import os, h5py,sys
import nibabel as nib, numpy as np
# A generator function using the keyword yield
def iterate_minibatches(image, label, batch_size):
assert len(image) == len(label)
for start_idx in range(0, len(image) - batch_size + 1, batch_size):
end_idx = start_idx + batch_size
idx = slice(start_idx, end_idx)
yield image[idx], label[idx]
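# Usage sketch (hypothetical arrays, not part of the original file): yields aligned
# image/label mini-batches and silently drops the final incomplete batch.
#     for img_batch, lbl_batch in iterate_minibatches(images, labels, batch_size=8):
#         pass  # train on (img_batch, lbl_batch)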
def crop_and_fill(img,size):
img_new = np.zeros((img.shape[0],img.shape[1],size,size))
h = np.amin([size,img.shape[2]])
w = np.amin([size,img.shape[3]])
img_new[:,:,size//2-h//2:size//2+h//2,size//2-w//2:size//2+w//2]=img[:,:,img.shape[2]//2-h//2:img.shape[2]//2+h//2,img.shape[3]//2-w//2:img.shape[3]//2+w//2]
return img_new
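# Example (hypothetical input, not part of the original file): centre-crops or zero-pads
# the last two axes to `size`, keeping the original content centred.
#     out = crop_and_fill(np.ones((1, 1, 150, 210)), 192)   # -> shape (1, 1, 192, 192)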
def load_UKBB_train_data_3d(data_path, filename, size):
# Load images and labels, save them into a hdf5 file
nim = nib.load(os.path.join(data_path, filename, 'sa.nii.gz'))
image = nim.get_data()[:, :, :, :]
# generate random index for t and z dimension
rand_t = np.random.randint(0,image.shape[3])
rand_z = np.random.randint(0,image.shape[2])
# preprocessing
image_max = np.max(np.abs(image))
image /= image_max
image_sa = image[...,rand_z, rand_t]
image_sa = image_sa[np.newaxis, np.newaxis]
# Read image and header information
frame = np.random.choice(['ED','ES'])
nim = nib.load(os.path.join(data_path, filename, 'sa_'+frame+'.nii.gz'))
image = nim.get_data()[:, :, :]
nim_seg = nib.load(os.path.join(data_path, filename, 'label_sa_'+frame+'.nii.gz'))
seg = nim_seg.get_data()[:, :, :]
image_frame = image[...,rand_z]
image_frame /= image_max
seg_frame = seg[...,rand_z]
image_frame = image_frame[np.newaxis, np.newaxis]
seg_frame = seg_frame[np.newaxis, np.newaxis]
image_bank = np.concatenate((image_sa, image_frame), axis=1)
image_bank = crop_and_fill(image_bank, size)
seg_bank = crop_and_fill(seg_frame, size)
image_bank = np.transpose(image_bank, (0, 1, 3, 2))
seg_bank = np.transpose(seg_bank, (0, 1, 3, 2))
# plt.subplot(211)
# plt.imshow(image_bank[0,1],cmap='gray')
# plt.subplot(212)
# plt.imshow(seg_bank[0,0],cmap='gray')
# plt.show()
return image_bank, seg_bank
def generate_batch(data_path, batch_size, img_size):
filename = [f for f in sorted(os.listdir(data_path))]
batch_num = np.random.randint(0,len(os.listdir(data_path)), batch_size)
batch_filename = [filename[i] for i in batch_num]
train_image=[]
train_label=[]
for f in batch_filename:
img, seg = load_UKBB_train_data_3d(data_path, f, img_size)
train_image.append(img)
train_label.append(seg)
train_image = np.concatenate(train_image)
train_label = np.concatenate(train_label)
return train_image, train_label
def load_UKBB_test_data_3d(data_path, filename, size):
# load UKBB dataset (only the mid-ventricular slice)
nim = nib.load(os.path.join(data_path, filename, 'sa.nii.gz'))
image = nim.get_data()[:, :, :, :]
image_mid = image[:, :, int(round(image.shape[2] // 2.0))]
image_mid = np.array(image_mid, dtype='float32')
# preprocessing
curr_data = image_mid
pl, ph = np.percentile(curr_data, (.01, 99.99))
curr_data[curr_data < pl] = pl
curr_data[curr_data > ph] = ph
curr_data = (curr_data.astype(float) - pl) / (ph - pl)
image_mid = curr_data
nim_seg = nib.load(os.path.join(data_path, filename, 'label_sa_ED.nii.gz'))
seg = nim_seg.get_data()[:, :, :]
seg_mid = seg[:, :, int(round(seg.shape[2] // 2.0))]
seg_mid = seg_mid[np.newaxis]
image_bank = image_mid[np.newaxis]
seg_bank = seg_mid[..., np.newaxis]
image_bank = np.transpose(image_bank, (0, 3, 2, 1))
seg_bank = np.transpose(seg_bank, (0, 3, 2, 1))
image_bank = crop_and_fill(image_bank, size)
seg_bank = crop_and_fill(seg_bank, size)
seg_bank = np.array(seg_bank, dtype='int32')
image_bank = np.array(image_bank, dtype='float32')
return image_bank, seg_bank
|
cq615/Joint-Motion-Estimation-and-Segmentation
|
models/layers.py
|
import theano.tensor as T
import lasagne
from lasagne.layers import Layer
import theano
import numpy as np
from lasagne.layers import MergeLayer, Conv2DLayer
def ensure_set_name(default_name, kwargs):
"""Ensure that the parameters contain names. Be careful, kwargs need to be
passed as a dictionary here
Parameters
----------
default_name: string
default name to set if neither name nor pr is present; if name is not
present but pr is, the name becomes ``pr+default_name``
kwargs: dict
keyword arguments given to functions
Returns
-------
kwargs: dict
"""
if 'name' not in kwargs:
raise Warning("You need to name the layers, "
"otherwise it simply won't work")
global id_ctr
if 'name' in kwargs and 'pr' in kwargs:
kwargs['name'] = kwargs['pr']+kwargs['name']
elif 'name' not in kwargs and 'pr' in kwargs:
idx = next(id_ctr)
kwargs['name'] = kwargs['pr'] + default_name + '_g' + str(idx)
elif 'name' not in kwargs:
idx = next(id_ctr)
kwargs['name'] = default_name + '_g' + str(idx)
return kwargs
def Conv(incoming, num_filters, filter_size=3,
stride=(1, 1), pad='same', W=lasagne.init.HeNormal(),
b=None, nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
"""
Overrides the default parameters for ConvLayer
"""
ensure_set_name('conv', kwargs)
return Conv2DLayer(incoming, num_filters, filter_size, stride, pad, W=W, b=b,
nonlinearity=nonlinearity, **kwargs)
def as_tuple(x, N, t=None):
"""
Coerce a value to a tuple of given length (and possibly given type).
Parameters
----------
x : value or iterable
N : integer
length of the desired tuple
t : type, optional
required type for all elements
Returns
-------
tuple
``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.
Raises
------
TypeError
if `type` is given and `x` or any of its elements do not match it
ValueError
if `x` is iterable, but does not have exactly `N` elements
"""
try:
X = tuple(x)
except TypeError:
X = (x,) * N
if (t is not None) and not all(isinstance(v, t) for v in X):
raise TypeError("expected a single value or an iterable "
"of {0}, got {1} instead".format(t.__name__, x))
if len(X) != N:
raise ValueError("expected a single value or an iterable "
"with length {0}, got {1} instead".format(N, x))
return X
class IdLayer(Layer):
def get_output_for(self, input, **kwargs):
return input
class SumLayer(Layer):
def get_output_for(self, input, **kwargs):
return input.sum(axis=-1)
def get_output_shape_for(self, input_shape):
return input_shape[:-1]
class SHLULayer(Layer):
def get_output_for(self, input, **kwargs):
return T.sgn(input) * T.maximum(input - 1, 0)
class ResidualLayer(lasagne.layers.ElemwiseSumLayer):
'''
Residual Layer, which just wraps around ElemwiseSumLayer
'''
def __init__(self, incomings, **kwargs):
ensure_set_name('res', kwargs)
super(ResidualLayer, self).__init__(incomings, **kwargs)
# store names
input_names = []
for l in incomings:
if isinstance(l, lasagne.layers.InputLayer):
input_names.append(l.name if l.name else l.input_var.name)
elif l.name:
input_names.append(l.name)
else:
input_names.append(str(l))
self.input_names = input_names
def get_output_for(self, inputs, **kwargs):
return super(lasagne.layers.ElemwiseSumLayer,
self).get_output_for(inputs, **kwargs)
class OFLayer(MergeLayer):
def __init__(self, incoming, localization_network, downsample_factor=1,
border_mode='nearest', **kwargs):
super(OFLayer, self).__init__(
[incoming, localization_network], **kwargs)
self.downsample_factor = as_tuple(downsample_factor, 2)
self.border_mode = border_mode
input_shp, loc_shp = self.input_shapes
# loc_shp=(batch_size,2,height, width)
# if loc_shp[-1] != 6 or len(loc_shp) != 2:
# raise ValueError("The localization network must have "
# "output shape: (batch_size, 6)")
if len(input_shp) != 4:
raise ValueError("The input network must have a 4-dimensional "
"output shape: (batch_size, num_input_channels, "
"input_rows, input_columns)")
def get_output_shape_for(self, input_shapes):
shape = input_shapes[0]
factors = self.downsample_factor
return (shape[:2] + tuple(None if s is None else int(s // f)
for s, f in zip(shape[2:], factors)))
def get_output_for(self, inputs, **kwargs):
# see eq. (1) and sec 3.1 in [1]
input, theta = inputs
return _transform(theta, input, self.downsample_factor,
self.border_mode)
def _transform(theta, input, downsample_factor, border_mode):
num_batch, num_channels, height, width = input.shape
# theta = T.reshape(theta, (-1, 2, 3))
# grid of (x_t, y_t, 1), eq (1) in ref [1]
out_height = T.cast(height // downsample_factor[0], 'int32')
out_width = T.cast(width // downsample_factor[1], 'int32')
grid = _meshgrid(out_height, out_width)
# Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
# T_g = T.dot(theta, grid)
T_d = T.reshape(theta, (-1,2,out_height*out_width))
grid = grid.reshape((1,grid.shape[0], grid.shape[1])).repeat(T_d.shape[0],0)
T_g = T_d+grid
#T_g = (T_d+1) * grid;
#T_g = T_d[:,2:,:] + T_g;
x_s = T_g[:, 0]
y_s = T_g[:, 1]
x_s_flat = x_s.flatten()
y_s_flat = y_s.flatten()
# dimshuffle input to (bs, height, width, channels)
input_dim = input.dimshuffle(0, 2, 3, 1)
input_transformed = _interpolate(
input_dim, x_s_flat, y_s_flat,
out_height, out_width, border_mode)
output = T.reshape(
input_transformed, (num_batch, out_height, out_width, num_channels))
output = output.dimshuffle(0, 3, 1, 2) # dimshuffle to conv format
return output
def _interpolate(im, x, y, out_height, out_width, border_mode):
# *_f are floats
num_batch, height, width, channels = im.shape
height_f = T.cast(height, theano.config.floatX)
width_f = T.cast(width, theano.config.floatX)
# scale coordinates from [-1, 1] to [0, width/height - 1]
x = (x + 1) / 2 * (width_f - 1)
y = (y + 1) / 2 * (height_f - 1)
# obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
# we need those in floatX for interpolation and in int64 for indexing.
x0_f = T.floor(x)
y0_f = T.floor(y)
x1_f = x0_f + 1
y1_f = y0_f + 1
# for indexing, we need to take care of the border mode for outside pixels.
if border_mode == 'nearest':
x0 = T.clip(x0_f, 0, width_f - 1)
x1 = T.clip(x1_f, 0, width_f - 1)
y0 = T.clip(y0_f, 0, height_f - 1)
y1 = T.clip(y1_f, 0, height_f - 1)
elif border_mode == 'mirror':
w = 2 * (width_f - 1)
x0 = T.minimum(x0_f % w, -x0_f % w)
x1 = T.minimum(x1_f % w, -x1_f % w)
h = 2 * (height_f - 1)
y0 = T.minimum(y0_f % h, -y0_f % h)
y1 = T.minimum(y1_f % h, -y1_f % h)
elif border_mode == 'wrap':
x0 = T.mod(x0_f, width_f)
x1 = T.mod(x1_f, width_f)
y0 = T.mod(y0_f, height_f)
y1 = T.mod(y1_f, height_f)
else:
raise ValueError("border_mode must be one of "
"'nearest', 'mirror', 'wrap'")
x0, x1, y0, y1 = (T.cast(v, 'int32') for v in (x0, x1, y0, y1))
# The input is [num_batch, height, width, channels]. We do the lookup in
# the flattened input, i.e [num_batch*height*width, channels]. We need
# to offset all indices to match the flat version
dim2 = width
dim1 = width*height
base = T.repeat(
T.arange(num_batch, dtype='int32')*dim1, out_height*out_width)
base_y0 = base + y0*dim2
base_y1 = base + y1*dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# use indices to lookup pixels for all samples
im_flat = im.reshape((-1, channels))
Ia = im_flat[idx_a]
Ib = im_flat[idx_b]
Ic = im_flat[idx_c]
Id = im_flat[idx_d]
# calculate interpolated values
wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
return output
def _linspace(start, stop, num):
# Theano linspace. Behaves similar to np.linspace
start = T.cast(start, theano.config.floatX)
stop = T.cast(stop, theano.config.floatX)
num = T.cast(num, theano.config.floatX)
step = (stop-start)/(num-1)
return T.arange(num, dtype=theano.config.floatX)*step+start
def _meshgrid(height, width):
# This function is the grid generator from eq. (1) in reference [1].
# It is equivalent to the following numpy code:
# x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
# np.linspace(-1, 1, height))
# ones = np.ones(np.prod(x_t.shape))
# grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
# It is implemented in Theano instead to support symbolic grid sizes.
# Note: If the image size is known at layer construction time, we could
# compute the meshgrid offline in numpy instead of doing it dynamically
# in Theano. However, it hardly affected performance when we tried.
x_t = T.dot(T.ones((height, 1)),
_linspace(-1.0, 1.0, width).dimshuffle('x', 0))
y_t = T.dot(_linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
T.ones((1, width)))
x_t_flat = x_t.reshape((1, -1))
y_t_flat = y_t.reshape((1, -1))
# ones = T.ones_like(x_t_flat)
# grid = T.concatenate([x_t_flat, y_t_flat, ones], axis=0)
grid = T.concatenate([x_t_flat, y_t_flat], axis=0)
return grid
class SubpixelLayer(Layer):
def __init__(self, incoming,r,c, **kwargs):
super(SubpixelLayer, self).__init__(incoming, **kwargs)
self.r=r # Upscale factor
self.c=c # number of output channels
def get_output_shape_for(self, input_shape):
return (input_shape[0],self.c,self.r*input_shape[2],self.r*input_shape[3])
def get_output_for(self, input, deterministic=False, **kwargs):
out = T.zeros((input.shape[0],self.output_shape[1],self.output_shape[2],self.output_shape[3]))
for x in xrange(self.r): # loop across all feature maps belonging to this channel
for y in xrange(self.r):
out=T.set_subtensor(out[:,:,x::self.r,y::self.r],input[:,self.r*x+y::self.r*self.r,:,:])
return out
class ScaleLayer(Layer):
def __init__(self, incoming,r, **kwargs):
super(ScaleLayer, self).__init__(incoming, **kwargs)
self.r=r # Upscale factor
def get_output_for(self, input, **kwargs):
out = input*self.r
return out
class ZeroLayer(Layer):
def __init__(self, incoming,r, **kwargs):
super(ZeroLayer, self).__init__(incoming, **kwargs)
self.r=r # Upscale factor
def get_output_for(self, input, **kwargs):
out = T.set_subtensor(input[T.abs_(input)<self.r],0.0)
return out
class ConvAggr(Layer):
def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1),
pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
ensure_set_name('conv_aggr', kwargs)
super(ConvAggr, self).__init__(incoming, **kwargs)
self.conv = Conv(incoming, num_channels, filter_size, stride, pad=pad,
W=W, b=b, nonlinearity=None, **kwargs)
# copy params
self.params = self.conv.params.copy()
def get_output_for(self, input, **kwargs):
return self.conv.get_output_for(input)
def get_output_shape_for(self, input_shape):
return self.conv.get_output_shape_for(input_shape)
class TileLayer(Layer):
def __init__(self, incoming, **kwargs):
super(TileLayer, self).__init__(incoming, **kwargs)
def get_output_for(self, input, **kwargs):
out = T.tile(input[...,input.shape[-1]/2:input.shape[-1]/2+1],(1,1,1,1,input.shape[-1]))
return out
class MC_prep(Layer):
def __init__(self, incoming, **kwargs):
super(MC_prep, self).__init__(incoming, **kwargs)
def get_output_for(self, input, **kwargs):
out = T.zeros((input.shape[-1], input.shape[1], input.shape[2], input.shape[3], input.shape[4]))
out = T.set_subtensor(out[0:1],input)
out = T.set_subtensor(out[1:2],T.concatenate([input[...,1:2], input[...,0:1], input[..., 2:3], input[..., 3:4], input[..., 4:]], axis = 4))
out = T.set_subtensor(out[2:3],T.concatenate([input[...,2:3], input[...,1:2], input[..., 3:4], input[..., 0:1], input[..., 4:]], axis = 4))
out = T.set_subtensor(out[3:4],T.concatenate([input[...,3:4], input[...,2:3], input[..., 4:], input[..., 1:2], input[..., 0:1]], axis = 4))
out = T.set_subtensor(out[4:5],T.concatenate([input[...,4:], input[...,3:4], input[..., 2:3], input[..., 1:2], input[..., 0:1]], axis = 4))
return out
def get_output_shape_for(self, input_shape):
return (input_shape[-1], input_shape[1], input_shape[2], input_shape[3], input_shape[4])
|
lanewinfield/robotpornaddict
|
porn.py
|
import httplib, urllib, base64, json
from moviepy.editor import *
from pytube import YouTube
import os
import urllib
import urllib2
from bs4 import BeautifulSoup
import random
from random import randint
from twython import Twython
from gtts import gTTS
def getDescription(img):
headers = {
# Request headers
'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': '', # KEY FOR AZURE
}
params = urllib.urlencode({
# Request parameters
# 'language': 'unk',
# 'detectOrientation ': 'true',
})
data = open(img, 'rb').read()
conn = httplib.HTTPSConnection('api.projectoxford.ai')
conn.request("POST", "/vision/v1.0/analyze?visualFeatures=Description%s" % params, data, headers)
response = conn.getresponse()
data = response.read()
thejson = json.loads(data)
conn.close()
return thejson["description"]["captions"][0]["text"]
def deleteOldVideo():
# DELETE OLD VIDEO
try:
os.remove('/home/brian/porn/input.mp4')
print "input.mp4 deleted"
except OSError:
pass
try:
os.remove('/home/brian/porn/video1.mp4')
print "video1.mp4 deleted"
except OSError:
pass
try:
os.remove('/home/brian/porn/video2.mp4')
print "video2.mp4 deleted"
except OSError:
pass
try:
os.remove('/home/brian/porn/video3.mp4')
print "video3.mp4 deleted"
except OSError:
pass
deleteOldVideo()
command = "/usr/local/bin/youtube-dl -o /home/brian/porn/input.mp4 http://pornhub.com/random"
print command
os.system(command)
time_middle = int(VideoFileClip("/home/brian/porn/input.mp4").duration/2)
time_beginning = int(VideoFileClip("/home/brian/porn/input.mp4").duration*0.2)
time_end = int(VideoFileClip("/home/brian/porn/input.mp4").duration*0.8)
command = "/home/brian/tobecontinued/ffmpeg-3.2-64bit-static/ffmpeg -i /home/brian/porn/input.mp4 -ss "+str(time_beginning)+" -t 5 /home/brian/porn/video1.mp4"
print command
os.system(command)
command = "/home/brian/tobecontinued/ffmpeg-3.2-64bit-static/ffmpeg -i /home/brian/porn/input.mp4 -ss "+str(time_middle)+" -t 5 /home/brian/porn/video2.mp4"
print command
os.system(command)
command = "/home/brian/tobecontinued/ffmpeg-3.2-64bit-static/ffmpeg -i /home/brian/porn/input.mp4 -ss "+str(time_end)+" -t 5 /home/brian/porn/video3.mp4"
print command
os.system(command)
clip1 = VideoFileClip("/home/brian/porn/video1.mp4")
clip2 = VideoFileClip("/home/brian/porn/video2.mp4")
clip3 = VideoFileClip("/home/brian/porn/video3.mp4")
smallclip1 = clip1.resize((11, 6))
smallclip2 = clip2.resize((11, 6))
smallclip3 = clip3.resize((11, 6))
regularclip1 = smallclip1.resize((1280, 720))
regularclip2 = smallclip2.resize((1280, 720))
regularclip3 = smallclip3.resize((1280, 720))
screensize = clip1.size
clip1.save_frame("/home/brian/porn/frame1.png", t=0)
clip2.save_frame("/home/brian/porn/frame2.png", t=0)
clip3.save_frame("/home/brian/porn/frame3.png", t=0)
descriptions = [getDescription("/home/brian/porn/frame1.png")+"?", getDescription("/home/brian/porn/frame2.png")+"?", getDescription("/home/brian/porn/frame3.png")+"?"]
# AUDIO
i = 1
for s in descriptions:
tts = gTTS(text=s, lang='en')
tts.save("/home/brian/porn/audio"+str(i)+".mp3")
i = i+1
textclip1 = TextClip(descriptions[0], fontsize=70, color="white", method="caption", align="center", font="/home/brian/porn/InputSans-Black.ttf", size=[600, 950]).set_duration(5)
textclip2 = TextClip(descriptions[1], fontsize=70, color="white", method="caption", align="center", font="/home/brian/porn/InputSans-Black.ttf", size=[600, 950]).set_duration(5)
textclip3 = TextClip(descriptions[2], fontsize=70, color="white", method="caption", align="center", font="/home/brian/porn/InputSans-Black.ttf", size=[600, 950]).set_duration(5)
audioclip1 = AudioFileClip("/home/brian/porn/audio1.mp3")
audioclip2 = AudioFileClip("/home/brian/porn/audio2.mp3")
audioclip3 = AudioFileClip("/home/brian/porn/audio3.mp3")
concClip = concatenate_videoclips([regularclip1, regularclip2, regularclip3])
comp = CompositeAudioClip([concClip.audio.volumex(0.6),audioclip1.set_start(1),audioclip2.set_start(6),audioclip3.set_start(11)])
final_clip = CompositeVideoClip([concClip, textclip1.set_pos(('center', 'center')).set_start(0), textclip2.set_pos(('center', 'center')).set_start(5), textclip3.set_pos(('center', 'center')).set_start(10)]).set_audio(comp).set_duration(15)
final_clip.write_videofile("/home/brian/porn/final.mp4",audio_codec='aac')
# TWEET IT
APP_KEY = ''
APP_SECRET = ''
ACCESS_KEY = ''#keep the quotes, replace this with your access token
ACCESS_SECRET = ''#keep the quotes, replace this with your access token secret
twitter = Twython(APP_KEY, APP_SECRET, ACCESS_KEY, ACCESS_SECRET)
tweetCopy = descriptions[0]
video = open('/home/brian/porn/final.mp4', 'rb')
response = twitter.upload_video(media=video, media_type='video/mp4')
twitter.update_status(status=tweetCopy, media_ids=[response['media_id']])
|
leafwind/twitch_analysis
|
app/handlers/get_signin_stats_handler.py
|
# -*- coding: utf-8 -*-
import sqlite3
import numpy as np
import pandas as pd
import logging
import time
from datetime import datetime
import json
from collections import OrderedDict
import math
with open('config.json') as fp:
CONFIG = json.load(fp)
html_newline = '<br />'
def h1(s):
return "<h1>" + s + "</h1>"
def h2(s):
return "<h2>" + s + "</h2>"
def ul(s):
return "<ul>" + s + "</ul>"
def li(s):
return "<li>" + s + "</li>"
def table(s, border=1, style=""):
return "<table border=\'{}\' style=\'{}\'>".format(border, style) + s + "</table>"
def th(s):
return "<th>" + s + "</th>"
def tr(s):
return "<tr>" + s + "</tr>"
def td(s):
return "<td>" + str(s) + "</td>"
def style_color(s, color='blue'):
return "<span style='color:{}'>".format(color) + str(s) + "</span>"
def get_signin_info(channel, user):
global CONFIG
signin_info = {}
conn = sqlite3.connect(CONFIG['db'])
signins = pd.read_sql_query("select 1 from signin where user=? and channel=?", conn, params=(user.lower(), channel))
count = len(signins)
last_signin = pd.read_sql_query("select ts_day from signin where user=? and channel=? order by ts_day desc limit 1", conn, params=(user.lower(), channel))
if last_signin.empty:
signin_info = {
'count': count,
'last_date': '{}'.format(time.strftime("%Y-%m-%d", time.gmtime(0))),
'last_date_ts_utc': 0,
}
else:
signin_info = {
'count': count,
'last_date': '{}'.format(time.strftime("%Y-%m-%d", time.gmtime(int(last_signin['ts_day'].iloc[0]) + 8 * 3600))),
'last_date_ts_utc': int(last_signin['ts_day'].iloc[0]),
}
conn.close()
return signin_info
def get_signin_stats(channel, user, html=False):
signin_info = get_signin_info(channel, user)
if html:
html_str = []
welcome_str = "Hi! ㄈ{} 已經累積簽到 {} 次,阿不就好棒棒(́◉◞౪◟◉‵)".format(style_color(user, 'blue'), style_color(signin_info['count'], 'red'))
welcome_str = h2(welcome_str)
html_str.append(welcome_str)
welcome_str = "最近一次簽到在 {} 也就是{}".format(signin_info['last_date'], _decode_human_date(signin_info['last_date_ts_utc']))
welcome_str = h2(welcome_str)
html_str.append(welcome_str)
html_str = ''.join(html_str)
return html_str
else:
return { 'count': signin_info['count'], 'last_date': signin_info['last_date'] }
def _decode_human_date(ts_utc):
last_date = datetime.utcfromtimestamp(ts_utc)
now = datetime.utcnow()
delta = now - last_date
if delta.days == 0:
return "今天"
else:
return " {} 天前".format(delta.days)
|
leafwind/twitch_analysis
|
app/main.py
|
from flask import Flask
from flask import request
from flask import jsonify
import logging
import requests
import json
import time
import calendar
from handlers.get_channel_stats_handler import get_stats, get_signin_ranking
from handlers.get_signin_stats_handler import get_signin_stats
application = Flask(__name__)
application.config['PROPAGATE_EXCEPTIONS'] = True
logging.basicConfig(filename='error.log',level=logging.DEBUG)
with open('config.json') as fp:
CONFIG = json.load(fp)
client_id = CONFIG["client_id"]
@application.route("/")
def hello():
return "<h1 style='color:red'>Hello There!</h1>"
## set the project root directory as the static folder, you can set others.
#app = Flask(__name__, static_url_path='')
#@app.route('/js/<path:path>')
#def send_js(path):
# return send_from_directory('js', path)
def h1(s):
return "<h1>{}</h1>".format(s)
@application.route('/signin_ranking/<channel>')
def get_signin_ranking_handler(channel):
result = get_signin_ranking(channel)
return result
@application.route('/signin', methods=['GET'])
def _get_signin_status_handler():
user = request.args.get('user')
channel = request.args.get('channel')
result = get_signin_stats(channel, user, html=False)
return jsonify(result)
@application.route('/signin/<channel>/<user>')
def get_signin_status_handler(channel, user):
result = get_signin_stats(channel, user, html=True)
return result
@application.route('/follow/<channel>/<user>')
def follow_status(channel, user):
global client_id
url = 'https://api.twitch.tv/kraken/users/{}/follows/channels/{}'.format(user, channel)
headers = {'Accept': 'application/vnd.twitchtv.v3+json', 'Client-ID': client_id}
r = requests.get(url, headers=headers)
info = json.loads(r.text)
if info.get('status', None) == 404:
return h1("{} is not following {}".format(user, channel))
else:
since = time.strptime(info['created_at'], "%Y-%m-%dT%H:%M:%S+00:00")
since_ts = calendar.timegm(since)
now = time.gmtime(time.time())
now_ts = calendar.timegm(now)
diff_ts = now_ts - since_ts
msg = "{} start following {} from <span style='color:red'>{}-{}-{}</span>, total <span style='color:blue'>{}</span> days".format(user, channel, since.tm_year, since.tm_mon, since.tm_mday, int(diff_ts / 86400))
msg = h1(msg)
return msg
@application.route('/<channel>/list_all_current_users')
def get_all_users(channel):
from twitch_utils import get_current_users
all_users = get_current_users(channel)
msg = "<br />".join(all_users)
msg = h1(msg)
return msg
@application.route('/<channel>/stats')
def get_stats_handler(channel):
result = get_stats(channel)
return result
@application.route('/post/<int:post_id>')
def show_post(post_id):
return 'Post %d' % post_id
#if __name__ == "__main__":
# application.run(host='0.0.0.0')
|
leafwind/twitch_analysis
|
app/handlers/get_channel_stats_handler.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
import sqlite3
import numpy as np
import pandas as pd
import logging
import time
import json
from collections import OrderedDict
import math
with open('config.json') as fp:
CONFIG = json.load(fp)
html_newline = '<br />'
def h1(s):
return "<h1>" + s + "</h1>"
def h2(s):
return "<h2>" + s + "</h2>"
def ul(s):
return "<ul>" + s + "</ul>"
def li(s):
return "<li>" + s + "</li>"
def table(s, border=1, style=""):
return "<table border=\'{}\' style=\'{}\'>".format(border, style) + s + "</table>"
def th(s):
return "<th>" + s + "</th>"
def tr(s):
return "<tr>" + s + "</tr>"
def td(s):
return "<td>" + s + "</td>"
def style_color(s, color='blue'):
return "<span style='color:{}'>".format(color) + s + "</span>"
def get_stream_info(channel, n_top_chatters, n_top_msgs):
global CONFIG
stream_info = {}
conn = sqlite3.connect(CONFIG['db'])
streams = pd.read_sql_query("select id, game, created_at, end_at from stream where channel =\'{}\' order by created_at desc".format(channel), conn)
popularity = pd.read_sql_query("select n_user, ts from channel_popularity where channel = \'{}\'".format(channel), conn)
chat = pd.read_sql_query("select user, msg, ts from chat where channel = \'{}\'".format(channel), conn)
for index, stream in streams.iterrows():
stream['end_at'] = stream['end_at'] if stream['end_at'] > 0 else int(time.time())
p = popularity[(popularity.ts >= stream['created_at']) & (popularity.ts <= stream['end_at'])]
c = chat[(chat.ts >= stream['created_at']) & (chat.ts <= stream['end_at'])]
n_total_chat = c['user'].count()
def group_by_min(df, ind, col):
return int(df[col].loc[ind]/60)
chat_groupby_min = c['ts'].groupby(lambda x: group_by_min(c, x, 'ts')).count()
max_chat_by_min = chat_groupby_min.max()
mean_chat_by_min = chat_groupby_min.mean()
std_chat_by_min = chat_groupby_min.std()
created_at = stream['created_at']
end_at = stream['end_at']
max_user = p['n_user'].max()
mean_user = p['n_user'].mean()
unique_chat_user = c['user'].nunique()
chat_groupby_user = c[['user', 'ts']].groupby('user', as_index=False).count()
top_chatters = chat_groupby_user.nlargest(n_top_chatters, columns='ts')['user'].tolist()
top_chatters = ", ".join(top_chatters)
top_chatters = top_chatters.encode('utf-8')
chat_groupby_msg = c[['msg', 'ts']].groupby('msg', as_index=False).count()
top_msgs = chat_groupby_msg.nlargest(n_top_msgs, columns='ts')['msg'].tolist()
top_msgs = ", ".join(top_msgs)
top_msgs = top_msgs.encode('utf-8')
stream_info[stream['id']] = {
'game': stream['game'].encode('utf-8'),
'created_at': '{}'.format(time.strftime("%Y-%m-%d %H:%M", time.gmtime(created_at + 8 * 3600))),
'end_at': '{}'.format(time.strftime("%Y-%m-%d %H:%M", time.gmtime(end_at + 8 * 3600))),
'duration_min': '{}'.format(int((end_at - created_at) / 60)),
'n_total_chat': str(n_total_chat),
'max_user': str(max_user),
'mean_user': 'nan' if math.isnan(mean_user) else str(int(mean_user)),
'unique_chat_user': str(unique_chat_user),
'interactivity': '{:.0%}'.format(1.0 * unique_chat_user / max_user),
'max_chat_by_min': str(max_chat_by_min),
'mean_chat_by_min': '{:.1f}'.format(mean_chat_by_min),
'cov_chat_by_min': '{:.0%}'.format(std_chat_by_min / mean_chat_by_min),
'top_chatters': top_chatters,
'top_msgs': top_msgs,
}
stream_info = OrderedDict(sorted(stream_info.items(), key=lambda t: t[1]["created_at"], reverse=True))
conn.close()
return stream_info
def get_signin_ranking(channel):
global CONFIG
stream_info = {}
conn = sqlite3.connect(CONFIG['db'])
result = pd.read_sql_query("select user, count(1) as count, strftime('%Y-%m-%d', datetime(max(ts_day), 'unixepoch')) as last_signin_date from signin where channel = \'{}\' group by user order by count(1) desc".format(channel), conn)
header_translation = [
('user', '使用者'),
('count', '簽到次數'),
('last_signin_date', '最後簽到日期')
]
html_str = []
row = []
for header in header_translation:
row.append(th(header[1]))
html_str.append(tr("".join(row)))
for index, log in result.iterrows():
row = []
for header in header_translation:
value = str(log[header[0]])
row.append(td(value))
html_str.append(tr("".join(row)))
html_str = table("".join(html_str), border=1, style="font-size:24px;")
return html_str
def get_stats(channel):
header_translation = [
('game', '遊戲名稱'),
('created_at', '開始時間'),
('end_at', '結束時間'),
('duration_min', '實況時間(分)'),
('n_total_chat', '總發言數量'),
('mean_chat_by_min', '平均每分鐘發言'),
('max_chat_by_min', '最大每分鐘發言'),
('mean_user', '平均觀眾'),
('max_user', '最多同時觀眾'),
('unique_chat_user', '不重複發言觀眾'),
('interactivity', '互動比例(發言/全部觀眾)'),
('cov_chat_by_min', '觀眾情緒起伏指數'),
('top_chatters', '最常發言觀眾'),
('top_msgs', '最多重複訊息'),
]
stream_info = get_stream_info(channel, n_top_chatters=5, n_top_msgs=10)
html_str = []
row = []
for header in header_translation:
row.append(th(header[1]))
html_str.append(tr("".join(row)))
for _id in stream_info:
row = []
for header in header_translation:
row.append(td(stream_info[_id][header[0]]))
html_str.append(tr("".join(row)))
html_str = table("".join(html_str), border=1, style="font-size:24px;")
return html_str
def whatisthis(s):
if isinstance(s, str):
print "ordinary string: {}".format(s)
elif isinstance(s, unicode):
print "unicode string: {}".format(s.encode('utf-8'))
else:
print "not a string"
|
leafwind/twitch_analysis
|
app/twitch_utils.py
|
<filename>app/twitch_utils.py
import requests
USERLIST_API = "http://tmi.twitch.tv/group/user/{}/chatters"
def get_current_users(ch, user_type='all'):
url = USERLIST_API.format(ch)
r = requests.get(url).json()
if user_type == 'all':
all_users = set(sum(r['chatters'].values(), []))
return all_users
elif user_type in ['moderators', 'staff', 'admins', 'global_mods', 'viewers']:
users = set(r['chatters'][user_type])
return users
else:
return set()
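# Minimal usage sketch, assuming the unauthenticated TMI endpoint above is reachable;
# the channel name "some_channel" is a placeholder, not a real channel.
if __name__ == "__main__":
    viewers = get_current_users("some_channel")
    mods = get_current_users("some_channel", user_type="moderators")
    print("{} chatters, {} moderators".format(len(viewers), len(mods)))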
|
SKJNR/Face-Counting-
|
app.py
|
import cv2
# Get a reference to webcam
cap = cv2.VideoCapture(0)
# Create the haar cascade
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
while True:
    ret, frame = cap.read()
    if not ret:
        break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.CASCADE_SCALE_IMAGE
)
count=0
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
count+=1
# Display count
cv2.putText(frame, str(count), (70, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
cv2.imshow('Video Face Detection', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
|
SmileyJames/piecash
|
tests/test_transaction.py
|
<filename>tests/test_transaction.py
# coding=utf-8
from __future__ import unicode_literals
from collections import defaultdict
from datetime import datetime, date, time
from decimal import Decimal
import pytest
from piecash import Transaction, Split, GncImbalanceError, GncValidationError, Lot
from test_helper import db_sqlite_uri, db_sqlite, new_book, new_book_USD, book_uri, book_basic, book_transactions
# dummy line to avoid removing unused symbols
a = db_sqlite_uri, db_sqlite, new_book, new_book_USD, book_uri, book_basic, book_transactions
class TestTransaction_create_transaction(object):
def test_create_basictransaction_neutraltime(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
e = book_basic.accounts(name="exp")
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=date(2014, 1, 1),
splits=[
Split(account=a, value=100, memo=u"mémo asset"),
Split(account=e, value=-100, memo=u"mémo exp"),
])
assert isinstance(tr.post_date, date)
book_basic.flush()
book_basic.validate()
assert isinstance(tr.post_date, date)
def test_create_basictransaction_validation_date(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
e = book_basic.accounts(name="exp")
splits = [
Split(account=a, value=100, memo=u"mémo asset"),
Split(account=e, value=-10, memo=u"mémo exp"),
]
with pytest.raises(GncValidationError):
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=datetime(2014, 1, 1),
enter_date=datetime(2014, 1, 1),
splits=splits)
with pytest.raises(GncValidationError):
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=datetime(2014, 1, 1),
enter_date=time(10, 59, 00),
splits=splits)
with pytest.raises(GncValidationError):
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=date(2014, 1, 1),
enter_date=date(2014, 1, 1),
splits=splits)
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=None,
enter_date=None,
splits=splits)
with pytest.raises(GncImbalanceError):
book_basic.flush()
book_basic.validate()
def test_create_basictransaction(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
e = book_basic.accounts(name="exp")
tr = Transaction(currency=EUR, description=u"wire from Hélène", notes=u"on St-Eugène day",
post_date=date(2014, 1, 1),
enter_date=datetime(2014, 1, 1),
splits=[
Split(account=a, value=100, memo=u"mémo asset"),
Split(account=e, value=-10, memo=u"mémo exp"),
])
# check issue with balance
with pytest.raises(GncImbalanceError):
book_basic.flush()
book_basic.validate()
# adjust balance
Split(account=e, value=-90, memo="missing exp", transaction=tr)
book_basic.flush()
# check no issue with str
assert str(tr)
assert str(tr.splits)
assert repr(tr)
assert repr(tr.splits)
assert tr.notes == u"on St-Eugène day"
def test_create_basictransaction_splitfirst(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
e = book_basic.accounts(name="exp")
s = Split(account=a, value=Decimal(1))
assert repr(s)
def test_create_cdtytransaction(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
tr = Transaction(currency=EUR, description="buy stock", notes=u"on St-Eugène day",
post_date=date(2014, 1, 2),
enter_date=datetime(2014, 1, 3),
splits=[
Split(account=a, value=100, memo=u"mémo asset"),
Split(account=s, value=-90, memo=u"mémo brok"),
])
# check issue with quantity for broker split not defined
with pytest.raises(GncValidationError):
book_basic.validate()
sb = tr.splits(account=s)
sb.quantity = 15
# check issue with quantity not same sign as value
with pytest.raises(GncValidationError):
book_basic.validate()
sb.quantity = -15
# verify imbalance issue
with pytest.raises(GncImbalanceError):
book_basic.validate()
# adjust balance
Split(account=a, value=-10, memo="missing asset corr", transaction=tr)
book_basic.save()
assert str(sb)
        assert repr(sb)
# changing currency of an existing transaction is not allowed
tr.currency = book_basic.currencies(mnemonic="USD")
with pytest.raises(GncValidationError):
book_basic.validate()
book_basic.cancel()
# check sum of quantities are not balanced per commodity but values are
d = defaultdict(lambda: Decimal(0))
for sp in tr.splits:
assert sp.quantity == sp.value or sp.account != a
d[sp.account.commodity] += sp.quantity
d["cur"] += sp.value
assert d["cur"] == 0
assert all([v != 0 for k, v in d.items() if k != "cur"])
def test_create_split_overflow(self, book_basic):
a = book_basic.accounts(name="asset")
# raise error as Transaction has a non CURRENCY commodity
with pytest.raises(TypeError):
sp = Split(account=a, value=1. / 3., quantity=10, memo=u"mémo asset")
with pytest.raises(ValueError):
sp = Split(account=a, value=Decimal(1) / Decimal(3), quantity=10, memo=u"mémo asset")
sp = Split(account=a, value=Decimal(1234567890123455678), quantity=10, memo=u"mémo asset")
with pytest.raises(ValueError):
sp = Split(account=a, value=Decimal(1234567890123455678901234), quantity=10, memo=u"mémo asset")
def test_create_cdtytransaction_cdtycurrency(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
tr = Transaction(currency=s.commodity, description="buy stock", notes=u"on St-Eugène day",
post_date=date(2014, 1, 2),
enter_date=datetime(2014, 1, 3),
splits=[
Split(account=a, value=100, quantity=10, memo=u"mémo asset"),
Split(account=s, value=-100, quantity=-10, memo=u"mémo brok"),
])
# raise error as Transaction has a non CURRENCY commodity
with pytest.raises(GncValidationError):
book_basic.validate()
def test_create_cdtytransaction_tradingaccount(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
book_basic.use_trading_accounts = True
tr = Transaction(currency=EUR, description="buy stock", notes=u"on St-Eugène day",
post_date=date(2014, 1, 2),
enter_date=datetime(2014, 1, 3),
splits=[
Split(account=a, value=100, memo=u"mémo asset"),
Split(account=s, value=-100, quantity=-15, memo=u"mémo brok"),
])
book_basic.validate()
assert "{}".format(tr) == "Transaction<[EUR] 'buy stock' on 2014-01-02>"
assert "{}".format(s) == "Account<asset:broker[ïoà]>"
assert "{}".format(tr.splits(account=s)) == "Split<Account<asset:broker[ïoà]> -100 EUR [-15 ïoà]>"
assert "{}".format(tr.splits(account=a)) == "Split<Account<asset[EUR]> 100 EUR>"
# check sum of quantities are all balanced per commodity as values are
d = defaultdict(lambda: Decimal(0))
for sp in tr.splits:
assert sp.quantity == sp.value or sp.account != a
d[sp.account.commodity] += sp.quantity
d["cur"] += sp.value
assert d["cur"] == 0
assert all([v == Decimal(0) for k, v in d.items() if k != "cur"])
# change existing quantity
sp = tr.splits(memo=u"mémo brok")
sp.quantity += 1
book_basic.validate()
# check sum of quantities are all balanced per commodity as values are
d = defaultdict(lambda: Decimal(0))
for sp in tr.splits:
assert sp.quantity == sp.value or sp.account != a
d[sp.account.commodity] += sp.quantity
d["cur"] += sp.value
assert d["cur"] == 0
assert all([v == Decimal(0) for k, v in d.items() if k != "cur"])
class TestTransaction_lots(object):
def test_create_simpletlot_addsplits(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
l = Lot(title=u"test mé", account=s, notes=u"ïlya")
for i, am in enumerate([45, -35, -20]):
tr = Transaction(currency=EUR, description="trade stock", notes=u"àçö",
post_date=date(2014, 1, 1 + i),
enter_date=datetime(2014, 1, 1 + i),
splits=[
Split(account=a, value=am * 10, memo=u"mémo asset"),
Split(account=s, value=-am * 10, quantity=-am, memo=u"mémo brok", lot=l),
])
book_basic.flush()
def test_create_simpletlot_initialsplits(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
sp = []
for i, am in enumerate([45, -35, -20]):
tr = Transaction(currency=EUR, description="trade stock", notes=u"àçö",
post_date=date(2014, 1, 1 + i),
enter_date=datetime(2014, 1, 1 + i),
splits=[
Split(account=a, value=am * 10, memo=u"mémo asset"),
Split(account=s, value=-am * 10, quantity=-am, memo=u"mémo brok"),
])
sp.append(tr.splits(account=s))
l = Lot(title=u"test mé", account=s, notes=u"ïlya", splits=sp)
book_basic.flush()
def test_create_closedlot_addsplits(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
l = Lot(title=u"test mé", account=s, notes=u"ïlya")
l.is_closed = 1
# raise valueerror as lot is closed
with pytest.raises(ValueError):
tr = Transaction(currency=EUR, description="trade stock", notes=u"àçö",
post_date=date(2014, 1, 1),
enter_date=datetime(2014, 1, 1),
splits=[
Split(account=a, value=10, memo=u"mémo asset"),
Split(account=s, value=- 10, quantity=-2, memo=u"mémo brok", lot=l),
])
def test_create_simplelot_inconsistentaccounts(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
racc = book_basic.root_account
a = book_basic.accounts(name="asset")
s = book_basic.accounts(name="broker")
l = Lot(title=u"test mé", account=a, notes=u"ïlya")
# raise valueerror as split account not the same as lot account
tr = Transaction(currency=EUR, description="trade stock", notes=u"àçö",
post_date=date(2014, 1, 1),
enter_date=datetime(2014, 1, 1),
splits=[
Split(account=a, value=10, memo=u"mémo asset"),
Split(account=s, value=- 10, quantity=-2, memo=u"mémo brok", lot=l),
])
with pytest.raises(ValueError):
book_basic.validate()
class TestTransaction_changes(object):
def test_delete_existing_transaction(self, book_transactions):
l = len(book_transactions.transactions)
s = len(book_transactions.splits)
tr = book_transactions.transactions(description="my revenue")
book_transactions.delete(tr)
book_transactions.save()
nl = len(book_transactions.transactions)
ns = len(book_transactions.splits)
assert nl == l - 1
assert ns == s - 2
def test_change_cdty_split_price(self, book_transactions):
tr = book_transactions.transactions(description="my purchase of stock")
sp = tr.splits(account=book_transactions.accounts(name="broker"))
assert len(book_transactions.prices) == 6
p = [p for p in book_transactions.prices if p.date.day == 29][0]
p_expected = (sp.value / sp.quantity).quantize(Decimal("0.000001"))
assert p.value == p_expected
# changing the quantity of the split should NOT change the existing price
sp.quantity = (5, 1)
book_transactions.validate()
p_not_expected = (sp.value / sp.quantity).quantize(Decimal("0.000001"))
assert len(book_transactions.prices) == 6
assert p.value == p_expected
assert p_expected != p_not_expected
# changing the post date of the transaction of the split should create a new price
tr.post_date = date(2015, 1, 29)
book_transactions.validate()
book_transactions.flush()
assert len(book_transactions.prices) == 7
|
SmileyJames/piecash
|
piecash/core/factories.py
|
# coding=utf-8
from __future__ import unicode_literals
from .._common import GnucashException
from ..yahoo_client import get_latest_quote
def create_stock_accounts(cdty, broker_account, income_account=None, income_account_types="D/CL/I"):
"""Create the multiple accounts used to track a single stock, ie:
- broker_account/stock.mnemonic
and the following accounts depending on the income_account_types argument
- D = Income/Dividend Income/stock.mnemonic
- CL = Income/Cap Gain (Long)/stock.mnemonic
- CS = Income/Cap Gain (Short)/stock.mnemonic
- I = Income/Interest Income/stock.mnemonic
    Args:
        cdty (:class:`piecash.core.commodity.Commodity`): the stock commodity for which the accounts are created
        broker_account (:class:`piecash.core.account.Account`): the broker account where the account holding
the stock is to be created
income_account (:class:`piecash.core.account.Account`): the income account where the accounts holding
the income related to the stock are to be created
income_account_types (str): "/" separated codes to drive the creation of income accounts
Returns:
:class:`piecash.core.account.Account`: a tuple with the account under the broker_account where the stock is held
and the list of income accounts.
"""
if cdty.namespace == "CURRENCY":
raise GnucashException("{} is a currency ! You can't create stock_accounts for currencies".format(cdty))
from .account import Account
symbol = cdty.mnemonic
try:
acc = broker_account.children(name=symbol)
except KeyError:
acc = Account(symbol, "STOCK", cdty, broker_account)
inc_accounts = []
if income_account:
cur = cdty.base_currency
for inc_acc in income_account_types.split("/"):
sub_account_name = {
"D": "Dividend Income",
"CL": "Cap Gain (Long)",
"CS": "Cap Gain (Short)",
"I": "Interest Income",
}[inc_acc]
try:
sub_acc = income_account.children(name=sub_account_name)
except KeyError:
sub_acc = Account(sub_account_name, "INCOME", cur.base_currency, income_account)
try:
cdty_acc = sub_acc.children(name=symbol)
except KeyError:
cdty_acc = Account(symbol, "INCOME", cur, sub_acc)
inc_accounts.append(cdty_acc)
return acc, inc_accounts
def create_currency_from_ISO(isocode):
"""
Factory function to create a new currency from its ISO code
Args:
isocode (str): the ISO code of the currency (e.g. EUR for the euro)
Returns:
:class:`Commodity`: the currency as a commodity object
"""
from .commodity import Commodity
# if self.get_session().query(Commodity).filter_by(isocode=isocode).first():
# raise GncCommodityError("Currency '{}' already exists".format(isocode))
from .currency_ISO import ISO_currencies
cur = ISO_currencies.get(isocode)
if cur is None:
raise ValueError("Could not find the ISO code '{}' in the ISO table".format(isocode))
# create the currency
cdty = Commodity(mnemonic=cur.mnemonic,
fullname=cur.currency,
fraction=10 ** int(cur.fraction),
cusip=cur.cusip,
namespace="CURRENCY",
quote_flag=1,
)
# self.gnc_session.add(cdty)
return cdty
def create_stock_from_symbol(symbol, book=None):
"""
Factory function to create a new stock from its symbol. The ISO code of the quoted currency of the stock is
stored in the slot "quoted_currency".
Args:
symbol (str): the symbol for the stock (e.g. YHOO for the Yahoo! stock)
Returns:
:class:`Commodity`: the stock as a commodity object
.. note::
The information is gathered from the yahoo-finance package
The default currency in which the quote is traded is stored in a slot 'quoted_currency'
.. todo::
use 'select * from yahoo.finance.sectors' and 'select * from yahoo.finance.industry where id ="sector_id"'
to retrieve name of stocks and allow therefore the creation of a stock by giving its "stock name" (or part of it).
This could also be used to retrieve all symbols related to the same company
"""
from .commodity import Commodity
share = get_latest_quote(symbol)
stock = Commodity(mnemonic=symbol,
fullname=share.name,
fraction=10000,
namespace=share.exchange,
quote_flag=1,
quote_source="yahoo",
quote_tz=share.timezone,
)
if book:
book.session.add(stock)
return stock
def single_transaction(post_date,
enter_date,
description,
value,
from_account,
to_account):
from . import Transaction, Split
# currency is derived from "from_account" (as in GUI)
currency = from_account.commodity
# currency of other destination account should be identical (as only one value given)
assert currency == to_account.commodity, "Commodities of accounts should be the same"
tx = Transaction(
currency=currency,
post_date=post_date,
enter_date=enter_date,
description=description,
splits=[
Split(account=from_account, value=-value),
Split(account=to_account, value=value),
])
return tx
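# Usage sketch for single_transaction(): a minimal example assuming an in-memory book
# created with piecash.create_book(); the account names ("Checking", "Groceries") and
# the amount are illustrative only.
#
#     from datetime import date, datetime
#     from decimal import Decimal
#     import piecash
#     from piecash import Account
#     from piecash.core.factories import single_transaction
#
#     with piecash.create_book(currency="EUR") as book:
#         checking = Account("Checking", "BANK", book.default_currency, parent=book.root_account)
#         groceries = Account("Groceries", "EXPENSE", book.default_currency, parent=book.root_account)
#         single_transaction(date(2018, 3, 1), datetime(2018, 3, 1),
#                            "weekly shopping", Decimal("50"), checking, groceries)
#         book.save()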
|
SmileyJames/piecash
|
piecash/core/book.py
|
<filename>piecash/core/book.py
import warnings
from collections import defaultdict
from operator import attrgetter
from sqlalchemy import Column, VARCHAR, ForeignKey
from sqlalchemy.orm import relation, aliased, joinedload
from sqlalchemy.orm.base import instance_state
from sqlalchemy.orm.exc import NoResultFound
from . import factories
from .account import Account
from .commodity import Commodity, Price
from .transaction import Split, Transaction
from .._common import CallableList, GnucashException
from .._declbase import DeclarativeBaseGuid
from ..business.invoice import Invoice
from ..sa_extra import kvp_attribute
class Book(DeclarativeBaseGuid):
"""
A Book represents a GnuCash document. It is created through one of the two factory functions
:func:`create_book` and :func:`open_book`.
Canonical use is as a context manager like (the book is automatically closed at the end of the with block)::
with create_book() as book:
...
.. note:: If you do not use the context manager, do not forget to close the session explicitly (``book.close()``)
to release any lock on the file/DB.
The book puts at disposal several attributes to access the main objects of the GnuCash document::
# to get the book and the root_account
ra = book.root_account
# to get the list of accounts, commodities or transactions
for acc in book.accounts: # or book.commodities or book.transactions
# do something with acc
# to get a specific element of these lists
EUR = book.commodities(namespace="CURRENCY", mnemonic="EUR")
# to get a list of all objects of some class (even non core classes)
budgets = book.get(Budget)
# or a specific object
budget = book.get(Budget, name="my first budget")
You can check a session has changes (new, deleted, changed objects) by getting the ``book.is_saved`` property.
To save or cancel changes, use ``book.save()`` or ``book.cancel()``::
        # save a session if it is not saved (saving an unchanged session is a no-op)
if not book.is_saved:
book.save()
Attributes:
root_account (:class:`piecash.core.account.Account`): the root account of the book
root_template (:class:`piecash.core.account.Account`): the root template of the book (usage not yet clear...)
default_currency (:class:`piecash.core.commodity.Commodity`): the currency of the root account (=default currency of the book)
uri (str): connection string of the book (set by the GncSession when accessing the book)
session (:class:`sqlalchemy.orm.session.Session`): the sqlalchemy session encapsulating the book
use_trading_accounts (bool): true if option "Use trading accounts" is enabled
use_split_action_field (bool): true if option "Use Split Action Field for Number" is enabled
RO_threshold_day (int): value of Day Threshold for Read-Only Transactions (red line)
control_mode (list(str)) : list of allowed non-standard operations like : "allow-root-subaccounts"
counter_customer (int) : counter for :class:`piecash.business.person.Customer` id (link to slot "counters/gncCustomer")
counter_vendor (int) : counter for :class:`piecash.business.person.Vendor` id (link to slot "counters/gncVendor")
counter_employee (int) : counter for :class:`piecash.business.person.Employee` id (link to slot "counters/gncEmployee")
counter_invoice (int) : counter for :class:`piecash.business.invoice.Invoice` id (link to slot "counters/gncInvoice")
counter_job (int) : counter for :class:`piecash.business.invoice.Job` id (link to slot "counters/gncJob")
counter_bill (int) : counter for :class:`piecash.business.invoice.Bill` id (link to slot "counters/gncBill")
counter_exp_voucher (int) : counter for :class:`piecash.business.invoice.Invoice` id (link to slot "counters/gncExpVoucher")
counter_order (int) : counter for :class:`piecash.business.invoice.Order` id (link to slot "counters/gncOrder")
        business_company_phone (str): phone number of book company (link to slot "options/Business/Company Phone Number")
        business_company_email (str): email of book company (link to slot "options/Business/Company Email Address")
        business_company_contact (str): contact person of book company (link to slot "options/Business/Company Contact Person")
        business_company_ID (str): ID of book company (link to slot "options/Business/Company ID")
        business_company_name (str): name of book company (link to slot "options/Business/Company Name")
        business_company_address (str): address of book company (link to slot "options/Business/Company Address")
        business_company_website (str): website URL of book company (link to slot "options/Business/Company Website URL")
"""
__tablename__ = 'books'
__table_args__ = {}
# column definitions
root_account_guid = Column('root_account_guid', VARCHAR(length=32),
ForeignKey('accounts.guid'), nullable=False)
root_template_guid = Column('root_template_guid', VARCHAR(length=32),
ForeignKey('accounts.guid'), nullable=False)
# relation definitions
root_account = relation('Account',
# back_populates='root_book',
foreign_keys=[root_account_guid],
)
root_template = relation('Account',
foreign_keys=[root_template_guid])
uri = None
session = None
# link options to KVP
use_trading_accounts = kvp_attribute("options/Accounts/Use Trading Accounts",
from_gnc=lambda v: v == 't',
to_gnc=lambda v: 't',
default=False)
use_split_action_field = kvp_attribute("options/Accounts/Use Split Action Field for Number",
from_gnc=lambda v: v == 't',
to_gnc=lambda v: 't' if v else 'f',
default=False)
RO_threshold_day = kvp_attribute("options/Accounts/Day Threshold for Read-Only Transactions (red line)",
from_gnc=lambda v: int(v),
to_gnc=lambda v: float(v),
default=0)
counter_customer = kvp_attribute("counters/gncCustomer", default=0)
counter_vendor = kvp_attribute("counters/gncVendor", default=0)
counter_employee = kvp_attribute("counters/gncEmployee", default=0)
counter_invoice = kvp_attribute("counters/gncInvoice", default=0)
counter_job = kvp_attribute("counters/gncJob", default=0)
counter_bill = kvp_attribute("counters/gncBill", default=0)
counter_exp_voucher = kvp_attribute("counters/gncExpVoucher", default=0)
counter_order = kvp_attribute("counters/gncOrder", default=0)
business_company_phone = kvp_attribute("options/Business/Company Phone Number", default="")
business_company_email = kvp_attribute("options/Business/Company Email Address", default="")
business_company_contact = kvp_attribute("options/Business/Company Contact Person", default="")
business_company_ID = kvp_attribute("options/Business/Company ID", default="")
business_company_name = kvp_attribute("options/Business/Company Name", default="")
business_company_address = kvp_attribute("options/Business/Company Address", default="")
business_company_website = kvp_attribute("options/Business/Company Website URL", default="")
def __init__(self, root_account=None, root_template=None):
self.root_account = root_account
self.root_template = root_template
def __unirepr__(self):
return u"Book<{}>".format(self.uri)
_control_mode = None
@property
def control_mode(self):
if self._control_mode is None:
self._control_mode = []
return self._control_mode
@property
def default_currency(self):
return self.root_account.commodity
@default_currency.setter
def default_currency(self, value):
assert isinstance(value, Commodity) and value.namespace == "CURRENCY"
self.root_account.commodity = value
@property
def book(self):
warnings.warn("deprecated", DeprecationWarning)
return self
def validate(self):
Book.validate_book(self.session)
@staticmethod
def track_dirty(session, flush_context, instances):
"""
Record in session._all_changes the objects that have been modified before each flush
"""
for change, l in {"dirty": session.dirty,
"new": session.new,
"deleted": session.deleted}.items():
for obj in l:
                # retrieve the dictionary of changes for the given obj
attrs = session._all_changes.setdefault(id(obj), {})
# add the change of state to the list of state changes
attrs.setdefault("STATE_CHANGES", []).append(change)
attrs.setdefault("OBJECT", obj)
# save old value of attr if not already saved
# (if a value is changed multiple time, we keep only the first "old value")
for k, v in instance_state(obj).committed_state.items():
if k not in attrs:
attrs[k] = v
@staticmethod
def validate_book(session):
session.flush()
# identify object to validate
txs = set()
# iterate on all explicitly changes objects to see
# if we need to add other objects for check
for attrs in session._all_changes.values():
obj = attrs["OBJECT"]
for o_to_validate in obj.object_to_validate(attrs["STATE_CHANGES"]):
txs.add(o_to_validate)
assert None not in txs, "No object should return None to validate. fix the code"
# sort object from local to global (ensure Split checked before Transaction)
from . import Account, Transaction, Split, Commodity
sort_order = defaultdict(lambda: 20, {Account: 10, Transaction: 5, Split: 3, Commodity: 2})
txs = list(txs)
txs.sort(key=lambda x: sort_order[type(x)])
# for each object, validate it
for tx in txs:
tx.validate()
_trading_accounts = None
def trading_account(self, cdty):
"""Return the trading account related to the commodity. If it does not exist and the option
"Use Trading Accounts" is enabled, create it on the fly"""
key = namespace, mnemonic = cdty.namespace, cdty.mnemonic
if self._trading_accounts is None:
self._trading_accounts = {}
tacc = self._trading_accounts.get(key, None)
if tacc: return tacc
from .account import Account
try:
trading = self.root_account.children(name="Trading")
except KeyError:
trading = Account(name="Trading",
type="TRADING",
placeholder=1,
commodity=self.default_currency,
parent=self.root_account)
try:
nspc = trading.children(name=cdty.namespace)
except KeyError:
nspc = Account(name=namespace,
type="TRADING",
placeholder=1,
commodity=self.default_currency,
parent=trading)
try:
tacc = nspc.children(name=cdty.mnemonic)
except KeyError:
tacc = Account(name=mnemonic,
type="TRADING",
placeholder=0,
commodity=cdty,
parent=nspc)
# self.flush()
return tacc
# add session alike functions
def add(self, obj):
"""Add an object to the book (to be used if object not linked in any way to the book)"""
self.session.add(obj)
obj.on_book_add()
def delete(self, obj):
"""Delete an object from the book (to remove permanently an object)"""
self.session.delete(obj)
def save(self):
"""Save the changes to the file/DB (=commit transaction)
"""
self.session.commit()
def flush(self):
"""Flush the book"""
self.session.flush()
def cancel(self):
"""Cancel all the changes that have not been saved (=rollback transaction)
"""
self.session.rollback()
@property
def is_saved(self):
"""Save the changes to the file/DB (=commit transaction)
"""
return self.session.is_saved
# add context manager that close the session when leaving
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""Close a session. Any changes not yet saved are rolled back. Any lock on the file/DB is released.
"""
session = self.session
# cancel pending changes
session.rollback()
# if self._acquire_lock:
# # remove the lock
# session.delete_lock()
session.close()
# add general getters for gnucash classes
def get(self, cls, **kwargs):
"""
Generic getter for a GnuCash object in the `GncSession`. If no kwargs is given, it returns the list of all
objects of type cls (uses the sqlalchemy session.query(cls).all()).
Otherwise, it gets the unique object which attributes match the kwargs
(uses the sqlalchemy session.query(cls).filter_by(\*\*kwargs).one() underneath)::
# to get the first account with name="Income"
inc_account = session.get(Account, name="Income")
# to get all accounts
accs = session.get(Account)
Args:
cls (class): the class of the object to retrieve (Account, Price, Budget,...)
kwargs (dict): the attributes to filter on
Returns:
object: the unique object if it exists, raises exceptions otherwise
"""
if kwargs:
try:
return self.session.query(cls).filter_by(**kwargs).one()
except NoResultFound:
raise ValueError("Could not find a {}({})".format(cls.__name__,
kwargs))
else:
return self.session.query(cls)
@property
def transactions(self):
"""
gives easy access to all transactions in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.transaction.Transaction`
"""
from .transaction import Transaction
return CallableList(self.session.query(Transaction))
@property
def splits(self):
"""
gives easy access to all splits in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.transaction.Split`
"""
from .transaction import Split
return CallableList(self.session.query(Split))
@property
def accounts(self):
"""
gives easy access to all accounts in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.account.Account`
"""
from .account import Account
return CallableList(self.session.query(Account).filter(Account.parent != None))
@property
def commodities(self):
"""
gives easy access to all commodities in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.commodity.Commodity`
"""
from .commodity import Commodity
return CallableList(self.session.query(Commodity))
@property
def invoices(self):
"""
        gives easy access to all invoices in the book through a :class:`piecash.model_common.CallableList`
        of :class:`piecash.business.invoice.Invoice`
"""
return CallableList(self.session.query(Invoice))
@property
def currencies(self):
"""
gives easy access to all currencies in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.commodity.Commodity`
"""
from .commodity import Commodity
def fallback(mnemonic):
cur = factories.create_currency_from_ISO(isocode=mnemonic)
self.add(cur)
# self.flush()
return cur
cl = CallableList(self.session.query(Commodity).filter_by(namespace="CURRENCY"))
cl.fallback = fallback
return cl
@property
def prices(self):
"""
gives easy access to all prices in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.core.commodity.Price`
"""
from .commodity import Price
return CallableList(self.session.query(Price))
@property
def customers(self):
"""
        gives easy access to all customers in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.business.people.Customer`
"""
from ..business import Customer
return CallableList(self.session.query(Customer))
@property
def vendors(self):
"""
        gives easy access to all vendors in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.business.people.Vendor`
"""
from ..business import Vendor
return CallableList(self.session.query(Vendor))
@property
def employees(self):
"""
        gives easy access to all employees in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.business.people.Employee`
"""
from ..business import Employee
return CallableList(self.session.query(Employee))
@property
def taxtables(self):
"""
        gives easy access to all tax tables in the book through a :class:`piecash.model_common.CallableList`
of :class:`piecash.business.tax.Taxtable`
"""
from ..business import Taxtable
return CallableList(self.session.query(Taxtable))
@property
def query(self):
"""
proxy for the query function of the underlying sqlalchemy session
"""
return self.session.query
def preload(self):
# preload list of accounts
accounts = self.session.query(Account).options(joinedload("splits").joinedload("transaction"),
joinedload("children"),
joinedload("commodity"),
).all()
# load all splits
splits = self.session.query(Split).join(Transaction).options(
joinedload("account"),
joinedload("lot")) \
.order_by(Transaction.post_date, Split.value).all()
return accounts, splits
def splits_df(self, additional_fields=None):
"""
        Return a pandas DataFrame with all splits (:class:`piecash.core.transaction.Split`) from the book
        :param additional_fields: list of extra split attribute paths to add as columns (e.g. "transaction.num")
        :return: :class:`pandas.DataFrame`
"""
try:
import pandas
except ImportError:
raise GnucashException("pandas is required to output dataframes")
# Initialise default argument here
additional_fields = additional_fields if additional_fields else []
# preload list of accounts
accounts = self.session.query(Account).all()
# preload list of commodities
commodities = self.session.query(Commodity).filter(Commodity.namespace != "template").all()
# preload list of transactions
transactions = self.session.query(Transaction).all()
# load all splits
splits = self.session.query(Split).join(Transaction) \
.order_by(Transaction.post_date, Split.value).all()
# build dataframe. Adds additional transaction.guid field.
fields = ["guid", "value", "quantity", "memo", "transaction.guid", "transaction.description",
"transaction.post_date", "transaction.currency.guid", "transaction.currency.mnemonic",
"account.fullname", "account.commodity.guid", "account.commodity.mnemonic",
] + additional_fields
fields_getter = [attrgetter(fld) for fld in fields]
df_splits = pandas.DataFrame([[fg(sp) for fg in fields_getter]
for sp in splits], columns=fields)
df_splits = df_splits[df_splits["account.commodity.mnemonic"] != "template"]
df_splits = df_splits.set_index("guid")
return df_splits
def prices_df(self):
"""
Return a pandas DataFrame with all prices (:class:`piecash.core.commodity.Price`) from the book
:return: :class:`pandas.DataFrame`
"""
try:
import pandas
except ImportError:
raise GnucashException("pandas is required to output dataframes")
# preload list of commodities
commodities = self.session.query(Commodity).all()
# load all prices
Currency = aliased(Commodity)
prices = self.session.query(Price) \
.join(Commodity, Price.commodity) \
.join(Currency, Price.currency) \
.order_by(Commodity.mnemonic, Price.date, Currency.mnemonic).all()
fields = ["date", "type", "value",
"commodity.guid", "commodity.mnemonic",
"currency.guid", "currency.mnemonic", ]
fields_getter = [attrgetter(fld) for fld in fields]
df_prices = pandas.DataFrame([[fg(pr) for fg in fields_getter]
for pr in prices], columns=fields)
return df_prices
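# Usage sketch for the DataFrame helpers above, assuming an existing GnuCash sqlite
# file (the name "mybook.gnucash" is a placeholder) and an installed pandas:
#
#     import piecash
#
#     with piecash.open_book("mybook.gnucash", readonly=True) as book:
#         splits = book.splits_df()
#         print(splits.groupby("account.fullname")["value"].sum())
#         print(book.prices_df().head())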
|
SmileyJames/piecash
|
piecash/scripts/qif_export.py
|
<filename>piecash/scripts/qif_export.py
#!/usr/local/bin/python
"""Basic script to export QIF. Heavily untested ..."""
import sys
# https://github.com/jemmyw/Qif/blob/master/QIF_references
import click
from piecash.scripts.cli import cli
@cli.command()
@click.argument('book', type=click.Path(exists=True))
@click.option('--output', type=click.File('w'), default="-",
help="File to which to export the data (default=stdout)")
def qif(book, output):
"""Export to QIF format.
    This script exports a GnuCash BOOK to the QIF format.
"""
try:
import qifparse.qif as _qif
except ImportError:
_qif = None
print("You need to install the qifparse module ('pip install qifparse')")
sys.exit()
import piecash
map_gnc2qif = {
"CASH": 'Cash',
"BANK": 'Bank',
"RECEIVABLE": 'Bank',
"PAYABLE": 'Ccard',
"MUTUAL": 'Bank',
"CREDIT": 'Ccard',
"ASSET": 'Oth A',
"LIABILITY": 'Oth L',
"TRADING": 'Oth L',
# 'Invoice', # Quicken for business only
"STOCK": 'Invst',
}
with piecash.open_book(book, open_if_lock=True) as s:
b = _qif.Qif()
map = {}
for acc in s.accounts:
if acc.parent == s.book.root_template: continue
elif acc.type in ["INCOME", "EXPENSE", "EQUITY"]:
item = _qif.Category(name=acc.fullname,
description=acc.description,
expense=acc.type == "EXPENSE",
income=acc.type == "INCOME" or acc.type == "EQUITY",
)
b.add_category(item)
elif acc.type in map_gnc2qif:
actype = map_gnc2qif[acc.type]
if actype=="Invst":
# take parent account
item = _qif.Account(name=acc.fullname, account_type=actype)
else:
item = _qif.Account(name=acc.fullname, account_type=actype)
b.add_account(item)
else:
print("unknow {} for {}".format(acc.type, acc.fullname))
map[acc.fullname] = item
# print(str(b))
def split_type(sp):
qif_obj = map[sp.account.fullname]
if isinstance(qif_obj, _qif.Account):
return qif_obj.account_type
else:
return "Expense" if qif_obj.expense else "Income"
def sort_split(sp):
type = split_type(sp)
if type=="Invst":
return 1
elif type in ["Expense","Income"]:
return 2
else:
return 0
tpl = s.book.root_template
for tr in s.transactions:
if not tr.splits or len(tr.splits)<2: continue # split empty transactions
if tr.splits[0].account.parent==tpl: continue # skip template transactions
splits = sorted(tr.splits, key=sort_split)
if all(sp.account.commodity.namespace == "CURRENCY" for sp in splits):
sp1, sp2 = splits[:2]
item = _qif.Transaction(date=tr.post_date,
num=tr.num,
payee=tr.description,
amount=sp1.value,
memo=sp1.memo,
)
if isinstance(map[sp2.account.fullname], _qif.Account):
item.to_account = sp2.account.fullname
else:
item.category = sp2.account.fullname
if len(splits) > 2:
for sp in splits[1:]:
if isinstance(map[sp.account.fullname], _qif.Account):
asp = _qif.AmountSplit(amount=-sp.value,
memo=sp.memo,
to_account=sp.account.fullname,
)
else:
asp = _qif.AmountSplit(amount=-sp.value,
memo=sp.memo,
category=sp.account.fullname,
)
item.splits.append(asp)
map[sp1.account.fullname].add_transaction(item,
header="!Type:{}".format(map[sp1.account.fullname].account_type))
else:
# match pattern of splits for an investment
sp_account, sp_security, sp_others = splits[0], splits[1], splits[2:]
assert split_type(sp_account) in ["Bank", "Cash"]
assert split_type(sp_security) in ["Invst"]
assert all(split_type(sp)=="Expense" for sp in sp_others)
assert sp_security.account.parent.type=="BANK", "Security account has no parent STOCK account (aka a Brokerage account)"
item = _qif.Investment(date=tr.post_date,
action="Buy" if sp_security.quantity>0 else "Sell",
security=sp_security.account.commodity.mnemonic,
price=sp_security.value / sp_security.quantity,
quantity=sp_security.quantity,
amount=sp_security.value,
commission=sum(sp.value for sp in sp_others),
first_line=tr.description,
to_account=sp_account.account.fullname,
amount_transfer=abs(sp_account.value),
)
map[sp_security.account.fullname]\
.add_transaction(item, header="!Type:{}".format(split_type(sp_security)))
output.write(str(b))
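# Example invocation, assuming the package exposes the click group above as the
# `piecash` console script (file names are placeholders):
#
#     piecash qif mybook.gnucash --output mybook.qif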
|
SmileyJames/piecash
|
tests/test_commodity.py
|
# coding=utf-8
from __future__ import unicode_literals
from datetime import date
from decimal import Decimal
import pytest
from piecash import Price, Commodity, GnucashException
from piecash.core.commodity import GncPriceError
from test_helper import db_sqlite_uri, db_sqlite, new_book, new_book_USD, book_uri, book_basic, \
is_inmemory_sqlite, needweb
# dummy line to avoid removing unused symbols
a = db_sqlite_uri, db_sqlite, new_book, new_book_USD, book_uri, book_basic
class TestCommodity_create_commodity(object):
def test_create_commodity(self, book_basic):
assert len(book_basic.commodities) == 2
cdty = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple", book=book_basic)
book_basic.flush()
assert len(book_basic.commodities) == 3
with pytest.raises(GnucashException):
cdty.base_currency
cdty["quoted_currency"] = "EUR"
assert cdty.base_currency == book_basic.commodities(mnemonic="EUR")
def test_create_commodity_uniqueness(self, book_basic):
assert len(book_basic.commodities) == 2
cdty1 = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple", book=book_basic)
cdty2 = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple", book=book_basic)
with pytest.raises(ValueError):
book_basic.save()
def test_base_currency_commodity(self, book_basic):
cdty = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple", book=book_basic)
with pytest.raises(GnucashException):
cdty.base_currency
# should trigger creation of USD currency
cdty["quoted_currency"] = "USD"
assert cdty.base_currency.mnemonic == 'USD'
book_basic.flush()
assert cdty.base_currency == book_basic.currencies(mnemonic="USD")
cdty["quoted_currency"] = "EUR"
assert cdty.base_currency == book_basic.currencies(mnemonic="EUR")
def test_base_currency_commodity_no_book(self, book_basic):
cdty = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple")
with pytest.raises(GnucashException):
cdty.base_currency
def test_base_currency_currency(self, book_basic):
cdty = book_basic.currencies(mnemonic="USD")
assert cdty.base_currency.mnemonic == "EUR"
class TestCommodity_create_prices(object):
def test_create_basicprice(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
USD = book_basic.currencies(mnemonic="USD")
p = Price(commodity=USD, currency=EUR, date=date(2014, 2, 22), value=Decimal('0.54321'))
# check price exist
np = USD.prices.first()
assert np is p
assert repr(p) == "Price<2014-02-22 : 0.54321 EUR/USD>"
p2 = Price(commodity=USD, currency=EUR, date=date(2014, 2, 21), value=Decimal('0.12345'))
book_basic.flush()
assert p.value + p2.value == Decimal("0.66666")
assert len(USD.prices.all()) == 2
def test_create_duplicateprice(self, book_basic):
EUR = book_basic.commodities(namespace="CURRENCY")
USD = book_basic.currencies(mnemonic="USD")
p = Price(commodity=USD, currency=EUR, date=date(2014, 2, 22), value=Decimal('0.54321'))
p1 = Price(commodity=USD, currency=EUR, date=date(2014, 2, 22), value=Decimal('0.12345'))
book_basic.flush()
assert USD.prices.filter_by(value=Decimal('0')).all() == []
assert USD.prices.filter_by(value=Decimal('0.12345')).one() == p1
with pytest.raises(ValueError):
book_basic.validate()
@needweb
def test_update_currency_prices(self, book_basic):
if not is_inmemory_sqlite(book_basic):
print("skipping test for {}".format(book_basic))
return
EUR = book_basic.default_currency
with pytest.raises(GncPriceError):
EUR.update_prices()
USD = book_basic.currencies(mnemonic="USD")
USD.update_prices()
assert len(list(USD.prices)) < 7
assert (USD.prices.first() is None) or (USD.prices.first().commodity is USD)
CAD = book_basic.currencies(mnemonic="CAD")
CAD.update_prices()
assert len(list(CAD.prices)) < 7
assert (CAD.prices.first() is None) or (CAD.prices.first().commodity is CAD)
# redo update prices which should not bring new prices
l = len(list(USD.prices))
USD.update_prices()
assert len(list(USD.prices)) == l
book_basic.validate()
assert len(book_basic.prices) < 14
@needweb
def test_update_stock_prices(self, book_basic):
if not is_inmemory_sqlite(book_basic):
print("skipping test for {}".format(book_basic))
return
cdty = Commodity(mnemonic="AAPL", namespace="NASDAQ", fullname="Apple", book=book_basic)
# cdty["quoted_currency"] = "USD"
# assert cdty.get("quoted_currency") == "USD"
cdty.update_prices()
book_basic.flush()
L = len(list(cdty.prices))
assert L < 7
cdty.update_prices()
book_basic.flush()
assert len(list(cdty.prices)) == L
book_basic.validate()
def test_price_update_on_commodity_no_book(self, book_basic):
cdty = Commodity(namespace="AMEX", mnemonic="APPLE", fullname="Apple")
with pytest.raises(GncPriceError):
cdty.update_prices()
|
Snow-dash/tag_json
|
main.py
|
<filename>main.py
#-------------------changeable var below-------------------
originPath = ''  #----- folder containing this script; also determines where the output file is written
pat = ''  #----- path to the data\minecraft\tags folder extracted from the version .jar
typeli = ['blocks', 'items', 'entity_types', 'fluids', 'game_events']  #----- which tag types to export
#-------------------changeable var above-------------------
import os,json,sys
sys.path.append(originPath + "\\slpp-23-master")
os.chdir(originPath)
from slpp import slpp as lua
outjs=open('out.txt','w',encoding='utf-8')
def sorteddict(dic):
indic={}
indic2={}
for i in dic:
indic[i]=dic[i]
indic=sorted(indic)
for i in indic:
indic2[i]=dic[i]
return indic2
dic={}
dic['tag_ori']={}
dic['tag']={}
jsdic={}
for root, dirs, files in os.walk(pat, topdown=False):
loctp=''
fix=''
sub=root[len(pat)+1:]
if '\\' in sub:
loctp=sub.split('\\')[0]
fix=sub.split('\\')[1]+'/'
else:
loctp=sub
fix=''
for fi in files:
if loctp not in jsdic:
jsdic[loctp]={}
jsdic[loctp][fix+fi]=root+'\\'+fi
for tp in typeli:
if tp not in dic['tag_ori']:
dic['tag_ori'][str(tp)]={}
#dic['tag_ori'][tp][js]=[]
l=jsdic[tp]
for js in l:
if js[0:-5] not in dic['tag_ori'][tp]:
dic['tag_ori'][tp][js[0:-5]]=[]
temp=open(jsdic[tp][js],'r')
tjs=json.loads(temp.read())
for key in tjs['values']:
if js[0:-5] not in dic['tag_ori'][tp]:
dic['tag_ori'][tp][js[0:-5]]=[]
dic['tag_ori'][tp][js[0:-5]].append(key)
temp.close()
# record the original (unexpanded) tags above
tempdic = {}  # maps each tag -> flat list of IDs, with nested #tag references expanded
for tp in typeli:
l=jsdic[tp]
for js in l:
temp=open(jsdic[tp][js],'r')
tjs=json.loads(temp.read())
if tp not in tempdic:
tempdic[tp]={}
tempdic[tp][js[0:-5]]=[]
for key in tjs["values"]:#key:标签json记录的id或标签
#print(key)
if '#' in key:
temp2=open(pat+'\\'+tp+'\\'+key[11:]+'.json')#二级标签
tjs2=json.loads(temp2.read())
for key2 in tjs2["values"]:
if '#' in key2:
temp3=open(pat+'\\'+tp+'\\'+key2[11:]+'.json')#三级标签
tjs3=json.loads(temp3.read())
for key3 in tjs3["values"]:
if '#' in key3:
#print("标签迭代次数过多\n")
temp4=open(pat+'\\'+tp+'\\'+key3[11:]+'.json')#四级标签
tjs4=json.loads(temp4.read())
for key4 in tjs4["values"]:
if '#' in key4:
print("标签迭代次数过多\n")
else:
tempdic[tp][js[0:-5]].append(key4[10:])
temp4.close()
else:
tempdic[tp][js[0:-5]].append(key3[10:])
temp3.close()
else:
tempdic[tp][js[0:-5]].append(key2[10:])
temp2.close()
else:
tempdic[tp][js[0:-5]].append(key[10:])
for tp in typeli:
for tag in tempdic[tp]:
if tp not in dic['tag']:
dic['tag'][tp]={}
        dic['tag'][tp][tag]=sorted(list(set(tempdic[tp][tag])))  # deduplicate and sort (workaround for MC-223843)
#print(dic['tag'][tp][tag])
dic['tag'][tp]=sorteddict(dic['tag'][tp])
dic['ID']={}
for tp in typeli:
dic['ID'][tp]={}
for tag in tempdic[tp]:
for objid in tempdic[tp][tag]:
if objid not in dic['ID'][tp]:
dic['ID'][tp][objid]=[]
dic['ID'][tp][objid].append(tag)
for tp in typeli:
for objid in dic['ID'][tp]:
dic['ID'][tp][objid]=sorted(list(set(dic['ID'][tp][objid])))
outjs.write(lua.encode(dic))
outjs.close()
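# A possible generalisation of the nested loops above: a small recursive helper that
# expands a tag file into its flat list of IDs, following up to max_depth levels of
# #tag references. It uses the same file layout as `pat` above and is only a sketch;
# the script itself does not call it.
def resolve_tag(tp, name, depth=0, max_depth=8):
    if depth > max_depth:
        print("too many levels of nested tags\n")
        return []
    ids = []
    with open(pat + '\\' + tp + '\\' + name + '.json') as fp:
        for entry in json.load(fp)['values']:
            if '#' in entry:
                # "#minecraft:foo" -> recurse into the referenced tag file "foo.json"
                ids.extend(resolve_tag(tp, entry[11:], depth + 1, max_depth))
            else:
                # "minecraft:stone" -> keep the bare ID "stone", as in the loops above
                ids.append(entry[10:])
    return ids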
|
bguta/Malcolm-x-bot
|
malcolm_tweet.py
|
import config
import malcolm_quotes as mx
import IndexReader as ir
charLim = 280
api = config.api()
def main():
stuff = ir.main()
pg = int(stuff[0])
qt = int(stuff[1])
txt = mx.getQuote(pg, qt)
# print(txt)
print("page: " + str(pg) + " Quote: " + str(qt))
tweets = shorten(txt)
if(div(txt) > 1):
if (not posted(tweets)):
api.update_status(tweets.pop(0))
for tweet in tweets:
ide = api.user_timeline(id=api.me().id, count=1)[0].id
api.update_status(tweet, in_reply_to_status_id=ide)
# print(tweet)
else:
main()
else:
if (not posted(tweets)):
api.update_status(tweets[0])
# print(tweet[0])
else:
main()
def div(txt):
if len(txt) > charLim:
for i in range(1, 10):
if (len(txt) // i < 280 and (len(txt) // i) % 280 >= 10):
return i
raise ValueError("WTF? how long is this quote")
return 1
def shorten(txt):
i = div(txt)
letters = list(txt)
ar = []
for j in range(i):
q = j + 1
t = "".join(letters[j * len(letters) // i: q * len(letters) // i])
ar.append(t)
return ar
def posted(txts):
tweets = [tweet.full_text for tweet in api.user_timeline(
id=api.me().id, count=(10 ** 4), tweet_mode="extended")]
for tweet in tweets:
if eq(tweet, txts[0]):
print("Already posted")
return True
return False
def eq(tx1, tx2):
if len(tx1) != len(tx2):
return False
if tx1 == tx2:
return True
return False
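# Behaviour sketch for div()/shorten() on an over-long quote (lengths are illustrative):
# a 600-character string gives div() == 3, and shorten() returns three 200-character
# chunks whose concatenation is the original text; main() then posts them as a reply thread.
#
#     sample = "x" * 600
#     assert div(sample) == 3
#     parts = shorten(sample)
#     assert len(parts) == 3 and "".join(parts) == sample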
|
bguta/Malcolm-x-bot
|
malcolm_quotes.py
|
<reponame>bguta/Malcolm-x-bot
import requests
from bs4 import BeautifulSoup as bs
import re
url = "http://www.azquotes.com/author/9322-Malcolm_X?p="
# p must be between 1 and 32: this is the page
# q must be between 1 and 25; this is the quote
# this method returns a quote by malcolm x from the url
def getQuote(p, q):
    if p == 32:
        q = (q % 6) + 1
    if p < 1 or p > 32:
        p = 16
    if p != 32 and (q > 25 or q < 1):
        q = 15
q = 15
page = requests.get(url + str(p))
s = bs(page.content, "html.parser")
qts = s.findAll("ul", {"class": "list-quotes"})[0].findAll("li")
qt = re.sub(r"^\s+", " ", qts[q].text.strip(), flags=re.MULTILINE)
qt = re.sub(r"\n Report", "", qt)
qt = re.sub(r"\n", "", qt)
qt = qt.split(". Malcolm X")[0]
return qt
|
bguta/Malcolm-x-bot
|
dailyTimer.py
|
<filename>dailyTimer.py
import time
import malcolm_tweet
import dropbox_ as dbx
import random
# this is the main function and scheduler
day = 86400
def main():
dbx.download("index.txt", "index.txt")
malcolm_tweet.main()
dbx.upload("index.txt", "index.txt")
def getTime():
return random.randint(day // 5, day)
counter = 0
t = getTime()
while True:
time.sleep(1)
if(counter % 3600 == 0):
print("waiting...")
counter += 1
if counter == t:
main()
counter = 0
t = getTime()
|
bguta/Malcolm-x-bot
|
IndexReader.py
|
<filename>IndexReader.py
import re
def look():
with open("index.txt", "r") as file:
line = file.readline()
ar = re.sub(r"\t", " ", line).split()
return ar
def write(p, q):
with open("index.txt", "w") as file:
file.write(str(p) + "\t" + str(q))
def main():
reset = False
stuff = look()
p = int(stuff[0])
q = int(stuff[1])
    if(p == 32 and q == 6):
        reset = True
    if(q == 25 and p < 32):
        p += 1
    if(p != 32):
q = (q % 25) + 1
else:
q = (q % 6) + 1
if(reset is True):
p = 1
write(p, q)
return stuff
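# Hedged usage sketch, not part of the original file: main() returns the current
# (page, quote) pair read from index.txt and advances the stored index by one.
if __name__ == '__main__':
    write(1, 1)    # seed index.txt at page 1, quote 1
    print(main())  # prints ['1', '1']; index.txt now holds "1\t2"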
|
bguta/Malcolm-x-bot
|
config.py
|
# this is your twitter info
# I removed mine but you can add yours and it will do the exact same thing
import tweepy
# your info
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
def api():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
|
bguta/Malcolm-x-bot
|
dropbox_.py
|
"""
A module to read and write dropbox files and save them using dropbox
"""
import dropbox
import os
ac = "" # add your own access key for dropbox api
dbx = dropbox.Dropbox(ac)
def upload(source, output, large=False):
"""
@param source
a file name e.g (test.txt). this is the file which is read
@param output
        a file name for the output e.g. (test.txt); this is the name
        the file will have in Dropbox
"""
with open(source, "rb") as file:
if not large:
dbx.files_upload(file.read(),
"/" + output, mode=dropbox.files.WriteMode('overwrite'))
else:
with open(source, "rb") as f:
# got this from ----> https://stackoverflow.com/a/43724479
file_size = os.path.getsize(source)
CHUNK_SIZE = 4 * 1024 * 1024
upload_session_start_result = dbx.files_upload_session_start(
f.read(CHUNK_SIZE))
cursor = dropbox.files.UploadSessionCursor(session_id=upload_session_start_result.session_id,
offset=f.tell())
commit = dropbox.files.CommitInfo(path="/" + output,
mode=dropbox.files.WriteMode('overwrite'))
while f.tell() < file_size:
if ((file_size - f.tell()) <= CHUNK_SIZE):
dbx.files_upload_session_finish(f.read(CHUNK_SIZE),
cursor,
commit)
else:
dbx.files_upload_session_append(f.read(CHUNK_SIZE),
cursor.session_id,
cursor.offset)
cursor.offset = f.tell()
def download(source, output):
"""
@param source
a file name e.g (test.txt). this is the file which is read in dropbox
@param output
        a file name for the output e.g. (test.txt); this is the local file
        the downloaded content is written to
"""
meta, res = dbx.files_download("/" + source)
with open(output, "wb") as file:
file.write(res.content)
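# Hedged usage sketch, not part of the original file: requires a valid access
# token in `ac` above; the file names are placeholders.
if __name__ == '__main__':
    upload('index.txt', 'index.txt')    # push local index.txt to /index.txt
    download('index.txt', 'index.txt')  # pull it back down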
|
AdamIsrael/openmano
|
utils.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: <EMAIL>
##
'''
utils is a module that implements functions that are used by all openmano modules,
dealing with aspects such as reading/writing files, formatting inputs/outputs for quick translation
from dictionaries to appropriate database dictionaries, etc.
'''
__author__="<NAME>, <NAME>"
__date__ ="$08-sep-2014 12:21:22$"
import datetime
from jsonschema import validate as js_v, exceptions as js_e
#from bs4 import BeautifulSoup
def read_file(file_to_read):
"""Reads a file specified by 'file_to_read' and returns (True,<its content as a string>) in case of success or (False, <error message>) in case of failure"""
try:
f = open(file_to_read, 'r')
read_data = f.read()
f.close()
except Exception,e:
return (False, str(e))
return (True, read_data)
def write_file(file_to_write, text):
"""Write a file specified by 'file_to_write' and returns (True,NOne) in case of success or (False, <error message>) in case of failure"""
try:
f = open(file_to_write, 'w')
f.write(text)
f.close()
except Exception,e:
return (False, str(e))
return (True, None)
def format_in(http_response, schema):
try:
client_data = http_response.json()
js_v(client_data, schema)
#print "Input data: ", str(client_data)
return True, client_data
except js_e.ValidationError, exc:
print "validate_in error, jsonschema exception ", exc.message, "at", exc.path
return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
def remove_extra_items(data, schema):
deleted=[]
if type(data) is tuple or type(data) is list:
for d in data:
a= remove_extra_items(d, schema['items'])
if a is not None: deleted.append(a)
elif type(data) is dict:
#TODO deal with patternProperties
if 'properties' not in schema:
return None
for k in data.keys():
if k not in schema['properties'].keys():
del data[k]
deleted.append(k)
else:
a = remove_extra_items(data[k], schema['properties'][k])
if a is not None: deleted.append({k:a})
if len(deleted) == 0: return None
elif len(deleted) == 1: return deleted[0]
else: return deleted
#def format_html2text(http_content):
# soup=BeautifulSoup(http_content)
# text = soup.p.get_text() + " " + soup.pre.get_text()
# return text
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and, when found, remove the units and convert it to a number
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'reverse': by default converts from str to int (Mbps); if True it converts from number to units
Return:
None
'''
if type(data) is dict:
for k in data.keys():
if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
convert_bandwidth(data[k], reverse)
if "bandwidth" in data:
try:
value=str(data["bandwidth"])
if not reverse:
pos = value.find("bps")
if pos>0:
if value[pos-1]=="G": data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000
else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
else:
value = int(data["bandwidth"])
if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
else: data["bandwidth"]=str(value) + " Mbps"
except:
print "convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"]
return
if type(data) is tuple or type(data) is list:
for k in data:
if type(k) is dict or type(k) is tuple or type(k) is list:
convert_bandwidth(k, reverse)
def convert_datetime2str(var):
    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%M:%S'
It enters recursively in the dict var finding this kind of variables
'''
if type(var) is dict:
for k,v in var.items():
if type(v) is datetime.datetime:
var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
elif type(v) is dict or type(v) is list or type(v) is tuple:
convert_datetime2str(v)
if len(var) == 0: return True
elif type(var) is list or type(var) is tuple:
for v in var:
convert_datetime2str(v)
def convert_str2boolean(data, items):
'''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
Done recursively
Attributes:
'data': dictionary variable to be checked. None or empty is considered valid
'items': tuple of keys to convert
Return:
None
'''
if type(data) is dict:
for k in data.keys():
if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
convert_str2boolean(data[k], items)
if k in items:
if type(data[k]) is str:
if data[k]=="false" or data[k]=="False": data[k]=False
elif data[k]=="true" or data[k]=="True": data[k]=True
if type(data) is tuple or type(data) is list:
for k in data:
if type(k) is dict or type(k) is tuple or type(k) is list:
convert_str2boolean(k, items)
def check_valid_uuid(uuid):
id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
try:
js_v(uuid, id_schema)
return True
except js_e.ValidationError:
return False
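# Hedged usage sketch, not part of the original module; the values are illustrative
# and the module itself targets Python 2.
if __name__ == '__main__':
    d = {"bandwidth": "2 Gbps"}
    convert_bandwidth(d)                # d becomes {'bandwidth': 2000} (Mbps as int)
    convert_bandwidth(d, reverse=True)  # d becomes {'bandwidth': '2 Gbps'} again (Python 2)
    print(check_valid_uuid("12345678-1234-1234-1234-123456789abc"))  # True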
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/common/exceptions.py
|
<filename>ironic_inventory/common/exceptions.py
# -*- encoding: utf-8 -*-
"""Common exceptions for the inventory manager.
"""
class ExistingMACAddress(Exception):
code = 409
message = u'A server with the MAC address %(address)s already exists.'
def __init__(self, address, message=None, **kwargs):
"""
:param address: The conflicting MAC address.
:param message: The exception message. Optional
"""
if not message:
# Construct the default message.
            message = self.message % {'address': address}
super(ExistingMACAddress, self).__init__(message)
class ExistingServerName(Exception):
code = 409
message = u'A server using the name %(name)s already exists.'
def __init__(self, name, message=None, **kwargs):
"""
:param name:
:param message:
:param kwargs:
"""
if not message:
            message = self.message % {'name': name}
super(ExistingServerName, self).__init__(message)
class ExistingServer(Exception):
code = 409
message = u'This server already exists.'
def __init__(self):
super(ExistingServer, self).__init__()
class ServerNotFound(Exception):
code = 404
message = u'The server %(identifier)s was not found.'
def __init__(self, message=None, **kwargs):
"""
:param message: An overridden exception message.
:param uuid: The server's uuid
:param name: The server's name
"""
if not message:
if kwargs.get('name'):
                message = self.message % {'identifier': kwargs['name']}
            elif kwargs.get('uuid'):
                message = self.message % {'identifier': kwargs['uuid']}
else:
message = u'The server was not found.'
super(ServerNotFound, self).__init__(message)
class ServerReserved(Exception):
    message = ('The server %(uuid)s has an existing reservation, please remove'
' the reservation and retry.')
def __init__(self, message=None, **kwargs):
"""
:param message:
:param server_uuid:
"""
if not message:
uuid = kwargs.get('server_uuid')
if not uuid:
message = ('The server has an existing reservation, please'
' remove and retry the operation.')
else:
                message = self.message % {'uuid': uuid}
super(ServerReserved, self).__init__(message)
class ServerNotReserved(Exception):
message = 'The server %(server_uuid)s does not have a reservation.'
def __init__(self, message=None, **kwargs):
if not message:
uuid = kwargs.get('server_uuid')
if not uuid:
message = 'The server does not have an existing reservation.'
else:
                message = self.message % {'server_uuid': uuid}
super(ServerNotReserved, self).__init__(message)
class ServerNotDeployed(Exception):
message = 'The server %(uuid)s is not in a deployed state.'
def __init__(self, message=None, **kwargs):
"""
:param message: A custom message.
:param uuid: The server's uuid
"""
if not message:
uuid = kwargs.get('uuid')
if not uuid:
message = 'The server is not in a deployed state.'
else:
                message = self.message % {'uuid': uuid}
super(ServerNotDeployed, self).__init__(message)
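# Hedged usage sketch, not part of the original module: shows the default
# message formatting; the MAC address is a made-up example value.
if __name__ == '__main__':
    try:
        raise ExistingMACAddress(address='aa:bb:cc:dd:ee:ff')
    except ExistingMACAddress as exc:
        print(exc)  # A server with the MAC address aa:bb:cc:dd:ee:ff already exists.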
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/sqlalchemy/alembic/versions/4f310004f218_adding_initial_tables.py
|
"""Adding initial tables
Revision ID: 4f310004f218
Revises:
Create Date: 2015-10-23 16:08:21.396455
"""
# revision identifiers, used by Alembic.
revision = '4f310004f218'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('reservations',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('servers',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('cpu_count', sa.Integer(), nullable=True),
sa.Column('local_drive_capacity', sa.Integer(), nullable=True),
sa.Column('psu_capacity', sa.Integer(), nullable=True),
sa.Column('psu_size', sa.String(length=36), nullable=True),
sa.Column('memory_mb', sa.Integer(), nullable=True),
sa.Column('cpu_architecture', sa.String(), nullable=True),
sa.Column('driver_name', sa.String(), nullable=True),
sa.Column('deploy_kernel', sa.String(), nullable=True),
sa.Column('deploy_ramdisk', sa.String(), nullable=True),
sa.Column('ipmi_address', sa.String(), nullable=True),
sa.Column('ipmi_password', sa.String(), nullable=True),
sa.Column('impi_username', sa.String(), nullable=True),
sa.Column('impi_priv_level', sa.String(), nullable=True),
sa.Column('ipmi_mac_address', sa.String(), nullable=True),
sa.Column('reservation_id', sa.Integer(), nullable=True),
sa.Column('deployed', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('ipmi_mac_address', name='uniq_servers0impmimacaddress'),
sa.UniqueConstraint('name', name='uniq_servers0name'),
sa.UniqueConstraint('uuid', name='uniq_servers0uuid')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('servers')
op.drop_table('reservations')
### end Alembic commands ###
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/controllers/v1/server.py
|
# -*- encoding: utf-8 -*-
import pecan
from pecan import rest
from wsme import types as wtypes
from ironic_inventory.api.controllers.base import ApiBase
from ironic_inventory.api.controllers.root import wsme_expose
from ironic_inventory.db import api as dbapi
from ironic_inventory.objects.server import ServerFields
class Server(ApiBase):
"""API Representation of a Server.
"""
id = wtypes.StringType
uuid = wtypes.StringType
name = wtypes.StringType
cpu_count = wtypes.IntegerType
local_drive_capacity = wtypes.IntegerType
psu_capacity = wtypes.IntegerType
psu_size = wtypes.IntegerType
memory_mb = wtypes.IntegerType
cpu_architecture = wtypes.StringType
ipmi_password = wtypes.StringType
ipmi_username = wtypes.StringType
ipmi_priv_level = wtypes.StringType
ipmi_mac_address = wtypes.StringType
reservation_id = wtypes.StringType
deployed = wtypes.BinaryType
def __init__(self, **kwargs):
self.fields = []
for field in ServerFields:
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@classmethod
def from_dict(cls, **kwargs):
server = cls(
id=kwargs.get('id'),
uuid=kwargs.get('uuid'),
name=kwargs.get('name'),
cpu_count=kwargs.get('cpu_count'),
local_drive_capacity=kwargs.get('local_drive_capacity'),
psu_capacity=kwargs.get('psu_capacity'),
psu_size=kwargs.get('psu_size'),
memory_mb=kwargs.get('memory_mb'),
cpu_architecture=kwargs.get('cpu_architecture'),
ipmi_password=kwargs.get('ipmi_password'),
ipmi_username=kwargs.get('ipmi_username'),
ipmi_priv_level=kwargs.get('ipmi_priv_level'),
ipmi_mac_address=kwargs.get('ipmi_mac_address'),
reservation_id=kwargs.get('reservation_id'),
deployed=kwargs.get('deployed')
)
return server
class ServerCollection(ApiBase):
servers = [Server]
@classmethod
def from_list_of_dicts(cls, server_list):
"""
:param server_list:
:return:
"""
collection = cls()
collection.servers = [
            Server.from_dict(**server.as_dict()) for server in server_list]
return collection
class ServerController(rest.RestController):
dbapi = dbapi.get_instance()
@wsme_expose(Server, wtypes.StringType)
def get_one(self, server_uuid):
"""Get a single server.
:return:
"""
server = Server.from_dict(
            **self.dbapi.get_server_by_uuid(server_uuid).as_dict())
return server
@wsme_expose(ServerCollection, wtypes.StringType, int, wtypes.text, wtypes.text)
def get_all(self):
servers = self.dbapi.get_all_servers()
return ServerCollection.from_list_of_dicts(servers)
@wsme_expose(Server, body=Server, status_code=201)
def post(self, server):
"""Create a new server.
:param server: A server supplied via the request body.
"""
        db_server = self.dbapi.add_server(**server.as_dict())
        return Server.from_dict(**db_server.as_dict())
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/controllers/v1/reservation.py
|
from pecan import rest
from wsme import types as wtpes
from ironic_inventory.api.controllers.base import ApiBase
class Reservation(ApiBase):
"""API repersentation of a reservation."""
id = wtpes.IntegerType
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/common/paths.py
|
# -*- encoding: utf-8 -*-
"""Common application path definitions and helper methods."""
import os
from oslo_config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help=('Directory where the ironic inventory python module is'
' installed.')),
cfg.StrOpt('bindir',
default='$pybasedir/bin',
help=('Directory where ironic inventory binaries are'
' installed.')),
cfg.StrOpt('state_path',
default='$pybasedir',
help=("Top-level directory for maintaining the invenotry"
" manager's state.")),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/tests/db/__init__.py
|
<reponame>softlayer/ironic-inventory-integrator
__author__ = 'chaustin'
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/cmd/dbsync.py
|
# -*- encoding: utf-8 -*-
import sys
from oslo_config import cfg
# from ironic_inventory.common import service
# from ironic_inventory.db import migration
CONF = cfg.CONF
class DBCommand(object):
pass
def main():
valid_commands = {
'upgrade', 'downgrade', 'revision', 'version', 'stamp',
'create_schema'
}
service.prepare_service(sys.argv)
CONF.command.func()
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/sqlalchemy/base.py
|
<gh_stars>1-10
# -*- encoding: utf-8 -*-
"""Base definitions for SQLAlchemy and db specific configurations."""
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models as oslo_models
from sqlalchemy.ext.declarative import declarative_base
from ironic_inventory.common import paths
sql_opts = [
cfg.StrOpt('mysql_engine',
default='InnoDB',
help='MySQL engine to use.')
]
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('inventory.sqlite')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')
class InventoryBase(oslo_models.TimestampMixin, oslo_models.ModelBase):
"""DeclarativeBaseImpl class for inventory objects. """
def as_dict(self):
"""Represent a SQLAlchemy declarative base model as a dict by
        introspecting its columns.
"""
        # Note(caustin): A dict comprehension may be better here, but it is
        # unclear whether the case of an empty table needs to be considered.
        model_dict = dict()
        for column in self.__table__.columns:
            model_dict[column.name] = self[column.name]
return model_dict
def save(self, session=None):
"""Override ModelBase.save() to handle the case of session=None"""
# Note(caustin): This may be indicative of a smell from the project's
# layout. Look at refactoring the internal API to avoid this.
import ironic_inventory.db.sqlalchemy.api as db_api
if session is None:
session = db_api.get_session()
super(InventoryBase, self).save(session)
DeclarativeBaseImpl = declarative_base(cls=InventoryBase)
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/controllers/root.py
|
# -*- encoding: utf-8 -*-
"""
Root controller for web api.
"""
from pecan import rest
from wsme import types as wtpes
from ironic_inventory.api.controllers.base import wsme_expose
class Version(object):
"""The version's ID"""
id = wtpes.text
@staticmethod
def get_default(id):
version = Version()
version.id = id
return version
class Root(object):
"""The name of the API"""
name = wtpes.text
"""The suported versions"""
versions = [Version]
"""The default version"""
default_version = Version
@staticmethod
def get_default():
"""
:return:
"""
version_one = Version.get_default('v1')
root = Root()
root.name = "Ironic Inventory Manager"
root.versions = [version_one]
root.default_version = version_one
return root
class RootController(rest.RestController):
@wsme_expose(Root)
def get(self):
return Root.get_default()
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/sqlalchemy/models.py
|
<reponame>softlayer/ironic-inventory-integrator
# -*- encoding: utf-8 -*-
"""
SQLAlchemy models for bare metal inventory management.
"""
from sqlalchemy import Column, Integer, String, Boolean, schema
from sqlalchemy import ForeignKey, orm
from ironic_inventory.db.sqlalchemy.base import DeclarativeBaseImpl
from ironic_inventory.common import paths
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('inventory.sqlite')
class Server(DeclarativeBaseImpl):
"""Represents a Bare Metal Server."""
# TODO(caustin): Look at this table and normalize.
__tablename__ = 'servers'
__table_args__ = (
schema.UniqueConstraint('uuid', name='uniq_servers0uuid'),
schema.UniqueConstraint('name', name='uniq_servers0name'),
schema.UniqueConstraint('ipmi_mac_address',
name='uniq_servers0impmimacaddress'),)
# server information.
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36), nullable=False)
name = Column(String())
cpu_count = Column(Integer())
local_drive_capacity = Column(Integer())
psu_capacity = Column(Integer())
psu_size = Column(String(36))
memory_mb = Column(Integer())
cpu_architecture = Column(String(), default='x86_64')
# Driver Information.
driver_name = Column(String())
deploy_kernel = Column(String())
deploy_ramdisk = Column(String())
# IPMI Information.
ipmi_address = Column(String())
ipmi_password = Column(String())
impi_username = Column(String())
impi_priv_level = Column(String(), default='OPERATOR')
ipmi_mac_address = Column(String())
# Reservation Data
reservation_id = Column(Integer, ForeignKey('reservations.id'))
reservation = orm.relationship('Reservation', uselist=False,
cascade='all, delete-orphan',
single_parent=True)
# Deployed Flag
# Note(caustin): This may be better normalized.
deployed = Column(Boolean(), default=False)
class Reservation(DeclarativeBaseImpl):
"""Represents a reservation request for a server"""
# Note(caustin): Using SQLAlchemy's method of declaring one-to-one
# relationships. This may be better suited as an association table if
# there is the need for additional information to be stored in this table.
__tablename__ = 'reservations'
id = Column(Integer(), primary_key=True, autoincrement=True)
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/tests/test_units.py
|
from unittest import TestCase
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/sqlalchemy/migration.py
|
# -*- encoding: utf-8 -*-
"""Database setup and migration commands."""
from oslo_config import cfg
from stevedore import driver
_IMPL = None
def get_backend():
global _IMPL
if not _IMPL:
cfg.CONF.import_opt('backend', 'oslo_db.options', group='database')
_IMPL = driver.DriverManager("ironic_inventory.database.migration_backend",
cfg.CONF.database.backend).driver
return _IMPL
def upgrade(version=None):
"""Migrate the database to `version` or the most recent version."""
return get_backend().upgrade(version)
def downgrade(version=None):
return get_backend().downgrade(version)
def version():
return get_backend().version()
def stamp(version):
return get_backend().stamp(version)
def revision(message, autogenerate):
return get_backend().revision(message, autogenerate)
def create_schema():
return get_backend().create_schema()
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/controllers/v1/__init__.py
|
from ironic_inventory.api.controllers.v1 import server
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/sqlalchemy/api.py
|
# -*- encoding: utf-8 -*-
"""API/Interface to the SQLAlchemy backend.
"""
import copy
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from sqlalchemy.orm import exc as sqla_exc
from ironic_inventory.common import exceptions
from ironic_inventory.db import api
from ironic_inventory.db.sqlalchemy import models
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""
:return:
"""
return Connection()
class Connection(api.Connection):
def __init__(self):
pass
def _get_servers_query(self, kwargs):
session = get_session()
filters = copy.copy(kwargs)
filters['reservation_id'] = None
filters['deployed'] = False
query = session.query(models.Server).filter_by(**filters)
return query
def _delete_reservation(self, reservation_id, server_uuid):
session = get_session()
with session.begin():
query = session.query(models.Reservation).filter_by(id=reservation_id)
try:
reservation = query.one()
except sqla_exc.NoResultFound:
                # HACK(caustin): For now, swallow this exception.
                # In the very near future, roll back the deployment of the
                # server and raise an error.
                LOG.warn('Reservation for server %(uuid)s being deployed was not '
                         'found.', {'uuid': server_uuid})
                return
            session.delete(reservation)
def add_server(self, **kwargs):
"""Adds a provisional server to the inventory.
:param name: The Server's name or id.
:param cpu_count: The number of CPUs in the server.
:param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capacity.
:param chassis_size: The size of the server's chassis.
:param memory: The server's memory in MB.
:param local_drive_size: The size in GB of the local drive.
:param driver_name: The name of the Ironic provisioning driver.
:param deploy_kernel: The UUID of the deploy kernel.
:param deploy_ramdisk: The UUID of the deploy ramdisk.
:param ipmi_address: The IP Address of the IPMI interface.
:param ipmi_password: The <PASSWORD>.
:param impi_username: The User ID / name of the IPMI user.
:param impi_priv_level: The IPMI Privilege Level of the user.
:param ipmi_mac_address: The MAC Address of the IPMI interface.
:param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
"""
server = models.Server()
server.update(kwargs)
try:
server.save()
except db_exc.DBDuplicateEntry as exc:
if 'ipmi_mac_address' in exc.columns:
raise exceptions.ExistingMACAddress(
address=kwargs['ipmi_mac_address'])
if 'name' in exc.columns:
raise exceptions.ExistingServerName(name=kwargs['name'])
else:
raise exceptions.ExistingServer()
return server
def remove_server(self, uuid):
"""Remove a server from the inventory pool.
:param uuid: The server's uuid.
"""
session = get_session()
with session.begin():
query = session.query(models.Server).filter_by(uuid=uuid)
try:
server = query.one()
except sqla_exc.NoResultFound:
raise exceptions.ServerNotFound(server_uuid=uuid)
if server.reservation_id:
# Don't delete servers with an existing reservation.
raise exceptions.ServerReserved()
query.delete()
def get_all_servers(self):
"""Get all servers as a list.
"""
session = get_session()
return session.query(models.Server).all()
def get_matching_servers(self, **kwargs):
"""Return a list of servers that match the search parameters.
:param cpu_count: The number of CPUs in the server.
:param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capacity.
:param chassis_size: The size of the server's chassis.
:param memory: The server's memory in MB.
:param local_drive_size: The size in GB of the local drive.
:param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
:return: list
"""
try:
query = self._get_servers_query(kwargs)
servers = query.all()
for server in servers:
self.reserve_server(server)
except sqla_exc.NoResultFound:
# Note(caustin): For now, I am considering the case where no match is
# found to not be an exception. So, just return None.
return None
return servers
def get_single_server_match(self, **kwargs):
"""Return a single server that matches the search parameters.
:param cpu_count: The number of CPUs in the server.
:param chassis_drive_capacity: The drive capacity of the server's chassis.
        :param psu_capacity: The server's power supply capacity.
:param chassis_size: The size of the server's chassis.
:param memory: The server's memory in MB.
:param local_drive_size: The size in GB of the local drive.
:param cpu_arch: The CPU Architecture. Defaults to 'x86_64'
"""
try:
query = self._get_servers_query(kwargs)
            server = query.first()
            if server is None:
                # first() returns None instead of raising when nothing matches.
                return None
            self.reserve_server(server)
except sqla_exc.NoResultFound:
# Note(caustin): For now, I consider the case where no server meeting
            # the criteria is found to be non-exceptional. So, returning None in
# this case.
return None
return server
def get_server_by_uuid(self, server_id):
"""Get a server by it's uuid
:param server_id: The server's uuid
"""
session = get_session()
query = session.query(models.Server).filter_by(uuid=server_id)
try:
return query.one()
except sqla_exc.NoResultFound:
raise exceptions.ServerNotFound(uuid=server_id)
def get_server_by_name(self, server_name):
"""Get a server by it's name.
:param server_name: The server's unique name.
"""
session = get_session()
query = session.query(models.Server).filter_by(name=server_name)
try:
return query.one()
except sqla_exc.NoResultFound:
raise exceptions.ServerNotFound(name=server_name)
def update_server(self, server_uuid, **kwargs):
"""
:param server_uuid:
:param kwargs:
"""
session = get_session()
with session.begin():
query = session.query(models.Server).filter_by(uuid=server_uuid)
try:
# TODO (caustin): 'with_lockmode' has been superseded by
# with_for_update in SQLAlchemy. Update and test when possible.
server = query.with_lockmode('update').one()
except sqla_exc.NoResultFound:
raise exceptions.ServerNotFound(uuid=server_uuid)
if server.reservation_id:
# We probably shouldn't update a server that has an existing
# reservation in place.
raise exceptions.ServerReserved()
server.update(kwargs)
return server
def reserve_server(self, server_instance):
"""Create a reservation for a server.
:param server_instance: A server object.
"""
if server_instance.reservation_id:
raise exceptions.ServerReserved(server_uuid=server_instance.uuid)
reservation = models.Reservation()
reservation.save()
server_instance.update({'reservation_id': reservation.id})
server_instance.save()
return server_instance
def cancel_reservation(self, server_uuid):
"""Cancel a reservation for a server.
"""
        server = self.get_server_by_uuid(server_uuid)
        reservation_id = server.reservation_id
        if not reservation_id:
            raise exceptions.ServerNotReserved(server_uuid=server_uuid)
        updated_server = self.update_server(server_uuid, **{'reservation_id': None})
        self._delete_reservation(reservation_id, server_uuid)
return updated_server
def deploy_server(self, server_uuid, *args, **kwargs):
"""Mark a server as being used by an ironic node.
:param server_instance:
:param args:
:param kwargs:
:return:
"""
server = self.get_server_by_uuid(server_uuid)
reservation_id = server.reservation_id
        if not reservation_id:
            raise exceptions.ServerNotReserved(server_uuid=server_uuid)
update_values = {'reservation_id': None, 'deployed': True}
deployed_server = self.update_server(server_uuid, **update_values)
self._delete_reservation(reservation_id, server_uuid)
return deployed_server
def return_server_to_pool(self, server_uuid, *args, **kwargs):
"""Returns a previously deployed server to the pool of available servers.
:param server_uuid:
:param args:
:param kwargs:
:return:
"""
session = get_session()
with session.begin():
query = session.query(models.Server).filter_by(uuid=server_uuid)
try:
server = query.with_lockmode('update').one()
except sqla_exc.NoResultFound:
raise exceptions.ServerNotFound(uuid=server_uuid)
if not server.deployed:
raise exceptions.ServerNotDeployed(uuid=server_uuid)
server.update({'deployed': False})
return server
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/db/api.py
|
# -*- encoding: utf-8 -*-
"""Database API for CRUD on inventory servers.
"""
import abc
import six
from oslo_config import cfg
from oslo_db import api as db_api
_BACKEND_MAPPING = {'sqlalchemy': 'ironic_inventory.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
def get_instance():
return IMPL
@six.add_metaclass(abc.ABCMeta)
class Connection(object):
@abc.abstractmethod
def add_server(self, **kwargs):
pass
@abc.abstractmethod
def remove_server(self, uuid):
pass
@abc.abstractmethod
def get_all_servers(self):
pass
@abc.abstractmethod
def get_matching_servers(self, **kwargs):
pass
@abc.abstractmethod
def get_single_server_match(self, **kwargs):
pass
@abc.abstractmethod
def get_server_by_uuid(self, server_id):
pass
@abc.abstractmethod
def get_server_by_name(self, server_name):
pass
@abc.abstractmethod
def update_server(self, server_uuid, **kwargs):
pass
@abc.abstractmethod
def reserve_server(self, server_instance):
pass
@abc.abstractmethod
def cancel_reservation(self, server_uuid):
pass
@abc.abstractmethod
def deploy_server(self, server_uuid, *args, **kwargs):
pass
@abc.abstractmethod
def return_server_to_pool(self, server_id, *args, **kwargs):
pass
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/config.py
|
<filename>ironic_inventory/api/config.py
# -*- encoding: utf-8 -*-
# Server Specific Configurations
server = {
'port': '8090',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'ironic_inventory.api.controllers.root.RootController',
'modules': ['ironic_inventory.api'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/ironic_inventory/templates',
'debug': True,
}
wsme = {
'debug': True,
}
logging = {
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'ironic_inventory': {'level': 'DEBUG', 'handlers': ['console']},
'pecan': {'level': 'DEBUG', 'handlers': ['console']},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
'[%(threadName)s] %(message)s'),
'__force_dict__': True
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/objects/server.py
|
<gh_stars>1-10
"""A definition of the fields describing a server used in lieu of defining remotable
objects"""
ServerFields = frozenset(['id', 'uuid', 'name', 'cpu_count',
'local_drive_capacity', 'psu_capacity', 'psu_size',
'memory_mb', 'cpu_architecture', 'ipmi_password',
'ipmi_username', 'ipmi_priv_level',
'ipmi_mac_address', 'reservation_id', 'deployed'])
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/hooks.py
|
# -*- encoding: utf-8 -*-
"""Pecan request hooks."""
from pecan import hooks
from ironic_inventory.db import api as dbapi
class SqlAlchemyHook(hooks.PecanHook):
def before(self, state):
state.request.dbapi = dbapi.get_instance()
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/app.py
|
<reponame>softlayer/ironic-inventory-integrator
import pecan
from pecan import make_app
from ironic_inventory.api import config
def get_pecan_config():
"""Load the pecan configuration file and return the config object"""
config_file = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(config_file)
def setup_app(config):
pecan_config = get_pecan_config()
app = make_app(
pecan_config.app.root,
static_root=pecan_config.app.static_root,
debug=pecan_config.app.debug,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
)
return app
|
softlayer/ironic-inventory-integrator
|
ironic_inventory/api/controllers/base.py
|
<reponame>softlayer/ironic-inventory-integrator
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan
__author__ = 'chaustin'
class ApiBase(wtypes.Base):
"""A Base Object for handling serialization of objects from request
"""
def as_dict(self):
return {field: getattr(self, field) for field in self.fields}
def wsme_expose(*args, **kwargs):
"""Coerce the content type to json"""
if 'rest_content_types' not in kwargs:
kwargs['rest_content_types'] = ('json', )
return wsme_pecan.wsexpose(*args, **kwargs)
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex013.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video
s = float(input('qual erá o salario do funcionario? R$'))
aul = s * 15 / 100
print('o salario do funcionari que andes custava R${:.2f} agora com 15% de aumento custará R${:.2f}'.format(s, (s+aul)))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex045_GAME Pedra Papel e Tesoura.py
|
<filename>Exercicios/ex045_GAME Pedra Papel e Tesoura.py
from random import randint
from time import sleep
itens = ('Pedra', 'Papel', 'Tesoura') # lista de itens
computador = randint(0, 2) # o computador esta sorteando entre 0, 1 ou 2..
print(''' Suas opções
[0] Pedra
[1] Papel
[2] Tesoura''')
player = int(input('Qual é sua jogada? \n'))
sleep(0.5)
print('JO')
sleep(0.5)
print('KEN')
sleep(0.5)
print('PO')
print('=--=' * 7)
print('O computador jogou: {}{}{}'.format('\033[34m', itens[computador], '\033[m'))
print('O jogador jogou: {}{}{}'.format('\033[31m', itens[player], '\033[m'))
print('=--=' * 7)
print()
if computador == 0: # computador jogou PEDRA
if player == 0:
print('O jogo enpatou!')
elif player == 1:
print('Jogador vence!')
elif player == 2:
print('Computador vence!')
else:
print('Jogada invalida!!!')
elif computador == 1: # computador jogou PAPEL
if player == 1:
print('o jogo enpatou!')
elif player == 0:
print('Computador vence!')
elif player == 2:
print('Jogador vence!')
else:
print('Jogada invalida!!!')
elif computador == 2: # computador jogou TESOURA
if player == 2:
print('O jogo enpatou!')
elif player == 1:
print('Computador vence!')
elif player == 0:
print('Jogador vence')
else:
print('Jogada invalida!!!')
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex005.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video
A = float(input('Informe um valor numerico: '))
B = float(input('Informe outro valor numerico: '))
auxiliar = B
B = A
A = auxiliar
print('o valor de A-{} e de B-{} '.format(A, B))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula08a.py
|
<gh_stars>1-10
from math import sqrt
numero1 = float(input('digite um numero'))
raiz = sqrt(numero1)
print('a raiz de {} é {:.2f}'.format(numero1, raiz))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex028.py
|
from random import randint
numeroescolhido = randint(0, 5)
escolha = int(input('Adivinhe o numero que o computador escolheu entre 0 e 5: '))
if numeroescolhido == escolha:
print('parabens você acertou')
else:
print('Que pena você errou, Boa Sorte na proxima ')
print('O numero era {}'.format(numeroescolhido))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex005.py
|
n1 = int(input('Digite um numero:'))
cores = {'amarelo': '\033[33m',
'limpa': '\033[m',
'vermelha': '\033[31m'}
print('analisando o valor {}{}{} o seu antecesor é {}{}{} e seu sucessor é {}{}{}'.format(cores['amarelo'], n1, cores['limpa'],
cores['vermelha'], (n1-1), cores['limpa'],
cores['vermelha'], (n1+1), cores['limpa']))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/test.py
|
import math
def raiz():
try:
numero1 = float(input('Informe o numero para descobrir sua raiz: '))
resultado = math.sqrt(numero1)
return resultado
except ValueError:
print('Somente numeros')
def porcentagem():
try:
numero1 = float(input('Informe a porcentagem: '))
numero2 = float(input('Informe o numero que deseja saber a porcentagem: '))
resultado = (numero2 / 100) * numero1
return resultado
except ValueError:
print('Somente numeros')
def adicao():
resultado = numero1 + numero2
return resultado
def subtracao():
resultado = numero1 - numero2
return resultado
def multiplicacao():
resultado = numero1 * numero2
return resultado
def divisao():
resultado = numero1 / numero2
return resultado
def potenciacao():
resultado = math.pow(numero1, numero2)
return resultado
operacao = str(input('Informe a operação "+ - * / ** raiz ou %": ')).strip()
if operacao == 'raiz':
print('{:.2f}'.format(raiz()))
elif operacao == '%':
print(porcentagem())
else:
if (operacao == '+') or (operacao == '-') or (operacao == '*') or (operacao == '/') or (operacao == '**'):
try:
numero1 = float(input('Informe o 1° numero: '))
numero2 = float(input('Informe o 2° numero: '))
if operacao == '+':
print(adicao())
elif operacao == '-':
print(subtracao())
elif operacao == '*':
print(multiplicacao())
elif operacao == '/':
print(divisao())
elif operacao == '**':
print(potenciacao())
except ValueError:
print('Somente numeros')
else:
print('Operação invalida')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex047_Contagem de pares.py
|
<filename>Exercicios/ex047_Contagem de pares.py
from colorama import Fore
from colorama import Style
vetinpar = []
vetpar = []
for c in range(1, 51):
    if c % 2 == 0:
        vetpar.append(c)
    else:
        vetinpar.append(c)
print('Os numeros ímpares são: {}{}{}'.format(Fore.LIGHTRED_EX, vetinpar, Style.RESET_ALL))
print('Os numeros pares são: {}{}{}'.format(Fore.LIGHTGREEN_EX, vetpar, Style.RESET_ALL))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex049_Tabuada v.2.0.py
|
<filename>Exercicios/ex049_Tabuada v.2.0.py
class cor:
BLACK = '\033[30m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
PURPLE = '\033[95m'
CYAN = '\033[96m'
GRAY = '\33[37m'
WHITE = '\033[97m'
CLEAR = '\033[m'
numero = int(input('Informe o numero da tabuada: '))
print('====={}Tabuada{}====='.format(cor.GREEN, cor.CLEAR))
for c in range(1, 11):
print('{}{} X {}{} = {}{}{}'.format(cor.GREEN, numero, c, cor.CLEAR, cor.RED, (numero * c), cor.CLEAR))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex003.py
|
<gh_stars>1-10
n1 = int(input('digite um numero:'))
n2 = int(input('digite outro:'))
s = n1 + n2
cores = {'limpa': '\033[m',
'vermelho': '\033[0;31m',
'amarelo': '\033[0;33m'}
print('a soma entre {}{}{} e {}{}{} corresponde {}{}'.format(cores['amarelo'], n1, cores['limpa'], cores['amarelo'], n2,
cores['limpa'], cores['vermelho'], s, cores['limpa']))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex012.py
|
v = float(input('qual e o valor do seu produto? R$:'))
d = v / 100 * 5
print('o valor do produto de R$:{} agora com 5% de desconto ficou R$:{:.2f}'.format(v, (v-d)))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula07.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<gh_stars>1-10
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
s = n1 + n2
m = n1 * n2
d = n1 / n2
di = n1 // n2
po = n1 ** n2
print('A soma de {} e {} é de {} a multiplicação é {} e a divisão {:.3f}'.format(n1, n2, s, m, d), end=' ')
print('A divisão inteira vale {} e a potenciação é {}'.format(di, po))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula011a.py
|
print('\033[0;31;40mOlá, Mundo!\033[m')
print('\033[7;97mOlá, Mundo!\033[m')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex015.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<filename>Exercicios/ex015.py<gh_stars>1-10
di = int(input('quantos dias o carro foi alugado?'))
km = float(input('quantos km o carro percorreu?'))
total = (km * 0.15) + (di * 60)
print('o aluguel do carro custou R${:.2f}'.format(total))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex031_Custo Da Viagem.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<gh_stars>1-10
distancia = float(input('informe a distancia da sua viagem (km)'))
if distancia > 200:
print('O valor de sua viagem custara R$:{:.2f}'.format(distancia*0.45))
else:
print('O valor de sua viagem custará R$:{:.2f}'.format(distancia*0.50))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex013.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<filename>SATC_exercicios/ex013.py
vetcustos = []
vetprecos = []
for i in range(3):
vetcustos.append(float(input('Informe o custo de um produto: ')))
vetprecos.append(float(input('Informe o preço da vendo do produto: ')))
if vetcustos[i] == vetprecos[i]:
print('Não houve lucro e nem prejuiso')
elif vetcustos[i] < vetprecos[i]:
print('Houve lucro')
else:
print('houve prejuizo')
print('custo: {}'.format(vetcustos))
print('venda: {}'.format(vetprecos))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex015_Trabalho Modulo III.py
|
<filename>SATC_exercicios/ex015_Trabalho Modulo III.py
vetpessoas = []
# vetor de pessoas
def removerpaciente():
# função para remover os pacientes
for i in range(len(vetpessoas)):
print('Há pessoa da posição {}{}{} é {}{}{}'.format('\033[35m', i, '\033[m', '\033[31m', vetpessoas[i],
'\033[m'))
try:
posicao = (int(input('Informe a posição que deseja remover: ')))
if posicao < len(vetpessoas):
vetpessoas.pop(posicao)
else:
print(' {}Opção invalida{} '.format('\033[31m', '\033[m'))
except ValueError:
limpar()
print(' {}Somente numeros{} '.format('\033[31m', '\033[m'))
def atendimento(vetpessoas):
# função para o atendimento dos pacientes
print('O paciente: {}{}{} está sendo chamado(a)'.format('\033[34m', vetpessoas[0], '\033[m'))
vetpessoas.pop(0)
def cadastro():
# função para cadastrar os pacientes
vetpessoas.append(str(input('Informe o nome do paciente: ')))
def limpar():
# função para 'limpar a tela' (15 linhas em branco)
print("\n" * 20)
while True:
# menu de escolhas
print('-=-' * 13)
print('| {}1-{} Atendimento |'.format('\033[36m', '\033[m'))
print('| {}2-{} Cadastro |'.format('\033[36m', '\033[m'))
print('| {}3-{} Remover Usuario |'.format('\033[36m', '\033[m'))
print('| {}4-{} Listagem |'.format('\033[36m', '\033[m'))
print('| {}5-{} Sair |'.format('\033[36m', '\033[m'))
print('-=-' * 13)
opcao = str(input('')).strip()
# variavel para receber a escolha do menu
if opcao == '1':
limpar()
if len(vetpessoas) == 0:
print(' {}A lista está vazia{} '.format('\033[31m', '\033[m'))
else:
atendimento(vetpessoas)
# usado para chamar a função de atendimento
elif opcao == '2':
limpar()
cadastro()
limpar()
# usado para chamar a função de cadastro
elif opcao == '3':
limpar()
if len(vetpessoas) == 0:
print(' {}A lista está vazia{} '.format('\033[31m', '\033[m'))
else:
removerpaciente()
# usado para chamar a função de remover paciente
elif opcao == '4':
limpar()
if len(vetpessoas) == 0:
print(' {}A lista está vazia{} '.format('\033[31m', '\033[m'))
else:
for i in range(len(vetpessoas)):
                print('{}{}°{} pessoa é {}{}{}'.format('\033[34m', (i + 1), '\033[m', '\033[31m',
                                                       vetpessoas[i], '\033[m'))
# usado para mostrar a lista
elif opcao == '5':
print('{} Fim do programa!! {}'.format('\033[92:40m', '\033[m'))
break
# usado para fechar o programa
else:
limpar()
print(' {}Opção invalida{} '.format('\033[31m', '\033[m'))
# caso a opção informada não seja correspondente, retorna opção invalida
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex030.py
|
numero = int(input('Informe um número INTEIRO: '))
if numero % 2 == 0:
print('O numero {} e par'.format(numero))
else:
print('O numero {} e inpar'.format(numero))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula010.py
|
<filename>Aulas/aula010.py
nota1 = float(input('digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1 + nota2)/2
if media >= 6:
print('Sua media é exelente!! {} '.format('\U0001F496'))
else:
print('Sua media é ruin ESTUDE MAIS {} '.format('\U0001F4A2'))
# o codigo ->\U0001F4A2<- e o codigo ->\U0001F496<- são codigos de emoji
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex014.py
|
<gh_stars>1-10
# Desenvolva uma calculadora que possua um menu, tratamento de erro caso o usuario escolha uma operação invalida e utilize funções na mesma
def calculadora(numero1, numero2, operacao):
if operacao == '+':
print(numero1 + numero2)
elif operacao == '-':
print(numero1 - numero2)
elif operacao == '*':
print(numero1 * numero2)
elif operacao == '/':
print(numero1 / numero2)
else:
print('Operação invalida Reinicie o programa!!')
print('========================================')
print('| Informe o primeiro numero |')
print('| Informe a operação + - * / |')
print('| Informe o segundo numero |')
print('========================================')
numero1 = float(input(''))
operacao = str(input(''))
numero2 = float(input(''))
print('Resultado...')
calculadora(numero1, numero2, operacao)
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex023.py
|
<gh_stars>1-10
numero = int(input('Digite um numero: '))
u = numero // 1 % 10
d = numero // 10 % 10
c = numero // 100 % 10
m = numero // 1000 % 10
print('Analisando o numero: {}'.format(numero))
print('Unidade(s): {}'.format(u))
print('Desena(s): {}'.format(d))
print('Centena(s): {}'.format(c))
print('Milhar(es): {}'.format(m))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex027.py
|
<filename>Exercicios/ex027.py
nome = str(input('Digite seu nome completo :')).strip()
nome1 = nome.split()
print('prazer em conheser(lo)!!')
print('Seu primeiro nome é {}'.format(nome1[0]))
print('Seu ultimo nome é {}'.format(nome1[len(nome1)-1]))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex003.py
|
nome = str(input('Informe o seu nome: '))
salario = float(input('Informe seu salário fixo: '))
vendas = float(input('informe o total de vendas: '))
newsalario = salario + (vendas * 15 / 100)
print('Olá, {} seu salário fixo é R$:{:.2f} e o seu salário final desse mes é R$:{:.2f}'.format(nome, salario, newsalario))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex018.py
|
from math import radians, sin, cos, tan
angulo = float(input('Digite o valor do angulo desejado: '))
print('')
seno = sin(radians(angulo))
print('o seno do angulo {}° É> {:.2f}'.format(angulo, seno))
cosseno = cos(radians(angulo))
print('O cosseno do angulo {}° É> {:.2f}'.format(angulo, cosseno))
tangente = tan(radians(angulo))
print('À tangente o ângulo {}° É> {:.2f}'.format(angulo, tangente))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex020.py
|
from random import shuffle
nome1 = input('Primeiro nome:')
nome2 = input('Segundo nome:')
nome3 = input('Terceiro nome:')
nome4 = input('Quarto nome:')
lista = [nome1, nome2, nome3, nome4]
shuffle(lista)
print("")
print('A ordem de apresentação do trabalho será:')
print(lista)
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex021.py
|
import pygame
pygame.mixer.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
input('=====Iniciado...=====')
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex010.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video
numero1 = float(input('Informe o primeiro numero: '))
numero2 = float(input('Informe o segundo numero: '))
if numero1 > numero2:
print('O numero {} é maior'.format(numero1))
else:
print('numero {} e maior'.format(numero2))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex010.py
|
<filename>Exercicios/ex010.py
n1 = float(input('Quantos reais voce tem na carteira? R$'))
print('Com R${} voce pode comprar U${:.2f} '.format(n1, (n1/5.20)))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula09a.py
|
<filename>Aulas/aula09a.py
frase = 'Curso em Video Python'
dividido = frase.split()
print(dividido[0])
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex041_Classificando Atletas.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<gh_stars>1-10
from datetime import date
nascimento = int(input('Informe o ano de nacimento: '))
ano = date.today().year
idade = ano - nascimento
print('')
print('A idade do atleta é {} ano(S)'.format(idade))
if idade <= 9:
print('A categoria do atleta é MIRIM')
elif idade <= 14:
print('A categoria do atleta é INFANTIL')
elif idade <= 19:
print('A categoria do atleta é JÚNIOR')
elif idade <= 25:
print('A categoria do atlrta é SÊNIOR')
else:
print('A categoria do atleta é MASTER')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex046_Contagem regressiva.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<filename>Exercicios/ex046_Contagem regressiva.py<gh_stars>1-10
from time import sleep
print('Contagem regressiva para o lançamento dos fogos de artificios!!\n')
for c in range(10, -1, -1):
print('Contagem regreciva: {}'.format(c))
sleep(1)
print('\nEstourando os fogos de artificio...')
print('BOOOM BOOM BOMM...')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex026.py
|
frase = str(input('Digite uma frase ')).strip()
print('Aparece {} vezes a letra A'.format(frase.upper().count('A')))
print('A primeira letra A apareseu na posição {}'.format(frase.find('A')+1))
print('A ultima letra A apareceu na posição {}'.format(frase.rfind('A')+1))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex007.py
|
cores = {'vermelho': '\033[31m',
'limpa': '\033[m',
'verde': '\033[34m'}
preco = float(input('informe o valor do seu produto:'))
parcela = int(input('Informe em quantas vezes quer parcelar:'))
print('')
vetparcela = []
for i in range(parcela):
vetparcela.append(preco / parcela)
print('O valor de seu produto R$:{} parcelado em {} Vezes de R$:{}{:.2f}{} por parcela'.format(preco, parcela, cores['vermelho'], (preco/parcela), cores['limpa']))
print('')
for i in range(parcela):
print('A parcela N° {}{}{} De valor {}{:.2f}{}'.format(cores['verde'], i + 1, cores['limpa'], cores['vermelho'], vetparcela[i], cores['limpa']))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex056_Analisador completo.py
|
somaidade = 0
mediaidade = 0
idadevelho = 0
nomevelho = ''
mulhernova = 0
for p in range(1, 5):
    print('------ {}° PESSOA ------'.format(p))
nome = str(input('Informe seu nome: ')).strip()
idade = int(input('Informe sua idade: '))
sexo = str(input('informe seu Sexo [M/F]')).strip()
somaidade += idade
if p == 1 and sexo in 'Mm':
idadevelho = idade
nomevelho = nome
if sexo in 'Mm' and idade > idadevelho:
idadevelho = idade
nomevelho = nome
if sexo in 'Ff' and idade <= 20:
mulhernova += 1
mediaidade = somaidade / 4
print('')
print('A media de idade do grupo é de {} anos'.format(mediaidade))
print('O homen mais velho tem {} anos e su nome é {}'.format(idadevelho, nomevelho))
print('São {} mulher(es) com ou abaixo de 20 anos de idade!'.format(mulhernova))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex004.py
|
<filename>SATC_exercicios/ex004.py
nome = str(input('Informe o nome do aluno: '))
nota1 = float(input('Informe a 1° nota: '))
nota2 = float(input('Informe a 2° nota: '))
nota3 = float(input('Informe a 3° nota: '))
media = (nota1 + nota2 + nota3) / 3
print('A media do aluno: {} é de {:.2f}'.format(nome, media))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula013.py
|
<filename>Aulas/aula013.py
s = 0
for c in range(1, 5):
num = int(input('Informe um numero: '))
s += num
print('\nA soma total dos numeros foi de {}'.format(s))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/arquivo de cores.py
|
class cor:
BLACK = '\033[30m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
PURPLE = '\033[95m'
CYAN = '\033[96m'
GRAY = '\33[37m'
WHITE = '\033[97m'
CLEAR = '\033[m'
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex054_Grupo da Maioridade.py
|
from datetime import date
nascimento = []
for c in range(7):
nascimento.append(int(input('Informe o ano que nasceu há {}° pessoa: '.format(c+1))))
for c in range(len(nascimento)):
anoatual = date.today().year
idade = anoatual - nascimento[c]
if idade >= 18:
print('\nVocê é MAIOR de idade! --> Sua idade {}'.format(idade))
else:
print('\nVocê é MENOR de idade! --> Sua idade {}'.format(idade))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex050_Soma dos pares.py
|
soma = 0
for c in range(1, 7):
numero = float(input('Informe o {}° número: '.format(c)))
if numero % 2 == 0:
soma = numero + soma
print('A soma dos números pares digitados é : {}'.format(soma))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex009.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video
n1 = int(input('Digite o numero para ver sua tabuada:'))
print('_' * 13)
print(' {} X 1 = {:2}'.format(n1, (n1 * 1)))
print(' {} X 2 = {:2}'.format(n1, (n1 * 2)))
print(' {} X 3 = {:2}'.format(n1, (n1 * 3)))
print(' {} X 4 = {:2}'.format(n1, (n1 * 4)))
print(' {} X 5 = {:2}'.format(n1, (n1 * 5)))
print(' {} X 6 = {:2}'.format(n1, (n1 * 6)))
print(' {} X 7 = {:2}'.format(n1, (n1 * 7)))
print(' {} X 8 = {:2}'.format(n1, (n1 * 8)))
print(' {} X 9 = {:2}'.format(n1, (n1 * 9)))
print(' {} X 10 = {}'.format(n1, (n1 * 10)))
print('_' * 13)
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex017.py
|
<filename>Exercicios/ex017.py
from math import hypot
catetoadijacente = float(input('determine o cateto adijacente: '))
catetooposto = float(input('derermine o cateto oposto: '))
hipotenusa = hypot(catetoadijacente, catetooposto)
print('a hipotenusa mede {:.2f}'.format(hipotenusa))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula08b.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video
import emoji
print(emoji.emojize('olá,mundo :smiling_imp:', use_aliases=True))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex014.py
|
g = float(input('informe os graus °C'))
print('a temperatura {}°C corresponde a {}°F'.format(g, (g * 9/5) + 32))
print('e tam bem corresponde ha {}°K'.format(g+273.15))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex007.py
|
<reponame>GabrielMazzuchello/Curso-Em-Video<filename>Exercicios/ex007.py
n1 = float(input('qual é a primeira nota do aluno?'))
n2 = float(input('qual e a segunda nota do aluno?'))
print('a media entre {} e {} corresponde ha {}'.format(n1, n2, ((n1+n2)/2)))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex016.py
|
<gh_stars>1-10
import math
numero1 = float(input('digite um numero: '))
print('o numero {} tem a parte inteira de {}'.format(numero1, (math.trunc(numero1))))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex001.py
|
<gh_stars>1-10
nome = input ('qual e seu nome? ')
print('E um prazer em te conhecer {}{}{}'.format('\033[0;31m', nome, '\033[m'))
|