repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
koukyo1994/streamlit-audio | components/__init__.py | <filename>components/__init__.py<gh_stars>1-10
from .augmentation import augmentations_on_wave
from .base import *
from .plots import waveplot, specshow, waveplot_with_annotation, specshow_with_annotation
from .preprocessing import preprocess_on_wave
|
alanlivio/website | build_tex_partials_from_mkdocs_yml.py | <gh_stars>0
#!/bin/env python
import os
from jinja2 import FileSystemLoader
from latex import jinja2
import yaml
def build_tex_partials_from_mkdocs_yml():
    """Render the LaTeX 'shortbio' partial from researcher data in mkdocs.yml.

    Reads mkdocs.yml, extracts ``extra.reseacher`` (sic: the key is spelled
    this way in the YAML file, so it must stay as-is here), renders
    ``latex/partials/shortbio.tex.jinja`` with that data and writes the
    result to ``latex/partials/shortbio.tex``.

    Raises:
        KeyError: if mkdocs.yml has no ``extra.reseacher`` section.
    """
    # Load researcher data from mkdocs.yml.
    with open('mkdocs.yml', 'r') as yml_file:
        # mkdocs.yml uses custom '!' tags (e.g. for emoji extensions); map
        # every unknown tag to None so safe_load does not fail on them.
        yaml.add_multi_constructor(
            'tag:', lambda loader, suffix, node: None, Loader=yaml.SafeLoader)
        data = yaml.safe_load(yml_file)
    reseacher = data['extra']['reseacher']
    # Fill the .tex.jinja templates with the researcher data.
    # https://pythonhosted.org/latex/
    latex_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "latex")
    env = jinja2.make_env(loader=FileSystemLoader(latex_dir))
    shortbio = env.get_template('partials/shortbio.tex.jinja').render(**reseacher)
    with open(os.path.join("latex", "partials", "shortbio.tex"), "w") as out_file:
        out_file.write(shortbio)


if __name__ == '__main__':
    build_tex_partials_from_mkdocs_yml()
|
kristofbc/physical-interaction-video-prediction | src/models/predict_model.py | <reponame>kristofbc/physical-interaction-video-prediction
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Predict the next n frames from a trained model
# ==============================================
import numpy as np
import chainer
import chainer.functions as F
from train_model import Model
from train_model import concat_examples
import click
import os
import csv
import logging
import glob
import subprocess
import six.moves.cPickle as pickle
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageChops
import imageio
# ========================
# Helpers functions (hlpr)
# ========================
def get_data_info(data_dir, data_index):
    """Load one test sample (images, action, state) described by the data map.

    The map file ``<data_dir>/map.csv`` has a header row followed by one row
    per sample; columns 2/3/4/6 are .npy file names relative to ``data_dir``
    and column 5 is a bitmap path (or glob pattern) for the ground truth.

    Args:
        data_dir: directory containing map.csv and the referenced .npy files
        data_index: 0-based index of the sample to load
    Returns:
        Tuple (image, image_pred, image_bitmap_pred, action, state) where all
        but ``image_bitmap_pred`` (a string) are float32 numpy arrays.
    Raises:
        ValueError: if the map is empty or the index is out of range.
    """
    # BUG FIX: the csv module needs text mode on Python 3 ('rb' used to fail
    # with "iterator should return strings").
    with open(os.path.join(data_dir, 'map.csv'), 'r') as f:
        data_map = list(csv.reader(f))
    if len(data_map) <= 1:  # empty or only header
        raise ValueError("No file map found")
    # Get the requested data to test; +1 skips the header row.
    data_index = int(data_index) + 1
    if data_index > len(data_map) - 1:
        raise ValueError("Data index {} is out of range for available data".format(data_index))
    row = data_map[data_index]
    image = np.float32(np.load(os.path.join(data_dir, row[2])))
    image_pred = np.float32(np.load(os.path.join(data_dir, row[6])))
    image_bitmap_pred = row[5]
    action = np.float32(np.load(os.path.join(data_dir, row[3])))
    state = np.float32(np.load(os.path.join(data_dir, row[4])))
    return image, image_pred, image_bitmap_pred, action, state
# =================================================
# Main entry point of the training processes (main)
# =================================================
@click.command()
@click.argument('model_dir', type=click.STRING)
@click.argument('model_name', type=click.STRING)
@click.argument('data_index', type=click.INT)
@click.option('--models_dir', type=click.Path(exists=True), default='models', help='Directory containing the models.')
@click.option('--data_dir', type=click.Path(exists=True), default='data/processed/brain-robotics-data/push/push_testnovel', help='Directory containing data.')
@click.option('--time_step', type=click.INT, default=8, help='Number of time steps to predict.')
@click.option('--model_type', type=click.STRING, default='', help='Type of the trained model.')
@click.option('--schedsamp_k', type=click.FLOAT, default=-1, help='The k parameter for schedules sampling. -1 for no scheduled sampling.')
@click.option('--context_frames', type=click.INT, default=2, help='Number of frames before predictions.')
@click.option('--use_state', type=click.INT, default=1, help='Whether or not to give the state+action to the model.')
@click.option('--num_masks', type=click.INT, default=10, help='Number of masks, usually 1 for DNA, 10 for CDNA, STP.')
@click.option('--image_height', type=click.INT, default=64, help='Height of one predicted frame.')
@click.option('--image_width', type=click.INT, default=64, help='Width of one predicted frame.')
@click.option('--original_image_height', type=click.INT, default=512, help='Height of one predicted frame.')
@click.option('--original_image_width', type=click.INT, default=640, help='Width of one predicted frame.')
@click.option('--downscale_factor', type=click.FLOAT, default=0.5, help='Downscale the image by this factor.')
@click.option('--gpu', type=click.INT, default=-1, help='ID of the gpu to use')
@click.option('--gif', type=click.INT, default=1, help='Create a GIF of the predicted result.')
def main(model_dir, model_name, data_index, models_dir, data_dir, time_step, model_type, schedsamp_k, context_frames, use_state, num_masks, image_height, image_width, original_image_height, original_image_width, downscale_factor, gpu, gif):
    """ Predict the next {time_step} frame based on a trained {model}.

    Loads one test sample, runs the trained model forward, then renders a
    composite image (top row: ground truth, bottom row: prediction) and,
    when --gif=1, an animated GIF of both sequences in an extra column.
    """
    logger = logging.getLogger(__name__)

    path = models_dir + '/' + model_dir
    if not os.path.exists(path + '/' + model_name):
        raise ValueError("Directory {} does not exists".format(path))
    if not os.path.exists(data_dir):
        raise ValueError("Directory {} does not exists".format(data_dir))

    logger.info("Loading data {}".format(data_index))
    image, image_pred, image_bitmap_pred, action, state = get_data_info(data_dir, data_index)
    img_pred, act_pred, sta_pred = concat_examples([[image_pred, action, state]])

    # Extract the information about the model from its directory name
    # (expected pattern: four dash-separated parts, type in third position)
    # unless --model_type was given explicitly.
    if model_type == '':
        split_name = model_dir.split("-")
        if len(split_name) != 4:
            raise ValueError("Model {} is not recognized, use --model_type to describe the type".format(model_dir))
        model_type = split_name[2]

    # Load the model for prediction
    logger.info("Importing model {0}/{1} of type {2}".format(model_dir, model_name, model_type))
    model = Model(
        num_masks=num_masks,
        is_cdna=model_type == 'CDNA',
        is_dna=model_type == 'DNA',
        is_stp=model_type == 'STP',
        use_state=use_state,
        scheduled_sampling_k=schedsamp_k,
        num_frame_before_prediction=context_frames,
        prefix='predict'
    )
    chainer.serializers.load_npz(path + '/' + model_name, model)
    logger.info("Model imported successfully")

    if gpu >= 0:
        # BUG FIX: 'cuda' was used without being imported in this module;
        # use the chainer.cuda namespace that is already available.
        chainer.cuda.get_device(gpu).use()
        model.to_gpu()

    # Resize the image to fit the trained dimension
    resize_img_pred = []
    for i in xrange(len(img_pred)):
        resize = F.resize_images(img_pred[i], (image_height, image_width))
        # Scale pixel values from [0, 255] down to [0, 1] as the model expects.
        resize = F.cast(resize, np.float32) / 255.0
        resize_img_pred.append(resize.data)
    resize_img_pred = np.asarray(resize_img_pred, dtype=np.float32)

    # Predict the new images; the forward pass fills model.gen_images
    # (the returned loss is not used here).
    with chainer.using_config('train', False):
        loss = model([resize_img_pred, act_pred, sta_pred], 0)
    predicted_images = model.gen_images

    # Normalize each predicted frame to the full [0, 255] range for display.
    resize_predicted_images = []
    for i in xrange(len(predicted_images)):
        resize = predicted_images[i].data[0]
        resize -= resize.min()
        resize /= resize.max()
        resize *= 255.0
        resize_predicted_images.append(resize.astype(np.uint8))

    # Print the images horizontally
    # First row is the time_step
    # Second row is the ground truth
    # Third row is the generated image
    frame_width = int(original_image_width * downscale_factor)
    frame_height = int(original_image_height * downscale_factor)
    text_width_x = frame_width
    text_height_x = 50
    text_width_y = frame_height
    text_height_y = 50
    total_width = frame_width * time_step + text_height_x
    total_height = frame_height * 2 + text_height_x
    if gif == 1:
        # Reserve an extra column on the right for the animated sequence.
        total_width = total_width + frame_width
    new_image = Image.new('RGBA', (total_width, total_height))

    # Text label x (column headers)
    font_size = 18
    font = ImageFont.truetype('Arial', font_size)
    label = ["Time = {}".format(i+1) for i in xrange(time_step)]
    if gif == 1:
        label.append("Animated sequence")
    for i in xrange(len(label)):
        text = label[i]
        text_container_img = Image.new('RGB', (text_width_x, text_height_x), 'white')
        text_container_draw = ImageDraw.Draw(text_container_img)
        w, h = text_container_draw.textsize(text, font=font)
        text_container_draw.text(((text_width_x-w)/2, (text_height_x-h)/2), text, fill='black', font=font)
        new_image.paste(text_container_img, (text_height_x + text_width_x*i, 0))

    # Text label y (row headers, rotated 90 degrees)
    label = ["Ground truth", "Prediction"]
    for i in xrange(len(label)):
        text = label[i]
        text_container_img = Image.new('RGB', (text_width_y, text_height_y), 'white')
        text_container_draw = ImageDraw.Draw(text_container_img)
        w, h = text_container_draw.textsize(text, font=font)
        text_container_draw.text(((text_width_y-w)/2, (text_height_y-h)/2), text, fill='black', font=font)
        text_container_img = text_container_img.rotate(90, expand=1)
        new_image.paste(text_container_img, (0, text_height_x + text_width_y * i))

    # Ground-truth row, loaded from the bitmaps referenced by the data map.
    ground_truth_images_path = glob.glob(data_dir + '/' + image_bitmap_pred)
    original_gif = []
    for i in xrange(min(time_step, len(ground_truth_images_path))):
        img = Image.open(ground_truth_images_path[i]).convert('RGB')
        if downscale_factor != 1:
            img = img.resize((frame_width, frame_height), Image.ANTIALIAS)
        new_image.paste(img, (text_height_x + frame_width*i, text_height_x))
        original_gif.append(img)

    # Prediction row
    predicted_gif = []
    for i in xrange(len(resize_predicted_images)):
        img = resize_predicted_images[i]
        img = np.rollaxis(img, 0, 3)  # CHW -> HWC for PIL
        img = Image.fromarray(img, 'RGB')
        # Resize the image to the original dimensions
        img = img.resize((original_image_width, original_image_height), Image.ANTIALIAS)
        if downscale_factor != 1:
            img = img.resize((frame_width, frame_height), Image.ANTIALIAS)
        new_image.paste(img, (text_height_x + frame_width*i, frame_height + text_height_x))
        predicted_gif.append(img)

    # If enabled, create a GIF from the sequence of original and predicted image
    gif_frames = []
    if gif == 1:
        # Reshape each frame to the whole scene width/height so the animation
        # lines up with the right-most column of the composite image.
        for img in original_gif:
            reshaped_original_gif_img = Image.new('RGB', (total_width, total_height))
            reshaped_original_gif_img.paste(img, (text_height_x + frame_width * time_step, text_height_x))
            gif_frames.append(reshaped_original_gif_img)
        for img in predicted_gif:
            reshaped_predicted_gif_img = Image.new('RGB', (total_width, total_height))
            reshaped_predicted_gif_img.paste(img, (text_height_x + frame_width * time_step, text_height_x + frame_height))
            gif_frames.append(reshaped_predicted_gif_img)
        # Avoid flickering when gif is done: add a still under the gif
        new_image.paste(original_gif[0], (text_height_x + frame_width * time_step, text_height_x))
        new_image.paste(predicted_gif[0], (text_height_x + frame_width * time_step, text_height_x + frame_height))

    if gif == 1:
        new_image.save(path + '/prediction-' + str(time_step) + '-' + model_name + '.gif', save_all=True, append_images=gif_frames, transparency=0)
    else:
        new_image.save(path + '/prediction-' + str(time_step) + '-' + model_name + '.png')
    print(model)
if __name__ == '__main__':
    # Log with timestamp, logger name and level; goes to stderr by default.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    #logging.basicConfig(level=logging.INFO, format=log_fmt, stream=sys.stdout)
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
|
kristofbc/physical-interaction-video-prediction | models/npz_keys.py | <filename>models/npz_keys.py<gh_stars>1-10
import sys
import numpy
if __name__ == '__main__':
    # NpzFile supports the context-manager protocol; the with-block closes
    # the underlying file handle when done.
    with numpy.load(sys.argv[1]) as f:
        # BUG FIX: materialize the keys -- on Python 3 f.keys() is a view
        # object whose repr hides the archive member names.
        print(list(f.keys()))
|
kristofbc/physical-interaction-video-prediction | src/models/train_model.py | <filename>src/models/train_model.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Implementation in Chainer of https://github.com/tensorflow/models/tree/master/video_prediction
# ==============================================================================================
import types
import random
import math
from math import floor, log
import numpy as np
import subprocess
try:
    import cupy
except ImportError:
    # BUG FIX: catch only ImportError instead of a bare except, so unrelated
    # errors are not silently swallowed. Fall back to NumPy so CPU-only
    # environments still work (cupy mirrors the numpy API).
    cupy = np
import chainer
from chainer import cuda
from chainer import variable
import chainer.functions as F
import chainer.links as L
from chainer.functions.connection import convolution_2d
from chainer import initializers
from chainer import serializers
from chainer.functions.math import square
from chainer.functions.activation import lstm
import sys
import os
import time
import glob
import csv
import click
import logging
import matplotlib.pyplot as plt
# Amount to use when lower bounding Variables
RELU_SHIFT = 1e-12
# Kernel size for DNA and CDNA
DNA_KERN_SIZE = 5
# =============================================
# Helpers functions used accross scripts (hlpe)
# =============================================
def concat_examples(batch):
    """Convert a mini-batch of (image, action, state) triples into per-timestep arrays.

    Args:
        batch: sequence of [images, actions, states] per sample, where images
            is (T, H, W, C), actions is (T, A) and states is (T, S).
    Returns:
        Tuple of numpy arrays:
            images: (T, B, C, H, W) -- channel-first, Chainer's layout
            actions: (T, B, A)
            states: (T, B, S)
    """
    # BUG FIX: the original index loop used xrange, which does not exist on
    # Python 3; comprehensions over the batch work on both Python 2 and 3.
    img_training_set = np.array([example[0] for example in batch])
    act_training_set = np.array([example[1] for example in batch])
    sta_training_set = np.array([example[2] for example in batch])
    # Split the actions, states and images into timestep
    act_training_set = np.split(ary=act_training_set, indices_or_sections=act_training_set.shape[1], axis=1)
    act_training_set = [np.squeeze(act, axis=1) for act in act_training_set]
    sta_training_set = np.split(ary=sta_training_set, indices_or_sections=sta_training_set.shape[1], axis=1)
    sta_training_set = [np.squeeze(sta, axis=1) for sta in sta_training_set]
    img_training_set = np.split(ary=img_training_set, indices_or_sections=img_training_set.shape[1], axis=1)
    # Reshape the img training set to a Chainer compatible tensor : batch x channel x height x width instead of Tensorflow's: batch x height x width x channel
    img_training_set = [np.rollaxis(np.squeeze(img, axis=1), 3, 1) for img in img_training_set]
    return np.array(img_training_set), np.array(act_training_set), np.array(sta_training_set)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """
    Sample batch with specified mix of ground truth and generated data points.
    e.g: the final matrix is a mix of vectors from the ground_truth (gt) and prediction (p)
    [gt1, gt2, gt3, gt4, gt5, gt6, gt7, gt8, gt9, gt10] = ground truth
    [p1, p2, p3, p4, p5, p6] = prediction
    [p1, gt2, gt3, gt4, p5, p6, gt7, gt8, gt9, gt10] = returns
    Args:
        ground_truth_x: tensor of ground-truth data point
        generated_x: tensor of generated data point
        batch_size: batch size
        num_ground_truth: number of ground-truth examples to include in batch
    Returns:
        New batch with num_ground_truth samples from ground_truth_x and the rest from generated_x
    """
    # Remember the device of the generated data so the stitched result can be
    # moved back to it at the end; all index work below happens on the CPU.
    xp = chainer.cuda.get_array_module(generated_x.data)
    ground_truth_x = chainer.cuda.to_cpu(ground_truth_x)
    generated_x = chainer.cuda.to_cpu(generated_x.data)
    # Randomly choose which batch rows come from the ground truth vs the model.
    idx = np.arange(int(batch_size))
    np.random.shuffle(idx)
    ground_truth_idx = np.array(np.take(idx, np.arange(num_ground_truth)))
    generated_idx = np.array(np.take(idx, np.arange(num_ground_truth, int(batch_size))))
    # Flatten each sample so rows can be gathered by index.
    reshaped_ground_truth_x = F.reshape(ground_truth_x, (int(batch_size), -1))
    reshaped_genetated_x = F.reshape(generated_x, (int(batch_size), -1))
    ground_truth_examps = np.take(reshaped_ground_truth_x.data, ground_truth_idx, axis=0)
    generated_examps = np.take(reshaped_genetated_x.data, generated_idx, axis=0)
    # Tag each index with its source (0 = ground truth, 1 = generated), then
    # sort so rows are emitted back in original batch order.
    index_a = np.vstack((ground_truth_idx, np.zeros_like(ground_truth_idx)))
    index_b = np.vstack((generated_idx, np.ones_like(generated_idx)))
    ground_truth_generated_stacked = np.hstack((ground_truth_idx, generated_idx))
    ground_truth_generated_stacked_sorted = np.argsort(ground_truth_generated_stacked)
    order = np.hstack((index_a, index_b))[:, ground_truth_generated_stacked_sorted]
    # Stitch the final batch row by row according to each row's source tag.
    # NOTE(review): xrange is Python-2-only; range would be needed on Python 3.
    stitched = []
    for i in xrange(len(order[0])):
        if order[1][i] == 0:
            pos = np.where(ground_truth_idx == i)
            stitched.append(ground_truth_examps[pos])
            continue
        else:
            pos = np.where(generated_idx == i)
            stitched.append(generated_examps[pos])
            continue
    stitched = np.array(stitched, dtype=np.float32)
    # Restore the original 4D tensor shape (batch x channel x height x width).
    stitched = np.reshape(stitched, (ground_truth_x.shape[0], ground_truth_x.shape[1], ground_truth_x.shape[2], ground_truth_x.shape[3]))
    return xp.array(stitched)
def peak_signal_to_noise_ratio(true, pred):
    """
    Image quality metric based on maximal signal power vs. power of the noise
    Args:
        true: the ground truth image
        pred: the predicted image
    Returns:
        Peak signal to noise ratio (PSNR)
    """
    mse = F.mean_squared_error(true, pred)
    # log10(x) expressed as ln(x) / ln(10), since F only exposes the natural log.
    return 10.0 * F.log(1.0 / mse) / log(10.0)
def broadcast_reshape(x, y, axis=0):
    """
    Reshape then broadcast y so it matches the shape of x
    Args:
        x: the broadcasted (target-shape variable)
        y: the broadcastee
        axis: position at which y's dimensions are inserted
    Results:
        Output variable of same shape of x
    """
    # Pad y's shape with singleton dimensions on both sides so that its own
    # dimensions line up with x's dimensions starting at `axis`.
    leading = (1,) * axis
    trailing = (1,) * (len(x.shape) - axis - len(y.shape))
    padded_shape = leading + tuple(y.shape) + trailing
    return F.broadcast_to(F.reshape(y, padded_shape), x.shape)
def broadcasted_division(x, y, axis=0):
    """
    Apply a division x/y where y is broadcasted to x to be able to complete the operation
    Args:
        x: the numerator
        y: the denominator
        axis: where the reshape will be performed
    Results:
        Output variable of same shape of x
    """
    denominator = broadcast_reshape(x, y, axis)
    return x / denominator
def broadcast_scale(x, y, axis=0):
    """
    Apply a multiplication x*y where y is broadcasted to x to be able to complete the operation
    Args:
        x: left hand operation
        y: right hand operation
        axis: where the reshape will be performed
    Results:
        Output variable of same shape of x
    """
    factor = broadcast_reshape(x, y, axis)
    return x * factor
# =============
# Chains (chns)
# =============
class LayerNormalizationConv2D(chainer.Chain):
    """Layer normalization applied over all features of a 2D convolution output."""

    def __init__(self):
        super(LayerNormalizationConv2D, self).__init__()
        with self.init_scope():
            self.norm = L.LayerNormalization()

    def __call__(self, inputs):
        """
        Apply a "layer normalization" on the result of a convolution
        Args:
            inputs: input tensor, 4D, batch x channel x height x width
        Returns:
            Output variable of shape (batch x channels x height x width)
        """
        n, c, h, w = inputs.shape[0:4]
        # LayerNormalization expects a 2D matrix: flatten all feature
        # dimensions per sample, normalize, then restore the 4D layout.
        flat = F.reshape(inputs, (n, -1))
        flat = self.norm(flat)
        return F.reshape(flat, (n, c, h, w))
# =============
# Models (mdls)
# =============
class BasicConvLSTMCell(chainer.Chain):
    """ Stateless convolutional LSTM, as seen in lstm_op.py from video_prediction model """

    def __init__(self, out_size=None, filter_size=5):
        # out_size: number of channels of the cell/hidden state.
        # filter_size: spatial size of the square convolution kernel.
        super(BasicConvLSTMCell, self).__init__()
        with self.init_scope():
            # @TODO: maybe provide in channels because the concatenation
            # One convolution computes all four gates at once (4*out_size channels).
            # NOTE(review): pad=filter_size/2 relies on Python 2 integer
            # division; on Python 3 this yields a float (filter_size // 2
            # would be needed there).
            self.conv = L.Convolution2D(4*out_size, (filter_size, filter_size), pad=filter_size/2)
        self.out_size = out_size
        self.filter_size = filter_size
        self.reset_state()

    def reset_state(self):
        # Drop cell (c) and hidden (h) state; they are lazily re-created with
        # the right shape on the next call.
        self.c = None
        self.h = None

    def __call__(self, inputs, forget_bias=1.0):
        """Basic LSTM recurrent network cell, with 2D convolution connctions.
        We add forget_bias (default: 1) to the biases of the forget gate in order to
        reduce the scale of forgetting in the beginning of the training.
        It does not allow cell clipping, a projection layer, and does not
        use peep-hole connections: it is the basic baseline.
        Args:
            inputs: input Tensor, 4D, batch x channels x height x width
            forget_bias: the initial value of the forget biases.
        Returns:
            a tuple of tensors representing output and the new state.
        """
        # In Tensorflow: batch x height x width x channels
        # In Chainer: batch x channel x height x width
        # Create a state based on Finn's implementation
        xp = chainer.cuda.get_array_module(*inputs.data)
        if self.c is None:
            self.c = xp.zeros((inputs.shape[0], self.out_size, inputs.shape[2], inputs.shape[3]), dtype=inputs[0].data.dtype)
        if self.h is None:
            self.h = xp.zeros((inputs.shape[0], self.out_size, inputs.shape[2], inputs.shape[3]), dtype=inputs[0].data.dtype)
        #c, h = F.split_axis(state, indices_or_sections=2, axis=1)
        #inputs_h = np.concatenate((inputs, h), axis=1)
        # Concatenate input and previous hidden state along the channel axis.
        inputs_h = F.concat((inputs, self.h), axis=1)
        # Parameters of gates are concatenated into one conv for efficiency
        #j_i_f_o = L.Convolution2D(in_channels=inputs_h.shape[1], out_channels=4*num_channels, ksize=(filter_size, filter_size), pad=filter_size/2)(inputs_h)
        j_i_f_o = self.conv(inputs_h)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        j, i, f, o = F.split_axis(j_i_f_o, indices_or_sections=4, axis=1)
        # Standard LSTM cell update with the forget-bias shift.
        self.c = self.c * F.sigmoid(f + forget_bias) + F.sigmoid(i) * F.tanh(j)
        self.h = F.tanh(self.c) * F.sigmoid(o)
        #return new_h, np.concatenate((new_c, new_h), axis=1)
        #return new_h, F.concat((new_c, new_h), axis=1)
        return self.h
class StatelessCDNA(chainer.Chain):
    """
    Build convolutional lstm video predictor using CDNA
    * Because the CDNA does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """

    def __init__(self, num_masks):
        super(StatelessCDNA, self).__init__()
        with self.init_scope():
            # 1x1 deconvolution producing the 3-channel generated frame.
            self.enc7 = L.Deconvolution2D(in_channels=64, out_channels=3, ksize=(1,1), stride=1)
            # Linear layer predicting one DNA_KERN_SIZE x DNA_KERN_SIZE kernel per mask.
            self.cdna_kerns = L.Linear(in_size=None, out_size=DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks)
        self.num_masks = num_masks

    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessCDNA.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        img_height = prev_image.shape[2]
        img_width = prev_image.shape[3]
        # CDNA specific
        enc7 = self.enc7(enc6)
        enc7 = F.relu(enc7)
        # First transformation candidate is the directly generated frame.
        transformed_list = list([F.sigmoid(enc7)])
        # CDNA specific
        # Predict kernels using linear function of last layer
        cdna_input = F.reshape(hidden5, (int(batch_size), -1))
        cdna_kerns = self.cdna_kerns(cdna_input)
        # Reshape and normalize
        # B x C x H x W => B x NUM_MASKS x 1 x H x W
        cdna_kerns = F.reshape(cdna_kerns, (int(batch_size), self.num_masks, 1, DNA_KERN_SIZE, DNA_KERN_SIZE))
        # Clamp away from zero, then normalize each kernel to sum to 1.
        cdna_kerns = F.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
        norm_factor = F.sum(cdna_kerns, (2, 3, 4), keepdims=True)
        cdna_kerns = broadcasted_division(cdna_kerns, norm_factor)
        # Treat the color channel dimension as the batch dimension since the same
        # transformation is applied to each color channel.
        # Treat the batch dimension as the channel dimension so that
        # F.depthwise_convolution_2d can apply a different transformation to each sample.
        cdna_kerns = F.reshape(cdna_kerns, (int(batch_size), self.num_masks, DNA_KERN_SIZE, DNA_KERN_SIZE))
        cdna_kerns = F.transpose(cdna_kerns, (1, 0, 2, 3))
        # Swap the batch and channel dimension.
        prev_image = F.transpose(prev_image, (1, 0, 2, 3))
        # Transform the image.
        # NOTE(review): pad=DNA_KERN_SIZE/2 relies on Python 2 integer division.
        transformed = F.depthwise_convolution_2d(prev_image, cdna_kerns, stride=(1, 1), pad=DNA_KERN_SIZE/2)
        # Transpose the dimensions where they belong.
        transformed = F.reshape(transformed, (color_channels, int(batch_size), self.num_masks, img_height, img_width))
        transformed = F.transpose(transformed, (2, 1, 0, 3, 4))
        # Split out one transformed image per mask.
        transformed = F.split_axis(transformed, indices_or_sections=self.num_masks, axis=0)
        transformed = [F.squeeze(t, axis=0) for t in transformed]
        transformed_list += transformed
        return transformed_list, enc7
class StatelessDNA(chainer.Chain):
    """
    Build convolutional lstm video predictor using DNA
    * Because the DNA does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """

    def __init__(self, num_masks):
        super(StatelessDNA, self).__init__()
        with self.init_scope():
            # 1x1 deconvolution producing one channel per kernel position
            # (DNA_KERN_SIZE**2 = 25 channels).
            self.enc7 = L.Deconvolution2D(DNA_KERN_SIZE**2, (1, 1), stride=1)
        self.num_masks = num_masks

    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessDNA.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        # DNA specific
        enc7 = self.enc7(enc6)
        enc7 = F.relu(enc7)
        if num_masks != 1:
            raise ValueError('Only one mask is supported for DNA model.')
        # Construct translated images: one shifted copy of the previous image
        # per position of the DNA kernel window.
        img_height = prev_image.shape[2]
        img_width = prev_image.shape[3]
        prev_image_pad = F.pad(prev_image, pad_width=[[0,0], [0,0], [2,2], [2,2]], mode='constant', constant_values=0)
        kernel_inputs = []
        for xkern in range(DNA_KERN_SIZE):
            for ykern in range(DNA_KERN_SIZE):
                #tmp = F.get_item(prev_image_pad, list([slice(0,prev_image_pad.shape[0]), slice(0,prev_image_pad.shape[1]), slice(xkern,img_height), slice(ykern,img_width)]))
                tmp = prev_image_pad[:,:,xkern:img_height, ykern:img_width]
                # ** Added this operation to make sure the size was still the original one!
                tmp = F.pad(tmp, [[0,0], [0,0], [0, xkern], [0, ykern]], mode='constant', constant_values=0)
                tmp = F.expand_dims(tmp, axis=1) # Previously axis=3 but our channel is on axis=1 ? ok!
                kernel_inputs.append(tmp.data)
        kernel_inputs = F.concat(kernel_inputs, axis=1) # Previously axis=3 but our channel us on axis=1 ? ok!
        # Normalize channels to 1
        kernel_normalized = F.relu(enc7 - RELU_SHIFT) + RELU_SHIFT
        kernel_normalized_sum = F.sum(kernel_normalized, axis=1, keepdims=True) # Previously axis=3 but our channel are on axis 1 ? ok!
        kernel_normalized = broadcasted_division(kernel_normalized, kernel_normalized_sum)
        kernel_normalized = F.expand_dims(kernel_normalized, axis=2)
        #kernel_normalized = F.scale(kernel_inputs, kernel_normalized, axis=0)
        # Weight each shifted copy by its per-pixel kernel weight and sum them.
        kernel_normalized = broadcast_scale(kernel_inputs, kernel_normalized)
        kernel_normalized = F.sum(kernel_normalized, axis=1, keepdims=False)
        transformed = [kernel_normalized]
        return transformed, enc7
class StatelessSTP(chainer.Chain):
    """
    Build convolutional lstm video predictor using STP
    * Because the STP does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """

    def __init__(self, num_masks):
        super(StatelessSTP, self).__init__()
        with self.init_scope():
            # 1x1 deconvolution producing the 3-channel generated frame.
            self.enc7 = L.Deconvolution2D(3, (1, 1), stride=1)
            # Hidden layer feeding the affine-parameter predictor.
            self.stp_input = L.Linear(100)
            # Predicts the 6 affine transform parameters (2x3 matrix).
            self.identity_params = L.Linear(6)
        # NOTE(review): unlike CDNA/DNA, num_masks is not stored here; the
        # count is taken from the num_masks argument of __call__ instead.

    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessSTP.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        xp = chainer.cuda.get_array_module(enc6.data)
        # STP specific
        enc7 = self.enc7(enc6)
        # First candidate is the directly generated frame.
        transformed = list([F.sigmoid(enc7)])
        stp_input0 = F.reshape(hidden5, (int(batch_size), -1))
        stp_input1 = self.stp_input(stp_input0)
        stp_input1 = F.relu(stp_input1)
        # Identity affine transform (2x3, row-major); predicted params are
        # added to it so the network learns deviations from identity.
        identity_params = np.array([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]], dtype=np.float32)
        identity_params = np.repeat(identity_params, int(batch_size), axis=0)
        identity_params = variable.Variable(xp.array(identity_params))
        stp_transformations = []
        for i in range(num_masks-1):
            params = self.identity_params(stp_input1)
            params = params + identity_params
            params = F.reshape(params, (int(params.shape[0]), 2, 3))
            # Warp the previous image with the predicted affine transform.
            grid = F.spatial_transformer_grid(params, (prev_image.shape[2], prev_image.shape[3]))
            trans = F.spatial_transformer_sampler(prev_image, grid)
            stp_transformations.append(trans)
        transformed += stp_transformations
        return transformed, enc7
class Model(chainer.Chain):
"""
This Model wrap other models like CDNA, STP or DNA.
It calls their training and get the generated images and states, it then compute the losses and other various parameters
"""
def __init__(self, num_masks, is_cdna=True, is_dna=False, is_stp=False, use_state=True, scheduled_sampling_k=-1, num_frame_before_prediction=2, prefix=None):
    """
    Initialize a CDNA, STP or DNA through this 'wrapper' Model
    Args:
        num_masks: number of compositing masks used by the wrapped model
        is_cdna: if the model should be an extension of CDNA
        is_dna: if the model should be an extension of DNA
        is_stp: if the model should be an extension of STP
        use_state: if the state should be concatenated
        scheduled_sampling_k: schedule sampling hyperparameter k
        num_frame_before_prediction: number of frame before prediction
        prefix: appended to the results to differentiate between training and validation
    """
    super(Model, self).__init__()
    with self.init_scope():
        # Encoder / decoder convolution stack; the Deconvolution outsize
        # values assume 64x64 input frames.
        self.enc0 = L.Convolution2D(32, (5, 5), stride=2, pad=2)
        self.enc1 = L.Convolution2D(32, (3, 3), stride=2, pad=1)
        self.enc2 = L.Convolution2D(64, (3, 3), stride=2, pad=1)
        self.enc3 = L.Convolution2D(64, (1, 1), stride=1)
        self.enc4 = L.Deconvolution2D(128, (3, 3), stride=2, outsize=(16,16), pad=1)
        self.enc5 = L.Deconvolution2D(96, (3, 3), stride=2, outsize=(32,32), pad=1)
        self.enc6 = L.Deconvolution2D(64, (3, 3), stride=2, outsize=(64, 64), pad=1)
        # Convolutional LSTMs interleaved between the encoder/decoder stages.
        self.lstm1 = BasicConvLSTMCell(32)
        self.lstm2 = BasicConvLSTMCell(32)
        self.lstm3 = BasicConvLSTMCell(64)
        self.lstm4 = BasicConvLSTMCell(64)
        self.lstm5 = BasicConvLSTMCell(128)
        self.lstm6 = BasicConvLSTMCell(64)
        self.lstm7 = BasicConvLSTMCell(32)
        # Layer normalization applied after selected convolutions / LSTMs.
        self.norm_enc0 = LayerNormalizationConv2D()
        self.norm_enc6 = LayerNormalizationConv2D()
        self.hidden1 = LayerNormalizationConv2D()
        self.hidden2 = LayerNormalizationConv2D()
        self.hidden3 = LayerNormalizationConv2D()
        self.hidden4 = LayerNormalizationConv2D()
        self.hidden5 = LayerNormalizationConv2D()
        self.hidden6 = LayerNormalizationConv2D()
        self.hidden7 = LayerNormalizationConv2D()
        # num_masks transformation masks plus one for the background.
        self.masks = L.Deconvolution2D(num_masks+1, (1, 1), stride=1)
        self.current_state = L.Linear(5)
        # Select the wrapped predictor; exactly one of the is_* flags
        # should be set.
        model = None
        if is_cdna:
            model = StatelessCDNA(num_masks)
        elif is_stp:
            model = StatelessSTP(num_masks)
        elif is_dna:
            model = StatelessDNA(num_masks)
        if model is None:
            raise ValueError("No network specified")
        else:
            self.model = model
    self.num_masks = num_masks
    self.use_state = use_state
    self.scheduled_sampling_k = scheduled_sampling_k
    self.num_frame_before_prediction = num_frame_before_prediction
    self.prefix = prefix
    # Accumulators reset by reset_state() between sequences.
    self.loss = 0.0
    self.psnr_all = 0.0
    self.summaries = []
    self.conv_res = []

    # Condition ops callback
    # Each ops_* factory returns a callable taking the shared `args` dict
    # used when executing self.ops below.
    def ops_smear(use_state):
        # Tile the state+action vector over the spatial dimensions and
        # concatenate it to the feature map's channels.
        def ops(args):
            x = args.get("x")
            if use_state:
                state_action = args.get("state_action")
                batch_size = args.get("batch_size")
                smear = F.reshape(state_action, (int(batch_size), int(state_action.shape[1]), 1, 1))
                smear = F.tile(smear, (1, 1, int(x.shape[2]), int(x.shape[3])))
                x = F.concat((x, smear), axis=1)  # Previously axis=3 but our channel is on axis=1? ok
            return x
        return ops

    def ops_skip_connection(enc_idx):
        def ops(args):
            x = args.get("x")
            enc = args.get("encs")[enc_idx]
            # Skip connection (current input + target enc)
            x = F.concat((x, enc), axis=1)  # Previously axis=3 but our channel is on axis=1? ok!
            return x
        return ops

    def ops_save(name):
        # Stash the current tensor in the shared map under `name`.
        def ops(args):
            x = args.get("x")
            save_map = args.get("map")
            save_map[name] = x
            return x
        return ops

    # NOTE(review): ops_get is currently unused by self.ops below.
    def ops_get(name):
        def ops(args):
            save_map = args.get("map")
            return save_map[name]
        return ops

    # Create an executable array containing all the transformations
    self.ops = [
        [self.enc0, self.norm_enc0],
        [self.lstm1, self.hidden1, ops_save("hidden1"), self.lstm2, self.hidden2, ops_save("hidden2"), self.enc1],
        [self.lstm3, self.hidden3, ops_save("hidden3"), self.lstm4, self.hidden4, ops_save("hidden4"), self.enc2],
        [ops_smear(use_state), self.enc3],
        [self.lstm5, self.hidden5, ops_save("hidden5"), self.enc4],
        [self.lstm6, self.hidden6, ops_save("hidden6"), ops_skip_connection(1), self.enc5],
        [self.lstm7, self.hidden7, ops_save("hidden7"), ops_skip_connection(0), self.enc6, self.norm_enc6]
    ]
def reset_state(self):
    """Clear the accumulated training state of this model.

    Zeroes the loss/PSNR accumulators, drops the collected summaries and
    intermediate conv results, and resets the hidden state of every
    ConvLSTM cell so the next sequence starts from scratch.
    """
    self.loss = 0.0
    self.psnr_all = 0.0
    self.summaries = []
    self.conv_res = []
    # Reset every recurrent cell in order (lstm1..lstm7).
    for cell in (self.lstm1, self.lstm2, self.lstm3, self.lstm4,
                 self.lstm5, self.lstm6, self.lstm7):
        cell.reset_state()
def __call__(self, x, iter_num=-1.0):
    """
    Run the forward pass over a full video sequence and accumulate the loss.

    Args:
        x: an array containing:
            images: an array of Tensor of shape batch x channels x height x width
            actions: an array of Tensor of shape batch x action
            states: an array of Tensor of shape batch x state
        iter_num: iteration (epoch) index; drives the scheduled-sampling decay.
            -1.0 (default) effectively disables the decay term.
    Returns:
        self.loss (scalar Variable). Side effects: populates self.psnr_all,
        self.summaries, self.gen_images and self.conv_res.
    """
    logger = logging.getLogger(__name__)  # NOTE(review): obtained but never used below
    # Split the images, actions and states from the input
    if len(x) > 1:
        images, actions, states = x
    else:
        # Single-element input: images only, no conditioning on action/state
        images, actions, states = x[0], None, None
    # Shapes are taken from the first frame; all frames share the same layout
    batch_size, color_channels, img_height, img_width = images[0].shape[0:4]
    #img_training_set = [np.transpose(np.squeeze(img), (0, 3, 1, 2)) for img in img_training_set]
    # Generated robot states and images
    gen_states, gen_images = [], []
    current_state = states[0]
    # When validation/test, disable schedule sampling
    if not chainer.config.train or self.scheduled_sampling_k == -1:
        feedself = True
    else:
        # Scheduled sampling, inverse sigmoid decay
        # Calculate number of ground-truth frames to pass in.
        num_ground_truth = np.int32(
            np.round(np.float32(batch_size) * (self.scheduled_sampling_k / (self.scheduled_sampling_k + np.exp(iter_num / self.scheduled_sampling_k))))
        )
        feedself = False
    # Iterate over timesteps; the last frame is only ever a prediction target
    for image, action in zip(images[:-1], actions[:-1]):
        # Reuse variables after the first timestep
        reuse = bool(gen_images)  # NOTE(review): unused; leftover from the TensorFlow port
        # True once we have produced at least `num_frame_before_prediction` frames
        done_warm_start = len(gen_images) > self.num_frame_before_prediction - 1
        if feedself and done_warm_start:
            # Feed in generated image
            prev_image = gen_images[-1]
        elif done_warm_start:
            # Scheduled sampling: mix ground truth and generated frames per-sample
            prev_image = scheduled_sample(image, gen_images[-1], batch_size, num_ground_truth)
            prev_image = variable.Variable(prev_image)
        else:
            # Always feed in ground_truth
            prev_image = variable.Variable(image)
        # Predicted state is always fed back in
        state_action = F.concat((action, current_state), axis=1)
        """ Execute the ops array of transformations """
        # If an ops has a name of "ops" it means it's a custom ops
        # Each self.ops[i] group produces one encoder activation (enc_i);
        # callbacks (plain functions) receive a dict, links receive the tensor.
        encs = []
        maps = {}
        x = prev_image
        for i in xrange(len(self.ops)):
            for j in xrange(len(self.ops[i])):
                op = self.ops[i][j]
                if isinstance(op, types.FunctionType):
                    # Only these values are use now in the ops callback
                    x = op({
                        "x": x,
                        "encs": encs,
                        "map": maps,
                        "state_action": state_action,
                        "batch_size": batch_size
                    })
                else:
                    x = op(x)
                # ReLU at the end of each transformation
                # NOTE(review): reconstructed as per-op ReLU from the original
                # comment placement — confirm against the reference TF model.
                x = F.relu(x)
            # At the end of j iteration = completed a enc transformation
            encs.append(x)
        # Extract the variables saved by the ops_save callbacks
        hiddens = [
            maps.get("hidden1"), maps.get("hidden2"), maps.get("hidden3"), maps.get("hidden4"),
            maps.get("hidden5"), maps.get("hidden6"), maps.get("hidden7")
        ]
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        """ Specific model transformations """
        # Delegate to the CDNA/DNA/STP-specific head to produce the
        # transformed image candidates and the extra encoder activation.
        transformed, enc7 = self.model(
            encs, hiddens,
            batch_size, prev_image, self.num_masks, int(color_channels)
        )
        encs.append(enc7)
        """ Masks """
        # num_masks+1 compositing masks: one for the previous image plus one
        # per transformed candidate; softmax normalizes across mask channels.
        masks = self.masks(enc6)
        masks = F.relu(masks)
        masks = F.reshape(masks, (-1, self.num_masks + 1))
        masks = F.softmax(masks)
        masks = F.reshape(masks, (int(batch_size), self.num_masks+1, int(img_height), int(img_width))) # Channels on axis=1 (NCHW), unlike the TF original (axis=3)
        mask_list = F.split_axis(masks, indices_or_sections=self.num_masks+1, axis=1) # Split along the channel axis (NCHW)
        # Composite the next frame: masked previous image + masked candidates
        output = broadcast_scale(prev_image, mask_list[0])
        for layer, mask in zip(transformed, mask_list[1:]):
            output += broadcast_scale(layer, mask, axis=0)
        gen_images.append(output)
        # Predict the next robot state from the current state+action
        current_state = self.current_state(state_action)
        gen_states.append(current_state)
    # End of transformations
    self.conv_res = encs
    # L2 loss, PSNR for eval
    # Ground truth frames start after the warm-start context; generated frames
    # are offset by one (frame t predicts frame t+1).
    loss, psnr_all = 0.0, 0.0
    summaries = []
    for i, x, gx in zip(range(len(gen_images)), images[self.num_frame_before_prediction:], gen_images[self.num_frame_before_prediction - 1:]):
        x = variable.Variable(x)
        recon_cost = F.mean_squared_error(x, gx)
        psnr_i = peak_signal_to_noise_ratio(x, gx)
        psnr_all += psnr_i
        summaries.append(self.prefix + '_recon_cost' + str(i) + ': ' + str(recon_cost.data))
        summaries.append(self.prefix + '_psnr' + str(i) + ': ' + str(psnr_i.data))
        loss += recon_cost
        #print(recon_cost.data)
    # State regression loss, down-weighted by 1e-4 relative to the image loss
    for i, state, gen_state in zip(range(len(gen_states)), states[self.num_frame_before_prediction:], gen_states[self.num_frame_before_prediction - 1:]):
        state = variable.Variable(state)
        state_cost = F.mean_squared_error(state, gen_state) * 1e-4
        summaries.append(self.prefix + '_state_cost' + str(i) + ': ' + str(state_cost.data))
        loss += state_cost
    summaries.append(self.prefix + '_psnr_all: ' + str(psnr_all.data if isinstance(psnr_all, variable.Variable) else psnr_all))
    self.psnr_all = psnr_all
    # Average over the number of predicted frames
    self.loss = loss = loss / np.float32(len(images) - self.num_frame_before_prediction)
    summaries.append(self.prefix + '_loss: ' + str(loss.data if isinstance(loss, variable.Variable) else loss))
    self.summaries = summaries
    self.gen_images = gen_images
    return self.loss
# =================================================
# Main entry point of the training processes (main)
# =================================================
@click.command()
@click.option('--data_dir', type=click.Path(exists=True), default='data/processed/brain-robotics-data/push/push_train', help='Directory containing data.')
@click.option('--output_dir', type=click.Path(), default='models', help='Directory for model checkpoints.')
@click.option('--event_log_dir', type=click.Path(), default='models', help='Directory for writing summary.')
@click.option('--num_iterations', type=click.INT, default=100000, help='Number of training iterations. Number of epoch is: num_iterations/batch_size.')
@click.option('--pretrained_model', type=click.Path(), default='', help='Filepath of a pretrained model to initialize from.')
@click.option('--pretrained_state', type=click.Path(), default='', help='Filepath of a pretrained state to initialize from.')
@click.option('--sequence_length', type=click.INT, default=10, help='Sequence length, including context frames.')
@click.option('--context_frames', type=click.INT, default=2, help='Number of frames before predictions.')
@click.option('--use_state', type=click.INT, default=1, help='Whether or not to give the state+action to the model.')
@click.option('--model_type', type=click.STRING, default='CDNA', help='Model architecture to use - CDNA, DNA, or STP.')
@click.option('--num_masks', type=click.INT, default=10, help='Number of masks, usually 1 for DNA, 10 for CDNA, STP.')
@click.option('--schedsamp_k', type=click.FLOAT, default=900.0, help='The k parameter for schedules sampling. -1 for no scheduled sampling.')
@click.option('--train_val_split', type=click.FLOAT, default=0.95, help='The percentage of data to use for the training set, vs. the validation set.')
@click.option('--batch_size', type=click.INT, default=32, help='Batch size for training.')
@click.option('--learning_rate', type=click.FLOAT, default=0.001, help='The base learning rate of the generator.')
@click.option('--gpu', type=click.INT, default=-1, help='ID of the gpu(s) to use')
@click.option('--validation_interval', type=click.INT, default=200, help='How often to run a batch through the validation model')
@click.option('--save_interval', type=click.INT, default=50, help='How often to save a model checkpoint')
@click.option('--debug', type=click.INT, default=0, help='Debug mode.')
def main(data_dir, output_dir, event_log_dir, num_iterations, pretrained_model, pretrained_state, sequence_length, context_frames, use_state, model_type, num_masks, schedsamp_k, train_val_split, batch_size, learning_rate, gpu, validation_interval, save_interval, debug):
    """Train the video-prediction model based on the data saved in ../processed.

    Loads the .npy batches referenced by ``<data_dir>/map.csv``, splits them
    into training/validation sets, then runs ``num_iterations`` mini-batch
    updates, periodically running validation and saving checkpoints plus the
    per-epoch loss/PSNR statistics into ``output_dir``.
    """
    # NOTE: the docstring used to sit after the first statement, where Python
    # treats it as a plain (dead) string expression; it now documents main().
    if debug == 1:
        chainer.set_debug(True)
    logger = logging.getLogger(__name__)
    logger.info('Training the model')
    logger.info('Model: {}'.format(model_type))
    logger.info('GPU: {}'.format(gpu))
    logger.info('# Minibatch-size: {}'.format(batch_size))
    logger.info('# Num iterations: {}'.format(num_iterations))
    logger.info('# epoch: {}'.format(round(num_iterations/batch_size)))
    # Per-run checkpoint directory and file prefixes
    model_suffix_dir = "{0}-{1}-{2}".format(time.strftime("%Y%m%d-%H%M%S"), model_type, batch_size)
    training_suffix = "{0}".format('training')
    validation_suffix = "{0}".format('validation')
    state_suffix = "{0}".format('state')
    logger.info("Fetching the models and inputs")
    # map.csv lists, per batch index, the paths of the image/action/state .npy files
    data_map = []
    with open(data_dir + '/map.csv', 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            data_map.append(row)
    if len(data_map) <= 1:  # empty or only header
        logger.error("No file map found")
        exit()
    # Load the images, actions and states
    images = []
    actions = []
    states = []
    for i in xrange(1, len(data_map)):  # Exclude the header
        images.append(np.float32(np.load(data_dir + '/' + data_map[i][2])))
        actions.append(np.float32(np.load(data_dir + '/' + data_map[i][3])))
        states.append(np.float32(np.load(data_dir + '/' + data_map[i][4])))
    images = np.asarray(images, dtype=np.float32)
    actions = np.asarray(actions, dtype=np.float32)
    states = np.asarray(states, dtype=np.float32)
    # Train/validation split
    train_val_split_index = int(np.floor(train_val_split * len(images)))
    images_training = np.asarray(images[:train_val_split_index])
    actions_training = np.asarray(actions[:train_val_split_index])
    states_training = np.asarray(states[:train_val_split_index])
    images_validation = np.asarray(images[train_val_split_index:])
    actions_validation = np.asarray(actions[train_val_split_index:])
    states_validation = np.asarray(states[train_val_split_index:])
    logger.info('Data set contain {0}, {1} will be use for training and {2} will be use for validation'.format(len(images)-1, train_val_split_index, len(images)-1-train_val_split_index))
    # Create the model
    training_model = Model(
        num_masks=num_masks,
        is_cdna=model_type == 'CDNA',
        is_dna=model_type == 'DNA',
        is_stp=model_type == 'STP',
        use_state=use_state,
        scheduled_sampling_k=schedsamp_k,
        num_frame_before_prediction=context_frames,
        prefix='train'
    )
    # Create the optimizers for the models
    optimizer = chainer.optimizers.Adam(alpha=learning_rate)
    optimizer.setup(training_model)
    # Load a previous model/optimizer state if requested
    if pretrained_model:
        chainer.serializers.load_npz(pretrained_model, training_model)
        logger.info("Loading pretrained model {}".format(pretrained_model))
    if pretrained_state:
        chainer.serializers.load_npz(pretrained_state, training_model)
        logger.info("Loading pretrained state {}".format(pretrained_state))
    # Save the current GIT commit corresponding to the current training.
    # When predicting or visualizing the model, change the working directory to the GIT snapshot
    # This way, instead of copying the files into the model folder, we use GIT functionality to preserve the training files
    current_version = None
    try:
        subprocess.check_call(['git', 'status'])
        def git_exec(args):
            process = subprocess.Popen(['git'] + args, stdout=subprocess.PIPE)
            res = process.communicate()[0].rstrip().strip()
            return res
        current_version = git_exec(['rev-parse', '--abbrev-ref', 'HEAD']) + '\n' + git_exec(['rev-parse', 'HEAD'])
    except Exception:
        # Best effort: training proceeds even when git is unavailable
        pass
    # Enable GPU support if defined
    if gpu > -1:
        chainer.cuda.get_device_from_id(gpu).use()
        training_model.to_gpu()
        xp = cupy
    else:
        xp = np
    # Group the images, actions and states for Chainer's iterator
    grouped_set_training = []
    grouped_set_validation = []
    for idx in xrange(len(images_training)):
        grouped_set_training.append([images_training[idx], actions_training[idx], states_training[idx]])
    for idx in xrange(len(images_validation)):
        grouped_set_validation.append([images_validation[idx], actions_validation[idx], states_validation[idx]])
    train_iter = chainer.iterators.SerialIterator(grouped_set_training, batch_size, repeat=True, shuffle=True)
    valid_iter = chainer.iterators.SerialIterator(grouped_set_validation, batch_size, repeat=False, shuffle=True)
    # Run training.
    # As per Finn's implementation, one epoch is run on one batch size, randomly, but never more than once.
    # At the end of the queue, if the epochs len is not reach, the queue is generated again.
    local_losses = []
    local_psnr_all = []
    local_losses_valid = []
    local_psnr_all_valid = []
    global_losses = []
    global_psnr_all = []
    global_losses_valid = []
    global_psnr_all_valid = []
    summaries, summaries_valid = [], []
    start_time = None
    stop_time = None
    itr = 0
    while itr < num_iterations:
        epoch = train_iter.epoch
        batch = train_iter.next()
        img_training_set, act_training_set, sta_training_set = concat_examples(batch)
        # Perform training
        logger.info("Begining training for mini-batch {0}/{1} of epoch {2}".format(str(train_iter.current_position), str(len(images_training)), str(epoch+1)))
        logger.info("Global iteration: {}".format(str(itr+1)))
        if start_time is None:
            start_time = time.time()
        optimizer.update(training_model, [xp.array(img_training_set), xp.array(act_training_set), xp.array(sta_training_set)], itr)
        loss = training_model.loss
        psnr_all = training_model.psnr_all
        summaries = training_model.summaries
        loss_data_cpu = chainer.cuda.to_cpu(loss.data)
        psnr_data_cpu = chainer.cuda.to_cpu(psnr_all.data)
        local_losses.append(loss_data_cpu)
        local_psnr_all.append(psnr_data_cpu)
        training_model.reset_state()
        logger.info("{0} {1}".format(str(epoch+1), str(loss.data)))
        loss, psnr_all, loss_data_cpu, psnr_data_cpu = None, None, None, None
        if train_iter.is_new_epoch:
            stop_time = time.time()
            logger.info("[TRAIN] Epoch #: {}".format(epoch+1))
            logger.info("[TRAIN] Epoch elapsed time: {}".format(stop_time-start_time))
            local_losses = np.array(local_losses)
            local_psnr_all = np.array(local_psnr_all)
            global_losses.append([local_losses.mean(), local_losses.std(), local_losses.min(), local_losses.max(), np.median(local_losses)])
            global_psnr_all.append([local_psnr_all.mean(), local_psnr_all.std(), local_psnr_all.min(), local_psnr_all.max(), np.median(local_psnr_all)])
            logger.info("[TRAIN] epoch loss: {}".format(local_losses.mean()))
            logger.info("[TRAIN] epoch psnr: {}".format(local_psnr_all.mean()))
            local_losses, local_psnr_all = [], []
            start_time, stop_time = None, None
        # FIX: the original condition was `epoch+1 % validation_interval == 0`,
        # which parses as `epoch + (1 % validation_interval)` and never fired.
        if train_iter.is_new_epoch and (epoch + 1) % validation_interval == 0:
            start_time = time.time()
            for batch in valid_iter:
                logger.info("Begining validation for mini-batch {0}/{1} of epoch {2}".format(str(valid_iter.current_position), str(len(images_validation)), str(epoch+1)))
                img_validation_set, act_validation_set, sta_validation_set = concat_examples(batch)
                # Run through validation set without scheduled sampling / training-only behavior
                with chainer.using_config('train', False):
                    # FIX: was `xp.array(xp.act_validation_set)` (attribute typo)
                    loss_valid = training_model([xp.array(img_validation_set), xp.array(act_validation_set), xp.array(sta_validation_set)], itr)
                psnr_all_valid = training_model.psnr_all
                summaries_valid = training_model.summaries
                loss_valid_data_cpu = chainer.cuda.to_cpu(loss_valid.data)
                psnr_all_valid_data_cpu = chainer.cuda.to_cpu(psnr_all_valid.data)
                local_losses_valid.append(loss_valid_data_cpu)
                local_psnr_all_valid.append(psnr_all_valid_data_cpu)
                training_model.reset_state()
                loss_valid, psnr_all_valid, loss_valid_data_cpu, psnr_all_valid_data_cpu = None, None, None, None
            stop_time = time.time()
            logger.info("[VALID] Epoch #: {}".format(epoch+1))
            logger.info("[VALID] epoch elapsed time: {}".format(stop_time-start_time))
            local_losses_valid = np.array(local_losses_valid)
            local_psnr_all_valid = np.array(local_psnr_all_valid)
            global_losses_valid.append([local_losses_valid.mean(), local_losses_valid.std(), local_losses_valid.min(), local_losses_valid.max(), np.median(local_losses_valid)])
            global_psnr_all_valid.append([local_psnr_all_valid.mean(), local_psnr_all_valid.std(), local_psnr_all_valid.min(), local_psnr_all_valid.max(), np.median(local_psnr_all_valid)])
            logger.info("[VALID] epoch loss: {}".format(local_losses_valid.mean()))
            logger.info("[VALID] epoch psnr: {}".format(local_psnr_all_valid.mean()))
            local_losses_valid, local_psnr_all_valid = [], []
            start_time, stop_time = None, None
            valid_iter.reset()
            training_model.reset_state()
        if train_iter.is_new_epoch and epoch % save_interval == 0:
            logger.info('Saving model')
            save_dir = output_dir + '/' + model_suffix_dir
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            # Save the version of the code.
            # FIX: current_version can be None when git is unavailable; the
            # original crashed on `None + '\n'`.
            f = open(save_dir + '/version', 'w')
            f.write((current_version or 'unknown') + '\n')
            f.close()
            serializers.save_npz(save_dir + '/' + training_suffix + '-' + str(epoch), training_model)
            serializers.save_npz(save_dir + '/' + state_suffix + '-' + str(epoch), optimizer)
            np.save(save_dir + '/' + training_suffix + '-global_losses', np.array(global_losses))
            np.save(save_dir + '/' + training_suffix + '-global_psnr_all', np.array(global_psnr_all))
            np.save(save_dir + '/' + training_suffix + '-global_losses_valid', np.array(global_losses_valid))
            # FIX: was saved under '-global_psnr_all', clobbering the training PSNR file
            np.save(save_dir + '/' + training_suffix + '-global_psnr_all_valid', np.array(global_psnr_all_valid))
        summaries = []
        summaries_valid = []
        itr += 1
if __name__ == '__main__':
    # Configure root logging before handing control to the click CLI entry point.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    #logging.basicConfig(level=logging.INFO, format=log_fmt, stream=sys.stdout)
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
|
kristofbc/physical-interaction-video-prediction | src/data/make_dataset.py | # -*- coding: utf-8 -*-
import glob
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
import tensorflow as tf
import numpy as np
from PIL import Image
import csv
from matplotlib import pyplot as plt
@click.command()
@click.option('--data_dir', type=click.Path(exists=True), default='data/raw/brain-robotics-data/push/push_train', help='Directory containing data.')
@click.option('--out_dir', type=click.Path(), default='data/processed/brain-robotics-data/push/push_train', help='Output directory of the converted data.')
@click.option('--sequence_length', type=click.INT, default=10, help='Sequence length, including context frames.')
@click.option('--image_original_width', type=click.INT, default=640, help='Original width of the images.')
@click.option('--image_original_height', type=click.INT, default=512, help='Original height of the images.')
@click.option('--image_original_channel', type=click.INT, default=3, help='Original channels amount of the images.')
@click.option('--image_resize_width', type=click.INT, default=64, help='Resize width of the the images.')
@click.option('--image_resize_height', type=click.INT, default=64, help='Resize height of the the images.')
@click.option('--state_action_dimension', type=click.INT, default=5, help='Dimension of the state and action.')
@click.option('--create_img', type=click.INT, default=1, help='Create the bitmap image along the numpy RGB values')
@click.option('--create_img_prediction', type=click.INT, default=1, help='Create the bitmap image used in the prediction phase')
def main(data_dir, out_dir, sequence_length, image_original_width, image_original_height, image_original_channel, image_resize_width, image_resize_height, state_action_dimension, create_img, create_img_prediction):
    """ Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../processed).

    Reads every tfrecord in ``data_dir``, decodes per-frame image/action/state
    features, center-crops and resizes the frames, and writes .npy batches
    (plus optional PNGs) and a ``map.csv`` index into ``out_dir``.
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')
    with tf.Session() as sess:
        files = glob.glob(data_dir + '/*')
        if len(files) == 0:
            logger.error("No files found with extensions .tfrecords in directory {0}".format(out_dir))
            exit()
        # Build the read/decode graph once; it is run once per tfrecord file below
        queue = tf.train.string_input_producer(files, shuffle=False)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(queue)
        image_seq, state_seq, action_seq = [], [], []
        image_seq_raw = []
        for i in xrange(sequence_length):
            image_name = 'move/' + str(i) + '/image/encoded'
            action_name = 'move/' + str(i) + '/commanded_pose/vec_pitch_yaw'
            state_name = 'move/' + str(i) + '/endeffector/vec_pitch_yaw'
            features = {
                image_name: tf.FixedLenFeature([1], tf.string),
                action_name: tf.FixedLenFeature([state_action_dimension], tf.float32),
                state_name: tf.FixedLenFeature([state_action_dimension], tf.float32)
            }
            features = tf.parse_single_example(serialized_example, features=features)
            image_buffer = tf.reshape(features[image_name], shape=[])
            image = tf.image.decode_jpeg(image_buffer, channels=image_original_channel)
            image.set_shape([image_original_height, image_original_width, image_original_channel])
            # Untouched image used in prediction
            if create_img_prediction == 1:
                image_pred = tf.identity(image)
                image_pred = tf.reshape(image_pred, [1, image_original_height, image_original_width, image_original_channel])
                image_seq_raw.append(image_pred)
            # Center-crop to a square before the (antialiased) resize done later with PIL
            crop_size = min(image_original_width, image_original_height)
            image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)
            image = tf.reshape(image, [1, crop_size, crop_size, image_original_channel])
            image_seq.append(image)
            state = tf.reshape(features[state_name], shape=[1, state_action_dimension])
            state_seq.append(state)
            action = tf.reshape(features[action_name], shape=[1, state_action_dimension])
            action_seq.append(action)
        image_seq = tf.concat(axis=0, values=image_seq)
        state_seq = tf.concat(axis=0, values=state_seq)
        action_seq = tf.concat(axis=0, values=action_seq)
        image_seq_raw = tf.concat(axis=0, values=image_seq_raw)
        init_op = tf.initialize_all_variables()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        logger.info("Saving image_batch, action_batch, state_batch")
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        csv_ref = []
        for j in xrange(len(files)):
            logger.info("Creating data from tsrecords {0}/{1}".format(j+1, len(files)))
            imgs, act, sta, pred = sess.run([image_seq, action_seq, state_seq, image_seq_raw])
            # Resize the image using PIL antialiasing method and normalize to [0, 1]
            raw = []
            for k in xrange(len(imgs)):
                tmp = Image.fromarray(imgs[k])
                # FIX: PIL's resize takes (width, height); the original passed
                # (height, width), which was only harmless for square sizes.
                tmp = tmp.resize((image_resize_width, image_resize_height), Image.ANTIALIAS)
                tmp = np.fromstring(tmp.tobytes(), dtype=np.uint8)
                tmp = tmp.reshape((image_resize_height, image_resize_width, 3))
                tmp = tmp.astype(np.float32) / 255.0
                raw.append(tmp)
            raw = np.array(raw)
            ref = []
            ref.append(j)
            if create_img == 1:
                for k in xrange(raw.shape[0]):
                    # FIX: raw is float32 in [0, 1]; Image.fromarray(..., 'RGB')
                    # misinterprets a float buffer, so convert back to uint8 first.
                    img = Image.fromarray((raw[k] * 255.0).astype(np.uint8), 'RGB')
                    img.save(out_dir + '/image_batch_' + str(j) + '_' + str(k) + '.png')
                ref.append('image_batch_' + str(j) + '_*' + '.png')
            else:
                ref.append('')
            np.save(out_dir + '/image_batch_' + str(j), raw)
            np.save(out_dir + '/action_batch_' + str(j), act)
            np.save(out_dir + '/state_batch_' + str(j), sta)
            ref.append('image_batch_' + str(j) + '.npy')
            ref.append('action_batch_' + str(j) + '.npy')
            ref.append('state_batch_' + str(j) + '.npy')
            # Image used in prediction (full resolution, uint8)
            if create_img_prediction == 1:
                np.save(out_dir + '/image_batch_pred_' + str(j), pred)
                for k in xrange(pred.shape[0]):
                    img = Image.fromarray(pred[k], 'RGB')
                    img.save(out_dir + '/image_batch_pred_' + str(j) + '_' + str(k) + '.png')
                ref.append('image_batch_pred_' + str(j) + '_*' + '.png')
                ref.append('image_batch_pred_' + str(j) + '.npy')
            else:
                ref.append('')
                ref.append('')
            csv_ref.append(ref)
        logger.info("Writing the results into map file '{0}'".format('map.csv'))
        with open(out_dir + '/map.csv', 'wb') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
            writer.writerow(['id', 'img_bitmap_path', 'img_np_path', 'action_np_path', 'state_np_path', 'img_bitmap_pred_path', 'img_np_pred_path'])
            for row in csv_ref:
                writer.writerow(row)
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    # Configure root logging before the click entry point takes over.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
|
kristofbc/physical-interaction-video-prediction | src/visualization/visualize.py | <reponame>kristofbc/physical-interaction-video-prediction
import sys
import os
import glob
import csv
import click
import logging
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import chainer
import chainer.functions as F
import chainer.links as L
from PIL import Image
# Put the main path in the systems path
sys.path.append("/".join(sys.path[0].split("/")[:-2]))
from src.models.train_model import Model
from src.models.train_model import concat_examples
from src.models.predict_model import get_data_info
# ===============================
# General Visualizer class (visc)
# ===============================
class Visualizer(object):
    """
    Visualize the components (filters, activations, outputs) of a trained network.
    """

    def __init__(self, network):
        """
        Args:
            network (chainer.Link): The trained network to visualize
        """
        self._network = network
        self._bitmap = {}

    def _rescale(self, data):
        """
        Rescale the data into the byte range [0, 255].

        Note: `data` is modified in place before the uint8 copy is returned.

        Args:
            data (numpy.ndarray): the data to rescale
        Returns:
            (numpy.ndarray of uint8)
        """
        data -= data.min()
        peak = data.max()
        # FIX: a constant input array used to trigger a division by zero here;
        # such input now maps to all zeros.
        if peak > 0:
            data /= peak
        data *= 255.0
        return data.astype(np.uint8)

    def _get_layer(self, layer_name):
        """
        Resolve a layer reference to the actual layer object.

        Args:
            layer_name (string|chainer.Link): name of the layer to visualize or
                the layer itself to visualize
        Returns:
            (chainer.Link)
        """
        # A string is looked up on the network; anything else is assumed to
        # already be the layer. NOTE: `basestring` keeps this Python 2 only.
        if isinstance(layer_name, basestring):
            return self._network[layer_name]
        else:
            return layer_name

    def plot_filters(self, layer_name, **kwargs):
        """
        Plot the weights (filters) of a layer as a grid of bitmaps.

        Args:
            layer_name (string|chainer.Link): name of the layer to visualize or
                the layer itself to visualize
        Returns:
            (pyplot)
        Raises:
            ValueError: if the named layer resolves to a falsy value.
        """
        if not self._network[layer_name]:
            # FIX: the placeholder was never filled in the original message
            raise ValueError("Layer {} does not exists in model".format(layer_name))
        # Get the weight of the filter; some layers expose a transposed W
        layer = self._get_layer(layer_name)
        weights = None
        try:
            weights = layer.W.T
        except Exception:
            weights = layer.W
        # Keep only the first input channel of every filter for display
        bitmaps = [bitmap[0].data for bitmap in weights]
        # Plot the weights on a roughly-square grid
        nrow = int(math.sqrt(len(bitmaps))) + 1
        for i in xrange(len(bitmaps)):
            ax = plt.subplot(nrow, nrow, i+1)
            bitmap = bitmaps[i]
            plt.imshow(self._rescale(bitmap), **kwargs)
        return plt

    def plot_activation(self, layer_name, layer_transformation=None, **kwargs):
        """
        Plot the layer activation (after "activating" a layer with data,
        e.g. after training/prediction).

        Args:
            layer_name (string|chainer.Link): name of the layer to visualize or
                the layer itself to visualize
            layer_transformation (Function): apply a transformation to the layer
                before plotting it
        Returns:
            (pyplot)
        Raises:
            ValueError: if the activation holds more than one image.
        """
        layer = self._get_layer(layer_name)
        if layer.data.shape[0] > 1:
            raise ValueError("Can only plot the activation of 1 image not {}".format(layer.data.shape[0]))
        data = None
        if layer_transformation is not None:
            data = layer_transformation(layer)
        else:
            data = layer.data
        # Plot each channel of the activation on a roughly-square grid
        nrow = int(math.sqrt(data.shape[1])) + 1
        for i in xrange(data.shape[1]):
            bitmap = data[0][i]
            # Per-channel min/max normalization into [0, 255]
            fmax = np.max(bitmap)
            fmin = np.min(bitmap)
            diff = fmax - fmin if (fmax - fmin) > 0 else 1
            bitmap = ((bitmap - fmin) * 0xff / diff).astype(np.uint8)
            ax = plt.subplot(nrow, nrow, i+1)
            plt.imshow(bitmap, **kwargs)
        return plt

    def plot_output(self, layer_name, **kwargs):
        """
        Plot the output at a particular layer, one subplot per (sample, channel).

        Args:
            layer_name (string|chainer.Link): name of the layer to visualize or
                the layer itself to visualize
        Returns:
            (pyplot)
        """
        layer = self._get_layer(layer_name)
        output = layer.data
        # Grid sized for batch x channel subplots
        N = layer.shape[0] * layer.shape[1]
        nrow = int(math.sqrt(N)) + 1
        for i in xrange(len(output)):
            for j in xrange(len(output[i])):
                ax = plt.subplot(nrow, nrow, (i) * output.shape[1] + (j+1))
                ax.set_title('Filter: {0}-{1}'.format(i, j), fontsize=10)
                plt.imshow(output[i][j], **kwargs)
        return plt
# ========================
# Helpers functions (hlpr)
# ========================
def get_coordinates(data, std=None):
    """
    Extract the coordinates used for plotting a network curve.

    Args:
        data (float[]): 1D array containing the data to plot
        std (float[]): 1D array to create the "box" around the curve; only
            used when it has the same length as `data`. Defaults to no box.
    Returns:
        (float[,2] coord, float[,2] box, [x_min, x_max, y_min, y_max])
    """
    # FIX: `std=[]` was a mutable default argument; `None` is the safe sentinel.
    if std is None:
        std = []
    # FIX: np.min/np.max raise on empty input; return empty results instead.
    if len(data) == 0:
        empty = np.empty((0, 2), dtype=np.float32)
        return empty, np.empty((0, 2), dtype=np.float32), [0, 0, 0.0, 0.0]
    coord = []
    box = []
    y_min = np.min(data, axis=0)
    y_max = np.max(data, axis=0)
    # Hoisted loop-invariant: the std "box" is only drawn when lengths match
    with_box = len(std) == len(data)
    for i, value in enumerate(data):
        # Create the "box" around the curve
        if with_box:
            box.append([value - 1.0 * std[i], value + 1.0 * std[i]])
        coord.append([i, value])
    return np.array(coord, dtype=np.float32), np.array(box, dtype=np.float32), [0, len(coord), y_min, y_max]
def scale_data(data, high=1.0, low=-1.0, maxs=None, mins=None):
"""
Scale data between [low, high]
Args:
data (float[]): 1D array of values to scale
high (float): upperbound of the scale
low (float): lowerbound of the scale
maxs (float): max value in data
mins (float): min value in data
Returns:
(float[])
"""
if mins is None:
mins = np.min(data, axis=0)
if maxs is None:
maxs = np.max(data, axis=0)
rng = maxs - mins
return high - (((high - low) * (maxs - data)) / rng)
def plot_data(coordinate, box=[], plt_inst=None, **kwargs):
    """
    Plot a curve from `coordinate`, optionally shading the "std box" around it.

    Args:
        coordinate (float[,2]): array of (x, y) points to plot
        box (float[,2]): array of (lower, upper) bounds around the curve;
            drawn only when its length matches `coordinate`
        plt_inst (pyplot): pyplot instance (module-level `plt` when None)
    Returns:
        (plt_inst)
    """
    target = plt if plt_inst is None else plt_inst
    # Shade the envelope first so the curve is drawn on top of it.
    if len(box) == len(coordinate):
        lower = box[:, 0:1].squeeze()
        upper = box[:, 1:].squeeze()
        target.fill_between(np.arange(len(box)), lower, upper, zorder=1, alpha=0.2)
    xs = coordinate[:, 0:1].squeeze()
    ys = coordinate[:, 1:].squeeze()
    target.plot(xs, ys, **kwargs)
    return target
def plot_losses_curves(train_network, valid_network, x_label="Epoch", y_label="Loss", title="Network loss"):
    """
    Plot the train and validation loss curves (mean +/- std band) on one graph.

    Args:
        train_network (float[]): (N, 2) array of per-epoch [mean, std] train loss
        valid_network (float[]): (N, 2) array of per-epoch [mean, std] valid loss
        x_label (string): label of x axis
        y_label (string): label of y axis
        title (string): title of the graph (iteration count is appended)
    Returns:
        (plt) the module-level pyplot, with the figure drawn on it
    """
    # Extract the coordinate of the losses; empty inputs keep empty placeholders.
    coord_network_train, box_network_train, stats_network_train = [], [], []
    coord_network_valid, box_network_valid, stats_network_valid = [], [], []
    if len(train_network) > 0:
        coord_network_train, box_network_train, stats_network_train = get_coordinates(train_network[:, 0], train_network[:, 1])
    if len(valid_network) > 0:
        coord_network_valid, box_network_valid, stats_network_valid = get_coordinates(valid_network[:, 0], valid_network[:, 1])
    plt.figure(1)
    # Single subplot (1x1 grid, first cell).
    plt.subplot("{0}{1}{2}".format(1, 1, 1))
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title + " (iteration #{})".format(len(coord_network_train) if len(coord_network_train) > 0 else len(coord_network_valid)))
    # y-range spans both curves; a missing curve contributes 0 as its bound.
    plt.ylim(
        min(stats_network_train[2] if len(stats_network_train) > 0 else 0, stats_network_valid[2] if len(stats_network_valid) > 0 else 0),
        max(stats_network_train[3] if len(stats_network_train) > 0 else 0, stats_network_valid[3] if len(stats_network_valid) > 0 else 0)
    )
    if len(coord_network_train) > 0:
        plot_data(coord_network_train, box_network_train, plt, label="Train")
    if len(coord_network_valid) > 0:
        plot_data(coord_network_valid, box_network_valid, plt, label="Test")
    # Two legend columns only when both curves are present.
    plt.legend(ncol=2 if len(coord_network_train) > 0 and len(coord_network_valid) > 0 else 1, loc="upper right", fontsize=10)
    return plt
def plot(ctx, xaxis, yaxis, title, cb):
    """
    Clear the current axes and plot the points produced by *cb* over *ctx*.

    Args:
        ctx: sequence of data items passed one by one to the callback
        xaxis (string): x axis label
        yaxis (string): y axis label
        title (string): plot title
        cb (callable): cb(item, index) -> [x, y], or an empty sequence to skip
    Returns:
        (plt)
    """
    plt.cla()
    plt.xlabel(xaxis)
    plt.ylabel(yaxis)
    plt.title(title)
    xs, ys = [], []
    for position, item in enumerate(ctx):
        point = cb(item, position)
        # An empty return value means "skip this data point".
        if len(point) != 0:
            xs.append(point[0])
            ys.append(point[1])
    plt.plot(xs, ys)
    return plt
def visualize_layer_activation(model, x, layer_idx):
    """
    Render the activation maps of one convolution layer as a grid of images.

    Args:
        model: trained network exposing ``activations(layer_idx, x, 0)``
        x: input batch fed to the model
        layer_idx (int): index of the convolution layer to visualize
    Returns:
        (plt) pyplot with an (approximately square) grid of activation bitmaps

    NOTE(review): assumes the activations come back as (n, c, h, w) — confirm
    against the Model.activations implementation.
    """
    logger = logging.getLogger(__name__)
    activations = model.activations(layer_idx, x, 0)
    # Rescale the activation [0, 255]
    activations -= activations.min()
    activations /= activations.max()
    activations *= 255
    activations = activations.astype(np.uint8)
    n, c, h, w = activations.shape
    # Plot non-deconvolution image
    #rows = int(math.ceil(math.sqrt(c)))
    #cols = int(round(math.sqrt(c)))
    #plt.figure(1)
    #for i in xrange(c):
    #    plt.subplot(rows, cols, i+1)
    #    plt.imshow(activations[0,i,:,:])
    #return plt
    # Plot deconvolution image: one cell per batch element, grid ~ sqrt(n).
    rows = int(math.ceil(math.sqrt(n)))
    cols = int(round(math.sqrt(n)))
    dpi=100
    scale=1
    plt.figure(1)
    # Figure sized so each cell renders the activation at native resolution.
    fig, axes = plt.subplots(rows, cols, figsize=(w*cols/dpi*scale, h*rows/dpi*scale), dpi=dpi)
    for i, ax in enumerate(axes.flat):
        if i < n:
            # (c, h, w) -> (h, w, c) for imshow.
            ax.imshow(activations[i].transpose((1, 2, 0)))
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.1, hspace=0.1)
    return plt
@click.command()
@click.argument('model', type=click.STRING)
@click.option('--layer_idx', type=click.INT, default=0, help='Convolution layer index.')
@click.option('--model_name', type=click.STRING, default=None, help='Name of the model to visualize.')
@click.option('--data_index', type=click.INT, default=None, help='Index of the data for the visualization.')
@click.option('--model_dir', type=click.Path(exists=True), default='models', help='Directory containing data.')
@click.option('--output_dir', type=click.Path(), default='reports', help='Directory for model checkpoints.')
@click.option('--data_dir', type=click.Path(exists=True), default='data/processed/brain-robotics-data/push/push_testnovel', help='Directory containing data.')
@click.option('--time_step', type=click.INT, default=8, help='Number of time steps to predict.')
@click.option('--model_type', type=click.STRING, default='', help='Type of the trained model.')
@click.option('--schedsamp_k', type=click.FLOAT, default=-1, help='The k parameter for schedules sampling. -1 for no scheduled sampling.')
@click.option('--context_frames', type=click.INT, default=2, help='Number of frames before predictions.')
@click.option('--use_state', type=click.INT, default=1, help='Whether or not to give the state+action to the model.')
@click.option('--num_masks', type=click.INT, default=10, help='Number of masks, usually 1 for DNA, 10 for CDNA, STP.')
@click.option('--image_height', type=click.INT, default=64, help='Height of one predicted frame.')
@click.option('--image_width', type=click.INT, default=64, help='Width of one predicted frame.')
def main(model, layer_idx, model_name, data_index, model_dir, output_dir, data_dir, time_step, model_type, schedsamp_k, context_frames, use_state, num_masks, image_height, image_width):
    """Plot the training-loss curves of a saved model run and, when
    --model_name is given, reload that model, run one prediction on the
    sample at --data_index, and dump per-layer activation bitmaps under
    <output_dir>/<model>.
    """
    logger = logging.getLogger(__name__)
    model_path = model_dir + '/' + model
    visualization_path = output_dir + '/' + model
    if not os.path.exists(model_path):
        raise ValueError("Directory {} does not exists".format(model_path))
    if not os.path.exists(visualization_path):
        os.makedirs(visualization_path)
    # @TODO Need to be dynamic reporting
    # Loss histories are optional: either file may be missing for a given run.
    training_global_losses = None
    if os.path.exists(model_path + '/training-global_losses.npy'): training_global_losses = np.load(model_path + '/training-global_losses.npy')
    training_global_losses_valid = None
    if os.path.exists(model_path + '/training-global_losses_valid.npy'):
        training_global_losses_valid = np.load(model_path + '/training-global_losses_valid.npy')
    #graph = plot(training_global_losses, 'Epoch', 'Mean', 'Training global losses', lambda pos, i: [i, pos[0]] if pos[0] != 0 else [] )
    #graph.savefig(visualization_path + '/training_global_losses')
    #graph = plot(training_global_losses, 'Epoch', 'Mean', 'Training global losses valid', lambda pos, i: [i, pos[0]] if pos[0] != 0 else [] )
    #graph.savefig(visualization_path + '/training_global_losses_valid')
    # @TODO: fix the training loss
    #plt_inst = plot_losses_curves(training_global_losses if training_global_losses is not None else [], training_global_losses_valid if training_global_losses_valid is not None else [])
    logger.info("Plotting the loss curves")
    plt_inst = plot_losses_curves(training_global_losses if training_global_losses is not None else [], [])
    iteration_number = len(training_global_losses) if len(training_global_losses) > 0 else len(training_global_losses_valid)
    plt_inst.savefig(visualization_path + "/" + model + "-iteration-{}".format(iteration_number) + ".png")
    # Validation curve: the callback skips epochs whose loss is exactly 0.
    plt_inst = plot(training_global_losses, 'Epoch', 'Mean', 'Training global losses valid', lambda pos, i: [i, pos[0]] if pos[0] != 0 else [] )
    plt_inst.savefig(visualization_path + "/" + model + "-validation-iteration-{}".format(iteration_number) + ".png")
    # Plot the masks activation
    if model_name is not None:
        if not os.path.exists(model_path + '/' + model_name):
            raise ValueError("Model name {} does not exists".format(model_name))
        logger.info("Loading data {}".format(data_index))
        image, image_pred, image_bitmap_pred, action, state = get_data_info(data_dir, data_index)
        img_pred, act_pred, sta_pred = concat_examples([[image_pred, action, state]])
        # Extract the information about the model type from its name
        # (expected pattern: <prefix>-<date>-<TYPE>-<suffix>).
        if model_type == '':
            split_name = model.split("-")
            if len(split_name) != 4:
                raise ValueError("Model {} is not recognized, use --model_type to describe the type".format(model))
            model_type = split_name[2]
        # Load the model for prediction
        logger.info("Importing model {0}/{1} of type {2}".format(model_dir, model, model_type))
        pred_model = Model(
            num_masks=num_masks,
            is_cdna=model_type == 'CDNA',
            is_dna=model_type == 'DNA',
            is_stp=model_type == 'STP',
            use_state=use_state,
            scheduled_sampling_k=schedsamp_k,
            num_frame_before_prediction=context_frames,
            prefix='predict'
        )
        chainer.serializers.load_npz(model_path + '/' + model_name, pred_model)
        logger.info("Model imported successfully")
        logger.info("Predicting input for the activation map")
        # Resize every frame to the network's input size and scale to [0, 1].
        resize_img_pred = []
        for i in xrange(len(img_pred)):
            resize = F.resize_images(img_pred[i], (image_height, image_width))
            resize = F.cast(resize, np.float32) / 255.0
            resize_img_pred.append(resize.data)
        resize_img_pred = np.asarray(resize_img_pred, dtype=np.float32)
        # Only one image to visualize the activation
        plt.cla()
        with chainer.using_config('train', False):
            pred_model([resize_img_pred[0:3], act_pred[0:3], sta_pred[0:3]], 0)
        visualizer = Visualizer(pred_model)
        # Helper: project a conv layer's activations back to 64x64 image space
        # using the layer's own weights (deconvolution).
        def deconv(conv):
            def ops(x):
                out_size, in_size, kh, kw = conv.W.data.shape
                #x = L.Deconvolution2D(out_size, in_size, (kh, kw), stride=conv.stride, pad=conv.pad, outsize=(64, 64))(x)
                x = chainer.functions.deconvolution_2d(x, conv.W.data, stride=conv.stride, pad=conv.pad, outsize=(64,64))
                return np.rollaxis(x.data, 1, 4)
            return ops
        #plt_instance = visualizer.plot_activation(model.conv_res[0], deconv(model.enc0))
        logger.info("Creating the layer activation bitmaps")
        # One bitmap per convolution result captured during the forward pass.
        for i in xrange(len(pred_model.conv_res)):
            plt.cla()
            plt.figure(1)
            plt_instance = visualizer.plot_activation(pred_model.conv_res[i], interpolation="nearest", cmap="gray")
            plt.savefig(visualization_path + "/" + model + "-iteration-{0}-activation-{1}".format(iteration_number, i) + ".png")
if __name__ == '__main__':
    # Configure root logging before handing control to the click entry point.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    #logging.basicConfig(level=logging.INFO, format=log_fmt, stream=sys.stdout)
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
|
super-ruilei/svcRec_lr | uitls.py | #coding=utf-8
import os, random, json
import pickle
from math import *
import math
import sys
# Dictionary of users, keyed by user ID (filled by readUserList).
UserList = {}
# Dictionary of web services, keyed by service ID (filled by readWsList).
WsList = {}
# Generated preference matrices (throughput / response time).
Pref_TPMatrix = {}
Pref_RTMatrix = {}
# Raw access data, kept for comparison against the generated preferences.
TPMatrix = []
RTMatrix = []
## Raw input data files.
RAW_USRLIST = "./dataset1/userlist.txt"
RAW_WSLIST = "./dataset1/wslist.txt"
## Reference matrices.
RAW_TPMATRIX = "./dataset1/tpMatrix.txt"
RAW_RTMATRIX = "./dataset1/rtMatrix.txt"
## Output files for the generated preference matrices.
GEN_TPMATRIX = "./dataset1/tpMatrix.pref.txt"
GEN_RTMATRIX = "./dataset1/rtMatrix.pref.txt"
# 构造数据user结构
def WSUser(ID="0", IPAddr="0.0.0.0", Country="", IPNo="0", AS="0", Latitude="0", Longitude="0"):
    """Build a plain dict describing one user record from userlist.txt.

    All fields are kept as strings, exactly as read from the raw file.
    """
    return {
        'ID': ID,
        'IPAddr': IPAddr,
        'Country': Country,
        'IPNo': IPNo,
        'AS': AS,
        'Latitude': Latitude,
        'Longitude': Longitude,
    }
# 构造service的数据结构
def WSService(ID="0", WSDLAddress="", ServiceProvider="", IPAddr="0.0.0.0", Country="", IPNo="0", AS="0", Latitude="0",
              Longitude="0"):
    """Build a plain dict describing one web-service record from wslist.txt.

    Accepts the full raw row (so callers can pass every column) but keeps
    only the identity/location fields actually used downstream; WSDLAddress,
    ServiceProvider, Country and AS are deliberately dropped.
    """
    return {
        'ID': ID,
        'IPAddr': IPAddr,
        'IPNo': IPNo,
        'Latitude': Latitude,
        'Longitude': Longitude,
    }
# 读取用户列表
def readUserList():
    """Populate the module-level UserList dict from RAW_USRLIST.

    Lines starting with '[' or '=' are header/separator lines and skipped.
    Each data line is tab-separated with at least 7 columns:
    ID, IPAddr, Country, IPNo, AS, Latitude, Longitude.

    Fixes vs. the original: the bare ``print`` statements were Python-2
    leftovers that evaluate to no-ops in Python 3; the length check used
    ``< 6`` while ``lns[6]`` is accessed (IndexError on 6-field rows); the
    file handle was never closed.
    """
    with open(RAW_USRLIST, "r") as uFile:
        for line in uFile:
            if line.startswith("[") or line.startswith("="):
                continue
            lns = line.split('\t')
            # WSUser consumes columns 0..6, so 7 fields are required.
            if len(lns) < 7:
                print("Line: ", line, "has problem, skip")
                continue
            newUsr = WSUser(lns[0], lns[1], lns[2], lns[3], lns[4], lns[5], lns[6])
            UserList[lns[0]] = newUsr
    print("total read User:", len(UserList))
# 读取web服务列表
def readWsList():
    """Populate the module-level WsList dict from RAW_WSLIST.

    Lines starting with '[' or '=' are header/separator lines and skipped.
    Each data line is tab-separated with at least 9 columns: ID, WSDLAddress,
    ServiceProvider, IPAddr, Country, IPNo, AS, Latitude, Longitude.

    Fixes vs. the original: the bare ``print`` statements were Python-2
    leftovers that evaluate to no-ops in Python 3; the length check used
    ``< 8`` while ``lns[8]`` is accessed (IndexError on 8-field rows); the
    file handle was never closed.
    """
    with open(RAW_WSLIST, "r") as uFile:
        for line in uFile:
            if line.startswith("[") or line.startswith("="):
                continue
            lns = line.split('\t')
            # WSService consumes columns 0..8, so 9 fields are required.
            if len(lns) < 9:
                print("Line: ", line, "has problem, skip")
                continue
            newWs = WSService(lns[0], lns[1], lns[2], lns[3], lns[4], lns[5], lns[6], lns[7], lns[8])
            WsList[lns[0]] = newWs
    print("total read Webservices:", len(WsList))
def getGeoDistance(Lat_A, Lng_A, Lat_B, Lng_B):
    """Distance in km between two lat/long points on the WGS-style ellipsoid
    (great-circle arc plus a flattening correction).

    Fixes vs. the original: the acos argument is clamped to [-1, 1] so
    floating-point rounding on (nearly) coincident points cannot raise a
    ValueError, and identical points return 0.0 instead of dividing by
    sin(0) in the correction terms.
    """
    ra = 6378.140  # equatorial radius (km)
    rb = 6356.755  # polar radius (km)
    flatten = (ra - rb) / ra  # flattening of the Earth
    rad_lat_A = radians(Lat_A)
    rad_lng_A = radians(Lng_A)
    rad_lat_B = radians(Lat_B)
    rad_lng_B = radians(Lng_B)
    # Reduced (parametric) latitudes.
    pA = atan(rb / ra * tan(rad_lat_A))
    pB = atan(rb / ra * tan(rad_lat_B))
    cos_xx = sin(pA) * sin(pB) + cos(pA) * cos(pB) * cos(rad_lng_A - rad_lng_B)
    # Clamp: rounding can push the value marginally outside acos's domain.
    xx = acos(max(-1.0, min(1.0, cos_xx)))
    if xx == 0.0:
        # Same point: the correction terms below would divide by sin(0).
        return 0.0
    c1 = (sin(xx) - xx) * (sin(pA) + sin(pB)) ** 2 / cos(xx / 2) ** 2
    c2 = (sin(xx) + xx) * (sin(pA) - sin(pB)) ** 2 / sin(xx / 2) ** 2
    dr = flatten / 8 * (c1 - c2)
    distance = ra * (xx + dr)
    return distance
# 读取响应时间关系矩阵
def readTPandRX():
    """Load the reference response-time (TPMatrix) and throughput (RTMatrix)
    user-item matrices from RAW_TPMATRIX / RAW_RTMATRIX into the module-level
    lists. Header/separator lines (starting with '[' or '=') are skipped.

    Fixes vs. the original: the bare ``print`` statements were Python-2
    leftovers that evaluate to no-ops in Python 3, and the two file handles
    were never closed.
    """
    with open(RAW_TPMATRIX, "r") as uFile:
        for line in uFile:
            if line.startswith("[") or line.startswith("="):
                continue
            lns = line.split('\t')
            TPMatrix.append(lns)
    print("total read user-item matrix of response-time.:", len(TPMatrix), "*", len(TPMatrix[0]))
    with open(RAW_RTMATRIX, "r") as uFile:
        for line in uFile:
            if line.startswith("[") or line.startswith("="):
                continue
            lns = line.split('\t')
            RTMatrix.append(lns)
    print("total read user-item matrix for throughput.:", len(RTMatrix), "*", len(RTMatrix[0]))
def loadObjsIfExist(filename):
    """Load and return the pickled object stored in *filename*,
    or None when the file does not exist (used to restore persisted state
    at startup)."""
    if not os.path.exists(filename):
        return None
    with open(filename, 'rb') as pkl_file:
        return pickle.load(pkl_file)
def saveObj(obj, filename):
    """Pickle *obj* to *filename*, creating or truncating the file."""
    with open(filename, 'wb+') as output:
        pickle.dump(obj, output)
|
super-ruilei/svcRec_lr | source/main.py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def cal_weight(u, v):
    """Jaccard overlap of the entries observed by *u* and *v*.

    NaN marks an unobserved entry. Returns
    |observed(u) & observed(v)| / |observed(u) | observed(v)|, and 0.0 when
    neither side observed anything (the original raised ZeroDivisionError
    on an empty union).
    """
    uIndices, vIndices = np.where(~np.isnan(u)), np.where(~np.isnan(v))
    intersect = np.intersect1d(uIndices, vIndices)
    union = np.union1d(uIndices, vIndices)
    if len(union) == 0:
        return 0.0
    return len(intersect) / len(union)
def calPre(u):
    """Build the pairwise preference matrix for one user row.

    *u* is a (1, n_services) array where NaN marks an unobserved service.
    Entry [i, j] is sign(value_i - value_j) when both services were
    observed, and 0 whenever service i or j was not observed.

    Generalized vs. the original: the matrix width is derived from the
    input row itself rather than the module-level NUM_SERVICE constant,
    so the function works for any row width (behavior is unchanged for
    the intended (1, NUM_SERVICE) inputs).
    """
    observed = np.nan_to_num(u)
    num_services = observed.shape[-1]
    # Stack the row so that stacked.T - stacked gives all pairwise diffs.
    stacked = np.repeat(observed, num_services, axis=0)
    pre_val = stacked.T - stacked
    # Zero out every row/column of an unobserved (NaN -> 0) service.
    missing = np.where(observed == 0)[1]
    pre_val[missing, :] = 0
    pre_val[:, missing] = 0
    return np.sign(pre_val)
# Experiment driver: build a sparse QoS matrix, find similar users by
# Kendall rank correlation, combine their pairwise service preferences,
# and compare the resulting top-100 ranking against ground truth.
NUM_USER_TRAIN = 299
NUM_USER_TEST = 1
NUM_SERVICE = 500
NUM_USER = 300
# Number of nearest neighbors used when aggregating preferences.
K = 60
# step1. load groundtruth data
RAW_TPMATRIX = "../dataset2/tpMatrix"
TPMatrix = np.loadtxt(RAW_TPMATRIX, delimiter='\t')
# step2. form sparse data with sparse_rate
# NOTE(review): only spare_rate[0] (10% density) is ever used below;
# "spare_rate" looks like a typo for "sparse_rate".
spare_rate = np.array([0.1,0.3,0.5,1.0])
sample_num = (NUM_SERVICE*spare_rate).astype(int)
TPSparse = np.full([NUM_USER,NUM_SERVICE], np.nan)
for i in range(NUM_USER):
    # Keep a random subset of each user's observations; the rest stay NaN.
    p = np.random.choice(TPMatrix.shape[1], sample_num[0], replace=False);
    TPSparse[i,p] = TPMatrix[i,p]
# step3. split train and test
TPTrain_comment = None if False else None  # (no-op placeholder removed)
tpTrain = TPSparse[NUM_USER_TEST:,:]
gt_tp_test, tpTest = TPMatrix[0:NUM_USER_TEST,:],TPSparse[0:NUM_USER_TEST,:]
# step4. cal krcc
tptrain = pd.DataFrame(tpTrain)
tptest = pd.DataFrame(np.repeat(tpTest,NUM_USER_TEST,axis=0))
# Jaccard observation-overlap weight per training user (currently unused —
# the weighted similarity line below is commented out).
weight_u_v = tptrain.apply(lambda u: cal_weight(u, tptest), axis=1)
krcc_u_v = tptrain.corrwith(tptest, axis=1, method='kendall')
# sim_u_v = weight_u_v * krcc_u_v
sim_u_v = krcc_u_v
# step5. cal preference
preference_direct = calPre(tpTest)
top_k = [(i, v) for i, v in sorted(enumerate(sim_u_v), key=lambda t: t[1], reverse=True)][0:K]
pre_sim = np.zeros((NUM_SERVICE, NUM_SERVICE))
for (i, v) in top_k:
    user = tpTrain[i]
    user = user[np.newaxis,:]
    # pre_sim = pre_sim + v * calPre(user)
    pre_sim = pre_sim + calPre(user)
preference_neighbor = np.sign(pre_sim)
# Direct (observed) preferences win; neighbors only fill the gaps.
preference = np.where(preference_direct != 0, preference_direct, preference_neighbor)
# plt.matshow(preference[:10, :10])
# step6. cal recall
# NOTE(review): "recall" here is the set of indices common to both top-100
# lists, not a recall ratio; recall.shape[0]/100 would be the actual recall.
ind_d = np.sum(-preference,axis=1).argsort()[0:100]
gt_ind = np.array((-gt_tp_test).argsort()[0,0:100])
recall = np.intersect1d(ind_d,gt_ind)
print(recall.shape)
|
mjenior/bipartite_graphs | code/translate_probes.py | <gh_stars>1-10
import sys
import os
import math
# Translate raw microarray probe intensities into KO identifiers:
#   argv[1]: raw microarray data (probe_id <tab> value per line)
#   argv[2]: probe-id -> gene translation key
#   argv[3]: gene -> KO translation key
# Output: <input_basename>.weighted_KOs.tsv with "KO <tab> value" rows.
exp_data = open(sys.argv[1], 'r') # raw microarray data
probeIDs = open(sys.argv[2], 'r') # translation key for probe IDs to genes
koIDs = open(sys.argv[3], 'r') # translation key for genes to KOs
infile_name = str(sys.argv[1]).split('/')[-1]
infile_name = infile_name.split('.')[0]
outfile_name = infile_name + '.weighted_KOs.tsv'
outfile = open(outfile_name,'w')
# Probe key file is "gene <tab> probe"; map probe -> gene.
probe_dictionary = {}
for index in probeIDs:
    probe_dictionary[index.split()[1]] = str(index.split()[0])
# KO key file is "cdf:gene <tab> ko:KO"; map gene -> KO.
# NOTE(review): str.strip('cdf:') / str.strip('ko:') strip those CHARACTERS
# from both ends, not the literal prefix — verify identifiers never start or
# end with c/d/f/k/o/':' beyond the intended prefix.
ko_dictionary = {}
for index in koIDs:
    index_split = index.split()
    ko_dictionary[index_split[0].strip('cdf:')] = str(index_split[1].strip('ko:'))
probeIDs.close()
koIDs.close()
# Sentinel initial value; overwritten on the first successful lookup.
gene = 'HELLO WORLD'
# Loop to translate probes
for index in exp_data:
    index_split = index.split()
    try:
        gene = probe_dictionary[str(index_split[0])]
    except KeyError:
        # NOTE(review): despite the "included as probe" message, the
        # `continue` below skips the write, so untranslated probes are
        # actually DROPPED from the output (same for the gene branch).
        print('Probe translation error: ' + str(index_split[0]) + ' included as probe')
        ko = str(index_split[0])
        continue
    try:
        ko = ko_dictionary[gene]
    except KeyError:
        print('KO translation error: ' + str(gene) + ' included as gene')
        ko = gene
        continue
    outfile.write('\t'.join([ko, str(index_split[1])]))
    outfile.write('\n')
exp_data.close()
outfile.close() |
mjenior/bipartite_graphs | code/bipartite_graph.py |
# Use: python bipartite_graph.py full_KO_list KOs_with_zscores
import sys
import os
import pickle
import math
# Set starting path (restored at the end so relative paths keep working).
starting_directory = str(os.getcwd())
script_path = str(os.path.dirname(os.path.realpath(__file__)))
# Create dictionary for zscores of each KO: "KO <tab> zscore" per line.
zscore_infile = open(sys.argv[2], 'r')
zscore_dictionary = {}
for index in zscore_infile:
    index_split = index.split()
    zscore_dictionary[index_split[0]] = float(index_split[1])
# Read in pickled KO to reaction dictionary
ko_reactionpkl_path = '../data/ko_reaction.pkl'
ko_dict = pickle.load(open(ko_reactionpkl_path, 'rb'))
# Read in pickled reaction to reaction_mapformula dictionary
reaction_mapformulapkl_path = '../data/reaction_mapformula.pkl'
reaction_dict = pickle.load(open(reaction_mapformulapkl_path, 'rb'))
infile = open(sys.argv[1], 'r') # Total KO list for the organism of interest
infile_name = str(sys.argv[1]).split('/')[-1]
infile_name = infile_name.split('.')[0]
# get condition from second input file
# All outputs go into a per-input results directory.
directory = '../results/' + infile_name + '.bipartite.files'
if not os.path.exists(directory):
    os.makedirs(directory)
os.chdir(directory)
print('Output files located in: ' + directory)
outfile_name = infile_name + '.bipartite.graph'
outfile = open(outfile_name,'w')
# Create file for reporting key errors
errorfile_name = infile_name + '.key_error_log.txt'
errorfile = open(errorfile_name, 'w')
# Translation bookkeeping counters.
triedCountKO = 0
excludedCountKO = 0
triedCountReact = 0
excludedCountReact = 0
totalIncludedReact = 0
# Accumulators for graph edges and compound nodes.
compound_list = []
network_list = []
# NOTE(review): zscore_list is never used below.
zscore_list = []
# Nested loops to finally convert the KO list to a directed graph of input and output compounds
for line in infile:
    # NOTE(review): strip('ko:') removes the characters k/o/':' from both
    # ends, not the literal "ko:" prefix — works for K-numbers by luck.
    current_ko = str(line.split()[1]).strip('ko:')
    triedCountKO += 1
    try:
        reaction_number = ko_dict[current_ko]
    except KeyError:
        errorString = 'WARNING: ' + str(current_ko) + ' not found in KO-to-Reaction dictionary. Omitting.\n'
        errorfile.write(errorString)
        excludedCountKO += 1
        continue # Go to next iteration since this data is necessary
    for index in reaction_number:
        triedCountReact += 1
        try:
            reaction_collection = reaction_dict[index]
        except KeyError:
            errorString = 'WARNING: ' + str(index) + ' not found in Reaction-to-Compound dictionary. Omitting.\n'
            errorfile.write(errorString)
            excludedCountReact += 1
            continue
        for x in reaction_collection:
            totalIncludedReact += 1
            # Spit reaction input and output as well as the list of compounds with each
            # Reaction strings look like "in1|in2:...:out1|out2".
            reaction_info = x.split(':')
            input_compounds = reaction_info[0].split('|')
            output_compounds = reaction_info[2].split('|')
            # Directed edges: substrate -> KO and KO -> product.
            for input_index in input_compounds:
                network_list.append(''.join([str(input_index), '\t', str(current_ko), '\n']))
                compound_list.append(str(input_index))
            for output_index in output_compounds:
                network_list.append(''.join([str(current_ko), '\t', str(output_index), '\n']))
                compound_list.append(str(output_index))
# Translation summary, appended to the key-error log.
errorfile.write(''.join(['KOs successfully translated to Reactions: ', str(triedCountKO - excludedCountKO), '\n']))
errorfile.write(''.join(['KOs unsuccessfully translated to Reactions: ', str(excludedCountKO), '\n']))
errorfile.write(''.join(['Reactions successfully translated to Compounds: ', str(triedCountReact - excludedCountReact), '\n']))
errorfile.write(''.join(['Reactions unsuccessfully translated to Compounds: ', str(excludedCountReact), '\n']))
errorfile.close()
# De-duplicate edges and compound nodes.
network_list = list(set(network_list))
compound_list = list(set(compound_list))
input_zscore_dict = {}
output_zscore_dict = {}
composite_zscore_dict = {}
# Write the de-duplicated edge list while accumulating per-compound z-score
# contributions. For an edge "KOxxxxx -> compound" the KO's z-score counts
# toward the compound's output (and composite) lists; for
# "compound -> KOxxxxx" it counts toward the input (and composite) lists.
# KOs absent from the z-score table contribute 0.
# BUG FIX: the z-score is now looked up for EVERY edge. The previous version
# only looked it up when a compound was first seen; on subsequent edges the
# `else` branches appended the stale temp_zscore left over from an earlier
# iteration, attributing the wrong KO's score to the compound.
for index in network_list:
    outfile.write(index)
    edge_info = index.split()
    if edge_info[0][0] == 'K':
        # KO -> compound: edge_info[1] is an output compound of this KO.
        temp_zscore = zscore_dictionary.get(edge_info[0], 0)
        output_zscore_dict.setdefault(edge_info[1], []).append(temp_zscore)
        composite_zscore_dict.setdefault(edge_info[1], []).append(temp_zscore)
        continue
    elif edge_info[1][0] == 'K':
        # compound -> KO: edge_info[0] is an input compound of this KO.
        temp_zscore = zscore_dictionary.get(edge_info[1], 0)
        input_zscore_dict.setdefault(edge_info[0], []).append(temp_zscore)
        composite_zscore_dict.setdefault(edge_info[0], []).append(temp_zscore)
outfile.close()
# create and open composite compound zscore files
inputscorefile_name = infile_name + '.input_zscore.txt'
inputscorefile = open(inputscorefile_name, 'w')
outputscorefile_name = infile_name + '.output_zscore.txt'
outputscorefile = open(outputscorefile_name, 'w')
compositescorefile_name = infile_name + '.composite_zscore.txt'
compositescorefile = open(compositescorefile_name, 'w')
# Combine each compound's collected z-scores as sum/sqrt(n) (Stouffer-style
# combination) and write one line per compound to the relevant file(s).
# A KeyError simply means the compound has no scores of that category.
for index in compound_list:
    try:
        input_scores = input_zscore_dict[index]
        final_score = sum(input_scores) / math.sqrt(len(input_scores))
        inputscorefile.write('\t'.join([index, str(final_score), '\n']))
    except KeyError:
        pass
    try:
        output_scores = output_zscore_dict[index]
        final_score = sum(output_scores) / math.sqrt(len(output_scores))
        outputscorefile.write('\t'.join([index, str(final_score), '\n']))
    except KeyError:
        pass
    try:
        composite_scores = composite_zscore_dict[index]
        final_score = sum(composite_scores) / math.sqrt(len(composite_scores))
        compositescorefile.write('\t'.join([index, str(final_score), '\n']))
    except KeyError:
        pass
inputscorefile.close()
outputscorefile.close()
compositescorefile.close()
# Return to the directory the script was launched from.
os.chdir(starting_directory)
|
lucasosouza/udacity-carnd-term1 | P3-Behavorial Cloning/neural_net_nvidia.py | # neural_net_nvidia.py
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# dropped
# from keras.utils import np_utils
# from keras.optimizes import AdamOptimizer
def baseline_model():
    """Assemble the NVIDIA end-to-end steering network (Keras 1.x API):
    batch-normalized 66x200x3 input, five ReLU convolutions, then dense
    layers 100 -> 50 -> 10 -> 1 producing the steering angle.
    Compiled with MSE loss and the Adam optimizer.
    """
    model = Sequential()
    # Input normalization over 66x200 RGB frames.
    model.add(BatchNormalization(input_shape=(66, 200, 3)))
    # Convolutional stages as (filters, kernel_size, stride);
    # stride (1, 1) matches the Keras 1.x default subsample.
    conv_specs = [
        (24, 5, (2, 2)),
        (36, 5, (2, 2)),
        (48, 5, (2, 2)),
        (64, 3, (1, 1)),
        (64, 3, (1, 1)),
    ]
    for filters, kernel, stride in conv_specs:
        model.add(Convolution2D(filters, kernel, kernel, subsample=stride))
        model.add(Activation('relu'))
    # Fully connected head.
    model.add(Flatten())
    for units in (100, 50, 10):
        model.add(Dense(units))
        model.add(Activation('relu'))
    # Single regression output: the steering angle.
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
|
lucasosouza/udacity-carnd-term1 | P3-Behavorial Cloning/preprocessing.py | <reponame>lucasosouza/udacity-carnd-term1
# preprocessing.py
import pandas as pd
import numpy as np
from datetime import datetime
import cv2
from PIL import Image
######################################## MAIN FUNCTIONS
def load_data():
    """Read the simulator's driving_log.csv into a DataFrame with named columns."""
    column_names = ['Center Image', 'Left Image', 'Right Image',
                    'Steering Angle', 'Throttle', 'Break', 'Speed']
    return pd.read_csv('driving_log.csv', names=column_names)
def capture_time(df):
    """Add a 'time' column (in place) parsed from each center-image filename."""
    timestamps = df['Center Image'].apply(to_time)
    df['time'] = timestamps
    return df
def load_images(df, camera_position='Center Image', model_name='center'):
    """Load and preprocess camera images from the driving dataframe.

    Args:
        df: driving log with image paths in *camera_position*
        camera_position: column holding the image path to load
        model_name: unused (kept for caller compatibility)
    Returns:
        (X, y): X is a (-1, 16, 32, 3) float array of processed frames,
        y is the 'Steering Angle' series.
    """
    # 1. load images (replaces the path column with pixel arrays, row by row)
    c_load_image = lambda img:load_image(img, camera_position=camera_position)
    df = df.apply(c_load_image, axis=1)
    # 2. separate into X and y
    X = np.array(df[camera_position].values.tolist())
    y = df['Steering Angle']
    ## 3. improve images
    # neural net version
    # NOTE(review): process_image resizes with dsize (16, 32) giving 32x16
    # arrays; reshape relabels the axes as (16, 32, 3) — consistent with
    # drive.py but worth confirming it matches the trained model.
    X = np.array([process_image(img) for img in X]).reshape(-1,16,32,3)
    return X, y
def remove_extreme_angles(df):
    """Drop rows whose steering angle is at (or near) full lock (|angle| >= 0.95).

    Uses ``.loc`` in place of the deprecated-and-removed ``DataFrame.ix``
    indexer (``.loc`` behaves identically here and also works on old pandas).
    """
    mask = (df['Steering Angle'] < .95) & (df['Steering Angle'] > -.95)
    return df.loc[mask, :]
def remove_zeros(df):
    """Drop rows with a steering angle of exactly zero (straight driving).

    Uses ``.loc`` in place of the deprecated-and-removed ``DataFrame.ix``
    indexer (``.loc`` behaves identically here and also works on old pandas).
    """
    return df.loc[df['Steering Angle'] != 0, :]
def smooth_angles(df):
    """Smooth recorded steering angles within each contiguous driving segment.

    Frames more than one second apart start a new segment ('group'); each
    segment's 'Steering Angle' is smoothed into a 'Smoothed Angle' column
    with a Hanning window whose length grows with segment size (cap 30).

    Fixes vs. the original: ``ceil`` was used without ever being imported
    (NameError at runtime) — replaced with ``int(np.ceil(...))`` using the
    already-imported numpy; the removed ``DataFrame.ix`` indexer is
    replaced by ``.loc``.
    NOTE(review): segments at vc.index[5:7] are skipped unsmoothed — this
    looks specific to one recording session; confirm before reuse.
    """
    ## get time differences between consecutive frames (in whole seconds)
    df['time'] = df['Center Image'].apply(to_time)
    df['time_diff'] = (df['time'] - df['time'].shift(1)).fillna(value=0).apply(seconds_in_timedelta)
    ## divide into groups: a gap > 1s starts a new segment
    df['group'] = 0
    i = 1
    for row in df.iterrows():
        if row[1]['time_diff'] > 1:
            i += 1
        df.loc[row[0], 'group'] = i
    ## group by segment size
    vc = df['group'].value_counts()
    # smooth each remaining segment
    for group, count in zip(vc.index, vc):
        if group not in vc.index[5:7]:
            # window size grows with segment size, to a max of 30
            smooth_window_len = min(30, int(np.ceil(count / 2)))
            if smooth_window_len > 3:
                print(group, count, smooth_window_len)
                df.loc[df['group'] == group, 'Smoothed Angle'] = \
                    smooth(df.loc[df['group'] == group, 'Steering Angle'], window_len=smooth_window_len)[:count]
            else:
                # segment too short to smooth meaningfully: copy as-is
                df.loc[df['group'] == group, 'Smoothed Angle'] = df.loc[df['group'] == group, 'Steering Angle']
    return df
######################################## SUPPORT FUNCTIONS
def to_time(s):
    """Parse the timestamp embedded in a center-image filename into a datetime."""
    prefix = '/Users/lucasosouza/Documents/CarND/P3-final/IMG/center_'
    stamp = s.replace(prefix, '').replace('.jpg', '')
    return datetime.strptime(stamp, '%Y_%m_%d_%H_%M_%S_%f')
def seconds_in_timedelta(td):
    """Return the seconds component of *td* (0-86399; ignores the days part)."""
    return td.seconds
def center_scale(arr):
    """Min-max scale *arr* into the range [-0.5, 0.5]."""
    lo, hi = arr.min(), arr.max()
    return (arr - lo) / (hi - lo) - .5
def load_image(row, camera_position='Center Image'):
    """Replace a row's image path with the decoded pixel array.

    Strips the absolute recording-machine prefix so the path resolves
    relative to the current working directory, then loads the image
    with PIL and stores it back into the row.
    """
    recording_prefix = '/Users/lucasosouza/Documents/CarND/P3/'
    relative_path = row[camera_position].replace(recording_prefix, '').strip()
    row[camera_position] = np.asarray(Image.open(relative_path))
    return row
def process_image_nvidia(img, resize=True, yuv=True):
    """Preprocess a frame for the NVIDIA model (expects 66x200x3 input).

    Fix: ``cv2.resize`` takes dsize as (width, height), so (200, 66)
    produces the 66-row by 200-column frame the network was built for;
    the previous (66, 200) argument yielded a transposed 200x66 image
    that does not match the model's input_shape=(66, 200, 3).
    """
    # resize to the network's input resolution
    if resize:
        img = cv2.resize(img, (200, 66))
    # convert RGB to YCrCb (used here as the YUV-style color space)
    if yuv:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)
    return img
def process_image(img, resize=True, yuv=False, histeq=False, adapthisteq=False, edge=False):
    """Optional enhancement pipeline applied to a frame before training/driving.

    With the default flags only the resize runs. NOTE(review): cv2.resize's
    dsize is (width, height), so (16, 32) yields a 32x16 array; the caller's
    reshape(-1, 16, 32, 3) relabels those axes — consistent between training
    (preprocessing) and inference (drive.py), but confirm it is intentional.
    """
    # resize (dsize is (width, height) in OpenCV)
    if resize:
        img = cv2.resize(img, (16,32))
    # convert to YUV space, isolate Y channel (result becomes 2D)
    if yuv:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)[:, :, 0]
    # preprocess Y with global histogram equalization (histeq)
    if histeq:
        img = cv2.equalizeHist(img)
    # preprocess Y with local histogram equalization (adapthisteq)
    if adapthisteq:
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4))
        img = clahe.apply(img)
    # edge detection - subtract a blurred copy from the image (unsharp mask)
    if edge:
        gaussian_filter = cv2.GaussianBlur(img, ksize=(5,5), sigmaX=3)
        img = cv2.addWeighted(img, 1.5, gaussian_filter, -0.5, gamma=1)
    return img #.reshape(32, 32, 1)
# smooth function, adapted from scipy formula at http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
def smooth(x, window_len=11, window='hanning'):
    """Smooth 1D data with a window of the requested size.

    Based on the scipy-cookbook signal-smoothing recipe: the signal is
    extended with reflected copies of itself at both ends (so boundary
    transients are minimized) and convolved with the normalized window.

    Args:
        x: 1D input signal (array-like).
        window_len (int): window length in samples; odd values recommended.
        window (str): 'flat' for a moving average, or the name of any numpy
            window function ('hanning', 'hamming', 'bartlett', 'blackman').

    Returns:
        The smoothed signal. Note len(output) == len(x) + window_len - 1;
        slice with [(window_len/2-1):-(window_len/2)] to match the input.

    Fix vs. the original: the window function is resolved with
    ``getattr(np, window)`` instead of ``eval('np.' + window + '(...)')``
    — same behavior (including AttributeError on unknown names) without
    evaluating an arbitrary string.
    """
    # Reflect-pad both ends by window_len - 1 samples.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':
        # moving average
        w = np.ones(window_len, 'd')
    else:
        w = getattr(np, window)(window_len)
    # Normalize the window so the smoothed signal keeps the input's scale.
    return np.convolve(w / w.sum(), s, mode='valid')
if __name__=='__main__':
    # Build the training dataframe: load the driving log, smooth the
    # steering angles per driving segment, and persist it for model.py.
    df = load_data()
    # df = remove_extreme_angles(df)
    df = smooth_angles(df)
    df.to_pickle('data.p')
|
lucasosouza/udacity-carnd-term1 | P3-Behavorial Cloning/drive.py | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from preprocessing import process_image, center_scale, process_image_nvidia
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Socket.IO server bridging the Udacity simulator, wrapped in a Flask app.
sio = socketio.Server()
app = Flask(__name__)
# Keras model; loaded in the __main__ block before the server starts.
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Per-frame handler: decode the simulator's center-camera image,
    predict a steering angle, and send the control command back.

    The steering_angle/throttle/speed fields read from the payload below
    are immediately overwritten and effectively unused.
    """
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car (base64 JPEG)
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    # img = image_array[None, :, :, :]
    # transform image to the model's (1, 16, 32, 3) input
    img = process_image(image_array).reshape(-1,16,32,3)
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = calc_angle(img)
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = .25
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
def calc_angle(image):
    """Predict the steering angle for one preprocessed frame using the
    global Keras model.

    NOTE(review): the old docstring mentioned averaging multiple camera
    classifiers, but only the single center-camera model is used here.
    """
    return float(model.predict(image, batch_size=1))
@sio.on('connect')
def connect(sid, environ):
    """On simulator connect: log the session id and send a neutral command."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event with the given steering angle and throttle
    (the simulator protocol expects both values as strings)."""
    sio.emit("steer", data={
        'steering_angle': steering_angle.__str__(), # converts float to string
        'throttle': throttle.__str__()
    }, skip_sid=True)
if __name__ == '__main__':
    # load the model architecture from JSON
    with open('model.json', 'r') as jfile:
        model = model_from_json(json.load(jfile))
    # compile (optimizer/loss are irrelevant for inference but required)
    model.compile("adam", "mse")
    # load the trained weights
    model.load_weights('model.h5')
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) |
lucasosouza/udacity-carnd-term1 | P3-Behavorial Cloning/model.py | <filename>P3-Behavorial Cloning/model.py
# learn.py
from preprocessing import *
from neural_net import *
from json import dump, load
import pickle
from keras.models import model_from_json
from keras.optimizers import Adam
from datetime import datetime
def main(first_entry=False, correction=None, lr=1e-3):
    """Incrementally (re)train the steering model from the simulator driving log.

    Args:
        first_entry: start fresh — create empty correction lists and a new model.
        correction: 'left-right' or 'right-left' marks the recording made since
            the previous run as a recovery manoeuvre; only the angles that steer
            back towards the lane are kept from those time windows.
        lr: Adam learning rate used when reloading an existing model.

    Side effects: reads/writes model.json, model.h5 and the pickled
    lr_corrections.p / rl_corrections.p / time_splits.p state files.
    """
    # read driving_log into dataframe
    df = load_data()
    # get time
    df = capture_time(df) # added a time variable to df
    print(df.shape)
    # corrections
    if first_entry:
        # create left and right correction lists
        lr_corrections, rl_corrections, time_splits = [],[], []
        pickle.dump(lr_corrections, open('lr_corrections.p', 'wb'))
        pickle.dump(rl_corrections, open('rl_corrections.p', 'wb'))
    else:
        # load time splits to get last training date
        time_splits = pickle.load(open('time_splits.p', 'rb'))
        # upload left and right correction lists
        lr_corrections = pickle.load(open('lr_corrections.p', 'rb'))
        rl_corrections = pickle.load(open('rl_corrections.p', 'rb'))
    # register start and end period for a left correction
    # NOTE(review): DataFrame.ix below is deprecated/removed in modern pandas;
    # .loc would be needed there — confirm the pinned pandas version.
    if correction == 'left-right':
        duration = df.ix[df['time'] > time_splits[-1], 'time']
        lr_corrections.append((duration.min(), duration.max()))
        print(lr_corrections)
        # save
        pickle.dump(lr_corrections, open('lr_corrections.p', 'wb'))
    # register start and end period for a right correction
    if correction == 'right-left':
        duration = df.ix[df['time'] > time_splits[-1], 'time']
        rl_corrections.append((duration.min(), duration.max()))
        # save
        pickle.dump(rl_corrections, open('rl_corrections.p', 'wb'))
    # save last_date so the next run can identify the new recording window
    time_splits.append(df['time'].max())
    pickle.dump(time_splits, open('time_splits.p', 'wb'))
    # fix left to right corrections (positives) - filter positive angles only
    for lt, ut in lr_corrections:
        df = df[(df['time'] < lt) | (df['time'] > ut) | (df['Steering Angle']>0)]
        print(df.shape)
    # fix right to left corrections (negatives) - filter negative angles only
    for lt, ut in rl_corrections:
        df = df[(df['time'] < lt) | (df['time'] > ut) | (df['Steering Angle']<0)]
        print(df.shape)
    # load images - at the time of train only, don't need to be saved with the dataframe
    X, y = load_images(df)
    # load neural network (reload + recompile with the requested lr, or build new)
    if not first_entry:
        with open('model.json', 'r') as f:
            model = model_from_json(load(f))
        optimizer = Adam(lr=lr)
        model.compile(loss='mse', optimizer=optimizer)
        model.load_weights('model.h5')
    else:
        model = baseline_model()
    # train neural network
    model.fit(X, y, batch_size=20, nb_epoch=10)
    # save neural and network and last training date
    with open('model.json', 'w') as f:
        dump(model.to_json(), f)
    model.save_weights('model.h5')
    # force garbage collection
    import gc; gc.collect()
if __name__ == '__main__':
    # Typical workflow: run once with first_entry=True to bootstrap, then
    # fine-tune with a smaller learning rate and/or tag correction recordings.
    # main(first_entry=True)
    main(lr=1e-4)
    # main(correction='left-right')
    # main(correction='right-left')
|
lucasosouza/udacity-carnd-term1 | exercises/P5-Lessons/sliding_boxes.py | <gh_stars>1-10
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Example frame used by the demo at the bottom of this script.
image = mpimg.imread('bbox-example-image.jpg')
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with each bounding box drawn as a rectangle.

    `bboxes` is an iterable of ((x1, y1), (x2, y2)) corner pairs.
    """
    annotated = np.copy(img)
    # Draw every rectangle on the copy so the input image stays untouched.
    for corner_a, corner_b in bboxes:
        cv2.rectangle(annotated, corner_a, corner_b, color, thick)
    return annotated
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Return sliding-window corner pairs ((x1, y1), (x2, y2)) covering a region.

    Fixes vs. the original:
      * numpy image shape is (rows, cols) == (height, width); the original
        unpacked it as ``w, h``, swapping the axes;
      * bounds are compared against ``None`` explicitly so an explicit 0
        start is honoured (``if not x_start_stop[0]`` treated 0 as unset);
      * corners are emitted in (x, y) order, matching cv2.rectangle and
        ``draw_boxes`` above (the original mixed y/x);
      * windows are generated only where they fit entirely inside the
        region, and the debug prints are removed.

    Args:
        img: image array, only its shape is used.
        x_start_stop / y_start_stop: [start, stop] pixel bounds; None means
            the full image extent on that axis.
        xy_window: (width, height) of each window in pixels.
        xy_overlap: fractional overlap between consecutive windows per axis.

    Returns:
        List of ((x1, y1), (x2, y2)) tuples.
    """
    h, w = img.shape[:2]
    # Default any unset bound to the full image extent.
    x_start = x_start_stop[0] if x_start_stop[0] is not None else 0
    x_stop = x_start_stop[1] if x_start_stop[1] is not None else w
    y_start = y_start_stop[0] if y_start_stop[0] is not None else 0
    y_stop = y_start_stop[1] if y_start_stop[1] is not None else h
    # Step size in pixels derived from window size and overlap fraction.
    x_step = int(xy_window[0] * (1 - xy_overlap[0]))
    y_step = int(xy_window[1] * (1 - xy_overlap[1]))
    window_list = []
    # Slide so that every window lies fully inside [start, stop) on both axes.
    for y0 in range(y_start, y_stop - xy_window[1] + 1, y_step):
        for x0 in range(x_start, x_stop - xy_window[0] + 1, x_step):
            window_list.append(((x0, y0), (x0 + xy_window[0], y0 + xy_window[1])))
    return window_list
# Demo: tile the example image with 128x128 windows at 50% overlap and draw them.
windows = slide_window(image, x_start_stop=[None, None], y_start_stop=[None, None],
                    xy_window=(128, 128), xy_overlap=(0.5, 0.5))
window_img = draw_boxes(image, windows, color=(0, 0, 255), thick=6)
plt.imshow(window_img)
lucasosouza/udacity-carnd-term1 | P3-Behavorial Cloning/neural_net.py | # neuralnet.py
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
# dropped
# from keras.utils import np_utils
def baseline_model():
    """Build and compile the small CNN mapping 16x32 RGB frames to a steering angle."""
    model = Sequential()
    # Input normalisation over raw pixel values.
    model.add(BatchNormalization(input_shape=(16, 32, 3)))
    # Two conv -> tanh -> maxpool stages.
    for n_filters, kernel in ((100, 5), (150, 4)):
        model.add(Convolution2D(n_filters, kernel, kernel))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(2,2)))
    # Fully-connected head ending in one linear unit (the steering angle).
    model.add(Flatten())
    for width in (300, 50):
        model.add(Dense(width))
        model.add(Activation('tanh'))
    model.add(Dense(1))
    # Mean-squared-error regression with Adam.
    model.compile(loss='mse', optimizer=Adam(lr=1e-3))
    return model
|
lucasosouza/udacity-carnd-term1 | P5-Vehicle Detection/support_func.py | import matplotlib.image as mpimg
import cv2
import numpy as np
import glob
def region_of_interest(img, vertices):
    """Black out everything in `img` outside the polygon given by `vertices`.

    Works for both single-channel and multi-channel images; the input is not
    modified.
    """
    # The fill colour must have as many channels as the image.
    if img.ndim > 2:
        fill = (255,) * img.shape[2]
    else:
        fill = 255
    # Paint the polygon onto a black mask, then keep only the masked pixels.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill)
    return cv2.bitwise_and(img, mask)
def select_region(image):
    """Mask `image` down to a trapezoid where the ego lane is expected."""
    h, w = image.shape[:2]
    # Trapezoid: wide along the bottom edge, narrowing towards the horizon
    # (y = 415 was tuned for this project's camera geometry).
    roi = np.array([[(50, h),
                     (int(w/2) - 100, 415),
                     (int(w/2) + 100, 415),
                     (w - 50, h)]],
                   dtype=np.int32)
    return region_of_interest(image, roi)
def get_perspective_tranform(mtx, dist, nx=9, ny=6, offset=300, img_file='test_images/straight_lines1.jpg'):
    """Compute the birds-eye perspective matrix M from a straight-lane sample image.

    Note: the (misspelled) function name is kept for caller compatibility;
    nx/ny are accepted but unused here.
    """
    # Undistort the reference frame with the supplied camera calibration.
    undistorted = cv2.undistort(mpimg.imread(img_file), mtx, dist, None, mtx)
    # Hand-picked source points on the straight lane lines.
    src = np.array([[610, 440],[667, 440],[1050,675],[260,675]], dtype=np.float32)
    # Destination: a rectangle inset `offset` pixels from the left/right edges.
    rows, cols = undistorted.shape[:2]
    dst = np.array([[offset, 0], [cols - offset, 0],
                    [cols - offset, rows], [offset, rows]],
                   dtype=np.float32)
    return cv2.getPerspectiveTransform(src, dst)
def calibrate_camera(nx=9, ny=6, images_folder='camera_cal/calibration*.jpg'):
    """Calibrate the camera from chessboard images.

    Returns the cv2.calibrateCamera tuple (ret, mtx, dist, rvecs, tvecs).
    """
    # Template of 3D chessboard corner coordinates on the z=0 plane.
    template = np.zeros((nx*ny, 3), np.float32)
    template[:, :2] = np.mgrid[:nx, :ny].T.reshape(-1, 2)
    # Accumulate matched 3D/2D point sets over every calibration image.
    object_points = []
    image_points = []
    for fname in glob.glob(images_folder):
        gray = cv2.cvtColor(mpimg.imread(fname), cv2.COLOR_RGB2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            object_points.append(template)
            image_points.append(corners)
    # Fit intrinsics/distortion to all detected boards (image size from the
    # last grayscale frame, as in the original).
    return cv2.calibrateCamera(object_points, image_points, gray.shape[::-1], None, None)
def color_thresh(img, thresh=(90, 255)):
    """Binary mask of pixels whose HLS S-channel lies within `thresh` (inclusive)."""
    # Saturation channel picks up lane paint well under varying light.
    saturation = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    mask = np.zeros_like(saturation)
    mask[(saturation >= thresh[0]) & (saturation <= thresh[1])] = 1
    return mask
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of the scaled absolute Sobel gradient along `orient` ('x' or 'y')."""
    # Derivative direction flags for cv2.Sobel.
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    gradient = np.absolute(cv2.Sobel(img, cv2.CV_64F, dx, dy, ksize=sobel_kernel))
    # Rescale to 0..255 so the threshold is resolution independent.
    scaled = np.uint8(255 * gradient / np.max(gradient))
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of the scaled gradient magnitude sqrt(gx^2 + gy^2)."""
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Rescale to 0..255 before thresholding.
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def dir_thresh(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of gradient direction atan2(|gy|, |gx|) within `thresh` radians."""
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute values fold the four quadrants into [0, pi/2].
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
def preprocess(img):
    """Combine colour and gradient thresholds into a single binary mask.

    NOTE(review): this definition is shadowed by the second ``preprocess``
    defined immediately below (last definition wins), so it is dead code.
    """
    #color
    color_binary = color_thresh(img, thresh=(150,255))
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    #gradient
    gradx_binary = abs_sobel_thresh(gray, orient='x', sobel_kernel=5, thresh=(40,100))
    grady_binary = abs_sobel_thresh(gray, orient='y', sobel_kernel=7, thresh=(40,100))
    mag_binary = mag_thresh(gray, sobel_kernel=15, thresh=(50,100))
    # A pixel passes if any of the individual thresholds fires.
    combined = np.zeros_like(gray)
    combined[(gradx_binary == 1) | (grady_binary == 1) | (mag_binary == 1) | (color_binary==1)] = 1
    return combined
def preprocess(img):
    """Binary lane mask: gradient + colour thresholds restricted to the road ROI.

    This definition overrides the ``preprocess`` defined just above.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    #gradient
    cropped_img = select_region(gray)
    color_binary = color_thresh(img, thresh=(120,255))
    gradx_binary = abs_sobel_thresh(gray, orient='x', sobel_kernel=5, thresh=(20,100))
    grady_binary = abs_sobel_thresh(gray, orient='y', sobel_kernel=7, thresh=(30,100))
    mag_binary = mag_thresh(gray, sobel_kernel=15, thresh=(40,120))
    # NOTE(review): dir_binary is computed but never used in the combination.
    dir_binary = dir_thresh(gray, sobel_kernel=15, thresh=(0.70,1.3))
    # Any threshold may fire, but only inside the region of interest.
    combined = np.zeros_like(gray)
    combined[((gradx_binary == 1) | (grady_binary == 1) | (mag_binary == 1) | (color_binary==1)) & (cropped_img!=0) ] = 1
    return combined
def get_lane_points_next_frame(binary_img, left_fit, right_fit, margin = 100):
    """Collect lane pixels in a band around the previous frame's fitted polynomials.

    Fix: the original shadowed the ``margin`` parameter with a hard-coded
    ``margin = 100`` inside the body, so callers could never change it.

    Args:
        binary_img: binary warped image (nonzero pixels are lane candidates).
        left_fit / right_fit: second-order polynomial coefficients, x = f(y),
            fitted on the previous frame.
        margin: half-width in pixels of the search band around each polynomial.

    Returns:
        (lefty, leftx, righty, rightx) pixel coordinate arrays.
    """
    nonzero = binary_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Evaluate each polynomial once at every nonzero pixel's row.
    left_center = left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2]
    right_center = right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2]
    left_lane_inds = (nonzerox > left_center - margin) & (nonzerox < left_center + margin)
    right_lane_inds = (nonzerox > right_center - margin) & (nonzerox < right_center + margin)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return lefty, leftx, righty, rightx
def get_lane_points_first_frame(binary_img, margin = 100, nwindows=18, minpix=50):
    """Locate lane pixels from scratch with a histogram seed plus sliding windows.

    Fixes vs. the original:
      * dropped the ``out_img`` debug canvas — it was allocated and drawn on
        with cv2.rectangle but never returned (dead work), which also removes
        the cv2 dependency from this function;
      * replaced the deprecated ``np.int`` alias (removed in NumPy >= 1.24)
        with the builtin ``int``.

    Args:
        binary_img: binary warped image.
        margin: half-width of each search window.
        nwindows: number of vertical window slices.
        minpix: minimum pixels found to recentre the next window.

    Returns:
        (lefty, leftx, righty, rightx) pixel coordinate arrays.
    """
    # Histogram of the bottom half; its peaks seed the two lane-base columns.
    histogram = np.sum(binary_img[int(binary_img.shape[0]/2):, :], axis=0)
    # Peaks are searched in fixed bands (tuned for this project's warp
    # geometry) rather than in simple left/right halves.
    leftx_current = np.argmax(histogram[150:400]) + 150
    rightx_current = np.argmax(histogram[900:1150]) + 900
    window_height = int(binary_img.shape[0]/nwindows)
    # Coordinates of all nonzero pixels in the image.
    nonzero = binary_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        win_y_low = binary_img.shape[0] - (window+1)*window_height
        win_y_high = binary_img.shape[0] - window*window_height
        in_rows = (nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
        # Nonzero pixels falling inside this slice's left/right windows.
        good_left_inds = (in_rows & (nonzerox >= leftx_current - margin)
                          & (nonzerox < leftx_current + margin)).nonzero()[0]
        good_right_inds = (in_rows & (nonzerox >= rightx_current - margin)
                           & (nonzerox < rightx_current + margin)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If enough pixels were found, recentre the next window on their mean x.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return lefty, leftx, righty, rightx
def measure_curvature(ploty, plotx, lane_fit, ym_per_pix = 30/720, xm_per_pix = 3.7/700):
    """Radius of curvature (in metres) of a lane line at the bottom of the image.

    Args:
        ploty, plotx: pixel coordinates along the fitted line.
        lane_fit: unused; kept for caller compatibility.
        ym_per_pix / xm_per_pix: metres per pixel along y / x.
    """
    # Evaluate at the largest y, i.e. closest to the car.
    y_eval = np.max(ploty)
    # Refit the second-order polynomial in world (metre) space.
    coeffs = np.polyfit(ploty*ym_per_pix, plotx*xm_per_pix, 2)
    # Standard radius-of-curvature formula for x = f(y).
    numerator = (1 + (2*coeffs[0]*y_eval*ym_per_pix + coeffs[1])**2)**1.5
    return numerator / np.absolute(2*coeffs[0])
def draw_lanes(img, fit_leftx, fit_rightx, fity, Minv):
    """Paint the detected lane polygon back onto the (undistorted) camera image.

    The fitted lane edges live in warped (birds-eye) space; `Minv` maps the
    painted overlay back into camera space before blending.
    """
    overlay = np.zeros_like(img).astype(np.uint8)
    # Polygon boundary: down the left edge, then back up the right edge.
    left_edge = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
    right_edge = np.array([np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
    lane_polygon = np.hstack((left_edge, right_edge))
    # Fill the lane area in green on the warped blank image.
    cv2.fillPoly(overlay, np.int_([lane_polygon]), (0,255, 0))
    # Unwarp the overlay into camera space and blend it onto the frame.
    unwarped = cv2.warpPerspective(overlay, Minv, (img.shape[1], img.shape[0]))
    return cv2.addWeighted(img, 1, unwarped, 0.3, 0)
def debug_lanes(binary_img, left_fitx, right_fitx, ploty, left_fit, right_fit, Minv):
    """Visualise the +-margin search corridors around both fitted lane lines.

    Returns the warped-space debug image; see the NOTE at the bottom about the
    unused unwarped result.
    """
    nonzero = binary_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    # Pixels within +-margin of each polynomial, evaluated per row.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_img, binary_img, binary_img))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    # out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    # out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    # warp the blank back to original image space using inverse perspective matrix (Minv)
    # NOTE(review): `newwarp` is computed but the warped-space `result` is
    # returned instead — confirm which image the caller expects.
    newwarp = cv2.warpPerspective(result, Minv, (binary_img.shape[1], binary_img.shape[0]))
    return result
"""
def process_image(image):
## Undistort image, based on previous camera calibration
undist_img = cv2.undistort(image, mtx, dist, None, mtx)
# warp image
warped_img = cv2.warpPerspective(undist_img, M, (image.shape[1],image.shape[0]),
flags=cv2.INTER_LINEAR)
## Apply gradient threshold
ksize = 3 # choose a larger odd number to smooth gradient measurements
gray = cv2.cvtColor(warped_img, cv2.COLOR_RGB2GRAY)
# apply each of the thresholding functions
gradx_binary = abs_sobel_thresh(gray, orient='x', sobel_kernel=15, thresh=(20,60))
color_binary= color_thresh(warped_img, thresh=(120,255))
combined = np.zeros_like(gray)
combined[(gradx_binary == 1) | (color_binary == 1)] = 1
# gota retrieve the centers of the sliding windows as well, that will help me slide next
# what if I can't capture the center of all the windows?
# this is what has been bothering me
# maybe I should go back to the exercise and take a second look at how they are doing
## Get lane points and fit a curve
y_values, left_lane_x, right_lane_x = get_lane_points(combined)
# fitting a second order polynomial
left_fit = np.polyfit(y_values[:len(left_lane_x)], left_lane_x, 2)
right_fit = np.polyfit(y_values[:len(right_lane_x)], right_lane_x, 2)
# extrapolating points
fity = np.linspace(0, combined.shape[0]-1, combined.shape[0] )
fit_leftx = left_fit[0]*fity**2 + left_fit[1]*fity + left_fit[2]
fit_rightx = right_fit[0]*fity**2 + right_fit[1]*fity + right_fit[2]
left_lane.add_fit(fit_leftx)
avgfit_left = left_lane.average_fit()
right_lane.add_fit(fit_rightx)
avgfit_right = right_lane.average_fit()
## Warp lanes drawn back to original image
return draw_lanes(img, avgfit_left, avgfit_right, fity)
"""
"""
rl.recent_xfitted.append(rightx)
rl.best_fit = np.polyfit(righty, np.mean(rl.recent_xfitted, axis=0)[:len(righty)], 2)
rl.current_fit = right_fit
rl.radius_of_curvature = measure_curvature(righty, rightx, right_fit)
rl.line_base_pos = measure_distance_from_center(rightx[0], int(image.shape[1]/2))
rl.diffs = abs(right_fit - rl.best_fit)
# rl.allx = fit_rightx
# rl.ally = fity
"""
|
LiangJYu/xarray-sentinel | setup.py | <reponame>LiangJYu/xarray-sentinel<filename>setup.py
import setuptools # type: ignore
# Derive the package version from SCM (git tag) metadata via setuptools_scm;
# fall back to "999" when building outside a tagged checkout.
setuptools.setup(use_scm_version={"fallback_version": "999"})
|
LiangJYu/xarray-sentinel | xarray_sentinel/sentinel1.py | <filename>xarray_sentinel/sentinel1.py
import os
import typing as T
import warnings
import fsspec # type: ignore
import numpy as np
import pandas as pd # type: ignore
import rioxarray # type: ignore
import xarray as xr
from xarray_sentinel import conventions, esa_safe
def open_calibration_dataset(calibration_path: esa_safe.PathType) -> xr.Dataset:
    """Build a line/pixel-gridded dataset from a SAFE calibration annotation.

    Raises ValueError when the calibration vectors do not share a common
    pixel axis (i.e. cannot form a regular grid).
    """
    vectors = esa_safe.parse_calibration_vectors(calibration_path)
    azimuth_times = []
    lines = []
    pixel_rows = []
    # One list of per-line arrays for each calibration quantity.
    fields: T.Dict[str, T.List[T.Any]] = {
        "sigmaNought": [], "betaNought": [], "gamma": [], "dn": []
    }
    for vector in vectors:
        azimuth_times.append(vector["azimuthTime"])
        lines.append(vector["line"])
        # Values are stored as space-separated text inside the annotation XML.
        pixel_rows.append(np.fromstring(vector["pixel"]["$"], dtype=int, sep=" "))
        for name, rows in fields.items():
            rows.append(np.fromstring(vector[name]["$"], dtype=float, sep=" "))
    # All vectors must share the same pixel axis to form a regular grid.
    pixel_grid = np.array(pixel_rows)
    if not np.allclose(pixel_grid, pixel_grid[0]):
        raise ValueError(
            "Unable to organise calibration vectors in a regular line-pixel grid"
        )
    data_vars = {"azimuth_time": xr.DataArray(azimuth_times, dims="line")}
    for name, rows in fields.items():
        data_vars[name] = xr.DataArray(rows, dims=("line", "pixel"))
    coords = {
        "line": xr.DataArray(lines, dims="line"),
        "pixel": xr.DataArray(pixel_rows[0], dims="pixel"),
    }
    return xr.Dataset(data_vars=data_vars, coords=coords)  # type: ignore
def open_coordinateConversion_dataset(annotation_path: esa_safe.PathType) -> xr.Dataset:
    """Read the coordinate-conversion records from a product annotation.

    Returns a dataset indexed by azimuth_time carrying the sr0/gr0 origins and
    the srgr/grsr polynomial coefficients (presumably slant<->ground range
    conversions — confirm against the product specification) along the
    ``exponent`` dimension.
    """
    coordinateConversionList = esa_safe.parse_tag_dict(
        annotation_path, "product", ".//coordinateConversionList"
    )
    gr0 = []
    sr0 = []
    azimuthTime = []
    slantRangeTime = []
    srgrCoefficients = []
    grsrCoefficients = []
    for values in coordinateConversionList["coordinateConversion"]:
        sr0.append(values["sr0"])
        gr0.append(values["gr0"])
        azimuthTime.append(values["azimuthTime"])
        slantRangeTime.append(values["slantRangeTime"])
        # Coefficients are stored as space-separated text in the annotation XML.
        srgrCoefficients.append(
            np.fromstring(values["srgrCoefficients"]["$"], dtype=float, sep=" ")
        )
        grsrCoefficients.append(
            np.fromstring(values["grsrCoefficients"]["$"], dtype=float, sep=" ")
        )
    # The exponent axis length is taken from the first record's coefficients;
    # all records are assumed to have the same polynomial degree.
    coords = {
        "azimuth_time": xr.DataArray(azimuthTime, dims="azimuth_time"),
        "exponent": xr.DataArray(np.arange(len(srgrCoefficients[0])), dims="exponent"),
    }
    data_vars = {
        "gr0": xr.DataArray(gr0, dims="azimuth_time"),
        "sr0": xr.DataArray(sr0, dims="azimuth_time"),
        "slant_range_time": xr.DataArray(slantRangeTime, dims=("azimuth_time")),
        "srgr_coefficients": xr.DataArray(
            srgrCoefficients, dims=("azimuth_time", "exponent")
        ),
        "grsr_coefficients": xr.DataArray(
            grsrCoefficients, dims=("azimuth_time", "exponent")
        ),
    }
    return xr.Dataset(data_vars=data_vars, coords=coords,)  # type: ignore
def get_fs_path(
    urlpath_or_path: esa_safe.PathType, fs: T.Optional[fsspec.AbstractFileSystem] = None
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
    """Resolve `urlpath_or_path` to a (filesystem, path) pair.

    When `fs` is supplied it is trusted and the path is returned unchanged;
    otherwise fsspec resolves the URL and exactly one match is required.

    Raises ValueError when the URL resolves to zero or multiple paths.
    """
    if fs is not None:
        return fs, urlpath_or_path
    fs, _, paths = fsspec.get_fs_token_paths(urlpath_or_path)
    if len(paths) == 0:
        raise ValueError(f"file or object not found {urlpath_or_path!r}")
    if len(paths) > 1:
        raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
    return fs, paths[0]
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read geolocation grid points into a 2D (azimuth_time, slant_range_time) dataset.

    Improvement: grid indices are looked up through dicts built once, instead
    of the original per-point ``list.index`` calls (O(n) each, O(n^2) overall),
    and the index pair is computed once per point rather than once per variable.

    Args:
        annotation: path or file object of the product annotation XML.

    Returns:
        Dataset with latitude/longitude/height/incidenceAngle/elevationAngle
        on the (azimuth_time, slant_range_time) grid.
    """
    geolocation_grid_points = esa_safe.parse_geolocation_grid_points(annotation)
    azimuth_time = []
    slant_range_time = []
    line_set = set()
    pixel_set = set()
    # First pass: collect the unique line/pixel axes and their time coordinates.
    for ggp in geolocation_grid_points:
        if ggp["line"] not in line_set:
            azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
            line_set.add(ggp["line"])
        if ggp["pixel"] not in pixel_set:
            slant_range_time.append(ggp["slantRangeTime"])
            pixel_set.add(ggp["pixel"])
    shape = (len(azimuth_time), len(slant_range_time))
    dims = ("azimuth_time", "slant_range_time")
    # NaN-filled grids; any point missing from the annotation stays NaN.
    data_vars = {
        "latitude": (dims, np.full(shape, np.nan)),
        "longitude": (dims, np.full(shape, np.nan)),
        "height": (dims, np.full(shape, np.nan)),
        "incidenceAngle": (dims, np.full(shape, np.nan)),
        "elevationAngle": (dims, np.full(shape, np.nan)),
    }
    line = sorted(line_set)
    pixel = sorted(pixel_set)
    # O(1) coordinate -> grid-index maps (the original used list.index).
    line_index = {value: j for j, value in enumerate(line)}
    pixel_index = {value: i for i, value in enumerate(pixel)}
    # Second pass: scatter each point's values into the grids.
    for ggp in geolocation_grid_points:
        j = line_index[ggp["line"]]
        i = pixel_index[ggp["pixel"]]
        for var in data_vars:
            data_vars[var][1][j, i] = ggp[var]
    ds = xr.Dataset(
        data_vars=data_vars,  # type: ignore
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in sorted(azimuth_time)],
            "slant_range_time": sorted(slant_range_time),
            "line": ("azimuth_time", line),
            "pixel": ("slant_range_time", pixel),
        },
    )
    conventions.update_attributes(ds, group="gcp")
    return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read platform attitude records (quaternions, rates, Euler angles) into a dataset."""
    attitude = esa_safe.parse_attitude(annotation)
    variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
    time: T.List[T.Any] = []
    # Each data variable is a ("time", values) tuple, the form xarray accepts.
    data_vars: T.Dict[str, T.Any] = {var: ("time", []) for var in variables}
    for record in attitude:
        time.append(record["time"])
        for var in variables:
            data_vars[var][1].append(record[var])
    ds = xr.Dataset(
        data_vars=data_vars,  # type: ignore
        coords={"time": [np.datetime64(dt) for dt in time]},
    )
    # Rename to the azimuth_time convention used across the package.
    ds = ds.rename({"time": "azimuth_time"})
    return conventions.update_attributes(ds, group="attitude")
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read orbit state vectors (position/velocity over time) into a dataset.

    The ``reference_system`` attribute is set only when every state vector
    reports the same frame.
    """
    orbit = esa_safe.parse_orbit(annotation)
    shape = len(orbit)
    # Frame of the first state vector, used as the candidate reference system.
    reference_system = orbit[0]["frame"]
    variables = ["position", "velocity"]
    # One list per spatial axis (x, y, z) for each variable.
    data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
    time: T.List[T.Any] = []
    for k in range(shape):
        time.append(orbit[k]["time"])
        data["position"][0].append(orbit[k]["position"]["x"])
        data["position"][1].append(orbit[k]["position"]["y"])
        data["position"][2].append(orbit[k]["position"]["z"])
        data["velocity"][0].append(orbit[k]["velocity"]["x"])
        data["velocity"][1].append(orbit[k]["velocity"]["y"])
        data["velocity"][2].append(orbit[k]["velocity"]["z"])
        if orbit[k]["frame"] != reference_system:
            # NOTE(review): once reference_system becomes None, every later
            # vector also compares unequal, so this warning repeats per vector.
            warnings.warn(
                "reference_system is not consistent in all the state vectors. "
            )
            reference_system = None
    position = xr.Variable(data=data["position"], dims=("axis", "time"))  # type: ignore
    velocity = xr.Variable(data=data["velocity"], dims=("axis", "time"))  # type: ignore
    attrs = {}
    if reference_system is not None:
        attrs.update({"reference_system": reference_system})
    ds = xr.Dataset(
        data_vars={"position": position, "velocity": velocity},
        attrs=attrs,  # type: ignore
        coords={"time": [np.datetime64(dt) for dt in time], "axis": ["x", "y", "z"]},
    )
    # Rename to the azimuth_time convention used across the package.
    ds = ds.rename({"time": "azimuth_time"})
    ds = conventions.update_attributes(ds, group="orbit")
    return ds
def find_avalable_groups(
    ancillary_data_paths: T.Dict[str, T.Dict[str, T.Dict[str, str]]],
    product_attrs: T.Dict[str, T.Any],
    fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.Dict[str, T.Any]]:
    """Map every openable group name to the ancillary paths needed to open it.

    For each subswath this registers the subswath itself, its calibration and
    metadata subgroups, and — when the swath timing lists bursts — one group
    per burst, identified by its centre coordinates and relative orbit.
    (Function name keeps the original misspelling for caller compatibility.)
    """
    # Drop paths that do not exist on the given filesystem.
    ancillary_data_paths = filter_missing_path(ancillary_data_paths, fs)
    groups: T.Dict[str, T.Dict[str, T.Any]] = {}
    for subswath_id, subswath_data_path in ancillary_data_paths.items():
        subswath_id = subswath_id.upper()
        # Skip subswaths with no surviving annotation file.
        if len(subswath_data_path["annotation_path"]) == 0:
            continue
        annotation_path = list(subswath_data_path["annotation_path"].values())[0]
        with fs.open(annotation_path) as annotation_file:
            swath_timing = esa_safe.parse_swath_timing(annotation_file)
        number_of_bursts = swath_timing["burstList"]["@count"]
        burst_ids: T.List[str] = []
        if number_of_bursts > 0:
            # Derive one stable id per burst from the GCP-based burst centres.
            with fs.open(annotation_path) as annotation_file:
                gcp = open_gcp_dataset(annotation_file)
            centres_lat, centres_lon = compute_burst_centres(gcp)
            for k in range(len(centres_lat)):
                burst_ids.append(
                    build_burst_id(
                        centres_lat[k],
                        centres_lon[k],
                        product_attrs["sat:relative_orbit"],
                    )
                )
        # Register the subswath group and all of its subgroups.
        subgroups = list(METADATA_OPENERS.keys()) + ["calibration"] + burst_ids
        groups[subswath_id] = {"subgroups": subgroups, **subswath_data_path}
        groups[f"{subswath_id}/calibration"] = subswath_data_path
        for subgroup in METADATA_OPENERS.keys():
            groups[f"{subswath_id}/{subgroup}"] = subswath_data_path
        for k, burst_id in enumerate(burst_ids):
            # Burst groups additionally record their position within the swath.
            groups[f"{subswath_id}/{burst_id}"] = {
                "burst_position": k,
                **subswath_data_path,
            }
    return groups
def filter_missing_path(
    path_dict: T.Dict[str, T.Any],
    fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.Any]:
    """Return a copy of the nested path mapping with non-existent paths dropped.

    Leaf values are path strings checked with ``fs.exists``; dict values are
    filtered recursively. The input mapping is left unmodified.
    """
    filtered: T.Dict[str, T.Any] = {}
    for key, value in path_dict.items():
        if isinstance(value, dict):
            filtered[key] = filter_missing_path(value, fs)
        elif fs.exists(value):
            filtered[key] = value
    return filtered
def open_root_dataset(
    product_attrs: T.Dict[str, T.Any],
    groups: T.Dict[str, T.Dict[str, T.Collection[str]]],
) -> xr.Dataset:
    """Create the data-less root dataset carrying product attributes and group names."""
    attrs = {**product_attrs, "groups": list(groups)}
    ds = xr.Dataset(attrs=attrs)  # type: ignore
    conventions.update_attributes(ds)
    return ds
def open_swath_dataset(
    manifest_path: esa_safe.PathType,
    measurement_paths: T.Dict[str, esa_safe.PathType],
    subgrups: T.List[int],
    chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
) -> xr.Dataset:
    """Open a whole subswath: one (line, pixel) data variable per polarisation.

    Args:
        manifest_path: SAFE manifest, parsed for the product attributes.
        measurement_paths: polarisation id -> measurement raster path.
        subgrups: group names recorded in the attributes (parameter name keeps
            the original misspelling for caller compatibility).
        chunks: optional dask chunking passed to rioxarray.
    """
    product_attrs, product_files = esa_safe.parse_manifest_sentinel1(manifest_path)
    attrs = dict(product_attrs, groups=subgrups)
    data_vars = {}
    for pol, data_path in measurement_paths.items():
        arr = rioxarray.open_rasterio(data_path, chunks=chunks)
        # Drop the raster band machinery and use plain integer line/pixel axes.
        arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
        arr = arr.assign_coords(
            {
                "x": np.arange(0, arr["x"].size, dtype=int),
                "y": np.arange(0, arr["y"].size, dtype=int),
            }
        )
        arr = arr.rename({"y": "line", "x": "pixel"})
        data_vars[pol.upper()] = arr
    ds = xr.Dataset(
        data_vars=data_vars,  # type: ignore
        attrs=attrs,  # type: ignore
    )
    conventions.update_attributes(ds)
    return ds
def open_burst_dataset(
    manifest_path: esa_safe.PathType,
    burst_position: int,
    measurement_paths: T.Dict[str, esa_safe.PathType],
    annotation_path: esa_safe.PathType,
    chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
) -> xr.Dataset:
    """Open a single burst of a subswath as a dataset with physical coordinates.

    The burst at *burst_position* is cut out of each polarization raster and
    indexed by ``azimuth_time`` (zero-Doppler time) and ``slant_range_time``
    (two-way delay in seconds).
    """
    product_attrs, product_files = esa_safe.parse_manifest_sentinel1(manifest_path)
    # enrich the manifest attributes with timing/sampling info from the annotation
    image_information = esa_safe.parse_image_information(annotation_path)
    product_attrs['azimuthTimeInterval'] = image_information['azimuthTimeInterval']
    product_attrs['slantRangeTime'] = image_information['slantRangeTime']
    product_information = esa_safe.parse_product_information(annotation_path)
    product_attrs['radarFrequency'] = product_information['radarFrequency']
    product_attrs['rangeSamplingRate'] = product_information['rangeSamplingRate']
    downlink_information = esa_safe.parse_downlink_information(annotation_path)
    product_attrs['prf'] = downlink_information['prf']
    swath_timing = esa_safe.parse_swath_timing(annotation_path)
    linesPerBurst = swath_timing["linesPerBurst"]
    samplesPerBurst = swath_timing["samplesPerBurst"]
    first_azimuth_time = pd.to_datetime(
        swath_timing["burstList"]["burst"][burst_position]["azimuthTime"]
    )
    product_attrs['sensingStart'] = first_azimuth_time
    azimuth_time_interval = pd.to_timedelta(
        image_information["azimuthTimeInterval"], "s"
    )
    # one azimuth timestamp per burst line, evenly spaced from the burst start
    azimuth_time = pd.date_range(
        start=first_azimuth_time, periods=linesPerBurst, freq=azimuth_time_interval,
    )
    slantRangeTime = image_information["slantRangeTime"]
    slant_range_sampling = 1 / product_information["rangeSamplingRate"]
    # slant range time is kept as float seconds (not timedelta) for precision
    slant_range_time = np.linspace(
        slantRangeTime,
        slantRangeTime + slant_range_sampling * (samplesPerBurst - 1),
        samplesPerBurst,
    )
    # bursts are stacked along the raster y axis, linesPerBurst rows each
    burst_first_line = burst_position * linesPerBurst
    burst_last_line = (burst_position + 1) * linesPerBurst - 1
    burst_first_pixel = 0
    burst_last_pixel = samplesPerBurst - 1
    data_vars = {}
    for pol, data_path in measurement_paths.items():
        arr = rioxarray.open_rasterio(data_path, chunks=chunks)
        arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
        arr = arr.isel(
            x=slice(burst_first_pixel, burst_last_pixel + 1),
            y=slice(burst_first_line, burst_last_line + 1),
        )
        # keep the original raster line/pixel indices as integer coordinates
        arr = arr.assign_coords(
            {
                "x": np.arange(burst_first_pixel, burst_last_pixel + 1, dtype=int),
                "y": np.arange(burst_first_line, burst_last_line + 1, dtype=int),
            }
        )
        arr = arr.rename({"y": "line", "x": "pixel"})
        data_vars[pol.upper()] = arr
    ds = xr.Dataset(
        data_vars=data_vars,  # type: ignore
        coords={
            "azimuth_time": ("line", azimuth_time),
            "slant_range_time": ("pixel", slant_range_time),
        },
        attrs=product_attrs,  # type: ignore
    )
    ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
    conventions.update_attributes(ds)
    return ds
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
    """Build a stable burst id from the burst centre and the relative orbit.

    The centre is encoded in rounded deci-degrees, e.g. lat 11.85, lon 47.17,
    orbit 168 gives ``R168-N118-E0472``.
    """
    lat_dd = int(round(lat * 10))
    lon_dd = int(round(lon * 10))
    hemisphere = "N" if lat_dd >= 0 else "S"
    meridian = "E" if lon_dd >= 0 else "W"
    return f"R{relative_orbit:03}-{hemisphere}{lat_dd:03}-{meridian}{lon_dd:04}"
def compute_burst_centres(gcp: xr.Dataset) -> T.Tuple[np.ndarray, np.ndarray]:
    """Estimate burst centre latitudes/longitudes from the geolocation grid.

    Averages each pair of consecutive geolocation rows (and all range samples)
    via a rolling window of 2 along ``azimuth_time``, then drops the first
    (partial) window so one centre per row-pair remains.
    """
    gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
    gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
    centre = gc_az_win.mean(["az_win", "slant_range_time"])
    # the first window only contains one row; skip it
    centre = centre.isel(azimuth_time=slice(1, None))
    return centre.latitude.values, centre.longitude.values
def open_dataset(
    product_urlpath: esa_safe.PathType,
    *,
    drop_variables: T.Optional[T.Tuple[str]] = None,
    group: T.Optional[str] = None,
    chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
    """Open a Sentinel-1 SAFE product, or one of its groups, as a dataset.

    :param product_urlpath: path or fsspec urlpath of the product folder,
        ``manifest.safe`` file, or zip archive
    :param drop_variables: currently ignored (a warning is emitted)
    :param group: ``None`` for the root dataset, ``"IW1"`` for a whole subswath,
        or ``"IW1/<subgroup>"`` for metadata, calibration, or a single burst
    :param chunks: dask chunking passed to the raster reader
    :param fs: optional pre-built filesystem; inferred from the urlpath if None
    :raises ValueError: if *group* is not one of the available groups
    """
    fs, manifest_path = get_fs_path(product_urlpath, fs)
    if fs.isdir(manifest_path):
        manifest_path = os.path.join(manifest_path, "manifest.safe")
    base_path = os.path.dirname(manifest_path)
    with fs.open(manifest_path) as file:
        product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
    ancillary_data_paths = esa_safe.get_ancillary_data_paths(base_path, product_files)
    if drop_variables is not None:
        warnings.warn("'drop_variables' is currently ignored")
    groups = find_avalable_groups(ancillary_data_paths, product_attrs, fs=fs)
    if group is None:
        ds = open_root_dataset(product_attrs, groups)
    elif group not in groups:
        raise ValueError(
            f"Invalid group {group}, please select one of the following groups:"
            f"\n{list(groups.keys())}"
        )
    elif "/" not in group:
        # BUGFIX: forward `chunks` — it was previously dropped here, so dask
        # chunking silently had no effect on whole-swath datasets
        ds = open_swath_dataset(
            manifest_path,
            groups[group]["measurement_path"],
            groups[group]["subgroups"],
            chunks=chunks,
        )
    else:
        subswath, subgroup = group.split("/", 1)
        if subgroup in METADATA_OPENERS:
            annotation_path = list(groups[group]["annotation_path"].values())[0]
            with fs.open(annotation_path) as annotation_file:
                ds = METADATA_OPENERS[subgroup](annotation_file)
        elif subgroup == "calibration":
            calibration_path = list(groups[group]["calibration_path"].values())[0]
            # BUGFIX: do not shadow `calibration_path` with the open file object
            with fs.open(calibration_path) as calibration_file:
                ds = open_calibration_dataset(calibration_file)
        else:
            # any other subgroup name is a burst id
            annotation_path = list(groups[group]["annotation_path"].values())[0]
            ds = open_burst_dataset(
                manifest_path,
                measurement_paths=groups[group]["measurement_path"],
                burst_position=groups[group]["burst_position"],
                annotation_path=annotation_path,
                chunks=chunks,
            )
    return ds
class Sentinel1Backend(xr.backends.common.BackendEntrypoint):
    """Xarray backend entrypoint for Sentinel-1 SAFE products."""

    def open_dataset(  # type: ignore
        self,
        filename_or_obj: str,
        drop_variables: T.Optional[T.Tuple[str]] = None,
        group: T.Optional[str] = None,
    ) -> xr.Dataset:
        """Delegate to the module-level :func:`open_dataset`."""
        return open_dataset(filename_or_obj, drop_variables=drop_variables, group=group)

    def guess_can_open(self, filename_or_obj: T.Any) -> bool:
        """Accept paths ending in ``.SAFE`` (case-insensitive, dir or file)."""
        try:
            path = os.fspath(filename_or_obj)
        except TypeError:
            # not a path-like object (e.g. an open file) -> not ours
            return False
        if isinstance(path, str):
            # BUGFIX: strip trailing separators — os.path.splitext() returns an
            # empty extension for "X.SAFE/" because the final path component is
            # empty, so the ".safe/" branch below could never match
            path = path.rstrip("/")
        _, ext = os.path.splitext(path)
        return ext.lower() in {".safe", ".safe/"}
# Map from metadata subgroup name to the function that opens it from an
# annotation XML file; the keys also define which metadata subgroups are
# advertised for every subswath.
METADATA_OPENERS = {
    "gcp": open_gcp_dataset,
    "attitude": open_attitude_dataset,
    "orbit": open_orbit_dataset,
}
|
LiangJYu/xarray-sentinel | tests/test_sentinel1.py | <gh_stars>0
import pathlib
import tempfile
import numpy as np
import xarray as xr
from xarray_sentinel import sentinel1
# Directory holding the reference SAFE/zip products used as test fixtures.
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
def test_filter_missing_path() -> None:
    """Missing leaf paths are dropped; nested dicts are kept, possibly empty."""
    present = (
        DATA_FOLDER
        / "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
    )
    absent = tempfile.mktemp()
    paths = {
        "paths1": {"path1": present, "path2": absent},
        "paths2": {"path1": absent, "path2": absent},
    }
    assert sentinel1.filter_missing_path(paths) == {
        "paths1": {"path1": present},
        "paths2": {},
    }
def test_build_burst_id() -> None:
    """Burst ids encode the relative orbit and the rounded deci-degree centre."""
    burst_id = sentinel1.build_burst_id(
        lat=11.8475875, lon=47.16626783, relative_orbit=168
    )
    assert burst_id == "R168-N118-E0472"
def test_find_avalable_groups() -> None:
    """A subswath yields itself, its metadata subgroups, calibration, and burst ids."""
    base_path = (
        DATA_FOLDER
        / "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
    )
    ancillary_data_paths = {
        "iw1": {
            "annotation_path": {
                "vv": f"{base_path}/annotation/"
                + "s1b-iw1-slc-vv-20210401t052624-20210401t052649-026269-032297-004.xml",
            },
        },
    }
    product_attrs = {"sat:relative_orbit": 168}
    # burst ids below come from the burst centres in the fixture annotation
    expected_groups = {
        "IW1",
        "IW1/attitude",
        "IW1/gcp",
        "IW1/orbit",
        "IW1/calibration",
        "IW1/R168-N471-E0118",
        "IW1/R168-N469-E0118",
        "IW1/R168-N468-E0117",
        "IW1/R168-N466-E0117",
        "IW1/R168-N464-E0116",
        "IW1/R168-N463-E0116",
        "IW1/R168-N461-E0116",
        "IW1/R168-N459-E0115",
        "IW1/R168-N458-E0115",
    }
    groups = sentinel1.find_avalable_groups(ancillary_data_paths, product_attrs)
    assert set(groups) == expected_groups
def test_compute_burst_centres() -> None:
    """Centres are pairwise means of consecutive geolocation rows."""
    gcp = xr.Dataset(
        {
            "latitude": xr.DataArray(
                np.arange(5).reshape(5, 1), dims=("azimuth_time", "slant_range_time")
            ),
            "longitude": xr.DataArray(
                np.arange(5).reshape(5, 1) * 10,
                dims=("azimuth_time", "slant_range_time"),
            ),
        },
        attrs={"burst_count": 4},
    )
    lat, lon = sentinel1.compute_burst_centres(gcp)
    assert np.allclose(lat, [0.5, 1.5, 2.5, 3.5])
    assert np.allclose(lon, [5, 15, 25, 35])
def test_open_dataset() -> None:
    """The root dataset lists the groups; a metadata group opens with real dims."""
    product_path = (
        DATA_FOLDER
        / "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
    )
    expected_groups = {
        "IW1",
        "IW1/gcp",
        "IW1/attitude",
        "IW1/orbit",
        "IW1/calibration",
        "IW1/R168-N471-E0118",
    }
    res = sentinel1.open_dataset(product_path)
    assert isinstance(res, xr.Dataset)
    # superset check: the fixture may expose more bursts than listed here
    assert set(res.attrs["groups"]) >= expected_groups
    res = sentinel1.open_dataset(product_path, group="IW1/orbit")
    assert isinstance(res, xr.Dataset)
    assert res.dims == {"axis": 3, "azimuth_time": 17}
def test_open_dataset_zip() -> None:
    """Same behavior as test_open_dataset, but through an fsspec zip urlpath."""
    zip_path = (
        DATA_FOLDER
        / "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.zip"
    )
    zip_urlpath = f"zip://*/manifest.safe::{zip_path}"
    expected_groups = {
        "IW1",
        "IW1/gcp",
        "IW1/attitude",
        "IW1/calibration",
        "IW1/orbit",
        "IW1/R168-N471-E0118",
    }
    res = sentinel1.open_dataset(zip_urlpath)
    assert isinstance(res, xr.Dataset)
    assert set(res.attrs["groups"]) >= expected_groups
    res = sentinel1.open_dataset(zip_urlpath, group="IW1/orbit")
    assert isinstance(res, xr.Dataset)
    assert res.dims == {"axis": 3, "azimuth_time": 17}
def test_open_dataset_chunks_bursts() -> None:
    """Opening a burst group with chunks yields a chunked 2D array with data."""
    product_path = (
        DATA_FOLDER
        / "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
    )
    res = sentinel1.open_dataset(product_path, group="IW1/R168-N471-E0118", chunks=1000)
    assert isinstance(res, xr.Dataset)
    assert len(res.VH.dims) == 2
    # every chunk except the trailing one has the requested size
    assert np.allclose(res.VH.chunks[0][:-1], 1000)
    assert np.allclose(res.VH.chunks[1][:-1], 1000)
    # the data itself must be readable, not all-NaN
    # (the original repeated this assertion twice — copy-paste duplication removed)
    assert not np.all(np.isnan(res.VH))
|
LiangJYu/xarray-sentinel | xarray_sentinel/conventions.py | <reponame>LiangJYu/xarray-sentinel
"""CF representation of metadata according to
Sentinel-1 Product Specification: S1-RS-MDA-52-7441, DI-MPC-PB, MPC-0240, 3/7, 27/02/2020 See:
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification
"""
import xarray as xr
from . import __version__
# Per-group `title`/`comment` attributes; merged into a dataset's attrs by
# `update_attributes` when the matching group name is passed.
GROUP_ATTRIBUTES = {
    "orbit": {
        "title": "Orbit information used by the IPF during processing",
        "comment": (
            "The dataset contains a sets of orbit state vectors that are updated along azimuth."
            " The values represent the interpolated values used by the IPF"
            " and are derived from the sub-commutated ancillary data from the ISPs"
            " or from an input auxiliary orbit file"
        ),
    },
    "attitude": {
        "title": "Attitude information used by the IPF during processing",
        "comment": (
            "The dataset contains a sets of attitude data records that are updated along azimuth."
            " The values represent the interpolated values used by the IPF"
            " and are derived from the sub-commutated ancillary data from the ISPs"
            " or from an input auxiliary orbit file"
        ),
    },
    "gcp": {
        "title": "Geolocation grid",
        "comment": (
            "The dataset contains geolocation grid point entries for each line/pixel"
            " combination based on a configured resolution."
            " The list contains an entry for each update made along azimuth"
        ),
    },
}
# CF attributes applied per variable name by `update_attributes`; variables not
# listed here are left untouched.
VARIABLE_ATTRIBUTES = {
    "azimuth_time": {"long_name": "zero Doppler azimuth time", "standard_name": "time"},
    # NOTE: `slant_range_time` is not expressed as `np.timedelta64[ns]` in order to keep enough
    # accuracy for interferometric processing, i.e. c * 1ns / 2 ~= 15cm.
    "slant_range_time": {"units": "s", "long_name": "slant range time / two-way delay"},
    "latitude": {"units": "degrees_north", "long_name": "geodetic latitude"},
    "longitude": {"units": "degrees_east", "long_name": "geodetic longitude"},
    "height": {"units": "m", "long_name": "height above sea level"},
    "incidenceAngle": {"units": "°", "long_name": "incidence angle"},
    "elevationAngle": {"units": "°", "long_name": "elevation angle"},
    "q0": {"units": "1", "long_name": "Q0 attitude quaternion"},
    "q1": {"units": "1", "long_name": "Q1 attitude quaternion"},
    "q2": {"units": "1", "long_name": "Q2 attitude quaternion"},
    "q3": {"units": "1", "long_name": "Q3 attitude quaternion"},
    "roll": {"units": "°", "long_name": "platform roll"},
    "pitch": {"units": "°", "long_name": "platform pitch"},
    "yaw": {"units": "°", "long_name": "platform yaw"},
    "wx": {"units": "° s-1", "long_name": "X component of angular velocity vector"},
    "wy": {"units": "° s-1", "long_name": "Y component of angular velocity vector"},
    "wz": {"units": "° s-1", "long_name": "Z component of angular velocity vector"},
    "time": {"standard_name": "time"},
    "x": {"units": "m", "long_name": "position x"},
    "y": {"units": "m", "long_name": "position y"},
    "z": {"units": "m", "long_name": "position z"},
    "vx": {"units": "m s-1", "long_name": "velocity x"},
    "vy": {"units": "m s-1", "long_name": "velocity y"},
    "vz": {"units": "m s-1", "long_name": "velocity z"},
}
def update_attributes(ds: xr.Dataset, group: str = "") -> xr.Dataset:
    """Attach CF convention attributes to *ds* in place and return it."""
    # NOTE: keep the version in sync with the capabilities of CF compliance checkers
    ds.attrs["Conventions"] = "CF-1.7"
    ds.attrs.update(GROUP_ATTRIBUTES.get(group, {}))  # type: ignore
    ds.attrs["history"] = f"created by xarray_sentinel-{__version__}"
    for name in ds.variables:
        ds.variables[name].attrs.update(VARIABLE_ATTRIBUTES.get(str(name), {}))  # type: ignore
    return ds
|
friedrichschoene/pegger | pegger/results.py | class ParsingSuccess:
    def __init__(self, string, rule_type, start_pos, end_pos, children):
        """Record a successful parse of ``string[start_pos:end_pos]``.

        :param string: the full input string that was parsed
        :param rule_type: the rule class that produced this match
        :param start_pos: index of the first matched character
        :param end_pos: index one past the last matched character
        :param children: parse results of the subrules, in match order
        """
        self.string = string
        self.rule_type = rule_type
        self.start_pos = start_pos
        self.end_pos = end_pos
        self.children = children
    @property
    def match_string(self):
        """The exact substring of the input covered by this parse result."""
        return self.string[self.start_pos:self.end_pos]
|
friedrichschoene/pegger | tests/test_rules.py | import pytest
from pegger.grammar import Grammar
from pegger.rules import *
def test_grammar():
    """End-to-end check: PEG predicates recognize the non-context-free a^n b^n c^n."""
    # {a^n b^n c^n | n \in N}
    # A matches a^i b^i, B matches b^j c^j; D accepts the empty string or
    # &(A !'b') a* B !. — the lookahead forces the a/b counts to agree.
    A = Choices()
    A.add_rule(Sequence('a', A, 'b'))
    A.add_rule('ab')
    B = Choices()
    B.add_rules(Sequence('b', B, 'c'), 'bc')
    D = Choices(Not(Any()), Sequence(And(Sequence(A, Not('b'))), ZeroOrMore('a'), B, Not(Any())))
    grammar = Grammar(D)
    assert grammar.match_whole('')
    assert grammar.match_whole('aaabbbccc')
    assert grammar.match_whole('aaaaaabbbbbbcccccc')
    assert not grammar.match_whole('a')
    assert not grammar.match_whole('b')
    assert not grammar.match_whole('c')
    assert not grammar.match_whole('aabbbccc')
    assert not grammar.match_whole('aaabbccc')
    assert not grammar.match_whole('aaabbbcc')
|
friedrichschoene/pegger | pegger/rules.py | <filename>pegger/rules.py
from .results import ParsingSuccess
class Rule:
    """
    Abstract base class every rule inherits from.
    This class should not be used directly to generate a grammar.
    """

    def __init__(self):
        # Cache of parse results keyed by (string, start_pos). It is never
        # evicted, so it grows for the lifetime of the rule.
        self.memoization_dict = {}

    def parse(self, string, start_pos = 0):
        """
        Parses an input string to an abstract syntax tree.
        :param string: The string to parse.
        :param start_pos: Starting position within the string.
        :return: The AST.
        """
        # BUGFIX: key on the string itself rather than hash(string) — two
        # distinct strings may share a hash, and such a collision would
        # silently return the cached result of the wrong input.
        key = (string, start_pos)
        if key in self.memoization_dict:
            return self.memoization_dict[key]
        return self.memoization_dict.setdefault(key, self._parse(string, start_pos))

    def _parse(self, string, start_pos):
        """
        Abstract method which is implemented by the subclasses doing the main parsing magic.
        :param string: The string to parse.
        :param start_pos: Starting position whithin the string.
        :return: The AST.
        """
        raise NotImplementedError

    @staticmethod
    def cast_rule(rule):
        """
        Static method which asserts if a rule is either a rule or can be processed as a rule.
        Casts the given object to a Rule object.
        :param rule: The object to check.
        :return: The Rule object.
        """
        assert isinstance(rule, Rule) or isinstance(rule, str)
        return String(rule) if isinstance(rule, str) else rule
class AliasHasNoRuleException(Exception):
    """Raised when a RuleAlias is parsed before a target rule was assigned."""
    pass
class RuleAlias(Rule):
    """
    Alias for a rule. Contains a rule name and contain the rule that it aliases.
    Note: It is possible but strictly not recommended to have multiple aliases with the same name.
    """

    def __init__(self, name, rule=None):
        super().__init__()
        self.name = name
        if rule is None:
            # alias may be created unresolved and bound later via the setter
            self._rule = None
        else:
            self.rule = rule

    @property
    def rule(self):
        # the aliased rule, or None while still unresolved
        return self._rule

    @rule.setter
    def rule(self, rule):
        # strings are transparently promoted to String rules
        self._rule = self.cast_rule(rule)

    def _parse(self, string, start_pos):
        if self.rule is None:
            # NOTE(review): `self.AliasHasNoRuleException` only resolves if the
            # exception class is nested inside this class hierarchy; if it is a
            # module-level class this raises AttributeError instead — confirm.
            raise self.AliasHasNoRuleException()
        return self.rule.parse(string, start_pos)
class RuleCollection(Rule):
    """
    Abstract base class for rules that consist of multiple subrules.
    """

    def __init__(self, *rules):
        super().__init__()
        self._rules = []
        self.rules = rules

    def add_rule(self, rule):
        """Append a single subrule (strings are cast to String rules)."""
        self._rules.append(self.cast_rule(rule))

    def add_rules(self, *rules):
        """Append several subrules at once."""
        for rule in rules:
            self._rules.append(self.cast_rule(rule))

    @property
    def rules(self):
        return self._rules

    @rules.setter
    def rules(self, rules):
        # BUGFIX: replace the subrule list instead of appending to it — the
        # previous setter accumulated entries, so assigning `obj.rules` twice
        # silently duplicated the earlier rules.
        self._rules = [self.cast_rule(rule) for rule in rules]
class RuleWrapper(Rule):
    """
    Abstract base class for rules that consist of a single subrule.
    """

    def __init__(self, rule):
        super().__init__()
        self.rule = rule

    @property
    def rule(self):
        # the wrapped subrule
        return self._rule

    @rule.setter
    def rule(self, rule):
        # strings are transparently promoted to String rules
        self._rule = self.cast_rule(rule)
class String(Rule):
    """
    Terminal rule that matches an exact literal string.
    """

    def __init__(self, s):
        assert type(s) == str
        super().__init__()
        self.s = s

    def _parse(self, string, start_pos):
        end_pos = start_pos + len(self.s)
        if string[start_pos:end_pos] != self.s:
            return False
        return ParsingSuccess(string, self.__class__, start_pos, end_pos, [])
class Range(Rule):
    """
    Character-range rule, e.g. `[1-9]`; matches one symbol inside the range.
    """

    def __init__(self, start_symbol, end_symbol=None):
        assert len(start_symbol) == 1
        assert end_symbol is None or len(end_symbol) == 1
        super().__init__()
        # a single symbol is the degenerate range [c-c]
        end_symbol = start_symbol if end_symbol is None else end_symbol
        self.start_symbol_ord = ord(start_symbol)
        self.end_symbol_ord = ord(end_symbol)

    def _parse(self, string, start_pos):
        if start_pos >= len(string):
            return False
        if self.start_symbol_ord <= ord(string[start_pos]) <= self.end_symbol_ord:
            return ParsingSuccess(string, self.__class__, start_pos, start_pos + 1, [])
        return False
class Any(Rule):
    """
    Wildcard rule: matches any single symbol (the PEG `.`).
    """

    def _parse(self, string, start_pos):
        if start_pos >= len(string):
            return False
        return ParsingSuccess(string, self.__class__, start_pos, start_pos + 1, [])
class Choices(RuleCollection):
    """
    Prioritized (ordered) choice rule, e.g. `(A | B | C)`.
    """

    def _parse(self, string, start_pos):
        # the first alternative that matches wins; later ones are never tried
        for alternative in self.rules:
            result = alternative.parse(string, start_pos)
            if result:
                return ParsingSuccess(string, self.__class__, start_pos, result.end_pos, [result])
        return False
class Sequence(RuleCollection):
    """
    Sequence of rules that must all match in order, e.g. `A B C`.
    """

    def _parse(self, string, start_pos):
        children = []
        cursor = start_pos
        for part in self.rules:
            result = part.parse(string, cursor)
            if not result:
                # the whole sequence fails as soon as one part fails
                return False
            children.append(result)
            cursor = result.end_pos
        return ParsingSuccess(string, self.__class__, start_pos, cursor, children)
class And(RuleWrapper):
    """
    And-predicate (positive lookahead): checks without consuming, e.g. `&A`.
    """

    def _parse(self, string, start_pos):
        matched = self.rule.parse(string, start_pos)
        if not matched:
            return False
        # zero-width success: report a match but consume no input
        return ParsingSuccess(string, self.__class__, start_pos, start_pos, [])
class Not(RuleWrapper):
    """
    Not-predicate (negative lookahead): succeeds when the subrule fails, e.g. `!A`.
    """

    def _parse(self, string, start_pos):
        matched = self.rule.parse(string, start_pos)
        if matched:
            return False
        # zero-width success when the wrapped rule does not apply
        return ParsingSuccess(string, self.__class__, start_pos, start_pos, [])
class ZeroOrMore(RuleWrapper):
    """
    Zero-or-more repetition, e.g. `A*`; always succeeds.
    """

    def _parse(self, string, start_pos):
        children = []
        cursor = start_pos
        # NOTE(review): a subrule that succeeds without consuming input would
        # loop forever here (same as the original implementation) — confirm
        # that grammars never place a zero-width rule under `*`.
        while True:
            result = self.rule.parse(string, cursor)
            if not result:
                break
            children.append(result)
            cursor = result.end_pos
        return ParsingSuccess(string, self.__class__, start_pos, cursor, children)
class OneOrMore(RuleWrapper):
    """
    One-or-more repetition, e.g. `A+`.

    NOTE: delegates the tail to ZeroOrMore, which costs one extra pass over the
    children list when the results are concatenated.
    """

    def __init__(self, rule):
        super().__init__(rule)
        self.zero_or_more_rule = ZeroOrMore(rule)

    def _parse(self, string, start_pos):
        first = self.rule.parse(string, start_pos)
        if not first:
            return False
        rest = self.zero_or_more_rule.parse(string, first.end_pos)
        if rest:
            return ParsingSuccess(string, self.__class__, start_pos, rest.end_pos, [first] + rest.children)
        return ParsingSuccess(string, self.__class__, start_pos, first.end_pos, [first])
class Optional(RuleWrapper):
    """
    Optional rule, e.g. `A?`; always succeeds.
    """

    def _parse(self, string, start_pos):
        attempt = self.rule.parse(string, start_pos)
        if attempt:
            return ParsingSuccess(string, self.__class__, start_pos, attempt.end_pos, [attempt])
        # consume nothing when the wrapped rule fails
        return ParsingSuccess(string, self.__class__, start_pos, start_pos, [])
|
friedrichschoene/pegger | pegger/grammar_parser.py | # Pairs are used as parsing success objects.
# They consist of a parsing result object (which can be a Boolean) and an end_pos element.
from pegger.grammar import Grammar
from . import RuleAlias, String, Range, Any, Choices, Sequence, And, Not, ZeroOrMore, OneOrMore, Optional
class GrammarDefinitionNotParsableException(Exception):
    """Raised by generate_grammar when the grammar text cannot be parsed."""
    pass
def generate_grammar(string):
    """Parse a textual PEG definition and return a Grammar for its first rule.

    :param string: grammar text of ``<Name> := expression`` definitions
    :return: a Grammar wrapping the alias of the first definition
    :raises GrammarDefinitionNotParsableException: if the text does not parse
    """
    base_rule = _grammar(string, 0)
    if not base_rule:
        raise GrammarDefinitionNotParsableException()
    # create alias to rule dict
    aliases = {}
    visited = set()
    for alias, rule in base_rule[0]:
        alias.rule = rule
        # NOTE(review): duplicate definition names silently overwrite earlier ones
        aliases[alias.name] = alias
    for alias in aliases.values():
        _replace_aliases(alias.rule, aliases, visited)
    # return first alias
    return Grammar(aliases[base_rule[0][0][0].name])
def _replace_aliases(rule, aliases, visited):
    """Recursively swap per-reference RuleAlias placeholders for the named aliases.

    The parser creates a fresh RuleAlias at every reference site; this walk
    replaces each with the single alias from *aliases* of the same name so that
    (possibly recursive) references resolve. *visited* guards against cycles.
    """
    if rule in visited:
        return
    visited.add(rule)
    # wrapper-style rules expose a single `rule` attribute
    if hasattr(rule, 'rule'):
        if isinstance(rule.rule, RuleAlias):
            rule.rule = aliases[rule.rule.name]
        _replace_aliases(rule.rule, aliases, visited)
    # collection-style rules expose a `rules` list
    if hasattr(rule, 'rules'):
        for i, child_rule in enumerate(rule.rules):
            if isinstance(child_rule, RuleAlias):
                child_rule = rule.rules[i] = aliases[child_rule.name]
            _replace_aliases(child_rule, aliases, visited)
def memoize(f):
    """Memoize *f* on its positional arguments (unbounded cache)."""
    cache = {}

    def helper(*args):
        try:
            return cache[args]
        except KeyError:
            # compute once, then reuse; setdefault keeps an entry a recursive
            # call may already have stored
            return cache.setdefault(args, f(*args))

    return helper
### Hierarchical syntax
@memoize
def _grammar(string, start_pos):
    """Grammar <- Spacing Definition+ EndOfFile.

    Returns (list of (alias, rule) pairs, end_pos) on success, None otherwise.
    """
    spacing_success = _spacing(string, start_pos)
    if spacing_success:
        definition_success = _definition(string, spacing_success[1])
        if definition_success:
            definitions = [definition_success[0]]
            # greedily collect the remaining definitions
            while True:
                definition_success_2 = _definition(string, definition_success[1])
                if definition_success_2:
                    definition_success = definition_success_2
                    definitions.append(definition_success_2[0])
                    continue
                break
            end_of_file_success = _end_of_file(string, definition_success[1])
            if end_of_file_success:
                return definitions, end_of_file_success[1]
@memoize
def _definition(string, start_pos):
    """Definition <- Identifier ':=' Expression; yields ((alias, rule), end_pos)."""
    identifier_success = _identifier(string, start_pos)
    if identifier_success:
        left_arrow_success = _substring(string, identifier_success[1], ':=')
        if left_arrow_success:
            expression_success = _expression(string, left_arrow_success[1])
            if expression_success:
                return (identifier_success[0], expression_success[0]), expression_success[1]
@memoize
def _expression(string, start_pos):
    """Expression <- Sequence ('/' Sequence)*; builds a Choices over the alternatives.

    Always succeeds because _sequence always succeeds (possibly empty).
    """
    sequence_success = _sequence(string, start_pos)
    choices = Choices(sequence_success[0])
    if sequence_success:
        while True:
            slash_success = _substring(string, sequence_success[1], '/')
            if slash_success:
                sequence_success_2 = _sequence(string, slash_success[1])
                if sequence_success_2:
                    sequence_success = sequence_success_2
                    choices.add_rule(sequence_success_2[0])
                    continue
                break
            break
    return choices, sequence_success[1]
@memoize
def _sequence(string, start_pos):
    """Sequence <- Prefix*; always succeeds, possibly with an empty Sequence rule."""
    prefix_success = True, start_pos
    sequence = Sequence()
    while True:
        prefix_success_2 = _prefix(string, prefix_success[1])
        if prefix_success_2:
            prefix_success = prefix_success_2
            sequence.add_rule(prefix_success_2[0])
            continue
        break
    return sequence, prefix_success[1]
@memoize
def _prefix(string, start_pos):
    """Prefix <- ('&' / '!')? Suffix; wraps the suffix in And/Not for predicates."""
    and_success = _substring(string, start_pos, '&')
    if and_success:
        suffix_success = _suffix(string, and_success[1])
        if suffix_success:
            return And(suffix_success[0]), suffix_success[1]
    not_success = _substring(string, start_pos, '!')
    if not_success:
        suffix_success = _suffix(string, not_success[1])
        if suffix_success:
            return Not(suffix_success[0]), suffix_success[1]
    suffix_success = _suffix(string, start_pos)
    if suffix_success:
        return suffix_success
@memoize
def _suffix(string, start_pos):
    """Suffix <- Primary ('?' / '*' / '+')?; wraps in Optional/ZeroOrMore/OneOrMore."""
    primary_success = _primary(string, start_pos)
    if primary_success:
        question_success = _substring(string, primary_success[1], '?')
        if question_success:
            return Optional(primary_success[0]), question_success[1]
        star_success = _substring(string, primary_success[1], '*')
        if star_success:
            return ZeroOrMore(primary_success[0]), star_success[1]
        plus_success = _substring(string, primary_success[1], '+')
        if plus_success:
            return OneOrMore(primary_success[0]), plus_success[1]
        return primary_success
@memoize
def _primary(string, start_pos):
    """Primary <- Identifier !':=' / '(' Expression ')' / Literal / Class / '.'."""
    identifier_success = _identifier(string, start_pos)
    if identifier_success:
        # an identifier followed by ':=' starts the NEXT definition, so it is
        # not a reference here
        left_arrow_success = _substring(string, identifier_success[1], ':=')
        if not left_arrow_success:
            return identifier_success
    open_success = _substring(string, start_pos, '(')
    if open_success:
        expression_success = _expression(string, open_success[1])
        if expression_success:
            close_success = _substring(string, expression_success[1], ')')
            if close_success:
                return expression_success[0], close_success[1]
    literal_success = _literal(string, start_pos)
    if literal_success:
        return literal_success
    class_success = _class(string, start_pos)
    if class_success:
        return class_success
    dot_success = _substring(string, start_pos, '.')
    if dot_success:
        return Any(), dot_success[1]
### Lexical syntax
@memoize
def _identifier(string, start_pos):
    """Identifier <- '<' IdentStart IdentCont* '>' Spacing; yields a fresh RuleAlias."""
    if string[start_pos:start_pos + 1] == '<':
        ident_start_success = _ident_start(string, start_pos + 1)
        if ident_start_success:
            ident_cont_success = True, ident_start_success[1]
            while True:
                ident_cont_success_2 = _ident_cont(string, ident_cont_success[1])
                if ident_cont_success_2:
                    ident_cont_success = ident_cont_success_2
                    continue
                break
            if string[ident_cont_success[1]:ident_cont_success[1] + 1] == '>':
                spacing_success = _spacing(string, ident_cont_success[1] + 1)
                if spacing_success:
                    # the alias name is the text between the angle brackets
                    return RuleAlias(string[start_pos + 1:ident_cont_success[1]]), spacing_success[1]
@memoize
def _ident_start(string, start_pos):
    """Match one ASCII letter or underscore; (True, next_pos) or None."""
    if start_pos >= len(string):
        return None
    symbol = string[start_pos]
    if 'A' <= symbol <= 'Z' or 'a' <= symbol <= 'z' or symbol == '_':
        return True, start_pos + 1
    return None
@memoize
def _ident_cont(string, start_pos):
    """Match one ASCII letter, digit, or underscore; (True, next_pos) or None."""
    if start_pos >= len(string):
        return None
    symbol = string[start_pos]
    if 'A' <= symbol <= 'Z' or 'a' <= symbol <= 'z' or '0' <= symbol <= '9' or symbol == '_':
        return True, start_pos + 1
    return None
@memoize
def _literal(string, start_pos):
    """Literal <- ['] (!['] Char)* ['] Spacing / ["] (!["] Char)* ["] Spacing.

    NOTE(review): escape sequences are validated by _char but NOT decoded — the
    resulting String rule matches the raw text including the backslash; confirm
    that this is intended.
    """
    def literal_helper(string, start_pos, quotation_mark):
        # shared body for single- and double-quoted literals
        if string[start_pos:start_pos + 1] == quotation_mark:
            char_success = True, start_pos + 1
            while True:
                if string[char_success[1]:char_success[1] + 1] != quotation_mark:
                    char_success_2 = _char(string, char_success[1])
                    if char_success_2:
                        char_success = char_success_2
                        continue
                break
            if string[char_success[1]:char_success[1] + 1] == quotation_mark:
                spacing_success = _spacing(string, char_success[1] + 1)
                if spacing_success:
                    return String(string[start_pos + 1:char_success[1]]), spacing_success[1]
    literal_success = literal_helper(string, start_pos, '\'')
    if literal_success:
        return literal_success
    literal_success = literal_helper(string, start_pos, '\"')
    if literal_success:
        return literal_success
@memoize
def _class(string, start_pos):
    """Class <- '[' (!']' Range)* ']' Spacing; yields a Choices over the ranges."""
    if string[start_pos:start_pos + 1] == '[':
        range_success = True, start_pos + 1
        choices = Choices()
        while True:
            if string[range_success[1]:range_success[1] + 1] != ']':
                range_success_2 = _range(string, range_success[1])
                if range_success_2:
                    range_success = range_success_2
                    choices.add_rule(range_success_2[0])
                    continue
            break
        if string[range_success[1]:range_success[1] + 1] == ']':
            spacing_success = _spacing(string, range_success[1] + 1)
            if spacing_success:
                return choices, spacing_success[1]
@memoize
def _range(string, start_pos):
    """Range <- Char '-' Char / Char; yields a Range rule.

    NOTE(review): the bounds are read at fixed offsets (start_pos and
    start_pos + 2), but _char may consume TWO characters for an escape
    sequence — an escaped bound (e.g. ``\\]-z``) would pick up the wrong
    characters; confirm whether escapes in ranges need to be supported.
    """
    char_success = _char(string, start_pos)
    if char_success:
        if string[char_success[1]:char_success[1] + 1] == '-':
            char2_success = _char(string, char_success[1] + 1)
            if char2_success:
                return Range(string[start_pos], string[start_pos + 2]), char2_success[1]
        return Range(string[start_pos]), char_success[1]
@memoize
def _char(string, start_pos):
    """Match one (possibly escaped) character; (True, next_pos) or None.

    An escape consumes two positions; any other single character consumes one.
    """
    if string[start_pos:start_pos + 1] == '\\':
        if len(string) > start_pos + 1:
            escaped_char_code = ord(string[start_pos + 1:start_pos + 2])
            # 110 = n, 114 = r, 116 = t, 39 = ', 34 = ", 91 = [, 93 = ], 92 = \
            if escaped_char_code in [110, 114, 116, 39, 34, 91, 93, 92]:
                return True, start_pos + 2
    else:
        if len(string[start_pos:start_pos + 1]):
            return True, start_pos + 1
@memoize
def _substring(string, start_pos, substring):
    """Match *substring* literally, then consume any trailing spacing."""
    if string[start_pos:start_pos + len(substring)] != substring:
        return None
    spacing_success = _spacing(string, start_pos + len(substring))
    if spacing_success:
        return True, spacing_success[1]
    return None
@memoize
def _spacing(string, start_pos):
    """Consume any run of whitespace and comments; always succeeds."""
    pos = start_pos
    while True:
        step = _space(string, pos)
        if step is None:
            step = _comment(string, pos)
        if step is None:
            return True, pos
        pos = step[1]
@memoize
def _comment(string, start_pos):
    """Comment <- '#' (!EndOfLine .)* EndOfLine?; succeeds with or without a newline."""
    if string[start_pos:start_pos + 1] == '#':
        comment_success = True, start_pos + 1
        while True:
            # consume everything up to the end of line or end of input
            if not _end_of_line(string, comment_success[1]):
                if not _end_of_file(string, comment_success[1]):
                    comment_success = True, comment_success[1] + 1
                    continue
            break
        end_of_line_success = _end_of_line(string, comment_success[1])
        if end_of_line_success:
            return True, end_of_line_success[1]
        return True, comment_success[1]
@memoize
def _space(string, start_pos):
    """Match a single space, tab, or end-of-line; (True, next_pos) or None."""
    if string[start_pos:start_pos + 1] in (' ', '\t'):
        return True, start_pos + 1
    # _end_of_line already returns the (True, next_pos) / None shape we need
    return _end_of_line(string, start_pos)
@memoize
def _end_of_line(string, start_pos):
    """Match '\\r\\n', '\\n', or '\\r' (longest first); (True, next_pos) or None."""
    if string.startswith('\r\n', start_pos):
        return True, start_pos + 2
    if string[start_pos:start_pos + 1] in ('\n', '\r'):
        return True, start_pos + 1
    return None
def _end_of_file(string, start_pos):
if len(string) <= start_pos:
return True, start_pos
|
friedrichschoene/pegger | tests/test_grammar_parser.py | import pytest
from pegger import grammar_parser
def test_end_of_file():
    """_end_of_file succeeds at or beyond the end of the input only."""
    string = ''
    assert grammar_parser._end_of_file(string, 0)
    assert grammar_parser._end_of_file(string, 5)
    string = 'hello'
    assert not grammar_parser._end_of_file(string, 0)
    assert not grammar_parser._end_of_file(string, 1)
    end_of_file_success = grammar_parser._end_of_file(string, 5)
    assert end_of_file_success
    assert end_of_file_success[1] == 5
def test_end_of_line():
    """_end_of_line matches a newline character and reports the position after it."""
    assert not grammar_parser._end_of_line('', 0)
    string = 'hello\nhello'
    end_of_line_success = grammar_parser._end_of_line(string, 5)
    assert end_of_line_success
    assert end_of_line_success[1] == 6
    assert not grammar_parser._end_of_line(string, 11)
    assert not grammar_parser._end_of_line(string, 4)
def test_space():
    """_space matches exactly one space or tab character."""
    string = 'abc abc\t'
    space_success = grammar_parser._space(string, 3)
    assert space_success
    assert space_success[1] == 4
    space_success = grammar_parser._space(string, 7)
    assert space_success
    assert space_success[1] == 8
    assert not grammar_parser._space(string, 0)
def test_comment():
    """_comment consumes '#' to end-of-line (or end-of-input) inclusive."""
    string = '# this is /com\\ment\nand a newline'
    assert grammar_parser._comment(string, 0)[1] == 20
    assert not grammar_parser._comment(string, 1)
    assert not grammar_parser._comment(string, 20)
    string = '# this is /com\\ment\r\nand a newline'
    assert grammar_parser._comment(string, 0)[1] == 21
    string = 'no comment#comment'
    assert grammar_parser._comment(string, 10)[1] == 18
def test_spacing():
    """_spacing consumes runs of whitespace/comments and always succeeds."""
    string = 'this is\ta \t\t test string #with comment'
    assert grammar_parser._spacing(string, 0)[1] == 0
    assert grammar_parser._spacing(string, 4)[1] == 5
    assert grammar_parser._spacing(string, 7)[1] == 8
    assert grammar_parser._spacing(string, 9)[1] == 13
    assert grammar_parser._spacing(string, 17)[1] == 18
    assert grammar_parser._spacing(string, 24)[1] == 40
def test_substring():
    """_substring matches a literal substring at the given offset."""
    string = 'string with substring'
    assert grammar_parser._substring(string, 0, 'str')[1] == 3
    # NOTE(review): end positions suggest _substring also consumes trailing
    # spacing ('string' is 6 chars but the match ends at 7) — see parser.
    assert grammar_parser._substring(string, 0, 'string')[1] == 7
    assert not grammar_parser._substring(string, 6, 'with')
    assert grammar_parser._substring(string, 7, 'with')[1] == 12
    assert grammar_parser._substring(string, 12, 'substring')[1] == 21
def test_char():
    """_char matches one grammar character; escape sequences span two input chars."""
    string = 'test\\n\\r\\"'
    assert grammar_parser._char(string, 0)[1] == 1
    assert grammar_parser._char(string, 4)[1] == 6
    assert grammar_parser._char(string, 6)[1] == 8
    assert grammar_parser._char(string, 8)[1] == 10
    assert not grammar_parser._char(string, 10)
def test_range():
    """_range matches either a single character or an 'a-z' style range."""
    string = 'a b-g A-Z '
    assert grammar_parser._range(string, 0)[1] == 1
    assert grammar_parser._range(string, 1)[1] == 2
    assert grammar_parser._range(string, 2)[1] == 5
    assert grammar_parser._range(string, 6)[1] == 9
def test_class():
    """_class matches a bracketed character class, consuming trailing spacing."""
    string = 'test[a][ac-d][A-Z1-9] \t [A-Eabc]'
    assert not grammar_parser._class(string, 0)
    assert grammar_parser._class(string, 4)[1] == 7
    assert grammar_parser._class(string, 7)[1] == 13
    # The ' \t ' run after '[A-Z1-9]' is consumed as part of the match.
    assert grammar_parser._class(string, 13)[1] == 24
    assert grammar_parser._class(string, 24)[1] == 32
def test_literal():
    """_literal matches single- or double-quoted strings with escape support."""
    string = 'test "teststring \' test \\"" \'test\\\' "" \''
    assert not grammar_parser._literal(string, 0)
    assert not grammar_parser._literal(string, 4)
    assert grammar_parser._literal(string, 5)[1] == 28
    assert grammar_parser._literal(string, 28)[1] == 40
def test_identifier():
    """_identifier matches '<Name>' rule references; names may not start with a digit."""
    string = '<test> <Test123>\t<123Test> <___abc> <123_test> <_> <Test test>'
    assert grammar_parser._identifier(string, 0)[1] == 7
    assert not grammar_parser._identifier(string, 1)
    assert grammar_parser._identifier(string, 7)[1] == 17
    # '<123Test>' and '<123_test>' are rejected: digit-initial names.
    assert not grammar_parser._identifier(string, 17)
    assert grammar_parser._identifier(string, 27)[1] == 36
    assert not grammar_parser._identifier(string, 36)
    assert grammar_parser._identifier(string, 47)[1] == 51
    # Spaces are not allowed inside an identifier.
    assert not grammar_parser._identifier(string, 51)
def test_primary():
    """_primary matches the atomic PEG forms: identifier (not followed by ':='),
    literal, class, parenthesised expression, and the any-char dot."""
    string = '<Test> <_test123> "string\n " <ident> := [12345] ([1-4][1-9]) . '
    assert grammar_parser._primary(string, 0)[1] == 7
    assert not grammar_parser._primary(string, 1)
    assert grammar_parser._primary(string, 7)[1] == 18
    assert grammar_parser._primary(string, 18)[1] == 29
    # '<ident> :=' begins a definition, so it is not a primary.
    assert not grammar_parser._primary(string, 29)
    assert grammar_parser._primary(string, 44)[1] == 52
    assert grammar_parser._primary(string, 52)[1] == 65
    assert grammar_parser._primary(string, 65)[1] == 68
def test_suffix():
    """_suffix matches a primary optionally followed by '?', '*' or '+'."""
    string = '[0]* .+ "string"? <Test> <123test>'
    assert grammar_parser._suffix(string, 0)[1] == 5
    assert grammar_parser._suffix(string, 5)[1] == 8
    assert grammar_parser._suffix(string, 8)[1] == 18
    # The repetition operator itself is optional.
    assert grammar_parser._suffix(string, 18)[1] == 25
    assert not grammar_parser._suffix(string, 25)
    # An operator without a preceding primary is not a suffix.
    assert not grammar_parser._suffix(string, 7)
    assert not grammar_parser._suffix(string, 10)
def test_prefix():
    """_prefix matches an optional '&' or '!' predicate before a suffix."""
    string = '! <test> &"2" !test <Test>? !<test>+'
    assert grammar_parser._prefix(string, 0)[1] == 13
    assert grammar_parser._prefix(string, 13)[1] == 18
    # '!' followed by a bare word (no valid suffix) fails.
    assert not grammar_parser._prefix(string, 18)
    assert grammar_parser._prefix(string, 24)[1] == 32
    assert grammar_parser._prefix(string, 32)[1] == 40
def test_sequence():
    """_sequence matches a possibly-empty run of prefixes, stopping before '/' and ':='."""
    string = '[123]<String> "test" <1> := <String> [5-78-9] /\t<test>'
    assert grammar_parser._sequence(string, 0)[1] == 22
    # Empty sequences always succeed with the position unchanged.
    assert grammar_parser._sequence(string, 22)[1] == 22
    assert grammar_parser._sequence(string, 23)[1] == 23
    assert grammar_parser._sequence(string, 26)[1] == 26
    assert grammar_parser._sequence(string, 29)[1] == 47
    assert grammar_parser._sequence(string, 47)[1] == 47
    assert grammar_parser._sequence(string, 48)[1] == 48
    assert grammar_parser._sequence(string, 49)[1] == 55
def test_expression():
    """_expression matches one or more sequences separated by '/'."""
    string = '[123]<String> "test" <1> := <String> [5-78-9] /\t<test> '
    assert grammar_parser._expression(string, 0)[1] == 22
    # Includes the '/'-separated alternative and trailing spacing.
    assert grammar_parser._expression(string, 29)[1] == 60
    assert grammar_parser._expression(string, 1)[1] == 1
    assert grammar_parser._expression(string, 49)[1] == 60
def test_definition():
    """_definition matches '<Name> := expression'."""
    string = '<A> := "string" / (<B>? <C>\t<abc>)+'
    assert grammar_parser._definition(string, 0)[1] == len(string)
    # A definition must start at the identifier, not mid-expression.
    assert not grammar_parser._definition(string, 7)
    # Newlines after ':=' are allowed; the trailing '123' is not consumed.
    string = '<B> := \n\n\r\n\t <b> <a> 123'
    assert grammar_parser._definition(string, 0)[1] == len(string) - 3
def test_grammar():
    """_grammar matches a full grammar: one or more definitions covering the input."""
    string = '<A> := "a" <A> "b" / ""\n' \
             '<B> := "b" <B> "c" / ""\n' \
             '<D> := &(<A>!"b")"a"*<B>!.'
    assert grammar_parser._grammar(string, 0)[1] == len(string)
def test_generate_grammar():
    """generate_grammar builds a runnable Grammar object from PEG source text.

    The first grammar is the classic non-context-free language a^n b^n c^n,
    expressed with PEG syntactic predicates; the second is balanced parentheses.
    """
    string = '<D> := !. / &(<A>!"b")"a"*<B>!.\n' \
             '<A> := "a" <A> "b" / "ab"\n' \
             '<B> := "b" <B> "c" / "bc"\n'
    grammar = grammar_parser.generate_grammar(string)
    for i in range(10):
        assert grammar.match_whole('a'*i+'b'*i+'c'*i)
    for i in range(1, 10):
        # Unbalanced counts must be rejected outright.
        assert not grammar.match('a'*i+'b'*(i-1)+'c'*(i-1))
    string = '<A> := "(" <A> ")" <A> / ""'
    grammar = grammar_parser.generate_grammar(string)
    assert grammar.match_whole('(())()(((()))())(())')
    assert not grammar.match_whole('(())()((((()))())(())')
|
friedrichschoene/pegger | pegger/grammar.py | <gh_stars>1-10
class Grammar:
    """Convenience wrapper around a base rule, exposing parse/match helpers."""

    def __init__(self, base_rule):
        # The entry-point rule of the grammar; everything delegates to it.
        self.base_rule = base_rule

    def parse(self, string):
        """Parse *string* and return the resulting AST (falsy on failure).

        Thin delegation to the base rule's own ``parse``.
        """
        return self.base_rule.parse(string)

    def match(self, string):
        """Return True when some prefix of *string* is accepted by the grammar."""
        return bool(self.parse(string))

    def match_whole(self, string):
        """Return True when *string* is accepted in its entirety.

        Succeeds only if parsing succeeds AND the parse consumed every
        character of the input.
        """
        result = self.parse(string)
        if not result:
            return False
        return result.end_pos == len(string)
|
friedrichschoene/pegger | pegger/__init__.py | <filename>pegger/__init__.py
from .rules import RuleAlias, String, Range, Any, Choices, Sequence, And, Not, ZeroOrMore, OneOrMore, Optional
|
munirjojoverge/rl_AD_urban_baselines | tests/test_utils.py | import numpy as np
from urban_AD_env.utils import rotated_rectangles_intersect
def test_rotated_rectangles_intersect():
    """Intersection test for rotated rectangles ((center, length, width, heading)).

    Covers: a near-overlap recorded from a simulation, axis-aligned overlap,
    clear vertical separation, a corner near-miss, and a rotated rectangle
    whose corner reaches into its neighbour.
    """
    assert rotated_rectangles_intersect(([12.86076812, 28.60182391], 5.0, 2.0, -0.4675779906495494),
                                        ([9.67753944, 28.90585412], 5.0, 2.0, -0.3417019364473201))
    assert rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([0, 1], 2, 1, 0))
    assert not rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([0, 2.1], 2, 1, 0))
    assert not rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([1, 1.1], 2, 1, 0))
    # Rotating the first rectangle by 45 degrees makes its corner reach the second.
    assert rotated_rectangles_intersect(([0, 0], 2, 1, np.pi/4), ([1, 1.1], 2, 1, 0))
|
munirjojoverge/rl_AD_urban_baselines | settings.py | <reponame>munirjojoverge/rl_AD_urban_baselines
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: January 10, 2019
# Author: <NAME>
#######################################################################
# Directory layout for training runs.
run_folder = './run/'
logs_folder = run_folder + 'logs'      # training / TensorBoard logs
models_folder = run_folder + 'models'  # saved model checkpoints
# Directories that must exist before a run starts (created by the caller).
req_dirs = [run_folder, logs_folder, models_folder]
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/vehicle/vehicle_params.py | <filename>urban_AD_env/vehicle/vehicle_params.py
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: January 10, 2019
# Author: <NAME>
#######################################################################
import math
#### MTCS CONFIG
# NOTE: SEED was assigned twice in the original file (777, then 7777 a few
# lines later); the first assignment was dead code and has been removed.
# The effective value is unchanged.
SEED = 7777
dt = 0.5  # MTCS time step (secs) for every action taken
HORIZON = 3.0  # Desired Simulation/prediction Horizon in Seconds
BEHAVIOR_PLANNER_dt = 1.0  # We will "look" at the diving scene every "this time" and plan for the next "HORIZON"
MTCS_DEPTH = int(HORIZON / dt)  # ex: 3sec/0.5 = 6
NUM_ACTIONS_TO_SAMPLE = 2
NUM_ACTIONS_STEPS = 5
# Since our action space represents "Changes" (in steering, i.e steering rate,
# and acceleration, i.e jerk), we allow "NUM_ACTIONS_STEPS" to the left and the
# same to the right from the actual state.
# For example we might be accelerating at 1m/s2 and the recommended action might be:
#   to stay, i.e 0 steps change from where we are,
#   to decelerate 2 steps (the size of the step is determined by the dt used),
#   or to accelerate 1 step.
# With "NUM_ACTIONS_STEPS = 10" we would have 21 possible actions:
# -10, -9, ... 0, 1, 2, ... 10 (step increments).

### VEHICLE PARAMETERS
# Steering angle max change per time step: hardware-limited by the power steering.
MAX_STEER_WHEEL_ANGLE_RATE = 6.981317008  # RADS/S  (= 400.00 DEGREES/S)
STEERING_WHEEL_TO_TIRE_RATIO = 16
MAX_STEER_ANGLE_RATE = MAX_STEER_WHEEL_ANGLE_RATE / STEERING_WHEEL_TO_TIRE_RATIO
MAX_STEER_WHEEL_ANGLE = 9.42478  # Rads = 540 Degrees
MAX_STEER_ANGLE = MAX_STEER_WHEEL_ANGLE / STEERING_WHEEL_TO_TIRE_RATIO

# Steering angle rate regions by "driving style"; for now three equal regions.
# MAX_STEER_ANGLE_RATE => MSAR
MSAR_GENTLE = MAX_STEER_ANGLE_RATE / 3
MSAR_NORMAL = MSAR_GENTLE * 2
MSAR_SPORTY = MSAR_GENTLE * 3
# Used to generate the "change" command (multiplied by dt to get an angle change).
MSAR = [MSAR_GENTLE, MSAR_NORMAL, MSAR_SPORTY]

# Steering resolution: minimum angle change the power steering can perform.
STEERING_RESOLUTION = 0.00174533  # Radians, 0.1 DEGREES

# Acceleration max change per time step (vehicle/hardware limits).
MAX_ACCEL = 2.94
MAX_DECEL = -6.00

# Jerk comfort limits from: "Determination of Minimum Horizontal Curve Radius
# Used in the Design of Transportation Structures, Depending on the Limit Value
# of Comfort Criterion Lateral Jerk" (Turkey).
MAX_LAT_JERK_GENTLE = 0.3  # m/s^2. 0.0 <= max lat jerk < 0.3
MAX_LAT_JERK_NORMAL = 0.9  # m/s^2  0.3 <= max lat jerk < 0.9
MAX_LAT_JERK_SPORTY = 1.5  # m/s^2  0.9 <= max lat jerk < 1.5

MAX_LON_JERK_GENTLE = 0.5  # m/s^2. 0.0 <= max lon jerk < 0.5
MAX_LON_JERK_NORMAL = 1.1  # m/s^2  0.3 <= max lon jerk < 0.9
MAX_LON_JERK_SPORTY = 2.0  # m/s^2  0.9 <= max lon jerk < 1.5

# For simplicity we work with the jerk magnitude.
MAX_JERK_GENTLE = math.sqrt(MAX_LAT_JERK_GENTLE**2 + MAX_LON_JERK_GENTLE**2)
MAX_JERK_NORMAL = math.sqrt(MAX_LAT_JERK_NORMAL**2 + MAX_LON_JERK_NORMAL**2)
MAX_JERK_SPORTY = math.sqrt(MAX_LAT_JERK_SPORTY**2 + MAX_LON_JERK_SPORTY**2)
# Used to generate the "change" command (multiplied by dt to get a jerk change).
MAX_JERK = [MAX_JERK_GENTLE, MAX_JERK_NORMAL, MAX_JERK_SPORTY]

NUM_EPISODES = int(3e3)  # Max number of episodes
T_MAX = int(1e3)  # Max number of training steps per episode (safety cap in case
                  # the episode never terminates — which would indicate a SIM failure)

MCTS_NUM_SIMS = int(NUM_ACTIONS_TO_SAMPLE**MTCS_DEPTH)
MEMORY_SIZE = int(1e6)  # Experience replay memory capacity
TURNS_UNTIL_TAU0 = 10  # turn on which iteration starts playing deterministically
CPUCT = 1
EPSILON = 0.25  # Additional exploration is achieved by adding Dirichlet noise to the
# prior probabilities in the root node s0, specifically P(s, a) = (1 - eps)p_a + eps*eta_a,
# where eta ~ Dir(0.03) and eps = 0.25; this noise ensures that all moves may be tried,
# but the search may still overrule bad moves.
ALPHA = 0.8  # Dirichlet noise concentration (scaling) parameter

#### RETRAINING
BATCH_SIZE = 256
EPOCHS = 1
REG_CONST = 0.0001
LEARNING_RATE = 1e-3
MOMENTUM = 0.9
TRAINING_LOOPS = 1

INPUT_SHAPE = (3,12)  # but we will have n frames
INPUT_FRAMES = 3
# Table of steering changes vs accel changes — hence the square.
ACTION_SHAPE = ((2 * NUM_ACTIONS_STEPS) + 1) ** 2

LEARNING_STARTS_RATIO = 1/500  # Steps before training starts = memory capacity * this ratio
LEARNING_FREQUENCY = 20  # Steps before we sample from the replay memory again
reward_clip = 0.5  # Reward clipping (0 to disable)

HIDDEN_CNN_LAYERS = [
    {'filters':75, 'kernel_size': (4,4)}
    , {'filters':75, 'kernel_size': (4,4)}
    , {'filters':75, 'kernel_size': (4,4)}
    , {'filters':75, 'kernel_size': (4,4)}
    , {'filters':75, 'kernel_size': (4,4)}
    , {'filters':75, 'kernel_size': (4,4)}
    ]

#### EVALUATION
EVAL_NUM_EPISODES = 20
SCORING_THRESHOLD = 1.3
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/utils.py | <filename>urban_AD_env/envs/utils.py<gh_stars>1-10
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 7, 2019
# Author: <NAME>
#######################################################################
from __future__ import division, print_function, absolute_import
import numpy as np
def goal_distance(goal_a, goal_b, weights=None, scale_factor=1, p=0.5):
    """Euclidean distance between two goal vectors along the last axis.

    ``weights``, ``scale_factor`` and ``p`` are kept for interface
    compatibility with an earlier weighted p-norm formulation
    (-((scale_factor * |a - b|) . weights) ** p) but are currently unused.
    """
    assert goal_a.shape == goal_b.shape
    difference = goal_a - goal_b
    return np.linalg.norm(difference, axis=-1)
def rad(deg):
    """Convert an angle from degrees to radians."""
    return (deg * np.pi) / 180
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/parking_env.py | from __future__ import division, print_function, absolute_import
import numpy as np
import pandas
from gym import GoalEnv, spaces
from urban_AD_env.envs.abstract import AbstractEnv
from urban_AD_env.envs.graphics import EnvViewer
from urban_AD_env.road.lane import StraightLane, LineType
from urban_AD_env.road.road import Road, RoadNetwork
from urban_AD_env.vehicle.dynamics import Vehicle, Obstacle
from urban_AD_env.vehicle.control import MDPVehicle
class ParkingEnv(AbstractEnv, GoalEnv):
    """
    A continuous control environment.

    It implements a reach-type task, where the agent observes their position and velocity and must
    control their acceleration and steering so as to reach a given goal.

    Credits to <NAME> for the idea and initial implementation.
    """

    # Scaling applied in step() to the policy outputs, which live in [-1, 1].
    STEERING_RANGE = np.pi / 4
    ACCELERATION_RANGE = 5.0

    # Individual reward terms combined in compute_reward().
    COLLISION_REWARD = -1.0
    MOVING_REWARD = +0.2
    OVER_OTHER_PARKING_SPOT_REWARD = -0.2
    REACHING_GOAL_REWARD = +1.0

    PARKING_MAX_VELOCITY = 7.0 # m/s

    # Observations are divided by OBS_SCALE; rewards by REWARD_SCALE.
    OBS_SCALE = 100
    REWARD_SCALE = np.absolute(COLLISION_REWARD)
    # Per-feature weights for the goal-distance reward, in the same order as
    # OBSERVATION_FEATURES: x, y, vx, vy, cos_h, sin_h.
    REWARD_WEIGHTS = [5/100, 5/100, 1/100, 1/100, 5/10, 5/10]
    # A distance reward greater than -SUCCESS_THRESHOLD counts as success.
    SUCCESS_THRESHOLD = 0.27

    DEFAULT_CONFIG = {
        "other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
        "centering_position": [0.5, 0.5],
        "parking_spots": 15, #'random', # Parking Spots Per side
        "vehicles_count": 0, #'random', # Total number of cars
        "screen_width": 600 * 2,
        "screen_height": 600 * 2
    }

    OBSERVATION_FEATURES = ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h']
    OBSERVATION_NEAR_EGO = 0 # How many vehicles "near" EGO you want to include in the "observation"/"state space"
    OBSERVATION_NEAR_GOAL = 0 # How many vehicles "near" GOAL you want to include in the "observation"/"state space" (Testing a better awarnes around the goal)
    NORMALIZE_OBS = False

    def __init__(self):
        """Build the parking lot, resolve config placeholders and declare the
        GoalEnv-style observation/action spaces."""
        super(ParkingEnv, self).__init__()
        self.config = self.DEFAULT_CONFIG.copy()
        # 'random' placeholders let the lot size / traffic density vary per run.
        if self.config["parking_spots"] == 'random':
            self.parking_spots = self.np_random.randint(1,21)
        else:
            self.parking_spots = self.config["parking_spots"]

        if self.config["vehicles_count"] == 'random':
            self.vehicles_count = self.np_random.randint(self.parking_spots) * 2
        else:
            self.vehicles_count = self.config["vehicles_count"]
        # There must be at least one free spot left for the goal.
        assert (self.vehicles_count < self.parking_spots*2)

        obs = self.reset()

        # Dict space per the OpenAI GoalEnv contract (HER-compatible).
        self.observation_space = spaces.Dict(dict(
            desired_goal=spaces.Box(-np.inf, np.inf, shape=obs["desired_goal"].shape, dtype=np.float32),
            achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype=np.float32),
            observation=spaces.Box(-np.inf, np.inf, shape=obs["observation"].shape, dtype=np.float32),
        ))
        # Continuous 2-D action: (acceleration, steering), each in [-1, 1].
        self.action_space = spaces.Box(-1., 1., shape=(2,), dtype=np.float32)
        self.REWARD_WEIGHTS = np.array(self.REWARD_WEIGHTS)
        EnvViewer.SCREEN_HEIGHT = EnvViewer.SCREEN_WIDTH // 2

    def step(self, action):
        """Apply *action*, advance the simulation one step and return
        (obs, reward, terminal, info) per the Gym API."""
        # Forward action to the vehicle
        # self.vehicle.act({"steering": action[0] * self.STEERING_RANGE,
        #                   "acceleration": action[1] * self.ACCELERATION_RANGE})
        self.vehicle.act({
            "acceleration": action[0] * self.ACCELERATION_RANGE,
            "steering": action[1] * self.STEERING_RANGE
        })
        self._simulate()

        obs = self._observation()
        # Diagnostics consumed by compute_reward() and by external monitors.
        info = {
            "is_success": self._is_success(obs['achieved_goal'], obs['desired_goal']),
            "is_collision": int(self.vehicle.crashed),
            "is_over_others_parking_spot": int(self.is_over_others_parking_spot(self.vehicle.position)),
            "velocity_idx": self.vehicle.velocity/self.PARKING_MAX_VELOCITY
        }
        reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
        terminal = self._is_terminal()
        return obs, reward, terminal, info

    def is_over_others_parking_spot(self, position):
        """Return True when *position* lies on any parking-spot lane other
        than the goal's lane."""
        for _from, to_dict in self.road.network.graph.items():
            for _to, lanes in to_dict.items():
                for _id, lane in enumerate(lanes):
                    if lane != self.goal.lane:
                        over_others_parking_spots = lane.on_lane(position)
                        if (over_others_parking_spots):
                            return True
        return False

    def reset(self):
        """Rebuild and repopulate the parking lot, then return the first observation."""
        self._build_parking()
        self._populate_parking()
        return self._observation()

    def configure(self, config):
        """Merge *config* entries into the current configuration."""
        self.config.update(config)

    def rot(self, point, angle):
        """Rotate a 2-D *point* by *angle* radians around the origin."""
        assert len(point)==2
        x = point[0]
        y = point[1]
        cos_ang = np.cos(angle)
        sin_ang = np.sin(angle)
        x_rot = x*cos_ang - y*sin_ang
        y_rot = x*sin_ang + y*cos_ang
        return x_rot, y_rot

    def _build_parking(self):
        """
        Create a road composed of straight adjacent lanes.
        We will have 4 parking configurations based on (parking angle):
        https://www.webpages.uidaho.edu/niatt_labmanual/chapters/parkinglotdesign/theoryandconcepts/parkingstalllayoutconsiderations.htm
        parking angle = 90, 75, 60, 45
        """
        net = RoadNetwork()
        lt = (LineType.CONTINUOUS, LineType.CONTINUOUS)

        spots_offset = 0.0
        parking_angles = np.deg2rad([90, 75, 60, 45, 0])
        aisle_width = 10.0
        length = 8.0
        width = 4.0

        # Let's start by randomly choosing the parking angle
        #angle = parking_angles[self.np_random.randint(len(parking_angles))]
        angle = 0 #np.pi/3
        # Let's now build the parking lot: one spot lane per side of the aisle.
        for k in range(self.parking_spots):
            x1 = (k - self.parking_spots // 2) * (width + spots_offset) - width / 2
            y1 = aisle_width/2
            x2 = x1
            y2 = y1 + length
            x3 = x1
            y3 = -y1
            x4 = x3
            y4 = -y2
            # Rotate the four endpoints to realize the chosen parking angle.
            x1, y1 = self.rot((x1,y1), angle)
            x2, y2 = self.rot((x2,y2), angle)
            x3, y3 = self.rot((x3,y3), angle)
            x4, y4 = self.rot((x4,y4), angle)
            net.add_lane("a", "b", StraightLane([x1, y1], [x2, y2], width=width, line_types=lt))
            net.add_lane("b", "c", StraightLane([x3, y3], [x4, y4], width=width, line_types=lt))

        self.road = Road(network=net,
                         np_random=self.np_random)

    def _populate_parking(self):
        """
        Create some new random vehicles of a given type, and add them on the road.
        """
        ##### ADDING EGO #####
        self.vehicle = MDPVehicle(self.road, [0, 0], 2*np.pi*self.np_random.rand(), velocity=0)
        self.vehicle.MAX_VELOCITY = self.PARKING_MAX_VELOCITY
        self.road.vehicles.append(self.vehicle)

        ##### ADDING GOAL #####
        # The goal is a non-colliding obstacle placed mid-way along a random spot.
        parking_spots_used =[]
        lane = self.np_random.choice(self.road.network.lanes_list())
        parking_spots_used.append(lane)
        goal_heading = lane.heading #+ self.np_random.randint(2) * np.pi
        self.goal = Obstacle(self.road, lane.position(lane.length/2, 0), heading=goal_heading)
        self.goal.COLLISIONS_ENABLED = False
        self.road.vehicles.insert(0, self.goal)

        ##### ADDING OTHER VEHICLES #####
        # vehicles_type = utils.class_from_path(scene.config["other_vehicles_type"])
        for _ in range(self.vehicles_count):
            while lane in parking_spots_used: # this loop should never be infinite since we assert that there should be more parking spots/lanes than vehicles
                lane = self.np_random.choice(self.road.network.lanes_list()) # to-do: chceck for empty spots
            parking_spots_used.append(lane)

            vehicle_heading = lane.heading #+ self.np_random.randint(2) * np.pi
            self.road.vehicles.append(Vehicle(self.road, lane.position(lane.length/2, 0), heading=vehicle_heading, velocity=0))

    def _observation(self):
        """Assemble the GoalEnv observation dict from ego, nearby traffic and the goal.

        Rows are padded with -1 so the observation shape is constant.
        """
        ##### ADDING EGO #####
        obs = pandas.DataFrame.from_records([self.vehicle.to_dict()])[self.OBSERVATION_FEATURES]
        ego_obs = np.ravel(obs.copy())

        ##### ADDING NEARBY (TO EGO) TRAFFIC #####
        close_vehicles = self.road.closest_vehicles_to(self.vehicle, self.OBSERVATION_NEAR_EGO)
        if close_vehicles:
            obs = obs.append(pandas.DataFrame.from_records(
                [v.to_dict(self.vehicle)
                 for v in close_vehicles])[self.OBSERVATION_FEATURES],
                             ignore_index=True)

        # Fill missing rows
        needed = self.OBSERVATION_NEAR_EGO + 1
        missing = needed - obs.shape[0]
        if obs.shape[0] < (needed):
            rows = -np.ones((missing, len(self.OBSERVATION_FEATURES)))
            obs = obs.append(pandas.DataFrame(data=rows, columns=self.OBSERVATION_FEATURES), ignore_index=True)

        ##### ADDING NEARBY (TO GOAL) TRAFFIC #####
        close_vehicles = self.road.closest_vehicles_to(self.goal, self.OBSERVATION_NEAR_GOAL)
        if close_vehicles:
            obs = obs.append(pandas.DataFrame.from_records(
                [v.to_dict(self.vehicle)
                 for v in close_vehicles])[self.OBSERVATION_FEATURES],
                             ignore_index=True)

        # Fill missing rows
        needed = self.OBSERVATION_NEAR_EGO + self.OBSERVATION_NEAR_GOAL + 1
        missing = needed - obs.shape[0]
        if obs.shape[0] < (needed):
            rows = -np.ones((missing, len(self.OBSERVATION_FEATURES)))
            obs = obs.append(pandas.DataFrame(data=rows, columns=self.OBSERVATION_FEATURES), ignore_index=True)

        # Reorder
        obs = obs[self.OBSERVATION_FEATURES]

        # Flatten
        obs = np.ravel(obs)

        # Goal
        goal = np.ravel(pandas.DataFrame.from_records([self.goal.to_dict()])[self.OBSERVATION_FEATURES])

        # Arrange it as required by Openai GoalEnv
        obs = {
            "observation": obs / self.OBS_SCALE,
            "achieved_goal": ego_obs / self.OBS_SCALE,
            "desired_goal": goal / self.OBS_SCALE
        }
        return obs

        # obs = np.ravel(pandas.DataFrame.from_records([self.vehicle.to_dict()])[self.OBSERVATION_FEATURES])
        # goal = np.ravel(pandas.DataFrame.from_records([self.goal.to_dict()])[self.OBSERVATION_FEATURES])
        # obs = {
        #     "observation": obs / self.OBS_SCALE,
        #     "achieved_goal": obs / self.OBS_SCALE,
        #     "desired_goal": goal / self.OBS_SCALE
        # }
        # return obs

    def distance_2_goal_reward(self, achieved_goal, desired_goal, p=0.5):
        """Negated weighted p-norm distance between the two goals (closer => larger)."""
        return - np.power(np.dot(self.OBS_SCALE * np.abs(achieved_goal - desired_goal), self.REWARD_WEIGHTS), p)

    def compute_reward(self, achieved_goal, desired_goal, info, p=0.5):
        """
        Proximity to the goal is rewarded

        We use a weighted p-norm
        :param achieved_goal: the goal that was achieved
        :param desired_goal: the goal that was desired
        :param info: any supplementary information
        :param p: the Lp^p norm used in the reward. Use p<1 to have high kurtosis for rewards in [0, 1]
        :return: the corresponding reward
        """
        # return - np.power(np.dot(self.OBS_SCALE * np.abs(achieved_goal - desired_goal), self.REWARD_WEIGHTS), p)

        # DISTANCE TO GOAL
        distance_to_goal_reward = self.distance_2_goal_reward(achieved_goal, desired_goal, p)

        # OVER OTHER PARKING SPOTS REWARD
        over_other_parking_spots_reward = self.OVER_OTHER_PARKING_SPOT_REWARD * np.squeeze(info["is_over_others_parking_spot"])

        # COLLISION REWARD
        collision_reward = self.COLLISION_REWARD * np.squeeze(info["is_collision"])

        # MOVING REWARD
        # moving_reward = self.MOVING_REWARD * np.squeeze(info["velocity_idx"])

        # REACHING THE GOAL REWARD
        # reaching_goal_reward = self.REACHING_GOAL_REWARD * np.squeeze(info["is_success"])

        reward = (distance_to_goal_reward + \
                  over_other_parking_spots_reward + \
                  # reverse_reward + \
                  # against_traffic_reward + \
                  # moving_reward +\
                  # reaching_goal_reward + \
                  collision_reward)

        reward /= self.REWARD_SCALE
        #print(reward)
        return reward

    def _reward(self, action):
        # Not used: this GoalEnv computes rewards via compute_reward() instead.
        raise NotImplementedError

    def _is_success(self, achieved_goal, desired_goal):
        """Success when the (negative) goal-distance reward is within threshold."""
        # DISTANCE TO GOAL
        distance_to_goal_reward = self.distance_2_goal_reward(achieved_goal, desired_goal)
        #print(distance_to_goal_reward)
        self.vehicle.is_success = (distance_to_goal_reward > -self.SUCCESS_THRESHOLD)
        return self.vehicle.is_success

        # Let's try something new: Dicouple everything
        # Let me start defining the thresholds in SI units ( m, m/s, degrees)
        # x_error_thr = 0.1
        # y_error_thr = 0.1
        # vx_error_thr = 0.1 #
        # vy_error_thr = 0.1 #0.27
        # heading_error_thr = np.deg2rad(5)
        # cos_h_error_thr = np.cos(heading_error_thr)
        # sin_h_error_thr = np.sin(heading_error_thr)

        # thresholds = [x_error_thr, y_error_thr, vx_error_thr, vy_error_thr, cos_h_error_thr, sin_h_error_thr]
        # errors = self.OBS_SCALE * np.abs(desired_goal - achieved_goal)
        # success = np.less_equal(errors,thresholds)
        # self.vehicle.is_success = np.all(success)
        # return self.vehicle.is_success

    def _is_terminal(self):
        """
        The episode is over if the ego vehicle crashed or the goal is reached.
        """
        # The episode cannot terminate unless all time steps are done. The reason for this is that HER + DDPG uses constant
        # length episodes. If you plan to use other algorithms, please uncomment this line
        #if info["is_collision"] or info["is_success"]:
        # NOTE: always returns False; a crash resets the scene in place so the
        # fixed-length episode can continue (HER requirement).
        if self.vehicle.crashed: # or self.vehicle.is_success:
            self.reset()
        return False # self.vehicle.crashed or self._is_success(obs['achieved_goal'], obs['desired_goal'])
|
munirjojoverge/rl_AD_urban_baselines | tests/envs/test_gym.py | <filename>tests/envs/test_gym.py
from __future__ import division, print_function
import gym
import urban_AD_env
def test_urban_AD_step():
    """Smoke test: the multilane env steps with random actions and yields a
    valid observation and a unit-range reward."""
    env = gym.make('urban_AD-v1')

    env.reset()
    for i in range(3):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
    env.close()

    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_merge_step():
    """Smoke test: the merge env steps with random actions and yields a
    valid observation and a unit-range reward."""
    env = gym.make('urban_AD-merge-v1')

    env.reset()
    for i in range(3):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
    env.close()

    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_roundabout_step():
    """Smoke test: the roundabout env steps with random actions and yields a
    valid observation and a unit-range reward."""
    env = gym.make('urban_AD-roundabout-v1')

    env.reset()
    for i in range(3):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
    env.close()

    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_parking_step():
    """Smoke test: the parking env accepts continuous 2-D actions
    (acceleration, steering)."""
    env = gym.make('urban_AD-parking-v1')

    env.reset()
    for i in range(10):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
    env.close()

    assert action.size == 2
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/__init__.py | <filename>urban_AD_env/envs/__init__.py
from __future__ import absolute_import
from urban_AD_env.envs.multilane_env import MultiLaneEnv
from urban_AD_env.envs.merge_env import MergeEnv
from urban_AD_env.envs.roundabout_env import RoundaboutEnv
from urban_AD_env.envs.continuous_env import ContinuousEnv
from urban_AD_env.envs.continuous_multi_env import ContinuousMultiEnv
from urban_AD_env.envs.parking_env import ParkingEnv
from urban_AD_env.envs.sidepass_env import SidepassEnv |
munirjojoverge/rl_AD_urban_baselines | tests/envs/test.py | import numpy as np
OBS_SCALE = 1
REWARD_WEIGHTS = [5/100, 5/100, 1/100, 1/100, 5/10, 5/10]
#REWARD_WEIGHTS = [1/100, 1/100, 1/100, 1/100, 1/10, 1/10]
SUCCESS_THRESHOLD = 0.15
def distance_2_goal_reward(achieved_goal, desired_goal, p=0.5):
return - np.power(np.dot(OBS_SCALE * np.abs(achieved_goal - desired_goal), REWARD_WEIGHTS), p)
if __name__ == "__main__":
# lets suppose an error of 10 cm in x & y, 0.5m/s error and about 5 degrees = 0.0872665
err_angle = np.deg2rad(7)
achieved_goal = np.array([0.10, 0.10, 0.10, 0.10, np.cos(err_angle), np.sin(err_angle)])
desired_goal = np.array([0.0, 0.0, 0.0, 0.0, np.cos(0), np.sin(0)])
print (distance_2_goal_reward(achieved_goal, desired_goal))
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/continuous_multi_env.py | ######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 7, 2019
# Author: <NAME>
#######################################################################
from __future__ import division, print_function, absolute_import
import numpy as np
import random as rd
import pandas
from gym import GoalEnv, spaces
from urban_AD_env.envs.abstract import AbstractEnv
from urban_AD_env.envs.build_populate_scenes import _build_roundabout, _build_merge, _build_multilane, _populate_roundabout, _populate_merge, _populate_multilane
from urban_AD_env.envs.graphics import EnvViewer
from urban_AD_env.envs.utils import goal_distance
import urban_AD_env.vehicle.vehicle_params as vehicle_params
class ContinuousMultiEnv(AbstractEnv, GoalEnv):
"""
A continuous control environment.
It implements a reach-type task, where the agent observes their position and velocity and must
control their acceleration and steering so as to reach a given goal.
The agent gets trained in 3 environments simulataneously (each episode an env is randomly selected and
a goal is placed randomly in the road. The Agent must satisfy all goal elements: x, y, Vx, Vy, cos_heading, sin_heading..
and others might be used)
"""
STEERING_RANGE = np.pi / 4
ACCELERATION_RANGE = 5.0
COLLISION_REWARD = -1.0
REVERSE_REWARD = -0.8
OFF_ROAD_REWARD = -0.8
AGAINST_TRAIFFIC_REWARD = -0.9
MOVING_REWARD = +0.2
REACHING_GOAL_REWARD = +1.0
# Reward Weights on the Obs features - below
DISTANCE_TO_GOAL_REWARD = 2.0
REWARD_WEIGHTS = [1/100, 1/100, 1/100, 1/100, 1/10, 1/10]
#REWARD_WEIGHTS = [x * 20.0 for x in REWARD_WEIGHTS]
SUCCESS_THRESHOLD = 0.3
OBS_SCALE = 100
REWARD_SCALE = np.absolute(COLLISION_REWARD)
HEADING_ERR = np.pi / 4
SCENES = ['ROUNDABOUT', 'MERGE', 'MULTILANE']
SCENE_CONFIG = {
SCENES[0]: {
"other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
"incoming_vehicle_destination": None,
"centering_position": [0.5, 0.6],
"num_vehicles_inside_roundabout": 0,
"num_vehicles_incoming": 0,
"num_vehicles_entering": 0,
"build_scene": _build_roundabout,
"populate_scene": _populate_roundabout,
"screen_width": 600,
"screen_height": 600
},
SCENES[1]: {
"other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
"centering_position": [0.3, 0.5],
"vehicles_count": 0,
"build_scene": _build_merge,
"populate_scene": _populate_merge,
"screen_width": 600,
"screen_height": 150
},
SCENES[2]: {
"lanes_count": 4,
"initial_spacing": 3,
"vehicles_count": 0,
"duration": 40,
"other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
"centering_position": [0.3, 0.5],
"build_scene": _build_multilane,
"populate_scene": _populate_multilane,
"screen_width": 600,
"screen_height": 150
}
}
# OBSERVATION_FEATURES = ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h', 'time_elapsed']
OBSERVATION_FEATURES = ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h']
OBSERVATION_VEHICLES = 1
NORMALIZE_OBS = False
def __init__(self):
super(ContinuousMultiEnv, self).__init__()
# self._max_episode_steps = 50
obs = self.reset()
self.prev_achieved_goal = obs['achieved_goal'].copy()
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(-np.inf, np.inf, shape=obs["desired_goal"].shape, dtype=np.float32),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype=np.float32),
observation=spaces.Box(-np.inf, np.inf, shape=obs["observation"].shape, dtype=np.float32),
))
self.action_space = spaces.Box(-1., 1., shape=(2,), dtype=np.float32)
def step(self, action):
# Forward action to the vehicle
self.vehicle.act({"steering": action[0] * self.STEERING_RANGE,
"acceleration": action[1] * self.ACCELERATION_RANGE})
self._simulate()
obs = self._observation()
##### EXTRA INFO
longitudinal_s = self.vehicle.lane.local_coordinates(self.vehicle.position)[0]
lane_heading = self.vehicle.lane.heading_at(longitudinal_s)
info = {
"is_success": self._is_success(obs['achieved_goal'], obs['desired_goal']),
"is_collision": int(self.vehicle.crashed),
"is_off_road": int(not self.road.network.is_inside_network(self.vehicle.position)),
"is_reverse": int(self.vehicle.velocity < 0),
#"prev_distance": float(goal_distance(self.prev_achieved_goal,obs['desired_goal'])),
"is_against_traffic": int(np.absolute(lane_heading - self.vehicle.heading) > self.HEADING_ERR),
"velocity_idx": self.vehicle.velocity/self.vehicle.MAX_VELOCITY
}
self.prev_achieved_goal = obs['achieved_goal'].copy()
reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
terminal = self._is_terminal()
return obs, reward, terminal, info
def _select_scene(self, scene=None):
if scene==None:
self.scene = rd.randrange(0, len(self.SCENES)-1,1)
else:
self.scene = scene
self.config = self.SCENE_CONFIG[self.SCENES[self.scene]].copy()
EnvViewer.SCREEN_HEIGHT = self.config['screen_height']
EnvViewer.SCREEN_WIDTH = self.config['screen_width']
    def configure(self, config):
        """Overlay user-supplied settings onto the active scene config dict."""
        self.config.update(config)
def _populate_scene(self):
populate_scene = self.config['populate_scene']
populate_scene(self)
def _build_scene(self):
build_scene = self.config['build_scene']
build_scene(self)
    def internal_reset(self):
        """Clear scene, episode and rendering state before building a new scene."""
        # Scene
        self.road = None
        self.vehicle = None
        # Running
        self.done = False
        # Rendering
        self.viewer = None
        self.automatic_rendering_callback = None
        self.should_update_rendering = True
        self.rendering_mode = 'human'
        self.enable_auto_render = False
    def reset(self):
        """Rebuild the scene and return the first observation.

        NOTE(review): the scene index is hard-coded to 0, so the random
        branch of _select_scene is never exercised from here.
        """
        self.internal_reset()
        self._select_scene(0)
        self._build_scene()
        self._populate_scene()
        return self._observation()
    def _observation(self):
        """Build the goal-conditioned observation dict.

        Returns a dict with "observation" (ego + nearby traffic features,
        padded to a fixed size), "achieved_goal" (ego features only) and
        "desired_goal" (goal features), each divided by OBS_SCALE.
        """
        # Add ego-vehicle
        obs = pandas.DataFrame.from_records([self.vehicle.to_dict()])[self.OBSERVATION_FEATURES]
        ego_obs = np.ravel(obs.copy())
        # Add nearby traffic
        close_vehicles = self.road.closest_vehicles_to(self.vehicle, self.OBSERVATION_VEHICLES - 1)
        if close_vehicles:
            # NOTE(review): DataFrame.append is deprecated (removed in
            # pandas 2.0) -- switch to pandas.concat when upgrading.
            obs = obs.append(pandas.DataFrame.from_records(
                [v.to_dict(self.vehicle)
                 for v in close_vehicles[-self.OBSERVATION_VEHICLES+1:]])[self.OBSERVATION_FEATURES],
                             ignore_index=True)
        # Fill missing rows with -1 placeholders so the shape is constant
        if obs.shape[0] < self.OBSERVATION_VEHICLES:
            rows = -np.ones((self.OBSERVATION_VEHICLES - obs.shape[0], len(self.OBSERVATION_FEATURES)))
            obs = obs.append(pandas.DataFrame(data=rows, columns=self.OBSERVATION_FEATURES), ignore_index=True)
        # Reorder
        obs = obs[self.OBSERVATION_FEATURES]
        # Flatten
        obs = np.ravel(obs)
        # Goal
        goal = np.ravel(pandas.DataFrame.from_records([self.goal.to_dict()])[self.OBSERVATION_FEATURES])
        # Arrange it as required by Openai GoalEnv
        obs = {
            "observation": obs / self.OBS_SCALE,
            "achieved_goal": ego_obs / self.OBS_SCALE,
            "desired_goal": goal / self.OBS_SCALE
        }
        return obs
def distance_2_goal_reward(self, achieved_goal, desired_goal, p=0.5):
return - np.power(np.dot(self.OBS_SCALE * np.abs(achieved_goal - desired_goal), self.REWARD_WEIGHTS), p)
    def compute_reward(self, achieved_goal, desired_goal, info, p=0.5):
        """
        Proximity to the goal is rewarded

        We use a weighted p-norm
        :param achieved_goal: the goal that was achieved
        :param desired_goal: the goal that was desired
        :param info: any supplementary information
        :param p: the Lp^p norm used in the reward. Use p<1 to have high kurtosis for rewards in [0, 1]
        :return: the corresponding reward
        """
        # DISTANCE TO GOAL
        distance_to_goal_reward = self.distance_2_goal_reward(achieved_goal, desired_goal, p)

        # ON/OFF ROAD REWARD
        off_road_reward = self.OFF_ROAD_REWARD * np.squeeze(info["is_off_road"])

        # COLLISION REWARD
        collision_reward = self.COLLISION_REWARD * np.squeeze(info["is_collision"])

        # MOVING REWARD -- speed incentive weighted by how far the goal still is
        moving_reward = self.MOVING_REWARD * np.squeeze(info["velocity_idx"]) * np.abs(distance_to_goal_reward)

        # REVERESE DRIVING REWARD
        reverse_reward = self.REVERSE_REWARD * np.squeeze(info["is_reverse"])

        # AGAINST TRAFFIC DRIVING REWARD
        against_traffic_reward = self.AGAINST_TRAIFFIC_REWARD * np.squeeze(info["is_against_traffic"])

        # REACHING THE GOAL REWARD
        reaching_goal_reward = self.REACHING_GOAL_REWARD * np.squeeze(info["is_success"])

        reward = (distance_to_goal_reward + \
                  off_road_reward + \
                  reverse_reward + \
                  against_traffic_reward + \
                  moving_reward +\
                  reaching_goal_reward + \
                  collision_reward)

        # self.REWARD_SCALE = np.max(np.absolute([distance_to_goal_reward, off_road_reward, reverse_reward, against_traffic_reward, collision_reward]))
        # Fixed normalisation by the class-level REWARD_SCALE (contrast with
        # compute_reward_2, which rescales dynamically).
        reward /= self.REWARD_SCALE
        #print(reward)
        return reward
    def _is_success(self, achieved_goal, desired_goal):
        """True when the weighted distance reward is within SUCCESS_THRESHOLD."""
        # DISTANCE TO GOAL
        distance_to_goal_reward = self.distance_2_goal_reward(achieved_goal, desired_goal)
        #print(distance_to_goal_reward)
        return distance_to_goal_reward > -self.SUCCESS_THRESHOLD
    def compute_reward_2(self, achieved_goal, desired_goal, info):
        """
        Proximity to the goal is rewarded

        Alternative (apparently experimental) reward using the Euclidean
        distance and a dynamically-updated scale.

        NOTE(review): this mutates self.REWARD_SCALE as a side effect, which
        also changes the normalisation used by compute_reward afterwards.

        :param achieved_goal: the goal that was achieved
        :param desired_goal: the goal that was desired
        :param info: any supplementary information
        :return: the corresponding reward
        """
        curr_distance = goal_distance(achieved_goal, desired_goal) * self.OBS_SCALE
        distance_to_goal_reward = (-1.0) * curr_distance * self.DISTANCE_TO_GOAL_REWARD

        # HEADING TOWARDS THE GOAL REWARD: ( previous - current distances to goal)
        # prev_distance = np.squeeze(info["prev_distance"] * self.OBS_SCALE)
        # heading_towards_goal_reward = (prev_distance - curr_distance) * self.DISTANCE_TO_GOAL_REWARD

        # ON/OFF ROAD REWARD
        off_road_reward = self.OFF_ROAD_REWARD * np.squeeze(info["is_off_road"])

        # COLLISION REWARD
        collision_reward = self.COLLISION_REWARD * np.squeeze(info["is_collision"])

        # # HIGH VELOCITY REWARD
        # achieved_speed = np.linalg.norm([achieved_goal[2], achieved_goal[3]])
        # moving_reward = self.MOVING_REWARD * achieved_speed

        # REVERESE DRIVING REWARD
        reverse_reward = self.REVERSE_REWARD * np.squeeze(info["is_reverse"])

        reward = (distance_to_goal_reward + \
                  #heading_towards_goal_reward + \
                  off_road_reward + \
                  reverse_reward + \
                  collision_reward)

        # Dynamic rescale by the largest component magnitude this step.
        self.REWARD_SCALE = np.max(np.absolute([distance_to_goal_reward,off_road_reward, reverse_reward, collision_reward]))
        return reward / self.REWARD_SCALE
    def _reward(self, achieved_goal, desired_goal, info):
        """Unused here: goal-conditioned rewards go through compute_reward."""
        raise NotImplementedError
    def _is_success_2(self, achieved_goal, desired_goal):
        """Alternative success test that also records the result on the vehicle.

        NOTE(review): unlike _is_success, this mutates self.vehicle.is_success,
        which _is_terminal later reads.
        """
        # d = goal_distance(achieved_goal, desired_goal) * self.OBS_SCALE
        # return (d < self.SUCCESS_GOAL_DISTANCE).astype(np.float32)
        #return np.linalg.norm(achieved_goal - desired_goal) * self.OBS_SCALE < self.SUCCESS_GOAL_DISTANCE

        # DISTANCE TO GOAL
        distance_to_goal_reward = self.distance_2_goal_reward(achieved_goal, desired_goal)
        #print(distance_to_goal_reward)
        self.vehicle.is_success = (distance_to_goal_reward > -self.SUCCESS_THRESHOLD)
        return self.vehicle.is_success
    def _is_terminal(self):
        """
        The episode is over if the ego vehicle crashed or the goal is reached.
        """
        # The episode cannot terminate unless all time steps are done. The reason for this is that HER + DDPG uses constant
        # length episodes. If you plan to use other algorithms, please uncomment this line
        # NOTE(review): as a consequence, this method resets the environment
        # mid-episode on crash/success while still returning False -- a side
        # effect inside what looks like a pure query; intentional for HER.
        if self.vehicle.crashed or self.vehicle.is_success:
            self.reset()
        return False  # self.vehicle.crashed or self._is_success(obs['achieved_goal'], obs['desired_goal'])
|
munirjojoverge/rl_AD_urban_baselines | baselines_run.py | ######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 5, 2019
# Author: <NAME>
#######################################################################
import sys, os
from os.path import dirname, abspath
import time
file_path = sys.argv[0]
pathname = os.path.dirname(file_path)

# Make the vendored open_ai_baselines checkout and the environment package
# importable without installing them.
open_ai_baselines_dir = pathname + '/open_ai_baselines'
print(open_ai_baselines_dir)

urban_AD_env_path = pathname + '/urban_AD_env/envs'
print(urban_AD_env_path)

sys.path.append(open_ai_baselines_dir)
sys.path.append(urban_AD_env_path)

# These imports rely on the sys.path entries appended above.
import baselines.run as run
import urban_AD_env

from settings import req_dirs, models_folder
def create_dirs(req_dirs):
    """Ensure every directory in req_dirs exists.

    Uses os.makedirs so missing intermediate directories are created too
    (the previous os.mkdir raised FileNotFoundError when a parent did not
    exist yet).

    :param req_dirs: iterable of directory paths to create.
    """
    for dirName in req_dirs:
        if not os.path.exists(dirName):
            os.makedirs(dirName)
            print("Directory " , dirName ,  " Created ")
        else:
            print("Directory " , dirName ,  " already exists")
def default_args():
    """Build the default CLI argument list for baselines.run.main.

    Creates the required model directories, then assembles the environment,
    algorithm, save/load and logging paths for this run.

    :return: list of '--key=value' strings understood by baselines.run.main.
    """
    create_dirs(req_dirs)

    currentDT = time.strftime("%Y%m%d-%H%M%S")

    ###############################################################
    #          DEFINE YOUR "BASELINES" PARAMETERS HERE
    ###############################################################
    env = 'parking-v1' #'urban_AD-multilane-v1' #'sidepass-v0' #'urban_AD-merge-v1' #'parking-v1' #'continuous-multi-env-v1' #'continuous-env-v1' 'parking-v1'
    alg = 'her'
    network = 'default'
    num_timesteps = '3e4'

    save_folder = models_folder + '/' + env +'/'+ alg + '/' + network
    save_file = save_folder + '/' + str(currentDT)
    logger_path = save_file + '_log'
    load_path = save_folder +'/'+ '20190228-174333' #her_default_20190212-141935' # Good with just Ego
    # load_path = save_folder +'/'+ 'her_default_obs5_20190212-202901' # So-So with others
    ###############################################################
    try:
        # BUGFIX: os.makedirs creates missing parent folders too; plain
        # os.mkdir failed (and the failure was silently swallowed below)
        # whenever an intermediate directory such as models_folder/env/alg
        # did not exist yet, making the later model save fail.
        os.makedirs(save_folder)
    except OSError:
        print ("Creation of the save path %s failed. It might already exist" % save_folder)
    else:
        print ("Successfully created the save path folder %s " % save_folder)

    DEFAULT_ARGUMENTS = [
        '--env=' + env,
        '--alg=' + alg,
        # '--network=' + network,
        '--num_timesteps=' + num_timesteps,
        # '--num_env=0',
        '--save_path=' + save_file,
        '--load_path=' + load_path,
        '--logger_path=' + logger_path,
        '--play'
    ]
    return DEFAULT_ARGUMENTS
if __name__ == "__main__":
    args = sys.argv
    # Fall back to the built-in configuration when no CLI arguments were
    # given (args[0] is just the script name).
    if len(args) <= 1:
        args = default_args()
    run.main(args)
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/build_populate_scenes.py | <reponame>munirjojoverge/rl_AD_urban_baselines<gh_stars>1-10
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 7, 2019
# Author: <NAME>
#######################################################################
from __future__ import division, print_function, absolute_import
import numpy as np
import random as rd
from datetime import timedelta
from urban_AD_env.road.lane import LineType, StraightLane, CircularLane, SineLane
from urban_AD_env.road.road import Road, RoadNetwork
from urban_AD_env.vehicle.dynamics import Vehicle, Obstacle
from urban_AD_env.vehicle.control import ControlledVehicle
from urban_AD_env import utils
from urban_AD_env.envs.utils import goal_distance, rad
# Ordered (entry, exit) node pairs describing the ring of the roundabout
# road network; indexed by _populate_roundabout to place the goal on a
# random section.
ROUNDABOUT = [ ["se", "ex"],
               ["ex", "ee"],
               ["ee", "nx"],
               ["nx", "ne"],
               ["ne", "wx"],
               ["wx", "we"],
               ["we", "sx"],
               ["sx", "se"] ]
def _build_roundabout(scene):
    """Build a two-lane roundabout with four sine-shaped access roads.

    The ring is split into eight circular segments (see ROUNDABOUT); each
    compass point gets an entry ("…e") and exit ("…x") node, plus straight
    approach roads ("…r") joined by sine transition lanes ("…s").
    Sets scene.road.
    """
    # Circle lanes: (s)outh/(e)ast/(n)orth/(w)est (e)ntry/e(x)it.
    center = [0, 0]  # [m]
    radius = 30  # [m]
    alpha = 20  # [deg]  angular half-width of each entry/exit segment
    net = RoadNetwork()
    radii = [radius, radius+4]
    n, c, s = LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED
    line = [[c, s], [n, c]]
    for lane in [0, 1]:
        net.add_lane("se", "ex", CircularLane(center, radii[lane], rad(90-alpha), rad(alpha), line_types=line[lane]))
        net.add_lane("ex", "ee", CircularLane(center, radii[lane], rad(alpha), rad(-alpha), line_types=line[lane]))
        net.add_lane("ee", "nx", CircularLane(center, radii[lane], rad(-alpha), rad(-90+alpha), line_types=line[lane]))
        net.add_lane("nx", "ne", CircularLane(center, radii[lane], rad(-90+alpha), rad(-90-alpha), line_types=line[lane]))
        net.add_lane("ne", "wx", CircularLane(center, radii[lane], rad(-90-alpha), rad(-180+alpha), line_types=line[lane]))
        net.add_lane("wx", "we", CircularLane(center, radii[lane], rad(-180+alpha), rad(-180-alpha), line_types=line[lane]))
        net.add_lane("we", "sx", CircularLane(center, radii[lane], rad(180-alpha), rad(90+alpha), line_types=line[lane]))
        net.add_lane("sx", "se", CircularLane(center, radii[lane], rad(90+alpha), rad(90-alpha), line_types=line[lane]))

    # Access lanes: (r)oad/(s)ine
    access = 200  # [m]  length of the straight approach
    dev = 120  # [m]  length of the deviation/transition section
    a = 5  # [m]  sine lane amplitude
    delta_st = 0.20*dev  # [m]  transition length on the entry side
    delta_en = dev-delta_st
    w = 2*np.pi/dev  # sine pulsation matching the deviation length
    net.add_lane("ser", "ses", StraightLane([2, access], [2, dev/2], line_types=[s, c]))
    net.add_lane("ses", "se", SineLane([2+a, dev/2], [2+a, dev/2-delta_st], a, w, -np.pi/2, line_types=[c, c]))
    net.add_lane("sx", "sxs", SineLane([-2-a, -dev/2+delta_en], [-2-a, dev/2], a, w, -np.pi/2+w*delta_en, line_types=[c, c]))
    net.add_lane("sxs", "sxr", StraightLane([-2, dev / 2], [-2, access], line_types=[n, c]))

    net.add_lane("eer", "ees", StraightLane([access, -2], [dev / 2, -2], line_types=[s, c]))
    net.add_lane("ees", "ee", SineLane([dev / 2, -2-a], [dev / 2 - delta_st, -2-a], a, w, -np.pi / 2, line_types=[c, c]))
    net.add_lane("ex", "exs", SineLane([-dev / 2 + delta_en, 2+a], [dev / 2, 2+a], a, w, -np.pi / 2 + w * delta_en, line_types=[c, c]))
    net.add_lane("exs", "exr", StraightLane([dev / 2, 2], [access, 2], line_types=[n, c]))

    net.add_lane("ner", "nes", StraightLane([-2, -access], [-2, -dev / 2], line_types=[s, c]))
    net.add_lane("nes", "ne", SineLane([-2 - a, -dev / 2], [-2 - a, -dev / 2 + delta_st], a, w, -np.pi / 2, line_types=[c, c]))
    net.add_lane("nx", "nxs", SineLane([2 + a, dev / 2 - delta_en], [2 + a, -dev / 2], a, w, -np.pi / 2 + w * delta_en, line_types=[c, c]))
    net.add_lane("nxs", "nxr", StraightLane([2, -dev / 2], [2, -access], line_types=[n, c]))

    road = Road(network=net, np_random=scene.np_random)
    scene.road = road
def _populate_roundabout(scene):
    """
    Populate a road with several vehicles on the urban_AD and on the merging lane, as well as an ego-vehicle.
    :return: the ego-vehicle
    """
    ################### ADDING EGO VEHICLE ###################
    # Ego-lane
    ego_lane = scene.road.network.get_lane(("ser", "ses", 0))

    # Ego Initial State: 140 m down the southern approach, aligned with it.
    ego_ini_pos = ego_lane.position(140, 0)
    ego_ini_heading = ego_lane.heading_at(140)
    ego_ini_vel = 5 # m/s
    #scene.vehicle = Vehicle(scene.road, [200, scene.np_random.randint(0, 12)], 2*np.pi*scene.np_random.rand(), 0)

    # Ego Vehicle
    ego_vehicle = Vehicle(scene.road,
                          ego_ini_pos,
                          velocity=ego_ini_vel,
                          heading=ego_ini_heading)
    scene.road.vehicles.append(ego_vehicle)
    scene.vehicle = ego_vehicle

    ################### ADDING ALL OTHER VEHICLES ###################
    position_deviation = 2
    velocity_deviation = 2

    # Incoming vehicles
    destinations = ["exr", "sxr", "nxr"]
    other_vehicles_type = utils.class_from_path(scene.config["other_vehicles_type"])
    # NOTE(review): this range pattern yields indices [1..n-1] plus -1 --
    # confirm the intended vehicle count and the meaning of index -1.
    for i in list(range(1, scene.config["num_vehicles_incoming"])) + list(range(-1, 0)):
        vehicle = other_vehicles_type.make_on_lane(scene.road,
                                                   ("we", "sx", 1),
                                                   longitudinal=5 + scene.np_random.randn()*position_deviation,
                                                   velocity=16 + scene.np_random.randn()*velocity_deviation)
        if scene.config["incoming_vehicle_destination"] is not None:
            destination = destinations[scene.config["incoming_vehicle_destination"]]
        else:
            destination = scene.np_random.choice(destinations)
        vehicle.plan_route_to(destination)
        vehicle.randomize_behavior()
        scene.road.vehicles.append(vehicle)

    # Vehicles inside the round-about, spaced ~20 m apart along the ring.
    for i in list(range(1, scene.config["num_vehicles_inside_roundabout"])) + list(range(-1, 0)):
        vehicle = other_vehicles_type.make_on_lane(scene.road,
                                                   ("we", "sx", 0),
                                                   longitudinal=20*i + scene.np_random.randn()*position_deviation,
                                                   velocity=16 + scene.np_random.randn()*velocity_deviation)
        vehicle.plan_route_to(scene.np_random.choice(destinations))
        vehicle.randomize_behavior()
        scene.road.vehicles.append(vehicle)

    # Entering vehicle
    for i in list(range(1, scene.config["num_vehicles_entering"])) + list(range(-1, 0)):
        vehicle = other_vehicles_type.make_on_lane(scene.road,
                                                   ("eer", "ees", 0),
                                                   longitudinal=50 + scene.np_random.randn() * position_deviation,
                                                   velocity=16 + scene.np_random.randn() * velocity_deviation)
        vehicle.plan_route_to(scene.np_random.choice(destinations))
        vehicle.randomize_behavior()
        scene.road.vehicles.append(vehicle)

    # The goal will be for now an obstacle that we want to reach. Once we reach it = collision and it will terminate the episode.
    # NOTE(review): the section index lands in 1..7, so ROUNDABOUT[0] is
    # never selected; and int(rand()*1)+1 makes lane_num always 1 -- confirm
    # whether lane 0 / section 0 were meant to be reachable.
    roundabout_section = int(scene.np_random.rand() * 7) + 1
    lane_num = int(scene.np_random.rand() * 1) + 1
    longitudinal = 5 + scene.np_random.randn()*position_deviation
    scene.goal = Obstacle.make_on_lane(scene.road,
                                       (ROUNDABOUT[roundabout_section][0], ROUNDABOUT[roundabout_section][1], lane_num),
                                       longitudinal=longitudinal,
                                       velocity=0)

    # lane_coords = scene.goal.lane.local_coordinates(scene.goal.position)
    # lane_next_coords = lane_coords[0]
    # Align the goal marker with the lane direction at its position.
    scene.goal.heading = scene.goal.lane.heading_at(longitudinal)
    #scene.goal = Obstacle(scene.road, np.array([ego_ini_pos[0], -ego_ini_pos[1]]))

    # Let's calculate a reasonable manouver duration depending on where the Ego car is located and where the goal is located
    # For these quick estimate we will use the average EGO speed based on:
    # Vehicle.SPEED_MIN & Vehicle.SPEED_MAX
    # and the distance between the EGO and the goal
    # ego_ini_pos = scene.vehicle.position
    #goal_pos = scene.goal.position
    # d = goal_distance(ego_ini_pos, goal_pos)
    # ego_avg_speed = (scene.vehicle.MAX_VELOCITY - 0.0)/2
    # manouver_duration = d/ego_avg_speed
    # scene.goal.manouver_duration = manouver_duration

    # The goal is a marker, not a physical obstacle.
    scene.goal.COLLISIONS_ENABLED = False
    scene.road.vehicles.insert(0, scene.goal)

    #generate_random_goal(scene)
def _build_merge(scene):
    """
    Make a road composed of a straight urban_AD and a merging lane.
    :return: the road
    """
    net = RoadNetwork()

    # urban_AD lanes
    ends = [150, 80, 80, 150]  # Before, converging, merge, after
    c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
    y = [0, StraightLane.DEFAULT_WIDTH]
    line_type = [[c, s], [n, c]]
    line_type_merge = [[c, s], [n, s]]
    # Two parallel through-lanes a->b->c->d, with striped markings in the
    # merge section so the merging vehicle may cross.
    for i in range(2):
        net.add_lane("a", "b", StraightLane([0, y[i]], [sum(ends[:2]), y[i]], line_types=line_type[i]))
        net.add_lane("b", "c", StraightLane([sum(ends[:2]), y[i]], [sum(ends[:3]), y[i]], line_types=line_type_merge[i]))
        net.add_lane("c", "d", StraightLane([sum(ends[:3]), y[i]], [sum(ends), y[i]], line_types=line_type[i]))

    # Merging lane: straight ramp j->k, sine transition k->b, then straight
    # b->c alongside the main road.
    amplitude = 3.25
    ljk = StraightLane([0, 6.5 + 4 + 4], [ends[0], 6.5 + 4 + 4], line_types=[c, c], forbidden=True)
    lkb = SineLane(ljk.position(ends[0], -amplitude), ljk.position(sum(ends[:2]), -amplitude),
                   amplitude, 2 * np.pi / (2*ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
    lbc = StraightLane(lkb.position(ends[1], 0), lkb.position(ends[1], 0) + [ends[2], 0],
                       line_types=[n, c], forbidden=True)
    net.add_lane("j", "k", ljk)
    net.add_lane("k", "b", lkb)
    net.add_lane("b", "c", lbc)
    road = Road(network=net, np_random=scene.np_random)
    # Obstacle marking the physical end of the merge lane.
    road.vehicles.append(Obstacle(road, lbc.position(ends[2], 0)))
    scene.road = road
def _populate_merge(scene):
    """
    Populate a road with several vehicles on the urban_AD and on the merging lane, as well as an ego-vehicle.
    :return: the ego-vehicle
    """
    road = scene.road
    # Ego on through-lane ("a", "b", 1), 30 m in, at 30 m/s.
    ego_vehicle = Vehicle(road, road.network.get_lane(("a", "b", 1)).position(30, 0), velocity=30)
    road.vehicles.append(ego_vehicle)

    # Surrounding traffic currently disabled; kept for reference.
    # other_vehicles_type = utils.class_from_path(scene.config["other_vehicles_type"])
    # road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(90, 0), velocity=29))
    # road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 1)).position(70, 0), velocity=31))
    # road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(5, 0), velocity=31.5))

    # merging_v = other_vehicles_type(road, road.network.get_lane(("j", "k", 0)).position(110, 0), velocity=20)
    # merging_v.target_velocity = 30
    # road.vehicles.append(merging_v)
    scene.vehicle = ego_vehicle

    generate_random_goal(scene)
def _build_multilane(scene):
    """Create a straight road with the configured number of adjacent lanes."""
    lane_count = scene.config["lanes_count"]
    network = RoadNetwork.straight_road_network(lane_count)
    scene.road = Road(network=network, np_random=scene.np_random)
def _populate_multilane(scene):
    """
    Create some new random vehicles of a given type, and add them on the road.
    """
    # Ego vehicle at ~25 m/s with the configured spacing from traffic.
    scene.vehicle = Vehicle.create_random(scene.road, 25, spacing=scene.config["initial_spacing"])
    scene.road.vehicles.append(scene.vehicle)

    # Traffic type is resolved dynamically from the scene config string.
    vehicles_type = utils.class_from_path(scene.config["other_vehicles_type"])
    for _ in range(scene.config["vehicles_count"]):
        scene.road.vehicles.append(vehicles_type.create_random(scene.road))

    generate_random_goal(scene)
def generate_random_goal(scene, manouver_duration_goal=None):
    """Spawn a random, non-collidable goal marker on the scene's road.

    :param scene: environment owning .road and .vehicle; receives .goal.
    :param manouver_duration_goal: optional time budget [s]; when None it is
        estimated from the ego-goal distance and the ego's average speed.
    """
    scene.goal = Obstacle.create_random(scene.road)
    # The goal is a marker to reach, not a physical obstacle.
    scene.goal.COLLISIONS_ENABLED = False
    if manouver_duration_goal is None:  # PEP 8 (E711): identity test, not ==
        # Let's calculate a reasonable manouver duration depending on where the Ego car is located and where the goal is located
        # For these quick estimate we will use the average EGO speed based on:
        # Vehicle.SPEED_MIN & Vehicle.SPEED_MAX
        # and the distance between the EGO and the goal
        ego_ini_pos = scene.vehicle.position
        goal_pos = scene.goal.position
        d = goal_distance(ego_ini_pos, goal_pos)
        ego_avg_speed = (scene.vehicle.MAX_VELOCITY - 0.0)/2
        manouver_duration_goal = d/ego_avg_speed
    scene.goal.manouver_duration = manouver_duration_goal
    # Inserted first -- presumably so the goal is rendered before/beneath the
    # other vehicles; confirm against the rendering order.
    scene.road.vehicles.insert(0, scene.goal)
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/continuous_env.py | from __future__ import division, print_function, absolute_import
import numpy as np
import pandas
from gym import GoalEnv, spaces
from urban_AD_env.envs.abstract import AbstractEnv
from urban_AD_env.road.road import Road, RoadNetwork
from urban_AD_env.vehicle.dynamics import Vehicle, Obstacle
class ContinuousEnv(AbstractEnv, GoalEnv):
    """
    A continuous control environment.

    It implements a reach-type task, where the agent observes their position and velocity and must
    control their acceleration and steering so as to reach a given goal.

    Credits to <NAME> for the idea and initial implementation.
    """
    # Scaling applied to the normalised [-1, 1] actions in step().
    STEERING_RANGE = np.pi / 4
    ACCELERATION_RANGE = 5.0

    # Observations are divided by OBS_SCALE; rewards by REWARD_SCALE.
    OBS_SCALE = 100
    REWARD_SCALE = 100
    # Unscaled distance under which the goal counts as reached.
    SUCCESS_GOAL_DISTANCE = 5

    DEFAULT_CONFIG = {
        "centering_position": [0.5, 0.5]
    }

    #OBSERVATION_FEATURES = ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h']
    # Per-vehicle features; note 'duration' is included, unlike the
    # commented-out variant above.
    OBSERVATION_FEATURES = ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h', 'duration']
    OBSERVATION_VEHICLES = 1
    # NOTE(review): not referenced in this class -- presumably a switch for
    # observation normalisation; confirm before removing.
    NORMALIZE_OBS = False

    def __init__(self):
        """Initialise the env; one reset is run so space shapes can be inferred."""
        super(ContinuousEnv, self).__init__()
        self.config = self.DEFAULT_CONFIG.copy()
        # self._max_episode_steps = 50
        obs = self.reset()
        self.observation_space = spaces.Dict(dict(
            desired_goal=spaces.Box(-np.inf, np.inf, shape=obs["desired_goal"].shape, dtype=np.float32),
            achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype=np.float32),
            observation=spaces.Box(-np.inf, np.inf, shape=obs["observation"].shape, dtype=np.float32),
        ))
        # Continuous [steering, acceleration] command in [-1, 1].
        self.action_space = spaces.Box(-1., 1., shape=(2,), dtype=np.float32)

    def step(self, action):
        """Apply one [steering, acceleration] action and advance the simulation.

        :return: (obs, reward, terminal, info) in the usual gym format.
        """
        # Forward action to the vehicle
        self.vehicle.act({"steering": action[0] * self.STEERING_RANGE,
                          "acceleration": action[1] * self.ACCELERATION_RANGE})
        self._simulate()

        obs = self._observation()
        info = {"is_success": self._is_success(obs['achieved_goal'], obs['desired_goal'])}
        reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
        terminal = self._is_terminal()
        return obs, reward, terminal, info

    def reset(self):
        """Rebuild road and vehicles and return the first observation."""
        self._create_road()
        self._create_vehicles()
        return self._observation()

    def configure(self, config):
        """Overlay user-supplied settings onto the current config dict."""
        self.config.update(config)

    def _create_road(self):
        """
        Create a road composed of straight adjacent lanes.
        """
        self.road = Road(network=RoadNetwork.straight_road_network(lanes=4),
                         np_random=self.np_random)

    def _create_vehicles(self):
        """
        Create some new random vehicles of a given type, and add them on the road.
        """
        # Ego at x=200 with random lateral offset and heading, zero speed.
        self.vehicle = Vehicle(self.road, [200, self.np_random.randint(0, 12)], 2*np.pi*self.np_random.rand(), 0)
        self.road.vehicles.append(self.vehicle)
        # Goal within +/-100 m of the ego longitudinally; rendered but not
        # collidable.
        self.goal = Obstacle(self.road, [self.vehicle.position[0] + self.np_random.randint(-100, 100),
                                         self.np_random.randint(0, 12)])
        self.goal.COLLISIONS_ENABLED = False
        self.road.vehicles.insert(0, self.goal)

    def _observation(self):
        """Build the GoalEnv observation dict, scaled down by OBS_SCALE.

        With OBSERVATION_VEHICLES == 1 the "observation" and "achieved_goal"
        entries carry the same ego features.
        """
        obs = np.ravel(pandas.DataFrame.from_records([self.vehicle.to_dict()])[self.OBSERVATION_FEATURES])
        goal = np.ravel(pandas.DataFrame.from_records([self.goal.to_dict()])[self.OBSERVATION_FEATURES])
        obs = {
            "observation": obs / self.OBS_SCALE,
            "achieved_goal": obs / self.OBS_SCALE,
            "desired_goal": goal / self.OBS_SCALE
        }
        return obs

    def compute_reward(self, achieved_goal, desired_goal, info):
        """
        Proximity to the goal is rewarded

        :param achieved_goal: the goal that was achieved
        :param desired_goal: the goal that was desired
        :param info: any supplementary information
        :return: the corresponding reward
        """
        # Negative Euclidean distance, un-scaled then re-normalised.
        return -np.linalg.norm(achieved_goal - desired_goal, axis=-1) * self.OBS_SCALE / self.REWARD_SCALE

    def _reward(self, action):
        """Unused here: goal-conditioned rewards go through compute_reward."""
        raise NotImplementedError

    def _is_success(self, achieved_goal, desired_goal):
        """True when the un-scaled goal distance is below SUCCESS_GOAL_DISTANCE."""
        return np.linalg.norm(achieved_goal - desired_goal) * self.OBS_SCALE < self.SUCCESS_GOAL_DISTANCE

    def _is_terminal(self):
        """
        The episode is over if the ego vehicle crashed or the goal is reached.

        NOTE(review): the success condition is currently commented out, so
        only a crash terminates the episode.
        """
        #obs = self._observation()
        return self.vehicle.crashed  # or self._is_success(obs['achieved_goal'], obs['desired_goal'])
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/envs/sidepass_env.py | <filename>urban_AD_env/envs/sidepass_env.py
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 7, 2019
# Author: <NAME>
#######################################################################
from __future__ import division, print_function, absolute_import
import numpy as np
import random as rd
from gym import logger
from urban_AD_env import utils
from urban_AD_env.envs.abstract import AbstractEnv
from urban_AD_env.road.road import Road, RoadNetwork
from urban_AD_env.road.lane import LineType, StraightLane
from urban_AD_env.vehicle.control import MDPVehicle
from urban_AD_env.envs.graphics import EnvViewer
from urban_AD_env.vehicle.dynamics import Obstacle
class SidepassEnv(AbstractEnv):
    """
    A urban_AD driving environment.

    The vehicle is driving on a straight urban with only 2 lanes (in opposite direction), and is rewarded for
    reaching a goal (position, speed, heading) that is placed at a certain distance infront of an obstacle located in its lane.
    The vehicle must sidepass the obstacle and avoid collisions with vehicles coming in the opposite direction on the only adjacent lane.
    """

    COLLISION_REWARD = -1
    """ The reward received when colliding with a vehicle."""

    RIGHT_LANE_REWARD = 0.1
    """ The reward received when driving on the right-most lanes, linearly mapped to zero for other lanes."""

    HIGH_VELOCITY_REWARD = 0.4
    """ The reward received when driving at full speed, linearly mapped to zero for lower speeds."""

    LANE_CHANGE_REWARD = -0
    """ The reward received at each lane change action."""

    SCENES = ['1 lane in each direction',
              '2 lanes on Egos direction and 1 on the opposite']

    SCENE_CONFIG = {
        SCENES[0]: {
            "lanes_count_Ego": 3,
            "lanes_count_opposite": 0,
            "road_length": 250,
            "initial_spacing": 2,
            "vehicles_count": 5,
            "default_velocities": [10, 20],
            "duration": 10,
            "other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
            "centering_position": [0.3, 0.5],
            "collision_reward": COLLISION_REWARD,
            "screen_width": 1200,
            "screen_height": 400
        },
        SCENES[1]: {
            "lanes_count_Ego": 2,
            "lanes_count_opposite": 1,
            "road_length": 250,
            "default_velocities": [10, 20],
            "initial_spacing": 2,
            "vehicles_count": 10,
            "duration": 30,
            "other_vehicles_type": "urban_AD_env.vehicle.behavior.IDMVehicle",
            "centering_position": [0.3, 0.5],
            "collision_reward": COLLISION_REWARD,
            "screen_width": 1200,
            "screen_height": 400
        },
    }

    def __init__(self):
        """Initialise the environment and build the first scene."""
        super(SidepassEnv, self).__init__()
        self.steps = 0  # elapsed steps in the current episode
        self.reset()

    def _select_scene(self, scene=None):
        """Select a scene by index, or uniformly at random when scene is None.

        Also resizes the viewer window to the scene's configured dimensions.

        :param scene: optional scene index in [0, len(SCENES)).
        :raises ValueError: if the given index is out of range.
        """
        if scene is None:
            # BUGFIX: randrange's stop bound is exclusive, so the previous
            # rd.randrange(0, len(self.SCENES)-1, 1) could never draw the
            # last scene (with the two scenes above it always returned 0).
            self.scene = rd.randrange(len(self.SCENES))
        elif scene in range(0, len(self.SCENES)):
            self.scene = scene
        else:
            raise ValueError("Invalid scene id. Choose among {}".format(str(range(0,len(self.SCENES)))))

        # Copy so per-episode tweaks don't mutate the shared class template.
        self.config = self.SCENE_CONFIG[self.SCENES[self.scene]].copy()
        EnvViewer.SCREEN_HEIGHT = self.config['screen_height']
        EnvViewer.SCREEN_WIDTH = self.config['screen_width']

    def internal_reset(self):
        """Clear scene, episode and rendering state before rebuilding."""
        # Scene
        self.road = None
        self.vehicle = None
        # Running
        self.done = False
        # Rendering
        self.viewer = None
        self.automatic_rendering_callback = None
        self.should_update_rendering = True
        self.rendering_mode = 'human'
        self.enable_auto_render = False

    def reset(self):
        """Rebuild scene 0, reset the step counter and return the first observation."""
        self.internal_reset()
        self._select_scene(0)
        self._build_scene()
        self._populate_scene()
        self.steps = 0
        return self._observation()

    def step(self, action):
        """Advance one step, counting elapsed steps for the duration limit."""
        self.steps += 1
        return super(SidepassEnv, self).step(action)

    def _build_scene(self):
        """
        Create a road composed of straight adjacent lanes.
        """
        self.road = Road(network=RoadNetwork.straight_road_network(self.config["lanes_count_Ego"]),
                         np_random=self.np_random)

    def _populate_scene(self):
        """
        Create the ego vehicle, two obstacles and random traffic on the road.
        """
        ### Ego ###
        ego_vehicle = MDPVehicle.create_random(self.road, 25, spacing=self.config["initial_spacing"])
        self.vehicle = ego_vehicle
        self.road.vehicles.append(self.vehicle)

        ### Obstacles ###
        # First obstacle 60 m ahead in the ego's lane: the one to side-pass.
        obstacle = Obstacle(self.road, ego_vehicle.position+[60,0])
        self.road.vehicles.append(obstacle)
        # Second obstacle 120 m ahead in the adjacent (left) lane.
        # NOTE(review): the original if/else on ego_vehicle.lane had two
        # byte-identical branches (and compared a Lane object with an int),
        # so it is collapsed here; one branch was presumably meant to use
        # +DEFAULT_WIDTH -- confirm the intended geometry.
        obstacle = Obstacle(self.road, ego_vehicle.position+[120, -StraightLane.DEFAULT_WIDTH])
        self.road.vehicles.append(obstacle)

        ### Other Vehicles ###
        # Traffic type is resolved dynamically from the scene config string.
        vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
        for _ in range(self.config["vehicles_count"]):
            self.road.vehicles.append(vehicles_type.create_random(self.road))

    def _reward(self, action):
        """
        The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.
        :param action: the last action performed
        :return: the corresponding reward
        """
        action_reward = {0: self.LANE_CHANGE_REWARD, 1: 0, 2: self.LANE_CHANGE_REWARD, 3: 0, 4: 0}
        neighbours = self.road.network.all_side_lanes(self.vehicle.lane_index)
        state_reward = \
            + self.config["collision_reward"] * self.vehicle.crashed \
            + self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1) \
            + self.RIGHT_LANE_REWARD * self.vehicle.target_lane_index[2] / (len(neighbours) - 1)
        # Remap the summed reward into [0, 1].
        return utils.remap(action_reward[action] + state_reward,
                           [self.config["collision_reward"], self.HIGH_VELOCITY_REWARD+self.RIGHT_LANE_REWARD],
                           [0, 1])

    def _observation(self):
        """Delegate observation building to the AbstractEnv base class."""
        return super(SidepassEnv, self)._observation()

    def _is_terminal(self):
        """
        The episode is over if the ego vehicle crashed or the time is out.
        """
        return self.vehicle.crashed or self.steps >= self.config["duration"]

    def _constraint(self, action):
        """
        The constraint signal is the occurrence of collision
        """
        return float(self.vehicle.crashed)
|
munirjojoverge/rl_AD_urban_baselines | tests/pygame_demo.py | # http://richard.cgpublisher.com/product/pub.84/prod.11
# INITIALISATION
import pygame, math, sys
from pygame.locals import *

# Tuning constants for the toy car model.
TURN_SPEED = 5          # degrees turned per frame while a turn key is held
ACCELERATION = 2        # speed change per frame while up/down is held
MAX_FORWARD_SPEED = 10
MAX_REVERSE_SPEED = 5
BG = (0, 0, 0)
MAX_Y = 768
MAX_X = 1024
# initialize the screen with size (MAX_X, MAX_Y)
screen = pygame.display.set_mode((MAX_X, MAX_Y))
# load picture file - Car: http://www.xnadevelopment.com/tutorials/introducingxnagamestudioexpress/Car.png / Turtle: http://www.wpclipart.com/animals/aquatic/turtle/turtle_2/cartoon_turtle.png.html
car = pygame.image.load('./Car.png')
# initialize the sound mixer
pygame.mixer.init()
# load horn sound http://www.freesound.org/people/KRAFTWERK2K1/sounds/32417/
horn = pygame.mixer.Sound('car-horn.wav')
clock = pygame.time.Clock()  # load clock
k_up = k_down = k_left = k_right = 0  # init key values
speed = direction = 0  # start speed & direction
position = (100, 100)  # start position
play = True
while play:
    # USER INPUT
    clock.tick(30)
    # get events from the user
    for event in pygame.event.get():
        # not a key event
        if not hasattr(event, 'key'):
            continue
        # check if presses a key or left it
        down = event.type == KEYDOWN  # key down or up?
        # key events: http://pygame.org/docs/ref/key.html
        if event.key == K_RIGHT:
            k_right = down * TURN_SPEED
        elif event.key == K_LEFT:
            k_left = down * TURN_SPEED
        elif event.key == K_UP:
            k_up = down * ACCELERATION
        elif event.key == K_DOWN:
            k_down = down * ACCELERATION
        elif event.key == K_RETURN:
            horn.play()  # TODO honk twice if you feel nice
        elif event.key == K_ESCAPE:
            horn.play()
            play = False
    screen.fill(BG)
    # SIMULATION
    # .. new speed and direction based on acceleration and turn
    speed += (k_up - k_down)
    if speed > MAX_FORWARD_SPEED:
        speed = MAX_FORWARD_SPEED
    # BUGFIX: the original compared against +MAX_REVERSE_SPEED, which clamped
    # the speed to a minimum of +5 and made reversing (or standing still)
    # impossible. Reverse speed is limited at -MAX_REVERSE_SPEED instead.
    if speed < -MAX_REVERSE_SPEED:
        speed = -MAX_REVERSE_SPEED
    direction += (k_right - k_left)  # TODO is this the right direction?
    # .. new position based on current position, speed and direction
    x, y = position
    rad = direction * math.pi / 180
    x += speed * math.sin(rad)
    y += speed * math.cos(rad)
    # make sure the car doesn't exit the screen
    if y < 0:
        y = 0  # TODO is there another way to treat this?
    elif y > MAX_Y:
        y = MAX_Y
    if x < 0:
        x = 0
    elif x > MAX_X:
        x = MAX_X
    position = (x, y)
    # RENDERING
    # .. rotate the car image for direction
    rotated = pygame.transform.rotate(car, direction)
    # .. position the car on screen
    rect = rotated.get_rect()
    rect.center = position
    print(position)
    # .. render the car to screen
    screen.blit(rotated, rect)
    pygame.display.flip()
sys.exit(0)  # quit the game
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/test_basic.py | <filename>urban_AD_env/test_basic.py
from __future__ import division, print_function
#import gym
import sys, os
from os.path import dirname, abspath
# Resolve the repository root (two directory levels above this script) and
# prepend it to sys.path so `urban_AD_env` imports work when this file is
# executed directly, without installing the package.
file_path = sys.argv[0]
# print('sys.argv[0] =', file_path)
#pathname = os.path.dirname(file_path)
parent = dirname(dirname(abspath(file_path)))
#print('path =', pathname)
# print('full path =', os.path.abspath(pathname))
# print('parent =', parent)
sys.path.insert(0, parent)
from urban_AD_env.envs.parking_env import ParkingEnv
from urban_AD_env.envs.continuous_multi_env import ContinuousMultiEnv
from urban_AD_env.envs.sidepass_env import SidepassEnv
from urban_AD_env.envs.merge_env import MergeEnv
def test_urban_step():
    """Smoke-test the registered 'urban-v1' environment for a few random steps."""
    # BUGFIX: the module-level `import gym` is commented out, so `gym.make`
    # raised NameError. Import locally to restore the intended behavior.
    import gym
    env = gym.make('urban-v1')
    env.reset()
    for i in range(3):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
    env.close()
    # the final observation must be valid and the reward normalized
    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_merge_step(num_runs = 5):
    """Run a few random steps in the merge environment and sanity-check output."""
    env = MergeEnv()  # gym.make('urban-merge-v1')
    env.reset()
    for _ in range(num_runs):
        chosen = env.action_space.sample()
        obs, reward, done, info = env.step(chosen)
        env.render()
    env.close()
    # the last observation must be valid and the reward normalized
    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_roundabout_step(num_runs = 5, max_steps=50):
    """Drive the continuous multi-vehicle environment with random actions."""
    env = ContinuousMultiEnv()  # gym.make('urban-roundabout-v1')
    # env = parking_env.ParkingEnv()
    for _ in range(num_runs):
        env.reset()
        done = False
        step_nr = 0
        while not done and step_nr <= max_steps:
            chosen = env.action_space.sample()
            # print('steering = {:.3f}'.format(chosen[0]))
            # print('accel = {:.3f}'.format(chosen[1]))
            obs, reward, done, info = env.step(chosen)
            step_nr += 1
            env.render()
    env.close()
    # assert env.observation_space.contains(obs)
    # assert 0 <= reward <= 1
def test_urban_sidepass(num_runs = 5, max_steps=50):
    """Random rollout of the sidepass environment with basic output checks."""
    env = SidepassEnv()
    for _ in range(num_runs):
        env.reset()
        done = False
        step_nr = 0
        while not done and step_nr <= max_steps:
            chosen = env.action_space.sample()
            obs, reward, done, info = env.step(chosen)
            step_nr += 1
            env.render()
    env.close()
    # the last observation must be valid and the reward normalized
    assert env.observation_space.contains(obs)
    assert 0 <= reward <= 1
def test_any_step(env, num_runs = 5, max_steps=50):
    """Random rollout of an arbitrary environment instance.

    :param env: any gym-style environment (reset/step/render/close)
    :param num_runs: number of episodes to run
    :param max_steps: step budget per episode
    """
    for _ in range(num_runs):
        env.reset()
        done = False
        step_nr = 0
        while not done and step_nr <= max_steps:
            chosen = env.action_space.sample()
            obs, reward, done, info = env.step(chosen)
            step_nr += 1
            env.render()
    env.close()
if __name__ == '__main__':
    # Run the parking scenario by default; other tests can be enabled manually.
    #test_urban_sidepass()
    env = ParkingEnv()
    test_any_step(env)
|
munirjojoverge/rl_AD_urban_baselines | urban_AD_env/__init__.py | <filename>urban_AD_env/__init__.py
from gym.envs.registration import register

# (environment id, entry point, optional tags) for every scenario shipped by
# this package. Registering through a table keeps the declarations in one
# place; the registration order matches the original module.
_ENV_SPECS = [
    ('urban_AD-multilane-v1', 'urban_AD_env.envs:MultiLaneEnv', None),
    ('urban_AD-merge-v1', 'urban_AD_env.envs:MergeEnv', None),
    ('urban_AD-roundabout-v1', 'urban_AD_env.envs:RoundaboutEnv', None),
    ('urban_AD-continuous-v1', 'urban_AD_env.envs:ContinuousEnv',
     {'wrapper_config.TimeLimit.max_episode_steps': 20}),
    ('continuous-multi-env-v1', 'urban_AD_env.envs:ContinuousMultiEnv',
     {'wrapper_config.TimeLimit.max_episode_steps': 20}),
    ('parking-v1', 'urban_AD_env.envs:ParkingEnv',
     {'wrapper_config.TimeLimit.max_episode_steps': 20}),
    ('sidepass-v0', 'urban_AD_env.envs:SidepassEnv',
     {'wrapper_config.TimeLimit.max_episode_steps': 30}),
]

for _id, _entry_point, _tags in _ENV_SPECS:
    if _tags is None:
        register(id=_id, entry_point=_entry_point)
    else:
        register(id=_id, entry_point=_entry_point, tags=_tags)
imwiwiim90/con_quien_la_veo | data/profesores/crawl.py | import requests
from bs4 import BeautifulSoup
# Fetch the faculty listing page and parse it.
s = requests.Session()
r = s.get('http://ingenieria.javeriana.edu.co/docencia-sistemas')
soup = BeautifulSoup(r.text,'html.parser')
# Article blocks holding each professor's entry; the slice drops the last
# three blocks - presumably non-professor page content (TODO confirm).
divs = soup.select(".journal-content-article")[:-3]
def save_image(filename, url):
    """Download `url` and stream it to `filename` in 1 KiB chunks.

    :param filename: destination path on disk
    :param url: image URL to fetch
    """
    r = requests.get(url, stream=True)
    with open(filename, 'wb') as f:
        for chunk in r.iter_content(1024):
            f.write(chunk)
    # NOTE: the explicit f.close() was removed - the `with` block already
    # closes the file, so closing it again was redundant.
# Collect professor names and download each photo.
names = []
for d in divs:
    # BUGFIX (Python 3): get_text() already returns str; the original encoded
    # it to latin-1 bytes here, which made `name + ".jpeg"` below raise a
    # TypeError (bytes + str). Keep the name as str instead.
    name = d.find(id='tituloadmin').get_text()
    url = 'http://ingenieria.javeriana.edu.co/' + d.find('img')['src']
    names.append(name)
    save_image(name + ".jpeg", url)
# Write names.txt with latin-1 encoding, matching the bytes the original
# Python-2 script produced on disk.
with open('names.txt', 'w', encoding='latin-1') as f:
    for name in names:
        f.write(name + '\n')
RI-imaging/ODTbrain | tests/test_alg3d_bppt.py | """Test tilted backpropagation algorithm"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, \
cutout, get_test_parameter_set
def test_3d_backprop_phase_real():
    """Tilted backpropagation must match the standard algorithm (real output)."""
    sino, angles = create_test_sino_3d()
    params = get_test_parameter_set(2)
    # reference reconstruction with the non-tilted algorithm
    expected = []
    for kw in params:
        rec = odtbrain.backpropagate_3d(sino, angles, padval=0,
                                        dtype=np.float64, onlyreal=True, **kw)
        expected.append(cutout(rec))
    dataref = np.array(expected).flatten().view(float)
    # tilted algorithm with its default rotation axis
    actual = []
    for kw in params:
        rec = odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
                                               dtype=np.float64, onlyreal=True,
                                               **kw)
        actual.append(cutout(rec))
    data = np.array(actual).flatten().view(float)
    assert np.allclose(data, dataref)
def test_3d_backprop_pad():
    """Edge-padded tilted backpropagation must match the standard algorithm."""
    sino, angles = create_test_sino_3d()
    params = get_test_parameter_set(2)
    # reference reconstruction with the non-tilted algorithm
    expected = []
    for kw in params:
        rec = odtbrain.backpropagate_3d(sino, angles, padval="edge",
                                        dtype=np.float64, onlyreal=False, **kw)
        expected.append(cutout(rec))
    dataref = np.array(expected).flatten().view(float)
    # tilted algorithm, same padding
    actual = []
    for kw in params:
        rec = odtbrain.backpropagate_3d_tilted(sino, angles, padval="edge",
                                               dtype=np.float64,
                                               onlyreal=False, **kw)
        actual.append(cutout(rec))
    data = np.array(actual).flatten().view(float)
    assert np.allclose(data, dataref)
def test_3d_backprop_plane_rotation():
    """Soft check that planar rotation works with tilted-angle reconstruction."""
    params = get_test_parameter_set(1)
    results = []
    # These are specially selected angles that don't give high results.
    # Probably due to phase-wrapping, errors >2 may appear - hence a soft
    # test only.
    for tilt in [1.1, 0.0, 0.234, 2.80922, -.29, 9.87]:
        sino, angles = create_test_sino_3d_tilted(tilt_plane=tilt, A=21)
        cos_t, sin_t = np.cos(tilt), np.sin(tilt)
        rotmat = np.array([
            [cos_t, -sin_t, 0],
            [sin_t, cos_t, 0],
            [0, 0, 1],
        ])
        # rotate `tilted_axis` onto the y-z plane.
        axis = np.dot(rotmat, [0, 1, 0])
        recs = []
        for kw in params:
            rec = odtbrain.backpropagate_3d_tilted(sino, angles,
                                                   padval="edge",
                                                   tilted_axis=axis,
                                                   padding=(False, False),
                                                   dtype=np.float64,
                                                   onlyreal=False,
                                                   **kw)
            recs.append(cutout(rec))
        results.append(np.array(recs).flatten().view(float))
    # all tilts must yield (approximately) the same reconstruction
    for ii in np.arange(len(results)):
        assert np.allclose(results[ii], results[ii-1], atol=.2, rtol=.2)
def test_3d_backprop_plane_alignment_along_axes():
    """The reconstruction must align with (not antiparallel to) the axis."""
    kw = get_test_parameter_set(1)[0]
    results = []
    # These are specially selected angles that don't give high results.
    # Probably due to phase-wrapping, errors >2 may appear - hence a soft
    # test only.
    for tilt in [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]:
        sino, angles = create_test_sino_3d_tilted(tilt_plane=tilt, A=21)
        cos_t, sin_t = np.cos(tilt), np.sin(tilt)
        rotmat = np.array([
            [cos_t, -sin_t, 0],
            [sin_t, cos_t, 0],
            [0, 0, 1],
        ])
        # rotate `tilted_axis` onto the y-z plane.
        axis = np.dot(rotmat, [0, 1, 0])
        rec = odtbrain.backpropagate_3d_tilted(sino, angles,
                                               padval="edge",
                                               tilted_axis=axis,
                                               padding=(False, False),
                                               dtype=np.float64,
                                               onlyreal=True,
                                               **kw)
        results.append(rec)
    # every quarter-turn must yield (approximately) the same volume
    for ii in np.arange(len(results)):
        assert np.allclose(results[ii], results[ii-1], atol=.2, rtol=.2)
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
|
RI-imaging/ODTbrain | tests/test_processing.py | <reponame>RI-imaging/ODTbrain<gh_stars>10-100
"""Tests refractive index conversion techniques"""
import sys
import numpy as np
import odtbrain
from odtbrain._prepare_sino import divmod_neg
from common_methods import write_results, get_results
# When True, the tests store freshly computed reference results instead of
# comparing against the ones on disk.
WRITE_RES = False
def get_test_data_set():
    """Return a (10, 10, 10) ramp test array plus resolution and medium index."""
    edge = 10
    data = np.arange(edge**3).reshape(edge, edge, edge)
    # add a small gradient along the last axis so slices are not flat
    data = data + np.linspace(1, 2, edge)
    resolution = 7
    medium_index = 1.34
    return data, resolution, medium_index
def get_test_data_set_sino(rytov=False):
    """Return a small complex (2, 10, 10) sinogram for the conversion tests."""
    edge = 10
    n_angles = 2
    ramp = np.arange(n_angles * edge * edge).reshape(n_angles, edge, edge)
    sino = ramp / (edge * edge * n_angles) * np.exp(1j * ramp)
    if rytov:
        # replace the zero at the first pixel - presumably to keep the
        # Rytov conversion well-defined there (TODO confirm)
        sino[0, 0, 0] = .1
    return sino
def negative_modulo_rest(a, b):
    """Return the remainder of a/b whose magnitude is closest to zero."""
    quot = np.array(a / b, dtype=int)
    rest = a - b * quot
    # fold remainders with |r| > b/2 back towards zero
    too_big = np.where(np.abs(rest) > b / 2)
    rest[too_big] -= b * np.sign(rest[too_big])
    return rest
def negative_modulo_rest_imag(x, b):
    """Apply `negative_modulo_rest` to the imaginary part of `x` only."""
    return x.real + 1j * negative_modulo_rest(x.imag, b)
def test_odt_to_ri():
    """odt_to_ri must reproduce stored results and agree between 2D and 3D."""
    myframe = sys._getframe()
    f, res, nm = get_test_data_set()
    ri = odtbrain.odt_to_ri(f=f, res=res, nm=nm)
    if WRITE_RES:
        write_results(myframe, ri)
    assert np.allclose(np.array(ri).flatten().view(float),
                       get_results(myframe))
    # the 2D variant must match the first slice of the 3D result
    ri2d = odtbrain.odt_to_ri(f=f[0], res=res, nm=nm)
    assert np.allclose(ri2d, ri[0])
def test_opt_to_ri():
    """opt_to_ri must reproduce stored results and agree between 2D and 3D."""
    myframe = sys._getframe()
    f, res, nm = get_test_data_set()
    ri = odtbrain.opt_to_ri(f=f, res=res, nm=nm)
    if WRITE_RES:
        write_results(myframe, ri)
    assert np.allclose(np.array(ri).flatten().view(float),
                       get_results(myframe))
    # the 2D variant must match the first slice of the 3D result
    ri2d = odtbrain.opt_to_ri(f=f[0], res=res, nm=nm)
    assert np.allclose(ri2d, ri[0])
def test_sino_radon():
    """sinogram_as_radon must reproduce stored results; 2D and 3D must agree."""
    myframe = sys._getframe()
    sino = get_test_data_set_sino()
    rad = odtbrain.sinogram_as_radon(sino)
    twopi = 2 * np.pi
    # When moving from unwrap to skimage, an offset was introduced. Since
    # this particular array is not flat at the borders, there is no correct
    # way here - we just subtract 2PI. (2019-04-18: this turned out not to
    # happen on Windows, hence the minimum guard.)
    if rad.min() > twopi:
        rad -= twopi
    if WRITE_RES:
        write_results(myframe, rad)
    assert np.allclose(np.array(rad).flatten().view(float),
                       get_results(myframe))
    # The 3D result must match the 2D results up to a multiple of 2PI,
    # because odtbrain._align_unwrapped subtracts the background such that
    # the minimum phase change is closest to zero.
    rad2d = odtbrain.sinogram_as_radon(sino[:, :, 0])
    assert np.allclose(0, negative_modulo_rest(rad2d - rad[:, :, 0], twopi),
                       atol=1e-6)
    rad2d2 = odtbrain.sinogram_as_radon(sino[:, 0, :])
    assert np.allclose(0, negative_modulo_rest(rad2d2 - rad[:, 0, :], twopi),
                       atol=1e-6)
def test_sino_rytov():
    """sinogram_as_rytov must reproduce stored results; 2D and 3D must agree."""
    myframe = sys._getframe()
    sino = get_test_data_set_sino(rytov=True)
    ryt = odtbrain.sinogram_as_rytov(sino)
    twopi = 2 * np.pi
    if WRITE_RES:
        write_results(myframe, ryt)
    # When moving from unwrap to skimage, an offset was introduced. Since
    # this particular array is not flat at the borders, there is no correct
    # way here - we just subtract 2PI. (2019-04-18: this turned out not to
    # happen on Windows, hence the minimum guard.)
    if ryt.imag.min() > twopi:
        ryt.imag -= twopi
    assert np.allclose(np.array(ryt).flatten().view(float),
                       get_results(myframe))
    # The 3D result must match the 2D results up to a multiple of 2PI in
    # the imaginary part, because odtbrain._align_unwrapped subtracts the
    # background such that the median phase change is closest to zero.
    ryt2d = odtbrain.sinogram_as_rytov(sino[:, :, 0])
    assert np.allclose(0, negative_modulo_rest_imag(
        ryt2d - ryt[:, :, 0], twopi).view(float), atol=1e-6)
    ryt2d2 = odtbrain.sinogram_as_rytov(sino[:, 0, :])
    assert np.allclose(0, negative_modulo_rest_imag(
        ryt2d2 - ryt[:, 0, :], twopi).view(float), atol=1e-6)
def test_divmod_neg():
    """divmod_neg must return the remainder closest to zero for a 2PI period."""
    period = 2 * np.pi
    cases = [
        (0, (0, 0)),
        (-1e-17, (0, 0)),
        (1e-17, (0, 0)),
        (-.1, (0, -.1)),
        (.1, (0, .1)),
        (3 * np.pi, (1, np.pi)),
        (-.99 * np.pi, (0, -.99 * np.pi)),
        (-1.01 * np.pi, (-1, .99 * np.pi)),
    ]
    for value, expected in cases:
        assert np.allclose(divmod_neg(value, period), expected)
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
|
RI-imaging/ODTbrain | odtbrain/_alg2d_fmp.py | <reponame>RI-imaging/ODTbrain
"""2D Fourier mapping"""
import numpy as np
import scipy.interpolate as intp
def fourier_map_2d(uSin, angles, res, nm, lD=0, semi_coverage=False,
                   coords=None, count=None, max_count=None, verbose=0):
    r"""2D Fourier mapping with the Fourier diffraction theorem

    Two-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,z)`
    by a dielectric object with refractive index
    :math:`n(x,z)`.

    This function implements the solution by interpolation in
    Fourier space.

    Parameters
    ----------
    uSin: (A,N) ndarray
        Two-dimensional sinogram of line recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    semi_coverage: bool
        If set to `True`, it is assumed that the sinogram does not
        necessarily cover the full angular range from 0 to 2π, but an
        equidistant coverage over 2π can be achieved by inferring point
        (anti)symmetry of the (imaginary) real parts of the Fourier
        transform of f. Valid for any set of angles {X} that result in
        a 2π coverage with the union set {X}U{X+π}.
    coords: None [(2,M) ndarray]
        Computes only the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (N,N), complex if `onlyreal` is `False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    backpropagate_2d: implementation by backpropagation
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).

    The interpolation in Fourier space (which is done with
    :func:`scipy.interpolate.griddata`) may be unstable and lead to
    artifacts if the data to interpolate contains sharp spikes. This
    issue is not handled at all by this method (in fact, a test has
    been removed in version 0.2.6 because ``griddata`` gave different
    results on Windows and Linux).
    """
    ##
    ##
    # TODO:
    # - zero-padding as for backpropagate_2D - However this is not
    #   necessary as Fourier interpolation is not parallelizable with
    #   multiprocessing and thus unattractive. Could be interesting for
    #   specific environments without the Python GIL.
    # - Deal with oversampled data. Maybe issue a warning.
    ##
    ##
    A = angles.shape[0]
    if max_count is not None:
        max_count.value += 4
    # Check input data
    assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
    assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"

    if coords is not None:
        # BUGFIX: the original message lacked a space between "set" and
        # "for" and misspelled "backpropagation".
        raise NotImplementedError("Output coordinates cannot yet be set "
                                  "for the 2D backpropagation algorithm.")

    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # Fourier transform of all uB's
    # In the script we used the unitary angular frequency (uaf) Fourier
    # Transform. The discrete Fourier transform is equivalent to the
    # unitary ordinary frequency (uof) Fourier transform.
    #
    # uof: f₁(ξ) = int f(x) exp(-2πi xξ)
    #
    # uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
    #
    # f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
    # ω = 2πξ
    #
    # Our Backpropagation Formula is with uaf convention of the Form
    #
    # F(k) = 1/sqrt(2π) U(kD)
    #
    # If we convert now to uof convention, we get
    #
    # F(k) = U(kD)
    #
    # This means that if we divide the Fourier transform of the input
    # data by sqrt(2π) to convert f₃(ω) to f₁(ω/(2π)), the resulting
    # value for F is off by a factor of 2π.
    #
    # Instead, we can just multiply *UB* by sqrt(2π) and calculate
    # everything in uof.
    # UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1))/np.sqrt(2*np.pi)
    #
    # Furthermore, we define a wave propagating to the right as
    # u0(x) = exp(ikx), whereas physics usually uses the sign convention
    # u0(x) = exp(-ikx). In order to be consistent with programs like
    # Meep or our scattering script for a dielectric cylinder, we use the
    # latter sign convention. This is not a big problem: we only need to
    # multiply the imaginary part of the scattered wave by -1.
    UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) * np.sqrt(2 * np.pi)

    # Corresponding sample frequencies
    fx = np.fft.fftfreq(len(uSin[0]))  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx

    if count is not None:
        count.value += 1

    # Undersampling/oversampling?
    # Determine if the resolution of the image is too low by looking
    # at the maximum value for kx. This is no comparison between
    # Nyquist and Rayleigh frequency.
    # BUGFIX: previously the sampling notice used
    # `if verbose and <undersampled>: ... else: print("oversampled")`,
    # so a falsy `verbose` (the default) printed the "oversampled"
    # message unconditionally. The notice is now only printed when
    # `verbose` is set.
    if np.max(kx**2) <= km**2:
        # Detector is not set up properly. Higher resolution
        # can be achieved.
        if verbose:
            print("......Measurement data is undersampled.")
    elif verbose:
        print("......Measurement data is oversampled.")
        # raise NotImplementedError("Oversampled data not yet supported."+
        #                " Please rescale xD-axis of the input data.")
        # DEAL WITH OVERSAMPLED DATA?
        # lenk = len(kx)
        # kx = np.fft.ifftshift(np.linspace(-np.sqrt(km),
        #                     np.sqrt(km),
        #                     len(fx), endpoint=False))

    #
    # F(kD-kₘs₀) = - i kₘ sqrt(2/π) / a₀ * M exp(-i kₘ M lD) * UB(kD)
    # kₘM = sqrt( kₘ² - kx² )
    # s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    # We create the 2D interpolation object F
    # - We compute the real coordinates (krx,kry) = kD-kₘs₀
    # - We set as grid points the right side of the equation
    #
    # The interpolated griddata may go up to sqrt(2)*kₘ for kx and ky.
    kx = kx.reshape(1, -1)
    # a0 should have same shape as kx and UB
    # a0 = np.atleast_1d(a0)
    # a0 = a0.reshape(1,-1)

    filter_klp = (kx**2 < km**2)
    M = 1. / km * np.sqrt(km**2 - kx**2)

    # Fsin = -1j * km * np.sqrt(2/np.pi) / a0 * M * np.exp(-1j*km*M*lD)
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    Fsin = -1j * km * np.sqrt(2 / np.pi) * M * np.exp(-1j * km * (M-1) * lD)

    # UB has same shape (len(angles), len(kx))
    Fsin = Fsin * UB * filter_klp

    ang = angles.reshape(-1, 1)

    if semi_coverage:
        Fsin = np.vstack((Fsin, np.conj(Fsin)))
        ang = np.vstack((ang, ang + np.pi))

    if count is not None:
        count.value += 1

    # Compute kxl and kyl (in rotated system ϕ₀)
    kxl = kx
    kyl = np.sqrt((km**2 - kx**2) * filter_klp) - km
    # rotate kxl and kyl to where they belong
    krx = np.cos(ang) * kxl + np.sin(ang) * kyl
    kry = - np.sin(ang) * kxl + np.cos(ang) * kyl

    Xf = krx.flatten()
    Yf = kry.flatten()
    Zf = Fsin.flatten()

    # DEBUG: plot kry vs krx
    # from matplotlib import pylab as plt
    # plt.figure()
    # for i in range(len(krx)):
    #     plt.plot(krx[i],kry[i],"x")
    # plt.axes().set_aspect('equal')
    # plt.show()

    # interpolation on grid with same resolution as input data
    kintp = np.fft.fftshift(kx.reshape(-1))
    Fcomp = intp.griddata((Xf, Yf), Zf, (kintp[None, :], kintp[:, None]))

    if count is not None:
        count.value += 1

    # removed nans
    Fcomp[np.where(np.isnan(Fcomp))] = 0

    # Filter data
    kinx, kiny = np.meshgrid(np.fft.fftshift(kx), np.fft.fftshift(kx))
    # NOTE(review): this compares k² against sqrt(2)·km rather than
    # (sqrt(2)·km)² - dimensionally odd, but kept as-is to preserve the
    # established reconstruction behavior.
    Fcomp[np.where((kinx**2 + kiny**2) > np.sqrt(2) * km)] = 0
    # Fcomp[np.where(kinx**2+kiny**2<km)] = 0

    # Fcomp is centered at K = 0 due to the way we chose kintp/coords
    f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))

    if count is not None:
        count.value += 1

    return f[::-1]
|
RI-imaging/ODTbrain | tests/test_angle_weights.py | """Tests 1D angular weights"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_2d, get_test_parameter_set
def test_angle_offset():
    """Reconstruction must be invariant to 2PI offsets in the angles."""
    sino, angles = create_test_sino_2d()
    params = get_test_parameter_set(2)
    # reference
    ref = [odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **kw)
           for kw in params]
    # add multiples of 2PI to every other angle
    angles[::2] += 2 * np.pi * np.arange(angles[::2].shape[0])
    off = [odtbrain.backpropagate_2d(sino, angles, weight_angles=False, **kw)
           for kw in params]
    # same offset, with angular weighting enabled
    wgt = [odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **kw)
           for kw in params]
    assert np.allclose(np.array(ref).flatten().view(float),
                       np.array(off).flatten().view(float))
    assert np.allclose(np.array(off).flatten().view(float),
                       np.array(wgt).flatten().view(float))
def test_angle_swap():
    """Reconstruction must not depend on the ordering of the angles."""
    sino, angles = create_test_sino_2d()
    # remove elements so that we can see that weighting works
    angles = angles[:-2]
    sino = sino[:-2, :]
    params = get_test_parameter_set(2)
    # reference
    ref = [odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **kw)
           for kw in params]
    # shuffle the angular ordering
    order = np.argsort(angles % .5)
    angles = angles[order]
    sino = sino[order, :]
    new = [odtbrain.backpropagate_2d(sino, angles, weight_angles=True, **kw)
           for kw in params]
    assert np.allclose(np.array(ref).flatten().view(float),
                       np.array(new).flatten().view(float))
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
|
RI-imaging/ODTbrain | examples/backprop_from_fdtd_2d.py | """FDTD cell phantom
The *in silico* data set was created with the
:abbr:`FDTD (Finite Difference Time Domain)` software `meep`_. The data
are 1D projections of a 2D refractive index phantom. The
reconstruction of the refractive index with the Rytov approximation
is in good agreement with the phantom that was used in the
simulation.
.. _`meep`: http://ab-initio.mit.edu/wiki/index.php/Meep
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data

# Load the FDTD sinogram, acquisition angles, ground-truth phantom, and the
# simulation configuration (medium index nm, detector distance lD, sampling).
sino, angles, phantom, cfg = load_data("fdtd_2d_sino_A100_R13.zip",
                                       f_angles="fdtd_angles.txt",
                                       f_sino_imag="fdtd_imag.txt",
                                       f_sino_real="fdtd_real.txt",
                                       f_info="fdtd_info.txt",
                                       f_phantom="fdtd_phantom.txt",
                                       )
print("Example: Backpropagation from 2D FDTD simulations")
print("Refractive index of medium:", cfg["nm"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Performing backpropagation.")
# Apply the Rytov approximation
sino_rytov = odt.sinogram_as_rytov(sino)
# perform backpropagation to obtain object function f
f = odt.backpropagate_2d(uSin=sino_rytov,
                         angles=angles,
                         res=cfg["res"],
                         nm=cfg["nm"],
                         lD=cfg["lD"] * cfg["res"]
                         )
# compute refractive index n from object function
n = odt.odt_to_ri(f, res=cfg["res"], nm=cfg["nm"])
# compare phantom and reconstruction in plot
fig, axes = plt.subplots(1, 3, figsize=(8, 2.8))
axes[0].set_title("FDTD phantom")
axes[0].imshow(phantom, vmin=phantom.min(), vmax=phantom.max())
# unwrap the phase along the detector axis for display
sino_phase = np.unwrap(np.angle(sino), axis=1)
axes[1].set_title("phase sinogram")
axes[1].imshow(sino_phase, vmin=sino_phase.min(), vmax=sino_phase.max(),
               aspect=sino.shape[1] / sino.shape[0],
               cmap="coolwarm")
axes[1].set_xlabel("detector")
axes[1].set_ylabel("angle [rad]")
axes[2].set_title("reconstructed image")
# display the real part of the refractive index on the phantom's color scale
axes[2].imshow(n.real, vmin=phantom.min(), vmax=phantom.max())
# set y ticks for sinogram
labels = np.linspace(0, 2 * np.pi, len(axes[1].get_yticks()))
labels = ["{:.2f}".format(i) for i in labels]
axes[1].set_yticks(np.linspace(0, len(angles), len(labels)))
axes[1].set_yticklabels(labels)
plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | odtbrain/_alg2d_int.py | """2D slow integration"""
import numpy as np
def integrate_2d(uSin, angles, res, nm, lD=0, coords=None,
count=None, max_count=None, verbose=0):
r"""(slow) 2D reconstruction with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This function implements the solution by summation in real
space, which is extremely slow.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None or (2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
backpropagate_2d: implementation by backprojection
fourier_map_2d: implementation by Fourier interpolation
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This method is not meant for production use. The computation time
is very long and the reconstruction quality is bad. This function
is included in the package, because of its educational value,
exemplifying the backpropagation algorithm.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
if coords is None:
lx = uSin.shape[1]
x = np.linspace(-lx/2, lx/2, lx, endpoint=False)
xv, yv = np.meshgrid(x, x)
coords = np.zeros((2, lx**2))
coords[0, :] = xv.flat
coords[1, :] = yv.flat
if max_count is not None:
max_count.value += coords.shape[1] + 1
# Cut-Off frequency
km = (2 * np.pi * nm) / res
# Fourier transform of all uB's
# In the script we used the unitary angular frequency (uaf) Fourier
# Transform. The discrete Fourier transform is equivalent to the
# unitary ordinary frequency (uof) Fourier transform.
#
# uof: f₁(ξ) = int f(x) exp(-2πi xξ)
#
# uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
#
# f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
# ω = 2πξ
#
# We have a one-dimensional (n=1) Fourier transform and UB in the
# script is equivalent to f₃(ω). Because we are working with the
# uaf, we divide by sqrt(2π) after computing the fft with the uof.
#
# We calculate the fourier transform of uB further below. This is
# necessary for memory control.
# Corresponding sample frequencies
fx = np.fft.fftfreq(uSin[0].shape[0]) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
# Undersampling/oversampling?
# Determine if the resolution of the image is too low by looking
# at the maximum value for kx. This is no comparison between
# Nyquist and Rayleigh frequency.
if np.max(kx**2) <= 2 * km**2:
# Detector is not set up properly. Higher resolution
# can be achieved.
if verbose:
print("......Measurement data is undersampled.")
else:
if verbose:
print("......Measurement data is oversampled.")
raise NotImplementedError("Oversampled data not yet supported." +
" Please rescale input data")
# Differentials for integral
dphi0 = 2 * np.pi / len(angles)
dkx = kx[1] - kx[0]
# We will later multiply with phi0.
# Make sure we are using correct shapes
kx = kx.reshape(1, kx.shape[0])
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 < km**2)
# a0 will be multiplied with kx
# a0 = np.atleast_1d(a0)
# a0 = a0.reshape(1,-1)
# Create the integrand
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
#
# everything that is not dependent on phi0:
#
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
prefactor = -1j * km / ((2 * np.pi)**(3. / 2))
prefactor *= dphi0 * dkx
# Also filter the prefactor, so nothing outside the required
# low-pass contributes to the sum.
prefactor *= np.abs(kx) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
# Initiate function f
f = np.zeros(len(coords[0]), dtype=np.complex128)
lenf = len(f)
lenu0 = len(uSin[0]) # lenu0 = len(kx[0])
# Initiate vector r that corresponds to calculating a value of f.
r = np.zeros((2, 1, 1))
# Everything is normal.
# Get the angles ϕ₀.
phi0 = angles.reshape(-1, 1)
# Compute the Fourier transform of uB.
# This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0])
# because axis -1 is always used.
#
#
    # Furthermore, the notation in our optical tomography script for
# a wave propagating to the right is:
#
# u0(x) = exp(ikx)
#
    # However, in physics usually the other sign convention is used:
#
# u0(x) = exp(-ikx)
#
    # In order to be consistent with programs like Meep or our scattering
# script for a dielectric cylinder, we want to use the latter sign
# convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) / np.sqrt(2 * np.pi)
UBi = UB.reshape(len(angles), lenu0)
if count is not None:
count.value += 1
for j in range(lenf):
# Get r (We compute f(r) in this for-loop)
r[0][:] = coords[0, j] # x
r[1][:] = coords[1, j] # y
# Integrand changes with r, so we have to create a new
# array:
integrand = prefactor * UBi
# We save memory by directly applying the following to
# the integrand:
#
# Vector along which we measured
# s0 = np.zeros((2, phi0.shape[0], kx.shape[0]))
# s0[0] = -np.sin(phi0)
# s0[1] = +np.cos(phi0)
# Vector perpendicular to s0
# t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1]))
#
# t_perp_kx[0] = kx*np.cos(phi0)
# t_perp_kx[1] = kx*np.sin(phi0)
#
# term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0))
# integrand* = term3
#
# Reminder:
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UB(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
integrand *= np.exp(1j * (
r[0] * (kx * np.cos(phi0) - km * (M - 1) * np.sin(phi0)) +
r[1] * (kx * np.sin(phi0) + km * (M - 1) * np.cos(phi0))))
# Calculate the integral for the position r
# integrand.sort()
f[j] = np.sum(integrand)
# free memory
del integrand
if count is not None:
count.value += 1
return f.reshape(lx, lx)
|
RI-imaging/ODTbrain | odtbrain/_prepare_sino.py | """Sinogram preparation"""
import numpy as np
from scipy.stats import mode
from skimage.restoration import unwrap_phase
def align_unwrapped(sino):
    """Align an unwrapped phase array to zero-phase

    The per-projection phase offset is estimated from 1D samples
    taken at the edges of the sinogram and subtracted, together with
    any global multiple of 2π. All operations are performed in-place.

    Parameters
    ----------
    sino: 2d or 3d real ndarray
        Unwrapped phase sinogram; the first axis iterates through
        the projection angles.
    """
    samples = []
    if len(sino.shape) == 2:
        # 2D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0])
        samples.append(sino[:, 1])
        samples.append(sino[:, 2])
        samples.append(sino[:, -1])
        samples.append(sino[:, -2])
    elif len(sino.shape) == 3:
        # 3D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0, 0])
        samples.append(sino[:, 0, -1])
        samples.append(sino[:, -1, 0])
        samples.append(sino[:, -1, -1])
        samples.append(sino[:, 0, 1])
    # find discontinuities in the samples
    steps = np.zeros((len(samples), samples[0].shape[0]))
    for i in range(len(samples)):
        t = np.unwrap(samples[i])
        steps[i] = samples[i] - t
    # if the majority believes so, remove a step of 2π
    # `np.reshape(..., -1)` copes with both the legacy scipy behavior
    # (keepdims-like mode result of shape (1, A)) and scipy >= 1.11,
    # where `mode` returns a squeezed array of shape (A,); the old
    # `mode(steps, axis=0)[0][0]` indexing breaks for the latter.
    remove = np.reshape(mode(steps, axis=0)[0], -1)
    # obtain divmod min
    twopi = 2*np.pi
    minimum = divmod_neg(np.min(sino), twopi)[0]
    remove = remove + minimum*twopi
    for i in range(len(sino)):
        sino[i] -= remove[i]
def divmod_neg(a, b):
    """Quotient and remainder of ``a / b`` with the remainder
    folded into the interval closest to zero (``[-b/2, b/2]``)."""
    quot, rem = divmod(a, b)
    if np.abs(rem) > b / 2:
        # fold the remainder towards zero, adjust quotient accordingly
        shift = np.sign(rem)
        quot = quot + shift
        rem = rem - b * shift
    return quot, rem
def sinogram_as_radon(uSin, align=True):
    r"""Compute the phase from a complex wave field sinogram

    This step is essential when using the ray approximation before
    computation of the refractive index with the inverse Radon
    transform.

    Parameters
    ----------
    uSin: 2d or 3d complex ndarray
        The background-corrected sinogram of the complex scattered wave
        :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates
        through the angles :math:`\phi_0`.
    align: bool
        Tries to correct for a phase offset in the phase sinogram.

    Returns
    -------
    phase: 2d or 3d real ndarray
        The unwrapped phase array corresponding to `uSin`.

    See Also
    --------
    skimage.restoration.unwrap_phase: phase unwrapping
    radontea.backproject_3d: e.g. reconstruction via backprojection
    """
    if uSin.ndim == 2:
        # 1D unwrapping along the detector axis is essential here
        phase = np.unwrap(np.angle(uSin), axis=-1)
    else:
        # `unwrap_phase` infers the dimensionality from its input;
        # a sinogram must therefore be processed slice by slice.
        phase = np.angle(uSin)
        for jj in range(len(phase)):
            phase[jj] = unwrap_phase(phase[jj], seed=47)
    if align:
        align_unwrapped(phase)
    return phase
def sinogram_as_rytov(uSin, u0=1, align=True):
    r"""Convert the complex wave field sinogram to the Rytov phase

    The recorded complex wave sinogram is filtered with

    .. math::
        u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r})
            \ln\!\left(
            \frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})}
            +1 \right)

    which effectively replaces the Born approximation
    :math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation
    :math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered
    field satisfies :math:`u(\mathbf{r})\approx
    u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`.

    Parameters
    ----------
    uSin: 2d or 3d complex ndarray
        The sinogram of the complex wave
        :math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`.
        The first axis iterates through the angles :math:`\phi_0`.
    u0: ndarray of dimension as `uSin` or less, or int.
        The incident plane wave :math:`u_\mathrm{0}(\mathbf{r})` at
        the detector. If `u0` is "1", the data are assumed to be
        background-corrected already. Note that for a non-zero
        reconstruction distance :math:`l_\mathrm{D}` of the original
        experiment, setting `u0` to 1 yields a wrong reconstruction
        (field not focused to the center of the volume).
    align: bool
        Tries to correct for a phase offset in the phase sinogram.

    Returns
    -------
    uB: 2d or 3d complex ndarray
        The Rytov-filtered complex sinogram
        :math:`u_\mathrm{B}(\mathbf{r})`.

    See Also
    --------
    skimage.restoration.unwrap_phase: phase unwrapping
    """
    field = uSin / u0
    # imaginary part of the complex Rytov phase
    phase = np.angle(field)
    # real part of the complex Rytov phase
    log_amplitude = np.log(np.absolute(field))
    if uSin.ndim == 2:
        # unwrapping is very important
        phase[:] = np.unwrap(phase, axis=-1)
    else:
        # `unwrap_phase` infers the dimensionality from its input;
        # process the sinogram projection by projection.
        for jj in range(len(phase)):
            phase[jj] = unwrap_phase(phase[jj], seed=47)
    if align:
        align_unwrapped(phase)
    # complex Rytov phase (u0 is one for background-corrected data)
    return u0 * (log_amplitude + 1j * phase)
|
RI-imaging/ODTbrain | odtbrain/apple.py | import multiprocessing as mp
import numpy as np
import pyfftw
import scipy.ndimage as ndi
def apple_core_3d(shape, res, nm):
    r"""Return a binary array with the apple core in 3D

    Parameters
    ----------
    shape: list-like, length 3
        Shape of the reconstruction volume for which to compute
        the apple core; the second (y-) axis is assumed to be the
        axis of symmetry (according to ODTbrain standard notation)
    res: float
        Size of the vacuum wave length :math:`\lambda` in pixels
    nm: float
        Refractive index of the medium :math:`n_\mathrm{m}`

    Returns
    -------
    core: 3D ndarray
        Boolean mask that is `True` for positions within the
        apple core
    """
    # medium wave number (cut-off frequency)
    km = (2 * np.pi * nm) / res
    nx, ny, nz = shape
    if nx != nz:
        raise ValueError("`shape[0]` and `shape[2]` must be identical!")
    fx = np.fft.fftfreq(nx).reshape(-1, 1, 1)
    fy = np.fft.fftfreq(ny).reshape(1, -1, 1)
    fz = np.fft.fftfreq(nz).reshape(1, 1, -1)
    ky = 2*np.pi * fy
    kxz = 2*np.pi * np.sqrt(fx**2 + fz**2)
    kr = 2*np.pi * np.sqrt(fx**2 + fy**2 + fz**2)
    # frequencies above the Ewald-sphere caps belong to the core
    root = 2*km*kxz - kxz**2
    root[root < 0] = 0
    core = np.abs(ky) > np.sqrt(root)
    # restrict the core to the resolution-limited sphere
    core &= ~(kr > np.sqrt(2) * km)
    return core
def constraint_nn(data, mask=None, bg_shell=None):
    """Non-negativity constraint (applied to `data` in-place)

    Removes the imaginary part (no absorption), zeros the known
    background regions (outside the reconstruction ellipsoid and the
    optional `bg_shell`), and clips negative real values to zero,
    optionally restricted to `mask`.
    """
    # drop absorption (imaginary part)
    if np.iscomplexobj(data):
        data.imag[:] = 0
    # zero the known background regions
    if bg_shell is not None:
        data.real[bg_shell] = 0
    data.real[spillover_region(data.shape)] = 0
    # clip negative object-function values
    negative = data.real < 0
    if mask is not None:
        # honor the user-supplied background mask
        negative &= mask
    data.real[negative] = 0
def constraint_sh(data, mask=None, bg_shell=None):
    """Symmetric histogram background data constraint

    Applied to `data` in-place: the imaginary part is removed and
    real values that lie outside the symmetric main histogram peak
    (determined from the background regions) are damped by a factor
    of 1/2 per call (soft constraint).

    Parameters
    ----------
    data: 3D ndarray
        Object function data; modified in-place.
    mask: 3D boolean ndarray or None
        If given, damping is restricted to the `True` region.
    bg_shell: 3D boolean ndarray or None
        Additional known background region; also damped directly.
    """
    # No imaginary RI (no absorption)
    if np.iscomplexobj(data):
        data.imag[:] = 0
    # determine range of medium RI (using background support)
    spov = spillover_region(data.shape)
    if bg_shell is not None:
        spov |= bg_shell
    fmin = np.min(data.real[spov])
    fmax = np.max(data.real[spov])
    # histogram over the full background value range
    full_hist, full_edge = np.histogram(
        data.real, bins=100, range=(fmin, fmax))
    de = full_edge[1] - full_edge[0]
    # bin centers
    full_f = full_edge[1:] - de/2
    # center index (actually we would expect f_c==0)
    idx_c = np.argmax(full_hist)
    # half-maximum indices on both sides of the peak
    idx_start = idx_c - count_to_half(full_hist[:idx_c][::-1])
    idx_end = idx_c + count_to_half(full_hist[idx_c:])
    # RI values outside the symmetric peak
    below = (data.real > fmin) * (data.real < full_f[idx_start])
    above = (data.real > full_f[idx_end]) * (data.real < fmax)
    out = below | above
    if mask is not None:
        # honor given mask
        out *= mask
    # push RI values towards zero
    data.real[out] *= .5
    if bg_shell is not None:
        # push known background data to zero
        data.real[bg_shell] *= .5
def correct(f, res, nm, method="nn", mask=None, bg_shell_width=None,
            enforce_envelope=0.95, max_iter=100, min_diff=.01,
            count=None, max_count=None):
    r"""Fill the missing apple core of the object function

    Parameters
    ----------
    f: 3D ndarray
        Complex object function :math:`f(\mathbf{r})`
    res: float
        Size of the vacuum wave length :math:`\lambda` in pixels
    nm: float
        Refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`n(\mathbf{r})`
    method: str
        One of:

        - "nn": non-negativity constraint (`f > 0`). This method
          resembles classic missing apple core correction.
        - "sh": symmetric histogram constraint (background data in
          `f`). This method works well for sparse-gradient data (e.g.
          works better than "nn" for simulated data), but might result
          in stripe-like artifacts when applied to experimental data.

        The imaginary part of the refractive index is suppressed
        in both cases.
        Note that these constraints are soft, i.e. after the final
        inverse Fourier transform, the conditions might not be met.
    mask: 3D boolean ndarray, or None
        Optional, defines background region(s) used for enforcing
        `method`. If a boolean ndarray, the values set to `True` define
        the used background regions.
    bg_shell_width: float
        Optional, defines the width of an ellipsoid shell (outer radii
        matching image shape) that is used additionally for enforcing
        `method`.
    enforce_envelope: float in interval [0,1] or False
        Set the suppression factor for frequencies that are above
        the envelope function; disabled if set to False or 0
    max_iter: int
        Maximum number of iterations to perform
    min_diff: float
        Stopping criterion computed as the relative difference
        (relative to the first iteration `norm`) of the changes applied
        during the current iteration `cur_diff`:
        ``np.abs(cur_diff/norm) < min_diff``
    count: multiprocessing.Value
        May be used for tracking progress. At each iteration
        `count.value` is incremented by one.
    max_count: multiprocessing.Value
        May be used for tracking progress; is incremented initially.

    Returns
    -------
    data: 3D complex64 ndarray
        The corrected object function (the in-place iteration buffer)

    Notes
    -----
    Internally, the Fourier transform is performed with single-precision
    floating point values (complex64).
    """
    if enforce_envelope < 0 or enforce_envelope > 1:
        raise ValueError("`enforce_envelope` must be in interval [0, 1]")
    if max_count is not None:
        with max_count.get_lock():
            # one step per iteration plus the two setup steps below
            max_count.value += max_iter + 2
    # Location of the apple core
    core = apple_core_3d(shape=f.shape, res=res, nm=nm)
    if count is not None:
        with count.get_lock():
            count.value += 1
    # FFTW buffers; the whole iteration works in-place on `data`/`ftdata`
    data = pyfftw.empty_aligned(f.shape, dtype='complex64')
    ftdata = pyfftw.empty_aligned(f.shape, dtype='complex64')
    fftw_forw = pyfftw.FFTW(data, ftdata,
                            axes=(0, 1, 2),
                            direction="FFTW_FORWARD",
                            flags=["FFTW_MEASURE"],
                            threads=mp.cpu_count())
    # Note: input array `ftdata` is destroyed when invoking `fftw_back`
    fftw_back = pyfftw.FFTW(ftdata, data,
                            axes=(0, 1, 2),
                            direction="FFTW_BACKWARD",
                            flags=["FFTW_MEASURE"],
                            threads=mp.cpu_count())
    # start from the real part of f; the imaginary part is discarded
    data.real[:] = f.real
    data.imag[:] = 0
    fftw_forw.execute()
    # keep the measured spectrum; it is re-imposed outside the core
    # in every iteration
    ftdata_orig = ftdata.copy()
    if count is not None:
        with count.get_lock():
            count.value += 1
    if enforce_envelope:
        # Envelope function of Fourier amplitude
        ftevlp = envelope_gauss(ftdata_orig, core)
    # Fourier amplitude inside the core; basis of the stopping criterion
    init_state = np.sum(np.abs(ftdata_orig[core])) / data.size
    prev_state = init_state
    if bg_shell_width is not None:
        bg_shell = ellipsoid_shell(data.shape, width=bg_shell_width)
    else:
        bg_shell = None
    for ii in range(max_iter):
        if method == "nn":
            # non-negativity
            constraint_nn(data=data, mask=mask, bg_shell=bg_shell)
        elif method == "sh":
            # symmetric histogram
            constraint_sh(data=data, mask=mask, bg_shell=bg_shell)
        # Go into Fourier domain
        fftw_forw.execute()
        if enforce_envelope:
            # Suppress large frequencies with the envelope
            high = np.abs(ftdata) > ftevlp
            ftdata[high] *= enforce_envelope
        if method == "sh":
            # update dc term
            ftdata_orig[0, 0, 0] = (ftdata_orig[0, 0, 0] + ftdata[0, 0, 0])/2
        # Enforce original data
        ftdata[~core] = ftdata_orig[~core]
        fftw_back.execute()
        # FFTW backward transform is unnormalized; divide by the
        # total number of elements
        data[:] /= fftw_forw.N
        if count is not None:
            with count.get_lock():
                count.value += 1
        cur_state = np.sum(np.abs(ftdata[core])) / data.size
        cur_diff = cur_state - prev_state
        if ii == 0:
            # normalization of the relative stopping criterion
            norm = cur_diff
        else:
            if np.abs(cur_diff/norm) < min_diff:
                break
        prev_state = cur_state
    if count is not None:
        with count.get_lock():
            # add skipped counts (due to stopping criterion)
            count.value += max_iter - ii - 1
    return data
def count_to_half(array):
    """Determination of half-initial-value index

    Return the index (relative to ``array[1:]``) of the first element
    whose value drops below half of ``array[0]``; if no element does,
    return ``len(array) - 1``.
    """
    half = array[0] / 2
    for num, item in enumerate(array[1:]):
        if item < half:
            return num
    return len(array) - 1
def ellipsoid_shell(shape, width=20):
    """Boolean mask of an ellipsoidal background shell

    The shell is the set difference between the spill-over region of
    the full volume and the spill-over region shrunk by `width` px.
    """
    inner = spillover_region(shape, shell=width)
    outer = spillover_region(shape, shell=0)
    return np.logical_xor(outer, inner)
def envelope_gauss(ftdata, core):
    r"""Compute a gaussian-filtered envelope, without apple core

    Parameters
    ----------
    ftdata: 3D ndarray
        Fourier transform of the object function data
        (zero frequency not shifted to center of array)
    core: 3D ndarray (same shape as ftdata)
        Apple core (as defined by :func:`apple_core_3d`)

    Returns
    -------
    envelope: 3D ndarray
        Envelope function in Fourier space
    """
    env = np.abs(ftdata)
    # mark the apple-core region as missing data
    env[core] = np.nan
    # Estimate the missing amplitudes from the other axes; only an
    # estimate of the envelope is required, so this works well.
    nx, ny, _ = env.shape
    pad_len = max(nx, ny)
    offset = abs(ny - nx) // 2
    # slice of the padded cube that holds the (shifted) data
    if nx > ny:
        region = (slice(0, nx),
                  slice(offset, ny + offset),
                  slice(0, nx))
    else:
        region = (slice(offset, nx + offset),
                  slice(0, ny),
                  slice(offset, nx + offset),
                  )
    # 1. Padded copies (nx and ny can differ, so inserting a transposed
    # array directly would not work).
    env_pad = np.zeros((pad_len, pad_len, pad_len), dtype=float)
    env_pad[region] = np.fft.fftshift(env)
    core_pad = np.zeros((pad_len, pad_len, pad_len), dtype=bool)
    core_pad[region] = np.fft.fftshift(core)
    # 2. Borrow values from the x<->y transposed array where data
    # are missing.
    env_pad[core_pad] = np.transpose(env_pad, (1, 0, 2))[core_pad]
    # 3. Fill any remaining nan-values (due to different shape or tilt)
    # with nearest neighbors, using a distance transform.
    nan_mask = np.isnan(env_pad)
    nearest = ndi.distance_transform_edt(nan_mask,
                                         return_distances=False,
                                         return_indices=True)
    env_pad[:] = env_pad[tuple(nearest)]
    # 4. Write the data back to the original array.
    env[:] = np.fft.ifftshift(env_pad[region])
    # Gaussian blurring (shift data so the result is smooth)
    smoothed = ndi.gaussian_filter(input=np.fft.fftshift(env),
                                   sigma=np.max(ftdata.shape)/100,
                                   mode="constant",
                                   cval=0,
                                   truncate=4.0)
    # shift back
    return np.fft.ifftshift(smoothed)
def spillover_region(shape, shell=0):
    """Boolean mask of the region outside an inscribed ellipsoid

    The ellipsoid is centered in the volume; its semi-axes are half
    the respective edge lengths, each reduced by `shell` pixels.
    """
    cx, cy, cz = np.array(shape) / 2
    ax = ((np.arange(shape[0]).reshape(-1, 1, 1) - cx) / (cx - shell))**2
    ay = ((np.arange(shape[1]).reshape(1, -1, 1) - cy) / (cy - shell))**2
    az = ((np.arange(shape[2]).reshape(1, 1, -1) - cz) / (cz - shell))**2
    return (ax + ay + az) > 1
|
RI-imaging/ODTbrain | odtbrain/__init__.py | <reponame>RI-imaging/ODTbrain<filename>odtbrain/__init__.py
"""Algorithms for scalar diffraction tomography"""
from ._alg2d_bpp import backpropagate_2d # noqa F401
from ._alg2d_fmp import fourier_map_2d # noqa F401
from ._alg2d_int import integrate_2d # noqa F401
from ._alg3d_bpp import backpropagate_3d # noqa F401
from ._alg3d_bppt import backpropagate_3d_tilted # noqa F401
from ._prepare_sino import sinogram_as_radon, sinogram_as_rytov # noqa F401
from ._translate_ri import odt_to_ri, opt_to_ri # noqa F401
from ._version import version as __version__ # noqa F401
from ._version import longversion as __version_full__ # noqa F401
from . import apple # noqa F401
# Package metadata (author name anonymized in this dataset export)
__author__ = "<NAME>"
__license__ = "BSD (3 clause)"
|
RI-imaging/ODTbrain | tests/test_save_memory.py | """Test save memory options"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, get_test_parameter_set
def test_back3d():
    """`save_memory` must not change the 3D backpropagation result."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    parameters = get_test_parameter_set(2)
    # reconstructions with the fast (memory-hungry) code path
    fast = [odtbrain.backpropagate_3d(sino, angles, padval=0,
                                      dtype=np.float64,
                                      save_memory=False, **p)
            for p in parameters]
    # reconstructions with the memory-saving code path
    lean = [odtbrain.backpropagate_3d(sino, angles, padval=0,
                                      dtype=np.float64,
                                      save_memory=True, **p)
            for p in parameters]
    assert np.allclose(np.array(fast), np.array(lean))
def test_back3d_tilted():
    """`save_memory` must not change the tilted 3D backpropagation."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    parameters = get_test_parameter_set(2)
    # reconstructions with the fast (memory-hungry) code path
    fast = [odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
                                             dtype=np.float64,
                                             save_memory=False, **p)
            for p in parameters]
    # reconstructions with the memory-saving code path
    lean = [odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
                                             dtype=np.float64,
                                             save_memory=True, **p)
            for p in parameters]
    assert np.allclose(np.array(fast), np.array(lean))
if __name__ == "__main__":
    # Execute every test function defined in this module
    for _name, _func in list(locals().items()):
        if _name.startswith("test_") and callable(_func):
            _func()
|
RI-imaging/ODTbrain | tests/test_weighting.py | """Test sinogram weighting"""
import numpy as np
import platform
import odtbrain
from common_methods import create_test_sino_3d, get_test_parameter_set
def test_3d_backprop_weights_even():
    """Equidistant angles: angular weighting must be a no-op."""
    # make platform.system() report "Windows" (exercises that code path)
    platform.system = lambda: "Windows"
    sino, angles = create_test_sino_3d()
    p = get_test_parameter_set(1)[0]
    unweighted = odtbrain.backpropagate_3d(sino, angles,
                                           weight_angles=False, **p)
    weighted = odtbrain.backpropagate_3d(sino, angles,
                                         weight_angles=True, **p)
    assert np.allclose(np.array(unweighted).flatten().view(float),
                       np.array(weighted).flatten().view(float))
def test_3d_backprop_tilted_weights_even():
    """Equidistant angles: tilted-axis weighting must be a no-op."""
    # make platform.system() report "Windows" (exercises that code path)
    platform.system = lambda: "Windows"
    sino, angles = create_test_sino_3d()
    p = get_test_parameter_set(1)[0]
    unweighted = odtbrain.backpropagate_3d_tilted(
        sino, angles, weight_angles=False, **p)
    weighted = odtbrain.backpropagate_3d_tilted(
        sino, angles, weight_angles=True, **p)
    assert np.allclose(np.array(unweighted).flatten().view(float),
                       np.array(weighted).flatten().view(float))
if __name__ == "__main__":
    # Execute every test function defined in this module
    for _name, _func in list(locals().items()):
        if _name.startswith("test_") and callable(_func):
            _func()
|
RI-imaging/ODTbrain | tests/test_spherecoords_from_angles_and_axis.py | <reponame>RI-imaging/ODTbrain<gh_stars>10-100
"""3D backpropagation with tilted axis of rotation: sphere coordinates"""
import numpy as np
import odtbrain
import odtbrain._alg3d_bppt
def test_simple_sphere():
    """simple geometrical tests

    Checks `sphere_points_from_angles_and_tilt` for several rotation
    axes against precomputed unit-sphere coordinates.
    """
    # three rotation states: 0, pi/2, pi
    angles = np.array([0, np.pi/2, np.pi])
    # tilted rotation axes under test
    axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
    results = []
    for tilted_axis in axes:
        angle_coords = odtbrain._alg3d_bppt.sphere_points_from_angles_and_tilt(
            angles, tilted_axis)
        results.append(angle_coords)
    s2 = 1/np.sqrt(2)
    # expected coordinates, one (3, 3) block per axis in `axes`
    # NOTE(review): the two long literal floats for the [1, 1, 1] axis
    # look numerically precomputed -- verify against the implementation
    # if this test ever starts failing.
    correct = np.array([[[1, 0, 0], [1, 0, 0], [1, 0, 0]],
                        [[0, 0, 1], [1, 0, 0], [0, 0, -1]],
                        [[0, 0, 1], [0, 0, 1], [0, 0, 1]],
                        [[0, 0, 1], [s2, .5, .5], [0, 1, 0]],
                        [[s2, 0, s2], [s2, 0, s2], [s2, 0, s2]],
                        [[s2, 0, s2],
                         [0.87965281125489458, s2/3*2, 0.063156230327168605],
                         [s2/3, s2/3*4, s2/3]],
                        ])
    assert np.allclose(correct, np.array(results))
if __name__ == "__main__":
    # Run all tests
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()

    # Interactive visualization of the tested geometry (only executed
    # when this module is run as a script; requires matplotlib).
    import matplotlib.pylab as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
    from matplotlib.patches import FancyArrowPatch
    from mpl_toolkits.mplot3d import proj3d

    class Arrow3D(FancyArrowPatch):
        # 2D arrow patch whose endpoints are projected from 3D
        def __init__(self, xs, ys, zs, *args, **kwargs):
            FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
            self._verts3d = xs, ys, zs

        def draw(self, renderer):
            # project the stored 3D endpoints before drawing in 2D
            xs3d, ys3d, zs3d = self._verts3d
            xs, ys, _zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
            self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
            FancyArrowPatch.draw(self, renderer)

    # rotation axes to visualize, one color each
    axes = [[0, 1, 0], [0, 1, 0.1], [0, 1, -1], [1, 0.1, 0]]
    colors = ["k", "blue", "red", "green"]
    angles = np.linspace(0, 2*np.pi, 100)

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    for i in range(len(axes)):
        tilted_axis = axes[i]
        color = colors[i]
        # normalize the rotation axis to unit length
        tilted_axis = np.array(tilted_axis)
        tilted_axis = tilted_axis/np.sqrt(np.sum(tilted_axis**2))
        angle_coords = odtbrain._alg3d_bppt.sphere_points_from_angles_and_tilt(
            angles, tilted_axis)
        u, v, w = tilted_axis
        a = Arrow3D([0, u], [0, v], [0, w], mutation_scale=20,
                    lw=1, arrowstyle="-|>", color=color)
        ax.add_artist(a)
        ax.scatter(angle_coords[:, 0], angle_coords[:, 1],
                   angle_coords[:, 2], c=color, marker='o')

    radius = 1
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_xlim(-radius*1.5, radius*1.5)
    ax.set_ylim(-radius*1.5, radius*1.5)
    ax.set_zlim(-radius*1.5, radius*1.5)
    plt.tight_layout()
    plt.show()
|
RI-imaging/ODTbrain | tests/test_counters.py | """Tests progress counters"""
import multiprocessing as mp
import numpy as np
import odtbrain
from common_methods import create_test_sino_2d, create_test_sino_3d, \
get_test_parameter_set
def test_integrate_2d():
    """Progress counters of `integrate_2d` are consistent."""
    sino, angles = create_test_sino_2d(N=10)
    params = get_test_parameter_set(1)[0]
    progress = mp.Value("i", 0)
    total = mp.Value("i", 0)
    odtbrain.integrate_2d(sino, angles,
                          count=progress,
                          max_count=total,
                          **params)
    assert progress.value == total.value
    assert progress.value != 0
def test_fmp_2d():
    """Progress counters of `fourier_map_2d` are consistent."""
    sino, angles = create_test_sino_2d(N=10)
    params = get_test_parameter_set(1)[0]
    progress = mp.Value("i", 0)
    total = mp.Value("i", 0)
    odtbrain.fourier_map_2d(sino, angles,
                            count=progress,
                            max_count=total,
                            **params)
    assert progress.value == total.value
    assert progress.value != 0
def test_bpp_2d():
    """Progress counters of `backpropagate_2d` are consistent."""
    sino, angles = create_test_sino_2d(N=10)
    params = get_test_parameter_set(1)[0]
    progress = mp.Value("i", 0)
    total = mp.Value("i", 0)
    odtbrain.backpropagate_2d(sino, angles, padval=0,
                              count=progress,
                              max_count=total,
                              **params)
    assert progress.value == total.value
    assert progress.value != 0
def test_back3d():
    """Progress counters of `backpropagate_3d` are consistent."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    params = get_test_parameter_set(1)[0]
    progress = mp.Value("i", 0)
    total = mp.Value("i", 0)
    odtbrain.backpropagate_3d(sino, angles, padval=0,
                              dtype=np.float64,
                              count=progress,
                              max_count=total,
                              **params)
    assert progress.value == total.value
    assert progress.value != 0
def test_back3d_tilted():
    """Progress counters of `backpropagate_3d_tilted` are consistent."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    params = get_test_parameter_set(1)[0]
    progress = mp.Value("i", 0)
    total = mp.Value("i", 0)
    odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
                                     dtype=np.float64,
                                     count=progress,
                                     max_count=total,
                                     **params)
    assert progress.value == total.value
    assert progress.value != 0
if __name__ == "__main__":
    # Execute every test function defined in this module
    for _name, _func in list(locals().items()):
        if _name.startswith("test_") and callable(_func):
            _func()
|
RI-imaging/ODTbrain | tests/test_alg2d_fmp.py | <gh_stars>10-100
"""Test Fourier mapping algorithm"""
import sys
import numpy as np
import odtbrain
import pytest
from common_methods import create_test_sino_2d, cutout, \
get_test_parameter_set, write_results, get_results
WRITE_RES = False
@pytest.mark.xfail(sys.platform == "darwin", reason="don't know why")
def test_2d_fmap():
    """Fourier-mapping reconstruction matches stored reference data."""
    # frame object identifies this test for write_results/get_results
    myframe = sys._getframe()
    sino, angles = create_test_sino_2d()
    results = [cutout(odtbrain.fourier_map_2d(sino, angles, **p))
               for p in get_test_parameter_set(1)]
    if WRITE_RES:
        write_results(myframe, results)
    assert np.allclose(np.array(results).flatten().view(float),
                       get_results(myframe))
if __name__ == "__main__":
    # Execute every test function defined in this module
    for _name, _func in list(locals().items()):
        if _name.startswith("test_") and callable(_func):
            _func()
|
RI-imaging/ODTbrain | examples/backprop_from_mie_2d_cylinder_offcenter.py | """Mie off-center cylinder
The *in silico* data set was created with the
software `miefield <https://github.com/RI-imaging/miefield>`_.
The data are 1D projections of an off-center cylinder of constant
refractive index. The Born approximation is error-prone due to
a relatively large radius of the cylinder (30 wavelengths) and
a refractive index difference of 0.006 between cylinder and
surrounding medium. The reconstruction of the refractive index
with the Rytov approximation is in good agreement with the
input data. When only 50 projections are used for the reconstruction,
artifacts appear. These vanish when more projections are used for
the reconstruction.
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data
# simulation data (complex scattered field, angles and parameters)
sino, angles, cfg = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
                              f_sino_imag="sino_imag.txt",
                              f_sino_real="sino_real.txt",
                              f_angles="mie_angles.txt",
                              f_info="mie_info.txt")
A, size = sino.shape

# background sinogram computed with Mie theory
# miefield.GetSinogramCylinderRotation(radius, nmed, nmed, lD, lC, size, A,res)
u0 = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
               f_sino_imag="u0_imag.txt",
               f_sino_real="u0_real.txt")
# create 2d array
# NOTE(review): result is transposed relative to `sino`; presumably the
# number of projections equals the detector size here -- verify.
u0 = np.tile(u0, size).reshape(A, size).transpose()

# background field necessary to compute initial born field
# u0_single = mie.GetFieldCylinder(radius, nmed, nmed, lD, size, res)
u0_single = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
                      f_sino_imag="u0_single_imag.txt",
                      f_sino_real="u0_single_real.txt")

# report simulation parameters
print("Example: Backpropagation from 2D Mie simulations")
print("Refractive index of medium:", cfg["nmed"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Performing backpropagation.")

# Set measurement parameters
# Compute scattered field from cylinder
radius = cfg["radius"]  # wavelengths
nmed = cfg["nmed"]
ncyl = cfg["ncyl"]

lD = cfg["lD"]  # measurement distance in wavelengths
lC = cfg["lC"]  # displacement from center of image
size = cfg["size"]
res = cfg["res"]  # px/wavelengths
A = cfg["A"]  # number of projections
# grid coordinates centered on the image
x = np.arange(size) - size / 2
X, Y = np.meshgrid(x, x)
rad_px = radius * res
# Ground-truth phantom: constant-index cylinder displaced by lC along
# one axis. `np.float` was removed in NumPy 1.24; it was an alias of
# the builtin `float`, which is used here instead.
phantom = np.array(((Y - lC * res)**2 + X**2) < rad_px**2,
                   dtype=float) * (ncyl - nmed) + nmed
# Born reconstruction (250 projections)
u_sinB = (sino / u0 * u0_single - u0_single)  # fake born
fB = odt.backpropagate_2d(u_sinB, angles, res, nmed, lD * res)
nB = odt.odt_to_ri(fB, res, nmed)

# Rytov reconstruction (250 projections)
u_sinR = odt.sinogram_as_rytov(sino / u0)
fR = odt.backpropagate_2d(u_sinR, angles, res, nmed, lD * res)
nR = odt.odt_to_ri(fR, res, nmed)

# Rytov 50 (every 5th projection only)
u_sinR50 = odt.sinogram_as_rytov((sino / u0)[::5, :])
fR50 = odt.backpropagate_2d(u_sinR50, angles[::5], res, nmed, lD * res)
nR50 = odt.odt_to_ri(fR50, res, nmed)

# Plot sinogram phase and amplitude
ph = odt.sinogram_as_radon(sino / u0)

am = np.abs(sino / u0)

# prepare plot (shared color scale for phantom and reconstructions)
vmin = np.min(np.array([phantom, nB.real, nR50.real, nR.real]))
vmax = np.max(np.array([phantom, nB.real, nR50.real, nR.real]))

fig, axes = plt.subplots(2, 3, figsize=(8, 5))
axes = np.array(axes).flatten()

phantommap = axes[0].imshow(phantom, vmin=vmin, vmax=vmax)
axes[0].set_title("phantom \n(non-centered cylinder)")

amplmap = axes[1].imshow(am, cmap="gray")
axes[1].set_title("amplitude sinogram \n(background-corrected)")

phasemap = axes[2].imshow(ph, cmap="coolwarm")
axes[2].set_title("phase sinogram [rad] \n(background-corrected)")

axes[3].imshow(nB.real, vmin=vmin, vmax=vmax)
axes[3].set_title("reconstruction (Born) \n(250 projections)")

axes[4].imshow(nR50.real, vmin=vmin, vmax=vmax)
axes[4].set_title("reconstruction (Rytov) \n(50 projections)")

axes[5].imshow(nR.real, vmin=vmin, vmax=vmax)
axes[5].set_title("reconstruction (Rytov) \n(250 projections)")

# color bars
cbkwargs = {"fraction": 0.045}
plt.colorbar(phantommap, ax=axes[0], **cbkwargs)
plt.colorbar(amplmap, ax=axes[1], **cbkwargs)
plt.colorbar(phasemap, ax=axes[2], **cbkwargs)
plt.colorbar(phantommap, ax=axes[3], **cbkwargs)
plt.colorbar(phantommap, ax=axes[4], **cbkwargs)
plt.colorbar(phantommap, ax=axes[5], **cbkwargs)

plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# project documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 22 09:35:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os.path as op
import sys
import odtbrain
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# include parent directory
pdir = op.dirname(op.dirname(op.abspath(__file__)))
sys.path.insert(0, pdir)
# local Sphinx extensions (fancy_include, github_changelog) live in
# the docs/extensions directory
sys.path.append(op.abspath('extensions'))
# http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
# Order class attributes and functions in separate blocks
autodoc_member_order = 'bysource'
# Display link to GitHub repo instead of doc on rtfd
rst_prolog = """
:github_url: https://github.com/RI-imaging/ODTbrain
"""
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx',
              'sphinx.ext.autodoc',
              'sphinx.ext.mathjax',
              'sphinx.ext.autosummary',
              'sphinx.ext.napoleon',
              'sphinxcontrib.bibtex',
              'fancy_include',
              'github_changelog',
              ]
# specify bibtex files (required for sphinxcontrib.bibtex>=2.0)
bibtex_bibfiles = ['odtbrain.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
year = "2015"
name = "odtbrain"
author = "<NAME>"
authors = [author]
projectname = name
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#
# The full version, including alpha/beta/rc tags.
# This gets 'version'
# single-source the version from the installed package
version = odtbrain.__version__
release = version
project = projectname
copyright = year + ", " + author # @ReservedAssignment
github_project = 'RI-imaging/' + project
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = projectname+'doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', projectname+'.tex', projectname+' Documentation',
     author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', projectname, projectname+' Documentation',
     authors, 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', projectname, 'ODTbrain Documentation',
     author, projectname,
     "Algorithms for optical diffraction tomography",
     'Numeric'),
]
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
# cross-reference targets for :mod:/:func:/:class: roles into other projects
intersphinx_mapping = {
    "python": ('https://docs.python.org/', None),
    "nrefocus": ('http://nrefocus.readthedocs.io/en/stable', None),
    "numpy": ('http://docs.scipy.org/doc/numpy', None),
    "cellsino": ('http://cellsino.readthedocs.io/en/stable', None),
    "qpimage": ('http://qpimage.readthedocs.io/en/stable', None),
    "radontea": ('http://radontea.readthedocs.io/en/stable', None),
    "scipy": ('https://docs.scipy.org/doc/scipy/reference/', None),
    "skimage": ('http://scikit-image.org/docs/stable/', None),
}
|
RI-imaging/ODTbrain | examples/backprop_from_fdtd_3d_tilted.py | """FDTD cell phantom with tilted axis of rotation
The *in silico* data set was created with the
:abbr:`FDTD (Finite Difference Time Domain)` software `meep`_. The data
are 2D projections of a 3D refractive index phantom that is rotated
about an axis which is tilted by 0.2 rad (11.5 degrees) with respect
to the imaging plane. The example showcases the method
:func:`odtbrain.backpropagate_3d_tilted` which takes into account
such a tilted axis of rotation. The data are downsampled by a factor
of two. A total of 220 projections are used for the reconstruction.
Note that the information required for reconstruction decreases as the
tilt angle increases. If the tilt angle is 90 degrees w.r.t. the
imaging plane, then we get a rotating image of a cell (not images of a
rotating cell) and tomographic reconstruction is impossible. A brief
description of this algorithm is given in :cite:`Mueller2015tilted`.
The first column shows the measured phase, visualizing the
tilt (compare to other examples). The second column shows a
reconstruction that does not take into account the tilted axis of
rotation; the result is a blurry reconstruction. The third column
shows the improved reconstruction; the known tilted axis of rotation
is used in the reconstruction process.
.. _`meep`: http://ab-initio.mit.edu/wiki/index.php/Meep
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data
if __name__ == "__main__":
    # Load the downsampled FDTD sinogram, rotation angles, ground-truth
    # phantom, and the simulation configuration dictionary.
    sino, angles, phantom, cfg = \
        load_data("fdtd_3d_sino_A220_R6.500_tiltyz0.2.tar.lzma")
    A = angles.shape[0]
    print("Example: Backpropagation from 3D FDTD simulations")
    print("Refractive index of medium:", cfg["nm"])
    print("Measurement position from object center:", cfg["lD"])
    print("Wavelength sampling:", cfg["res"])
    print("Axis tilt in y-z direction:", cfg["tilt_yz"])
    print("Number of projections:", A)
    print("Performing normal backpropagation.")
    # Apply the Rytov approximation
    sinoRytov = odt.sinogram_as_rytov(sino)
    # Perform naive backpropagation
    # (ignores the tilted rotation axis -> blurry reconstruction)
    f_naiv = odt.backpropagate_3d(uSin=sinoRytov,
                                  angles=angles,
                                  res=cfg["res"],
                                  nm=cfg["nm"],
                                  lD=cfg["lD"]
                                  )
    print("Performing tilted backpropagation.")
    # Determine tilted axis
    # unit vector of the rotation axis, tilted by `tilt_yz` in the y-z plane
    tilted_axis = [0, np.cos(cfg["tilt_yz"]), np.sin(cfg["tilt_yz"])]
    # Perform tilted backpropagation
    f_tilt = odt.backpropagate_3d_tilted(uSin=sinoRytov,
                                         angles=angles,
                                         res=cfg["res"],
                                         nm=cfg["nm"],
                                         lD=cfg["lD"],
                                         tilted_axis=tilted_axis,
                                         )
    # compute refractive index n from object function
    n_naiv = odt.odt_to_ri(f_naiv, res=cfg["res"], nm=cfg["nm"])
    n_tilt = odt.odt_to_ri(f_tilt, res=cfg["res"], nm=cfg["nm"])
    sx, sy, sz = n_tilt.shape
    px, py, pz = phantom.shape
    sino_phase = np.angle(sino)
    # compare phantom and reconstruction in plot
    fig, axes = plt.subplots(2, 3, figsize=(8, 4.5))
    # shared color scales so all refractive-index plots are comparable
    kwri = {"vmin": n_tilt.real.min(), "vmax": n_tilt.real.max()}
    kwph = {"vmin": sino_phase.min(), "vmax": sino_phase.max(),
            "cmap": "coolwarm"}
    # Sinogram
    axes[0, 0].set_title("phase projection")
    phmap = axes[0, 0].imshow(sino_phase[A // 2, :, :], **kwph)
    axes[0, 0].set_xlabel("detector x")
    axes[0, 0].set_ylabel("detector y")
    axes[1, 0].set_title("sinogram slice")
    axes[1, 0].imshow(sino_phase[:, :, sino.shape[2] // 2],
                      aspect=sino.shape[1] / sino.shape[0], **kwph)
    axes[1, 0].set_xlabel("detector y")
    axes[1, 0].set_ylabel("angle [rad]")
    # set y ticks for sinogram
    # NOTE(review): the tick count is taken from axes[1, 1] although the
    # sinogram is drawn on axes[1, 0] — looks like a copy-paste slip from
    # the non-tilted example; verify intended behavior.
    labels = np.linspace(0, 2 * np.pi, len(axes[1, 1].get_yticks()))
    labels = ["{:.2f}".format(i) for i in labels]
    axes[1, 0].set_yticks(np.linspace(0, len(angles), len(labels)))
    axes[1, 0].set_yticklabels(labels)
    axes[0, 1].set_title("normal (center)")
    rimap = axes[0, 1].imshow(n_naiv[sx // 2].real, **kwri)
    axes[0, 1].set_xlabel("x")
    axes[0, 1].set_ylabel("y")
    axes[1, 1].set_title("normal (nucleolus)")
    axes[1, 1].imshow(n_naiv[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
    axes[1, 1].set_xlabel("x")
    axes[1, 1].set_ylabel("y")
    axes[0, 2].set_title("tilt correction (center)")
    axes[0, 2].imshow(n_tilt[sx // 2].real, **kwri)
    axes[0, 2].set_xlabel("x")
    axes[0, 2].set_ylabel("y")
    axes[1, 2].set_title("tilt correction (nucleolus)")
    axes[1, 2].imshow(n_tilt[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
    axes[1, 2].set_xlabel("x")
    axes[1, 2].set_ylabel("y")
    # color bars
    cbkwargs = {"fraction": 0.045,
                "format": "%.3f"}
    plt.colorbar(phmap, ax=axes[0, 0], **cbkwargs)
    plt.colorbar(phmap, ax=axes[1, 0], **cbkwargs)
    plt.colorbar(rimap, ax=axes[0, 1], **cbkwargs)
    plt.colorbar(rimap, ax=axes[1, 1], **cbkwargs)
    plt.colorbar(rimap, ax=axes[0, 2], **cbkwargs)
    plt.colorbar(rimap, ax=axes[1, 2], **cbkwargs)
    plt.tight_layout()
    plt.show()
|
RI-imaging/ODTbrain | tests/test_alg3d_bpp.py | """Test 3D backpropagation algorithm"""
import ctypes
import multiprocessing as mp
import platform
import sys
import numpy as np
import odtbrain
from odtbrain import _alg3d_bpp
from common_methods import create_test_sino_3d, cutout, \
get_test_parameter_set, write_results, get_results
WRITE_RES = False
def test_3d_backprop_phase():
    """Reconstruction must reproduce the stored reference results."""
    myframe = sys._getframe()
    sino, angles = create_test_sino_3d()
    reconstructions = [
        cutout(odtbrain.backpropagate_3d(sino, angles, padval=0,
                                         dtype=np.float64, **params))
        for params in get_test_parameter_set(2)
    ]
    if WRITE_RES:
        write_results(myframe, reconstructions)
    data = np.array(reconstructions).flatten().view(float)
    assert np.allclose(data, get_results(myframe))
    return data
def test_3d_backprop_nopadreal():
    """
    - no padding
    - only real result
    """
    myframe = sys._getframe()
    # Fake a Windows platform to exercise the single-process code path.
    # Bug fix: the original patched `platform.system` without ever
    # restoring it, leaking the fake into all subsequently-run tests.
    real_system = platform.system
    platform.system = lambda: "Windows"
    try:
        sino, angles = create_test_sino_3d()
        parameters = get_test_parameter_set(2)
        r = list()
        for p in parameters:
            f = odtbrain.backpropagate_3d(sino, angles,
                                          padding=(False, False),
                                          dtype=np.float64, onlyreal=True,
                                          **p)
            r.append(cutout(f))
        if WRITE_RES:
            write_results(myframe, r)
        data = np.array(r).flatten().view(float)
        assert np.allclose(data, get_results(myframe))
    finally:
        # always restore the genuine implementation
        platform.system = real_system
def test_3d_backprop_windows():
    """
    We assume that we are not running these tests on windows.
    So we perform a test with fake windows to increase coverage.
    """
    # reference reconstruction on the real (non-Windows) platform
    datalin = test_3d_backprop_phase()
    # Bug fix: the original never installed the fake-Windows patch, so
    # `datawin` was computed on the same code path as `datalin` and the
    # Windows branch was not covered at all.
    real_system = platform.system
    platform.system = lambda: "Windows"
    try:
        datawin = test_3d_backprop_phase()
    finally:
        platform.system = real_system
    # both code paths must yield identical reconstructions
    assert np.allclose(datalin, datawin)
def test_3d_backprop_real():
    """
    Check if the real reconstruction matches the real part
    of the complex reconstruction.
    """
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    parameters = get_test_parameter_set(2)
    # complex-valued reconstructions
    recon_complex = [odtbrain.backpropagate_3d(sino, angles, padval=0,
                                               dtype=np.float64,
                                               onlyreal=False, **params)
                     for params in parameters]
    # real-only reconstructions
    recon_real = [odtbrain.backpropagate_3d(sino, angles, padval=0,
                                            dtype=np.float64,
                                            onlyreal=True, **params)
                  for params in parameters]
    assert np.allclose(np.array(recon_complex).real, np.array(recon_real))
def test_3d_backprop_phase32():
    """Single-precision result must agree with the double-precision one."""
    sino, angles = create_test_sino_3d()
    cutouts = []
    for params in get_test_parameter_set(2):
        recon = odtbrain.backpropagate_3d(sino, angles,
                                          dtype=np.float32,
                                          padval=0,
                                          **params)
        cutouts.append(cutout(recon))
    data32 = np.array(cutouts).flatten().view(np.float32)
    data64 = test_3d_backprop_phase()
    assert np.allclose(data32, data64, atol=6e-7, rtol=0)
def test_3d_mprotate():
    # Regression test for the multiprocessing-based in-place volume
    # rotation used by the 3D backpropagation algorithm.
    myframe = sys._getframe()
    ln = 10
    ln2 = 2*ln
    initial_array = np.arange(ln2**3).reshape((ln2, ln2, ln2))
    # raw shared-memory buffer so all worker processes see the same data
    shared_array = mp.RawArray(ctypes.c_double, ln2 * ln2 * ln2)
    arr = np.frombuffer(shared_array).reshape(ln2, ln2, ln2)
    arr[:, :, :] = initial_array
    # register the shared buffer with the module-level worker dictionary
    _alg3d_bpp.mprotate_dict["X"] = shared_array
    _alg3d_bpp.mprotate_dict["X_shape"] = (ln2, ln2, ln2)
    pool = mp.Pool(processes=mp.cpu_count(),
                   initializer=_alg3d_bpp._init_worker,
                   initargs=(shared_array, (ln2, ln2, ln2), np.dtype(float)))
    # rotate the shared volume in place and compare against stored results
    _alg3d_bpp._mprotate(2, ln, pool, 2)
    if WRITE_RES:
        write_results(myframe, arr)
    assert np.allclose(np.array(arr).flatten().view(
        float), get_results(myframe))
if __name__ == "__main__":
    # Run all tests.
    # Fix: the explicit test_3d_backprop_phase32() call was removed —
    # the loop below already invokes every test_* callable once, so the
    # explicit call executed that (slow) test twice.
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()
|
RI-imaging/ODTbrain | odtbrain/_alg2d_bpp.py | """2D backpropagation algorithm"""
import numpy as np
import scipy.ndimage
from . import util
def backpropagate_2d(uSin, angles, res, nm, lD=0, coords=None,
                     weight_angles=True,
                     onlyreal=False, padding=True, padval=0,
                     count=None, max_count=None, verbose=0):
    r"""2D backpropagation with the Fourier diffraction theorem

    Two-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,z)`
    by a dielectric object with refractive index
    :math:`n(x,z)`.

    This method implements the 2D backpropagation algorithm
    :cite:`Mueller2015arxiv`.

    .. math::
        f(\mathbf{r}) =
            -\frac{i k_\mathrm{m}}{2\pi}
            \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
            \left \{
            \text{FFT}^{-1}_{\mathrm{1D}}
            \left \{
            \left| k_\mathrm{Dx} \right|
            \frac{\text{FFT}_{\mathrm{1D}} \left \{
            u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
            }{u_0(l_\mathrm{D})}
            \exp \! \left[i k_\mathrm{m}(M - 1) \cdot
            (z_{\phi_j}-l_\mathrm{D}) \right]
            \right \}
            \right \}

    with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
    :math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
    rotational operator :math:`D_{-\phi_j}`, the angular distance between the
    projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
    :math:`|k_\mathrm{Dx}|`, and the propagation distance
    :math:`(z_{\phi_j}-l_\mathrm{D})`.

    Parameters
    ----------
    uSin: (A,N) ndarray
        Two-dimensional sinogram of line recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    coords: None [(2,M) ndarray]
        Computes only the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    weight_angles: bool
        If `True`, weights each backpropagated projection with a factor
        proportional to the angular distance between the neighboring
        projections.

        .. math::
            \Delta \phi_0 \longmapsto \Delta \phi_j =
                \frac{\phi_{j+1} - \phi_{j-1}}{2}

        .. versionadded:: 0.1.1
    onlyreal: bool
        If `True`, only the real part of the reconstructed image
        will be returned. This saves computation time.
    padding: bool
        Pad the input data to the second next power of 2 before
        Fourier transforming. This reduces artifacts and speeds up
        the process for input image sizes that are not powers of 2.
    padval: float
        The value used for padding. This is important for the Rytov
        approximation, where an approximate zero in the phase might
        translate to 2πi due to the unwrapping algorithm. In that
        case, this value should be a multiple of 2πi.
        If `padval` is `None`, then the edge values are used for
        padding (see documentation of :func:`numpy.pad`).
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (N,N), complex if `onlyreal` is `False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`
    radontea.backproject: backprojection based on the Fourier slice
        theorem

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    ##
    ##
    # TODO:
    # - combine the 2nd filter and the rotation in the for loop
    #   to save memory. However, memory is not a big issue in 2D.
    ##
    ##
    A = angles.shape[0]
    if max_count is not None:
        max_count.value += A + 2
    # Check input data
    assert len(uSin.shape) == 2, "Input data `uB` must have shape (A,N)!"
    assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
    if coords is not None:
        # Bug fix: the original concatenated the message with `+ +`,
        # which applied unary plus to a str and raised a TypeError
        # instead of the intended NotImplementedError (and contained
        # the typo "backrpopagation").
        raise NotImplementedError("Output coordinates cannot yet be set "
                                  "for the 2D backpropagation algorithm.")
    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # Here, the notation defines
    # a wave propagating to the right as:
    #
    #    u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    #    u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.
    # Perform weighting
    if weight_angles:
        weights = util.compute_angle_weights_1d(angles).reshape(-1, 1)
        sinogram = uSin * weights
    else:
        sinogram = uSin
    # Size of the input data
    ln = sinogram.shape[1]
    # We perform padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms of the input image size is not
    # a power of 2.
    order = max(64., 2**np.ceil(np.log(ln * 2.1) / np.log(2)))
    if padding:
        pad = order - ln
    else:
        pad = 0
    padl = int(np.ceil(pad / 2))
    padr = int(pad - padl)
    if padval is None:
        sino = np.pad(sinogram, ((0, 0), (padl, padr)),
                      mode="edge")
        if verbose > 0:
            print("......Padding with edge values.")
    else:
        sino = np.pad(sinogram, ((0, 0), (padl, padr)),
                      mode="linear_ramp",
                      end_values=(padval,))
        if verbose > 0:
            print("......Verifying padding value: {}".format(padval))
    # zero-padded length of sinogram.
    lN = sino.shape[1]
    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)^(3/2) a₀)            (prefactor)
    #      * iint dϕ₀ dkx                       (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    #
    # (r and s₀ are vectors. In the last term we perform the dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² )
    # t⊥  = (  cos(ϕ₀), sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    # The filter can be split into two parts
    #
    # 1) part without dependence on the z-coordinate
    #
    #        -i kₘ / ((2π)^(3/2) a₀)
    #      * iint dϕ₀ dkx
    #      * |kx|
    #      * exp(-i kₘ M lD )
    #
    # 2) part with dependence of the z-coordinate
    #
    #        exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # The filter (1) can be performed using the classical filter process
    # as in the backprojection algorithm.
    #
    #
    if count is not None:
        count.value += 1
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lN)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #               a, x
    kx = kx.reshape(1, -1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 < km**2)
    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    prefactor *= np.abs(kx) * filter_klp
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    # Perform filtering of the sinogram
    projection = np.fft.fft(sino, axis=-1) * prefactor
    #
    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # t⊥  = (  cos(ϕ₀), sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i kₘ (M - 1) yD )
    #
    # xD =   x cos(ϕ₀) + y sin(ϕ₀)
    # yD = - x sin(ϕ₀) + y cos(ϕ₀)
    # Everything is in pixels
    center = ln / 2.0
    x = np.arange(lN) - center + .5
    # Meshgrid for output array
    yv = x.reshape(-1, 1)
    Mp = M.reshape(1, -1)
    filter2 = np.exp(1j * yv * km * (Mp - 1))  # .reshape(1,lN,lN)
    projection = projection.reshape(A, 1, lN)  # * filter2
    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, ln))
    else:
        outarr = np.zeros((ln, ln), dtype=np.dtype(complex))
    if count is not None:
        count.value += 1
    # Calculate backpropagations
    for i in np.arange(A):
        # Create an interpolation object of the projection.
        # interpolation of the rotated fourier transformed projection
        # this is already tiled onto the entire image.
        sino_filtered = np.fft.ifft(projection[i] * filter2, axis=-1)
        # Resize filtered sinogram back to original size
        sino = sino_filtered[:ln, padl:padl + ln]
        # Modernization: `scipy.ndimage.interpolation.rotate` lives in a
        # deprecated namespace (removed in SciPy 1.10); use the public
        # `scipy.ndimage.rotate` instead (identical function).
        rotated_projr = scipy.ndimage.rotate(
            sino.real, -angles[i] * 180 / np.pi,
            reshape=False, mode="constant", cval=0)
        # Append results
        outarr += rotated_projr
        if not onlyreal:
            outarr += 1j * scipy.ndimage.rotate(
                sino.imag, -angles[i] * 180 / np.pi,
                reshape=False, mode="constant", cval=0)
        if count is not None:
            count.value += 1
    return outarr
|
RI-imaging/ODTbrain | tests/test_apple.py | <reponame>RI-imaging/ODTbrain
"""Test apple core correction"""
import multiprocessing as mp
import sys
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, cutout, \
get_test_parameter_set, write_results, get_results
WRITE_RES = False
def test_apple_core_3d_values():
    """A shape whose x and z extents differ must be rejected."""
    raised = False
    try:
        odtbrain.apple.apple_core_3d(shape=(10, 10, 5),
                                     res=.1,
                                     nm=1)
    except ValueError:
        raised = True
    assert raised, "bad input shape should raise ValueError"
def test_correct_counter():
    # `correct` must increment `count` exactly up to `max_count`
    # (progress-monitoring contract).
    count = mp.Value("I", lock=True)
    max_count = mp.Value("I", lock=True)
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    p = get_test_parameter_set(1)[0]
    f = odtbrain.backpropagate_3d(sino, angles, padval=0,
                                  dtype=np.float64,
                                  copy=False, **p)
    odtbrain.apple.correct(f=f,
                           res=p["res"],
                           nm=p["nm"],
                           enforce_envelope=.95,
                           max_iter=100,
                           min_diff=0.01,
                           count=count,
                           max_count=max_count)
    assert count.value == max_count.value
def test_correct_reproduce():
    # Regression test: the apple-core-corrected reconstruction must
    # match the stored reference data for this test function.
    myframe = sys._getframe()
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    p = get_test_parameter_set(1)[0]
    sryt = odtbrain.sinogram_as_rytov(uSin=sino, u0=1, align=False)
    f = odtbrain.backpropagate_3d(sryt, angles, padval=0,
                                  dtype=np.float64,
                                  copy=False, **p)
    fc = odtbrain.apple.correct(f=f,
                                res=p["res"],
                                nm=p["nm"],
                                enforce_envelope=.95,
                                max_iter=100,
                                min_diff=0.01)
    fo = cutout(fc)
    fo = np.array(fo, dtype=np.complex128)
    if WRITE_RES:
        write_results(myframe, fo)
    # compare as a flat float view against the reference results
    data = fo.flatten().view(float)
    assert np.allclose(data, get_results(myframe))
def test_correct_values():
    """`enforce_envelope` outside of [0, 1] must raise ValueError."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    params = get_test_parameter_set(1)[0]
    recon = odtbrain.backpropagate_3d(sino, angles, padval=0,
                                      dtype=np.float64,
                                      copy=False, **params)
    raised = False
    try:
        odtbrain.apple.correct(f=recon,
                               res=params["res"],
                               nm=params["nm"],
                               enforce_envelope=1.05)
    except ValueError:
        raised = True
    assert raised, "`enforce_envelope` must be in [0, 1]"
def test_envelope_gauss_shape():
    """Make sure non-cubic input shape works"""
    # the 1st and 3rd axes must agree; the 2nd axis may differ
    shape = (60, 50, 60)
    core = odtbrain.apple.apple_core_3d(shape=shape, res=.1, nm=1)
    envelope = odtbrain.apple.envelope_gauss(ftdata=np.ones(shape),
                                             core=core)
    assert envelope.shape == shape
if __name__ == "__main__":
    # Run all tests
    # Execute every callable defined above whose name starts with
    # "test_" (simple pytest-free runner for manual invocation).
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()
|
RI-imaging/ODTbrain | examples/backprop_from_fdtd_3d.py | <filename>examples/backprop_from_fdtd_3d.py
"""FDTD cell phantom
The *in silico* data set was created with the
:abbr:`FDTD (Finite Difference Time Domain)` software `meep`_. The data
are 2D projections of a 3D refractive index phantom. The reconstruction
of the refractive index with the Rytov approximation is in good
agreement with the phantom that was used in the simulation. The data
are downsampled by a factor of two. The rotational axis is the `y`-axis.
A total of 180 projections are used for the reconstruction. A detailed
description of this phantom is given in :cite:`Mueller2015`.
.. _`meep`: http://ab-initio.mit.edu/wiki/index.php/Meep
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data
if __name__ == "__main__":
    # Load the downsampled FDTD sinogram, rotation angles, ground-truth
    # phantom, and the simulation configuration dictionary.
    sino, angles, phantom, cfg = \
        load_data("fdtd_3d_sino_A180_R6.500.tar.lzma")
    A = angles.shape[0]
    print("Example: Backpropagation from 3D FDTD simulations")
    print("Refractive index of medium:", cfg["nm"])
    print("Measurement position from object center:", cfg["lD"])
    print("Wavelength sampling:", cfg["res"])
    print("Number of projections:", A)
    print("Performing backpropagation.")
    # Apply the Rytov approximation
    sinoRytov = odt.sinogram_as_rytov(sino)
    # perform backpropagation to obtain object function f
    f = odt.backpropagate_3d(uSin=sinoRytov,
                             angles=angles,
                             res=cfg["res"],
                             nm=cfg["nm"],
                             lD=cfg["lD"]
                             )
    # compute refractive index n from object function
    n = odt.odt_to_ri(f, res=cfg["res"], nm=cfg["nm"])
    sx, sy, sz = n.shape
    px, py, pz = phantom.shape
    sino_phase = np.angle(sino)
    # compare phantom and reconstruction in plot
    fig, axes = plt.subplots(2, 3, figsize=(8, 4))
    # shared color scales so phantom and reconstruction are comparable
    kwri = {"vmin": n.real.min(), "vmax": n.real.max()}
    kwph = {"vmin": sino_phase.min(), "vmax": sino_phase.max(),
            "cmap": "coolwarm"}
    # Phantom
    axes[0, 0].set_title("FDTD phantom center")
    rimap = axes[0, 0].imshow(phantom[px // 2], **kwri)
    axes[0, 0].set_xlabel("x")
    axes[0, 0].set_ylabel("y")
    axes[1, 0].set_title("FDTD phantom nucleolus")
    axes[1, 0].imshow(phantom[int(px / 2 + 2 * cfg["res"])], **kwri)
    axes[1, 0].set_xlabel("x")
    axes[1, 0].set_ylabel("y")
    # Sinogram
    axes[0, 1].set_title("phase projection")
    phmap = axes[0, 1].imshow(sino_phase[A // 2, :, :], **kwph)
    axes[0, 1].set_xlabel("detector x")
    axes[0, 1].set_ylabel("detector y")
    axes[1, 1].set_title("sinogram slice")
    axes[1, 1].imshow(sino_phase[:, :, sino.shape[2] // 2],
                      aspect=sino.shape[1] / sino.shape[0], **kwph)
    axes[1, 1].set_xlabel("detector y")
    axes[1, 1].set_ylabel("angle [rad]")
    # set y ticks for sinogram
    labels = np.linspace(0, 2 * np.pi, len(axes[1, 1].get_yticks()))
    labels = ["{:.2f}".format(i) for i in labels]
    axes[1, 1].set_yticks(np.linspace(0, len(angles), len(labels)))
    axes[1, 1].set_yticklabels(labels)
    axes[0, 2].set_title("reconstruction center")
    axes[0, 2].imshow(n[sx // 2].real, **kwri)
    axes[0, 2].set_xlabel("x")
    axes[0, 2].set_ylabel("y")
    axes[1, 2].set_title("reconstruction nucleolus")
    axes[1, 2].imshow(n[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
    axes[1, 2].set_xlabel("x")
    axes[1, 2].set_ylabel("y")
    # color bars
    cbkwargs = {"fraction": 0.045,
                "format": "%.3f"}
    plt.colorbar(phmap, ax=axes[0, 1], **cbkwargs)
    plt.colorbar(phmap, ax=axes[1, 1], **cbkwargs)
    plt.colorbar(rimap, ax=axes[0, 0], **cbkwargs)
    plt.colorbar(rimap, ax=axes[1, 0], **cbkwargs)
    plt.colorbar(rimap, ax=axes[0, 2], **cbkwargs)
    plt.colorbar(rimap, ax=axes[1, 2], **cbkwargs)
    plt.tight_layout()
    plt.show()
|
RI-imaging/ODTbrain | setup.py | <gh_stars>10-100
from os.path import dirname, realpath, exists
from setuptools import setup
import sys
author = "<NAME>"
authors = [author]
description = 'Algorithms for diffraction tomography'
name = 'odtbrain'
year = "2015"
# Make the package directory importable so `_version` can be loaded
# without installing the package first.
sys.path.insert(0, realpath(dirname(__file__))+"/"+name)
from _version import version # noqa: E402
if __name__ == "__main__": # required by Windows/pytest (multiprocessing)
    setup(
        name=name,
        author=author,
        author_email='<EMAIL>',
        url='https://github.com/RI-imaging/ODTbrain',
        version=version,
        packages=[name],
        package_dir={name: name},
        license="BSD (3 clause)",
        description=description,
        # use the README as long description when it is available
        long_description=open('README.rst').read()
        if exists('README.rst') else '',
        install_requires=["numexpr",  # 3D backpropagation
                          "numpy>=1.7.0",
                          "pyfftw>=0.9.2",  # 3D backpropagation
                          "scikit-image>=0.11.0",  # phase-unwrapping
                          "scipy>=1.4.0",  # Updated QHull in griddata
                          ],
        python_requires='>=3.5, <4',
        keywords=["odt", "opt", "diffraction", "born", "rytov", "radon",
                  "backprojection", "backpropagation", "inverse problem",
                  "Fourier diffraction theorem", "Fourier slice theorem"],
        classifiers=[
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Topic :: Scientific/Engineering :: Visualization',
            'Intended Audience :: Science/Research'
        ],
        platforms=['ALL'],
    )
|
RI-imaging/ODTbrain | tests/common_methods.py | <reponame>RI-imaging/ODTbrain<filename>tests/common_methods.py
"""Test helper functions"""
import pathlib
import tempfile
import warnings
import zipfile
import numpy as np
from scipy.ndimage import rotate
def create_test_sino_2d(A=9, N=22, max_phase=5.0,
                        ampl_range=(1.0, 1.0)):
    """Create a 2D test sinogram for optical diffraction tomography.

    The sinogram is generated from a Gaussian phase object that is
    shifted according to the rotational position of a non-centered
    object.

    Parameters
    ----------
    A : int
        Number of angles of the sinogram.
    N : int
        Size of one acquisition.
    max_phase : float
        Phase normalization. Values greater than 2π also exercise the
        phase-unwrapping step of the reconstruction algorithm.
    ampl_range : tuple of floats
        Min/max range of the amplitude values. Equal values yield a
        constant amplitude.
    """
    # full 2π angular coverage
    angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
    # detector coordinates of the Gaussian
    x = np.linspace(-N / 2, N / 2, N, endpoint=True)
    # standard deviation of the Gaussian
    dev = np.sqrt(N / 2)
    # distance of the object from the rotation center
    off = N / 7
    constant_ampl = ampl_range[0] == ampl_range[1]
    sino = np.zeros((A, N), dtype=np.complex128)
    for idx, ang in enumerate(angles):
        # Gaussian distribution sinogram, shifted with the rotation
        x0 = np.cos(ang) * off
        phase = normalize(np.exp(-(x - x0) ** 2 / dev ** 2),
                          vmax=max_phase)
        if constant_ampl:
            ampl = ampl_range[0]
        else:
            # ring-shaped amplitude (two displaced Gaussians)
            ampldev = dev / 5
            amploff = off * .3
            ampl = (np.exp(-(x - x0 - amploff) ** 2 / ampldev ** 2)
                    + np.exp(-(x - x0 + amploff) ** 2 / ampldev ** 2))
            ampl = normalize(ampl, vmin=ampl_range[0],
                             vmax=ampl_range[1])
        sino[idx] = ampl * np.exp(1j * phase)
    return sino, angles
def create_test_sino_3d(A=9, Nx=22, Ny=22, max_phase=5.0,
                        ampl_range=(1.0, 1.0)):
    """Create a 3D test sinogram for optical diffraction tomography.

    The phase of each projection is a 2D Gaussian whose position
    follows the rotational position of an off-center object; the
    simulated rotation is about the second (y)/[1] axis.

    Parameters
    ----------
    A : int
        Number of angles in the sinogram.
    Nx : int
        Size of the first axis.
    Ny : int
        Size of the second axis.
    max_phase : float
        Maximum phase value; a value greater than 2*PI also exercises
        the phase-unwrapping capabilities of reconstruction algorithms.
    ampl_range : tuple of floats
        (min, max) range of the amplitude values; identical values
        yield a constant amplitude.

    Returns
    -------
    sino : 3d complex ndarray, shape (A, Ny, Nx)
        The simulated sinogram.
    angles : 1d ndarray, length A
        The projection angles [rad].
    """
    # full 2pi angular coverage
    angles = np.linspace(0, 2*np.pi, A, endpoint=False)
    # coordinate grids of the Gaussian
    xx = np.linspace(-Nx/2, Nx/2, Nx, endpoint=True).reshape(1, -1)
    yy = np.linspace(-Ny/2, Ny/2, Ny, endpoint=True).reshape(-1, 1)
    # standard deviation of the Gaussian
    sigma = min(np.sqrt(Nx/2), np.sqrt(Ny/2))
    # offset of the rotation about the second axis
    center_off = Nx/7
    sino = np.zeros((A, Ny, Nx), dtype=np.complex128)
    ampl_const = ampl_range[0] == ampl_range[1]
    for idx, ang in enumerate(angles):
        # Gaussian phase profile shifted with the rotational position
        shift = np.cos(ang)*center_off
        phase = normalize(np.exp(-(xx-shift)**2/sigma**2)
                          * np.exp(-yy**2/sigma**2),
                          vmax=max_phase)
        if ampl_const:
            ampl = ampl_range[0]
        else:
            # two displaced Gaussians yield a ring-like amplitude
            asig = sigma/5
            aoff = center_off*.3
            ring = (np.exp(-(xx-shift-aoff)**2/asig**2)
                    + np.exp(-(xx-shift+aoff)**2/asig**2))
            ampl = normalize(ring, vmin=ampl_range[0], vmax=ampl_range[1])
        sino[idx] = ampl*np.exp(1j*phase)
    return sino, angles
def create_test_sino_3d_tilted(A=9, Nx=22, Ny=22, max_phase=5.0,
                               ampl_range=(1.0, 1.0),
                               tilt_plane=0.0):
    """Create a 3D test sinogram with an in-plane tilted rotation axis.

    The sinogram is generated from a Gaussian that is shifted
    according to the rotational position of a non-centered object.
    The simulated rotation is about the second (y)/[1] axis; each
    projection is additionally rotated in-plane by `tilt_plane`.

    Parameters
    ----------
    A : int
        Number of angles of the sinogram.
    Nx : int
        Size of the first axis.
    Ny : int
        Size of the second axis.
    max_phase : float
        Phase normalization. If this is greater than
        2PI, then it also tests the unwrapping
        capabilities of the reconstruction algorithm.
    ampl_range : tuple of floats
        Determines the min/max range of the amplitude values.
        Equal values means constant amplitude.
    tilt_plane : float
        Rotation tilt offset [rad].

    Returns
    -------
    sino : 3d complex ndarray, shape (A, Ny, Nx)
        The simulated sinogram.
    angles : 1d ndarray, length A
        The projection angles [rad].
    """
    # initiate array
    resar = np.zeros((A, Ny, Nx), dtype=np.complex128)
    # 2pi coverage
    angles = np.linspace(0, 2*np.pi, A, endpoint=False)
    # x-values of Gaussian
    x = np.linspace(-Nx/2, Nx/2, Nx, endpoint=True).reshape(1, -1)
    y = np.linspace(-Ny/2, Ny/2, Ny, endpoint=True).reshape(-1, 1)
    # SD of Gaussian
    dev = min(np.sqrt(Nx/2), np.sqrt(Ny/2))
    # Off-centered rotation about second axis:
    off = Nx/7
    for ii in range(A):
        # Gaussian distribution sinogram
        x0 = np.cos(angles[ii])*off
        phase = np.exp(-(x-x0)**2/dev**2) * np.exp(-(y)**2/dev**2)
        phase = normalize(phase, vmax=max_phase)
        if ampl_range[0] == ampl_range[1]:
            # constant amplitude
            # BUG FIX: the array must match the (Ny, Nx) projection
            # shape; it was (Nx, Ny) before, which fails for
            # non-square projections (Nx != Ny).
            ampl = np.ones((Ny, Nx))*ampl_range[0]
        else:
            # ring
            # NOTE(review): this amplitude is built from `x` only and
            # therefore has shape (1, Nx) — verify that an in-plane
            # rotation of this 1-row array is intended.
            ampldev = dev/5
            amploff = off*.3
            ampl1 = np.exp(-(x-x0-amploff)**2/ampldev**2)
            ampl2 = np.exp(-(x-x0+amploff)**2/ampldev**2)
            ampl = ampl1+ampl2
            ampl = normalize(ampl, vmin=ampl_range[0], vmax=ampl_range[1])
        # perform in-plane rotation
        ampl = rotate(ampl, np.rad2deg(tilt_plane), reshape=False, cval=1)
        phase = rotate(phase, np.rad2deg(tilt_plane), reshape=False, cval=0)
        resar[ii] = ampl*np.exp(1j*phase)
    return resar, angles
def cutout(a):
    """Cut out an inscribed circle (2D) or sphere (3D) from a square array.

    Values outside the circle/sphere of radius ``a.shape[0]/2``
    centered in the array are set to zero; `a` is modified in-place.

    Parameters
    ----------
    a : 2d or 3d ndarray
        Square (NxN) or cubic (NxNxN) array.

    Returns
    -------
    a : ndarray
        The masked array (the same object as the input).

    Raises
    ------
    ValueError
        If `a` is not 2- or 3-dimensional.
    """
    x = np.arange(a.shape[0])
    c = a.shape[0] / 2
    if len(a.shape) == 2:
        x = x.reshape(-1, 1)
        y = x.reshape(1, -1)
        zero = ((x-c)**2 + (y-c)**2) < c**2
    elif len(a.shape) == 3:
        x = x.reshape(-1, 1, 1)
        y = x.reshape(1, -1, 1)
        # BUG FIX: `z` must vary along the last axis; it was reshaped
        # like `y` before, so the "sphere" was actually a cylinder-like
        # 2D mask broadcast along the last axis.
        # NOTE(review): stored reference data generated with the old
        # mask may need to be regenerated.
        z = x.reshape(1, 1, -1)
        zero = ((x-c)**2 + (y-c)**2 + (z-c)**2) < c**2
    else:
        raise ValueError("Cutout array must have dimension 2 or 3!")
    a *= zero
    return a
def get_results(frame):
    """Load the stored reference data for the calling test function.

    Parameters
    ----------
    frame : frame object
        Execution frame of the calling test function (obtained via
        ``sys._getframe()``); its file name and function name
        determine the reference archive
        ``tests/data/<module>__<function>.zip``.

    Returns
    -------
    data : ndarray
        The reference data (float view) stored in the archive.

    Raises
    ------
    ValueError
        If no reference archive exists for the calling test.
    """
    filen = frame.f_globals["__file__"]
    funcname = frame.f_code.co_name
    # identifier "<module-without-'test_'-prefix>__<function name>"
    identifier = "{}__{}".format(filen.split("test_", 1)[1][:-3],
                                 funcname)
    wdir = pathlib.Path(__file__).parent / "data"
    zipf = wdir / (identifier + ".zip")
    text = "data.txt"
    tdir = tempfile.gettempdir()
    if zipf.exists():
        # extract the plain-text data file to a temporary location
        with zipfile.ZipFile(str(zipf)) as arc:
            arc.extract(text, tdir)
    else:
        # BUG FIX: the message previously interpolated the constant
        # "data.txt" instead of naming the missing reference archive.
        raise ValueError("No reference found for test: {}".format(identifier))
    tfile = pathlib.Path(tdir) / text
    data = np.loadtxt(str(tfile))
    # remove the temporarily extracted file
    tfile.unlink()
    return data
def get_test_parameter_set(set_number=1):
    """Return `set_number` parameter dicts for the reconstruction tests.

    Each dict holds the keyword arguments ``res`` (wavelength
    sampling), ``lD`` (propagation distance) and ``nm`` (refractive
    index of the medium). Successive sets vary all three values so
    that the algorithms are probed at distinct configurations.
    """
    res, lD, nm = 2.1, 0, 1.333
    sets = []
    while len(sets) < set_number:
        sets.append({"res": res, "lD": lD, "nm": nm})
        # advance to the next configuration
        res += .1
        lD += np.pi
        nm *= 1.01
    return sets
def normalize(av, vmin=0., vmax=1.):
    """Linearly rescale the values of `av` to the interval [vmin, vmax].

    If `vmin` equals `vmax`, an array of constant value `vmin` with
    the shape of `av` is returned. If `vmax` is smaller than `vmin`,
    the two bounds are swapped and a warning is emitted.
    """
    if vmin == vmax:
        # degenerate target interval: constant output
        return np.ones_like(av)*vmin
    if vmax < vmin:
        warnings.warn("swapping vmin and vmax, because vmax < vmin.")
        vmin, vmax = vmax, vmin
    lo = np.min(av)
    hi = np.max(av)
    # map to [0, 1], then stretch to [vmin, vmax]
    unit = (av - lo)/(hi - lo)
    return unit * (vmax-vmin) + vmin
def write_results(frame, r):
    """Store the results `r` as a reference zip archive for the tests.

    The archive is written to the current working directory as
    ``<module>__<function>.zip``; if placed in the directory "data",
    it will be picked up by :func:`get_results`.

    Parameters
    ----------
    frame : frame object
        Execution frame of the calling test function (obtained via
        ``sys._getframe()``); its file and function name define the
        archive name.
    r : array-like
        The reconstruction results to store.
    """
    # cast single precision to double precision
    if np.iscomplexobj(r):
        r = np.array(r, dtype=complex)
    else:
        r = np.array(r, dtype=float)
    # flatten and store the raw float view (complex becomes two floats)
    data = np.array(r).flatten().view(float)
    filen = frame.f_globals["__file__"]
    funcname = frame.f_code.co_name
    # identifier "<module-without-'test_'-prefix>__<function name>"
    identifier = "{}__{}".format(filen.split("test_", 1)[1][:-3],
                                 funcname)
    text = pathlib.Path("data.txt")
    zipf = pathlib.Path(identifier+".zip")
    # remove existing files
    if text.exists():
        text.unlink()
    if zipf.exists():
        zipf.unlink()
    # save text
    np.savetxt(str(text), data, fmt="%.10f")
    # make zip
    with zipfile.ZipFile(str(zipf),
                         "w",
                         compression=zipfile.ZIP_DEFLATED) as arc:
        arc.write(str(text))
    text.unlink()
|
RI-imaging/ODTbrain | examples/backprop_from_mie_2d_weights_angles.py | """Mie cylinder with unevenly spaced angles
Angular weighting can significantly improve reconstruction quality
when the angular projections are sampled at non-equidistant
intervals :cite:`Tam1981`. The *in silico* data set was created with
the softare `miefield <https://github.com/RI-imaging/miefield>`_.
The data are 1D projections of a non-centered cylinder of constant
refractive index 1.339 embedded in water with refractive index 1.333.
The first column shows the used sinograms (missing angles are displayed
as zeros) that were created from the original sinogram with 250
projections. The second column shows the reconstruction without angular
weights and the third column shows the reconstruction with angular
weights. The keyword argument `weight_angles` was introduced in version
0.1.1.
"""
import matplotlib.pylab as plt
import numpy as np
import unwrap
import odtbrain as odt
from example_helper import load_data
sino, angles, cfg = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
f_angles="mie_angles.txt",
f_sino_real="sino_real.txt",
f_sino_imag="sino_imag.txt",
f_info="mie_info.txt")
A, size = sino.shape
# background sinogram computed with Mie theory
# miefield.GetSinogramCylinderRotation(radius, nmed, nmed, lD, lC, size, A,res)
u0 = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
f_sino_imag="u0_imag.txt",
f_sino_real="u0_real.txt")
# create 2d array
u0 = np.tile(u0, size).reshape(A, size).transpose()
# background field necessary to compute initial born field
# u0_single = mie.GetFieldCylinder(radius, nmed, nmed, lD, size, res)
u0_single = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
f_sino_imag="u0_single_imag.txt",
f_sino_real="u0_single_real.txt")
print("Example: Backpropagation from 2D FDTD simulations")
print("Refractive index of medium:", cfg["nmed"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Performing backpropagation.")
# Set measurement parameters
# Compute scattered field from cylinder
radius = cfg["radius"] # wavelengths
nmed = cfg["nmed"]
ncyl = cfg["ncyl"]
lD = cfg["lD"] # measurement distance in wavelengths
lC = cfg["lC"] # displacement from center of image
size = cfg["size"]
res = cfg["res"] # px/wavelengths
A = cfg["A"] # number of projections
x = np.arange(size) - size / 2.0
X, Y = np.meshgrid(x, x)
rad_px = radius * res
# `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `float` (float64) is the equivalent dtype.
phantom = np.array(((Y - lC * res)**2 + X**2) < rad_px ** 2,
                   dtype=float) * (ncyl - nmed) + nmed
u_sinR = odt.sinogram_as_rytov(sino / u0)
# Rytov 200 projections
# remove 50 projections from total of 250 projections
remove200 = np.argsort(angles % .0002)[:50]
angles200 = np.delete(angles, remove200, axis=0)
u_sinR200 = np.delete(u_sinR, remove200, axis=0)
ph200 = unwrap.unwrap(np.angle(sino / u0))
ph200[remove200] = 0
fR200 = odt.backpropagate_2d(u_sinR200, angles200, res, nmed, lD*res)
nR200 = odt.odt_to_ri(fR200, res, nmed)
fR200nw = odt.backpropagate_2d(u_sinR200, angles200, res, nmed, lD*res,
weight_angles=False)
nR200nw = odt.odt_to_ri(fR200nw, res, nmed)
# Rytov 50 projections
remove50 = np.argsort(angles % .0002)[:200]
angles50 = np.delete(angles, remove50, axis=0)
u_sinR50 = np.delete(u_sinR, remove50, axis=0)
ph50 = unwrap.unwrap(np.angle(sino / u0))
ph50[remove50] = 0
fR50 = odt.backpropagate_2d(u_sinR50, angles50, res, nmed, lD*res)
nR50 = odt.odt_to_ri(fR50, res, nmed)
fR50nw = odt.backpropagate_2d(u_sinR50, angles50, res, nmed, lD*res,
weight_angles=False)
nR50nw = odt.odt_to_ri(fR50nw, res, nmed)
# prepare plot
kw_ri = {"vmin": 1.330,
"vmax": 1.340}
kw_ph = {"vmin": np.min(np.array([ph200, ph50])),
"vmax": np.max(np.array([ph200, ph50])),
"cmap": "coolwarm"}
fig, axes = plt.subplots(2, 3, figsize=(8, 4))
axes = np.array(axes).flatten()
phmap = axes[0].imshow(ph200, **kw_ph)
axes[0].set_title("Phase sinogram (200 proj.)")
rimap = axes[1].imshow(nR200nw.real, **kw_ri)
axes[1].set_title("RI without angular weights")
axes[2].imshow(nR200.real, **kw_ri)
axes[2].set_title("RI with angular weights")
axes[3].imshow(ph50, **kw_ph)
axes[3].set_title("Phase sinogram (50 proj.)")
axes[4].imshow(nR50nw.real, **kw_ri)
axes[4].set_title("RI without angular weights")
axes[5].imshow(nR50.real, **kw_ri)
axes[5].set_title("RI with angular weights")
# color bars
cbkwargs = {"fraction": 0.045,
"format": "%.3f"}
plt.colorbar(phmap, ax=axes[0], **cbkwargs)
plt.colorbar(phmap, ax=axes[3], **cbkwargs)
plt.colorbar(rimap, ax=axes[1], **cbkwargs)
plt.colorbar(rimap, ax=axes[2], **cbkwargs)
plt.colorbar(rimap, ax=axes[5], **cbkwargs)
plt.colorbar(rimap, ax=axes[4], **cbkwargs)
plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | tests/test_alg2d_int.py | """Test slow integration algorithm"""
import sys
import numpy as np
import odtbrain
from common_methods import create_test_sino_2d, cutout, \
get_test_parameter_set, write_results, get_results
WRITE_RES = False
def test_2d_integrate():
    """Compare `integrate_2d` reconstructions with stored reference data."""
    myframe = sys._getframe()
    sino, angles = create_test_sino_2d()
    parameters = get_test_parameter_set(2)
    r = list()
    for p in parameters:
        f = odtbrain.integrate_2d(sino, angles, **p)
        # mask everything outside the inscribed circle before comparing
        r.append(cutout(f))
    if WRITE_RES:
        # regenerate the reference archive instead of testing
        write_results(myframe, r)
    assert np.allclose(np.array(r).flatten().view(float), get_results(myframe))
if __name__ == "__main__":
    # Run all tests
    # (allows executing this module directly without pytest)
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()
|
RI-imaging/ODTbrain | examples/backprop_from_fdtd_3d_tilted2.py | """FDTD cell phantom with tilted and rolled axis of rotation
The *in silico* data set was created with the
:abbr:`FDTD (Finite Difference Time Domain)` software `meep`_. The data
are 2D projections of a 3D refractive index phantom that is rotated
about an axis which is tilted by 0.2 rad (11.5 degrees) with respect to
the imaging plane and rolled by -.42 rad (-24.1 degrees) within the
imaging plane. The data are the same as were used in the previous
example. A brief description of this algorithm is given in
:cite:`Mueller2015tilted`.
.. _`meep`: http://ab-initio.mit.edu/wiki/index.php/Meep
"""
import matplotlib.pylab as plt
import numpy as np
from scipy.ndimage import rotate
import odtbrain as odt
from example_helper import load_data
if __name__ == "__main__":
sino, angles, phantom, cfg = \
load_data("fdtd_3d_sino_A220_R6.500_tiltyz0.2.tar.lzma")
# Perform titlt by -.42 rad in detector plane
rotang = -0.42
rotkwargs = {"mode": "constant",
"order": 2,
"reshape": False,
}
for ii in range(len(sino)):
sino[ii].real = rotate(
sino[ii].real, np.rad2deg(rotang), cval=1, **rotkwargs)
sino[ii].imag = rotate(
sino[ii].imag, np.rad2deg(rotang), cval=0, **rotkwargs)
A = angles.shape[0]
print("Example: Backpropagation from 3D FDTD simulations")
print("Refractive index of medium:", cfg["nm"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Axis tilt in y-z direction:", cfg["tilt_yz"])
print("Number of projections:", A)
# Apply the Rytov approximation
sinoRytov = odt.sinogram_as_rytov(sino)
# Determine tilted axis
tilted_axis = [0, np.cos(cfg["tilt_yz"]), np.sin(cfg["tilt_yz"])]
rotmat = np.array([
[np.cos(rotang), -np.sin(rotang), 0],
[np.sin(rotang), np.cos(rotang), 0],
[0, 0, 1],
])
tilted_axis = np.dot(rotmat, tilted_axis)
print("Performing tilted backpropagation.")
# Perform tilted backpropagation
f_tilt = odt.backpropagate_3d_tilted(uSin=sinoRytov,
angles=angles,
res=cfg["res"],
nm=cfg["nm"],
lD=cfg["lD"],
tilted_axis=tilted_axis,
)
# compute refractive index n from object function
n_tilt = odt.odt_to_ri(f_tilt, res=cfg["res"], nm=cfg["nm"])
sx, sy, sz = n_tilt.shape
px, py, pz = phantom.shape
sino_phase = np.angle(sino)
# compare phantom and reconstruction in plot
fig, axes = plt.subplots(1, 3, figsize=(8, 2.4))
kwri = {"vmin": n_tilt.real.min(), "vmax": n_tilt.real.max()}
kwph = {"vmin": sino_phase.min(), "vmax": sino_phase.max(),
"cmap": "coolwarm"}
# Sinogram
axes[0].set_title("phase projection")
phmap = axes[0].imshow(sino_phase[A // 2, :, :], **kwph)
axes[0].set_xlabel("detector x")
axes[0].set_ylabel("detector y")
axes[1].set_title("sinogram slice")
axes[1].imshow(sino_phase[:, :, sino.shape[2] // 2],
aspect=sino.shape[1] / sino.shape[0], **kwph)
axes[1].set_xlabel("detector y")
axes[1].set_ylabel("angle [rad]")
# set y ticks for sinogram
labels = np.linspace(0, 2 * np.pi, len(axes[1].get_yticks()))
labels = ["{:.2f}".format(i) for i in labels]
axes[1].set_yticks(np.linspace(0, len(angles), len(labels)))
axes[1].set_yticklabels(labels)
axes[2].set_title("tilt correction (nucleolus)")
rimap = axes[2].imshow(n_tilt[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
axes[2].set_xlabel("x")
axes[2].set_ylabel("y")
# color bars
cbkwargs = {"fraction": 0.045,
"format": "%.3f"}
plt.colorbar(phmap, ax=axes[0], **cbkwargs)
plt.colorbar(phmap, ax=axes[1], **cbkwargs)
plt.colorbar(rimap, ax=axes[2], **cbkwargs)
plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | tests/test_alg2d_bpp.py | <filename>tests/test_alg2d_bpp.py
"""Test 2d backpropagation"""
import sys
import numpy as np
import odtbrain
from common_methods import create_test_sino_2d, cutout, \
get_test_parameter_set, write_results, get_results
WRITE_RES = False
def test_2d_backprop_phase():
    """Backpropagation of a phase-only sinogram vs. stored reference data."""
    myframe = sys._getframe()
    sino, angles = create_test_sino_2d()
    parameters = get_test_parameter_set(2)
    r = list()
    for p in parameters:
        f = odtbrain.backpropagate_2d(sino, angles, **p)
        # mask everything outside the inscribed circle before comparing
        r.append(cutout(f))
    if WRITE_RES:
        # regenerate the reference archive instead of testing
        write_results(myframe, r)
    assert np.allclose(np.array(r).flatten().view(float), get_results(myframe))
def test_2d_backprop_full():
    """Backpropagation of a phase+amplitude sinogram vs. reference data."""
    myframe = sys._getframe()
    # non-constant amplitude range exercises the amplitude handling
    sino, angles = create_test_sino_2d(ampl_range=(0.9, 1.1))
    parameters = get_test_parameter_set(2)
    r = list()
    for p in parameters:
        f = odtbrain.backpropagate_2d(sino, angles, **p)
        # mask everything outside the inscribed circle before comparing
        r.append(cutout(f))
    if WRITE_RES:
        # regenerate the reference archive instead of testing
        write_results(myframe, r)
    assert np.allclose(np.array(r).flatten().view(float), get_results(myframe))
def test_2d_backprop_real():
    """
    Check if the real reconstruction (``onlyreal=True``) matches the
    real part of the complex reconstruction.
    """
    sino, angles = create_test_sino_2d()
    parameters = get_test_parameter_set(2)
    # complex
    r = list()
    for p in parameters:
        f = odtbrain.backpropagate_2d(sino, angles, padval=0, **p)
        r.append(f)
    # real
    r2 = list()
    for p in parameters:
        f = odtbrain.backpropagate_2d(sino, angles, padval=0,
                                      onlyreal=True, **p)
        r2.append(f)
    assert np.allclose(np.array(r).real, np.array(r2))
if __name__ == "__main__":
    # Run all tests
    # (allows executing this module directly without pytest)
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()
|
RI-imaging/ODTbrain | examples/example_helper.py | <reponame>RI-imaging/ODTbrain<gh_stars>10-100
"""Miscellaneous methods for example data handling"""
import lzma
import os
import pathlib
import tarfile
import tempfile
import warnings
import zipfile
import numpy as np
datapath = pathlib.Path(__file__).parent / "data"
webloc = "https://github.com/RI-imaging/ODTbrain/raw/master/examples/data/"
def dl_file(url, dest, chunk_size=6553):
    """Download `url` to the path `dest` in chunks.

    Parameters
    ----------
    url : str
        Source URL.
    dest : pathlib.Path
        Destination file (opened in binary write mode).
    chunk_size : int
        Number of bytes fetched per read.
    """
    # urllib3 is imported lazily so the module works without it offline
    import urllib3
    http = urllib3.PoolManager()
    # stream the response instead of preloading it into memory
    r = http.request('GET', url, preload_content=False)
    with dest.open('wb') as out:
        while True:
            data = r.read(chunk_size)
            if data is None or len(data) == 0:
                break
            out.write(data)
    # return the connection to the pool
    r.release_conn()
def extract_lzma(path):
    """Decompress the lzma file at `path` into a temporary tar file.

    Returns the name of the temporary file; the caller is responsible
    for removing it.
    """
    # decompress the archive in one go
    raw = lzma.decompress(pathlib.Path(path).read_bytes())
    # write the payload to a temporary tar file
    fd, tarname = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
    with open(fd, "wb") as tar:
        tar.write(raw)
    return tarname
def get_file(fname, datapath=datapath):
    """Return path of an example data file

    Return the full path to an example data file name.
    If the file does not exist in the `datapath` directory,
    tries to download it from the ODTbrain GitHub repository.

    Parameters
    ----------
    fname : str
        Name of the example data file.
    datapath : path-like
        Directory searched for (and downloaded to); defaults to the
        module-level ``datapath``.

    Returns
    -------
    dlfile : pathlib.Path
        Full path of the (possibly downloaded) data file.
    """
    # download location
    datapath = pathlib.Path(datapath)
    datapath.mkdir(parents=True, exist_ok=True)
    dlfile = datapath / fname
    if not dlfile.exists():
        print("Attempting to download file {} from {} to {}.".
              format(fname, webloc, datapath))
        try:
            dl_file(url=webloc+fname, dest=dlfile)
        except BaseException:
            # warn, then re-raise so the caller sees the original error
            warnings.warn("Download failed: {}".format(fname))
            raise
    return dlfile
def load_data(fname, **kwargs):
    """Load example data by file name.

    The file is fetched with :func:`get_file` (downloading it if
    necessary) and dispatched to the matching loader by suffix
    (".lzma" or ".zip"); `kwargs` are forwarded to
    :func:`load_zip_data`.
    NOTE(review): any other suffix silently returns None — consider
    raising a ValueError instead.
    """
    fname = get_file(fname)
    if fname.suffix == ".lzma":
        return load_tar_lzma_data(fname)
    elif fname.suffix == ".zip":
        return load_zip_data(fname, **kwargs)
def load_tar_lzma_data(tlfile):
    """Load example sinogram data from a .tar.lzma file.

    The archive members are sorted by name and parsed by prefix:
    "fdtd_info*" ("key = value" lines), "phantom*" (text arrays) and
    "field*" (text arrays split into "*real.txt"/"*imag.txt").

    Returns
    -------
    sino : complex ndarray
        Sinogram assembled from the real/imaginary field files.
    angles : 1d ndarray
        Equidistant angles on [0, 2*pi), one per projection.
    phantom : ndarray
        Stacked phantom data.
    parms : dict
        Parameters parsed from the info file.
    """
    tmpname = extract_lzma(tlfile)
    # open tar file
    fields_real = []
    fields_imag = []
    phantom = []
    parms = {}
    with tarfile.open(tmpname, "r") as t:
        members = t.getmembers()
        # sort so that field/phantom slices are appended in order
        members.sort(key=lambda x: x.name)
        for m in members:
            n = m.name
            f = t.extractfile(m)
            if n.startswith("fdtd_info"):
                # parse simple "key = value" lines
                for ln in f.readlines():
                    ln = ln.decode()
                    if ln.count("=") == 1:
                        key, val = ln.split("=")
                        parms[key.strip()] = float(val.strip())
            elif n.startswith("phantom"):
                phantom.append(np.loadtxt(f))
            elif n.startswith("field"):
                if n.endswith("imag.txt"):
                    fields_imag.append(np.loadtxt(f))
                elif n.endswith("real.txt"):
                    fields_real.append(np.loadtxt(f))
    # best-effort removal of the temporary tar file
    try:
        os.remove(tmpname)
    except OSError:
        pass
    phantom = np.array(phantom)
    sino = np.array(fields_real) + 1j * np.array(fields_imag)
    angles = np.linspace(0, 2 * np.pi, sino.shape[0], endpoint=False)
    return sino, angles, phantom, parms
def load_zip_data(zipname, f_sino_real, f_sino_imag,
                  f_angles=None, f_phantom=None, f_info=None):
    """Load example sinogram data from a .zip file.

    Parameters
    ----------
    zipname : path-like
        The zip archive to read.
    f_sino_real, f_sino_imag : str
        Archive members with the real/imaginary parts of the sinogram.
    f_angles : str, optional
        Archive member with the projection angles.
    f_phantom : str, optional
        Archive member with the phantom data.
    f_info : str, optional
        Archive member with "key = value" configuration lines.

    Returns
    -------
    ret : list
        [sino] followed, in order and only if requested, by angles,
        phantom, and the configuration dict.
    """
    ret = []
    with zipfile.ZipFile(str(zipname)) as arc:
        sino_real = np.loadtxt(arc.open(f_sino_real))
        sino_imag = np.loadtxt(arc.open(f_sino_imag))
        sino = sino_real + 1j * sino_imag
        ret.append(sino)
        if f_angles:
            angles = np.loadtxt(arc.open(f_angles))
            ret.append(angles)
        if f_phantom:
            phantom = np.loadtxt(arc.open(f_phantom))
            ret.append(phantom)
        if f_info:
            with arc.open(f_info) as info:
                cfg = {}
                # parse simple "key = value" lines
                for li in info.readlines():
                    li = li.decode()
                    if li.count("=") == 1:
                        key, val = li.split("=")
                        cfg[key.strip()] = float(val.strip())
            ret.append(cfg)
    return ret
|
RI-imaging/ODTbrain | examples/backprop_from_qlsi_3d_hl60.py | <filename>examples/backprop_from_qlsi_3d_hl60.py
"""HL60 cell
The quantitative phase data of an HL60 S/4 cell were recorded using
:abbr:`QLSI (quadri-wave lateral shearing interferometry)`.
The original dataset was used in a previous publication
:cite:`Schuermann2017` to illustrate the capabilities of combined
fluorescence and refractive index tomography.
The example data set is already aligned and background-corrected as
described in the original publication and the fluorescence data are
not included. The lzma-archive contains the sinogram data stored in
the :ref:`qpimage <qpimage:index>` file format and the rotational
positions of each sinogram image as a text file.
The figure reproduces parts of figure 4 of the original manuscript.
Note that minor deviations from the original figure can be attributed
to the strong compression (scale offset filter) and due to the fact
that the original sinogram images were cropped from 196x196 px to
140x140 px (which in particular affects the background-part of the
refractive index histogram).
The raw data is available
`on figshare <https://doi.org/10.6084/m9.figshare.8055407.v1>`
(hl60_sinogram_qpi.h5).
"""
import pathlib
import tarfile
import tempfile
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
import qpimage
from example_helper import get_file, extract_lzma
if __name__ == "__main__":
# ascertain the data
path = get_file("qlsi_3d_hl60-cell_A140.tar.lzma")
tarf = extract_lzma(path)
tdir = tempfile.mkdtemp(prefix="odtbrain_example_")
with tarfile.open(tarf) as tf:
tf.extract("series.h5", path=tdir)
angles = np.loadtxt(tf.extractfile("angles.txt"))
# extract the complex field sinogram from the qpimage series data
h5file = pathlib.Path(tdir) / "series.h5"
with qpimage.QPSeries(h5file=h5file, h5mode="r") as qps:
qp0 = qps[0]
meta = qp0.meta
sino = np.zeros((len(qps), qp0.shape[0], qp0.shape[1]),
dtype=np.complex)
for ii in range(len(qps)):
sino[ii] = qps[ii].field
# perform backpropagation
u_sinR = odt.sinogram_as_rytov(sino)
res = meta["wavelength"] / meta["pixel size"]
nm = meta["medium index"]
fR = odt.backpropagate_3d(uSin=u_sinR,
angles=angles,
res=res,
nm=nm)
ri = odt.odt_to_ri(fR, res, nm)
# plot results
ext = meta["pixel size"] * 1e6 * 70
kw = {"vmin": ri.real.min(),
"vmax": ri.real.max(),
"extent": [-ext, ext, -ext, ext]}
fig, axes = plt.subplots(1, 3, figsize=(8, 2.5))
axes[0].imshow(ri[70, :, :].real, **kw)
axes[0].set_xlabel("x [µm]")
axes[0].set_ylabel("y [µm]")
x = np.linspace(-ext, ext, 140)
axes[1].plot(x, ri[70, :, 70], label="line plot x=0")
axes[1].plot(x, ri[70, 70, :], label="line plot y=0")
axes[1].set_xlabel("distance from center [µm]")
axes[1].set_ylabel("refractive index")
axes[1].legend()
hist, xh = np.histogram(ri.real, bins=100)
axes[2].plot(xh[1:], hist)
axes[2].set_yscale('log')
axes[2].set_xlabel("refractive index")
axes[2].set_ylabel("histogram")
plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | examples/backprop_from_rytov_3d_phantom_apple.py | """Missing apple core correction
The missing apple core :cite:`Vertu2009` is a phenomenon in diffraction
tomography that is a result of the fact the the Fourier space is not
filled completely when the sample is rotated only about a single axis.
The resulting artifacts include ringing and blurring in the
reconstruction parallel to the original rotation axis. By enforcing
constraints (refractive index real-valued and larger than the
surrounding medium), these artifacts can be attenuated.
This example generates an artificial sinogram using the Python
library :ref:`cellsino <cellsino:index>` (The example parameters
are reused from :ref:`this example <cellsino:example_simple_cell>`).
The sinogram is then reconstructed with the backpropagation algorithm
and the missing apple core correction is applied.
.. note::
The missing apple core correction :func:`odtbrain.apple.correct`
was implemented in version 0.3.0 and is thus not used in the
older examples.
"""
import matplotlib.pylab as plt
import numpy as np
import cellsino
import odtbrain as odt
if __name__ == "__main__":
# number of sinogram angles
num_ang = 160
# sinogram acquisition angles
angles = np.linspace(0, 2*np.pi, num_ang, endpoint=False)
# detector grid size
grid_size = (250, 250)
# vacuum wavelength [m]
wavelength = 550e-9
# pixel size [m]
pixel_size = 0.08e-6
# refractive index of the surrounding medium
medium_index = 1.335
# initialize cell phantom
phantom = cellsino.phantoms.SimpleCell()
# initialize sinogram with geometric parameters
sino = cellsino.Sinogram(phantom=phantom,
wavelength=wavelength,
pixel_size=pixel_size,
grid_size=grid_size)
# compute sinogram (field with Rytov approximation and fluorescence)
sino = sino.compute(angles=angles, propagator="rytov", mode="field")
# reconstruction of refractive index
sino_rytov = odt.sinogram_as_rytov(sino)
f = odt.backpropagate_3d(uSin=sino_rytov,
angles=angles,
res=wavelength/pixel_size,
nm=medium_index)
ri = odt.odt_to_ri(f=f,
res=wavelength/pixel_size,
nm=medium_index)
# apple core correction
fc = odt.apple.correct(f=f,
res=wavelength/pixel_size,
nm=medium_index,
method="sh")
ric = odt.odt_to_ri(f=fc,
res=wavelength/pixel_size,
nm=medium_index)
# plotting
idx = ri.shape[2] // 2
# log-scaled power spectra
ft = np.log(1 + np.abs(np.fft.fftshift(np.fft.fftn(ri))))
ftc = np.log(1 + np.abs(np.fft.fftshift(np.fft.fftn(ric))))
plt.figure(figsize=(7, 5.5))
plotkwri = {"vmax": ri.real.max(),
"vmin": ri.real.min(),
"interpolation": "none",
}
plotkwft = {"vmax": ft.max(),
"vmin": 0,
"interpolation": "none",
}
ax1 = plt.subplot(221, title="plain refractive index")
mapper = ax1.imshow(ri[:, :, idx].real, **plotkwri)
plt.colorbar(mappable=mapper, ax=ax1)
ax2 = plt.subplot(222, title="corrected refractive index")
mapper = ax2.imshow(ric[:, :, idx].real, **plotkwri)
plt.colorbar(mappable=mapper, ax=ax2)
ax3 = plt.subplot(223, title="Fourier space (visible apple core)")
mapper = ax3.imshow(ft[:, :, idx], **plotkwft)
plt.colorbar(mappable=mapper, ax=ax3)
ax4 = plt.subplot(224, title="Fourier space (with correction)")
mapper = ax4.imshow(ftc[:, :, idx], **plotkwft)
plt.colorbar(mappable=mapper, ax=ax4)
plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | odtbrain/_alg3d_bppt.py | """3D backpropagation algorithm with a tilted axis of rotation"""
import multiprocessing as mp
import warnings
import numexpr as ne
import numpy as np
import pyfftw
import scipy.ndimage
from . import util
_ncores = mp.cpu_count()
def estimate_major_rotation_axis(loc):
    """Estimate the main rotational axis from points on the unit sphere.

    Intended to return, for a list of points on the unit sphere, the
    dominant axis of rotation together with the rotational position
    (angle) of every point about that axis. Not implemented yet.
    """
    # TODO:
    raise NotImplementedError("estimation of rotational axis not implemented.")
def norm_vec(vector):
    """Return a unit-length copy of the 3D `vector`."""
    assert len(vector) == 3
    arr = np.array(vector)
    length = np.sqrt(np.sum(arr**2))
    return arr/length
def rotate_points_to_axis(points, axis):
    """Rotate all points of a list, such that `axis==[0,1,0]`

    This is accomplished by rotating in the x-z-plane by phi into the
    y-z-plane, then rotation in the y-z-plane by theta up to [0,1,0],
    and finally rotating back in the x-z-plane by -phi.

    Parameters
    ----------
    points: list-like with elements of length 3
        The Cartesian points. These should be in the same format as
        produced by `sphere_points_from_angles_and_tilt`.
    axis: list-like, length 3
        The reference axis that will be used to determine the
        rotation angle of the points. The points will be rotated
        about the origin such that `axis` matches [0,1,0].

    Returns
    -------
    rotated_points: np.ndarray of shape (N,3)
        The rotated points.
    """
    # normalize the axis (inlined from `norm_vec`)
    axis = np.array(axis)
    assert len(axis) == 3
    axis = axis/np.sqrt(np.sum(axis**2))
    u, v, w = axis
    pts = np.array(points)
    # rotational angle of the axis in the x-z plane
    phi = np.arctan2(u, w)
    # tilt angle of the axis w.r.t. the y-axis
    theta = np.arccos(v)
    # negative rotation about the y-axis
    rot_y = np.array([
        [np.cos(phi), 0, -np.sin(phi)],
        [0, 1, 0],
        [np.sin(phi), 0, np.cos(phi)],
    ])
    # negative rotation about the x-axis
    rot_x = np.array([
        [1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)],
    ])
    # tilt up to [0,1,0], then rotate back by -phi so that the net
    # effect is only the rotation towards [0,1,0]
    combined = np.dot(rot_y.T, np.dot(rot_x, rot_y))
    rotated = np.zeros((len(pts), 3))
    for ii, pnt in enumerate(pts):
        rotated[ii] = np.dot(combined, pnt)
    return rotated
def rotation_matrix_from_point(point, ret_inv=False):
    """Compute rotation matrix to go from [0,0,1] to `point`.

    The matrix is composed of a rotation in the polar direction
    (about the x-axis) followed by a rotation about the y-axis that
    matches the azimuthal angle in the x-z-plane. It is required for
    the correct 3D orientation of the backpropagated projections.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    ret_inv: bool
        Also return the inverse of the rotation matrix. The inverse
        is required for :func:`scipy.ndimage.interpolation.affine_transform`
        which maps the output coordinates to the input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix that rotates [0,0,1] to `point` and
        optionally its inverse.
    """
    x, y, z = point
    # azimuthal angle in the x-z plane
    phi = np.arctan2(x, z)
    # polar angle (negative by convention)
    theta = -np.arctan2(y, np.sqrt(x**2 + z**2))
    cp, sp = np.cos(phi), np.sin(phi)
    ct, st = np.cos(theta), np.sin(theta)
    # rotation in the polar direction (about the x-axis)
    rot_pol = np.array([
        [1, 0, 0],
        [0, ct, -st],
        [0, st, ct],
    ])
    # rotation in the x-z plane (about the y-axis)
    rot_azi = np.array([
        [cp, 0, -sp],
        [0, 1, 0],
        [sp, 0, cp],
    ])
    mat = np.dot(rot_azi, rot_pol)
    if ret_inv:
        # the transpose of each orthogonal factor is its inverse
        return mat, np.dot(rot_pol.T, rot_azi.T)
    return mat
def rotation_matrix_from_point_planerot(point, plane_angle, ret_inv=False):
    """Rotation matrix from [0,0,1] to `point` with in-plane tilt.

    Same as :func:`rotation_matrix_from_point`, but with an
    additional rotation about the z-axis by `plane_angle` that
    accounts for a tilt of the rotational axis within the detector
    (x-y) plane.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    plane_angle: float
        In-plane tilt angle [rad] about the z-axis.
    ret_inv: bool
        Also return the inverse of the rotation matrix. The inverse
        is required for :func:`scipy.ndimage.interpolation.affine_transform`
        which maps the output coordinates to the input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix that rotates [0,0,1] to `point` and
        optionally its inverse.
    """
    # These matrices are correct when there is no tilt of the
    # rotational axis within the detector plane (x-y).
    base, base_inv = rotation_matrix_from_point(point, ret_inv=True)
    # An additional rotation about the z-axis corrects for the tilt
    # in all other cases.
    ca, sa = np.cos(plane_angle), np.sin(plane_angle)
    rot_z = np.array([
        [ca, -sa, 0],
        [sa, ca, 0],
        [0, 0, 1],
    ])
    full = np.dot(base, rot_z)
    if ret_inv:
        return full, np.dot(rot_z.T, base_inv)
    return full
def sphere_points_from_angles_and_tilt(angles, tilted_axis):
    """
    For a given tilt of the rotational axis `tilted_axis`, compute
    the points on a unit sphere that correspond to the distribution
    `angles` along the great circle about this axis.

    Parameters
    ----------
    angles: 1d ndarray
        The angles that will be distributed on the great circle.
    tilted_axis: list of length 3
        The tilted axis of rotation that determines the great
        circle.

    Returns
    -------
    newang: ndarray of shape (len(angles), 3)
        One 3D point per input angle, located on the great circle
        about `tilted_axis`, with ``newang[0]`` anchored such that the
        circle touches the x-z plane at [0, 0, 1] before the final
        azimuthal rotation (see steps (a) and (b) below).

    Notes
    -----
    The reference axis is always [0,1,0].
    `theta` is the azimuthal angle measured down from the y-axis.
    `phi` is the polar angle in the x-z plane measured from z towards x.
    """
    # Only 1D angle arrays are supported here; (A,3) point arrays are
    # handled by the caller.
    assert len(angles.shape) == 1
    # Normalize tilted axis.
    tilted_axis = norm_vec(tilted_axis)
    [u, v, w] = tilted_axis
    # Initial distribution of points about great circle (x-z).
    newang = np.zeros((angles.shape[0], 3), dtype=float)
    # We subtract angles[0], because in step (a) we want that
    # newang[0]==[0,0,1]. This only works if we actually start
    # at that point.
    newang[:, 0] = np.sin(angles-angles[0])
    newang[:, 2] = np.cos(angles-angles[0])
    # Compute rotational angles w.r.t. [0,1,0].
    # - Draw a unit sphere with the y-axis pointing up and the
    #   z-axis pointing right
    # - The rotation of `tilted_axis` can be described by two
    #   separate rotations. We will use these two angles:
    #   (a) Rotation from y=1 within the y-z plane: theta
    #       This is the rotation that is critical for data
    #       reconstruction. If this angle is zero, then we
    #       have a rotational axis in the imaging plane. If
    #       this angle is PI/2, then our sinogram consists
    #       of a rotating image and 3D reconstruction is
    #       impossible. This angle is counted from the y-axis
    #       onto the x-z plane.
    #   (b) Rotation in the x-z plane: phi
    #       This angle is responsible for matching up the angles
    #       with the correct sinogram images. If this angle is zero,
    #       then the projection of the rotational axis onto the
    #       x-y plane is aligned with the y-axis. If this angle is
    #       PI/2, then the axis and its projection onto the x-y
    #       plane are identical. This angle is counted from the
    #       positive z-axis towards the positive x-axis. By default,
    #       angles[0] is the point that touches the great circle
    #       that lies in the x-z plane. angles[1] is the next point
    #       towards the x-axis if phi==0.
    # (a) This angle is the azimuthal angle theta measured from the
    #     y-axis.
    theta = np.arccos(v)
    # (b) This is the polar angle measured in the x-z plane starting
    #     at the x-axis and measured towards the positive z-axis.
    if np.allclose(u, 0) and np.allclose(w, 0):
        # Avoid flipping the axis of rotation due to numerical
        # errors during its computation.
        phi = 0
    else:
        phi = np.arctan2(u, w)
    # Determine the projection points on the unit sphere.
    # The resulting circle meets the x-z-plane at phi, and
    # is tilted by theta w.r.t. the y-axis.
    # (a) Create a tilted data set. This is achieved in 3 steps.
    # a1) Determine radius of tilted circle and get the centered
    #     circle with a smaller radius.
    rtilt = np.cos(theta)
    newang *= rtilt
    # a2) Rotate this circle about the x-axis by theta
    #     (right-handed/counter-clockwise/basic/elemental rotation)
    Rx = np.array([
        [1, 0, 0],
        [0, np.cos(theta), -np.sin(theta)],
        [0, np.sin(theta), np.cos(theta)]
    ])
    for ii in range(newang.shape[0]):
        newang[ii] = np.dot(Rx, newang[ii])
    # a3) Shift newang such that newang[0] is located at (0,0,1)
    newang = newang - (newang[0] - np.array([0, 0, 1])).reshape(1, 3)
    # (b) Rotate the entire thing with phi about the y-axis
    #     (right-handed/counter-clockwise/basic/elemental rotation)
    Ry = np.array([
        [+np.cos(phi), 0, np.sin(phi)],
        [0, 1, 0],
        [-np.sin(phi), 0, np.cos(phi)]
    ])
    for jj in range(newang.shape[0]):
        newang[jj] = np.dot(Ry, newang[jj])
    # For visualization of the resulting points, plot each row of
    # `newang` as a 3D arrow from the origin (e.g. with
    # matplotlib's mplot3d and a FancyArrowPatch subclass).
    return newang
def backpropagate_3d_tilted(uSin, angles, res, nm, lD=0,
                            tilted_axis=[0, 1, 0],
                            coords=None, weight_angles=True, onlyreal=False,
                            padding=(True, True), padfac=1.75, padval="edge",
                            intp_order=2, dtype=None,
                            num_cores=_ncores,
                            save_memory=False,
                            copy=True,
                            count=None, max_count=None,
                            verbose=0):
    r"""3D backpropagation with a tilted axis of rotation

    Three-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,y,z)`
    by a dielectric object with refractive index
    :math:`n(x,y,z)`.

    This method implements the 3D backpropagation algorithm with
    a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}`
    w.r.t. the imaging plane :cite:`Mueller2015tilted`.

    .. math::
        f(\mathbf{r}) =
            -\frac{i k_\mathrm{m}}{2\pi}
            \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\!
            \left \{
            \text{FFT}^{-1}_{\mathrm{2D}}
            \left \{
            \left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right|
            \frac{\text{FFT}_{\mathrm{2D}} \left \{
            u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
            {u_0(l_\mathrm{D})}
            \exp \! \left[i k_\mathrm{m}(M - 1) \cdot
            (z_{\phi_j}-l_\mathrm{D}) \right]
            \right \}
            \right \}

    with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}`
    and a different filter in Fourier space
    :math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared
    to :func:`backpropagate_3d`.

    .. versionadded:: 0.1.2

    Parameters
    ----------
    uSin: (A, Ny, Nx) ndarray
        Three-dimensional sinogram of plane recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: ndarray of shape (A,3) or 1D array of length A
        If the shape is (A,3), then `angles` consists of vectors
        on the unit sphere that correspond to the direction
        of illumination and acquisition (s₀). If the shape is (A,),
        then `angles` is a one-dimensional array of angles in radians
        that determines the angular position :math:`\phi_j`.
        In both cases, `tilted_axis` must be set according to the
        tilt of the rotational axis.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    tilted_axis: list of floats
        The coordinates [x, y, z] on a unit sphere representing the
        tilted axis of rotation. The default is (0,1,0),
        which corresponds to a rotation about the y-axis and
        follows the behavior of :func:`odtbrain.backpropagate_3d`.
    coords: None [(3, M) ndarray]
        Only compute the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    weight_angles: bool
        If `True`, weights each backpropagated projection with a factor
        proportional to the angular distance between the neighboring
        projections.

        .. math::
            \Delta \phi_0 \longmapsto \Delta \phi_j =
                \frac{\phi_{j+1} - \phi_{j-1}}{2}

        This currently only works when `angles` has the shape (A,).
    onlyreal: bool
        If `True`, only the real part of the reconstructed image
        will be returned. This saves computation time.
    padding: tuple of bool
        Pad the input data to the second next power of 2 before
        Fourier transforming. This reduces artifacts and speeds up
        the process for input image sizes that are not powers of 2.
        The default is padding in x and y: `padding=(True, True)`.
        For padding only in x-direction (e.g. for cylindrical
        symmetries), set `padding` to `(True, False)`. To turn off
        padding, set it to `(False, False)`.
    padfac: float
        Increase padding size of the input data. A value greater
        than one will trigger padding to the second-next power of
        two. For example, a value of 1.75 will lead to a padded
        size of 256 for an initial size of 144, whereas it will
        lead to a padded size of 512 for an initial size of 150.
        Values greater than 2 are allowed. This parameter may
        greatly increase memory usage!
    padval: float or "edge"
        The value used for padding. This is important for the Rytov
        approximation, where an approximate zero in the phase might
        translate to 2πi due to the unwrapping algorithm. In that
        case, this value should be a multiple of 2πi.
        If `padval` is "edge", then the edge values are used for
        padding (see documentation of :func:`numpy.pad`). If `padval`
        is a float, then padding is done with a linear ramp.
    intp_order: int between 0 and 5
        Order of the interpolation for rotation.
        See :func:`scipy.ndimage.interpolation.affine_transform` for details.
    dtype: dtype object or argument for :func:`numpy.dtype`
        The data type that is used for calculations (float or double).
        Defaults to `numpy.float_`.
    num_cores: int
        The number of cores to use for parallel operations. This value
        defaults to the number of cores on the system.
    save_memory: bool
        Saves memory at the cost of longer computation time.

        .. versionadded:: 0.1.5
    copy: bool
        Copy input sinogram `uSin` for data processing. If `copy`
        is set to `False`, then `uSin` will be overridden.

        .. versionadded:: 0.1.5
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    This implementation can deal with projection angles that are not
    distributed along a circle about the rotational axis. If there are
    slight deviations from this circle, simply pass the 3D rotational
    positions instead of the 1D angles to the `angles` argument. In
    principle, this should improve the reconstruction. The general
    problem here is that the backpropagation algorithm requires a
    ramp filter in Fourier space that is oriented perpendicular to the
    rotational axis. If the sample does not rotate about a single axis,
    then a 1D parametric representation of this rotation must be found
    to correctly determine the filter in Fourier space. Such a
    parametric representation could e.g. be a spiral between the poles
    of the unit sphere (but this kind of rotation is probably difficult
    to implement experimentally).

    If you have input images with rectangular shape, e.g. Nx!=Ny and
    the rotational axis deviates by approximately PI/2 from the axis
    (0,1,0), then data might get cropped in the reconstruction volume.
    You can avoid that by rotating your input data and the rotational
    axis by PI/2. For instance, change`tilted_axis` from [1,0,0] to
    [0,1,0] and `np.rot90` the sinogram images.

    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    # --- input validation ---------------------------------------------
    A = angles.shape[0]
    if angles.shape not in [(A,), (A, 1), (A, 3)]:
        raise ValueError("`angles` must have shape (A,) or (A,3)!")
    if len(uSin.shape) != 3:
        raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
    if len(uSin) != A:
        raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
    if len(list(padding)) != 2:
        raise ValueError("`padding` must be boolean tuple of length 2!")
    if np.array(padding).dtype is not np.dtype(bool):
        raise ValueError("Parameter `padding` must be boolean tuple.")
    if coords is not None:
        raise NotImplementedError("Setting coordinates is not yet supported.")
    if num_cores > _ncores:
        raise ValueError("`num_cores` must not exceed number "
                         + "of physical cores: {}".format(_ncores))
    # setup dtype
    if dtype is None:
        dtype = np.float_
    dtype = np.dtype(dtype)
    if dtype.name not in ["float32", "float64"]:
        raise ValueError("dtype must be float32 or float64!")
    # complex dtype with twice the bit width of the real dtype
    # (float32 -> complex64, float64 -> complex128)
    dtype_complex = np.dtype("complex{}".format(
        2 * int(dtype.name.strip("float"))))

    # progress monitoring
    if max_count is not None:
        max_count.value += A + 2

    ne.set_num_threads(num_cores)

    uSin = np.array(uSin, copy=copy)
    angles = np.array(angles, copy=copy)
    angles = np.squeeze(angles)  # support shape (A,1)

    # lengths of the input data
    lny, lnx = uSin.shape[1], uSin.shape[2]
    ln = lnx

    # We perform zero-padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms of the input image size is not
    # a power of 2.
    if padding[0]:
        orderx = int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
        padx = orderx - lnx
    else:
        padx = 0
    if padding[1]:
        ordery = int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
        pady = ordery - lny
    else:
        pady = 0

    # split the padding between left/right (top/bottom) as evenly as possible
    padyl = int(np.ceil(pady / 2))
    padyr = pady - padyl
    padxl = int(np.ceil(padx / 2))
    padxr = padx - padxl

    # zero-padded length of sinogram.
    lNx = lnx + padx
    lNy = lny + pady
    lNz = ln

    if verbose > 0:
        print("......Image size (x,y): {}x{}, padded: {}x{}".format(
            lnx, lny, lNx, lNy))

    # `tilted_axis` is required for several things:
    # 1. the filter |kDx*v + kDy*u| with (u,v,w)==tilted_axis
    # 2. the alignment of the rotational axis with the y-axis
    # 3. the determination of the point coordinates if only
    #    angles in radians are given.
    #
    # For (1) we need the exact axis that corresponds to our input data.
    # For (2) and (3) we need `tilted_axis_yz` (see below) which is the
    # axis `tilted_axis` rotated in the detector plane such that its
    # projection onto the detector coincides with the y-axis.

    # Normalize input axis
    tilted_axis = norm_vec(tilted_axis)

    # `tilted_axis_yz` is computed by performing the inverse rotation in
    # the x-y plane with `angz`. We will again use `angz` in the transform
    # within the for-loop to rotate each projection according to its
    # acquisition angle.
    angz = np.arctan2(tilted_axis[0], tilted_axis[1])
    rotmat = np.array([
        [np.cos(angz), -np.sin(angz), 0],
        [np.sin(angz), np.cos(angz), 0],
        [0, 0, 1],
    ])
    # rotate `tilted_axis` onto the y-z plane.
    tilted_axis_yz = norm_vec(np.dot(rotmat, tilted_axis))

    if len(angles.shape) == 1:
        if weight_angles:
            weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
        # compute the 3D points from tilted axis
        angles = sphere_points_from_angles_and_tilt(angles, tilted_axis_yz)
    else:
        if weight_angles:
            warnings.warn("3D angular weighting not yet supported!")
            weights = 1
        # normalize and rotate angles
        for ii in range(angles.shape[0]):
            # angles[ii] = norm_vec(angles[ii]) #-> not correct
            # instead rotate like `tilted_axis` onto the y-z plane.
            angles[ii] = norm_vec(np.dot(rotmat, angles[ii]))

    if weight_angles:
        uSin *= weights

    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res

    # The notation in the our optical tomography script for
    # a wave propagating to the right is:
    #
    # u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    # u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.

    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)² a₀)                 (prefactor)
    #      * iiint dϕ₀ dkx dky                  (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    # (r and s₀ are vectors. The last term contains a dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # The filter can be split into two parts:
    # 1) part without dependence on the z-coordinate
    #    (applied in Fourier space, like a classical backprojection
    #    filter),
    # 2) part with dependence on the z-coordinate
    #    exp( i (kx t⊥ + kₘ (M - 1) s₀) r ),
    #    which amounts to an inverse Fourier transform per z-slice.

    # if lNx != lNy:
    #    raise NotImplementedError("Input data must be square shaped!")

    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lNx)  # 1D array
    fy = np.fft.fftfreq(lNy)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    ky = 2 * np.pi * fy
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #               a, y, x
    kx = kx.reshape(1, -1)
    ky = ky.reshape(-1, 1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 + ky**2 < km**2)

    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)

    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    # Also filter the prefactor, so nothing outside the required
    # low-pass contributes to the sum.
    # The filter is now dependent on the rotational position of the
    # specimen. We have to include information from the angles.
    # We want to estimate the rotational axis for every frame. We
    # do that by computing the cross-product of the vectors in
    # angles from the current and previous image.
    u, v, _w = tilted_axis
    filterabs = np.abs(kx*v+ky*u) * filter_klp
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)

    if count is not None:
        count.value += 1

    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
    #
    # xD =   x cos(ϕ₀) + z sin(ϕ₀)
    # zD = - x sin(ϕ₀) + z cos(ϕ₀)

    # Everything is in pixels
    center = lNz / 2.0
    # z slice positions, centered about the rotation center
    z = np.linspace(-center, center, lNz, endpoint=False)
    zv = z.reshape(-1, 1, 1)

    #            y,  x
    Mp = M.reshape(lNy, lNx)

    # filter2 = np.exp(1j * zv * km * (Mp - 1))
    f2_exp_fac = 1j * km * (Mp - 1)
    if save_memory:
        # compute filter2 later
        pass
    else:
        # compute filter2 now
        # (this requires more RAM but is faster)
        filter2 = ne.evaluate("exp(factor * zv)",
                              local_dict={"factor": f2_exp_fac,
                                          "zv": zv},
                              casting="same_kind")

    if count is not None:
        count.value += 1

    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype)
    else:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)

    # Create plan for FFTW:
    # Flag is "estimate":
    #   specifies that, instead of actual measurements of different
    #   algorithms, a simple heuristic is used to pick a (probably
    #   sub-optimal) plan quickly. With this flag, the input/output
    #   arrays are not overwritten during planning.

    # Byte-aligned arrays
    oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)

    myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
                              flags=["FFTW_ESTIMATE"], axes=(0, 1))

    # Create plan for IFFTW:
    inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    # plan is "patient":
    #    FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
    #    of algorithms and often produces a “more optimal” plan
    #    (especially for large transforms), but at the expense of
    #    several times longer planning time (especially for large
    #    transforms).
    # print(inarr.flags)
    myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
                               axes=(0, 1),
                               direction="FFTW_BACKWARD",
                               flags=["FFTW_MEASURE"])

    # filtered projections in loop
    filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)

    # Rotate all points such that we are effectively rotating everything
    # about the y-axis.
    angles = rotate_points_to_axis(points=angles, axis=tilted_axis_yz)

    for aa in np.arange(A):
        if not (padding[0] and padding[1]):
            # no padding
            oneslice[:] = uSin[aa]
        elif padval == "edge":
            # padding with edge values
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="edge")
        else:
            # padding with linear ramp
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="linear_ramp",
                                 end_values=(padval,))

        myfftw_plan.execute()
        # normalize to (lNx * lNy) for FFTW and multiply with prefactor, filter
        oneslice *= filterabs * prefactor / (lNx * lNy)

        for p in range(len(zv)):
            if save_memory:
                # compute filter2 here;
                # this is comparatively slower than the other case
                ne.evaluate("exp(factor * zvp) * projectioni",
                            local_dict={"zvp": zv[p],
                                        "projectioni": oneslice,
                                        "factor": f2_exp_fac},
                            casting="same_kind",
                            out=inarr)
            else:
                # use universal functions
                np.multiply(filter2[p], oneslice, out=inarr)
            myifftw_plan.execute()
            # crop the padding away again
            filtered_proj[p, :, :] = inarr[padyl:padyl+lny, padxl:padxl+lnx]

        # The Cartesian axes in our array are ordered like this: [z,y,x]
        # However, the rotation matrix requires [x,y,z]. Therefore, we
        # need to np.transpose the first and last axis and also invert the
        # y-axis.
        fil_p_t = filtered_proj.transpose(2, 1, 0)[:, ::-1, :]

        # get rotation matrix for this point and also rotate in plane
        _drot, drotinv = rotation_matrix_from_point_planerot(angles[aa],
                                                             plane_angle=angz,
                                                             ret_inv=True)

        # apply offset required by affine_transform
        # The offset is only required for the rotation in
        # the x-z-plane.
        # This could be achieved like so:
        # The offset "-.5" assures that we are rotating about
        # the center of the image and not the value at the center
        # of the array (this is also what `scipy.ndimage.rotate` does.
        c = 0.5 * np.array(fil_p_t.shape) - .5
        offset = c - np.dot(drotinv, c)

        # Perform rotation
        # We cannot split the inplace-rotation into multiple subrotations
        # as we did in _Back_3d_tilted.backpropagate_3d, because the rotation
        # axis is arbitrarily placed in the 3d array. Rotating single
        # slices does not yield the same result as rotating the entire
        # array. Instead of using affine_transform, map_coordinates might
        # be faster for multiple cores.

        # Also undo the axis transposition that we performed previously.
        outarr.real += scipy.ndimage.interpolation.affine_transform(
            fil_p_t.real, drotinv,
            offset=offset,
            mode="constant",
            cval=0,
            order=intp_order).transpose(2, 1, 0)[:, ::-1, :]

        if not onlyreal:
            outarr.imag += scipy.ndimage.interpolation.affine_transform(
                fil_p_t.imag, drotinv,
                offset=offset,
                mode="constant",
                cval=0,
                order=intp_order).transpose(2, 1, 0)[:, ::-1, :]

        if count is not None:
            count.value += 1

    return outarr
|
RI-imaging/ODTbrain | odtbrain/_translate_ri.py | <filename>odtbrain/_translate_ri.py<gh_stars>10-100
"""Translate reconstructed object functions to refractive index"""
import numpy as np
def odt_to_ri(f, res, nm):
    r"""Convert the ODT object function to refractive index

    The object function of :abbr:`ODT (Optical Diffraction Tomography)`
    is defined via the Helmholtz equation

    .. math::

        f(\mathbf{r}) = k_\mathrm{m}^2 \left[
            \left( \frac{n(\mathbf{r})}{n_\mathrm{m}} \right)^2 - 1
            \right]

    with :math:`k_\mathrm{m} = \frac{2\pi n_\mathrm{m}}{\lambda}`.
    Inverting it yields the refractive index

    .. math::

        n(\mathbf{r}) = n_\mathrm{m}
            \sqrt{\frac{f(\mathbf{r})}{k_\mathrm{m}^2} + 1 }

    Parameters
    ----------
    f: n-dimensional ndarray
        The reconstructed object function :math:`f(\mathbf{r})`.
    res: float
        The size of the vacuum wave length :math:`\lambda` in pixels.
    nm: float
        The refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`f(\mathbf{r})`.

    Returns
    -------
    ri: n-dimensional ndarray
        The complex refractive index :math:`n(\mathbf{r})`.

    Notes
    -----
    Taking the root of a complex number is ambiguous; of the possible
    solutions, always the positive (real) root of the refractive index
    is returned.
    """
    # wave number of the medium in 1/px
    wavenumber = (2 * np.pi * nm) / res
    ri = nm * np.sqrt(f / wavenumber**2 + 1)
    # Enforce the positive root: whenever the computed refractive index
    # ended up with a negative real part, flip its sign. (This mirrors
    # what happens in Slaney's diffract/Src/back.c in line 414.)
    flipped = np.where(ri.real < 0)
    ri[flipped] *= -1
    return ri
def opt_to_ri(f, res, nm):
    r"""Convert the OPT object function to refractive index

    :abbr:`OPT (Optical Projection Tomography)` reconstructs the object
    function from raw phase data. This method translates such phase data
    into refractive index data via

    .. math::

        n(\mathbf{r}) = n_\mathrm{m} +
            \frac{f(\mathbf{r}) \cdot \lambda}{2 \pi}

    Parameters
    ----------
    f: n-dimensional ndarray
        The reconstructed object function :math:`f(\mathbf{r})`.
    res: float
        The size of the vacuum wave length :math:`\lambda` in pixels.
    nm: float
        The refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`f(\mathbf{r})`.

    Returns
    -------
    ri: n-dimensional ndarray
        The complex refractive index :math:`n(\mathbf{r})`.

    Notes
    -----
    Not intended for diffraction tomography data; use
    :py:func:`odt_to_ri` for ODT instead.
    """
    # phase-to-index offset: f·λ/(2π)
    offset = f / (2 * np.pi) * res
    return nm + offset
|
RI-imaging/ODTbrain | tests/test_copy.py | <reponame>RI-imaging/ODTbrain
"""Test copying arrays"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, get_test_parameter_set
def test_back3d():
    """The reconstruction must be independent of the `copy` flag."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    params = get_test_parameter_set(1)[0]
    # run once with copy=False (may override the sinogram), then with
    # copy=True; complex-valued reconstruction in both cases
    results = [odtbrain.backpropagate_3d(sino, angles, padval=0,
                                         dtype=np.float64,
                                         copy=flag, **params)
               for flag in (False, True)]
    assert np.allclose(results[0], results[1])
def test_back3d_tilted():
    """The tilted reconstruction must be independent of the `copy` flag."""
    sino, angles = create_test_sino_3d(Nx=10, Ny=10)
    params = get_test_parameter_set(1)[0]
    # copy=False first (may override the sinogram), then copy=True
    results = [odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
                                                dtype=np.float64,
                                                copy=flag, **params)
               for flag in (False, True)]
    assert np.allclose(results[0], results[1])
if __name__ == "__main__":
    # Run every test function defined in this module (names starting
    # with "test_"), in definition order.
    module_vars = dict(locals())
    for name, candidate in module_vars.items():
        if name.startswith("test_") and callable(candidate):
            candidate()
|
RI-imaging/ODTbrain | examples/backprop_from_mie_2d_incomplete_coverage.py | <gh_stars>10-100
"""Mie cylinder with incomplete angular coverage
This example illustrates how the backpropagation algorithm of ODTbrain
handles incomplete angular coverage. All examples use 100 projections
at 100%, 60%, and 40% total angular coverage. The keyword argument
`weight_angles` that invokes angular weighting is set to `True` by
default. The *in silico* data set was created with the
software `miefield <https://github.com/RI-imaging/miefield>`_.
The data are 1D projections of a non-centered cylinder of constant
refractive index 1.339 embedded in water with refractive index 1.333.
The first column shows the used sinograms (missing angles are displayed
as zeros) that were created from the original sinogram with 250
projections. The second column shows the reconstruction without angular
weights and the third column shows the reconstruction with angular
weights. The keyword argument `weight_angles` was introduced in version
0.1.1.
A 180 degree coverage results in a good reconstruction of the object.
Angular weighting as implemented in the backpropagation algorithm
of ODTbrain automatically addresses uneven and incomplete angular
coverage.
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data
# Load the Mie sinogram (complex field at the detector divided into real
# and imaginary parts) together with the simulation metadata.
sino, angles, cfg = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
                              f_angles="mie_angles.txt",
                              f_sino_real="sino_real.txt",
                              f_sino_imag="sino_imag.txt",
                              f_info="mie_info.txt")
A, size = sino.shape

# background sinogram computed with Mie theory
# miefield.GetSinogramCylinderRotation(radius, nmed, nmed, lD, lC, size, A,res)
u0 = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
               f_sino_imag="u0_imag.txt",
               f_sino_real="u0_real.txt")
# create 2d array
u0 = np.tile(u0, size).reshape(A, size).transpose()

# background field necessary to compute initial born field
# u0_single = mie.GetFieldCylinder(radius, nmed, nmed, lD, size, res)
u0_single = load_data("mie_2d_noncentered_cylinder_A250_R2.zip",
                      f_sino_imag="u0_single_imag.txt",
                      f_sino_real="u0_single_real.txt")

print("Example: Backpropagation from 2D FDTD simulations")
print("Refractive index of medium:", cfg["nmed"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Performing backpropagation.")

# Set measurement parameters
# Compute scattered field from cylinder
radius = cfg["radius"]  # wavelengths
nmed = cfg["nmed"]
ncyl = cfg["ncyl"]
lD = cfg["lD"]  # measurement distance in wavelengths
lC = cfg["lC"]  # displacement from center of image
size = cfg["size"]
res = cfg["res"]  # px/wavelengths
A = cfg["A"]  # number of projections

# Ground-truth refractive index map of the off-center cylinder.
# Fix: `np.float` was a deprecated alias of the builtin `float` and was
# removed in NumPy 1.24; use `float` directly (identical semantics).
x = np.arange(size) - size / 2.0
X, Y = np.meshgrid(x, x)
rad_px = radius * res
phantom = np.array(((Y - lC * res)**2 + X**2) < rad_px**2,
                   dtype=float) * (ncyl - nmed) + nmed

u_sinR = odt.sinogram_as_rytov(sino / u0)

# Rytov 100 projections evenly distributed
removeeven = np.argsort(angles % .002)[:150]
angleseven = np.delete(angles, removeeven, axis=0)
u_sinReven = np.delete(u_sinR, removeeven, axis=0)
pheven = odt.sinogram_as_radon(sino / u0)
pheven[removeeven] = 0

fReven = odt.backpropagate_2d(u_sinReven, angleseven, res, nmed, lD * res)
nReven = odt.odt_to_ri(fReven, res, nmed)
fRevennw = odt.backpropagate_2d(
    u_sinReven, angleseven, res, nmed, lD * res, weight_angles=False)
nRevennw = odt.odt_to_ri(fRevennw, res, nmed)

# Rytov 100 projections more than 180
removemiss = 249 - np.concatenate((np.arange(100),
                                   100 + np.arange(150)[::3]))
anglesmiss = np.delete(angles, removemiss, axis=0)
u_sinRmiss = np.delete(u_sinR, removemiss, axis=0)
phmiss = odt.sinogram_as_radon(sino / u0)
phmiss[removemiss] = 0

fRmiss = odt.backpropagate_2d(u_sinRmiss, anglesmiss, res, nmed, lD * res)
nRmiss = odt.odt_to_ri(fRmiss, res, nmed)
fRmissnw = odt.backpropagate_2d(
    u_sinRmiss, anglesmiss, res, nmed, lD * res, weight_angles=False)
nRmissnw = odt.odt_to_ri(fRmissnw, res, nmed)

# Rytov 100 projections less than 180
removebad = 249 - np.arange(150)
anglesbad = np.delete(angles, removebad, axis=0)
u_sinRbad = np.delete(u_sinR, removebad, axis=0)
phbad = odt.sinogram_as_radon(sino / u0)
phbad[removebad] = 0

fRbad = odt.backpropagate_2d(u_sinRbad, anglesbad, res, nmed, lD * res)
nRbad = odt.odt_to_ri(fRbad, res, nmed)
fRbadnw = odt.backpropagate_2d(
    u_sinRbad, anglesbad, res, nmed, lD * res, weight_angles=False)
nRbadnw = odt.odt_to_ri(fRbadnw, res, nmed)

# prepare plot: shared color limits for the RI and phase maps
kw_ri = {"vmin": np.min(np.array([phantom, nRmiss.real, nReven.real])),
         "vmax": np.max(np.array([phantom, nRmiss.real, nReven.real]))}
kw_ph = {"vmin": np.min(np.array([pheven, phmiss])),
         "vmax": np.max(np.array([pheven, phmiss])),
         "cmap": "coolwarm"}

fig, axes = plt.subplots(3, 3, figsize=(8, 6.5))

axes[0, 0].set_title("100% coverage ({} proj.)".format(angleseven.shape[0]))
phmap = axes[0, 0].imshow(pheven, **kw_ph)
axes[0, 1].set_title("RI without angular weights")
rimap = axes[0, 1].imshow(nRevennw.real, **kw_ri)
axes[0, 2].set_title("RI with angular weights")
rimap = axes[0, 2].imshow(nReven.real, **kw_ri)

axes[1, 0].set_title("60% coverage ({} proj.)".format(anglesmiss.shape[0]))
axes[1, 0].imshow(phmiss, **kw_ph)
axes[1, 1].set_title("RI without angular weights")
axes[1, 1].imshow(nRmissnw.real, **kw_ri)
axes[1, 2].set_title("RI with angular weights")
axes[1, 2].imshow(nRmiss.real, **kw_ri)

axes[2, 0].set_title("40% coverage ({} proj.)".format(anglesbad.shape[0]))
axes[2, 0].imshow(phbad, **kw_ph)
axes[2, 1].set_title("RI without angular weights")
axes[2, 1].imshow(nRbadnw.real, **kw_ri)
axes[2, 2].set_title("RI with angular weights")
axes[2, 2].imshow(nRbad.real, **kw_ri)

# color bars
cbkwargs = {"fraction": 0.045,
            "format": "%.3f"}
plt.colorbar(phmap, ax=axes[0, 0], **cbkwargs)
plt.colorbar(phmap, ax=axes[1, 0], **cbkwargs)
plt.colorbar(phmap, ax=axes[2, 0], **cbkwargs)
plt.colorbar(rimap, ax=axes[0, 1], **cbkwargs)
plt.colorbar(rimap, ax=axes[1, 1], **cbkwargs)
plt.colorbar(rimap, ax=axes[2, 1], **cbkwargs)
plt.colorbar(rimap, ax=axes[0, 2], **cbkwargs)
plt.colorbar(rimap, ax=axes[1, 2], **cbkwargs)
plt.colorbar(rimap, ax=axes[2, 2], **cbkwargs)

plt.tight_layout()
plt.show()
|
RI-imaging/ODTbrain | odtbrain/_alg3d_bpp.py | """3D backpropagation algorithm"""
import ctypes
import multiprocessing as mp
import numexpr as ne
import numpy as np
import pyfftw
import scipy.ndimage
from . import util
# Default worker/thread count: the number of CPUs reported by the OS.
ncores = mp.cpu_count()
# Worker-side storage for the shared rotation buffer and its metadata;
# populated by _init_worker, read by _rotate, cleared by _cleanup_worker.
mprotate_dict = {}
def _cleanup_worker():
    """Drop the shared-array references stored by `_init_worker`."""
    for key in ("X", "X_shape", "X_dtype"):
        # pop with a default so keys that were never set are ignored
        mprotate_dict.pop(key, None)
def _init_worker(X, X_shape, X_dtype):
    """Pool initializer: stash the shared array and its metadata.

    The module-level dict makes the buffer visible to `_rotate` inside
    every worker process (plain module globals would work as well).
    """
    mprotate_dict.update(X=X, X_shape=X_shape, X_dtype=X_dtype)
def _mprotate(ang, lny, pool, order):
    """Rotate the shared volume in-place using a pool of workers.

    The y-range is split into `ncores` contiguous slabs, each handed to
    `_rotate`, which works on the shared array set up by `_init_worker`.
    (Roughly 4x speedup on an Intel i7-3820 CPU @ 3.60GHz with 8 cores.)

    Parameters
    ----------
    ang: float
        rotation angle in degrees
    lny: int
        total number of y-slices to rotate
    pool: instance of multiprocessing.pool.Pool
        the pool object used for the computation
    order: int
        interpolation order
    """
    slab = int(np.floor(lny / ncores))
    jobs = []
    for worker in range(ncores):
        start = worker * slab
        # the last worker also picks up the division remainder
        stop = lny if worker == ncores - 1 else (worker + 1) * slab
        jobs.append((start, stop, ang, order))
    pool.map(_rotate, jobs)
def _rotate(d):
    """Worker: rotate one y-slab of the shared volume in place.

    Parameters
    ----------
    d: tuple (ymin, ymax, ang, order)
        slab boundaries, rotation angle in degrees, and spline
        interpolation order
    """
    arr = np.frombuffer(mprotate_dict["X"],
                        dtype=mprotate_dict["X_dtype"]).reshape(
        mprotate_dict["X_shape"])
    (ymin, ymax, ang, order) = d
    # The `scipy.ndimage.interpolation` namespace was deprecated and
    # removed in SciPy 1.10; the function lives in `scipy.ndimage`.
    return scipy.ndimage.rotate(
        arr[:, ymin:ymax, :],  # input
        angle=-ang,  # angle
        axes=(0, 2),  # rotate in the x-z plane (about y)
        reshape=False,  # keep the original shape
        output=arr[:, ymin:ymax, :],  # write back in place
        order=order,  # interpolation order
        mode="constant",  # fill outside the boundaries ...
        cval=0)  # ... with zeros
def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None,
                     weight_angles=True, onlyreal=False,
                     padding=(True, True), padfac=1.75, padval="edge",
                     intp_order=2, dtype=None,
                     num_cores=ncores,
                     save_memory=False,
                     copy=True,
                     count=None, max_count=None,
                     verbose=0):
    r"""3D backpropagation

    Three-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,y,z)`
    by a dielectric object with refractive index
    :math:`n(x,y,z)`.

    This method implements the 3D backpropagation algorithm
    :cite:`Mueller2015arxiv`.

    .. math::
        f(\mathbf{r}) =
            -\frac{i k_\mathrm{m}}{2\pi}
            \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
            \left \{
            \text{FFT}^{-1}_{\mathrm{2D}}
            \left \{
            \left| k_\mathrm{Dx} \right|
            \frac{\text{FFT}_{\mathrm{2D}} \left \{
            u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
            {u_0(l_\mathrm{D})}
            \exp \! \left[i k_\mathrm{m}(M - 1) \cdot
            (z_{\phi_j}-l_\mathrm{D}) \right]
            \right \}
            \right \}

    with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
    :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
    rotational operator :math:`D_{-\phi_j}`, the angular distance between the
    projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
    :math:`|k_\mathrm{Dx}|`, and the propagation distance
    :math:`(z_{\phi_j}-l_\mathrm{D})`.

    Parameters
    ----------
    uSin: (A, Ny, Nx) ndarray
        Three-dimensional sinogram of plane recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    coords: None [(3, M) ndarray]
        Only compute the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    weight_angles: bool
        If `True`, weights each backpropagated projection with a factor
        proportional to the angular distance between the neighboring
        projections.

        .. math::
            \Delta \phi_0 \longmapsto \Delta \phi_j =
                \frac{\phi_{j+1} - \phi_{j-1}}{2}

        .. versionadded:: 0.1.1
    onlyreal: bool
        If `True`, only the real part of the reconstructed image
        will be returned. This saves computation time.
    padding: tuple of bool
        Pad the input data to the second next power of 2 before
        Fourier transforming. This reduces artifacts and speeds up
        the process for input image sizes that are not powers of 2.
        The default is padding in x and y: `padding=(True, True)`.
        For padding only in x-direction (e.g. for cylindrical
        symmetries), set `padding` to `(True, False)`. To turn off
        padding, set it to `(False, False)`.
    padfac: float
        Increase padding size of the input data. A value greater
        than one will trigger padding to the second-next power of
        two. For example, a value of 1.75 will lead to a padded
        size of 256 for an initial size of 144, whereas it will
        lead to a padded size of 512 for an initial size of 150.
        Values geater than 2 are allowed. This parameter may
        greatly increase memory usage!
    padval: float or "edge"
        The value used for padding. This is important for the Rytov
        approximation, where an approximat zero in the phase might
        translate to 2πi due to the unwrapping algorithm. In that
        case, this value should be a multiple of 2πi.
        If `padval` is "edge", then the edge values are used for
        padding (see documentation of :func:`numpy.pad`). If `padval`
        is a float, then padding is done with a linear ramp.
    intp_order: int between 0 and 5
        Order of the interpolation for rotation.
        See :func:`scipy.ndimage.rotate` for details.
    dtype: dtype object or argument for :func:`numpy.dtype`
        The data type that is used for calculations (float or double).
        Defaults to `numpy.float64`.
    num_cores: int
        The number of cores to use for parallel operations. This value
        defaults to the number of cores on the system.
    save_memory: bool
        Saves memory at the cost of longer computation time.

        .. versionadded:: 0.1.5
    copy: bool
        Copy input sinogram `uSin` for data processing. If `copy`
        is set to `False`, then `uSin` will be overridden.

        .. versionadded:: 0.1.5
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    A = angles.size
    if len(uSin.shape) != 3:
        raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
    if len(uSin) != A:
        raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
    if len(list(padding)) != 2:
        raise ValueError("`padding` must be boolean tuple of length 2!")
    if np.array(padding).dtype is not np.dtype(bool):
        raise ValueError("Parameter `padding` must be boolean tuple.")
    if coords is not None:
        raise NotImplementedError("Setting coordinates is not yet supported.")
    if num_cores > ncores:
        raise ValueError("`num_cores` must not exceed number "
                         + "of physical cores: {}".format(ncores))
    # setup dtype
    if dtype is None:
        # `np.float_` was removed in NumPy 2.0; float64 is the same type.
        dtype = np.float64
    dtype = np.dtype(dtype)
    if dtype.name not in ["float32", "float64"]:
        raise ValueError("dtype must be float32 or float64!")
    # matching complex dtype: complex64 for float32, complex128 for float64
    dtype_complex = np.dtype("complex{}".format(
        2 * int(dtype.name.strip("float"))))
    # set ctype (for the multiprocessing.RawArray below)
    ct_dt_map = {np.dtype(np.float32): ctypes.c_float,
                 np.dtype(np.float64): ctypes.c_double
                 }
    # progress
    if max_count is not None:
        max_count.value += A + 2
    ne.set_num_threads(num_cores)
    uSin = np.array(uSin, copy=copy)
    # lengths of the input data
    lny, lnx = uSin.shape[1], uSin.shape[2]
    # The z-size of the output array must match the x-size.
    # The rotation is performed about the y-axis (lny).
    ln = lnx
    # We perform zero-padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms of the input image size is not
    # a power of 2.
    if padding[0]:
        orderx = int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
        padx = orderx - lnx
    else:
        padx = 0
    if padding[1]:
        ordery = int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
        pady = ordery - lny
    else:
        pady = 0
    # split the pad evenly between left/right and top/bottom
    padyl = int(np.ceil(pady / 2))
    padyr = pady - padyl
    padxl = int(np.ceil(padx / 2))
    padxr = padx - padxl
    # zero-padded length of sinogram.
    lNx = lnx + padx
    lNy = lny + pady
    lNz = ln
    if verbose > 0:
        print("......Image size (x,y): {}x{}, padded: {}x{}".format(
            lnx, lny, lNx, lNy))
    # Perform weighting
    if weight_angles:
        weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
        uSin *= weights
    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # Here, the notation for
    # a wave propagating to the right is:
    #
    # u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    # u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.
    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)² a₀)                 (prefactor)
    #      * iiint dϕ₀ dkx dky                  (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    # (r and s₀ are vectors. The last term contains a dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # The filter can be split into two parts
    #
    # 1) part without dependence on the z-coordinate
    #
    #        -i kₘ / ((2π)² a₀)
    #      * iiint dϕ₀ dkx dky
    #      * |kx|
    #      * exp(-i kₘ M lD )
    #
    # 2) part with dependence of the z-coordinate
    #
    #        exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # The filter (1) can be performed using the classical filter process
    # as in the backprojection algorithm.
    #
    #
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lNx)  # 1D array
    fy = np.fft.fftfreq(lNy)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    ky = 2 * np.pi * fy
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #              y, x
    kx = kx.reshape(1, -1)
    ky = ky.reshape(-1, 1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 + ky**2 < km**2)
    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    # Also filter the prefactor, so nothing outside the required
    # low-pass contributes to the sum.
    prefactor *= np.abs(kx) * filter_klp
    # prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp )
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    if count is not None:
        count.value += 1
    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
    #
    # xD =   x cos(ϕ₀) + z sin(ϕ₀)
    # zD = - x sin(ϕ₀) + z cos(ϕ₀)
    # Everything is in pixels
    center = lNz / 2.0
    z = np.linspace(-center, center, lNz, endpoint=False)
    zv = z.reshape(-1, 1, 1)
    #               z, y, x
    Mp = M.reshape(lNy, lNx)
    # filter2 = np.exp(1j * zv * km * (Mp - 1))
    f2_exp_fac = 1j * km * (Mp - 1)
    if save_memory:
        # compute filter2 later
        pass
    else:
        # compute filter2 now
        filter2 = ne.evaluate("exp(factor * zv)",
                              local_dict={"factor": f2_exp_fac,
                                          "zv": zv},
                              casting="same_kind")
        # occupies some amount of ram, but yields faster
        # computation later
    if count is not None:
        count.value += 1
    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype)
    else:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    # Create plan for FFTW
    # save memory by in-place operations
    # projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor
    # FFTW-flag is "estimate":
    #   specifies that, instead of actual measurements of different
    #   algorithms, a simple heuristic is used to pick a (probably
    #   sub-optimal) plan quickly. With this flag, the input/output
    #   arrays are not overwritten during planning.
    # Byte-aligned arrays
    oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
                              flags=["FFTW_ESTIMATE"], axes=(0, 1))
    # Create plan for IFFTW:
    inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    # inarr[:] = (projection[0]*filter2)[0,:,:]
    # plan is "patient":
    #    FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
    #    of algorithms and often produces a “more optimal” plan
    #    (especially for large transforms), but at the expense of
    #    several times longer planning time (especially for large
    #    transforms).
    # print(inarr.flags)
    myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
                               axes=(0, 1),
                               direction="FFTW_BACKWARD",
                               flags=["FFTW_MEASURE"])
    # Setup a shared array
    shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx)
    arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx)
    # Initialize the pool with the shared array
    pool4loop = mp.Pool(processes=num_cores,
                        initializer=_init_worker,
                        initargs=(shared_array, (ln, lny, lnx), dtype))
    # filtered projections in loop
    filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    for aa in np.arange(A):
        if not (padding[0] and padding[1]):
            # no padding
            oneslice[:] = uSin[aa]
        elif padval == "edge":
            # padding with edge values
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="edge")
        else:
            # padding with linear ramp
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="linear_ramp",
                                 end_values=(padval,))
        myfftw_plan.execute()
        # normalize to (lNx * lNy) for FFTW and multiply with prefactor
        oneslice *= prefactor / (lNx * lNy)
        # 14x Speedup with fftw3 compared to numpy fft and
        # memory reduction by a factor of 2!
        # ifft will be computed in-place
        for p in range(len(zv)):
            if save_memory:
                # compute filter2 here;
                # this is comparatively slower than the other case
                ne.evaluate("exp(factor * zvp) * projectioni",
                            local_dict={"zvp": zv[p],
                                        "projectioni": oneslice,
                                        "factor": f2_exp_fac},
                            casting="same_kind",
                            out=inarr)
            else:
                # use universal functions
                np.multiply(filter2[p], oneslice, out=inarr)
            myifftw_plan.execute()
            filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl]
        # resize image to original size
        # The copy is necessary to prevent memory leakage.
        arr[:] = filtered_proj.real
        phi0 = np.rad2deg(angles[aa])
        if not onlyreal:
            filtered_proj_imag = filtered_proj.imag
        _mprotate(phi0, lny, pool4loop, intp_order)
        outarr.real += arr
        if not onlyreal:
            arr[:] = filtered_proj_imag
            _mprotate(phi0, lny, pool4loop, intp_order)
            outarr.imag += arr
        if count is not None:
            count.value += 1
    pool4loop.terminate()
    pool4loop.join()
    _cleanup_worker()
    return outarr
|
RI-imaging/ODTbrain | tests/test_rotation_matrices.py | <gh_stars>10-100
"""Test 3D backpropagation with tilted axis of ration: matrices"""
import numpy as np
import odtbrain
import odtbrain._alg3d_bppt
def test_rotate_points_to_axis():
    """The rotation axis itself must always be mapped onto the y-axis."""
    # rotation of axis itself always goes to y-axis
    rot1 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=[[1, 2, 3]], axis=[1, 2, 3])
    # use abs(): without it, any (wrongly) negative component would
    # pass the comparison against 1e-14 as well
    assert abs(rot1[0][0]) < 1e-14
    assert abs(rot1[0][2]) < 1e-14
    rot2 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=[[-3, .6, .1]], axis=[-3, .6, .1])
    assert abs(rot2[0][0]) < 1e-14
    assert abs(rot2[0][2]) < 1e-14
    sq2 = np.sqrt(2)
    # rotation to 45deg about x
    points = [[0, 0, 1], [1, 0, 0], [1, 1, 0]]
    rot3 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=points, axis=[0, 1, 1])
    assert np.allclose(rot3[0], [0, 1/sq2, 1/sq2])
    assert np.allclose(rot3[1], [1, 0, 0])
    assert np.allclose(rot3[2], [1, 1/sq2, -1/sq2])
    # rotation to 45deg about y
    points = [[0, 0, 1], [1, 0, 0], [0, -1, 0]]
    rot4 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=points, axis=[1, 0, 1])
    assert np.allclose(rot4[0], [-.5, 1/sq2, .5])
    assert np.allclose(rot4[1], [.5, 1/sq2, -.5])
    assert np.allclose(rot4[2], [1/sq2, 0, 1/sq2])
    # Visualization
    # plt, Arrow3D = setup_mpl()
    # fig = plt.figure(figsize=(10,10))
    # ax = fig.add_subplot(111, projection='3d')
    # for vec in points:
    #    u,v,w = vec
    #    a = Arrow3D([0,u],[0,v],[0,w], mutation_scale=20,
    #                lw=1, arrowstyle="-|>", color="k")
    #    ax.add_artist(a)
    # for vec in rot4:
    #    u,v,w = vec
    #    a = Arrow3D([0,u],[0,v],[0,w], mutation_scale=20, lw=1,
    #                arrowstyle="-|>", color="b")
    #    ax.add_artist(a)
    # radius=1
    # ax.set_xlabel('X')
    # ax.set_ylabel('Y')
    # ax.set_zlabel('Z')
    # ax.set_xlim(-radius*1.5, radius*1.5)
    # ax.set_ylim(-radius*1.5, radius*1.5)
    # ax.set_zlim(-radius*1.5, radius*1.5)
    # plt.tight_layout()
    # plt.show()
    # rotation to -90deg about z
    points = [[0, 0, 1], [1, 0, 1], [1, -1, 0]]
    rot4 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=points, axis=[1, 0, 0])
    assert np.allclose(rot4[0], [0, 0, 1])
    assert np.allclose(rot4[1], [0, 1, 1])
    assert np.allclose(rot4[2], [1, 1, 0])
    # negative axes
    # In this case, everything is rotated in the y-z plane
    # (this case is not physical for tomography)
    points = [[0, 0, 1], [1, 0, 0], [1, -1, 0]]
    rot4 = odtbrain._alg3d_bppt.rotate_points_to_axis(
        points=points, axis=[0, -1, 0])
    assert np.allclose(rot4[0], [0, 0, -1])
    assert np.allclose(rot4[1], [1, 0, 0])
    assert np.allclose(rot4[2], [1, 1, 0])
def test_rotation_matrix_from_point():
    """
    `rotation_matrix_from_point` generates a matrix that rotates a point at
    [0,0,1] to the position of the argument of the method.
    """
    sqrt2 = np.sqrt(2)
    inv = 1 / sqrt2
    # identity: [0, 0, 1] maps onto itself, arbitrary vectors are unchanged
    mat = odtbrain._alg3d_bppt.rotation_matrix_from_point([0, 0, 1])
    for vec in ([1, 2, 3], [-3, .5, -.6]):
        assert np.allclose(np.dot(mat, vec), vec)
    # simple tilt towards +y
    mat = odtbrain._alg3d_bppt.rotation_matrix_from_point([0, 1, 1])
    assert np.allclose(np.dot(mat, [0, 0, 1]), [0, inv, inv])
    assert np.allclose(np.dot(mat, [0, 1, 1]), [0, sqrt2, 0])
    assert np.allclose(np.dot(mat, [1, 0, 0]), [1, 0, 0])
    # target point with a negative component
    mat = odtbrain._alg3d_bppt.rotation_matrix_from_point([-1, 1, 0])
    assert np.allclose(np.dot(mat, [1, 0, 0]), [0, 0, -1])
    assert np.allclose(np.dot(mat, [0, 1, 1]), [0, sqrt2, 0])
    assert np.allclose(np.dot(mat, [0, -inv, -inv]), [0, -1, 0])
    assert np.allclose(np.dot(mat, [0, inv, -inv]), [-1, 0, 0])
    assert np.allclose(np.dot(mat, [0, -inv, inv]), [1, 0, 0])
def setup_mpl():
    """Import matplotlib lazily and build a 3D-capable arrow patch class.

    Returns the pylab module and the `Arrow3D` class so callers can draw
    direction arrows in a 3D axes without importing matplotlib at module
    load time.
    """
    import matplotlib.pylab as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers 3D projection)
    from matplotlib.patches import FancyArrowPatch
    from mpl_toolkits.mplot3d import proj3d
    class Arrow3D(FancyArrowPatch):
        # FancyArrowPatch is 2D-only; this subclass stores 3D endpoints and
        # projects them to 2D at draw time.
        def __init__(self, xs, ys, zs, *args, **kwargs):
            # placeholder 2D positions; real ones are set in draw()
            FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
            self._verts3d = xs, ys, zs
        def draw(self, renderer):
            xs3d, ys3d, zs3d = self._verts3d
            # NOTE(review): relies on `renderer.M` and this proj_transform
            # signature — matplotlib internals; confirm against the
            # installed matplotlib version.
            xs, ys, _zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
            self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
            FancyArrowPatch.draw(self, renderer)
    return plt, Arrow3D
if __name__ == "__main__":
    # Run all tests
    loc = locals()
    for key in list(loc.keys()):
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()
    import scipy.ndimage
    plt, Arrow3D = setup_mpl()
    # Testarray
    N = 50
    A = 41
    proj = np.zeros((N, N, N))
    # Integer division is required here: in Python 3, N/2 is a float and
    # cannot be used as an array index or a linspace sample count.
    proj[N // 2, N // 2, :N // 2] = np.abs(np.linspace(-10, 1, N // 2))
    # By default, the rotational axis in _Back_3D_tilted is the y-axis.
    # Define a rotational axis with a slight offset in x and in z.
    axis = np.array([0.0, 1, .1])
    axis /= np.sqrt(np.sum(axis**2))
    # Now, obtain the 3D angles that are equally distributed on the unit
    # sphere and correspond to the positions of projections that we would
    # measure.
    angles = np.linspace(0, 2*np.pi, A, endpoint=False)
    # The first point in that array will be in the x-z-plane.
    points = odtbrain._alg3d_bppt.sphere_points_from_angles_and_tilt(
        angles, axis)
    # The following steps are exactly those that are used in
    # odtbrain._alg3d_bppt.backpropagate_3d_tilted
    # to perform 3D reconstruction with tilted angles.
    u, v, w = axis
    theta = np.arccos(v)
    # We need three rotations.
    # IMPORTANT:
    # We perform the reconstruction such that the rotational axis
    # is equal to the y-axis! This is easier than implementing a
    # rotation about the rotational axis and tilting with theta
    # before and afterwards.
    # This is the rotation that tilts the projection in the
    # direction of the rotation axis (new y-axis).
    Rtilt = np.array([
        [1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)],
    ])
    out = np.zeros((N, N, N))
    vectors = []
    for ang, pnt in zip(angles, points):
        Rcircle = np.array([
            [np.cos(ang), 0, np.sin(ang)],
            [0, 1, 0],
            [-np.sin(ang), 0, np.cos(ang)],
        ])
        DR = np.dot(Rtilt, Rcircle)
        # pnt are already rotated by R1
        vectors.append(np.dot(Rtilt, pnt))
        # We need to give this rotation the correct offset
        c = 0.5*np.array(proj.shape)
        offset = c-c.dot(DR.T)
        # scipy.ndimage.interpolation was removed in SciPy 1.10;
        # affine_transform lives directly in scipy.ndimage.
        rotated = scipy.ndimage.affine_transform(
            proj, DR, offset=offset,
            mode="constant", cval=0, order=2)
        proj *= 0.98
        out += rotated
    # visualize the axes
    out[0, 0, 0] = np.max(out)  # origin
    out[-1, 0, 0] = np.max(out)/2  # x
    out[0, -1, 0] = np.max(out)/3  # z
    # show arrows pointing at projection directions
    # (should form cone aligned with y)
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    for vec in vectors:
        u, v, w = vec
        a = Arrow3D([0, u], [0, v], [0, w],
                    mutation_scale=20, lw=1, arrowstyle="-|>")
        ax.add_artist(a)
    radius = 1
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_xlim(-radius*1.5, radius*1.5)
    ax.set_ylim(-radius*1.5, radius*1.5)
    ax.set_zlim(-radius*1.5, radius*1.5)
    plt.tight_layout()
    plt.show()
|
RI-imaging/ODTbrain | examples/backprop_from_mie_3d_sphere.py | <filename>examples/backprop_from_mie_3d_sphere.py<gh_stars>10-100
r"""Mie sphere
The *in silico* data set was created with the Mie calculation software
`GMM-field`_. The data consist of a two-dimensional projection of a
sphere with radius :math:`R=14\lambda`,
refractive index :math:`n_\mathrm{sph}=1.006`,
embedded in a medium of refractive index :math:`n_\mathrm{med}=1.0`
onto a detector which is :math:`l_\mathrm{D} = 20\lambda` away from the
center of the sphere.
The package :mod:`nrefocus` must be used to numerically focus
the detected field prior to the 3D backpropagation with ODTbrain.
In :func:`odtbrain.backpropagate_3d`, the parameter `lD` must
be set to zero (:math:`l_\mathrm{D}=0`).
The figure shows the 3D reconstruction from Mie simulations of a
perfect sphere using 200 projections. Missing angle artifacts are
visible along the :math:`y`-axis due to the :math:`2\pi`-only
coverage in 3D Fourier space.
.. _`GMM-field`: https://code.google.com/p/scatterlib/wiki/Nearfield
"""
import matplotlib.pylab as plt
import nrefocus
import numpy as np
import odtbrain as odt
from example_helper import load_data
if __name__ == "__main__":
    # Load the simulated Mie field and the simulation parameters.
    Ex, cfg = load_data("mie_3d_sphere_field.zip",
                        f_sino_imag="mie_sphere_imag.txt",
                        f_sino_real="mie_sphere_real.txt",
                        f_info="mie_info.txt")
    # Manually set number of angles:
    A = 200
    print("Example: Backpropagation from 3D Mie scattering")
    print("Refractive index of medium:", cfg["nm"])
    print("Measurement position from object center:", cfg["lD"])
    print("Wavelength sampling:", cfg["res"])
    print("Number of angles for reconstruction:", A)
    print("Performing backpropagation.")
    # Reconstruction angles
    angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
    # Perform focusing: propagate the detected field back from the
    # detector plane (lD) to the rotation center before reconstruction.
    Ex = nrefocus.refocus(Ex,
                          d=-cfg["lD"]*cfg["res"],
                          nm=cfg["nm"],
                          res=cfg["res"],
                          )
    # Create sinogram: the sphere is the same from every direction, so one
    # projection is tiled A times.
    u_sin = np.tile(Ex.flat, A).reshape(A, int(cfg["size"]), int(cfg["size"]))
    # Apply the Rytov approximation
    u_sinR = odt.sinogram_as_rytov(u_sin)
    # Backpropagation (lD=0 because the field was numerically refocused
    # above; see module docstring)
    fR = odt.backpropagate_3d(uSin=u_sinR,
                              angles=angles,
                              res=cfg["res"],
                              nm=cfg["nm"],
                              lD=0,
                              padfac=2.1,
                              save_memory=True)
    # RI computation: convert the object function to refractive index
    nR = odt.odt_to_ri(fR, cfg["res"], cfg["nm"])
    # Plotting
    fig, axes = plt.subplots(2, 3, figsize=(8, 5))
    axes = np.array(axes).flatten()
    # field
    axes[0].set_title("Mie field phase")
    axes[0].set_xlabel("detector x")
    axes[0].set_ylabel("detector y")
    axes[0].imshow(np.angle(Ex), cmap="coolwarm")
    axes[1].set_title("Mie field amplitude")
    axes[1].set_xlabel("detector x")
    axes[1].set_ylabel("detector y")
    axes[1].imshow(np.abs(Ex), cmap="gray")
    # line plot through the center along each axis
    axes[2].set_title("line plots")
    axes[2].set_xlabel("distance [px]")
    axes[2].set_ylabel("real refractive index")
    center = int(cfg["size"] / 2)
    x = np.arange(cfg["size"]) - center
    axes[2].plot(x, nR[:, center, center].real, label="x")
    axes[2].plot(x, nR[center, center, :].real, label="z")
    axes[2].plot(x, nR[center, :, center].real, label="y")
    axes[2].legend(loc=4)
    axes[2].set_xlim((-center, center))
    dn = abs(cfg["nsph"] - cfg["nm"])
    axes[2].set_ylim((cfg["nm"] - dn / 10, cfg["nsph"] + dn))
    axes[2].ticklabel_format(useOffset=False)
    # cross sections
    axes[3].set_title("RI reconstruction\nsection at x=0")
    axes[3].set_xlabel("z")
    axes[3].set_ylabel("y")
    axes[3].imshow(nR[center, :, :].real)
    axes[4].set_title("RI reconstruction\nsection at y=0")
    axes[4].set_xlabel("x")
    axes[4].set_ylabel("z")
    axes[4].imshow(nR[:, center, :].real)
    axes[5].set_title("RI reconstruction\nsection at z=0")
    axes[5].set_xlabel("y")
    axes[5].set_ylabel("x")
    axes[5].imshow(nR[:, :, center].real)
    plt.tight_layout()
    plt.show()
|
karthik-r-rao/parallel-computing-it301 | IsingAnnealing/plot.py | <reponame>karthik-r-rao/parallel-computing-it301<gh_stars>0
import matplotlib.pyplot as plt
file1 = open("results.txt", "r")
file1 = file1.readlines()
vertices = []
s_times = []
p_times = []
for line in file1:
line = line.split()
if line[0][0] == 'S':
s_times.append(float(line[2]))
vertices.append(int(line[1]))
elif line[0][0] == 'P':
p_times.append(float(line[2]))
plt.scatter(vertices, s_times, label="sequential program")
plt.scatter(vertices, p_times, label="parallel program")
plt.title('Time taken for max-cut via Ising Annealing')
plt.legend()
plt.xlabel('Number of vertices')
plt.ylabel('Time taken in seconds')
plt.show() |
karthik-r-rao/parallel-computing-it301 | IsingAnnealing/check.py | print('\nStarting checker script...')
file1 = open("results.txt", "r")
file1 = file1.readlines()
vertices = []
s_init = []
s_final = []
p_init = []
p_final = []
for line in file1:
line = line.split()
if line[0][0] == 'S':
s_init.append(float(line[3]))
s_final.append(float(line[4]))
vertices.append(int(line[1]))
elif line[0][0] == 'P':
p_init.append(float(line[3]))
p_final.append(float(line[4]))
correct = 0
for i in range(len(vertices)):
if s_init[i] == p_init[i] and s_final[i] == p_final[i]:
correct+=1
print(f'{correct} out of {len(vertices)} are correct') |
Sciguy324/Maze-Game-Python | Tilemap Maker/Tilemap Builder.py | <filename>Tilemap Maker/Tilemap Builder.py
from tkinter import *
from tkinter.messagebox import *
from tkinter.filedialog import *
from ast import *
from shutil import copy2
from os import path
import json
def build_matrix(width, height):
    """Return a `height` x `width` matrix (list of rows) filled with zeros.

    Each row is an independent list.  The previous implementation appended
    the SAME row object `height` times, so editing one cell changed the
    entire column (classic mutable-aliasing bug).
    """
    return [[0] * width for _ in range(height)]
def add_row(matrix):
    """Return a new matrix with one extra row of zeros appended.

    The input matrix is not modified; row width is taken from the
    first existing row.
    """
    return matrix + [[0] * len(matrix[0])]
def add_column(matrix):
    """Return a copy of the matrix with a zero appended to every row.

    Rows of the result are fresh lists, so the input is left untouched.
    """
    return [row + [0] for row in matrix]
def delete_row(matrix):
    """Remove the last row of the matrix in place and return it."""
    matrix.pop()
    return matrix
def delete_column(matrix):
    """Return the matrix without its first column.

    The previous implementation did `del matrix[0][0]`, which removes the
    element from the FIRST row only; it appeared to work only when all
    rows aliased the same list object.  Building fresh rows removes the
    column correctly whether or not rows are aliased.
    """
    return [row[1:] for row in matrix]
class Dialog(Toplevel):
    """Base class for modal tkinter dialogs.

    Subclasses override `body` (build the widgets, return the widget to
    focus), `validate` (return truthy if input is acceptable) and `apply`
    (store the outcome, conventionally in `self.result`).  Construction
    blocks until the dialog is closed; afterwards `self.result` holds the
    outcome (or None if cancelled).
    """
    def __init__(self, parent, title=None, text="", args=None):
        Toplevel.__init__(self, parent)
        # keep the dialog on top of its parent window
        self.transient(parent)
        if title:
            self.title(title)
        self.iconbitmap('assets/hammer.ico')
        self.parent = parent
        # populated by apply(); stays None when the dialog is cancelled
        self.result = None
        body = Frame(self)
        self.initial_focus = self.body(body, text, args)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        # make the dialog modal: route all events here
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        # closing via the window manager acts like Cancel
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # position the dialog slightly offset from the parent window
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50, parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        # block until the dialog window is destroyed
        self.wait_window(self)
    def body(self, master, txt, args=None):
        '''Override this function'''
        pass
    def buttonbox(self):
        # standard OK/Cancel row; Return and Escape are keyboard shortcuts
        box = Frame(self)
        w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
        w.pack(side=LEFT, padx=5, pady=5)
        w = Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=LEFT, padx=5, pady=5)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    def ok(self, event=None):
        # reject and refocus if validation fails; otherwise apply and close
        if not self.validate():
            self.initial_focus.focus_set()
            return
        self.withdraw()
        self.update_idletasks()
        self.apply()
        self.cancel()
    def cancel(self, event=None):
        # hand focus back to the parent before tearing the dialog down
        self.parent.focus_set()
        self.destroy()
    def validate(self):
        '''Override this function'''
        pass
    def apply(self):
        '''Override this function'''
        pass
class EnterNumber(Dialog):
    """Modal prompt for a single integer; the outcome lands in `self.result`."""
    def body(self, master, txt, args=None):
        Label(master, text=txt).grid(row=0)
        self.entry = Entry(master)
        self.entry.grid(row=0, column=1)
        # give the entry field initial keyboard focus
        return self.entry
    def _read(self):
        # raises ValueError when the field is not an integer
        return int(self.entry.get())
    def validate(self):
        try:
            self.result = self._read()
            return 1
        except ValueError:
            showwarning(
                "Invalid Input",
                "Please enter a number."
            )
            return 0
    def apply(self):
        self.result = self._read()
class Enter2Numbers(Dialog):
    """Modal prompt for two integers; `self.result` becomes an (x, y) tuple."""
    def body(self, master, txt, args=None):
        Label(master, text=txt).grid(row=0)
        self.entry_x = Entry(master)
        self.entry_y = Entry(master)
        self.entry_x.grid(row=0, column=1)
        self.entry_y.grid(row=0, column=2)
        # initial keyboard focus goes to the first field
        return self.entry_x
    def _read(self):
        # raises ValueError if either field is not an integer
        return (int(self.entry_x.get()), int(self.entry_y.get()))
    def validate(self):
        try:
            self.result = self._read()
            return 1
        except ValueError:
            showwarning(
                "Invalid Input",
                "Please enter a number."
            )
            return 0
    def apply(self):
        self.result = self._read()
class EnterText(Dialog):
    """Modal prompt for a single line of free text; result is a str."""
    def body(self, master, txt, args=None):
        Label(master, text=txt).grid(row=0)
        self.entry = Entry(master)
        self.entry.grid(row=0, column=1)
        # give the entry field initial keyboard focus
        return self.entry
    def validate(self):
        # Entry.get() always returns a str, so str() cannot raise here.
        # The former try/except ValueError branch (which also showed the
        # wrong "Please enter a number." warning) was unreachable dead code.
        self.result = str(self.entry.get())
        return 1
    def apply(self):
        self.result = str(self.entry.get())
class EditLoadDestination(Dialog):
    """Dialog for editing a load destination: a level name plus tile (x, y)."""
    def body(self, master, txt=None, args=None):
        Label(master, text="Tile X: ").grid(row=0, column=0)
        Label(master, text="Tile Y: ").grid(row=0, column=2)
        Label(master, text="Level : ").grid(row=1, column=0)
        self.entry_x = Entry(master)
        self.entry_y = Entry(master)
        self.entry_level = Entry(master)
        self.entry_x.grid(row=0, column=1)
        self.entry_y.grid(row=0, column=3)
        self.entry_level.grid(row=1, column=1)
        if args:
            # args is [level, (x, y)] — pre-populate the fields for editing
            self.entry_x.insert(0, args[1][0])
            self.entry_y.insert(0, args[1][1])
            self.entry_level.insert(0, args[0])
    def _read(self):
        # ValueError propagates when the coordinates are not integers
        return [str(self.entry_level.get()),
                (int(self.entry_x.get()), int(self.entry_y.get()))]
    def validate(self):
        try:
            self.result = self._read()
            return 1
        except ValueError:
            showwarning(
                "Invalid Input",
                "Please enter coordinates and a level to load."
            )
            return 0
    def apply(self):
        self.result = self._read()
class EditTileIds(Dialog):
    """Scrollable dialog with one entry per tile image to edit its numeric ID."""
    def body(self, master, txt, args=None):
        self.vbar = Scrollbar(master)
        self.vbar.grid(row=0, column=1, sticky=N+S)
        self.canvas = Canvas(master, height=256)
        self.canvas.grid(row=0, column=0)
        self.canvas.grid_propagate(False)
        self.vbar.config(command=self.canvas.yview)
        self.vbar.activate("slider")
        self.entry_list = []
        # one label/entry pair per tile image, stacked 20px apart
        for line, (image, tile_id) in enumerate(App.tile_ids.items(), start=1):
            label = Label(self.canvas, text=image)
            entry = Entry(self.canvas)
            entry.insert(0, str(tile_id))
            self.entry_list.append(entry)
            self.canvas.create_window(80, 20 * line + 20, window=label)
            self.canvas.create_window(256, 20 * line + 20, window=entry)
        self.canvas.config(scrollregion=self.canvas.bbox("all"),
                           yscrollcommand=self.vbar.set)
    def _collect(self):
        # entries were created in App.tile_ids iteration order, so zipping
        # re-associates each entry with its image name
        return {image: int(entry.get())
                for (image, _), entry in zip(App.tile_ids.items(),
                                             self.entry_list)}
    def validate(self):
        try:
            self.result = self._collect()
            return 1
        except ValueError:
            showwarning("Invalid Input", "Image IDs must be an integer.")
            return 0
    def apply(self):
        self.result = self._collect()
class EditDecoIds(Dialog):
    """Dialog listing every decoration image beside an editable numeric ID.

    Result: dict mapping image path (plus the 0 sentinel) -> int ID,
    or None on cancel/invalid input.
    """

    def body(self, master, txt, args=None):
        """Build a scrollable label/entry row per App.deco_ids entry."""
        self.vbar = Scrollbar(master)
        self.vbar.grid(row=0, column=1, sticky=N+S)
        self.canvas = Canvas(master, height=256)
        self.canvas.grid(row=0, column=0)
        self.canvas.grid_propagate(False)
        self.vbar.config(command=self.canvas.yview)
        self.vbar.activate("slider")
        self.entry_list = []
        # One row per entry (including the 0 sentinel), preserving dict order
        # so entry_list stays index-aligned with App.deco_ids.
        for y, (image, deco_id) in enumerate(App.deco_ids.items(), start=1):
            label = Label(self.canvas, text=image)
            entry = Entry(self.canvas)
            entry.insert(0, str(deco_id))
            self.entry_list.append(entry)
            self.canvas.create_window(80, 20 * y + 20, window=label)
            self.canvas.create_window(256, 20 * y + 20, window=entry)
        self.canvas.config(scrollregion=self.canvas.bbox("all"), yscrollcommand=self.vbar.set)

    def _collect(self):
        """Return {image: int(entry value)} for every row.

        Raises ValueError if any entry is not an integer. The key list is
        built once (the original rebuilt it on every loop iteration).
        """
        images = list(App.deco_ids)
        return {images[i]: int(entry.get()) for i, entry in enumerate(self.entry_list)}

    def validate(self):
        """Accept only when every ID entry parses as an integer."""
        try:
            self.result = self._collect()
            return 1
        except ValueError:
            showwarning("Invalid Input", "Decoration IDs must be an integer.")
            return 0

    def apply(self):
        # validate() already succeeded, so _collect() cannot raise here.
        self.result = self._collect()
class EditColliderList(Dialog):
    """Dialog with one toggle button per tile; toggled tiles become colliders.

    Input ``args`` appears to be a list of zero-based tile *indices* for the
    currently-colliding tiles, while the result is a list of tile *IDs* —
    NOTE(review): this asymmetry (and using ``i.get()``, a tile ID, as a list
    index below) only works while each tile's ID equals its position in the
    table; confirm IDs are kept sequential.
    """
    def body(self, master, txt, args=None):
        #print("In:", args)
        self.vbar = Scrollbar(master)
        self.vbar.grid(row = 0, column=1, sticky=N+S)
        self.canvas = Canvas(master, height=256, width=72)
        self.canvas.grid(row=0, column=0)
        self.canvas.grid_propagate(False)
        self.vbar.config(command=self.canvas.yview)
        self.vbar.activate("slider")
        y = 0
        self.button_list = []
        self.value_list = []
        # Skip the 0 sentinel entry; one checkbutton per real tile image.
        tile_list = list(App.tile_ids.items())
        del tile_list[0]
        for i in tile_list:
            y += 1
            #label = Label(self.canvas, text=i[0])
            # onvalue is the tile's numeric ID; offvalue 0 means "not a collider".
            self.value_list.append(IntVar(master))
            self.button_list.append(Checkbutton(self.canvas, variable=self.value_list[y-1], onvalue=i[1], offvalue=0, image=App.translate_f2tk[i[0]], indicatoron=False, highlightthickness=3, bg='gray'))
            #print("Checking index", y-1)
            # Pre-check buttons whose index appears in args (existing colliders).
            if y-1 in args:
                self.value_list[y-1].set(i[1])
                #print("Found {} with ID {}, given value {}".format(i[0], y-1, i[1]))
            self.canvas.create_window(0, 80 * y + 20, window=self.button_list[len(self.button_list) - 1])
        self.canvas.config(scrollregion=self.canvas.bbox("all"), yscrollcommand=self.vbar.set)
    def validate(self):
        """Collect the IDs of all checked tiles; 0 (unchecked) entries are dropped."""
        try:
            out = []
            tile_list = list(App.tile_ids.items())
            del tile_list[0]
            for i in self.value_list:
                #print("The image {} with ID {} was found in the list".format(tile_list[i.get()][0], i.get()))
                # NOTE(review): i.get() is a tile ID used as a list index — see class docstring.
                t_id = tile_list[i.get()][1]
                if t_id != 0:
                    out.append(t_id)
            self.result = list(out)
            return 1
        except ValueError:
            # NOTE(review): message mentions "Decoration IDs" in a collider dialog.
            showwarning("Invalid Input", "Decoration IDs must be an integer.")
            return 0
    def apply(self):
        """Dialog commit hook: same collection as validate()."""
        out = []
        tile_list = list(App.tile_ids.items())
        del tile_list[0]
        for i in self.value_list:
            t_id = tile_list[i.get()][1]
            if t_id != 0:
                out.append(t_id)
        self.result = list(out)
class EditCatagories(Dialog):
    # NOTE(review): the misspelling "Catagories" is kept — external callers
    # refer to the class by this name.
    def body(self, master, txt, args=None):
        """Build the palette-group editor.

        ``args`` is the group dict ({group name: [image paths]}); it is held
        by reference and updated in place as the user switches groups.
        """
        self.selected_group = StringVar(master)
        self.selected_group.set("All")
        self.group_dict = args
        def new_group():
            # Prompt for a name, create an empty group, rebuild the panels.
            new_name = EnterText(master, title="New Group", text="Enter New Group Name:").result
            if new_name == None:
                return
            self.group_dict[new_name] = []
            rebuild_selection()
        def switch_group(event, something, var_mode):
            # Trace callback: persist the outgoing group's selection before
            # rebuilding the panels for the newly selected group.
            # Build list of tiles in this group
            tile_out = []
            tile_list = list(App.tile_ids.items())
            del tile_list[0]
            for i in self.tile_value_list:
                # NOTE(review): i.get() (a tile ID) is used as a list index;
                # this relies on IDs matching table positions — confirm.
                t_id = tile_list[i.get()][0]
                # If the current value was not previously (here defaulting to missing.png), don't include that in the output
                if t_id != 'tiles/missing.png':
                    tile_out.append(t_id)
            # Build list of decos in this group
            deco_out = []
            deco_list = list(App.deco_ids.items())
            del deco_list[0]
            for i in self.deco_value_list:
                d_id = deco_list[i.get()][0]
                # If the current value was not previously (here defaulting to missing.png), don't include that in the output
                if d_id != 'tiles/box.png':
                    deco_out.append(d_id)
            self.group_dict[self.current_group] = tile_out + deco_out
            self.current_group = self.selected_group.get()
            rebuild_selection()
        self.selected_group.trace('w', switch_group)
        def build_selection():
            # Create the group menu plus the two scrollable checkbutton
            # panels (tiles and decorations) for the current group.
            self.current_group = self.selected_group.get()
            options = list(i[0] for i in self.group_dict.items())
            self.groups_menu = OptionMenu(master, self.selected_group, *options).grid(row=0, column=0)
            self.new_group_button = Button(master, text="New Group", command=new_group)
            self.new_group_button.grid(row=0, column=1)
            self.tile_vbar = Scrollbar(master)
            self.tile_vbar.grid(row=1, column=2, sticky=N+S)
            self.tile_canvas = Canvas(master, height=256, width=72)
            self.tile_canvas.grid(row=1, column=0)
            self.tile_canvas.grid_propagate(False)
            self.tile_vbar.config(command=self.tile_canvas.yview)
            self.tile_vbar.activate("slider")
            self.deco_vbar = Scrollbar(master)
            self.deco_vbar.grid(row=1, column=3, sticky=N+S)
            self.deco_canvas = Canvas(master, height=256, width=72)
            self.deco_canvas.grid(row=1, column=1)
            self.deco_canvas.grid_propagate(False)
            self.deco_vbar.config(command=self.deco_canvas.yview)
            self.deco_vbar.activate("slider")
            # Tile selection panel
            y = 0
            self.tile_button_list = []
            self.tile_value_list = []
            tile_list = list(App.tile_ids.items())
            del tile_list[0]
            for i in tile_list:
                y += 1
                self.tile_value_list.append(IntVar(master))
                self.tile_button_list.append(Checkbutton(self.tile_canvas, variable=self.tile_value_list[y-1], onvalue=i[1], offvalue=0, image=App.translate_f2tk[i[0]], indicatoron=False, highlightthickness=3, bg='gray'))
                # This code determines if a button is already set, and configures it accordingly
                if i[0] in self.group_dict[self.selected_group.get()]:
                    self.tile_value_list[y-1].set(i[1])
                self.tile_canvas.create_window(0, 80 * y + 20, window=self.tile_button_list[len(self.tile_button_list) - 1])
            self.tile_canvas.config(scrollregion=self.tile_canvas.bbox("all"), yscrollcommand=self.tile_vbar.set)
            # Deco selection panel
            y = 0
            self.deco_button_list = []
            self.deco_value_list = []
            deco_list = list(App.deco_ids.items())
            del deco_list[0]
            for i in deco_list:
                y += 1
                self.deco_value_list.append(IntVar(master))
                self.deco_button_list.append(Checkbutton(self.deco_canvas, variable=self.deco_value_list[y-1], onvalue=i[1], offvalue=0, image=App.translate_f2tk[i[0]], indicatoron=False, highlightthickness=3, bg='gray'))
                # This code determines if a button is already set, and configures it accordingly
                if i[0] in self.group_dict[self.selected_group.get()]:
                    self.deco_value_list[y-1].set(i[1])
                self.deco_canvas.create_window(0, 80 * y + 20, window=self.deco_button_list[len(self.deco_button_list) - 1])
            self.deco_canvas.config(scrollregion=self.deco_canvas.bbox("all"), yscrollcommand=self.deco_vbar.set)
        def rebuild_selection():
            # Tear down and recreate both panels (e.g. after a group switch).
            self.tile_canvas.destroy()
            self.deco_canvas.destroy()
            self.tile_vbar.destroy()
            self.deco_vbar.destroy()
            build_selection()
        build_selection()
    def validate(self):
        """Persist the current group's selection and accept the dialog."""
        try:
            # Build list of tiles in this group
            tile_out = []
            tile_list = list(App.tile_ids.items())
            del tile_list[0]
            for i in self.tile_value_list:
                t_id = tile_list[i.get()][0]
                # If the current value was not previously (here defaulting to missing.png), don't include that in the output
                if t_id != 'tiles/missing.png':
                    tile_out.append(t_id)
            # Build list of decos in this group
            deco_out = []
            deco_list = list(App.deco_ids.items())
            del deco_list[0]
            for i in self.deco_value_list:
                d_id = deco_list[i.get()][0]
                # If the current value was not previously (here defaulting to missing.png), don't include that in the output
                if d_id != 'tiles/box.png':
                    deco_out.append(d_id)
            self.group_dict[self.current_group] = tile_out + deco_out
            self.result = self.group_dict
            return 1
        except ValueError:
            # NOTE(review): showwarning is called with a single positional
            # argument, so this string becomes the *title*, not the message.
            showwarning("An unknown error occured")
            return 0
    def apply(self):
        """Dialog commit hook: same collection as validate()."""
        # Build list of tiles in this group
        tile_out = []
        tile_list = list(App.tile_ids.items())
        del tile_list[0]
        for i in self.tile_value_list:
            t_id = tile_list[i.get()][0]
            # If the current value was not previously (here defaulting to missing.png), don't include that in the output
            if t_id != 'tiles/missing.png':
                tile_out.append(t_id)
        # Build list of decos in this group
        deco_out = []
        deco_list = list(App.deco_ids.items())
        del deco_list[0]
        for i in self.deco_value_list:
            d_id = deco_list[i.get()][0]
            # If the current value was not previously (here defaulting to missing.png), don't include that in the output
            if d_id != 'tiles/box.png':
                deco_out.append(d_id)
        self.group_dict[self.current_group] = tile_out + deco_out
        self.result = self.group_dict
class App:
# Palette state shared by every dialog (keyed off the App class).
tiles = []                 # PhotoImage objects for base-layer tiles
decos = []                 # PhotoImage objects for decoration tiles
translate_tk2f = {0: 0}    # PhotoImage -> image file path (0 sentinel maps to 0)
translate_f2tk = {0: 0}    # image file path -> PhotoImage
load_tiles = {}            # loading-zone overlay images keyed by state index
# Load the tile/deco ID tables and palette groups from the config file,
# creating a minimal default config on first run.
try:
    with open('assets/image_config.config', 'r') as rf:
        tile_ids = literal_eval(rf.readline())
        deco_ids = literal_eval(rf.readline())
        groups = literal_eval(rf.readline())
except FileNotFoundError:
    with open('assets/image_config.config', 'w') as wf:
        # Each table must be on its own line so the readline() calls above
        # can parse the file on the next run — the original omitted the
        # newlines, leaving a single unparseable line on disk.
        wf.write('{0: 0, "tiles/missing.png": 0, "tiles/block.png": 1}\n')
        wf.write('{0: 0, "tiles/box.png": 0}\n')
        wf.write('{"All": ["tiles/missing.png", "tiles/block.png"]}\n')
    tile_ids = {0: 0, "tiles/missing.png": 0, "tiles/block.png": 1}
    deco_ids = {0: 0, "tiles/box.png": 0}
    groups = {"All": ["tiles/missing.png", "tiles/block.png"]}
def __init__(self, master):
master.title("Worldbuilder")
master.iconbitmap('assets/hammer.ico')
master.state('zoomed')
frame = Frame(master)
frame.pack(fill=NONE, expand=0)
saved = IntVar(master) # 0: Not saved, 1: Saved
saved.set(1)
selected_image = IntVar(master) # Currently selected base-layer tile
selected_deco = IntVar(master) # Currently selected decoration tile
selected_load = IntVar(master) # Currently selected loading zone tile
selected_light = IntVar(master) # Currently selected lightmap tile
cursor_mode = IntVar(master) # 0: Regular mode, 1: Pan mode, 2: Busy mode
cursor_mode.set(0)
view_mode = IntVar(master) # 0: View ground layer, 1: View decoration layer, 2: View loading zones
force_grid = IntVar(master) # 0: Do not force grid, 1: Force grid
catagories = StringVar(master)
self.tilemap = build_matrix(16, 9)
self.decomap = build_matrix(16, 9)
self.directory = "no_file"
self.colliders = []
self.loading_zones = {}
self.light_sources = []
self.default_start = (0, 0)
self.copied_load_settings = None
# Frame setup + coordinate indicator setup
self.menu_frame = Frame(frame)
self.menu_frame.pack(side=TOP, anchor=N+W)
self.coords_label = Label(frame, text="¯\_(ツ)_/¯")
self.coords_label.pack(side=BOTTOM, anchor=W)
self.map_frame = Frame(frame, bd=2, relief=SUNKEN, bg="WHITE", width=64*16, height=64*9)
self.map_frame.pack(padx=10, pady=10, side=LEFT, anchor=CENTER, expand=0)
self.tile_frame = Frame(frame, bd=2, relief=SUNKEN)
self.tile_frame.pack(padx=5, pady=5, side=RIGHT, anchor=E, expand=0)
# Additional Options Panel
self.pointer = PhotoImage(file="assets/pointer_cursor.png")
self.mover = PhotoImage(file="assets/movement_cursor.png")
self.forcegrid = PhotoImage(file="assets/grid.png")
self.menu_selection = Radiobutton(self.menu_frame, image=self.pointer, variable=cursor_mode, value=0, indicatoron=0)
self.menu_selection.grid(row=0, column=0)
self.menu_selection = Radiobutton(self.menu_frame, image=self.mover, variable=cursor_mode, value=1, indicatoron=0)
self.menu_selection.grid(row=0, column=1)
self.menu_selection = Checkbutton(self.menu_frame, image=self.forcegrid, variable=force_grid, indicatoron=0, offvalue=0, onvalue=1)
self.menu_selection.grid(row=0, column=2)
self.menu_spacing = Frame(self.menu_frame, width=80, height=40, bd=2)
self.menu_spacing.grid(row=0, column=3)
# Layer control panel initialization
self.ground = PhotoImage(file="assets/ground.png")
self.decoration = PhotoImage(file="assets/decoration.png")
self.loadzone = PhotoImage(file="assets/loading_zone.png")
self.lightmap = PhotoImage(file="assets/lightbulb.png")
self.view_selection = Radiobutton(self.menu_frame, image=self.ground, variable=view_mode, value=0, indicatoron=0)
self.view_selection.grid(row=0, column=4)
self.view_selection = Radiobutton(self.menu_frame, image=self.decoration, variable=view_mode, value=1, indicatoron=0)
self.view_selection.grid(row=0, column=5)
self.view_selection = Radiobutton(self.menu_frame, image=self.loadzone, variable=view_mode, value=2, indicatoron=0)
self.view_selection.grid(row=0, column=6)
self.view_selection = Radiobutton(self.menu_frame, image=self.lightmap, variable=view_mode, value=3, indicatoron=0)
self.view_selection.grid(row=0, column=7)
self.menu_spacing2 = Frame(self.menu_frame, width=80, height=40, bd=2)
self.menu_spacing2.grid(row=0, column=8)
# Category panel initialization
options = list(i[0] for i in App.groups.items())
catagories.set(options[0])
self.groups_menu = OptionMenu(self.menu_frame, catagories, *options)
self.groups_menu.grid(row=0, column=9)
# Changed palette group action
def set_group(event, something, var_mode):
redraw_panels()
catagories.trace('w', set_group)
# Selected image action
def set_cursor_icon(event, something, var_mode):
if cursor_mode.get() == 0:
self.map_canvas.config(cursor="")
elif cursor_mode.get() == 1:
self.map_canvas.config(cursor="fleur")
elif cursor_mode.get() == 2:
self.map_canvas.config(cursor="wait")
else:
pass
cursor_mode.trace('w', set_cursor_icon)
# Selected mode action
def set_view_mode(event, something, var_mode):
if view_mode.get() == 0:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.tile_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.img_vbar.grid(row=0, column=1, sticky=N+S)
elif view_mode.get() == 1:
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.deco_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.deco_vbar.grid(row=0, column=1, sticky=N+S)
elif view_mode.get() == 2:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.load_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.load_vbar.grid(row=0, column=1, sticky=N+S)
else:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.light_vbar.grid(row=0, column=1, sticky=N+S)
redraw_map_canvas()
view_mode.trace('w', set_view_mode)
# Toggled force grid action
def toggle_grid(event, something, var_mode):
redraw_map_canvas()
force_grid.trace('w', toggle_grid)
# Change heading in accordance to whether or not the file is saved
def save_update(event, something, var_mode):
if saved.get() == 1:
master.title("Worldbuilder")
else:
master.title("*Worldbuilder*")
saved.trace('w', save_update)
def img_setup():
'''Function to set up images for tile/deco panels'''
App.translate_tk2f = {0: 0}
App.translate_f2tk = {0: 0}
App.tiles = []
App.decos = []
App.load_tiles = {}
for tile in list(App.tile_ids.items()):
if tile[0] != 0:
img = PhotoImage(file=tile[0]).zoom(64).subsample(16)
App.tiles.append(img)
App.translate_tk2f[img] = tile[0]
App.translate_f2tk[tile[0]] = img
for deco in list(App.deco_ids.items()):
if deco[0] != 0:
img = PhotoImage(file=deco[0]).zoom(64).subsample(16)
App.decos.append(img)
App.translate_tk2f[img] = deco[0]
App.translate_f2tk[deco[0]] = img
index = -1
for load in ["assets/inactive_zone.png", "assets/reserved_zone.png", "assets/active_zone.png"]:
index += 1
App.load_tiles[index] = PhotoImage(file=load).zoom(64).subsample(16)
def tile_panel_setup():
self.tile_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.tile_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.tile_canvas.grid_propagate(False)
self.img_vbar = Scrollbar(self.tile_frame)
self.img_vbar.config(command=self.tile_canvas.yview)
self.img_vbar.grid(row=0, column=1, sticky=N+S)
self.img_vbar.activate("slider")
self.tile_x = -1
self.tile_y = 0
index = -1
for tile in App.tiles:
index += 1
if App.translate_tk2f[tile] in App.groups[catagories.get()]:
self.tile_x += 1
if self.tile_x > 2:
self.tile_x = 0
self.tile_y += 1
radiobutton = Radiobutton(self.tile_canvas, image=tile, variable=selected_image, value=index, indicatoron=0)
self.tile_canvas.create_window(self.tile_x * 72 + 36, self.tile_y * 72 + 36, window=radiobutton)
self.tile_canvas.config(scrollregion=self.tile_canvas.bbox("all"), yscrollcommand=self.img_vbar.set)
def deco_panel_setup():
'''Function to set up deco panel'''
self.deco_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.deco_canvas.grid_propagate(False)
self.deco_vbar = Scrollbar(self.tile_frame)
self.deco_vbar.config(command=self.deco_canvas.yview)
self.deco_vbar.activate("slider")
self.deco_x = -1
self.deco_y = 0
index = -1
for deco in App.decos:
self.deco_x += 1
index += 1
if self.deco_x > 2:
self.deco_x = 0
self.deco_y += 1
radiobutton = Radiobutton(self.deco_canvas, image=deco, variable=selected_deco, value=index, indicatoron=0)
self.deco_canvas.create_window(self.deco_x * 72 + 36, self.deco_y * 72 + 36, window=radiobutton)
self.deco_canvas.config(scrollregion=self.deco_canvas.bbox("all"), yscrollcommand=self.deco_vbar.set)
def load_panel_setup():
'''Function to set up loading zone panel'''
self.load_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.load_canvas.grid_propagate(False)
self.load_vbar = Scrollbar(self.tile_frame)
self.load_vbar.config(command=self.load_canvas.yview)
self.load_vbar.activate("slider")
self.load_x = -1
self.load_y = 0
index = -1
options = ["assets/delete_zone.png", "assets/new_zone.png", "assets/set_zone_destination.png", "assets/copy_zone_settings.png", "assets/paste_zone_settings.png"]
self.load_imgs = []
for i in options:
self.load_imgs.append(PhotoImage(file=i).zoom(64).subsample(16))
for img in self.load_imgs:
self.load_x += 1
index += 1
if self.load_x > 2:
self.load_x = 0
self.load_y += 1
radiobutton = Radiobutton(self.load_canvas, image=img, variable=selected_load, value=index, indicatoron=0)
self.load_canvas.create_window(self.load_x * 72 + 36, self.load_y * 72 + 36, window=radiobutton)
self.load_canvas.config(scrollregion=self.load_canvas.bbox("all"), yscrollcommand=self.load_vbar.set)
def light_panel_setup():
'''Function to set up loading zone panel'''
self.light_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.light_canvas.grid_propagate(False)
self.light_vbar = Scrollbar(self.tile_frame)
self.light_vbar.config(command=self.light_canvas.yview)
self.light_vbar.activate("slider")
self.light_x = -1
self.light_y = 0
index = -1
options = ["assets/delete_light.png", "assets/3x3_light_source.png"]
self.light_imgs = []
for i in options:
self.light_imgs.append(PhotoImage(file=i).zoom(64).subsample(16))
for img in self.light_imgs:
self.light_x += 1
index += 1
if self.light_x > 2:
self.light_x = 0
self.light_y += 1
radiobutton = Radiobutton(self.light_canvas, image=img, variable=selected_light, value=index, indicatoron=0)
self.light_canvas.create_window(self.light_x * 72 + 36, self.light_y * 72 + 36, window=radiobutton)
self.light_canvas.config(scrollregion=self.light_canvas.bbox("all"), yscrollcommand=self.light_vbar.set)
def redraw_panels():
'''Redraw tile/deco panels if needed'''
original_cursor = cursor_mode.get()
cursor_mode.set(2)
master.update()
self.tile_canvas.destroy()
self.deco_canvas.destroy()
self.load_canvas.destroy()
self.light_canvas.destroy()
img_setup()
tile_panel_setup()
deco_panel_setup()
load_panel_setup()
light_panel_setup()
redraw_map_canvas()
cursor_mode.set(original_cursor)
# Actually set up tile and deco panels
img_setup()
tile_panel_setup()
deco_panel_setup()
load_panel_setup()
light_panel_setup()
# Tilemap window setup
def draw_map(matrix):
y = -1
for i in matrix:
y += 1
x = -1
for j in i:
x += 1
if matrix[y][x] != 0:
try:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.translate_f2tk[matrix[y][x]])
except KeyError:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.translate_f2tk['tiles/missing.png'])
print("An error occured while loading tilemap, \"{}\" was not found".format(matrix[y][x]))
def draw_grid():
new_height = 64 * len(self.tilemap)
new_width = 64 * len(self.tilemap[0])
self.map_canvas.config(scrollregion=(0, 0, new_width, new_height))
for i in range(0, new_height + 1):
self.map_canvas.create_line(0, 64 * i, new_width, 64 * i)
for j in range(0, new_width + 1):
self.map_canvas.create_line(64 * j, 0, 64 * j, new_height)
def redraw_map_canvas():
self.map_canvas.delete("all")
if force_grid.get() == 0:
draw_grid()
draw_map(self.tilemap)
draw_map(self.decomap)
if view_mode.get() == 2:
for i in list(self.loading_zones.items()):
if i[1] != []:
self.map_canvas.create_image((32 + 64 * i[0][0], 32 + 64 * i[0][1]), image=App.load_tiles[2])
else:
self.map_canvas.create_image((32 + 64 * i[0][0], 32 + 64 * i[0][1]), image=App.load_tiles[0])
elif view_mode.get() == 3:
for i, j in self.light_sources:
self.map_canvas.create_image((32 + 64 * i, 32 + 64 * j), image=self.light_imgs[1])
if force_grid.get() == 1:
draw_grid()
self.start = None
def mark_start(event):
'''Marks starting position of mouse for canvas dragging'''
self.start = (event.x, event.y)
callback(event)
def callback(event):
saved.set(0)
if cursor_mode.get() == 0:
# Canvas painting mode
try:
dx = self.map_canvas.xview()[0] * len(self.tilemap[0])
dy = self.map_canvas.yview()[0] * len(self.tilemap)
x = max(int(dx + event.x / 64), 0)
y = max(int(dy + event.y / 64), 0)
# Regular base-layer tile mode
if view_mode.get() == 0:
map_of_tiles = list(self.tilemap)
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.tiles[selected_image.get()])
row = list(map_of_tiles[y])
row[x] = App.translate_tk2f[App.tiles[selected_image.get()]]
map_of_tiles[y] = row
# Decoration layer mode
elif view_mode.get() == 1:
map_of_tiles = list(self.decomap)
row = list(map_of_tiles[y])
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.decos[selected_deco.get()])
if selected_deco.get() != 0:
row[x] = App.translate_tk2f[App.decos[selected_deco.get()]]
else:
row[x] = 0
map_of_tiles[y] = row
# Loading zone mode
elif view_mode.get() == 2:
# Delete loading zone
if selected_load.get() == 0:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[0])
try:
del self.loading_zones[(x, y)]
except:
pass
# Add loading zone
elif selected_load.get() == 1:
self.loading_zones[(x, y)] = []
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[1])
# Configure loading zone
elif selected_load.get() == 2:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[2])
if (x, y) in self.loading_zones:
new_loading_zone = EditLoadDestination(master, title="Edit Destination", args=self.loading_zones[(x, y)]).result
if new_loading_zone != None:
self.loading_zones[(x, y)] = new_loading_zone
redraw_map_canvas()
# Copy loading zone settings
elif selected_load.get() == 3:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[3])
if (x, y) in self.loading_zones:
self.copied_load_settings = self.loading_zones[(x, y)]
# Paste loading zone settings
elif selected_load.get() == 4:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[4])
if (x, y) in self.loading_zones and self.copied_load_settings != None:
self.loading_zones[(x, y)] = self.copied_load_settings
else:
print("Unknown instruction")
# Lightmap mode
else:
# Delete light source
if selected_light.get() == 0:
try:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.light_imgs[0])
self.light_sources.remove((x, y))
except ValueError:
pass
# Add basic 3x3 light source
if selected_light.get() == 1:
if not (x, y) in self.light_sources:
self.light_sources.append((x, y))
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.light_imgs[1])
except IndexError:
return
if view_mode.get() == 0:
self.tilemap = map_of_tiles
elif view_mode.get() == 1:
self.decomap = map_of_tiles
else:
pass
elif cursor_mode.get() == 1:
# Canvas dragging mode
if self.start != None:
self.map_canvas.scan_mark(self.start[0], self.start[1])
self.start = None
self.map_canvas.scan_dragto(event.x, event.y, gain=1)
else:
# Do nothing, usually to lock-out input while canvas is reloading.
pass
def redraw(event):
redraw_map_canvas()
def set_coords_label(event):
dx = self.map_canvas.xview()[0] * len(self.tilemap[0])
dy = self.map_canvas.yview()[0] * len(self.tilemap)
x = int(dx + event.x / 64)
y = int(dy + event.y / 64)
self.coords_label.config(text="x: {}, y: {}".format(x, y))
self.map_canvas = Canvas(self.map_frame, width=64*16, height=64*9, bg="WHITE", bd=0)
self.map_canvas.grid(row=0, column=0)
self.map_vbar = Scrollbar(self.map_frame)
self.map_vbar.config(command=self.map_canvas.yview)
self.map_vbar.grid(row=0, column=1, sticky=N+S)
self.map_vbar.activate("slider")
self.map_hbar = Scrollbar(self.map_frame, orient=HORIZONTAL)
self.map_hbar.config(command=self.map_canvas.xview)
self.map_hbar.grid(row=1, column=0, sticky=E+W)
self.map_hbar.activate("slider")
self.map_canvas.config(scrollregion=(0, 0, 64*16, 64*9), xscrollcommand=self.map_hbar.set, yscrollcommand=self.map_vbar.set)
for i in range(0, 9 + 1):
self.map_canvas.create_line(0, 64 * i, 64 * 16, 64 * i)
for j in range(0, 16 + 1):
self.map_canvas.create_line(64 * j, 0, 64 * j, 64 * 9)
self.map_canvas.bind("<Button-1>", callback)
self.map_canvas.bind("<ButtonPress-1>", mark_start)
self.map_canvas.bind("<ButtonRelease-1>", redraw)
self.map_canvas.bind("<B1-Motion>", callback)
self.map_canvas.bind("<Motion>", set_coords_label)
# Menu bar function setup
def open_file():
    """Prompt for a .tilemap file and load all six level sections from it.

    File layout (one literal_eval-able line each): tilemap, decomap,
    colliders, loading zones, light sources, default spawn.
    """
    f = askopenfilename(filetypes=[("Tilemap", "*.tilemap")], defaultextension=[("Tilemap", "*.tilemap")])
    # NOTE(review): identity comparison with a str literal is unreliable;
    # this should be `f == ''` (askopenfilename returns '' on cancel).
    if f is '':
        return
    # Snapshot the current state so the commented-out recovery path below
    # could restore it on a failed load.
    current_tilemap = self.tilemap
    current_decomap = self.decomap
    current_directory = self.directory
    current_colliders = self.colliders
    current_loading_zones = self.loading_zones
    #try:
    with open(f) as rf:
        new_tilemap = literal_eval(rf.readline())
        print("Found {} by {} tilemap".format(len(new_tilemap), len(new_tilemap[0])))
        self.tilemap = new_tilemap
        new_decomap = literal_eval(rf.readline())
        print("Found {} by {} decomap".format(len(new_decomap), len(new_decomap[0])))
        self.decomap = new_decomap
        new_colliders = literal_eval(rf.readline())
        print("Found collision list:", new_colliders)
        self.colliders = new_colliders
        new_loading_zones = literal_eval(rf.readline())
        print("Found loading zone dictionary:", new_loading_zones)
        self.loading_zones = new_loading_zones
        new_light_sources = literal_eval(rf.readline())
        print("Found light source list:", new_light_sources)
        self.light_sources = new_light_sources
        new_default_start = literal_eval(rf.readline())
        print("Found default spawn:", new_default_start)
        self.default_start = new_default_start
        redraw_map_canvas()
        self.directory = f
        saved.set(1)
    # NOTE(review): error recovery is currently disabled — a malformed file
    # raises and leaves self.* partially updated.
    ## except:
    ##     showwarning("File Error", "Error: Could not open file.")
    ##     self.tilemap = current_tilemap
    ##     self.decomap = current_decomap
    ##     self.directory = current_directory
    ##     self.colliders = current_colliders
    ##     self.loading_zones = current_loading_zones
    ##     redraw_map_canvas()
    ##     saved.set(0)
def serialize_level():
    """Serialize the level as six literal_eval-able lines: tilemap,
    decomap, colliders, loading zones, light sources, default spawn —
    the exact format open_file() reads back."""
    return "\n".join(str(section) for section in (
        self.tilemap,
        self.decomap,
        self.colliders,
        self.loading_zones,
        self.light_sources,
        self.default_start,
    ))
def save_file():
    """Save to the current file, or fall through to Save As for new maps."""
    if self.directory == "no_file":
        save_file_as()
    else:
        # with-block guarantees the handle is closed even if write fails.
        with open(self.directory, "w") as f:
            f.write(serialize_level())
        saved.set(1)
def save_file_as():
    """Prompt for a destination, remember it, and save the level there."""
    f = asksaveasfile(mode="w", filetypes=[("Tilemap", "*.tilemap")], defaultextension=[("Tilemap", "*.tilemap")])
    if f is None:
        # User cancelled the dialog.
        return
    self.directory = f.name
    f.write(serialize_level())
    f.close()
    saved.set(1)
def new():
    """Reset the editor to a blank 16x9 map, offering to save unsaved work.

    Fixes two defects: the "Yes, save first" branch used to return without
    ever creating the new map (despite the prompt's "first"), and
    light_sources / default_start from the old map leaked into the new one.
    """
    if saved.get() == 0:
        action = askyesnocancel("Worldbuilder", "Progress is unsaved. Would you like to save first?", icon='warning')
        if action is None:
            # Cancel: abort the whole operation.
            return
        if action:
            # Save, then fall through and create the new map.
            save_file()
    self.tilemap = build_matrix(16, 9)
    self.decomap = build_matrix(16, 9)
    self.directory = "no_file"
    self.colliders = []
    self.loading_zones = {}
    self.light_sources = []       # previously carried over from the old map
    self.default_start = (0, 0)   # previously carried over from the old map
    self.copied_load_settings = None
    redraw_map_canvas()
def tilemap2string(tilemap, ids, spacing):
translated_map = []
used_list = []
rev_ids = dict((v,k) for k,v in ids.items())
for i in range(len(tilemap)):
translated_map.append([ids.get(item, item) for item in tilemap[i]])
for i in translated_map:
for j in i:
if not j in used_list:
used_list.append(j)
used_list.sort()
used = dict((item, rev_ids[item]) for item in used_list)
string_version = "["
for i in range(len(tilemap) - 1):
string_version += str(translated_map[i]) + ",\n" + " " * (spacing + 1)
string_version += str(translated_map[:-1]) + "]"
return string_version, used
def export_file():
    """Export the level as a .json dict: colliders, tilemap, decomap,
    loading zones, lightmap, spawn point and level name (from the filename).
    """
    f = asksaveasfile(mode='w', filetypes=[('JSON File', '*.json')], defaultextension=[('JSON File', '*.json')])
    if f is None:
        # User cancelled the save dialog.
        return
    try:
        export_dict = {}
        # Translate the tilemap and decomap to numerical ID's
        # (cells without a mapping pass through unchanged).
        translated_tilemap = []
        for i in self.tilemap:
            translated_tilemap.append([App.tile_ids.get(item, item) for item in i])
        translated_decomap = []
        for i in self.decomap:
            translated_decomap.append([App.deco_ids.get(item, item) for item in i])
        # Export only colliders that are being used
        # NOTE(review): this appends one entry per occurrence on the map,
        # so used_colliders can contain duplicates — confirm the consumer
        # tolerates that.
        used_colliders = []
        for i in translated_tilemap:
            for j in i:
                if j in self.colliders:
                    used_colliders.append(j)
        export_dict["colliders"] = used_colliders
        # Export tilemap and decomap
        export_dict["tilemap"] = translated_tilemap
        export_dict["decomap"] = translated_decomap
        # Export loading zones
        export_dict["loading_zones"] = []
        for i, j in self.loading_zones.items():
            export_dict["loading_zones"].append({"zone": i,
                                                 "target_level": j[0],
                                                 "target_pos": j[1]
                                                 })
        # Export lightmap, default spawn, and level name
        export_dict["lightmap"] = self.light_sources
        export_dict["spawn"] = self.default_start
        export_dict["name"] = path.splitext(path.basename(f.name))[0]
        # Save dictionary as .json file
        json.dump(export_dict, f)
    except KeyError:
        showwarning("Export Error", "One of the exported images has not been assigned an ID.")
    # NOTE(review): an exception other than KeyError would skip this close.
    f.close()
def add_rows():
    """Prompt for a count and append that many rows to both map layers."""
    count = EnterNumber(master, title="Add Rows", text="Add Rows").result
    if count is None or count < 0:
        return
    for _ in range(count):
        self.tilemap = add_row(self.tilemap)
        self.decomap = add_row(self.decomap)
    redraw_map_canvas()
def add_columns():
    """Prompt for a count and append that many columns to both map layers."""
    count = EnterNumber(master, title="Add Column", text="Add Column").result
    if count is None or count < 0:
        return
    for _ in range(count):
        self.tilemap = add_column(self.tilemap)
        self.decomap = add_column(self.decomap)
    redraw_map_canvas()
def delete_rows():
    """Prompt for a count and remove that many rows, keeping the 9-row minimum.

    Fixes copy-pasted dialog strings: the prompt previously said
    "Delete Column" even though this deletes rows.
    """
    number = EnterNumber(master, title="Delete Rows", text="Delete Rows").result
    if number is None:
        return
    if len(self.tilemap) - number >= 9:
        for i in range(number):
            self.tilemap = delete_row(self.tilemap)
            self.decomap = delete_row(self.decomap)
        redraw_map_canvas()
    else:
        showwarning("Invalid Size", "Tilemaps cannot have a height smaller than 9 tiles.")
def delete_columns():
    """Prompt for a count and delete that many columns from the map."""
    count = EnterNumber(master, title="Delete Column", text="Delete Column").result
    if count is None:
        return
    # Enforce the 16-tile minimum width before deleting anything.
    if len(self.tilemap[0]) - count < 16:
        showwarning("Invalid Size", "Tilemaps cannot have a width smaller than 16 tiles.")
        return
    for _ in range(count):
        self.tilemap = delete_column(self.tilemap)
        self.decomap = delete_column(self.decomap)
    redraw_map_canvas()
def edit_default_pos():
    """Ask the user for a new default spawn position and validate it."""
    new_pos = Enter2Numbers(master, title="Default Spawn Position", text="(x, y)").result
    if new_pos is None:
        # Dialog was cancelled.
        return
    # NOTE(review): the inclusive upper bound (<= len(...)) accepts a
    # coordinate one tile past the map edge; confirm whether valid spawn
    # coordinates are meant to be 0..len-1 instead.
    if not(0 <= new_pos[0] <= len(self.tilemap[0])) or not(0 <= new_pos[1] <= len(self.tilemap)):
        showwarning("Invalid Position", "Spawn position must be on the map")
    else:
        self.default_start = new_pos
def check_config():
    """Warn the user about duplicate numeric IDs in the tile/deco ID tables."""

    def _find_dupes(id_table):
        # Skip the first entry (the reserved placeholder image), then
        # collect every ID value that appears more than once.
        ids = list(id_table.values())[1:]
        return [value for index, value in enumerate(ids) if value in ids[:index]]

    dupes = _find_dupes(self.tile_ids)
    if dupes:
        showwarning("Tile Id List Error", 'Duplicate id(s) were found in the tile id list:\n{}\n\nPlease edit the list using "Manage Tile Ids" and check again for errors using "Check ID List"'.format(dupes))
    dupes = _find_dupes(self.deco_ids)
    if dupes:
        showwarning("Decoration Id List Error", 'Duplicate id(s) were found in the decoration id list:\n{}\n\nPlease edit the list using "Manage Decoration Ids" and check again for errors using "Check ID List"'.format(dupes))
def import_image():
    """Copy a user-chosen image into tiles/ and register it in the active palette.

    The image is added either to the tile palette (view_mode 0) or the
    decoration palette (any other view mode), assigned the next free ID,
    and the configuration file is rewritten.
    """
    f = askopenfilename(defaultextension=".png")
    if not f:
        # Dialog cancelled: askopenfilename returns ''. (Bug fix: this
        # previously used "f is ''", which relies on string interning.)
        return
    name = "tiles/" + path.basename(f)
    if path.isfile(name):
        # Already imported: refresh the copy and the panels, but do not
        # register a second palette entry.
        print("Image is a duplicate")
        copy2(f, "tiles")
        redraw_panels()
        return
    try:
        copy2(f, "tiles")
    except OSError:
        # Bug fix: was a bare except, which also swallowed KeyboardInterrupt.
        print("An error occured while loading image. Is the image a duplicate?")

    def write_config():
        # Persist the ID tables and palette groups to disk.
        with open("assets/image_config.config", 'w') as rf:
            rf.write(str(App.tile_ids))
            rf.write("\n")
            rf.write(str(App.deco_ids))
            rf.write("\n")
            rf.write(str(App.groups))

    img = PhotoImage(file=name).zoom(64).subsample(16)
    App.translate_tk2f[img] = name
    App.translate_f2tk[name] = img
    if view_mode.get() == 0:
        # Register as a ground tile and add a selector button to the panel.
        App.tiles.append(img)
        self.tile_x += 1
        if self.tile_x > 2:
            self.tile_x = 0
            self.tile_y += 1
        radiobutton = Radiobutton(self.tile_canvas, image=img, variable=selected_image, value=len(App.tiles)-1, indicatoron=0)
        self.tile_canvas.create_window(self.tile_x * 72 + 36, self.tile_y * 72 + 36, window=radiobutton)
        self.tile_canvas.config(scrollregion=self.tile_canvas.bbox("all"))
        App.tile_ids[name] = len(App.tile_ids) - 1
        write_config()
    else:
        # Register as a decoration.
        App.decos.append(img)
        self.deco_x += 1
        if self.deco_x > 2:
            self.deco_x = 0
            self.deco_y += 1
        radiobutton = Radiobutton(self.deco_canvas, image=img, variable=selected_deco, value=len(App.decos)-1, indicatoron=0)
        self.deco_canvas.create_window(self.deco_x * 72 + 36, self.deco_y * 72 + 36, window=radiobutton)
        self.deco_canvas.config(scrollregion=self.deco_canvas.bbox("all"))
        App.deco_ids[name] = len(App.deco_ids) - 1
        write_config()
    App.groups["All"].append(name)
    check_config()
# Catch window quiting when file is unsaved in order to prevent CATASTROPHE
def catch_quit():
    """Intercept the window-close button so unsaved progress is not lost."""
    if saved.get() != 0:
        root.destroy()
        return
    action = askyesnocancel("Worldbuilder", "Progress is unsaved. Would you like to save first?", icon='warning')
    if action is True:
        save_file()
    elif action is False:
        root.destroy()
    # Cancel (None): keep the editor open.
master.protocol('WM_DELETE_WINDOW', catch_quit)
def edit_tile_ids():
    """Open the tile-ID editor dialog and persist any changes."""
    new_ids = EditTileIds(master, title="Edit Image IDs").result
    # Idiom fix: compare with None by identity, not equality.
    if new_ids is not None:
        App.tile_ids = new_ids
        # Rewrite the full configuration file.
        with open("assets/image_config.config", 'w') as wf:
            wf.write(str(App.tile_ids))
            wf.write("\n")
            wf.write(str(App.deco_ids))
            wf.write("\n")
            wf.write(str(App.groups))
def edit_deco_ids():
    """Open the decoration-ID editor dialog and persist any changes."""
    new_ids = EditDecoIds(master, title="Edit Decoration IDs").result
    # Idiom fix: compare with None by identity, not equality.
    if new_ids is not None:
        App.deco_ids = new_ids
        # Rewrite the full configuration file.
        with open("assets/image_config.config", 'w') as wf:
            wf.write(str(App.tile_ids))
            wf.write("\n")
            wf.write(str(App.deco_ids))
            wf.write("\n")
            wf.write(str(App.groups))
def edit_colliders():
    """Open the collider-list editor and apply the result, if confirmed."""
    updated = EditColliderList(master, title="Edit Collider List", args=self.colliders).result
    if updated is not None:
        self.colliders = updated
def edit_groups():
    """Edit the palette group catalogue and rebuild the group dropdown."""
    new_groups = EditCatagories(master, title="Edit Tile Catagories", args=App.groups).result
    if new_groups == None:
        return
    # Every group must start with the reserved placeholder image.
    for i in new_groups:
        new_groups[i].insert(0, "tiles/missing.png")
    App.groups = new_groups
    # Persist the updated configuration to disk.
    with open("assets/image_config.config", 'w') as wf:
        wf.write(str(App.tile_ids) + "\n" + str(App.deco_ids) + "\n" + str(App.groups))
    # Rebuild the group-selection OptionMenu with the new group names and
    # reset the selection to the first group.
    options = list(i[0] for i in App.groups.items())
    catagories.set(options[0])
    self.groups_menu = OptionMenu(self.menu_frame, catagories, *options)
    self.groups_menu.grid(row=0, column=8)
    redraw_panels()
def copy_tile_dict():
    """Copy the inverted (id -> filename) tile table to the clipboard.

    The placeholder entry (id 0) is excluded.
    """
    inverted = {j: i for i, j in App.tile_ids.items() if j != 0}
    root.clipboard_clear()
    root.clipboard_append(str(inverted))
def copy_deco_dict():
    """Copy the inverted (id -> filename) decoration table to the clipboard.

    The placeholder entry (id 0) is excluded.
    """
    inverted = {j: i for i, j in App.deco_ids.items() if j != 0}
    root.clipboard_clear()
    root.clipboard_append(str(inverted))
def about():
    # Placeholder: the About dialog has not been implemented yet.
    showwarning("Error", "This feature is not yet available.")
# Menubar setup: File / Edit / Palette / Help cascades wired to the
# nested handler functions defined above.
self.menubar = Menu(master)
# File menubar: open/save/export and clipboard helpers.
self.filemenu = Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label="Open Ctrl+O", command=open_file)
self.filemenu.add_command(label="Save Ctrl+S", command=save_file)
self.filemenu.add_command(label="Save As", command=save_file_as)
self.filemenu.add_command(label="New Ctrl+N", command=new)
self.filemenu.add_command(label="Export Tilemap Ctrl+E", command=export_file)
self.filemenu.add_separator()
self.filemenu.add_command(label="Copy Tile Dictionary to Clipboard", command=copy_tile_dict)
self.filemenu.add_command(label="Copy Deco Dictionary to Clipboard", command=copy_deco_dict)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=master.quit)
self.menubar.add_cascade(label="File", menu=self.filemenu)
# Edit menubar: map-resizing and spawn-position commands.
self.editmenu = Menu(self.menubar, tearoff=0)
self.editmenu.add_command(label="Add Rows", command=add_rows)
self.editmenu.add_command(label="Add Columns", command=add_columns)
self.editmenu.add_command(label="Delete Rows", command=delete_rows)
self.editmenu.add_command(label="Delete Columns", command=delete_columns)
self.editmenu.add_command(label="Default Spawn Position", command=edit_default_pos)
self.menubar.add_cascade(label="Edit", menu=self.editmenu)
# Palette menubar: image import and ID/collider/group management.
self.palettemenu = Menu(self.menubar, tearoff=0)
self.palettemenu.add_command(label="Import Image Ctrl+I", command=import_image)
self.palettemenu.add_command(label="Manage Tile Ids", command=edit_tile_ids)
self.palettemenu.add_command(label="Manage Decoration Ids", command=edit_deco_ids)
self.palettemenu.add_command(label="Manage Colliders", command=edit_colliders)
self.palettemenu.add_separator()
self.palettemenu.add_command(label="Manage Palette Groups", command=edit_groups)
self.palettemenu.add_separator()
self.palettemenu.add_command(label="Check ID List", command=check_config)
self.menubar.add_cascade(label="Palette", menu=self.palettemenu)
# Help menubar (About is currently a stub).
self.helpmenu = Menu(self.menubar, tearoff=0)
self.helpmenu.add_command(label="About", command=about)
self.menubar.add_cascade(label="Help", menu=self.helpmenu)
# Finish menubar setup by attaching it to the toplevel window.
master.config(menu=self.menubar)
# Function shortcut keys
# Keyboard shortcuts: file operations (Ctrl+...) plus number keys that
# switch the cursor tool and the active editing layer.
master.bind('<Control-s>', lambda event: save_file())
master.bind('<Control-o>', lambda event: open_file())
master.bind('<Control-n>', lambda event: new())
master.bind('<Control-e>', lambda event: export_file())
master.bind('<Control-i>', lambda event: import_image())
master.bind("<Key-1>", lambda event: cursor_mode.set(0))
master.bind("<Key-2>", lambda event: cursor_mode.set(1))
master.bind("<Key-3>", lambda event: view_mode.set(0))
master.bind("<Key-4>", lambda event: view_mode.set(1))
master.bind("<Key-5>", lambda event: view_mode.set(2))
master.bind("<Key-6>", lambda event: view_mode.set(3))
# Validate the ID tables once at startup so the user is warned early.
check_config()
# Development notes printed to the console at launch.
print("Todo:")
print("-Do help menu")
print("-Add more controls for easier use")
print("\t*Add/Delete row/column at some position button")
print("\t*Shift field button")
print("\t*Control-z feature?")
print("\nPlanned:")
print("-Sprite layer (npcs, enemies, interactables, etc.)")
# Application entry point: build the editor UI and enter the Tk main loop.
root = Tk()
app = App(root)
root.mainloop()
try:
    # The window may already have been destroyed by catch_quit().
    root.destroy()
except TclError:
    pass
|
Sciguy324/Maze-Game-Python | Maze (Multiplayer Tests)/Maze (Client 2)/networks.py | # Code from freeCodeCamp.org at https://www.youtube.com/watch?v=McoDjOCb2Zo
import socket
class Network:
    """Thin TCP client wrapper for talking to the Maze game server."""

    def __init__(self):
        # TCP socket to the game server; the port is fixed at 5555.
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server = input("Enter server IP: ")
        self.port = 5555
        self.addr = (self.server, self.port)
        # The server's first message is this client's entity id.
        self.id = self.connect()
        print("Your entity ID is:", self.id)

    def connect(self):
        """Connect and return the server's greeting (the assigned entity id)."""
        self.client.connect(self.addr)
        return self.client.recv(2048).decode()

    def send(self, data):
        """Send *data* and return the server's decoded reply.

        Returns None if a socket error occurs (the error is printed).
        NOTE(review): a single recv(2048) assumes no reply is longer than
        2048 bytes and that TCP delivers it in one chunk — confirm the
        protocol guarantees this.
        """
        try:
            self.client.send(str.encode(data))
            result = self.client.recv(2048).decode("utf-8")
            return result
        except socket.error as e:
            print(e)
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 4/Maze.py | <reponame>Sciguy324/Maze-Game-Python<filename>Maze (Pygame) Checkpoint 4/Maze.py
import pygame
import os
from sys import platform
import matplotlib.pyplot as plt
import time
# Pygame initialization
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
# If the platform is windows, ensure that the system is not stretching the game window to the display.
if platform == "win32":
import ctypes
ctypes.windll.user32.SetProcessDPIAware()
os.environ['SDL_VIDEODRIVER'] = 'directx'
screen_width = pygame.display.Info().current_w
screen_height = pygame.display.Info().current_h
# Window settup
win_width = 64 * 16
win_height = 64 * 9
win = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption("Maze")
icon = pygame.image.load(os.path.join('assets', 'icon.bmp'))
pygame.display.set_icon(icon)
# Worlds module must be called after the window mode is set, and sprite module depends on worlds module.
from worlds import *
from sprite import *
#missing = pygame.image.load(os.path.join('tiles/missing.png'))
#missing = pygame.transform.scale(missing, (64, 64)).convert()
# Dialog system initialization
Dialog.dialog_init((win_width, win_height), win)
def draw_tilemap(tiles, window, dispx, dispy, litmap, deco=False):
    """Draw the visible portion of a tile (or deco) map onto *window*.

    Only the on-screen 17x10 tile region is drawn; lit tiles use the
    tile's draw_norm rendering, the rest use draw.
    """
    id_list = Level.deco_ids if deco else Level.tile_ids
    first_col = int(abs(dispx / 64))
    first_row = int(abs(dispy / 64))
    for y, row in enumerate(tiles[first_row:first_row + 10], start=first_row):
        for x, tile_id in enumerate(row[first_col:first_col + 17], start=first_col):
            if tile_id in id_list:
                pos = (dispx + x * 64, dispy + y * 64)
                if litmap[y][x]:
                    id_list[tile_id].draw_norm(window, pos)
                else:
                    id_list[tile_id].draw(window, pos)
    return window
def stitch_tilemap(lvl, dispx, dispy, deco=False):
    """Stitch a level's tile (or deco) layer into a single Surface.

    Bug fix: the loop previously always iterated lvl.tilemap, ignoring
    the lvl_map selection, so the decomap was never stitched when
    deco=True.

    dispx/dispy are currently unused but kept for interface compatibility.
    """
    if deco:
        id_list = Level.deco_ids
        lvl_map = lvl.decomap
    else:
        id_list = Level.tile_ids
        lvl_map = lvl.tilemap
    result = pygame.Surface((lvl.width, lvl.height))
    y = -64
    for row in lvl_map:
        y += 64
        x = -64
        for tile_id in row:
            x += 64
            if tile_id in id_list:
                id_list[tile_id].draw(result, (x, y))
    return result
def generate_litmap(lightmap, tilemap):
    """Generate a reference map for illuminated tiles.

    Returns (matrix, rects): matrix[y][x] is 1 for every tile within one
    tile of a light source, and rects holds a 192x192 pixel Rect centred
    on each light.
    """
    width = len(tilemap[0])
    height = len(tilemap)
    matrix = [[0] * width for _ in range(height)]
    rects = []
    for x, y in lightmap:
        for i in range(-1, 2):
            for j in range(-1, 2):
                # Clamp both edges; the original clamped only the low edge
                # and raised IndexError for lights on the bottom/right border.
                matrix[min(max(y + j, 0), height - 1)][min(max(x + i, 0), width - 1)] = 1
        rects.append(pygame.Rect((x - 1) * 64, (y - 1) * 64, 192, 192))
    return matrix, rects
def illuminate(window, lightmap, dispx, dispy, light):
    """Subtractively blend the light overlay onto every on-screen light source."""
    col0 = int(abs(dispx / 64))
    row0 = int(abs(dispy / 64))
    visible_cols = range(col0, col0 + 17)
    visible_rows = range(row0, row0 + 10)
    for x, y in lightmap:
        if x in visible_cols and y in visible_rows:
            window.blit(light, (dispx + (x - 1) * 64, dispy + (y - 1) * 64), special_flags=pygame.BLEND_SUB)
def center_at(userx, usery, world_width, world_height):
    """Camera offset centring (userx, usery), clamped to the world edges."""
    dispx = min(win_width / 2 - userx, 0)
    dispx = max(dispx, win_width - world_width)
    dispy = min(win_height / 2 - usery, 0)
    dispy = max(dispy, win_height - world_height)
    return dispx, dispy
def wipe_basic(wipe, screenshot, start):
    '''Function to either wipe or unwipe the screen black, depending on the argument.
    start = 0: wipe
    start = win.get_height(): unwipe
    Requires copy of the screen.'''
    # The wipe image is twice the window height so it can slide fully past.
    scaled_wipe = pygame.transform.scale(wipe, (win.get_width(), win.get_height() * 2))
    scaled_screenshot = pygame.transform.scale(screenshot, (win.get_width(), win.get_height()))
    i = 0
    while i < win.get_height() * 2:
        t1 = time.perf_counter_ns()
        win.blit(scaled_screenshot, (0, 0))
        win.blit(scaled_wipe, (0, start - i))
        pygame.display.flip()
        # Advance proportionally to the frame's render time so the wipe
        # lasts the same wall-clock duration regardless of frame rate.
        time_step = (time.perf_counter_ns() - t1) / 600000
        i += time_step * win.get_height() / win_height
def play_level(start_level):
    '''Play a level, requires starting position of player and map of level.

    Runs the main game loop (input -> sprite update -> render) until the
    window is closed or Backspace is pressed.
    '''
    global win
    global step_list
    # Internal window: everything is drawn here, then scaled to the display.
    win2 = pygame.Surface((win_width, win_height))
    Dialog.internal_window = win2
    full = False
    # Setup all sprites
    player_sprite = sprite_setup(start_level)
    Sprite.focus = player_sprite
    # Misc.
    dispx, dispy = center_at(Sprite.focus.rect.x, Sprite.focus.rect.y, Sprite.focus.level.width, Sprite.focus.level.height)
    time_step = 1
    clock = pygame.time.Clock()
    time_taken = 0
    # Nightime-shading setup: a 192x192 overlay blitted subtractively
    # around each light source.
    light_area = pygame.image.load(os.path.join('assets/light.png')).convert_alpha()
    light = pygame.Surface((192, 192))
    light.fill((100, 100, 100))
    light.blit(light_area, (0, 0))
    light.convert()
    litmap, lit_rects = generate_litmap(Sprite.focus.level.lightmap, Sprite.focus.level.tilemap)
    night = False
    # Screen wipe setup
    wipe_up = pygame.image.load(os.path.join('assets/wipe_up.png')).convert_alpha()
    wipe_down = pygame.image.load(os.path.join('assets/wipe_down.png')).convert_alpha()
    Sprite.behavour_args["screen_wipe"] = True
    # Main loop
    done = False
    while not done:
        time_start = time.perf_counter_ns()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE:
                    return
                elif event.key == pygame.K_F11:
                    # Toggle fullscreen.
                    full = not(full)
                    if full:
                        win = pygame.display.set_mode((screen_width, screen_height), pygame.FULLSCREEN | pygame.DOUBLEBUF)
                    else:
                        win = pygame.display.set_mode((win_width, win_height))
                elif event.key == pygame.K_f:
                    # NOTE(review): time_taken is in nanoseconds here, so
                    # 1000 / time_taken is not fps — confirm intended units.
                    print(1000 / time_taken, "fps. Fullscreen:", full, "Night:", night)
                elif event.key == pygame.K_z:
                    night = True
                elif event.key == pygame.K_x:
                    night = False
                elif event.key == pygame.K_n:
                    Level.darken_imgs(100)
                    Sprite.darken_all(100)
                elif event.key == pygame.K_m:
                    Level.reset_imgs()
                    Sprite.reset_imgs()
        # Get keyboard input for sprite arguments
        keys = pygame.key.get_pressed()
        # Update internal window.
        Sprite.behavour_args['keys'] = keys
        Sprite.behavour_args['time_step'] = time_step
        Sprite.behave_all()
        # Wipe the screen if the player changed zone.
        if Sprite.behavour_args['screen_wipe']:
            wipe_basic(wipe_up, win2, win.get_height())
            Sprite.reload_loaded_locals()
            litmap, lit_rects = generate_litmap(Sprite.focus.level.lightmap, Sprite.focus.level.tilemap)
        dispx, dispy = center_at(Sprite.focus.x, Sprite.focus.y, Sprite.focus.level.width, Sprite.focus.level.height)
        win2 = draw_tilemap(Sprite.focus.level.tilemap, win2, dispx, dispy, litmap) # 6-7 ms
        if night:
            Sprite.blit_all_night(dispx, dispy, win2, litmap, lit_rects)
            win2 = draw_tilemap(Sprite.focus.level.decomap, win2, dispx, dispy, litmap, deco=True)
            illuminate(win2, Sprite.focus.level.lightmap, dispx, dispy, light)
        else:
            Sprite.blit_all(dispx, dispy, win2)
            win2 = draw_tilemap(Sprite.focus.level.decomap, win2, dispx, dispy, litmap, deco=True)
        # Transfer internal window to displayed window and update screen.
        pygame.transform.scale(win2, (win.get_width(), win.get_height()), win) # 2-3 ms
        if Sprite.behavour_args['screen_wipe']:
            wipe_basic(wipe_down, win2, 0)
            Sprite.behavour_args['screen_wipe'] = False
        pygame.display.flip()
        # Timestep: hold V to use the old fixed-60fps clock calculation.
        if keys[pygame.K_v]:
            time_step = min(clock.tick(60) / 10, 2.5)
        else:
            time_taken = time.perf_counter_ns() - time_start
            time_taken_ms = time_taken / 1000000
            time_step = min(time_taken_ms / 9, 12)
        Tile.advance_frame(time_step)
        step_list.append(time_step)
# Per-frame timestep samples, collected for the exit plot below.
step_list = []
if __name__ == "__main__":
    print("Debug Controls:")
    print("\t-z: Night")
    print("\t-x: Day")
    print("\t-f: Print framerate")
    print("\t-v: Hold for old time-step calculation")
    print("\t-Backspace: Stop game")
    print("\nTodo:")
    print("\t-Animate more tiles")
    print("\t-Add more to sprite system")
    print("\t\t-Add more behaviours")
    print("\t-Transfer sprite drawing responcibilty to Animation class")
    print("\t\t-This will be important for clipped sprite lighting")
    play_level(demo)
    pygame.quit()
    # Plot the recorded timesteps (first sample dropped: startup outlier).
    if len(step_list) > 0:
        step_list.pop(0)
        print("Average Value:", sum(step_list) / len(step_list))
        graph = plt.figure()
        graph.suptitle('Timestep Each Tick')
        plt.plot(step_list, label="Timestep")
        plt.legend()
        plt.show()
|
Sciguy324/Maze-Game-Python | Maze (Multiplayer Tests)/Maze (Client 2)/Maze.py | import pygame
import os
from sys import platform
import matplotlib.pyplot as plt
import time
from networks import *
import threading
# # PRE-INITIALIZATION:
# Pygame initialization
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
# If the platform is windows, ensure that the system is not stretching the game window to the display.
if platform == "win32":
import ctypes
ctypes.windll.user32.SetProcessDPIAware()
os.environ['SDL_VIDEODRIVER'] = 'directx'
# missing = pygame.image.load(os.path.join('tiles/missing.png'))
# missing = pygame.transform.scale(missing, (64, 64)).convert()
def setup_light():
    """Build the 192x192 subtractive light overlay surface."""
    glow = pygame.image.load(os.path.join('assets/light.png')).convert_alpha()
    overlay = pygame.Surface((192, 192))
    overlay.fill((100, 100, 100))
    overlay.blit(glow, (0, 0))
    # NOTE: convert() returns a new Surface; this call matches the
    # original code and has no in-place effect.
    overlay.convert()
    return overlay
def center_at(userx, usery, world_width, world_height):
    """Function to center the screen on a sprite"""
    half_w, half_h = 1024 / 2, 576 / 2
    dispx = max(min(half_w - userx, 0), 1024 - world_width)
    dispy = max(min(half_h - usery, 0), 576 - world_height)
    return dispx, dispy
class Game:
    """Overarching class for Maze (multiplayer client)."""
    # General class variable setup
    screen_width = pygame.display.Info().current_w
    screen_height = pygame.display.Info().current_h
    # Internal render resolution: 16x9 tiles of 64 px.
    win_width = 64 * 16
    win_height = 64 * 9
    win = pygame.display.set_mode((win_width, win_height))
    # Current display-surface size (changes when toggling fullscreen).
    ext_width = win.get_width()
    ext_height = win.get_height()
    # Sprite the camera follows, the shared light overlay, and the
    # local player's sprite; all set in __init__.
    focus = None
    light = None
    player = None
def __init__(self, start_level):
    """Connect to the server, download levels and sprites, set up rendering.

    start_level: name of the level the player spawns in.
    """
    # Display window setup
    pygame.display.set_caption("Maze (Client)")
    icon = pygame.image.load(os.path.join('assets', 'icon.bmp'))
    pygame.display.set_icon(icon)
    # Dialog system initialization
    Dialog.dialog_init((Game.win_width, Game.win_height), Game.win)
    # Internal window and module setup
    self.win2 = pygame.Surface((Game.win_width, Game.win_height))
    Dialog.internal_window = self.win2
    self.full = False
    self.night = False
    # Client connection initialization.
    # NOTE(review): the sleep() calls below look like crude pacing between
    # request/response exchanges — confirm the protocol requires them.
    self.net = Network()
    time.sleep(0.1)
    # Download maps
    self.download_basic_levels()
    time.sleep(0.1)
    self.inquire_level(start_level)
    # Setup all sprites; the server-assigned id identifies this player.
    player_sprite = minor_sprite_setup(start_level)
    player_sprite.entity_id = self.net.id
    Game.player = player_sprite
    Sprite.focus = player_sprite
    Game.focus = player_sprite
    # Early rendering
    Game.light = setup_light()
    self.generate_litmap(player_sprite)
    # Finish client connection setup
    time.sleep(0.1)
    self.download_sprites()
    time.sleep(0.1)
    self.net.send("pst" + self.player.jsonify2())
    # Background daemon thread that keeps the server updated.
    send_thread = threading.Thread(target=Game.send_thread, args=(self,))
    send_thread.daemon = True
    send_thread.start()
def draw_tilemap(self, dispx, dispy, deco=False):
    """Function to draw tilemap. Only draws visible area.

    Draws the focused level's tile layer (or deco layer when deco=True)
    onto the internal window; only the on-screen 17x10 region is drawn.
    """
    if deco:
        draw_map = self.focus.level.decomap
        id_list = Level.deco_ids
    else:
        draw_map = self.focus.level.tilemap
        id_list = Level.tile_ids
    window = self.win2
    litmap = self.litmap
    # First visible tile column/row given the camera offset.
    x_tile = int(abs(dispx / 64))
    y_tile = int(abs(dispy / 64))
    y = y_tile - 1
    for i in draw_map[y_tile:y_tile + 10]:
        y += 1
        x = x_tile - 1
        for j in i[x_tile:x_tile + 17]:
            x += 1
            if j in id_list:
                # Lit tiles use draw_norm; others use draw (presumably the
                # darkened variant at night — confirm in the Tile class).
                if litmap[y][x]:
                    id_list[j].draw_norm(window, (dispx + x * 64, dispy + y * 64))
                else:
                    id_list[j].draw(window, (dispx + x * 64, dispy + y * 64))
def stitch_tilemap(self, deco=False):
    """Stitch the focused level's tile (or deco) layer into one Surface.

    Bug fix: the inner loop called id_list.draw(...) on the id dictionary
    itself instead of looking up the tile (id_list[j]), which raised
    AttributeError for any drawable tile.
    """
    if deco:
        id_list = Level.deco_ids
        lvl_map = self.focus.level.decomap
    else:
        id_list = Level.tile_ids
        lvl_map = self.focus.level.tilemap
    result = pygame.Surface((self.focus.level.width, self.focus.level.height))
    y = -64
    for row in lvl_map:
        y += 64
        x = -64
        for tile_id in row:
            x += 64
            if tile_id in id_list:
                id_list[tile_id].draw(result, (x, y))
    return result
def generate_litmap(self, focus):
    """Generate a reference map for illuminated tiles.

    Stores self.litmap (matrix of 0/1 flags; 1 = within one tile of a
    light source) and self.lit_rects (192x192 pixel Rects around lights).
    """
    focus_level = focus.level
    width = focus_level.width // 64
    height = focus_level.height // 64
    matrix = [[0] * width for _ in range(height)]
    rects = []
    for x, y in focus_level.lightmap:
        for i in range(-1, 2):
            for j in range(-1, 2):
                # Clamp both edges; the original clamped only the low edge
                # and raised IndexError for lights on the bottom/right border.
                matrix[min(max(y + j, 0), height - 1)][min(max(x + i, 0), width - 1)] = 1
        rects.append(pygame.Rect((x - 1) * 64, (y - 1) * 64, 192, 192))
    self.litmap, self.lit_rects = matrix, rects
def illuminate(self, dispx, dispy):
    """Draw light sources using the lightmap.

    Subtractively blends the light overlay around every light source that
    is within the visible 17x10 tile region.
    """
    window = self.win2
    light = Game.light
    x_tile = int(abs(dispx / 64))
    y_tile = int(abs(dispy / 64))
    for x, y in self.focus.level.lightmap:
        if x in range(x_tile, x_tile + 17) and y in range(y_tile, y_tile + 10):
            window.blit(light, (dispx + (x - 1) * 64, dispy + (y - 1) * 64), special_flags=pygame.BLEND_SUB)
def wipe_effect(self):
    """Wipe the screen to black, wait for any pending level download,
    re-render, then unwipe."""
    wipe_up = pygame.image.load(os.path.join('assets/wipe_up.png')).convert_alpha()
    self.wipe_basic(wipe_up, self.win2.copy(), Game.ext_height)
    self.generate_litmap(self.focus)
    # Wait for the network thread to finish fetching the new level.
    # Bug fix: this was a pure busy-wait ("while ...: pass") that pinned a
    # CPU core; sleep briefly between checks instead.
    while self.focus.inquiring:
        time.sleep(0.005)
    self.render()
    wipe_down = pygame.image.load(os.path.join('assets/wipe_down.png')).convert_alpha()
    self.wipe_basic(wipe_down, self.win2.copy(), 0)
def wipe_basic(self, wipe, screenshot, start):
    """Generic function to wipe/unwipe the screen black.

    start = Game.ext_height slides the wipe image over the screenshot
    (wipe to black); start = 0 slides it away (unwipe).
    """
    # The wipe image is twice the display height so it can slide fully past.
    scaled_wipe = pygame.transform.scale(wipe, (Game.ext_width, Game.ext_height * 2))
    scaled_screenshot = pygame.transform.scale(screenshot, (Game.ext_width, Game.ext_height))
    i = 0
    while i < Game.ext_height * 2:
        t1 = time.perf_counter_ns()
        Game.win.blit(scaled_screenshot, (0, 0))
        Game.win.blit(scaled_wipe, (0, start - i))
        pygame.display.flip()
        # Advance proportionally to the frame's render time so the wipe
        # lasts the same wall-clock duration regardless of frame rate.
        time_step = (time.perf_counter_ns() - t1) / 600000
        i += time_step * Game.ext_height / Game.win_height
def render(self):
    """Standard rendering function.

    Draws ground layer, sprites, deco layer (and night lighting when
    enabled) to the internal window, then scales it to the display.
    """
    win2 = self.win2
    focus = self.focus
    # Wipe the screen if the player changed zone.
    if Sprite.behavior_args['screen_wipe']:
        Sprite.behavior_args['screen_wipe'] = False
        self.wipe_effect()
        Sprite.reload_loaded_locals()
        self.generate_litmap(focus)
    dispx, dispy = center_at(focus.x, focus.y, focus.level.width, focus.level.height)
    self.draw_tilemap(dispx, dispy)  # 6-7 ms
    if self.night:
        Sprite.blit_all_night(dispx, dispy, win2, self.litmap, self.lit_rects)
        self.draw_tilemap(dispx, dispy, deco=True)
        self.illuminate(dispx, dispy)
    else:
        Sprite.blit_all(dispx, dispy, win2)
        self.draw_tilemap(dispx, dispy, deco=True)
    # Transfer internal window to displayed window and update screen.
    pygame.transform.scale(win2, (Game.ext_width, Game.ext_height), Game.win)  # 2-3 ms
def send_thread(self):
    """Thread that handles sending player sprite information.

    Runs forever: pushes this player's state to the server each pass and
    reacts to 3-letter opcodes in the reply ("rel", "add", "del", "upd").
    """
    formatted_response = ""
    while True:
        focus = self.focus
        # If the focus sprite is attempting to join another map, first run that request
        if focus.inquiring:
            focus.level = self.inquire_level(focus.inquire_of)
            focus.inquiring = False
            time.sleep(0.1)
        # Standard sprite data updating
        response = self.net.send("set" + self.player.jsonify2())
        if response:
            # Check for override
            # Server has ordered refresh of client's guest dictionaries.
            if response[:3] == "rel":
                self.download_sprites()
            # Server reports the addition of a new sprite
            elif response[:3] == "add":
                print(response[3:], "joined the server!")
                GuestSprite.add_sprite(int(response[3:]))
            # Server reports the removal of an existing sprite.
            # NOTE(review): response[-1] is a single character, so this
            # only handles entity ids 0-9 — confirm id range.
            elif response[:3] == "del":
                print(response[-1], "left the server!")
                Sprite.delete_sprite(int(response[-1]))
            # Server has sprite data to inform the client about
            elif response[:3] == "upd":
                try:
                    entity_count = int(response[3:])
                    for i in range(entity_count):
                        # Pull one JSON-encoded sprite record per "next".
                        response = self.net.send("next")
                        formatted_response = json.loads(response)
                        s = Sprite.sprite_dict[formatted_response["id"]]
                        s.x = formatted_response["x"]
                        s.y = formatted_response["y"]
                        s.facing.set(formatted_response["facing"])
                        s.direction.set(formatted_response["direction"])
                        s.animation.running = formatted_response["animated"]
                        s.speed_mult = formatted_response["speed"]
                        s.level = Level.reference_dict[formatted_response["level"]]
                except Exception as e:
                    # Dump diagnostics before propagating the failure.
                    print("Failed to parse sprite data, was:\n", formatted_response)
                    print("Registered sprites was:\n", Sprite.sprite_dict)
                    raise e
        else:
            print("Read Error")
def inquire_level(self, level):
    """Ask server for JSON level data.

    Requests level *level* ("lvl<name>"); the reply gives the number of
    packets, which are then fetched with repeated "next" requests.
    """
    print("Downloading level:", level)
    package_count = int(self.net.send("lvl" + level))
    time.sleep(0.1)
    data = ""
    for i in range(package_count):
        data += self.net.send("next")
    print(data)  # NOTE(review): debug print of the full level JSON; consider removing.
    return load_from_string(data)
def download_basic_levels(self):
    """Ask server about what levels currently exist, and set up the bare minimum."""
    print("Downloading basic level instances")
    level_count = int(self.net.send("blv"))
    time.sleep(0.1)
    data = []
    for i in range(level_count):
        data.append(self.net.send("next"))
    for i in data:
        # Create an empty placeholder 16x9 level for each reported name;
        # real contents arrive later via inquire_level().
        Level([], build_matrix(16, 9), build_matrix(16, 9), [], [], (0, 0), i)
def download_sprites(self):
    """Discard all guest sprites and re-download every sprite from the server."""
    print("Downloading sprites")
    # Remove every sprite except the local player before re-syncing.
    for i, j in list(Sprite.sprite_dict.items()):
        if i != self.player.entity_id:
            Sprite.delete_sprite(i)
    entity_count = int(self.net.send("spr"))
    for i in range(entity_count):
        # Each sprite arrives as one or more JSON packets.
        package_count = int(self.net.send("next"))
        if package_count == 1:
            data = self.net.send("next")
        else:
            data = ""
            for j in range(package_count):
                data += self.net.send("next")
        Sprite.add_sprite(json.loads(data))
    time.sleep(0.1)
    print("Finished downloading sprites")
def play(self):
    """Play a level, requires starting position of player and map of level.

    Main client loop: input handling, sprite updates, rendering, and
    frame-time-based timestep calculation.
    """
    # Misc.
    time_step = 1
    time_taken = 0
    Sprite.behavior_args["screen_wipe"] = True
    # Main loop
    done = False
    while not done:
        time_start = time.perf_counter_ns()
        # # CLIENT INPUT
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE:
                    return
                elif event.key == pygame.K_F11:
                    # Toggle fullscreen and track the display-surface size.
                    self.full = not self.full
                    if self.full:
                        Game.win = pygame.display.set_mode((Game.screen_width, Game.screen_height),
                                                           pygame.FULLSCREEN | pygame.DOUBLEBUF)
                        Game.ext_width = int(Game.screen_width)
                        Game.ext_height = int(Game.screen_height)
                    else:
                        Game.win = pygame.display.set_mode((Game.win_width, Game.win_height))
                        Game.ext_width = int(Game.win_width)
                        Game.ext_height = int(Game.win_height)
                elif event.key == pygame.K_f:
                    # NOTE(review): time_taken is in nanoseconds here, so
                    # 1000 / time_taken is not fps — confirm intended units.
                    print(1000 / time_taken, "fps. Fullscreen:", self.full, "Night:", self.night)
                elif event.key == pygame.K_z:
                    self.night = True
                    Level.darken_imgs(100)
                    Sprite.darken_all(100)
                elif event.key == pygame.K_x:
                    self.night = False
                    Level.reset_imgs()
                    Sprite.reset_imgs()
                elif event.key == pygame.K_c:
                    # Debug: dump the registered sprite dictionary.
                    for i, j in Sprite.sprite_dict.items():
                        print(i, ">", j)
        # # SERVER FUNCTIONS (BUT NOT QUITE, CURRENTLY)
        # Update entities
        Sprite.behavior_args['keys'] = pygame.key.get_pressed()
        Sprite.behavior_args['time_step'] = time_step
        Sprite.behave_all()
        # # CLIENT FUNCTIONS
        self.render()
        pygame.display.flip()
        # # UNIVERSAL (SERVER + CLIENT) FUNCTIONS
        time_taken = time.perf_counter_ns() - time_start
        time_taken_ms = time_taken / 1000000
        time_step = min(time_taken_ms / 9, 12)
        Tile.advance_frame(time_step)
        step_list.append(time_step)
step_list.append(time_step)
step_list = []
if __name__ == "__main__":
print("Debug Controls:")
print("\t-z: Night")
print("\t-x: Day")
print("\t-f: Print framerate")
print("\t-Backspace: Stop game")
print("\nTodo:")
print("\t-Add more to sprite system")
print("\t\t-Add more behaviours")
print("\t-Debug lighting engine")
print("\t-Complete major overhaul No. 3 billion")
print("\t-Make Static class more consistent")
# Worlds module must be called after the window mode is set, and sprite module depends on worlds module.
from sprite import *
# Run game
maze = Game("demo")
maze.play()
pygame.quit()
if len(step_list) > 0:
step_list.pop(0)
print("Average Value:", sum(step_list) / len(step_list))
graph = plt.figure()
graph.suptitle('Timestep Each Tick')
plt.plot(step_list, label="Timestep")
plt.legend()
plt.show()
|
Sciguy324/Maze-Game-Python | Maze (Multiplayer Tests)/Maze (Server 2)/Maze.py | import pygame
import os
from sys import platform
import matplotlib.pyplot as plt
import socket
# # PRE-INITIALIZATION:
# Pygame initialization
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
# If the platform is windows, ensure that the system is not stretching the game window to the display.
if platform == "win32":
import ctypes
ctypes.windll.user32.SetProcessDPIAware()
os.environ['SDL_VIDEODRIVER'] = 'directx'
# missing = pygame.image.load(os.path.join('tiles/missing.png'))
# missing = pygame.transform.scale(missing, (64, 64)).convert()
def setup_light():
    """Build the 192x192 subtractive light overlay surface."""
    light_area = pygame.image.load(os.path.join('assets/light.png')).convert_alpha()
    light = pygame.Surface((192, 192))
    light.fill((100, 100, 100))
    light.blit(light_area, (0, 0))
    # NOTE(review): convert() returns a new Surface; calling it without
    # using the result has no effect — confirm intent.
    light.convert()
    return light
def center_at(userx, usery, world_width, world_height):
    """Function to center the screen on a sprite"""
    half_w, half_h = 1024 / 2, 576 / 2
    dispx = max(min(half_w - userx, 0), 1024 - world_width)
    dispy = max(min(half_h - usery, 0), 576 - world_height)
    return dispx, dispy
class Game:
    """Overarching class for Maze (multiplayer server)."""
    # General class variable setup
    screen_width = pygame.display.Info().current_w
    screen_height = pygame.display.Info().current_h
    # Internal render resolution: 16x9 tiles of 64 px.
    win_width = 64 * 16
    win_height = 64 * 9
    win = pygame.display.set_mode((win_width, win_height))
    # Current display-surface size (changes when toggling fullscreen).
    ext_width = win.get_width()
    ext_height = win.get_height()
    # Sprite the camera follows, the shared light overlay, and the local
    # player's sprite; set during initialization.
    focus = None
    light = None
    player = None
def __init__(self, start_level):
    """Set up rendering, spawn the host player, and open the listen socket.

    start_level: name of the level the host player spawns in.
    """
    # Display window setup
    pygame.display.set_caption("Maze (Server)")
    icon = pygame.image.load(os.path.join('assets', 'icon.bmp'))
    pygame.display.set_icon(icon)
    # Dialog system initialization
    Dialog.dialog_init((Game.win_width, Game.win_height), Game.win)
    # Internal window and module setup
    self.win2 = pygame.Surface((Game.win_width, Game.win_height))
    Dialog.internal_window = self.win2
    self.full = False
    self.night = False
    # Setup all sprites
    player_sprite = sprite_setup(start_level)
    Sprite.focus = player_sprite
    Game.focus = player_sprite
    # Early rendering
    Game.light = setup_light()
    self.generate_litmap(player_sprite)
    # Server setup: listen on the local address, port 5555.
    self.ip = str(socket.gethostbyname(socket.gethostname()))
    print("Local Server Address:", self.ip)
    self.port = 5555
    self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        self.server.bind((self.ip, self.port))
    except socket.error as e:
        # NOTE(review): str(e) discards the bind error silently; the
        # server continues as if bind succeeded — confirm intent.
        str(e)
    self.server.listen(2)
    print("Waiting for a connection, Server Started")
    # Server connection setup: the host player occupies client slot 1.
    GuestSprite.client_sprites[1] = player_sprite
def draw_tilemap(self, dispx, dispy, deco=False):
"""Function to draw tilemap. Only draws visible area."""
if deco:
draw_map = self.focus.level.decomap
id_list = Level.deco_ids
else:
draw_map = self.focus.level.tilemap
id_list = Level.tile_ids
window = self.win2
litmap = self.litmap
x_tile = int(abs(dispx / 64))
y_tile = int(abs(dispy / 64))
y = y_tile - 1
for i in draw_map[y_tile:y_tile + 10]:
y += 1
x = x_tile - 1
for j in i[x_tile:x_tile + 17]:
x += 1
if j in id_list:
if litmap[y][x]:
id_list[j].draw_norm(window, (dispx + x * 64, dispy + y * 64))
else:
id_list[j].draw(window, (dispx + x * 64, dispy + y * 64))
def stitch_tilemap(self, deco=False):
"""Function to stitch tiles together into a single image, as defined by a tilemap matrix."""
if deco:
id_list = Level.deco_ids
lvl_map = self.focus.level.decomap
else:
id_list = Level.tile_ids
lvl_map = self.focus.level.tilemap
result = pygame.Surface((self.focus.level.width, self.focus.level.height))
y = -64
for i in lvl_map:
y += 64
x = -64
for j in i:
x += 64
if j in id_list:
id_list.draw(result, (x, y))
return result
def generate_litmap(self, focus):
"""Generate a reference map for illuminated tiles"""
focus_level = focus.level
row = [0] * (focus_level.width // 64)
matrix = [list(row) for i in range(focus_level.height // 64)]
rects = []
for x, y in focus_level.lightmap:
for i in range(-1, 2):
for j in range(-1, 2):
matrix[max(y + j, 0)][max(x + i, 0)] = 1
rects.append(pygame.Rect((x - 1) * 64, (y - 1) * 64, 192, 192))
self.litmap, self.lit_rects = list(matrix), list(rects)
def illuminate(self, dispx, dispy):
"""Draw light sources using the lightmap"""
window = self.win2
light = Game.light
x_tile = int(abs(dispx / 64))
y_tile = int(abs(dispy / 64))
for x, y in self.focus.level.lightmap:
if x in range(x_tile, x_tile + 17) and y in range(y_tile, y_tile + 10):
window.blit(light, (dispx + (x - 1) * 64, dispy + (y - 1) * 64), special_flags=pygame.BLEND_SUB)
def wipe_effect(self):
"""Function that wipes the screen black, and then unwipes it again"""
wipe_up = pygame.image.load(os.path.join('assets/wipe_up.png')).convert_alpha()
self.wipe_basic(wipe_up, self.win2.copy(), Game.ext_height)
self.generate_litmap(self.focus)
self.render()
wipe_down = pygame.image.load(os.path.join('assets/wipe_down.png')).convert_alpha()
self.wipe_basic(wipe_down, self.win2.copy(), 0)
def wipe_basic(self, wipe, screenshot, start):
"""Generic function to wipe/unwipe the screen black."""
scaled_wipe = pygame.transform.scale(wipe, (Game.ext_width, Game.ext_height * 2))
scaled_screenshot = pygame.transform.scale(screenshot, (Game.ext_width, Game.ext_height))
i = 0
while i < Game.ext_height * 2:
t1 = time.perf_counter_ns()
Game.win.blit(scaled_screenshot, (0, 0))
Game.win.blit(scaled_wipe, (0, start - i))
pygame.display.flip()
time_step = (time.perf_counter_ns() - t1) / 600000
i += time_step * Game.ext_height / Game.win_height
def render(self):
"""Standard rendering function"""
win2 = self.win2
focus = self.focus
# Wipe the screen if the player changed zone.
if Sprite.behavior_args['screen_wipe']:
Sprite.behavior_args['screen_wipe'] = False
self.wipe_effect()
Sprite.reload_loaded_locals()
self.generate_litmap(focus)
# world_image = stitch_tilemap(Sprite.focus.level, dispx, dispy)
# world_image.convert()
dispx, dispy = center_at(focus.x, focus.y, focus.level.width, focus.level.height)
self.draw_tilemap(dispx, dispy) # 6-7 ms
# win2.blit(world_image, dispx, dispy, area=pygame.Rect(0, 0, win_width, win_height).move_ip(dispx, dispy))
if self.night:
Sprite.blit_all_night(dispx, dispy, win2, self.litmap, self.lit_rects)
self.draw_tilemap(dispx, dispy, deco=True)
self.illuminate(dispx, dispy)
else:
Sprite.blit_all(dispx, dispy, win2)
self.draw_tilemap(dispx, dispy, deco=True)
# Transfer internal window to displayed window and update screen.
pygame.transform.scale(win2, (Game.ext_width, Game.ext_height), Game.win) # 2-3 ms
def play(self):
"""Play a level, requires starting position of player and map of level"""
# Start listening thread
start_new_thread(Game.listen_thread, (self,))
# Misc.
time_step = 1
time_taken = 0
Sprite.behavior_args["screen_wipe"] = True
# Main loop
done = False
while not done:
time_start = time.perf_counter_ns()
# # CLIENT INPUT
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE:
return
elif event.key == pygame.K_F11:
self.full = not self.full
if self.full:
Game.win = pygame.display.set_mode((Game.screen_width, Game.screen_height),
pygame.FULLSCREEN | pygame.DOUBLEBUF)
Game.ext_width = int(Game.screen_width)
Game.ext_height = int(Game.screen_height)
else:
Game.win = pygame.display.set_mode((Game.win_width, Game.win_height))
Game.ext_width = int(Game.win_width)
Game.ext_height = int(Game.win_height)
elif event.key == pygame.K_f:
print(1000 / time_taken, "fps. Fullscreen:", self.full, "Night:", self.night)
elif event.key == pygame.K_z:
self.night = True
Level.darken_imgs(100)
Sprite.darken_all(100)
elif event.key == pygame.K_x:
self.night = False
Level.reset_imgs()
Sprite.reset_imgs()
elif event.key == pygame.K_c:
#print(Sprite.global_sprites)
print(self.focus.collision_time)
# # SERVER FUNCTIONS (BUT NOT QUITE, CURRENTLY)
# Update entities
Sprite.behavior_args['keys'] = pygame.key.get_pressed()
Sprite.behavior_args['time_step'] = time_step
Sprite.behave_all()
# # CLIENT FUNCTIONS
self.render()
pygame.display.flip()
# # UNIVERSAL (SERVER + CLIENT) FUNCTIONS
time_taken = time.perf_counter_ns() - time_start
time_taken_ms = time_taken / 1000000
time_step = min(time_taken_ms / 9, 12)
Tile.advance_frame(time_step)
step_list.append(time_step)
def listen_thread(self):
while True:
conn, addr = self.server.accept()
player_animations = {"front": Animation(('assets/blue_player/player_front.png', 'assets/blue_player/player_front_walk1.png', 'assets/blue_player/player_front_walk2.png'), (0, 1, 0, 2)),
"back": Animation(('assets/blue_player/player_back.png', 'assets/blue_player/player_back_walk1.png', 'assets/blue_player/player_back_walk2.png'), (0, 1, 0, 2)),
"left": Animation(('assets/blue_player/player_left.png', 'assets/blue_player/player_left_walk1.png', 'assets/blue_player/player_left_walk2.png'), (0, 1, 0, 2)),
"right": Animation(('assets/blue_player/player_right.png', 'assets/blue_player/player_right_walk1.png', 'assets/blue_player/player_right_walk2.png'), (0, 1, 0, 2))
}
s = GuestSprite(conn,
pygame.Rect(0, 0, 48, int(112/2)),
player_animations,
"demo"
)
print("Connected to:", addr)
# Per-tick timestep samples, appended by Game.play() and graphed on exit.
step_list = []

if __name__ == "__main__":
    print("Debug Controls:")
    print("\t-z: Night")
    print("\t-x: Day")
    print("\t-f: Print framerate")
    print("\t-Backspace: Stop game")
    print("\nTodo:")
    print("\t-Add more to sprite system")
    print("\t\t-Add more behaviours")
    print("\t-Debug lighting engine")
    print("\t-Complete major overhaul No. 3 billion")
    print("\t-Make Static class more consistent")
    # Worlds module must be called after the window mode is set, and sprite module depends on worlds module.
    from sprite import *
    Level.levels_init()
    # Run game
    maze = Game("demo")
    maze.play()
    pygame.quit()
    # Post-run diagnostics: average timestep plus a matplotlib plot of each tick.
    if len(step_list) > 0:
        step_list.pop(0)  # drop the first sample, which includes startup cost
        print("Average Value:", sum(step_list) / len(step_list))
        graph = plt.figure()
        graph.suptitle('Timestep Each Tick')
        plt.plot(step_list, label="Timestep")
        plt.legend()
        plt.show()
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 5/dialog.py | import pygame
import os
# Pre-init the mixer with a small (512) buffer to reduce sound latency.
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
# Sound initialization
talk = pygame.mixer.Sound('sounds/talk.wav')
open_dialog = pygame.mixer.Sound('sounds/open_dialog.wav')
close_dialog = pygame.mixer.Sound('sounds/close_dialog.wav')
#walk = pygame.mixer.Sound('sounds/walk.wav')
# Main class for dialog boxes
class Dialog:
    """Base class for dialog boxes.

    Holds the box image and geometry; subclasses override display_dialog().
    Call dialog_init() once before constructing any dialog.
    """
    # Shared state filled in by dialog_init() / the game module.
    window_height = 0
    window_width = 0
    display_window = None   # the real pygame display surface
    internal_window = None  # the game's internal render surface

    def __init__(self, image_file, speed=2):
        """Load the dialog box image.

        image_file -- path to the box graphic.
        speed      -- text scroll speed (stored, used by subclasses).
        """
        self.img = pygame.image.load(os.path.join(image_file)).convert_alpha()
        # Pristine copy, used to reset the box after text is drawn onto it.
        self.base_img = self.img.copy()
        self.width = self.img.get_size()[0]
        self.height = self.img.get_size()[1]
        # Horizontally centered; parked just below the visible window.
        self.x = Dialog.window_width / 2 - self.width / 2
        self.y = Dialog.window_height + self.height
        self.font = pygame.font.SysFont('Comic Sans MS', 25, True)
        self.texts = []
        self.text_speed = speed

    def display_dialog(self, screenshot, file=None):
        '''Override in subclass'''
        pass

    @classmethod
    def dialog_init(cls, window_size, display_window):
        """Record the window dimensions and display surface shared by all dialogs."""
        cls.window_width = window_size[0]
        cls.window_height = window_size[1]
        cls.display_window = display_window
# Subclass for text-only dialog boxes
class TextDialog(Dialog):
    """Dialog box that scrolls text pages loaded from a file, one page per line."""

    def display_dialog(self, text_file=None):
        '''Function to take a dialog file and output a dialog in the text box.

        Raises the box into view, types each page character by character
        (space skips ahead / advances pages), then lowers the box again.
        '''
        screenshot = Dialog.internal_window
        # Load texts into dialog: each file line becomes one dialog page.
        self.texts = []
        if text_file != None:
            try:
                with open(os.path.join(text_file), 'r') as rf:
                    for line in rf:
                        if line[len(line) - 1] == "\n":
                            line = line[:len(line) - 1]
                        self.texts.append(line)
            except Exception:
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Any read failure falls back
                # to the stock error page below.
                self.texts.append("If you're reading this message, I messed up :( ")
        else:
            self.texts.append("If you're reading this message, I messed up :( ")
        # Raise dialog box into place
        temp_clock = pygame.time.Clock()
        self.y = Dialog.window_height + 1
        self.img = self.base_img.copy()
        open_dialog.play()
        temp_clock.tick()
        for i in range(round(self.height * 1.2 / 12)):
            screen_copy = screenshot.copy()
            screen_copy.blit(self.img, (self.x, self.y - i * 12))
            Dialog.display_window.blit(pygame.transform.scale(screen_copy, (Dialog.display_window.get_width(), Dialog.display_window.get_height())), (0, 0))
            pygame.display.flip()
            temp_clock.tick(60)
        # Cycle through list of dialogs
        for quote in self.texts:
            # Scroll through the loaded text, one character per frame.
            line = 1
            text_start = 0
            self.y = Dialog.window_height - 1.2 * self.height
            spacebar = False
            screen_copy = screenshot.copy()
            temp_clock.tick()
            for frame in range(len(quote) + 1):
                text = self.font.render(quote[text_start:frame], False, (32, 32, 32))
                self.img.blit(text, (25, 25 * line - 10))
                screen_copy.blit(self.img, (self.x, self.y))
                Dialog.display_window.blit(pygame.transform.scale(screen_copy, (Dialog.display_window.get_width(), Dialog.display_window.get_height())), (0, 0))
                pygame.display.flip()
                # Wrap to the next line once the rendered text nears the box edge.
                if text.get_size()[0] > self.width - 70:
                    line += 1
                    text_start = frame
                if frame % 2:
                    talk.play()
                # Break the text-scroll-loop if the player presses 'space'
                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                        spacebar = True
                if spacebar:
                    break
                temp_clock.tick(30)
            # If the player pressed 'space' for quick-scroll, render the entire text at once.
            line = 1
            text_start = 0
            screen_copy = screenshot.copy()
            for frame in range(len(quote) + 1):
                text = self.font.render(quote[text_start:frame], False, (32, 32, 32))
                self.img.blit(text, (25, 25 * line - 10))
                if text.get_size()[0] > self.width - 70:
                    line += 1
                    text_start = frame
            screen_copy.blit(self.img, (self.x, self.y))
            Dialog.display_window.blit(pygame.transform.scale(screen_copy, (Dialog.display_window.get_width(), Dialog.display_window.get_height())), (0, 0))
            pygame.display.flip()
            # Wait for player to press 'space' to continue
            spacebar = False
            while not spacebar:
                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                        spacebar = True
                pygame.display.flip()
            # Reset the box graphic for the next page.
            self.img = self.base_img.copy()
        # Lower dialog box
        self.y = Dialog.window_height - 1.2 * self.height
        self.img = self.base_img.copy()
        close_dialog.play()
        temp_clock.tick()
        for i in range(round(self.height * 1.2 / 12)):
            screen_copy = screenshot.copy()
            screen_copy.blit(self.img, (self.x, self.y + i * 12))
            Dialog.display_window.blit(pygame.transform.scale(screen_copy, (Dialog.display_window.get_width(), Dialog.display_window.get_height())), (0, 0))
            pygame.display.flip()
            temp_clock.tick(60)
|
Sciguy324/Maze-Game-Python | Mazes (Graphics.py)/Mazes.py | <reponame>Sciguy324/Maze-Game-Python
from graphics import *
from math import ceil
from levels import *
import datetime
# Window setup (16x9 grid of 64-px tiles)
win_height = 64 * 9  # 576
win_width = 64 * 16  # 1024
#light_gray = color_rgb(127, 127, 127)
win = GraphWin("Mazes", win_width, win_height, autoflush=False)
win.setBackground("black")
win.set_icon("assets/rectangle.ico")
# Tile size used in fullscreen, scaled to the monitor width.
large_size = ceil(64 * win.get_screen()[0] / 1024)
#print(large_size)
# Fallback image for unknown tile ids, in both sizes.
missing = Image(Point(0, 0), "tiles/missing.png")
missing.img = missing.rescale(64)
large_missing = Image(Point(0, 0), "tiles/missing.png")
large_missing.img = large_missing.rescale(large_size)
def draw_tilemap(map_of_tiles, normal_ids, large_ids, large=False):
    '''Clone, position, and draw every known tile in the map; returns the image list.'''
    tile_dict = large_ids if large else normal_ids
    images = []
    for row_index, row in enumerate(map_of_tiles):
        for col_index, tile_id in enumerate(row):
            if tile_id in tile_dict:
                tile_img = tile_dict[tile_id].clone()
                # Center the 64-px tile on its grid cell.
                tile_img.move(64 * col_index + 32, 64 * row_index + 32)
                images.append(tile_img)
    for tile_img in images:
        tile_img.draw(win)
    return images
def check_collision(player, dxdy, map_of_tiles, collision_ids):
    '''Return True when moving `player` by `dxdy` lands on no solid tile.

    Out-of-bounds positions (negative coordinates or past the map edge)
    count as collisions.
    '''
    probe = player.clone()
    probe.move(dxdy[0], dxdy[1])
    p1, p2 = probe.p1, probe.p2
    if p1.x < 0 or p1.y < 0 or p2.x < 0 or p2.y < 0:
        return False
    x1, y1 = int(p1.x / 64), int(p1.y / 64)
    x2, y2 = int(p2.x / 64), int(p2.y / 64)
    try:
        # Test all four corners of the moved bounding box.
        for ty, tx in ((y1, x1), (y2, x1), (y1, x2), (y2, x2)):
            if map_of_tiles[ty][tx] in collision_ids:
                return False
    except IndexError:
        # Past the bottom/right edge of the map.
        return False
    return True
def center_at_player(player, playerx, playery, images, size, large=False):
    '''Function to shift the screen so the player is in the center.

    Clamps the scroll position to the bounds in `size` and moves the
    window view.  Scrolling is best-effort: failures (e.g. the level's
    `tilemap` global not existing yet) are ignored.
    '''
    try:
        cx = min(max(playerx - 64 * size[0], 0), size[2] * 64)
        cy = min(max(playery - 64 * size[1], 0), size[3] * 64)
        win.xview_moveto(cx / (64 * len(tilemap[0])))
        win.yview_moveto(cy / (64 * len(tilemap)))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; all other errors stay best-effort.
        pass
def load_level(lvl, lrg):
    '''Build level `lvl` by calling its builder with fullscreen tile size `lrg`.

    Returns (start, movement_speed, colliders, tilemap, decomap,
    normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids).'''
    return lvl(lrg)
def main(win, spawn, speed):
    '''Main program: draw the level, then run the input/movement loop.

    win   -- the GraphWin to draw into.
    spawn -- (x, y) pixel position where the player rectangle starts.
    speed -- base movement speed in pixels per tick.
    Exits when the window is closed (GraphicsError).
    '''
    image_map = draw_tilemap(tilemap, normal_tile_ids, large_tile_ids)
    # Scroll clamp bounds: half a screen on each side, in tile units.
    bounds = (16 / 2, 9 / 2, (len(tilemap[0]) - 16 / 2), (len(tilemap) - 9 / 2))
    player = Rectangle(Point(spawn[0], spawn[1]), Point(spawn[0] + 40, spawn[1] + 40))
    player.setFill("red")
    player.setWidth(0)
    player.draw(win)
    # Decoration layer drawn over the player.
    image_map += draw_tilemap(decomap, normal_deco_ids, large_deco_ids)
    current_speed = speed
    player_x = player.getCenter().x
    player_y = player.getCenter().y
    win.config(scrollregion=(0, 0, 64 * len(tilemap[0]), 64 * len(tilemap)))
    multiplier = 1  # frame-time compensation factor
    key_lock = False
    done = False
    while not done:
        try:
            clock = datetime.datetime.now()
            # Escape toggles fullscreen: everything is redrawn at the other tile size.
            if win.checkKey() == 'Escape':
                win.toggle_fullscreen()
                if win.fullscreen:
                    player.undraw()
                    for i in image_map:
                        i.undraw()
                    image_map = draw_tilemap(tilemap, normal_tile_ids, large_tile_ids, large=True)
                    win.config(scrollregion=(0, 0, 64 * len(tilemap[0]) + large_size * len(tilemap[0]) / 4, 64 * len(tilemap) + large_size * len(tilemap) / 4))
                    player.draw(win)
                    image_map += draw_tilemap(decomap, normal_deco_ids, large_deco_ids, large=True)
                else:
                    player.undraw()
                    for i in image_map:
                        i.undraw()
                    image_map = draw_tilemap(tilemap, normal_tile_ids, large_tile_ids, large=False)
                    win.config(scrollregion=(0, 0, 64 * len(tilemap[0]), 64 * len(tilemap)))
                    player.draw(win)
                    image_map += draw_tilemap(decomap, normal_deco_ids, large_deco_ids, large=False)
            # WASD movement; shift halves the speed. Axes are collision-checked
            # independently so the player can slide along walls.
            dx = 0
            dy = 0
            if win.keys['shift_l']:
                move_speed = 0.5 * speed * multiplier
            else:
                move_speed = speed * multiplier
            if win.keys['w']:
                dy -= move_speed
            if win.keys['s']:
                dy += move_speed
            if check_collision(player, (0, dy), tilemap, colliders):
                player_y += dy
                player.move(0, dy)
            if win.keys['a']:
                dx -= move_speed
            if win.keys['d']:
                dx += move_speed
            if check_collision(player, (dx, 0), tilemap, colliders):
                player_x += dx
                player.move(dx, 0)
            center_at_player(player, player_x, player_y, image_map, bounds)
            update(120)
            # Scale next tick's movement by this tick's duration (capped at 20).
            multiplier = min(20, (datetime.datetime.now() - clock).microseconds / 9000)
        except GraphicsError:
            # Raised when the window is closed; exit the loop cleanly.
            done = True
            print("Game Closed")
if __name__ == "__main__":
    # Choose the level builder, load it, then hand control to the main loop.
    level = riverside
    start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids = load_level(level, large_size)
    main(win, start, movement_speed)
|
Sciguy324/Maze-Game-Python | Mazes (Graphics.py)/levels.py | from graphics import *
def build_id_lists(ids, large_size):
    """Build {tile_id: Image} dicts at normal (64 px) and fullscreen sizes.

    ids -- mapping of tile id to image file path.
    Returns (normal_tile_ids, large_tile_ids).
    """
    normal_tile_ids = {}
    large_tile_ids = {}
    for tile_id, path in ids.items():
        small = Image(Point(0, 0), path)
        small.img = small.rescale(64)
        big = Image(Point(0, 0), path)
        big.img = big.rescale(large_size)
        normal_tile_ids[tile_id] = small
        large_tile_ids[tile_id] = big
    return normal_tile_ids, large_tile_ids
def build_matrix(width, height):
    """Return a height x width matrix of zeros with independent rows.

    Fixes a row-aliasing bug: the original appended the SAME row list
    object `height` times, so mutating any cell changed every row.
    (The sibling file's generate_litmap already copies rows correctly.)
    """
    return [[0] * width for _ in range(height)]
# Each level builder returns: start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids
def test(large_size):
    """Demo level: a grid spelling out a pattern with a single solid block type."""
    # Worldmap that tells 'draw_tilemap' what to render and where
    tilemap = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
               [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
               [0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0],
               [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
               [0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # Worldmap that tells 'draw_tilemap' how to render the decoration layer.
    # In this case, the 'build_matrix' function is used to create a blank decomap.
    decomap = build_matrix(len(tilemap[0]), len(tilemap))
    # Setup tile images for both regular and fullscreen.
    # To handle broken ids in the tilemap, the 'missing' tile has been omitted from these dictionaries and placed in the root script.
    tile_dict = {1: 'tiles/block.png'}
    deco_dict = {}
    normal_tile_ids, large_tile_ids = build_id_lists(tile_dict, large_size)
    normal_deco_ids, large_deco_ids = build_id_lists(deco_dict, large_size)
    # List of blocks that will be solid
    colliders = [1]
    # Basic player setup
    start = (0, 0)
    movement_speed = 3
    return start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids
def house(large_size):
    """Level: a house by the water with a path leading to it."""
    tilemap = [[16, 16, 16, 16, 16, 39, 20, 9, 22, 9, 9, 9, 21, 21, 21, 21],
               [16, 16, 16, 16, 16, 17, 9, 9, 9, 10, 11, 11, 11, 11, 10, 9],
               [16, 16, 16, 16, 16, 20, 21, 21, 9, 10, 8, 8, 8, 8, 10, 9],
               [16, 39, 19, 19, 20, 9, 21, 21, 9, 10, 8, 8, 8, 8, 10, 21],
               [19, 20, 24, 25, 26, 9, 9, 21, 9, 11, 11, 11, 8, 11, 11, 9],
               [9, 9, 27, 28, 33, 25, 25, 25, 25, 25, 25, 36, 28, 29, 9, 9],
               [21, 21, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 9, 9],
               [9, 9, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 9, 22],
               [21, 9, 9, 9, 9, 9, 9, 9, 21, 21, 9, 9, 9, 9, 9, 9],
               [21, 9, 9, 9, 9, 9, 9, 9, 21, 21, 9, 9, 9, 9, 9, 9]]
    # No decorations in this level: blank decomap.
    decomap = build_matrix(len(tilemap[0]), len(tilemap))
    tile_dict = {8: 'tiles/wood.png', 9: 'tiles/grass.png', 10: 'tiles/corner.png', 11: 'tiles/wall.png', 16: 'tiles/water.png', 17: 'tiles/right_water.png', 19: 'tiles/bottom_water.png', 20: 'tiles/bottom_right_water.png', 21: 'tiles/grass3.png', 22: 'tiles/grass2.png', 24: 'tiles/top_left_path.png', 25: 'tiles/top_path.png', 26: 'tiles/top_right_path.png', 27: 'tiles/left_path.png', 28: 'tiles/path.png', 29: 'tiles/right_path.png', 30: 'tiles/bottom_left_path.png', 31: 'tiles/bottom_path.png', 32: 'tiles/bottom_right_path.png', 33: 'tiles/path_corner1.png', 36: 'tiles/path_corner4.png', 39: 'tiles/water_corner3.png'}
    deco_dict = {}
    normal_tile_ids, large_tile_ids = build_id_lists(tile_dict, large_size)
    normal_deco_ids, large_deco_ids = build_id_lists(deco_dict, large_size)
    # Solid tiles: walls and water.
    colliders = [10, 11, 16, 17, 19, 20, 39]
    start = (64 * 0, 64 * 8)
    movement_speed = 4
    return start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids
def riverside(large_size):
    """Level: a larger riverside map with a building and winding paths."""
    tilemap = [[9, 9, 21, 9, 9, 9, 9, 12, 13, 13, 14, 9, 9, 9, 9, 22, 21, 9, 9, 9, 9, 22, 9, 21, 9, 9, 21, 9, 9, 22, 9, 9],
               [21, 9, 9, 9, 22, 9, 12, 37, 16, 16, 38, 14, 9, 9, 9, 9, 9, 9, 9, 10, 23, 23, 23, 23, 23, 10, 23, 23, 23, 23, 10, 9],
               [9, 21, 9, 9, 9, 12, 37, 16, 16, 16, 16, 38, 14, 9, 24, 25, 25, 25, 26, 10, 8, 8, 8, 8, 8, 10, 8, 8, 8, 8, 10, 9],
               [9, 9, 9, 21, 9, 15, 16, 16, 16, 16, 16, 16, 17, 9, 30, 31, 35, 28, 29, 10, 8, 8, 8, 8, 8, 10, 8, 8, 8, 8, 10, 9],
               [9, 9, 9, 21, 9, 18, 40, 16, 16, 16, 16, 16, 17, 21, 21, 21, 27, 28, 29, 10, 8, 8, 8, 8, 8, 10, 8, 8, 8, 8, 10, 9],
               [9, 22, 9, 21, 21, 9, 15, 16, 16, 16, 16, 39, 20, 9, 9, 9, 27, 28, 29, 11, 11, 11, 10, 23, 8, 23, 23, 8, 23, 23, 10, 9],
               [9, 9, 9, 9, 21, 9, 18, 40, 16, 16, 39, 20, 9, 9, 9, 9, 27, 28, 29, 9, 9, 9, 10, 8, 8, 8, 8, 8, 8, 8, 10, 9],
               [21, 21, 21, 21, 21, 9, 9, 18, 19, 19, 20, 9, 9, 21, 21, 9, 27, 28, 29, 9, 22, 9, 10, 8, 8, 8, 8, 8, 8, 8, 10, 9],
               [9, 9, 21, 21, 21, 9, 9, 9, 9, 9, 9, 9, 9, 9, 21, 9, 27, 28, 29, 21, 21, 9, 11, 11, 8, 8, 11, 11, 11, 11, 11, 9],
               [9, 9, 9, 9, 21, 21, 21, 21, 21, 21, 21, 21, 9, 9, 21, 21, 27, 28, 29, 9, 21, 9, 9, 27, 28, 28, 29, 9, 9, 21, 9, 9],
               [13, 14, 9, 9, 21, 9, 9, 9, 9, 9, 9, 21, 9, 22, 9, 9, 27, 28, 33, 25, 25, 25, 25, 36, 28, 28, 29, 9, 21, 21, 21, 9],
               [16, 38, 14, 9, 9, 9, 21, 21, 22, 9, 9, 21, 21, 9, 9, 9, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 9, 21, 22, 9, 9],
               [16, 16, 38, 14, 9, 9, 9, 9, 9, 9, 9, 9, 21, 9, 9, 9, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 9, 9, 12, 13, 13],
               [16, 16, 16, 38, 13, 13, 13, 13, 13, 13, 14, 9, 9, 21, 9, 9, 9, 9, 9, 9, 9, 12, 13, 13, 13, 13, 13, 13, 13, 37, 16, 16],
               [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 38, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 37, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16],
               [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16],
               [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16],
               [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]]
    # No decorations in this level: blank decomap.
    decomap = build_matrix(len(tilemap[0]), len(tilemap))
    tile_dict = {8: 'tiles/wood.png', 9: 'tiles/grass.png', 10: 'tiles/corner.png', 11: 'tiles/wall.png', 12: 'tiles/top_left_water.png', 13: 'tiles/top_water.png', 14: 'tiles/top_right_water.png', 15: 'tiles/left_water.png', 16: 'tiles/water.png', 17: 'tiles/right_water.png', 18: 'tiles/bottom_left_water.png', 19: 'tiles/bottom_water.png', 20: 'tiles/bottom_right_water.png', 21: 'tiles/grass3.png', 22: 'tiles/grass2.png', 23: 'tiles/pink_wall.png', 24: 'tiles/top_left_path.png', 25: 'tiles/top_path.png', 26: 'tiles/top_right_path.png', 27: 'tiles/left_path.png', 28: 'tiles/path.png', 29: 'tiles/right_path.png', 30: 'tiles/bottom_left_path.png', 31: 'tiles/bottom_path.png', 32: 'tiles/bottom_right_path.png', 33: 'tiles/path_corner1.png', 35: 'tiles/path_corner3.png', 36: 'tiles/path_corner4.png', 37: 'tiles/water_corner1.png', 38: 'tiles/water_corner2.png', 39: 'tiles/water_corner3.png', 40: 'tiles/water_corner4.png'}
    deco_dict = {}
    normal_tile_ids, large_tile_ids = build_id_lists(tile_dict, large_size)
    normal_deco_ids, large_deco_ids = build_id_lists(deco_dict, large_size)
    # Solid tiles: walls and all water/water-corner variants.
    colliders = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 37, 38, 39, 40]
    start = (64 * 0, 64 * 8)
    movement_speed = 4
    return start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids
def mountain(large_size):
    """Level: a cliffside map with trees and a lamp post on the decoration layer."""
    tilemap = [[9, 9, 9, 9, 9, 9, 9, 9, 9, 47, 9, 9, 9, 9, 9, 24],
               [9, 9, 52, 9, 9, 9, 52, 9, 9, 47, 9, 9, 9, 52, 24, 36],
               [9, 9, 9, 9, 52, 9, 9, 9, 43, 44, 9, 9, 51, 24, 36, 28],
               [42, 43, 9, 9, 9, 9, 9, 44, 41, 41, 9, 9, 24, 36, 28, 34],
               [41, 41, 42, 43, 43, 43, 44, 41, 41, 41, 9, 24, 36, 28, 34, 32],
               [41, 41, 41, 41, 41, 41, 41, 41, 49, 50, 24, 36, 28, 34, 32, 9],
               [48, 49, 41, 41, 41, 41, 41, 50, 9, 9, 27, 28, 34, 32, 9, 9],
               [9, 9, 48, 49, 49, 49, 50, 9, 52, 9, 27, 28, 29, 9, 9, 9],
               [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 27, 28, 29, 9, 52, 9]]
    # Decoration layer: tree tops (2) and the lamp post top (1), drawn above the base tiles.
    decomap = [[0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0],
               [0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tile_dict = {9: 'tiles/grass.png', 24: 'tiles/top_left_path.png', 27: 'tiles/left_path.png', 28: 'tiles/path.png', 29: 'tiles/right_path.png', 32: 'tiles/bottom_right_path.png', 34: 'tiles/path_corner2.png', 36: 'tiles/path_corner4.png', 41: 'tiles/cliff.png', 42: 'tiles/cliff_top_left.png', 43: 'tiles/cliff_top.png', 44: 'tiles/cliff_top_right.png', 47: 'tiles/cliff_right.png', 48: 'tiles/cliff_bottom_left.png', 49: 'tiles/cliff_bottom.png', 50: 'tiles/cliff_bottom_right.png', 51: 'tiles/lamp_post_bottom.png', 52: 'tiles/tree_bottom.png'}
    deco_dict = {1: 'tiles/lamp_post_top.png', 2: 'tiles/tree_top.png'}
    normal_tile_ids, large_tile_ids = build_id_lists(tile_dict, large_size)
    normal_deco_ids, large_deco_ids = build_id_lists(deco_dict, large_size)
    # Solid tiles: cliffs plus the bottom halves of the lamp post and trees.
    colliders = [41, 42, 43, 44, 47, 48, 49, 50, 51, 52]
    start = (64 * 0, 64 * 8)
    movement_speed = 4
    return start, movement_speed, colliders, tilemap, decomap, normal_tile_ids, large_tile_ids, normal_deco_ids, large_deco_ids
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 1/Maze.py | import pygame
import os
from worlds import *
from sys import platform
from sprite import *
# If the platform is windows, ensure that the system is not stretching the game window to the display.
if platform == "win32":
    import ctypes
    ctypes.windll.user32.SetProcessDPIAware()
    os.environ['SDL_VIDEODRIVER'] = 'directx'
# Pygame initialization (small mixer buffer to reduce sound latency).
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
screen_width = pygame.display.Info().current_w
screen_height = pygame.display.Info().current_h
# Window setup: 16x9 grid of 64-px tiles.
win_width = 64 * 16
win_height = 64 * 9
win = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption("Maze")
icon = pygame.image.load(os.path.join('assets', 'icon.bmp'))
pygame.display.set_icon(icon)
# Fallback image for unknown tile ids.
missing = pygame.image.load(os.path.join('tiles/missing.png'))
missing = pygame.transform.scale(missing, (64, 64)).convert()
# Dialog system initialization
Dialog.dialog_init((win_width, win_height), win)
def draw_tilemap(tiles, ids, window, disp, deco=False):
    '''Blit the on-screen slice of a tilemap onto `window`; returns `window`.

    disp -- (dx, dy) camera displacement (non-positive while scrolling).
    deco -- reserved flag for the decoration layer (currently unused).
    '''
    # First visible tile column/row for the current displacement.
    x_tile = int(abs(disp[0] / 64))
    y_tile = int(abs(disp[1] / 64))
    for row_off, row in enumerate(tiles[y_tile:y_tile + 10]):
        py = row_off * 64 + disp[1] + y_tile * 64
        for col_off, tile_id in enumerate(row[x_tile:x_tile + 17]):
            if tile_id in ids:
                window.blit(ids[tile_id], (col_off * 64 + disp[0] + x_tile * 64, py))
    if deco:
        pass  # placeholder for future decoration-layer handling
    return window
def illuminate(window, disp, light, sources):
    '''Blit the light-glow image centered over each light-source tile; returns `window`.'''
    for src_x, src_y in sources:
        # Offset by -64 so the 3-tile glow is centered on the source tile.
        window.blit(light, (src_x * 64 + disp[0] - 64, src_y * 64 + disp[1] - 64))
    return window
def center_at(user, world_width, world_height):
    '''Return the (dx, dy) displacement that centers the window on the player,
    clamped so the camera never scrolls past the world edges.'''
    half_w = win_width / 2
    half_h = win_height / 2
    dispx = max(min(half_w - user.x, 0), win_width - world_width)
    dispy = max(min(half_h - user.y, 0), win_height - world_height)
    return (dispx, dispy)
def check_zone(user, loading_zones):
    '''Return the loading-zone entry at the sprite's center tile, or None.

    loading_zones maps (tile_x, tile_y) -> zone data.
    '''
    px, py = user.rect.center
    tile = (int(px / 64), int(py / 64))
    return loading_zones.get(tile)
def _wipe_basic(wipe, screenshot, start):
    '''Shared wipe animation: slide `wipe` upward from vertical offset `start`
    over a frozen, window-scaled screenshot (frame-rate independent).

    Factored out of wipe_screen/unwipe_screen, whose bodies differed only
    in the starting offset (mirrors `wipe_basic` in the checkpoint-5 build).
    '''
    temp_clock = pygame.time.Clock()
    scaled_wipe = pygame.transform.scale(wipe, (win.get_width(), win.get_height() * 2))
    scaled_screenshot = pygame.transform.scale(screenshot, (win.get_width(), win.get_height()))
    i = 0
    while i < win.get_height() * 2:
        win.blit(scaled_screenshot, (0, 0))
        win.blit(scaled_wipe, (0, start - i))
        pygame.display.flip()
        time_step = min(temp_clock.tick() / 10, 10) * 24
        i += time_step

def wipe_screen(wipe, screenshot):
    '''Function to wipe the screen black. Requires copy of the screen'''
    _wipe_basic(wipe, screenshot, win.get_height())

def unwipe_screen(wipe, screenshot):
    '''Function to unwipe the screen. Requires a copy of screen without any wiping effect'''
    _wipe_basic(wipe, screenshot, 0)
def play_level(start, tilemap, decomap, loading_zones, full, data):
    '''Play a level, requires starting position of player and map of level.

    start         -- (tile_x, tile_y) spawn position.
    tilemap/decomap -- base and decoration tile matrices.
    loading_zones -- {(tile_x, tile_y): (next_level, next_start)} transitions.
    full          -- current fullscreen flag.
    data          -- persistent player data dict (carries 'health' between levels).
    Returns (game_over, next_level, next_start, full, data).
    '''
    global win
    # Internal window
    win2 = pygame.Surface((win_width, win_height))
    Dialog.internal_window = win2
    # Player bounding box
    player = pygame.Rect(start[0] * 64, start[1] * 64, 48, int(112/2))
    # Player sprites
    player_images = {"front": 'assets/player/player_front.png',
                     "front_walk1": 'assets/player/player_front_walk1.png',
                     "front_walk2": 'assets/player/player_front_walk2.png',
                     "back": 'assets/player/player_back.png',
                     "back_walk1": 'assets/player/player_back_walk1.png',
                     "back_walk2": 'assets/player/player_back_walk2.png',
                     "left": 'assets/player/player_left.png',
                     "left_walk1": 'assets/player/player_left_walk1.png',
                     "left_walk2": 'assets/player/player_left_walk2.png',
                     "right": 'assets/player/player_right.png',
                     "right_walk1": 'assets/player/player_right_walk1.png',
                     "right_walk2": 'assets/player/player_right_walk2.png'
                     }
    # Default player sprite data
    player_sprite = PlayerSprite(player, player_images, PlayerSprite.normal, health=data['health'])
    # NOTE(review): 'behavour_args' spelling matches the sprite module's attribute.
    Sprite.behavour_args = {'tilemap': tilemap,
                            'collisions': collisions
                            }
    # Misc.
    # NOTE(review): width taken from row 1 — assumes a rectangular tilemap.
    world_width = 64 * len(tilemap[1])
    world_height = 64 * len(tilemap)
    displacement = center_at(player, world_width, world_height)
    time_step = 1
    clock = pygame.time.Clock()
    timer = pygame.time.Clock() # For measuring how long various lines are taking to run.
    time_taken = 0 # For recording the above
    # Nightime-shading setup
    shade = pygame.Surface((win_width, win_height))
    shade.set_alpha(128)
    shade.fill((0, 0, 27))
    # Light source setup
    light = pygame.image.load(os.path.join('assets/light.png')).convert_alpha()
    # Screen wipe setup
    wipe_up = pygame.image.load(os.path.join('assets/wipe_up.png')).convert_alpha()
    wipe_down = pygame.image.load(os.path.join('assets/wipe_down.png')).convert_alpha()
    first_time = True # Allows screen un-wipe to trigger once.
    # Light sources setup: deco id 1 (lamp post top) marks a light source.
    night = False
    light_sources = []
    y = -1
    for i in decomap:
        y +=1
        x = -1
        for j in i:
            x += 1
            if j == 1:
                light_sources.append((x, y))
    # Main loop
    done = False
    while not done:
        timer.tick()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True, None, None, full, data
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE:
                    return True, None, None, full, data
                elif event.key == pygame.K_F11:
                    # Toggle fullscreen; the internal surface is scaled at blit time.
                    full = not(full)
                    if full:
                        win = pygame.display.set_mode((screen_width, screen_height), pygame.FULLSCREEN | pygame.DOUBLEBUF)
                    else:
                        win = pygame.display.set_mode((win_width, win_height))
                elif event.key == pygame.K_f:
                    # Debug: print framerate.
                    try:
                        print(1000 / time_taken, "fps. Fullscreen:", full, "Night:", night)
                        #print(timer.get_fps(), "fps. Fullscreen:", full, "Night:", night)
                        #print(time_taken, "ms. Fullscreen:", full, "Night:", night)
                    except ZeroDivisionError:
                        print("Failed to compute")
                elif event.key == pygame.K_z:
                    night = True
                elif event.key == pygame.K_x:
                    night = False
        # Player movement via keyboard input
        keys = pygame.key.get_pressed()
        # Update internal window.
        Sprite.behavour_args['keys'] = keys
        Sprite.behavour_args['time_step'] = time_step
        Sprite.behave_all()
        displacement = center_at(player, world_width, world_height)
        win2 = draw_tilemap(tilemap, tile_ids, win2, displacement) # 6-7 ms
        Sprite.blit_all(displacement, win2)
        win2 = draw_tilemap(decomap, deco_ids, win2, displacement, deco=True) # 0-1 ms
        if night: # 3-4 ms, 5-6 ms if light sources are present
            win2.blit(shade, (0, 0)) # 3-4 ms
            win2 = illuminate(win2, displacement, light, light_sources) # 0 ms, 1 ms if light sources are present
        # Transfer internal window to displayed window and update screen.
        pygame.transform.scale(win2, (win.get_width(), win.get_height()), win) # 2-3 ms
        if first_time:
            first_time = False
            unwipe_screen(wipe_down, win2)
        pygame.display.flip() # 1-2 ms
        time_step = min(clock.tick_busy_loop(60) / 10, 2.5)
        # Check player position for loading zone.
        # If loading zone is found, end this level so the next can be loaded.
        current_zone = check_zone(player_sprite, loading_zones)
        if current_zone:
            wipe_screen(wipe_up, win2)
            data['health'] = player_sprite.health
            return False, current_zone[0], current_zone[1], full, data
        time_taken = timer.tick()
if __name__ == "__main__":
    print("Debug Controls:")
    print("\t-z: Night")
    print("\t-x: Day")
    print("\t-f: Print framerate")
    print("\t-Backspace: Stop game")
    print("\nTodo:")
    print("\t-Drastically rework sprite and level systems")
    print("\t-Animated tiles")
    print("\t-Expansion of sprite system")
    print("\t\t-Add more behaviours")
    print("\t\t-Combat system")
    # Starting level and position
    level = demo
    start = (12, 10)
    game_stop = False
    fullscreen = False
    data = {'health': 100}
    # Main game loop: run each level until play_level reports the game is over,
    # then carry its returned (next level, spawn, settings, data) forward.
    while not game_stop:
        Sprite.clear()
        tile_ids, tilemap, deco_ids, decomap, collisions, loading_zones, = level()
        game_stop, level, start, fullscreen, data = play_level(start, tilemap, decomap, loading_zones, fullscreen, data)
        # Update persistent sprites upon level end.
        for i in Sprite.instances:
            try:
                # BUG FIX: these assignments previously read attributes from
                # `self`, which is undefined at module scope; the bare
                # `except: pass` silently swallowed the resulting NameError,
                # so sprite state was never actually persisted.
                Sprite.persists[i.name]['rect'] = str(i.rect.topleft + i.rect.size)
                Sprite.persists[i.name]['level'] = i.level
                Sprite.persists[i.name]['behavour'] = i.behavour
                Sprite.persists[i.name]['action'] = i.action.__name__
                Sprite.persists[i.name]['action_args'] = i.action_args
                Sprite.persists[i.name]['health'] = i.health
                Sprite.persists[i.name]['max_health'] = i.max_health
            except (KeyError, AttributeError):
                # Sprite has no persistence entry (or lacks one of these
                # attributes) -- keep the original best-effort behavior.
                pass
    pygame.quit()
|
Sciguy324/Maze-Game-Python | Maze (Multiplayer Tests)/Maze (Server 2)/sprite.py | from dialog import *
import random
from _thread import *
import json
from math import ceil
import time
from traceback import print_exc
from sys import stdout
# # WORLDS MODULE
class Tile:
    """A 64x64 map tile, optionally animated, loaded from a vertical strip.

    Each 16-pixel-tall band of the source image becomes one 64x64 animation
    frame (the strip is scaled 4x). All Tile instances share a single
    animation clock (``img_index``), advanced via ``advance_frame``.
    """
    interval = 50             # time-steps per animation frame advance
    img_index = 0             # shared animation clock for ALL tiles
    colorkey = (200, 50, 200)

    def __init__(self, file):
        img = pygame.image.load(os.path.join(file))
        # Scan the pixels to decide whether the image uses per-pixel alpha.
        array = pygame.PixelArray(img)
        transparent = False
        for column in array:
            for pixel in column:
                if pixel is not None and img.unmap_rgb(pixel).a < 255:
                    transparent = True
                    break
            if transparent:
                break
        del array  # release the PixelArray's surface lock before converting
        # BUG FIX: convert()/convert_alpha() return a NEW surface and do not
        # modify the original in place; the previous code discarded the return
        # value, so the image was never actually converted to display format.
        if transparent:
            img = img.convert_alpha()
        else:
            img = img.convert()
        self.alpha = bool(transparent)
        # Split the scaled-up strip into individual 64x64 frames.
        small_height = img.get_height() // 16
        img = pygame.transform.scale(img, (64, small_height * 64))
        self.img_list = []
        for i in range(small_height):
            if transparent:
                temp_img = pygame.Surface((64, 64), pygame.SRCALPHA, 32)
                temp_img = temp_img.convert_alpha()
            else:
                temp_img = pygame.Surface((64, 64))
            temp_img.blit(img, (0, 0), (0, 64 * i, 64, 64))
            self.img_list.append(temp_img)
        # Pristine copies used to undo lighting/darkening effects.
        self.backup_list = tuple(i.copy() for i in self.img_list)
        self.max_imgs = len(self.img_list)
        self.main_img = img

    def draw(self, surface, dest=(0, 0)):
        """Draw the current animation frame to a surface."""
        if self.max_imgs > 1:
            surface.blit(self.img_list[int(Tile.img_index % self.max_imgs)], dest)
        else:
            surface.blit(self.img_list[0], dest)

    def draw_norm(self, surface, dest=(0, 0)):
        """Draw the current frame from the unmodified backups (full brightness)."""
        if self.max_imgs > 1:
            surface.blit(self.backup_list[int(Tile.img_index % self.max_imgs)], dest)
        else:
            surface.blit(self.backup_list[0], dest)

    def reset(self):
        """Discard all changes to the tile, restoring it from the backups."""
        self.img_list = list(i.copy() for i in self.backup_list)

    @classmethod
    def advance_frame(cls, time_step=1):
        """Advance the shared animation clock by one (scaled) time step."""
        cls.img_index += time_step / cls.interval
        if cls.img_index > 64:
            cls.img_index = 0
# Construction functions for level initialization
def build_id_dict(tiles):
    """Map each tile id from *tiles* to a loaded Tile image."""
    return {tile_id: Tile(path) for tile_id, path in tiles.items()}
def build_matrix(width, height):
    """Return a height-by-width matrix of zeros (independent row lists)."""
    return [[0] * width for _ in range(height)]
def load_from_string(string):
    """Reconstruct a Level from its JSON string representation."""
    data = json.loads(string)
    # JSON cannot use tuples as keys, so zones are stored as a list of
    # records; rebuild the {(x, y): [level_name, (x, y)]} mapping here.
    zones = {}
    for entry in data["loading_zones"]:
        zones[tuple(entry["zone"])] = [entry["target_level"], tuple(entry["target_pos"])]
    return Level(data["colliders"], data["tilemap"], data["decomap"], zones,
                 data["lightmap"], data["spawn"], data["name"])
class Level:
    """Class for levels.

    A Level bundles one game area's tile map, decoration map, collision ids,
    loading zones and light map. Every constructed Level registers itself in
    ``Level.reference_dict`` under its name so other code can look it up.
    """
    # Shared tile / decoration images, keyed by the integer ids used in the maps.
    tile_ids = build_id_dict({1: 'tiles/block.png', 2: 'tiles/stone_table_top.png', 3: 'tiles/stone_table_left.png',
                              4: 'tiles/stone_table_right.png', 5: 'tiles/stone_table_bottom.png',
                              6: 'tiles/stone_wall.png', 7: 'tiles/void.png', 8: 'tiles/wood.png', 9: 'tiles/grass.png',
                              10: 'tiles/corner.png', 11: 'tiles/wall.png', 12: 'tiles/top_left_water.png',
                              13: 'tiles/top_water.png', 14: 'tiles/top_right_water.png', 15: 'tiles/left_water.png',
                              16: 'tiles/water.png', 17: 'tiles/right_water.png', 18: 'tiles/bottom_left_water.png',
                              19: 'tiles/bottom_water.png', 20: 'tiles/bottom_right_water.png', 21: 'tiles/grass3.png',
                              22: 'tiles/grass2.png', 23: 'tiles/pink_wall.png', 24: 'tiles/top_left_path.png',
                              25: 'tiles/top_path.png', 26: 'tiles/top_right_path.png', 27: 'tiles/left_path.png',
                              28: 'tiles/path.png', 29: 'tiles/right_path.png', 30: 'tiles/bottom_left_path.png',
                              31: 'tiles/bottom_path.png', 32: 'tiles/bottom_right_path.png',
                              33: 'tiles/path_corner1.png', 34: 'tiles/path_corner2.png', 35: 'tiles/path_corner3.png',
                              36: 'tiles/path_corner4.png', 37: 'tiles/water_corner1.png',
                              38: 'tiles/water_corner2.png', 39: 'tiles/water_corner3.png',
                              40: 'tiles/water_corner4.png', 41: 'tiles/cliff_top_left.png', 42: 'tiles/cliff_top.png',
                              43: 'tiles/cliff_top_right.png', 44: 'tiles/cliff_face_left.png',
                              45: 'tiles/cliff_center.png', 46: 'tiles/cliff_face_right.png',
                              47: 'tiles/cliff_bottom_left.png', 48: 'tiles/cliff_bottom.png',
                              49: 'tiles/cliff_bottom_right.png', 50: 'tiles/cliff_left.png',
                              51: 'tiles/cliff_back.png', 52: 'tiles/cliff_right.png',
                              53: 'tiles/cliff_left_corner.png', 54: 'tiles/cliff_right_corner.png',
                              55: 'tiles/cliff_stairs_top.png', 56: 'tiles/cliff_stairs.png',
                              57: 'tiles/cliff_stairs_bottom.png', 58: 'tiles/lamp_post_bottom.png',
                              59: 'tiles/table.png', 60: 'tiles/table_left.png', 61: 'tiles/table_center.png',
                              62: 'tiles/table_right.png', 63: 'tiles/table_bottom_left.png',
                              64: 'tiles/table_bottom.png', 65: 'tiles/table_bottom_right.png',
                              66: 'tiles/door_bottom.png', 67: 'tiles/door_top.png', 68: 'tiles/bricks_left.png',
                              69: 'tiles/bricks.png', 70: 'tiles/bricks_right.png', 71: 'tiles/bricks_bottom_left.png',
                              72: 'tiles/bricks_bottom.png', 73: 'tiles/bricks_bottom_right.png',
                              74: 'tiles/window.png', 75: 'tiles/pink_wall_base.png',
                              76: 'tiles/white_green_wall_base_rimmed.png',
                              77: 'tiles/white_green_wall_painting_base.png', 78: 'tiles/drawer.png',
                              79: 'tiles/white_green_wall_base_drawer.png', 80: 'tiles/drawer_legs.png',
                              81: 'tiles/white_green_wall_base_left.png', 82: 'tiles/white_green_wall_base.png',
                              83: 'tiles/white_green_wall_base_right.png', 84: 'tiles/tree_trunk.png',
                              85: 'tiles/water2.png', 86: 'tiles/water_ripple.png', 87: 'tiles/wood_shade.png'})
    deco_ids = build_id_dict({1: 'tiles/lamp_post_top.png', 2: 'tiles/table_top_left.png', 3: 'tiles/table_top.png',
                              4: 'tiles/table_top_right.png', 5: 'tiles/roof_right_bottom.png',
                              6: 'tiles/roof_right_middle1.png', 7: 'tiles/roof_right_middle2.png',
                              8: 'tiles/roof_right_top.png', 9: 'tiles/roof_left_bottom.png',
                              10: 'tiles/roof_left_middle1.png', 11: 'tiles/roof_left_middle2.png',
                              12: 'tiles/roof_left_top.png', 13: 'tiles/roof_top1.png', 14: 'tiles/roof_top2.png',
                              15: 'tiles/top_roof_shadow.png', 16: 'tiles/pink_wall_top.png',
                              17: 'tiles/white_green_wall_top_rimmed.png',
                              18: 'tiles/white_green_wall_painting_top.png', 19: 'tiles/white_green_wall_clock.png',
                              20: 'tiles/lamp.png', 21: 'tiles/white_green_wall_top_left.png',
                              22: 'tiles/white_green_wall_top.png', 23: 'tiles/white_green_wall_top_right.png',
                              24: 'tiles/roof_right_edge.png', 25: 'tiles/roof_left_edge.png',
                              26: 'tiles/tree_top_left.png', 27: 'tiles/tree_top.png', 28: 'tiles/tree_top_right.png',
                              29: 'tiles/tree_mid_left.png', 30: 'tiles/tree_mid.png', 31: 'tiles/tree_mid_right.png'})
    # name -> Level, used to resolve level references (loading zones, sprites).
    reference_dict = {}

    def __init__(self, colliders, tilemap, decomap, loading_zones, lightmap, default_start=(0, 0), name="Unnamed"):
        """Store the level data and register this Level under *name*.

        colliders -- tile ids that block movement
        tilemap / decomap -- 2-D lists of tile ids (decomap may be None)
        loading_zones -- {(x, y): [target_level_name, (target_x, target_y)]}
        lightmap -- 2-D list marking light-source positions
        default_start -- tile coordinates where the player spawns
        """
        # self.t_ids = build_id_dict(tile_dict)
        # self.d_ids = build_id_dict(deco_dict)
        self.colliders = colliders
        self.tilemap = tilemap
        self.lightmap = lightmap
        # NOTE(review): width is measured from row 1, not row 0 -- assumes all
        # rows are equal length and at least two rows exist; confirm.
        self.width = 64 * len(tilemap[1])
        self.height = 64 * len(tilemap)
        if decomap is None:
            self.decomap = build_matrix(len(tilemap[0]), len(tilemap))
        else:
            self.decomap = decomap
        self.loading_zones = loading_zones
        self.default_start = default_start
        if name in Level.reference_dict:
            print("WARNING: Multiple levels have the same name '{}'. Overwriting!".format(name))
        self.name = name
        Level.reference_dict[name] = self

    def __repr__(self):
        return "<Level Object | Name: {}>".format(self.name)

    def jsonify(self):
        """Serialize this level to a JSON string (inverse of load_from_string)."""
        # Tuple keys are not valid JSON, so loading zones become a record list.
        loading_zones = []
        for i, j in self.loading_zones.items():
            loading_zones.append({"zone": list(i), "target_level": j[0], "target_pos": j[1]})
        export_dict = {"colliders": self.colliders,
                       "tilemap": self.tilemap,
                       "decomap": self.decomap,
                       "loading_zones": loading_zones,
                       "lightmap": self.lightmap,
                       "spawn": self.default_start,
                       "name": self.name
                       }
        return json.dumps(export_dict)

    @classmethod
    def levels_init(cls):
        """Load and register every .json level file found in levels/."""
        found_files = [os.path.join('levels', f) for f in os.listdir('levels/') if
                       os.path.isfile(os.path.join('levels', f)) and os.path.splitext(f)[1] == '.json']
        for i in found_files:
            with open(i) as rf:
                data = json.load(rf)
            loading_zones = {}
            for j in data["loading_zones"]:
                loading_zones[tuple(j["zone"])] = [j["target_level"], tuple(j["target_pos"])]
            # Constructing the Level registers it in reference_dict.
            Level(data["colliders"],
                  data["tilemap"],
                  data["decomap"],
                  loading_zones,
                  data["lightmap"],
                  data["spawn"],
                  data["name"])

    @classmethod
    def darken_imgs(cls, amount=100):
        """Darken all tiles and decos"""
        shade = pygame.Surface((64, 64)).convert_alpha()
        shade.fill((amount, amount, amount, 100))
        for i, j in Level.tile_ids.items():
            for k in j.img_list:
                k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
        for i, j in Level.deco_ids.items():
            for k in j.img_list:
                k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)

    @classmethod
    def reset_imgs(cls):
        """Reset changes to all tiles and decos"""
        for i, j in cls.tile_ids.items():
            j.reset()
        for i, j in cls.deco_ids.items():
            j.reset()
def sprite_setup(start_level):
    """Build all sprites, returning the player sprite.

    Creates the demo level's sign and NPC sprites, then constructs the player
    at *start_level*'s default spawn point.
    """
    blue_sign = Static('assets/sign/blue_sign.png')
    SignSprite(pygame.Rect(16 * 64, 8 * 64, 64, 32), {'blue_sign': blue_sign}, SignSprite.stand, "demo", "local",
               action='speak', action_args='dialogs/demo_welcome.txt')
    pig_animations = {"front": Animation(
        ('assets/npc/pig/pig_front.png', 'assets/npc/pig/pig_front_walk1.png', 'assets/npc/pig/pig_front_walk2.png'),
        (0, 1, 0, 2)),
        "back": Animation(('assets/npc/pig/pig_back.png', 'assets/npc/pig/pig_back_walk1.png',
                           'assets/npc/pig/pig_back_walk2.png'), (0, 1, 0, 2)),
        "left": Animation(('assets/npc/pig/pig_left.png', 'assets/npc/pig/pig_left_walk1.png',
                           'assets/npc/pig/pig_left_walk2.png'), (0, 1, 0, 2)),
        "right": Animation(('assets/npc/pig/pig_right.png', 'assets/npc/pig/pig_right_walk1.png',
                            'assets/npc/pig/pig_right_walk2.png'), (0, 1, 0, 2))
    }
    NPCSprite(pygame.Rect(20 * 64, 6 * 64, 64, 32), pig_animations, NPCSprite.wander, "demo", "global", action='speak',
              action_args='dialogs/oink.txt')
    # NPCSprite(pygame.Rect(20 * 64, 8 * 64, 64, 32), pig_images, NPCSprite.always_right, demo, "global", action='speak', action_args='dialogs/oink.txt')
    male_duck_animations = {"front": Animation(('assets/npc/male_duck/male_duck_front.png',), (0,)),
                            "back": Animation(('assets/npc/male_duck/male_duck_back.png',), (0,)),
                            "left": Animation(('assets/npc/male_duck/male_duck_left.png',), (0,)),
                            "right": Animation(('assets/npc/male_duck/male_duck_right.png',), (0,))
                            }
    # NOTE(review): female_duck_animations and duckling_animations are built
    # but never attached to a sprite below.
    female_duck_animations = {"front": Animation(('assets/npc/female_duck/female_duck_front.png',), (0,)),
                              "back": Animation(('assets/npc/female_duck/female_duck_back.png',), (0,)),
                              "left": Animation(('assets/npc/female_duck/female_duck_left.png',), (0,)),
                              "right": Animation(('assets/npc/female_duck/female_duck_right.png',), (0,))
                              }
    duckling_animations = {"front": Animation(('assets/npc/duckling/duckling_front.png',), (0,)),
                           "back": Animation(('assets/npc/duckling/duckling_back.png',), (0,)),
                           "left": Animation(('assets/npc/duckling/duckling_left.png',), (0,)),
                           "right": Animation(('assets/npc/duckling/duckling_right.png',), (0,))
                           }
    NPCSprite(pygame.Rect(20 * 64, 14 * 64, 64, 32), male_duck_animations, NPCSprite.wander, "demo", "global")
    # Player setup
    player_start = Level.reference_dict[start_level].default_start
    # Player bounding box
    player = pygame.Rect(player_start[0] * 64, player_start[1] * 64, 48, int(112 / 2))
    # Player sprite
    player_animations = {"front": Animation(('assets/player/player_front.png', 'assets/player/player_front_walk1.png',
                                             'assets/player/player_front_walk2.png'), (0, 1, 0, 2)),
                         "back": Animation(('assets/player/player_back.png', 'assets/player/player_back_walk1.png',
                                            'assets/player/player_back_walk2.png'), (0, 1, 0, 2)),
                         "left": Animation(('assets/player/player_left.png', 'assets/player/player_left_walk1.png',
                                            'assets/player/player_left_walk2.png'), (0, 1, 0, 2)),
                         "right": Animation(('assets/player/player_right.png', 'assets/player/player_right_walk1.png',
                                             'assets/player/player_right_walk2.png'), (0, 1, 0, 2))
                         }
    # Default player sprite data
    player_sprite = PlayerSprite(player, player_animations, PlayerSprite.normal, start_level, health=100)
    return player_sprite
# # SPRITE MODULE
# Functions for actions
def blank(args):
    """No-op interaction used as the default sprite action."""
    return None
def speak(args):
    """Interaction that opens a dialog box and shows the given dialog file."""
    TextDialog("assets/dialog_box.png").display_dialog(args)
# Maps the action names stored in sprite data/JSON to their callables.
action_lookup = {'blank': blank, 'speak': speak}
# Class for variables that keep track of whether they've been changed
class Tracker:
    """A value wrapper that remembers whether its value changed since the
    last query -- used to detect facing/direction changes for animations."""

    def __init__(self, value):
        self.value = value
        self.changed = False

    def set(self, new_value):
        """Store *new_value*; mark as changed only if it actually differs."""
        if self.value != new_value:
            self.changed = True
            self.value = new_value

    def querry(self):
        """Return True exactly once after a change, resetting the flag.

        BUG FIX: previously returned None implicitly on the unchanged path;
        now returns False explicitly (same truthiness, clearer contract).
        Name kept as-is ('querry') for caller compatibility.
        """
        if self.changed:
            self.changed = False
            return True
        return False
class Animation:
    """Class for animations.

    Holds a cycle of frames built from *images* indexed by *sequence*;
    ``update`` advances a fractional tick and swaps ``self.frame`` when the
    integer part of the tick changes.
    """

    def __init__(self, images, sequence):
        """images -- tuple of file paths; sequence -- frame-index playback order."""
        self.tick = 0
        self.prev_tick = 0
        self.image_tuple = images
        self.image_sequence = sequence
        self.frame_list = tuple(pygame.image.load(os.path.join(images[i])).convert_alpha() for i in sequence)
        # Pristine copies used to undo lighting/darkening effects.
        self.backup_list = tuple(i.copy() for i in self.frame_list)
        self.rect_list = tuple(i.get_rect() for i in self.frame_list)
        self.frame_count = len(self.frame_list) - 1
        self.frame = self.frame_list[0]
        self.rect = self.rect_list[0]
        self.interval = 25  # time-steps per frame advance
        self.running = False

    def update(self, time_step):
        """Update the animation frame"""
        self.running = True
        self.tick += time_step / self.interval
        # Only swap frames when the integer tick actually changes.
        if int(self.tick) != int(self.prev_tick):
            if int(self.tick) > self.frame_count:
                self.tick = 0
            self.frame = self.frame_list[int(self.tick)]
            self.rect = self.rect_list[int(self.tick)]
            self.prev_tick = float(self.tick)

    def draw(self, window, dest):
        """Draw current frame to window"""
        window.blit(self.frame, dest)

    def draw_norm(self, window, dest):
        """Draw current backup frame to window"""
        window.blit(self.backup_list[int(self.tick)], dest)

    def draw_clipped(self, window, dispx, dispy, disp_reg, litmap_rects):
        """Draw current backup frame with only area outside a light
        source darkened"""
        # First get a list of rectangles that intersect the source
        # Then copy this part of the current backup frame onto the current frame
        img_rect = self.rect.move(1, 1)
        img = self.frame.copy()
        window.blit(img, disp_reg)
        for i in img_rect.collidelistall(litmap_rects):
            dark_rect = img_rect.clip(litmap_rects[i])
            if dark_rect.size != (0, 0):
                dark_rect.move_ip(-1, -1)
                #window.fill((255, 0, 0), dark_rect.move(disp))
                window.blit(self.backup_list[int(self.tick)], dest=dark_rect.move(dispx, dispy), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))

    def reset(self):
        """Reset the animation frame to base"""
        self.running = False
        if self.tick != 0:
            self.tick = 0
            self.prev_tick = 0
            self.frame = self.frame_list[0]
            self.rect = self.rect_list[0]

    def darken(self, amount):
        """Darken the image by an amount"""
        for i in self.frame_list:
            shade = pygame.Surface((i.get_width(), i.get_height()))
            shade.fill((amount, amount, amount))
            i.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
        self.frame = self.frame_list[int(self.tick)]

    def undo(self):
        """Reset all changes to the animation images"""
        self.frame_list = tuple(i.copy() for i in self.backup_list)
        self.frame = self.frame_list[int(self.tick)]
# Subclass for static animations
class Static(Animation):
def __init__(self, image):
self.image = image
self.frame = pygame.image.load(os.path.join(image)).convert_alpha()
self.backup = self.frame.copy()
self.rect = self.frame.get_rect()
def update(self, timestep):
"""Handles any calls for frame updates. Helps with
standardization, but otherwise does nothing"""
pass
def draw(self, window, dest):
"""Standard drawing function"""
window.blit(self.frame, dest)
def draw_norm(self, window, dest):
"""Draw backup image"""
window.blit(self.backup, dest)
def draw_clipped(self, window, dispx, dispy, disp_reg, litmap_rects):
"""Draw current backup frame with only area outside a light
source darkened"""
img_rect = self.rect.move(1, 1)
img = self.frame.copy()
window.blit(img, (disp_reg))
for i in img_rect.collidelistall(litmap_rects):
dark_rect = img_rect.clip(litmap_rects[i])
if dark_rect.size != (0, 0):
dark_rect.move_ip(-1, -1)
#window.fill((255, 0, 0), dark_rect.move(disp))
window.blit(self.backup_list, dest=dark_rect.move(dispx, dispy), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))
def darken(self, amount):
"""Darken the image by an amount"""
shade = pygame.Surface((self.frame.get_width(), self.frame.get_height()))
shade.fill((amount, amount, amount))
self.frame.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
def undo(self):
"""Reset all changes to the animation image"""
self.frame = self.backup.copy()
class Sprite:
    """Main class for sprites.

    Class-level registries track every sprite: ``global_sprites`` follow the
    player across levels, ``local_sprites`` belong to a single level, and
    ``loaded_locals`` is the currently-active subset of the locals.
    ``behavior_args`` is a shared bag of per-frame data (keys, time_step, ...)
    that behavior functions read.
    """
    sprite_dict = {}     # entity_id -> Sprite
    global_sprites = []  # sprites that persist across level changes
    local_sprites = []   # sprites tied to one level
    loaded_locals = []   # local sprites in currently-loaded levels
    focus = None         # the sprite the view centers on (the player)
    id_counter = 0       # next entity id to hand out
    behavior_args = {}   # shared per-frame inputs for behaviors

    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None, health=None, max_health=None):
        """Register a new sprite.

        rect -- collision box; animation_dict -- name -> Animation/Static;
        behavior -- function called each frame with the sprite as argument;
        level -- level NAME, resolved via Level.reference_dict;
        scope -- "global" or "local"; action -- key into action_lookup.
        NOTE(review): max_health is accepted but never stored here.
        """
        self.health = health
        self.rect = rect
        self.x = int(rect.x)
        self.y = int(rect.y)
        self.collision_time = 0
        self.animation_dict = animation_dict
        # Default to the first animation in the dict (relies on dict ordering).
        self.animation = list(animation_dict.items())[0][1]
        # NOTE(review): this instance attribute shadows the `freeze`
        # classmethod defined below, on a per-instance basis.
        self.freeze = False
        self.behavior = behavior
        self.level = Level.reference_dict[level]
        self.scope = scope
        self.action = action_lookup[action]
        self.action_args = action_args
        self.speed_mult = 1
        self.entity_id = Sprite.id_counter
        Sprite.id_counter += 1
        if scope == "global":
            Sprite.global_sprites.append(self)
        elif scope == "local":
            Sprite.local_sprites.append(self)
        else:
            print("An error occurred while loading a sprite:\n'{}' is an invalid scope".format(scope))
        Sprite.sprite_dict[self.entity_id] = self

    def __repr__(self):
        """Override in subclass"""
        pass

    def __str__(self):
        return self.__repr__()

    def jsonify(self):
        """Converts a sprite into a json representation of itself, override in subclass"""
        pass

    def check_collision(self, dx, dy, tilemap, collider_ids):
        """Return True if moving by (dx, dy) is clear, False on a collision.

        Checks the four corners of the moved rect against collider tile ids,
        then checks sprite-vs-sprite overlap. ``collision_time`` lets sprites
        push through each other after being blocked for a while (>= 180).
        """
        try:
            # Look slightly further ahead than the actual move.
            dx, dy = round(1.5 * dx), round(1.5 * dy)
            new_rect = self.rect.move(dx, dy)
            x1 = int(new_rect.x / 64)
            y1 = int(new_rect.y / 64)
            x2 = int((new_rect.x + new_rect.width) / 64)
            y2 = int((new_rect.y + new_rect.height) / 64)
            # Out-of-bounds on the negative side would index from the end.
            if (new_rect.x < 0) or (new_rect.y < 0) or (x2 < 0) or (y2 < 0):
                return False
            if tilemap[y1][x1] in collider_ids:
                return False
            if tilemap[y2][x1] in collider_ids:
                return False
            if tilemap[y1][x2] in collider_ids:
                return False
            if tilemap[y2][x2] in collider_ids:
                return False
            if self.collision_time < 180 and Sprite.check_sprites_collide(self, dx, dy) is not None:
                self.collision_time += Sprite.behavior_args["time_step"]
                return False
            if self.collision_time > 0:
                self.collision_time -= Sprite.behavior_args["time_step"] / 2
            return True
        except IndexError:
            # Off the positive edge of the map -- treat as blocked.
            return False

    def behave(self):
        """Function to execute the behavior of sprites. Override in subclass"""
        pass

    def check_load_zone(self):
        """Check if the sprite is in a loading zone, and if so, send the sprite to the relevant level.
        Return False if nothing happens, return True if successful."""
        x, y = self.rect.center
        x = int(x / 64)
        y = int(y / 64)
        if (x, y) in self.level.loading_zones:
            zone = self.level.loading_zones[(x, y)]
            self.level = Level.reference_dict[zone[0]]
            self.x = zone[1][0] * 64
            self.y = zone[1][1] * 64
            self.rect.x = zone[1][0] * 64
            self.rect.y = zone[1][1] * 64
            return True
        else:
            return False

    def check_lit_tile(self, litmap):
        """Check if the sprite is partially or fully on tiles that are illuminated.
        -Return 0 if sprite is NOT touching a lit tile.
        -Return 1-3 if the sprite is PARTIALLY touching a lit tile.
        -Return 4 if the sprite is ONLY touching lit tiles.
        -Return 0 if an IndexError occurs (entity is out of bounds).
        """
        try:
            r = self.animation.rect
            c = 0
            # Count how many of the four corners fall on lit tiles.
            for x in (r.left, r.right):
                for y in (r.bottom, r.top):
                    if litmap[y // 64][x // 64]: c += 1
            return c
        except IndexError:
            return 0

    @classmethod
    def freeze(cls, state):
        """Set the freeze state of all sprites"""
        for s in cls.global_sprites + cls.loaded_locals:
            s.freeze = state

    @classmethod
    def check_sprites_collide(cls, entity, dx, dy):
        """Return the rect of the sprite *entity* would collide with after a
        (dx, dy) move, or None if the move is clear.

        NOTE(review): despite the original wording, this returns the collided
        sprite's Rect, not the Sprite object itself.
        """
        entity_rect = entity.rect.copy()
        sprite_list = cls.local_sprites + [i for i in cls.global_sprites if i.level == entity.level]
        other_rects = list(s.rect for s in sprite_list if s.rect != entity_rect)
        index = entity_rect.move(dx, dy).collidelist(other_rects)
        if index == -1:
            return None
        return other_rects[index]

    @classmethod
    def behave_all(cls):
        """Function to execute the behavior of all sprites"""
        for s in cls.global_sprites + cls.loaded_locals:
            s.behave()
        for s in cls.global_sprites:
            # Trigger a screen wipe when the player enters a loading zone.
            if s.check_load_zone() and type(s) == PlayerSprite:
                Sprite.behavior_args["screen_wipe"] = True

    @classmethod
    def blit_all(cls, dispx, dispy, window):
        """Blit all sprites that are currently located in the same level as the focus sprite.
        Does not handle light levels"""
        loaded_sprites = [i for i in cls.loaded_locals if i.level.name == cls.focus.level.name] + [i for i in cls.global_sprites if i.level.name == cls.focus.level.name]
        # Painter's order: sprites lower on screen draw on top.
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            s.animation.draw(window, (s.x + dispx, s.y + dispy - s.rect.height))
            #window.fill((127, 0, 0), s.rect.move((dispx, dispy)))

    @classmethod
    def blit_all_night(cls, dispx, dispy, window, litmap, lit_rects):
        """Blit all sprites that are currently located in the same level as the focus sprite.
        Designed for handling light levels"""
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level.name == cls.focus.level.name]
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            s.animation.rect.bottomleft = s.rect.bottomleft  # (s.x, s.y + s.rect.height)
            disp_reg = (s.x + dispx, s.y + dispy - s.rect.height)
            lit_tiles = s.check_lit_tile(litmap)
            # Player is not touching any lit tiles, draw normally.
            if not lit_tiles:
                s.animation.draw(window, disp_reg)
            # Player is only touching lit tiles, draw illuminated backup.
            elif lit_tiles == 4:
                s.animation.draw_norm(window, disp_reg)
            # Player is partially touching lit tiles, draw clipped.
            else:
                s.animation.draw_clipped(window, dispx, dispy, disp_reg, lit_rects)
            # NOTE(review): leftover debug output -- consider removing.
            if s.entity_id == 4:
                print("Test")

    @classmethod
    def darken_all(cls, amount=100):
        """Darken every sprite's images (sprites with a single animation fall
        back to darkening self.animation directly)."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.darken(amount)
            except AttributeError:
                i.animation.darken(amount)

    @classmethod
    def reset_imgs(cls):
        """Undo darkening/lighting changes on every sprite's images."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.undo()
            except AttributeError:
                i.animation.undo()

    @classmethod
    def reload_loaded_locals(cls):
        """Recalculate the list of loaded local sprites"""
        loaded_levels = [i.level for i in cls.global_sprites + cls.local_sprites
                         if type(i) == PlayerSprite or type(i) == GuestSprite]
        cls.loaded_locals = [i for i in cls.local_sprites if i.level in loaded_levels]
class SignSprite(Sprite):
    """Subclass for sign sprites: stationary, interactable scenery.

    NOTE(review): the *scope* parameter is accepted but signs are always
    registered as "local" -- confirm that is intentional.
    """

    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None):
        Sprite.__init__(self, rect, animation_dict, behavior, level, "local", action, action_args, None, None)

    def __repr__(self):
        result = """<SignSprite | ID: {}, Rect: {}, Behavior: {}, Action: {}, Action Args: {}, Level: {}>""".format(
            self.entity_id, self.rect, self.behavior, self.action, self.action_args, self.level)
        return result

    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # Reverse lookups turn the behavior/action callables back into their
        # registered names (behavior_lookup is defined elsewhere in this module).
        result = {"id": self.entity_id, "type": "SignSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation": dict((i, j.image)
                              for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "local",
            "action": dict((j, i) for i, j in action_lookup.items())[self.action],
            "action_args": self.action_args
        }
        }
        return json.dumps(result)

    def jsonify2(self):
        """Converts all necessary information about a sprite into json format"""
        # Lightweight per-frame state sync (signs never move or animate).
        data = {"id": self.entity_id,
                "x": self.x,
                "y": self.y,
                "facing": None,
                "direction": None,
                "animated": False,
                "speed": 0,
                "level": self.level.name
                }
        return json.dumps(data)

    def behave(self):
        """Execute the behavior of the sign sprite"""
        self.behavior(self)

    def stand(self):
        """Normal standing behavior"""
        pass
class NPCSprite(Sprite):
    """Subclass for NPC sprites: mobile characters driven by a behavior
    function (stand / wander / panic / always_right)."""

    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavior, level, scope, action, action_args, health, max_health)
        self.interval = 25          # animation pacing (see Animation.interval)
        self.cycle = 0              # behavior timer, advanced by time_step
        self.facing = Tracker("front")    # vertical facing: "front"/"back"
        self.direction = Tracker(None)    # horizontal facing: "left"/"right"/None
        self.dx = 0                 # current movement intent, -1/0/1
        self.dy = 0

    def __repr__(self):
        result = """<NPCSprite | ID: {}, Rect: {}, Behavior: {}, Action: {}, Action Args: {}, Health: {},
        Level: {}>""".format(self.entity_id, self.rect, self.behavior, self.action, self.action_args, self.health,
                             self.level)
        return result

    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # behavior_lookup is defined elsewhere in this module.
        result = {"id": self.entity_id, "type": "NPCSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": self.scope,
            "action": dict((j, i) for i, j in action_lookup.items())[self.action],
            "action_args": self.action_args,
            "health": None,
            "max_health": None
        }
        }
        return json.dumps(result)

    def jsonify2(self):
        """Converts all necessary information about a sprite into json format"""
        # Lightweight per-frame state sync for clients.
        data = {"id": self.entity_id,
                "x": self.x,
                "y": self.y,
                "facing": self.facing.value,
                "direction": self.direction.value,
                "animated": self.animation.running,
                "speed": self.speed_mult,
                "level": self.level.name
                }
        return json.dumps(data)

    # Execute behavior of the NPC sprite
    def behave(self):
        self.behavior(self)

    # Behaviour of NPC to stand in place
    def stand(self):
        pass

    # Behaviour of NPC to wander aimlessly around the map
    def wander(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        # Every ~384 ticks, pick a new random direction; if a full tile in
        # that direction is blocked, stand still until the next re-roll.
        if self.cycle > 384:
            self.cycle = 0
            self.dx = random.randint(-1, 1)
            self.dy = random.randint(-1, 1)
            if not self.check_collision(self.dx * 64, self.dy * 64, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 385
        time_step = Sprite.behavior_args['time_step']
        self.cycle += time_step
        # Walk for the first 80 ticks of the cycle, then idle.
        if self.cycle <= 80:
            self.movement(tilemap, colliders, time_step)
            self.speed_mult = 1
        else:
            if self.cycle <= 100:
                self.animation.reset()

    # Behaviour of NPC to panic aimlessly
    def panic(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        # Re-roll a (guaranteed non-zero) direction every ~64 ticks.
        if self.cycle > 64:
            self.cycle = 0
            self.dx, self.dy = 0, 0
            while self.dx == 0 and self.dy == 0:
                self.dx = random.randint(-1, 1)
                self.dy = random.randint(-1, 1)
            if not self.check_collision(self.dx, self.dy, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 65
        time_step = Sprite.behavior_args['time_step']
        self.cycle += time_step
        if self.cycle <= 64:
            # Panicked NPCs move at 1.5x speed.
            self.movement(tilemap, colliders, 1.5 * time_step)
            self.speed_mult = 1.5
        else:
            self.animation.reset()

    # Behaviour of NPC to always move right
    def always_right(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.dx = 1
        self.dy = 0
        time_step = Sprite.behavior_args['time_step']
        self.movement(tilemap, colliders, 1.5 * time_step)

    # Standard NPC movement
    def movement(self, tilemap, colliders, time_step):
        """Apply the (self.dx, self.dy) intent: move axis-by-axis with
        collision checks, update facing trackers, and advance the animation.
        A blocked axis forces the behavior cycle to the idle phase (80)."""
        dx, dy = 0, 0
        speed = 1.0 * time_step
        moved = False
        if self.dy == -1:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            moved = True
        if self.dy == 1:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        else:
            dy = 0
            self.cycle = 80
        if self.dx == -1:
            dx -= speed
            self.direction.set("left")
            moved = True
        if self.dx == 1:
            dx += speed
            self.direction.set("right")
            moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        else:
            dx = 0
            self.cycle = 80
        if not moved:
            self.animation.reset()
        else:
            # Swap animations only when facing/direction actually changed.
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed * 1.5)
        # Keep the integer rect in sync with the float position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
class PlayerSprite(Sprite):
    """Keyboard-controlled player sprite.

    Always registered with "global" scope so it persists across levels.
    Orientation is held in Tracker objects so movement() can swap the
    active animation only when facing/direction actually changed.
    """
    def __init__(self, rect, animation_dict, behavior, level, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavior, level, "global", health=health, max_health=max_health)
        self.facing = Tracker("front")
        self.direction = Tracker(None)
    def __repr__(self):
        return """<PlayerSprite | ID: {}, Rect: {}, Health: {}, Level: {}>""".format(
            self.entity_id, self.rect, self.health, str(self.level))
    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # NOTE(review): health/max_health are serialized as literal None even
        # when the sprite has health -- confirm this is intended.
        result = {"id": self.entity_id, "type": "PlayerSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            # Reverse lookup: behavior function -> its registered string name.
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "global",
            "health": None,
            "max_health": None
            }
        }
        return json.dumps(result)
    # Execute the behavior of the player sprite
    def behave(self):
        self.behavior(self)
    # Standard player sprite movement
    def movement(self, args):
        """Handle one frame of keyboard movement and interaction.

        args is a (keys, time_step, tilemap, colliders) tuple.  Holding
        left-shift moves at the slower 1.25x rate (sneak); otherwise 2.25x.
        SPACE interacts with the first sprite within a 16px margin.
        speed_mult mirrors the chosen rate so multiplayer clients can
        animate this sprite at the right pace (see jsonify2()).
        """
        if self.freeze:
            return
        keys = args[0]
        time_step = args[1]
        tilemap = args[2]
        colliders = args[3]
        dx = 0
        dy = 0
        player_moved = False
        self.speed_mult = 1.25
        if keys[pygame.K_LSHIFT]:
            speed = time_step * 1.25
        else:
            speed = 2.25 * time_step
            self.speed_mult += 1
        if keys[pygame.K_w]:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            player_moved = True
        if keys[pygame.K_s]:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            player_moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        if keys[pygame.K_a]:
            dx -= speed
            self.direction.set("left")
            player_moved = True
        if keys[pygame.K_d]:
            dx += speed
            self.direction.set("right")
            player_moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        # Trigger call for interact
        if keys[pygame.K_SPACE]:
            loaded_globals = [i for i in Sprite.global_sprites if i.level == self.level]
            for s in (loaded_globals + Sprite.loaded_locals):
                if type(s) != PlayerSprite:
                    if self.rect.inflate(16, 16).colliderect(s.rect):
                        s.action(s.action_args)
                        break
        if not player_moved:
            self.animation.reset()
        else:
            # Only swap the animation when orientation changed since last frame.
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed / 2)
        # Keep the integer collision rect in sync with the float position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
    # Regular player behavior
    def normal(self):
        """Default per-frame behavior: pull shared inputs and move."""
        keys = Sprite.behavior_args['keys']
        time_step = Sprite.behavior_args['time_step']
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.movement((keys, time_step, tilemap, colliders))
    def jsonify2(self):
        """Converts all necessary information about a sprite into json format"""
        # Lightweight per-frame state sync for multiplayer (cf. jsonify()).
        data = {"id": self.entity_id,
                "x": self.x,
                "y": self.y,
                "facing": self.facing.value,
                "direction": self.direction.value,
                "animated": self.animation.running,
                "speed": self.speed_mult,
                "level": self.level.name
                }
        return json.dumps(data)
class GuestSprite(Sprite):
    """Server-side avatar for one connected multiplayer client.

    Each instance owns a socket connection and a reader thread
    (client_thread) that syncs positions and serves sprite/level data.
    Class attributes track connected clients so add/delete "orders" can be
    broadcast to everyone.
    """
    client_sprites = {}     # entity_id -> GuestSprite for every connected client
    active_clients = []     # clients that finished their initial sprite download
    refresh_class = False
    def __init__(self, conn, rect, animation_dict, level, health=None, max_health=None):
        """Register the client, send it its entity id, and start its network
        thread.  conn is an already-accepted socket."""
        Sprite.__init__(self, rect, animation_dict, GuestSprite.normal, level, "global", health=health, max_health=max_health)
        self.facing = Tracker("front")
        self.direction = Tracker(None)
        self.conn = conn
        self.refresh = False
        self.order = []     # queued (command, arg) pairs to relay to this client
        # Add guest sprite to client sprite dictionary
        GuestSprite.client_sprites[self.entity_id] = self
        conn.send(str.encode(str(self.entity_id)))
        start_new_thread(GuestSprite.client_thread, (self,))
    def __repr__(self):
        return """<GuestSprite | ID: {}, Rect: {}, Health: {}, Level: {}>""".format(
            self.entity_id, self.rect, self.health, str(self.level))
    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # NOTE(review): health/max_health serialized as None even when set.
        result = {"id": self.entity_id, "type": "GuestSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "global",
            "health": None,
            "max_health": None
            }
        }
        return json.dumps(result)
    def behave(self):
        """Run the per-frame behavior callback (GuestSprite.normal)."""
        self.behavior(self)
    def jsonify2(self):
        """Converts all necessary information about a sprite into json format"""
        data = {"id": self.entity_id,
                "x": self.x,
                "y": self.y,
                "facing": self.facing.value,
                "direction": self.direction.value,
                "animated": self.animation.running,
                "speed": self.speed_mult,
                "level": self.level.name
                }
        return json.dumps(data)
    def normal(self):
        """Regular guest sprite behavior, typically used for syncing internal sprite data"""
        # Position/orientation were written by client_thread; mirror them into
        # the drawable state on the main thread.
        self.rect.x = self.x
        self.rect.y = self.y
        if self.animation.running:
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(self.speed_mult * Sprite.behavior_args["time_step"] / 2)
        else:
            self.animation.reset()
    def client_thread(self):
        """Blocking request/response loop for one client connection.

        Protocol (3-char command prefixes): "set" = client reports state and
        asks for everyone else's state (or a queued order); "pst" = report
        state only; "spr" = download all other sprites as full JSON;
        "lvl<name>" = download a level; "blv" = list level names.
        Replies longer than one packet are sent in 2048-byte chunks with an
        ack-recv between each.  On disconnect the sprite is unregistered.
        """
        # Order clients to refresh their guest sprite lists due to a new client connecting
        entity_id = self.entity_id
        print("Player joined with entity ID:", entity_id)
        GuestSprite.queue_order(1, entity_id)
        while True:
            try:
                data = self.conn.recv(2048)
                message = data.decode("utf-8")
                if not data:
                    print("Disconnected")
                    break
                else:
                    if message[:3] == "set":
                        # Set the position reported by the connected client
                        m = json.loads(message[3:])
                        self.x = m["x"]
                        self.y = m["y"]
                        self.facing.set(m["facing"])
                        self.direction.set(m["direction"])
                        self.animation.running = m["animated"]
                        self.speed_mult = m["speed"]
                        self.level = Level.reference_dict[m["level"]]
                        # If an order has been queued, relay that order instead of sprite data
                        if len(self.order):
                            print("Sending Order:", self.order)
                            if self.order[0][0] == 1:
                                print("Telling", entity_id, "to add", self.order[0][1])
                                self.conn.sendall(str.encode("add" + str(self.order[0][1])))
                            elif self.order[0][0] == 2:
                                print("Telling", entity_id, "to delete", self.order[0][1])
                                self.conn.sendall(str.encode("del" + str(self.order[0][1])))
                            self.order.pop(0)
                        else:
                            # Send the sprite data to the client
                            local_sprites = [i for i in Sprite.loaded_locals if i.level.name == self.level.name]
                            entity_count = len(Sprite.global_sprites) + len(local_sprites) - 1
                            self.conn.sendall(str.encode("upd" + str(entity_count)))
                            for i in Sprite.global_sprites + local_sprites:
                                if i.entity_id != entity_id:
                                    self.conn.recv(2048)
                                    self.conn.sendall(str.encode(i.jsonify2()))
                    elif message[:3] == "pst":
                        # Pre-set, client ONLY wants to inform server of its position
                        m = json.loads(message[3:])
                        self.x = m["x"]
                        self.y = m["y"]
                        self.facing.set(m["facing"])
                        self.direction.set(m["direction"])
                        self.animation.running = m["animated"]
                        self.speed_mult = m["speed"]
                        self.level = Level.reference_dict[m["level"]]
                        self.conn.sendall(str.encode("Ack"))
                    elif message == "spr":
                        # Client wants to know about the other sprites, excluding its own
                        entity_count = len(Sprite.global_sprites) + len(Sprite.local_sprites) - 1
                        self.conn.sendall(str.encode(str(entity_count)))
                        for i in Sprite.global_sprites + Sprite.local_sprites:
                            if entity_id != i.entity_id:
                                reply = i.jsonify()
                                package_count = ceil(len(reply) / 2048)
                                self.conn.recv(2048)
                                self.conn.sendall(str.encode(str(package_count)))
                                if package_count == 1:
                                    self.conn.recv(2048)
                                    self.conn.sendall(str.encode(reply))
                                else:
                                    for j in range(int(len(reply) / 2048)):
                                        self.conn.recv(2048)
                                        # Bug fix: the original sliced with the
                                        # outer loop's sprite object ``i`` instead
                                        # of the chunk index ``j``, raising
                                        # TypeError on any multi-packet reply.
                                        self.conn.sendall(str.encode(reply[(2048 * j):(2048 * (j + 1))]))
                                        time.sleep(0.1)
                                    # Send the final partial chunk.
                                    self.conn.recv(2048)
                                    self.conn.sendall(str.encode(reply[(2048 * int(len(reply) / 2048)):]))
                        print(entity_id, "> Finished downloading sprites")
                        # If the client has not already been marked as ready to receive requests, do so now.
                        if self not in GuestSprite.active_clients:
                            GuestSprite.active_clients.append(self)
                    elif message[:3] == "lvl":
                        # Client wants to know about a level
                        print(entity_id, "> Downloading level:", message[3:])
                        reply = Level.reference_dict[message[3:]].jsonify()
                        package_count = ceil(len(reply) / 2048)
                        self.conn.sendall(str.encode(str(package_count)))
                        time.sleep(0.1)
                        if package_count == 1:
                            self.conn.recv(2048)
                            self.conn.sendall(str.encode(reply))
                        else:
                            for i in range(int(len(reply) / 2048)):
                                self.conn.recv(2048)
                                self.conn.sendall(str.encode(reply[(2048 * i):(2048 * (i + 1))]))
                                time.sleep(0.1)
                            self.conn.recv(2048)
                            self.conn.sendall(str.encode(reply[(2048 * int(len(reply) / 2048)):]))
                        # Reload "loaded_locals" list in the Sprite class
                        Sprite.reload_loaded_locals()
                    elif message == "blv":
                        print(entity_id, "> Asked for levels outline")
                        self.conn.sendall(str.encode(str(len(Level.reference_dict))))
                        for i, j in Level.reference_dict.items():
                            print(self.conn.recv(2048).decode("utf-8"))
                            self.conn.sendall(str.encode(i))
                            time.sleep(0.1)
                    else:
                        print(entity_id, "attempted to send:", message)
            except Exception:
                print("Connection crashed, is this an error?")
                print_exc(file=stdout)
                break
        # Connection ended: tell everyone to drop this guest and unregister it.
        GuestSprite.queue_order(2, entity_id)
        print(self.entity_id, "lost connection")
        for i, j in enumerate(Sprite.global_sprites):
            if j.entity_id == entity_id:
                del Sprite.global_sprites[i]
        for i, j in GuestSprite.client_sprites.items():
            if i == entity_id:
                del GuestSprite.client_sprites[i]
                break
        self.conn.close()
        del self
    @classmethod
    def queue_order(cls, cmd, cmd_args):
        """Queue a (cmd, args) order for every active client; cmd 1 = add a
        guest sprite, cmd 2 = delete one."""
        for i in cls.active_clients:
            i.order.append((cmd, cmd_args))
# Maps the behavior names stored in serialized sprite data back to the
# actual functions; the jsonify() methods invert this dict when saving.
behavior_lookup = {"SignSprite.stand": SignSprite.stand,
                   "NPCSprite.wander": NPCSprite.wander,
                   "NPCSprite.panic": NPCSprite.panic,
                   "NPCSprite.always_right": NPCSprite.always_right,
                   "PlayerSprite.normal": PlayerSprite.normal,
                   "GuestSprite.normal": GuestSprite.normal
                   }
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 3/sprite.py | import pygame
import os
import ast
from dialog import *
import random
# Functions for actions
def blank(args):
    """Default interaction handler: accepts the action arguments and does
    nothing at all."""
    return None
def speak(args):
    """Basic interaction handler: pop up a dialog box showing *args* as text."""
    TextDialog("assets/dialog_box.png").display_dialog(args)
action_lookup = {'blank': blank, 'speak': speak}
# Class for animations
class Animation:
    """Placeholder for sprite animations (not yet implemented at this
    checkpoint)."""
    def __init__(self):
        # Bug fix: the original signature omitted ``self``, so even
        # ``Animation()`` raised TypeError.
        pass
# Main class for sprites
class Sprite:
    """Base class for all in-game sprites (players, NPCs, signs).

    Class attributes hold the shared sprite registries; behavour_args is a
    dict of per-frame inputs (keys, time_step, ...) filled by the main loop.
    """
    global_sprites = []    # sprites that persist across every level
    local_sprites = []     # sprites tied to a single level
    loaded_locals = []     # subset of local_sprites in the focused level
    focus = None           # sprite the camera follows (normally the player)
    behavour_args = {}
    def __init__(self, rect, images, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        """Load the sprite's images and register it by scope.

        rect -- collision rectangle; images -- dict of name -> image path;
        behavour -- per-frame callback; action -- key into action_lookup run
        when the player interacts with this sprite.  max_health is accepted
        for signature parity but unused here.
        """
        self.frame = 0        # walk-cycle frame counter
        self.interval = 50    # frames per walk-cycle phase
        self.health = health
        self.rect = rect
        # Float position mirrors rect to allow sub-pixel movement.
        self.x = int(rect.x)
        self.y = int(rect.y)
        self.images = dict((v, pygame.image.load(os.path.join(k)).convert_alpha()) for v, k in images.items())
        self.sprite = list(self.images.items())[0][1]
        self.freeze = False
        self.behavour = behavour
        self.level = level
        self.scope = scope
        self.action = action_lookup[action]
        self.action_args = action_args
        if scope == "global":
            Sprite.global_sprites.append(self)
        elif scope == "local":
            # NOTE(review): "local" sprites are appended to global_sprites,
            # not local_sprites -- looks unintended; confirm.
            Sprite.global_sprites.append(self)
        else:
            print("An error occured while loading a sprite:\n'{}' is an invalid scope".format(scope))
    def __repr__(self):
        '''Override in subclass'''
        pass
    def __str__(self):
        return self.__repr__()
    def check_collision(self, dx, dy, tilemap, collider_ids):
        '''Return True when the sprite may move by (dx, dy): the moved rect
        touches no collider tile and no other sprite.  The offset is scaled
        by 1.5, giving a small look-ahead margin; tiles are 64px.'''
        try:
            dx, dy = round(1.5 * dx), round(1.5 * dy)
            new_rect = self.rect.move(dx, dy)
            # Tile coordinates of the moved rectangle's four corners.
            x1 = int(new_rect.x / 64)
            y1 = int(new_rect.y / 64)
            x2 = int((new_rect.x + new_rect.width) / 64)
            y2 = int((new_rect.y + new_rect.height) / 64)
            if (new_rect.x < 0) or (new_rect.y < 0) or (x2 < 0) or (y2 < 0):
                return False
            if tilemap[y1][x1] in collider_ids:
                return False
            if tilemap[y2][x1] in collider_ids:
                return False
            if tilemap[y1][x2] in collider_ids:
                return False
            if tilemap[y2][x2] in collider_ids:
                return False
            if Sprite.check_sprites_collide(self, dx, dy) != None:
                return False
            return True
        except IndexError:
            # Off the edge of the tilemap counts as blocked.
            return False
    def behave(self):
        '''Function to execute the behavour of sprites. Override in subclass'''
        pass
    def check_load_zone(self):
        '''Check if the sprite is in a loading zone, and if so, send the sprite to the relevant level.
Return False if nothing happens, return True if successfull.'''
        x, y = self.rect.center
        x = int(x / 64)
        y = int(y / 64)
        if (x, y) in self.level.loading_zones:
            # zone = (level factory, (tile_x, tile_y) spawn position)
            zone = self.level.loading_zones[(x, y)]
            self.level = zone[0]()
            self.x = zone[1][0] * 64
            self.y = zone[1][1] * 64
            self.rect.x = zone[1][0] * 64
            self.rect.y = zone[1][1] * 64
            return True
        else:
            return False
    @classmethod
    def freeze(cls, state):
        '''Set the freeze state of all sprites'''
        # NOTE(review): this classmethod shares its name with the instance
        # attribute self.freeze set in __init__, so instances shadow the
        # method; call it via the class (Sprite.freeze(True)).
        for s in cls.global_sprites + cls.loaded_locals:
            s.freeze = state
    @classmethod
    def check_sprites_collide(cls, entity, dx, dy):
        '''Function to check if a sprite has collided with another sprite, and if so, return that sprite'''
        # NOTE(review): returns the collided *rect*, not the sprite itself.
        entity_rect = entity.rect.copy()
        sprite_list = cls.local_sprites + [i for i in cls.global_sprites if i.level == entity.level]
        other_rects = list(s.rect for s in sprite_list if s.rect != entity_rect)
        index = entity_rect.move(dx, dy).collidelist(other_rects)
        if index == -1:
            return None
        return other_rects[index]
    @classmethod
    def behave_all(cls):
        '''Function to execute the behavour of all sprites'''
        for s in cls.global_sprites + cls.loaded_locals:
            s.behave()
        # A player entering a load zone triggers the screen-wipe transition.
        for s in cls.global_sprites:
            if s.check_load_zone() and type(s) == PlayerSprite:
                Sprite.behavour_args["screen_wipe"] = True
    @classmethod
    def blit_all(cls, displacement, window):
        '''Blit all sprites that are currently located in the same level as the focus sprite'''
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level == cls.focus.level]
        # Painter's order: sprites lower on screen draw over those above.
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            window.blit(s.sprite, (s.x + displacement[0], s.y + displacement[1] - s.rect.height))
            #window.fill((127, 0, 0), s.rect.move(displacement))
    @classmethod
    def reload_loaded_locals(cls):
        '''Recalculate the list of loaded local sprites'''
        cls.loaded_locals = [i for i in cls.local_sprites if i.level == cls.focus.level]
# Subclass for sign sprites
class SignSprite(Sprite):
    """Static sign sprite: forced to local scope with no health; interaction
    happens through its configured action."""
    def __init__(self, rect, images, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        # scope/health params are accepted for signature parity with the
        # other subclasses but overridden to "local"/None here.
        Sprite.__init__(self, rect, images, behavour, level, "local", action, action_args, None, None)
    def __repr__(self):
        # Bug fix: ``self.name`` is never assigned anywhere in this class
        # hierarchy, so the original raised AttributeError unless a loader
        # set it externally; getattr keeps that external value when present.
        result = '''SignSprite:
Collision Rectangle: {}
Images: {}
Behavour: {}
Name: {}
Action: {}
Action Arguments: {}'''.format(self.rect, list(i for i in self.images), self.behavour, getattr(self, 'name', None), self.action, self.action_args)
        # Best-effort: the level may not stringify cleanly mid-load.
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except Exception:
            pass
        return result
    # Execute the behavour of the sign sprite
    def behave(self):
        self.behavour(self)
    # Normal standing behavour
    def stand(self):
        pass
# Subclass for NPC sprites
class NPCSprite(Sprite):
    """Autonomous sprite driven by one of the behaviour methods below.

    (dx, dy) hold the current heading in tile units (-1/0/1); cycle is a
    timer that alternates walking and idling.  Frame selection is borrowed
    from PlayerSprite.update_frame via an unbound call, which works because
    both classes share the frame/interval/facing/direction/images attributes.
    """
    def __init__(self, rect, images, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        Sprite.__init__(self, rect, images, behavour, level, scope, action, action_args, health, max_health)
        self.interval = 25    # NPCs animate twice as fast as the player (50)
        self.cycle = 0        # walk/idle phase timer
        self.facing = "front"
        self.direction = None
        self.dx = 0
        self.dy = 0
    def __repr__(self):
        result ='''NPCSprite:
Collision Rectangle: {}
Images: {}
Behavour: {}
Action: {}
Action Arguments: {}
Health: {}'''.format(self.rect, list(i for i in self.images), self.behavour, self.action, self.action_args, self.health)
        # Best-effort: the level may not stringify cleanly mid-load.
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except:
            pass
        return result
    # Execute behavour of the NPC sprite
    def behave(self):
        self.behavour(self)
    # Behavour of NPC to stand in place
    def stand(self):
        pass
    # Behavour of NPC to wander aimlessly around the map
    def wander(self):
        """Every ~384 ticks pick a random heading; walk it for 80 ticks,
        then idle on the base frame."""
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        if self.cycle > 384:
            self.cycle = 0
            self.dx = random.randint(-1, 1)
            self.dy = random.randint(-1, 1)
            # Look one full tile ahead; cancel the walk if blocked.
            if not self.check_collision(self.dx * 64, self.dy * 64, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 385
        time_step = Sprite.behavour_args['time_step']
        self.cycle += 1.0 * time_step
        if self.cycle <= 80:
            self.movement(tilemap, colliders, time_step)
        else:
            self.frame = 0
            PlayerSprite.update_frame(self)
    # Behavour of NPC to panic aimlessly
    def panic(self):
        """Like wander but re-picks a never-zero heading every ~64 ticks and
        moves at 1.5x speed, so the NPC scurries constantly."""
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        if self.cycle > 64:
            self.cycle = 0
            self.dx, self.dy = 0, 0
            while self.dx == 0 and self.dy == 0:
                self.dx = random.randint(-1, 1)
                self.dy = random.randint(-1, 1)
            if not self.check_collision(self.dx, self.dy, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 65
        time_step = Sprite.behavour_args['time_step']
        self.cycle += 1.0 * time_step
        if self.cycle <= 64:
            self.movement(tilemap, colliders, 1.5 * time_step)
        else:
            self.frame = 0
            PlayerSprite.update_frame(self)
    # Behavour of NPC to always move right
    def always_right(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.dx = 1
        self.dy = 0
        time_step = Sprite.behavour_args['time_step']
        self.movement(tilemap, colliders, 1.5 * time_step)
        PlayerSprite.update_frame(self)
    # Standard NPC movement
    def movement(self, tilemap, colliders, time_step):
        """Move one step along (dx, dy); a blocked axis jumps cycle to 80,
        ending the walk window in wander()/panic()."""
        dx, dy = 0, 0
        speed = 1.0 * time_step
        moved = False
        if self.dy == -1:
            dy -= speed
            self.facing = "back"
            self.direction = None
            moved = True
        if self.dy == 1:
            dy += speed
            self.facing = "front"
            self.direction = None
            moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        else:
            dy = 0
            self.cycle = 80
        if self.dx == -1:
            dx -= speed
            self.direction = "left"
            moved = True
        if self.dx == 1:
            dx += speed
            self.direction = "right"
            moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        else:
            dx = 0
            self.cycle = 80
        # Advance the walk-cycle frame counter, wrapping after 4 phases.
        if moved == False or self.frame > self.interval * 4:
            self.frame = 0
        else:
            self.frame += speed * 1.5
        # Sync the integer collision rect with the float position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
# Subclass for player sprite
class PlayerSprite(Sprite):
    """Keyboard-controlled player sprite; always registered global-scope."""
    def __init__(self, rect, images, behavour, level, health=None, max_health=None):
        Sprite.__init__(self, rect, images, behavour, level, "global", health=health, max_health=max_health)
        self.facing = "front"
        self.direction = None
    def __repr__(self):
        return '''PlayerSprite:
Collision Rectangle: {}
Images: {}
Health: {}
Level: <{}>'''.format(self.rect, list(i for i in self.images), self.health, str(self.level))
    # Execute the behavour of the player sprite
    def behave(self):
        self.behavour(self)
    # Determine which frame should be shown
    def update_frame(self):
        """Select the sprite image for the current walk-cycle phase.

        The cycle is four intervals long: base, walk1, base, walk2.  A set
        horizontal direction overrides the vertical facing (the original's
        front/back branches were identical whenever direction was set).
        Refactor: replaces ~60 lines of duplicated if/elif chains with one
        chain -- behavior, including the frame<0 fallthrough to walk2, is
        unchanged.

        NOTE: NPCSprite calls this unbound as PlayerSprite.update_frame(npc),
        so only attributes shared by both classes (frame, interval, facing,
        direction, images, sprite) may be used here.
        """
        if self.direction == "left":
            base = "left"
        elif self.direction == "right":
            base = "right"
        elif self.facing == "front":
            base = "front"
        else:
            base = "back"
        if 0 <= self.frame < self.interval:
            key = base
        elif self.interval <= self.frame < self.interval * 2:
            key = base + "_walk1"
        elif self.interval * 2 <= self.frame < self.interval * 3:
            key = base
        else:
            key = base + "_walk2"
        self.sprite = self.images[key]
    # Player sprite movement
    def movement(self, args):
        """Handle one frame of keyboard movement and interaction.

        args is a (keys, time_step, tilemap, colliders) tuple.  Left-shift
        moves at the slower 1.25x rate (sneak) vs the normal 2.25x; SPACE
        interacts with the first non-player sprite within a 16px margin.
        """
        if self.freeze:
            return
        keys, time_step, tilemap, colliders = args
        dx = 0
        dy = 0
        player_moved = False
        if keys[pygame.K_LSHIFT]:
            speed = time_step * 1.25
        else:
            speed = 2.25 * time_step
        if keys[pygame.K_w]:
            dy -= speed
            self.facing = "back"
            self.direction = None
            player_moved = True
        if keys[pygame.K_s]:
            dy += speed
            self.facing = "front"
            self.direction = None
            player_moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        if keys[pygame.K_a]:
            dx -= speed
            self.direction = "left"
            player_moved = True
        if keys[pygame.K_d]:
            dx += speed
            self.direction = "right"
            player_moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        # Trigger call for interact
        if keys[pygame.K_SPACE]:
            loaded_globals = [i for i in Sprite.global_sprites if i.level == self.level]
            for s in (loaded_globals + Sprite.loaded_locals):
                if type(s) != PlayerSprite:
                    if self.rect.inflate(16, 16).colliderect(s.rect):
                        s.action(s.action_args)
                        break
        # Advance the walk-cycle frame counter, wrapping after 4 phases.
        if player_moved == False or self.frame > self.interval * 4:
            self.frame = 0
        else:
            self.frame += speed / 2
        # Sync the integer collision rect, then pick the matching image.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
        self.update_frame()
    # Regular player behavour
    def normal(self):
        """Default per-frame behaviour: pull shared inputs and move."""
        keys = Sprite.behavour_args['keys']
        time_step = Sprite.behavour_args['time_step']
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.movement((keys, time_step, tilemap, colliders))
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 4/sprite.py | import pygame
import os
from dialog import *
import random
# Functions for actions
def blank(args):
    """Default interaction handler: accepts the action arguments and does
    nothing at all."""
    return None
def speak(args):
    """Basic interaction handler: pop up a dialog box showing *args* as text."""
    TextDialog("assets/dialog_box.png").display_dialog(args)
action_lookup = {'blank': blank, 'speak': speak}
# Class for variables that keep track of whether they've been changed
class Tracker:
    """Wrap a value and remember whether it changed since the last querry()."""
    def __init__(self, value):
        self.value = value
        self.changed = False
    def set(self, new_value):
        """Store *new_value*, marking the tracker dirty only on a real change."""
        if self.value != new_value:
            self.changed = True
            self.value = new_value
    def querry(self):
        """Report and clear the dirty flag.

        Bug fix: the original returned True or (implicitly) None; now it
        returns an explicit bool in both cases.  [sic: name kept for callers]
        """
        if self.changed:
            self.changed = False
            return True
        return False
# Class for animations
class Animation:
    """Frame-sequence animation built from image files.

    images maps frame names to file paths; sequence lists names in play
    order.  backup_list keeps pristine copies so darken()/undo() can apply
    and revert night-time shading.
    """
    def __init__(self, images, sequence):
        self.tick = 0         # fractional frame position
        self.prev_tick = 0    # tick at the previous update (change detector)
        self.frame_list = tuple(pygame.image.load(os.path.join(images[i])).convert_alpha() for i in sequence)
        self.backup_list = tuple(i.copy() for i in self.frame_list)
        self.rect_list = tuple(i.get_rect() for i in self.frame_list)
        self.frame_count = len(self.frame_list) - 1   # highest frame index
        self.frame = self.frame_list[0]
        self.rect = self.rect_list[0]
        self.interval = 50    # time units per frame
    def update(self, time_step):
        '''Advance the animation by time_step, swapping the current frame
        only when the integer frame index changes; wraps past the end.'''
        self.tick += time_step / self.interval
        if int(self.tick) != int(self.prev_tick):
            if int(self.tick) > self.frame_count:
                self.tick = 0
            self.frame = self.frame_list[int(self.tick)]
            self.rect = self.rect_list[int(self.tick)]
            self.prev_tick = float(self.tick)
    def draw(self, window, dest):
        '''Draw current frame to window'''
        window.blit(self.frame, dest)
    def draw_norm(self, window, dest):
        '''Draw the pristine (undarkened) copy of the current frame.'''
        window.blit(self.backup_list[int(self.tick)], dest)
    def draw_clipped(self, window, disp, disp_reg, litmap_rects):
        '''Draw current backup frame with only area outside a light
source darkened.

        disp is the camera displacement, disp_reg the on-screen blit
        position; litmap_rects are lit areas.  The 1px move/undo pair
        presumably avoids degenerate clips at exact edges -- confirm.
        '''
        # First get a list of rectangles that intersect the source
        # Then copy this part of the current backup frame onto the current frame
        img_rect = self.rect.move(1, 1)
        img = self.frame.copy()
        window.blit(img, (disp_reg))
        for i in img_rect.collidelistall(litmap_rects):
            dark_rect = img_rect.clip(litmap_rects[i])
            dark_rect.move_ip(-1, -1)
            if dark_rect.size != (0, 0):
                #window.fill((255, 0, 0), dark_rect.move(disp))
                window.blit(self.backup_list[int(self.tick)], dest=dark_rect.move(disp), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))
    def reset(self):
        '''Reset the animation frame to base'''
        if self.tick != 0:
            self.tick = 0
            self.prev_tick = 0
            self.frame = self.frame_list[0]
            self.rect = self.rect_list[0]
    def darken(self, amount):
        '''Darken every frame in place by subtracting a flat gray level.'''
        for i in self.frame_list:
            shade = pygame.Surface((i.get_width(), i.get_height()))
            shade.fill((amount, amount, amount))
            i.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
        self.frame = self.frame_list[int(self.tick)]
    def undo(self):
        '''Restore every frame from the pristine backup copies.'''
        self.frame_list = tuple(i.copy() for i in self.backup_list)
        self.frame = self.frame_list[int(self.tick)]
# Subclass for static animations
class Static(Animation):
    """Single-image 'animation' sharing Animation's drawing interface.

    Deliberately skips Animation.__init__ (there is no frame sequence).
    """
    def __init__(self, image):
        self.frame = pygame.image.load(os.path.join(image)).convert_alpha()
        self.backup = self.frame.copy()
        self.rect = self.frame.get_rect()
    def update(self, time_step=None):
        '''Frame-update hook for API parity with Animation.update().

        Bug fix: sprite movement code calls animation.update(speed), but the
        original took no argument and raised TypeError; the optional
        time_step keeps both call styles working.  A static image has
        nothing to advance.'''
        pass
    def reset(self):
        '''Reset hook for parity with Animation.reset(); added because idle
        sprite code calls animation.reset(), which previously raised
        AttributeError on Static.  No-op.'''
        pass
    def draw(self, window, dest):
        '''Standard drawing function'''
        window.blit(self.frame, dest)
    def draw_norm(self, window, dest):
        '''Draw backup image'''
        window.blit(self.backup, dest)
    def draw_clipped(self, window, disp, disp_reg, litmap_rects):
        '''Draw the darkened image, re-blitting the pristine backup over lit
        areas.

        Bug fix: the original signature (window, source, litmap_rects)
        matched neither the caller in Sprite.blit_all_night (4 args) nor its
        own body, which referenced undefined disp/disp_reg and a nonexistent
        self.backup_list; now mirrors Animation.draw_clipped.'''
        img_rect = self.rect.move(1, 1)
        img = self.frame.copy()
        window.blit(img, (disp_reg))
        for i in img_rect.collidelistall(litmap_rects):
            dark_rect = img_rect.clip(litmap_rects[i])
            dark_rect.move_ip(-1, -1)
            if dark_rect.size != (0, 0):
                window.blit(self.backup, dest=dark_rect.move(disp), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))
    def darken(self, amount):
        '''Darken the image by an amount'''
        shade = pygame.Surface((self.frame.get_width(), self.frame.get_height()))
        shade.fill((amount, amount, amount))
        self.frame.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
    def undo(self):
        '''Reset all changes to the animation image'''
        self.frame = self.backup.copy()
# Main class for sprites
class Sprite:
    """Base class for all in-game sprites (players, NPCs, signs).

    Class attributes hold the shared sprite registries; behavour_args is a
    dict of per-frame inputs (keys, time_step, ...) filled by the main loop.
    """
    global_sprites = []    # sprites that persist across every level
    local_sprites = []     # sprites tied to a single level
    loaded_locals = []     # subset of local_sprites in the focused level
    focus = None           # sprite the camera follows (normally the player)
    behavour_args = {}
    def __init__(self, rect, animation_dict, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        """Register the sprite and pick its starting animation.

        animation_dict maps state names to Animation/Static objects.
        max_health is accepted for signature parity but unused here.
        """
        self.health = health
        self.rect = rect
        # Float position mirrors rect to allow sub-pixel movement.
        self.x = int(rect.x)
        self.y = int(rect.y)
        if len(animation_dict) == 1:
            # Single-animation sprites intentionally get no animation_dict
            # attribute; darken_all()/reset_imgs() rely on the resulting
            # AttributeError to tell the two cases apart.
            self.animation = list(animation_dict.items())[0][1]
        else:
            self.animation_dict = animation_dict
            self.animation = list(animation_dict.items())[0][1]
        self.freeze = False
        self.behavour = behavour
        self.level = level
        self.scope = scope
        self.action = action_lookup[action]
        self.action_args = action_args
        if scope == "global":
            Sprite.global_sprites.append(self)
        elif scope == "local":
            # NOTE(review): "local" sprites are appended to global_sprites,
            # not local_sprites -- looks unintended; confirm.
            Sprite.global_sprites.append(self)
        else:
            print("An error occured while loading a sprite:\n'{}' is an invalid scope".format(scope))
    def __repr__(self):
        '''Override in subclass'''
        pass
    def __str__(self):
        return self.__repr__()
    def check_collision(self, dx, dy, tilemap, collider_ids):
        '''Return True when the sprite may move by (dx, dy): the moved rect
        touches no collider tile and no other sprite.  The offset is scaled
        by 1.5, giving a small look-ahead margin; tiles are 64px.'''
        try:
            dx, dy = round(1.5 * dx), round(1.5 * dy)
            new_rect = self.rect.move(dx, dy)
            # Tile coordinates of the moved rectangle's four corners.
            x1 = int(new_rect.x / 64)
            y1 = int(new_rect.y / 64)
            x2 = int((new_rect.x + new_rect.width) / 64)
            y2 = int((new_rect.y + new_rect.height) / 64)
            if (new_rect.x < 0) or (new_rect.y < 0) or (x2 < 0) or (y2 < 0):
                return False
            if tilemap[y1][x1] in collider_ids:
                return False
            if tilemap[y2][x1] in collider_ids:
                return False
            if tilemap[y1][x2] in collider_ids:
                return False
            if tilemap[y2][x2] in collider_ids:
                return False
            if Sprite.check_sprites_collide(self, dx, dy) != None:
                return False
            return True
        except IndexError:
            # Off the edge of the tilemap counts as blocked.
            return False
    def behave(self):
        '''Function to execute the behavour of sprites. Override in subclass'''
        pass
    def check_load_zone(self):
        '''Check if the sprite is in a loading zone, and if so, send the sprite to the relevant level.
Return False if nothing happens, return True if successfull.'''
        x, y = self.rect.center
        x = int(x / 64)
        y = int(y / 64)
        if (x, y) in self.level.loading_zones:
            # zone = (level factory, (tile_x, tile_y) spawn position)
            zone = self.level.loading_zones[(x, y)]
            self.level = zone[0]()
            self.x = zone[1][0] * 64
            self.y = zone[1][1] * 64
            self.rect.x = zone[1][0] * 64
            self.rect.y = zone[1][1] * 64
            return True
        else:
            return False
    def check_lit_tile(self, litmap):
        '''Check if the sprite is partially or fully on tiles that are illuminated.
-Return 0 if sprite is NOT touching a lit tile.
-Return 1-3 if the sprite is PARTIALLY touching a lit tile.
-Return 4 if the sprite is ONLY touching lit tiles.
-Return 0 if an IndexError occurs (entity is out of bounds).
'''
        try:
            # Count how many of the animation rect's corners sit on lit tiles.
            r = self.animation.rect
            c = 0
            for x in (r.left, r.right):
                for y in (r.bottom, r.top):
                    if litmap[y // 64][x // 64]: c += 1
            return c
        except IndexError:
            return 0
    @classmethod
    def freeze(cls, state):
        '''Set the freeze state of all sprites'''
        # NOTE(review): this classmethod shares its name with the instance
        # attribute self.freeze set in __init__, so instances shadow the
        # method; call it via the class (Sprite.freeze(True)).
        for s in cls.global_sprites + cls.loaded_locals:
            s.freeze = state
    @classmethod
    def check_sprites_collide(cls, entity, dx, dy):
        '''Function to check if a sprite has collided with another sprite, and if so, return that sprite'''
        # NOTE(review): returns the collided *rect*, not the sprite itself.
        entity_rect = entity.rect.copy()
        sprite_list = cls.local_sprites + [i for i in cls.global_sprites if i.level == entity.level]
        other_rects = list(s.rect for s in sprite_list if s.rect != entity_rect)
        index = entity_rect.move(dx, dy).collidelist(other_rects)
        if index == -1:
            return None
        return other_rects[index]
    @classmethod
    def behave_all(cls):
        '''Function to execute the behavour of all sprites'''
        for s in cls.global_sprites + cls.loaded_locals:
            s.behave()
        # A player entering a load zone triggers the screen-wipe transition.
        for s in cls.global_sprites:
            if s.check_load_zone() and type(s) == PlayerSprite:
                Sprite.behavour_args["screen_wipe"] = True
    @classmethod
    def blit_all(cls, dispx, dispy, window):
        '''Blit all sprites that are currently located in the same level as the focus sprite.
Does not handle light levels'''
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level == cls.focus.level]
        # Painter's order: sprites lower on screen draw over those above.
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            s.animation.draw(window, (s.x + dispx, s.y + dispy - s.rect.height))
            #window.blit(s.animation.frame, (s.x + dispx, s.y + dispy - s.rect.height))
            #window.fill((127, 0, 0), s.rect.move(dispx, dispy))
    @classmethod
    def blit_all_night(cls, dispx, dispy, window, litmap, lit_rects):
        '''Blit all sprites that are currently located in the same level as the focus sprite.
Designed for handling light levels'''
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level == cls.focus.level]
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            # Anchor the (world-space) animation rect to the sprite's feet.
            s.animation.rect.bottomleft = (s.x, s.y + s.rect.height)
            disp_reg = (s.x + dispx, s.y + dispy - s.rect.height)
            lit_tiles = s.check_lit_tile(litmap)
            # Player is not touching any lit tiles, draw normally.
            if not lit_tiles:
                s.animation.draw(window, disp_reg)
            # Player is only touching lit tiles, draw illuminated backup.
            elif lit_tiles == 4:
                s.animation.draw_norm(window, disp_reg)
            # Player is partially touching lit tiles,
            else:
                s.animation.draw_clipped(window, (dispx, dispy), disp_reg, lit_rects)
    @classmethod
    def darken_all(cls, amount=100):
        """Apply night shading to every sprite's animation frames; the
        AttributeError path covers single-animation sprites (see __init__)."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.darken(amount)
            except AttributeError:
                i.animation.darken(amount)
    @classmethod
    def reset_imgs(cls):
        """Undo darken_all(), restoring every frame from its backup."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.undo()
            except AttributeError:
                i.animation.undo()
    @classmethod
    def reload_loaded_locals(cls):
        '''Recalculate the list of loaded local sprites'''
        cls.loaded_locals = [i for i in cls.local_sprites if i.level == cls.focus.level]
# Subclass for sign sprites
class SignSprite(Sprite):
    """Static sign sprite: forced to local scope with no health; interaction
    happens through its configured action."""
    def __init__(self, rect, animation_dict, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        # scope/health params are accepted for signature parity with the
        # other subclasses but overridden to "local"/None here.
        Sprite.__init__(self, rect, animation_dict, behavour, level, "local", action, action_args, None, None)
    def __repr__(self):
        # Bug fix: ``self.name`` is never assigned anywhere in this class
        # hierarchy, so the original raised AttributeError unless a loader
        # set it externally; getattr keeps that external value when present.
        result = '''SignSprite:
Collision Rectangle: {}
Behavour: {}
Name: {}
Action: {}
Action Arguments: {}'''.format(self.rect, self.behavour, getattr(self, 'name', None), self.action, self.action_args)
        # Best-effort: the level may not stringify cleanly mid-load.
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except Exception:
            pass
        return result
    # Execute the behavour of the sign sprite
    def behave(self):
        self.behavour(self)
    # Normal standing behavour
    def stand(self):
        pass
# Subclass for NPC sprites
class NPCSprite(Sprite):
    """Non-player character with simple autonomous behaviours (stand, wander,
    panic, always_right).  Movement intent is stored as dx/dy in {-1, 0, 1}
    and advanced by `movement` each behaviour tick."""

    def __init__(self, rect, animation_dict, behavour, level, scope, action='blank', action_args=None, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavour, level, scope, action, action_args, health, max_health)
        self.interval = 25               # animation timing constant
        self.cycle = 0                   # behaviour timer, accumulates time steps
        self.facing = Tracker("front")   # vertical facing: "front"/"back"
        self.direction = Tracker(None)   # horizontal facing: "left"/"right"/None
        self.dx = 0                      # current x movement intent (-1/0/1)
        self.dy = 0                      # current y movement intent (-1/0/1)

    def __repr__(self):
        result ='''NPCSprite:
Collision Rectangle: {}
Behavour: {}
Action: {}
Action Arguments: {}
Health: {}'''.format(self.rect, self.behavour, self.action, self.action_args, self.health)
        # level may be missing on partially constructed sprites.
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except:
            pass
        return result

    # Execute behavour of the NPC sprite
    def behave(self):
        self.behavour(self)

    # Behavour of NPC to stand in place
    def stand(self):
        pass

    # Behavour of NPC to wander aimlessly around the map
    def wander(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        # Every ~384 time units, roll a new random direction.
        if self.cycle > 384:
            self.cycle = 0
            self.dx = random.randint(-1, 1)
            self.dy = random.randint(-1, 1)
            # If the chosen step is blocked, stand still and skip this walk phase.
            if not self.check_collision(self.dx * 64, self.dy * 64, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 385
        time_step = Sprite.behavour_args['time_step']
        self.cycle += time_step
        # Walk for the first 80 units of the cycle, then rest.
        if self.cycle <= 80:
            self.movement(tilemap, colliders, time_step)
        else:
            # Briefly reset the animation to the standing frame when resting.
            if self.cycle <= 100:
                self.animation.reset()

    # Behavour of NPC to panic aimlessly
    def panic(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        # Much shorter cycle than wander, and always forces a nonzero direction.
        if self.cycle > 64:
            self.cycle = 0
            self.dx, self.dy = 0, 0
            while self.dx == 0 and self.dy == 0:
                self.dx = random.randint(-1, 1)
                self.dy = random.randint(-1, 1)
            if not self.check_collision(self.dx, self.dy, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 65
        time_step = Sprite.behavour_args['time_step']
        self.cycle += time_step
        if self.cycle <= 64:
            # 1.5x speed while panicking.
            self.movement(tilemap, colliders, 1.5 * time_step)
        else:
            self.animation.reset()

    # Behavour of NPC to always move right
    def always_right(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.dx = 1
        self.dy = 0
        time_step = Sprite.behavour_args['time_step']
        self.movement(tilemap, colliders, 1.5 * time_step)

    # Standard NPC movement: apply dx/dy intent with collision checks and
    # keep the facing/direction trackers and animation in sync.
    def movement(self, tilemap, colliders, time_step):
        dx, dy = 0, 0
        speed = 1.0 * time_step
        moved = False
        if self.dy == -1:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            moved = True
        if self.dy == 1:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        else:
            # Blocked vertically: end the walking phase of the cycle.
            dy = 0
            self.cycle = 80
        if self.dx == -1:
            dx -= speed
            self.direction.set("left")
            moved = True
        if self.dx == 1:
            dx += speed
            self.direction.set("right")
            moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        else:
            # Blocked horizontally: end the walking phase of the cycle.
            dx = 0
            self.cycle = 80
        if moved == False:
            self.animation.reset()
        else:
            # Swap the active animation only when facing/direction changed
            # (Tracker.querry presumably reports a change -- TODO confirm).
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed * 1.5)
        # Sync the integer collision rect to the float x/y position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
# Subclass for player sprite
class PlayerSprite(Sprite):
    """The user-controlled sprite.  Always registered as a "global" sprite so
    it follows the player across levels."""

    def __init__(self, rect, animation_dict, behavour, level, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavour, level, "global", health=health, max_health=max_health)
        self.facing = Tracker("front")   # vertical facing: "front"/"back"
        self.direction = Tracker(None)   # horizontal facing: "left"/"right"/None

    def __repr__(self):
        return '''PlayerSprite:
Collision Rectangle: {}
Health: {}
Level: <{}>'''.format(self.rect, self.health, str(self.level))

    # Execute the behavour of the player sprite
    def behave(self):
        self.behavour(self)

    # Standard player sprite movement driven by WASD + shift (slow walk).
    # args = (pressed-keys, time_step, tilemap, colliders).
    def movement(self, args):
        if self.freeze:
            return
        keys = args[0]
        time_step = args[1]
        tilemap = args[2]
        colliders = args[3]
        dx = 0
        dy = 0
        player_moved = False
        # Holding shift walks slowly; otherwise run.
        if keys[pygame.K_LSHIFT]:
            speed = time_step * 1.25
        else:
            speed = 2.25 * time_step
        if keys[pygame.K_w]:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            player_moved = True
        if keys[pygame.K_s]:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            player_moved = True
        # Axis-separated collision: vertical first, then horizontal.
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        if keys[pygame.K_a]:
            dx -= speed
            self.direction.set("left")
            player_moved = True
        if keys[pygame.K_d]:
            dx += speed
            self.direction.set("right")
            player_moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        # Trigger call for interact: fire the action of the first non-player
        # sprite within 16px of the player's collision box.
        if keys[pygame.K_SPACE]:
            loaded_globals = [i for i in Sprite.global_sprites if i.level == self.level]
            for s in (loaded_globals + Sprite.loaded_locals):
                if type(s) != PlayerSprite:
                    if self.rect.inflate(16, 16).colliderect(s.rect):
                        s.action(s.action_args)
                        break
        if player_moved == False:
            self.animation.reset()
        else:
            # Swap the active animation only on a facing/direction change.
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed / 2)
        # Sync the integer collision rect to the float x/y position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))

    # Regular player behavour: pull shared frame arguments and move.
    def normal(self):
        keys = Sprite.behavour_args['keys']
        time_step = Sprite.behavour_args['time_step']
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.movement((keys, time_step, tilemap, colliders))
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 4/Maze 2 Test.py | import pygame
import os
from sys import platform
import matplotlib.pyplot as plt
import time
import threading
# Pygame initialization (mixer pre-init reduces sound latency: 44.1 kHz,
# 16-bit signed, mono, 512-sample buffer).
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.init()
# If the platform is windows, ensure that the system is not stretching the game window to the display.
if platform == "win32":
    import ctypes
    ctypes.windll.user32.SetProcessDPIAware()
    os.environ['SDL_VIDEODRIVER'] = 'directx'
screen_width = pygame.display.Info().current_w
screen_height = pygame.display.Info().current_h
# Window setup: 16x9 grid of 64px tiles.
win_width = 64 * 16
win_height = 64 * 9
win = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption("Maze")
icon = pygame.image.load(os.path.join('assets', 'icon.bmp'))
pygame.display.set_icon(icon)
# Worlds module must be imported after the window mode is set (it converts
# surfaces at import time), and the sprite module depends on worlds.
from worlds import *
from sprite import *
#missing = pygame.image.load(os.path.join('tiles/missing.png'))
#missing = pygame.transform.scale(missing, (64, 64)).convert()
# Dialog system initialization
Dialog.dialog_init((win_width, win_height), win)
def draw_tilemap(tiles, window, disp, deco=False):
    '''Draw the visible portion of a tile (or deco) map onto `window` and
    return it.  `disp` is the (non-positive) camera displacement.'''
    first_col = int(abs(disp[0] / 64))
    first_row = int(abs(disp[1] / 64))
    # Screen position of the first drawn tile.
    origin_x = disp[0] + first_col * 64
    origin_y = disp[1] + first_row * 64
    # Decoration layer and ground layer use separate image tables.
    lookup = Level.deco_ids if deco else Level.tile_ids
    # Only iterate the 10x17 window of tiles that can appear on screen.
    for row_num, row in enumerate(tiles[first_row:first_row + 10]):
        for col_num, tile_id in enumerate(row[first_col:first_col + 17]):
            if tile_id in lookup:
                lookup[tile_id].draw(window, (origin_x + col_num * 64,
                                              origin_y + row_num * 64))
    return window
def stitch_tilemap(lvl, disp, deco=False):
    '''Render a level's entire tile (or deco) map onto a single new surface.
    `disp` is unused but kept for call-site compatibility.'''
    canvas = pygame.Surface((lvl.width, lvl.height))
    lookup = Level.deco_ids if deco else Level.tile_ids
    for row_num, row in enumerate(lvl.tilemap):
        for col_num, tile_id in enumerate(row):
            if tile_id in lookup:
                lookup[tile_id].draw(canvas, (col_num * 64, row_num * 64))
    return canvas
def construct_lightmap(decomap, light):
    '''Build a subtractive night-shade surface for the focused level, with a
    light halo blitted wherever the decomap holds a lamp (deco id 1).'''
    level = Sprite.focus.level
    shade = pygame.Surface((level.width, level.height))
    shade.fill((100, 100, 100))
    for row_num, row in enumerate(decomap):
        for col_num, cell in enumerate(row):
            # Deco id 1 is the lamp-post top; center the halo on that tile.
            if cell == 1:
                shade.blit(light, (col_num * 64 - 64, row_num * 64 - 64))
    shade.convert()
    return shade
def center_at(user, world_width, world_height):
    '''Compute the camera displacement that centers the view on `user`,
    clamped so the view never scrolls past the world's edges.'''
    ideal_x = win_width / 2 - user[0]
    ideal_y = win_height / 2 - user[1]
    dispx = max(min(ideal_x, 0), win_width - world_width)
    dispy = max(min(ideal_y, 0), win_height - world_height)
    return (dispx, dispy)
def wipe_screen(wipe, screenshot):
    '''Function to wipe the screen black. Requires copy of the screen.
    Scrolls the wipe graphic up over a frozen screenshot of the scene;
    advances by real elapsed time so the effect runs at a fixed speed.'''
    # Wipe graphic is double-height so it can fully cover then pass the screen.
    scaled_wipe = pygame.transform.scale(wipe, (win.get_width(), win.get_height() * 2))
    scaled_screenshot = pygame.transform.scale(screenshot, (win.get_width(), win.get_height()))
    i = 0
    while i < win.get_height() * 2:
        t1 = time.perf_counter_ns()
        win.blit(scaled_screenshot, (0, 0))
        win.blit(scaled_wipe, (0, win.get_height() - i))
        pygame.display.flip()
        # Frame time in arbitrary speed units (ns / 6e5).
        time_step = (time.perf_counter_ns() - t1) / 600000
        # Scale advance to the actual window height vs the base resolution.
        i += time_step * win.get_height() / win_height
def unwipe_screen(wipe, screenshot):
    '''Function to unwipe the screen. Requires a copy of screen without any
    wiping effect: scrolls the wipe graphic up and off, revealing the scene.'''
    scaled_wipe = pygame.transform.scale(wipe, (win.get_width(), win.get_height() * 2))
    scaled_screenshot = pygame.transform.scale(screenshot, (win.get_width(), win.get_height()))
    i = 0
    while i < win.get_height() * 2:
        t1 = time.perf_counter_ns()
        win.blit(scaled_screenshot, (0, 0))
        win.blit(scaled_wipe, (0, 0 - i))
        pygame.display.flip()
        # Frame time in arbitrary speed units (ns / 6e5).
        time_step = (time.perf_counter_ns() - t1) / 600000
        # Scale advance to the actual window height vs the base resolution.
        i += time_step * win.get_height() / win_height
def render_thread():
    '''Background rendering loop: draws the focused level and all sprites to
    an internal surface, then scales it onto the display window.  Runs as a
    daemon thread alongside the game-logic loop in play_level.'''
    global step_list
    # Give the main thread time to finish sprite setup before first draw.
    time.sleep(1.0)
    win2 = pygame.Surface((win_width, win_height))
    Dialog.internal_window = win2
    # Screen wipe setup
    wipe_up = pygame.image.load(os.path.join('assets/wipe_up.png')).convert_alpha()
    wipe_down = pygame.image.load(os.path.join('assets/wipe_down.png')).convert_alpha()
    # Nightime-shading setup
    light = pygame.image.load(os.path.join('assets/light.png')).convert_alpha()
    shade = construct_lightmap(Sprite.focus.level.decomap, light)
    night = False
    while True:
        # Wipe the screen if the player changed zone, then rebuild the
        # per-level caches (loaded locals, light map) for the new level.
        if Sprite.behavour_args['screen_wipe']:
            wipe_screen(wipe_up, win2)
            Sprite.reload_loaded_locals()
            shade = construct_lightmap(Sprite.focus.level.decomap, light)
            #world_image = stitch_tilemap(Sprite.focus.level, displacement)
            #world_image.convert()
        time_start = time.perf_counter_ns()
        displacement = center_at((Sprite.focus.x, Sprite.focus.y), Sprite.focus.level.width, Sprite.focus.level.height)
        # Ground layer, then sprites, then decoration layer on top.
        win2 = draw_tilemap(Sprite.focus.level.tilemap, win2, displacement)
        #win2.blit(world_image, displacement, area=pygame.Rect(0, 0, win_width, win_height).move_ip(displacement[0], displacement[1]))
        Sprite.blit_all(displacement, win2)
        win2 = draw_tilemap(Sprite.focus.level.decomap, win2, displacement, deco=True)
        # Record frame render time (ms) for the post-run performance graph.
        step_list.append((time.perf_counter_ns() - time_start) / 1000000)
        #if night: win2.blit(shade,
        # (displacement[0], displacement[1]),
        # special_flags=pygame.BLEND_SUB,
        # area=pygame.Rect(0, 0, win_width, win_height).move_ip(displacement[0], displacement[1])
        # )
        # Transfer internal window to displayed window and update screen.
        pygame.transform.scale(win2, (win.get_width(), win.get_height()), win) # 5-6 ms
        if Sprite.behavour_args['screen_wipe']:
            unwipe_screen(wipe_down, win2)
            Sprite.behavour_args['screen_wipe'] = False
        pygame.display.flip()
def play_level(start_level):
    '''Play a level: run the game-logic loop (input, sprite behaviour, timing)
    until the window is closed or backspace is pressed.  Rendering happens in
    the separate render_thread.'''
    global win
    full = False
    # BUGFIX: `night` and `time_taken_ms` were referenced in the event loop
    # (F/Z/X debug keys and the first frame's print) before ever being
    # assigned, raising NameError.  Initialize them up front.
    night = False
    time_taken_ms = 0
    # Setup all sprites
    player_sprite = sprite_setup(start_level)
    Sprite.focus = player_sprite
    # Misc.
    #displacement = center_at(Sprite.focus.rect, Sprite.focus.level.width, Sprite.focus.level.height)
    time_step = 1
    clock = pygame.time.Clock()
    timer = pygame.time.Clock()  # For measuring how long various lines are taking to run.
    time_taken = 0               # For recording the above
    Sprite.behavour_args["screen_wipe"] = True
    # Main loop
    done = False
    while not done:
        time_start = time.perf_counter_ns()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE:
                    return
                elif event.key == pygame.K_F11:
                    # Toggle fullscreen at native resolution.
                    full = not(full)
                    if full:
                        win = pygame.display.set_mode((screen_width, screen_height), pygame.FULLSCREEN | pygame.DOUBLEBUF)
                    else:
                        win = pygame.display.set_mode((win_width, win_height))
                elif event.key == pygame.K_f:
                    #print(1000 / time_taken, "fps. Fullscreen:", full, "Night:", night)
                    print(time_taken_ms, "ms. Fullscreen:", full, "Night:", night)
                elif event.key == pygame.K_z:
                    night = True
                elif event.key == pygame.K_x:
                    night = False
        # Get keyboard input for sprite arguments
        keys = pygame.key.get_pressed()
        # Update sprites via the shared behaviour-argument dictionary.
        Sprite.behavour_args['keys'] = keys
        Sprite.behavour_args['time_step'] = time_step
        Sprite.behave_all()
        #timer.tick()
        if keys[pygame.K_v]:
            # Debug: old time-step calculation based on pygame's clock.
            time_step = min(clock.tick() / 10, 2.5)
        else:
            # Time step derived from how long this tick actually took, capped.
            time_taken = time.perf_counter_ns() - time_start
            time_taken_ms = time_taken / 1000000
            time_step = min(time_taken_ms / 10, 12)
        Tile.advance_frame(time_step)
# Per-frame render times (ms), appended by render_thread, graphed after exit.
step_list = []

if __name__ == "__main__":
    print("Debug Controls:")
    print("\t-z: Night")
    print("\t-x: Day")
    print("\t-f: Print framerate")
    print("\t-v: Hold for old time-step calculation")
    print("\t-Backspace: Stop game")
    print("\nTodo:")
    print("\t-Animate more tiles")
    print("\t-Add more to sprite system")
    print("\t\t-Add more behaviours")
    # Rendering runs in a daemon thread so it dies with the main loop.
    rendering = threading.Thread(target=render_thread)
    rendering.daemon = True
    rendering.start()
    play_level(demo)
    pygame.quit()
    # Plot the recorded frame times (drop the first, warm-up, sample).
    if len(step_list) > 0:
        step_list.pop(0)
        print("Average Value:", sum(step_list) / len(step_list))
        graph = plt.figure()
        graph.suptitle('Timestep Each Tick')
        plt.plot(step_list, label="Timestep")
        plt.legend()
        plt.show()
|
Sciguy324/Maze-Game-Python | Maze (Pygame) Checkpoint 1/sprite.py | import pygame
import os
import ast
from dialog import *
import random
# Functions for actions
def blank(args):
    '''Default interaction that does absolutely nothing.'''
    return None
def speak(args):
    '''Basic interaction that pops up a dialog box showing `args` as text.'''
    TextDialog("assets/dialog_box.png").display_dialog(args)
action_lookup = {'blank': blank, 'speak': speak}
# Main class for sprites
class Sprite:
    """Base class for all sprites.

    Class attributes:
      instances     -- every live sprite, sorted by depth before drawing.
      behavour_args -- shared per-frame arguments consumed by behaviours.
      persists      -- serialized state of named (persistent) sprites, keyed
                       by name, so they survive level reloads.
    """
    instances = []
    behavour_args = {}
    persists = {}

    def __init__(self, rect, images, behavour, name=None, action='blank', action_args=None, level='test', health=None, max_health=None):
        self.frame = 0
        self.interval = 20
        self.health = health
        # Named sprites are persistent: if a record already exists, rebuild
        # the sprite from the stored state instead of the given arguments.
        if name:
            if name in Sprite.persists:
                if Sprite.persists[name]['level'] != level:  # Abort construction if persistent is not in the level being loaded
                    return
                sprite_type = Sprite.persists[name]['type']
                r = ast.literal_eval(Sprite.persists[name]['rect'])
                s_rect = pygame.Rect(r)
                s_imgs = ast.literal_eval(Sprite.persists[name]['images'])
                s_behave = Sprite.persists[name]['behavour']
                s_action = Sprite.persists[name]['action']
                s_action_args = Sprite.persists[name]['action_args']
                s_health = Sprite.persists[name]['health']
                s_max_health = Sprite.persists[name]['max_health']
                if sprite_type == 'SignSprite':
                    full_sprite = SignSprite(s_rect, s_imgs, s_behave, action=s_action, action_args=s_action_args, level=level, health=s_health, max_health=s_max_health)
                elif sprite_type == 'NPCSprite':
                    full_sprite = NPCSprite(s_rect, s_imgs, s_behave, action=s_action, action_args=s_action_args, level=level, health=s_health, max_health=s_max_health)
                full_sprite.name = name
                if not full_sprite in Sprite.instances:
                    Sprite.instances.append(full_sprite)
                return
        self.rect = rect
        # images maps pose name -> file path; load each into a surface.
        self.images = dict((v, pygame.image.load(os.path.join(k)).convert_alpha()) for v, k in images.items())
        # Start on the first pose defined in the dictionary.
        self.sprite = list(self.images.items())[0][1]
        # NOTE(review): the instance attribute `freeze` shadows the classmethod
        # of the same name below; instance lookups still resolve correctly.
        self.freeze = False
        self.behavour = behavour
        self.action = action_lookup[action]
        self.action_args = action_args
        if name:
            self.name = name
            # Record everything needed to reconstruct this sprite later.
            Sprite.persists[name] = {'images': str(images),
                                     'rect': str(rect.topleft + rect.size),
                                     'level': level,
                                     'behavour': behavour,
                                     'action': action,
                                     'action_args': action_args,
                                     'health': health,
                                     'max_health': max_health}
        Sprite.instances.append(self)

    def __repr__(self):
        '''Override in subclass'''
        pass

    def __str__(self):
        # BUGFIX: previously called Sprite.__repr__(self) and returned None,
        # which both ignored subclass overrides and made str() raise
        # "TypeError: __str__ returned non-string".  Delegate properly.
        return self.__repr__()

    # Implicit static helper: called as Sprite.check_collision(rect, ...)
    # with an explicit rect rather than through an instance.
    def check_collision(rect, dx, dy, tilemap, collider_ids):
        '''Function to check if a sprite rectangle, moved by (dx, dy), stays
        clear of colliding tiles, other sprites, and the map edge.
        Returns True when the move is allowed.'''
        try:
            new_rect = rect.move(dx, dy)
            # Tile coordinates of the rect's four corners.
            x1 = int(new_rect.x / 64)
            y1 = int(new_rect.y / 64)
            x2 = int((new_rect.x + new_rect.width) / 64)
            y2 = int((new_rect.y + new_rect.height) / 64)
            # Off the top/left of the map counts as blocked.
            if (new_rect.x < 0) or (new_rect.y < 0) or (x2 < 0) or (y2 < 0):
                return False
            if tilemap[y1][x1] in collider_ids:
                return False
            if tilemap[y2][x1] in collider_ids:
                return False
            if tilemap[y1][x2] in collider_ids:
                return False
            if tilemap[y2][x2] in collider_ids:
                return False
            if Sprite.check_sprites_collide(rect, dx, dy) != None:
                return False
            return True
        except IndexError:
            # Off the bottom/right of the map: blocked.
            return False

    def behave(self):
        '''Function to execute the behavour of sprites. Override in subclass'''
        pass

    #def movement(self, args):
    # '''Function to control how the sprite moves. Override in subclasses'''
    # pass

    @classmethod
    def freeze(cls, state):
        '''Set the freeze state of all sprites'''
        for instance in cls.instances:
            instance.freeze = state

    @classmethod
    def check_sprites_collide(cls, entity, dx, dy):
        '''Function to check if a sprite has collided with another sprite, and if so, return that sprite'''
        # NOTE(review): exclusion uses rect value-equality, so two sprites
        # with identical rects would both be skipped -- verify intended.
        other_rects = list(instance.rect for instance in cls.instances if instance.rect != entity)
        index = entity.move(dx, dy).collidelist(other_rects)
        if index == -1:
            return None
        return other_rects[index]

    @classmethod
    def behave_all(cls):
        '''Function to execute the behavour of all sprites'''
        for instance in cls.instances:
            instance.behave()

    @classmethod
    def blit_all(cls, displacement, window):
        # Painter's-order: sort by vertical center so lower sprites draw on top.
        cls.instances.sort(key=lambda kv: kv.rect.center[1])
        for instance in cls.instances:
            window.blit(instance.sprite, (instance.rect.x + displacement[0], instance.rect.y + displacement[1] - instance.rect.height))
            #window.fill((127, 0, 0), instance.rect.move(displacement))

    @classmethod
    def clear(cls):
        '''Delete sprite instances'''
        cls.instances = []
# Subclass for sign sprites
class SignSprite(Sprite):
    """Stationary, interactable sign.  Health arguments are accepted only for
    signature compatibility; signs carry no health."""

    def __init__(self, rect, images, behavour, name=None, action='blank', action_args=None, level='test', health=None, max_health=None):
        Sprite.__init__(self, rect, images, behavour, name, action, action_args, level, None, None)
        if name:
            Sprite.persists[name]['type'] = 'SignSprite'

    def __repr__(self):
        # BUGFIX: `self.name` only exists for named sprites (Sprite.__init__
        # sets it conditionally), so repr() of an unnamed sign raised
        # AttributeError.  Fall back to None via getattr.
        result = '''SignSprite:
Collision Rectangle: {}
Images: {}
Behavour: {}
Name: {}
Action: {}
Action Arguments: {}'''.format(self.rect, list(i for i in self.images), self.behavour, getattr(self, 'name', None), self.action, self.action_args)
        # level may be absent; only report it when set (was a bare except).
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except AttributeError:
            pass
        return result

    # Execute the behavour of the sign sprite
    def behave(self):
        self.behavour(self)

    # Normal standing behavour: signs do nothing on their own.
    def stand(self):
        pass
# Subclass for NPC sprites
class NPCSprite(Sprite):
    """Non-player character with simple autonomous behaviours (stand, wander,
    panic).  Movement intent lives in dx/dy as -1/0/1 and is applied by
    `movement`; animation frames are chosen by PlayerSprite.update_frame."""

    def __init__(self, rect, images, behavour, name=None, action='blank', action_args=None, level='test', health=None, max_health=None):
        Sprite.__init__(self, rect, images, behavour, name, action, action_args, level, health, max_health)
        self.interval = 10               # animation frame interval
        self.cycle = 0                   # behaviour timer, accumulates time steps
        self.facing = "front"            # vertical facing: "front"/"back"
        self.direction = None            # horizontal facing: "left"/"right"/None
        self.dx = 0                      # x movement intent (-1/0/1)
        self.dy = 0                      # y movement intent (-1/0/1)
        if name:
            # Tag the persistence record so reload rebuilds the right subclass.
            Sprite.persists[name]['type'] = 'NPCSprite'

    def __repr__(self):
        result ='''NPCSprite:
Collision Rectangle: {}
Images: {}
Behavour: {}
Name: {}
Action: {}
Action Arguments: {}
Health: {}'''.format(self.rect, list(i for i in self.images), self.behavour, self.name, self.action, self.action_args, self.health)
        # level may be missing on partially constructed sprites.
        try:
            result += '\nInhabited Level: {}'.format(self.level)
        except:
            pass
        return result

    # Execute behavour of the NPC sprite
    def behave(self):
        self.behavour(self)

    # Behavour of NPC to stand in place
    def stand(self):
        pass

    # Behavour of NPC to wander aimlessly around the map
    def wander(self):
        tilemap = Sprite.behavour_args['tilemap']
        collisions = Sprite.behavour_args['collisions']
        # Every ~384 time units, roll a new random direction.
        if self.cycle > 384:
            self.cycle = 0
            self.dx = random.randint(-1, 1)
            self.dy = random.randint(-1, 1)
            # If a full-tile step that way is blocked, stand still instead.
            if not Sprite.check_collision(self.rect, self.dx * 64, self.dy * 64, tilemap, collisions):
                self.dx, self.dy = 0, 0
                self.cycle = 385
        time_step = Sprite.behavour_args['time_step']
        self.cycle += 1.0 * time_step
        # Walk for the first 80 units of the cycle, then rest on frame 0.
        if self.cycle <= 80:
            self.movement(tilemap, collisions, time_step)
        else:
            self.frame = 0
            PlayerSprite.update_frame(self)

    # Behavour of NPC to panic aimlessly
    def panic(self):
        tilemap = Sprite.behavour_args['tilemap']
        collisions = Sprite.behavour_args['collisions']
        # Short cycle, and always forces a nonzero direction.
        if self.cycle > 64:
            self.cycle = 0
            self.dx, self.dy = 0, 0
            while self.dx == 0 and self.dy == 0:
                self.dx = random.randint(-1, 1)
                self.dy = random.randint(-1, 1)
            if not Sprite.check_collision(self.rect, self.dx, self.dy, tilemap, collisions):
                self.dx, self.dy = 0, 0
                self.cycle = 65
        time_step = Sprite.behavour_args['time_step']
        self.cycle += 1.0 * time_step
        if self.cycle <= 64:
            # 1.5x speed while panicking.
            self.movement(tilemap, collisions, 1.5 * time_step)
        else:
            self.frame = 0
            PlayerSprite.update_frame(self)

    # Standard NPC movement: apply dx/dy intent with per-axis collision checks.
    def movement(self, tilemap, collisions, time_step):
        dx, dy = 0, 0
        speed = 1.0 * time_step
        moved = False
        if self.dy == -1:
            dy -= speed
            self.facing = "back"
            self.direction = None
            moved = True
        if self.dy == 1:
            dy += speed
            self.facing = "front"
            self.direction = None
            moved = True
        if Sprite.check_collision(self.rect, 0, dy, tilemap, collisions):
            self.rect.move_ip(0, dy)
        else:
            # Blocked vertically: end this walking phase.
            dy = 0
            self.cycle = 80
        if self.dx == -1:
            dx -= speed
            self.direction = "left"
            moved = True
        if self.dx == 1:
            dx += speed
            self.direction = "right"
            moved = True
        if Sprite.check_collision(self.rect, dx, 0, tilemap, collisions):
            self.rect.move_ip(dx, 0)
        else:
            # Blocked horizontally: end this walking phase.
            dx = 0
            self.cycle = 80
        # Advance or reset the walk-cycle frame counter.
        if moved == False or self.frame > self.interval * 4:
            self.frame = 0
        else:
            self.frame += round(speed / 7) + 1
# Subclass for player sprite
class PlayerSprite(Sprite):
    """The user-controlled sprite, moved with WASD (shift to walk slowly)."""

    def __init__(self, rect, images, behavour, name=None, health=None, max_health=None):
        Sprite.__init__(self, rect, images, behavour, None, health=health, max_health=max_health)
        self.facing = "front"    # vertical facing: "front"/"back"
        self.direction = None    # horizontal facing: "left"/"right"/None

    def __repr__(self):
        return '''PlayerSprite:
Collision Rectangle: {}
Images: {}
Health: {}'''.format(self.rect, list(i for i in self.images), self.health)

    # Execute the behavour of the player sprite
    def behave(self):
        self.behavour(self)

    # Determine which frame should be shown
    def update_frame(self):
        """Pick the current sprite image from facing, direction, and the
        walk-cycle frame counter.

        Refactor: the old version spelled out four identical copy-pasted
        blocks (front/back x left/right); the pose key and walk phase are
        independent, so select them separately.
        """
        # Horizontal direction takes priority; otherwise use vertical facing.
        if self.direction == "left" or self.direction == "right":
            base = self.direction
        elif self.facing == "front":
            base = "front"
        else:
            base = "back"
        # Walk cycle phases: stand, step 1, stand, step 2 (interval ticks each).
        if 0 <= self.frame < self.interval:
            self.sprite = self.images[base]
        elif self.interval <= self.frame < self.interval * 2:
            self.sprite = self.images[base + "_walk1"]
        elif self.interval * 2 <= self.frame < self.interval * 3:
            self.sprite = self.images[base]
        else:
            self.sprite = self.images[base + "_walk2"]

    # Player sprite movement.  args = (keys, time_step, tilemap, collisions).
    def movement(self, args):
        if self.freeze:
            return
        keys = args[0]
        time_step = args[1]
        tilemap = args[2]
        collisions = args[3]
        dx = 0
        dy = 0
        player_moved = False
        # Holding shift walks slowly; otherwise run.
        if keys[pygame.K_LSHIFT]:
            speed = time_step * 1.5
        else:
            speed = 3 * time_step
        if keys[pygame.K_w]:
            dy -= speed
            self.facing = "back"
            self.direction = None
            player_moved = True
        if keys[pygame.K_s]:
            dy += speed
            self.facing = "front"
            self.direction = None
            player_moved = True
        # Axis-separated collision: vertical first, then horizontal.
        if Sprite.check_collision(self.rect, 0, dy, tilemap, collisions):
            self.rect.move_ip(0, dy)
        if keys[pygame.K_a]:
            dx -= speed
            self.direction = "left"
            player_moved = True
        if keys[pygame.K_d]:
            dx += speed
            self.direction = "right"
            player_moved = True
        if Sprite.check_collision(self.rect, dx, 0, tilemap, collisions):
            self.rect.move_ip(dx, 0)
        # Advance or reset the walk-cycle frame counter, then pick the image.
        if player_moved == False or self.frame > self.interval * 4:
            self.frame = 0
        else:
            self.frame += round(speed / 7) + 1
        self.update_frame()
        # Trigger call for interact: fire the first nearby non-player sprite's action.
        if keys[pygame.K_SPACE]:
            for instance in Sprite.instances:
                if type(instance) != PlayerSprite:
                    if self.rect.inflate(16, 16).colliderect(instance.rect):
                        instance.action(instance.action_args)
                        break

    # Regular player behavour: pull shared frame arguments and move.
    def normal(self):
        keys = Sprite.behavour_args['keys']
        time_step = Sprite.behavour_args['time_step']
        tilemap = Sprite.behavour_args['tilemap']
        collisions = Sprite.behavour_args['collisions']
        self.movement((keys, time_step, tilemap, collisions))
|
Sciguy324/Maze-Game-Python | Maze (Multiplayer Tests)/Maze (Client 2)/sprite.py | <reponame>Sciguy324/Maze-Game-Python
from dialog import *
import random
import json
# # WORLDS MODULE
class Tile:
    """Class for animated tiles.

    Loads a vertical strip image (16px frames), detects whether it needs an
    alpha channel, scales each frame to 64x64, and draws the frame selected
    by the class-wide animation counter.
    """
    interval = 50          # time units per animation frame
    img_index = 0          # shared animation counter (advanced for all tiles)
    colorkey = (200, 50, 200)

    def __init__(self, file):
        img = pygame.image.load(os.path.join(file))
        # Check whether image contains transparency and load accordingly.
        array = pygame.PixelArray(img)
        transparent = False
        x = -1
        for i in array:
            x += 1
            y = -1
            for j in i:
                y += 1
                if j is not None and img.unmap_rgb(j).a < 255:
                    transparent = True
                    break
            if transparent:
                break
        # BUGFIX: release the pixel array so the surface is no longer locked
        # before converting/scaling it.
        array.close()
        # BUGFIX: convert()/convert_alpha() return a NEW surface; the old code
        # discarded the result, so no conversion ever took place.
        if transparent:
            img = img.convert_alpha()
        else:
            img = img.convert()
        self.alpha = bool(transparent)
        # Load individual sub-images from a larger image (one 16px-tall frame
        # each, scaled to 64x64).
        small_height = img.get_height() // 16
        img = pygame.transform.scale(img, (64, small_height * 64))
        self.img_list = []
        for i in range(small_height):
            if transparent:
                temp_img = pygame.Surface((64, 64), pygame.SRCALPHA, 32)
                temp_img = temp_img.convert_alpha()
            else:
                temp_img = pygame.Surface((64, 64))
            temp_img.blit(img, (0, 0), (0, 64 * i, 64, 64))
            self.img_list.append(temp_img)
        # Pristine copies used by draw_norm and reset after edits (darkening).
        self.backup_list = tuple(i.copy() for i in self.img_list)
        self.max_imgs = len(self.img_list)
        self.main_img = img

    def draw(self, surface, dest=(0, 0)):
        """Draw an image to a surface."""
        if self.max_imgs > 1:
            surface.blit(self.img_list[int(Tile.img_index % self.max_imgs)], dest)
        else:
            surface.blit(self.img_list[0], dest)

    def draw_norm(self, surface, dest=(0, 0)):
        """Draw an image's (unmodified) backup to a surface."""
        if self.max_imgs > 1:
            surface.blit(self.backup_list[int(Tile.img_index % self.max_imgs)], dest)
        else:
            surface.blit(self.backup_list[0], dest)

    def reset(self):
        """Reset all changes to a tile, restoring it to the backup"""
        self.img_list = list(i.copy() for i in self.backup_list)

    @classmethod
    def advance_frame(cls, time_step=1):
        """Advance the shared animation counter, wrapping to avoid overflow."""
        cls.img_index += time_step / cls.interval
        if cls.img_index > 64:
            cls.img_index = 0
def build_id_dict(tiles):
    """Build a dictionary of images paired with their id from the given dictionary"""
    return {tile_id: Tile(path) for tile_id, path in tiles.items()}
def build_matrix(width, height):
    """Build a height-by-width matrix of zeros; each row is an independent list."""
    return [[0] * width for _ in range(height)]
def load_from_string(string):
    """Deserialize a Level from its JSON string representation (see Level.jsonify)."""
    data = json.loads(string)
    # Loading zones are stored as a list of records; rebuild the tuple-keyed dict.
    zones = {}
    for entry in data["loading_zones"]:
        zones[tuple(entry["zone"])] = [entry["target_level"], tuple(entry["target_pos"])]
    return Level(data["colliders"], data["tilemap"], data["decomap"], zones,
                 data["lightmap"], data["spawn"], data["name"])
class Level:
"""Class for levels"""
tile_ids = build_id_dict({1: 'tiles/block.png', 2: 'tiles/stone_table_top.png', 3: 'tiles/stone_table_left.png',
4: 'tiles/stone_table_right.png', 5: 'tiles/stone_table_bottom.png',
6: 'tiles/stone_wall.png', 7: 'tiles/void.png', 8: 'tiles/wood.png', 9: 'tiles/grass.png',
10: 'tiles/corner.png', 11: 'tiles/wall.png', 12: 'tiles/top_left_water.png',
13: 'tiles/top_water.png', 14: 'tiles/top_right_water.png', 15: 'tiles/left_water.png',
16: 'tiles/water.png', 17: 'tiles/right_water.png', 18: 'tiles/bottom_left_water.png',
19: 'tiles/bottom_water.png', 20: 'tiles/bottom_right_water.png', 21: 'tiles/grass3.png',
22: 'tiles/grass2.png', 23: 'tiles/pink_wall.png', 24: 'tiles/top_left_path.png',
25: 'tiles/top_path.png', 26: 'tiles/top_right_path.png', 27: 'tiles/left_path.png',
28: 'tiles/path.png', 29: 'tiles/right_path.png', 30: 'tiles/bottom_left_path.png',
31: 'tiles/bottom_path.png', 32: 'tiles/bottom_right_path.png',
33: 'tiles/path_corner1.png', 34: 'tiles/path_corner2.png', 35: 'tiles/path_corner3.png',
36: 'tiles/path_corner4.png', 37: 'tiles/water_corner1.png',
38: 'tiles/water_corner2.png', 39: 'tiles/water_corner3.png',
40: 'tiles/water_corner4.png', 41: 'tiles/cliff_top_left.png', 42: 'tiles/cliff_top.png',
43: 'tiles/cliff_top_right.png', 44: 'tiles/cliff_face_left.png',
45: 'tiles/cliff_center.png', 46: 'tiles/cliff_face_right.png',
47: 'tiles/cliff_bottom_left.png', 48: 'tiles/cliff_bottom.png',
49: 'tiles/cliff_bottom_right.png', 50: 'tiles/cliff_left.png',
51: 'tiles/cliff_back.png', 52: 'tiles/cliff_right.png',
53: 'tiles/cliff_left_corner.png', 54: 'tiles/cliff_right_corner.png',
55: 'tiles/cliff_stairs_top.png', 56: 'tiles/cliff_stairs.png',
57: 'tiles/cliff_stairs_bottom.png', 58: 'tiles/lamp_post_bottom.png',
59: 'tiles/table.png', 60: 'tiles/table_left.png', 61: 'tiles/table_center.png',
62: 'tiles/table_right.png', 63: 'tiles/table_bottom_left.png',
64: 'tiles/table_bottom.png', 65: 'tiles/table_bottom_right.png',
66: 'tiles/door_bottom.png', 67: 'tiles/door_top.png', 68: 'tiles/bricks_left.png',
69: 'tiles/bricks.png', 70: 'tiles/bricks_right.png', 71: 'tiles/bricks_bottom_left.png',
72: 'tiles/bricks_bottom.png', 73: 'tiles/bricks_bottom_right.png',
74: 'tiles/window.png', 75: 'tiles/pink_wall_base.png',
76: 'tiles/white_green_wall_base_rimmed.png',
77: 'tiles/white_green_wall_painting_base.png', 78: 'tiles/drawer.png',
79: 'tiles/white_green_wall_base_drawer.png', 80: 'tiles/drawer_legs.png',
81: 'tiles/white_green_wall_base_left.png', 82: 'tiles/white_green_wall_base.png',
83: 'tiles/white_green_wall_base_right.png', 84: 'tiles/tree_trunk.png',
85: 'tiles/water2.png', 86: 'tiles/water_ripple.png', 87: 'tiles/wood_shade.png'})
deco_ids = build_id_dict({1: 'tiles/lamp_post_top.png', 2: 'tiles/table_top_left.png', 3: 'tiles/table_top.png',
4: 'tiles/table_top_right.png', 5: 'tiles/roof_right_bottom.png',
6: 'tiles/roof_right_middle1.png', 7: 'tiles/roof_right_middle2.png',
8: 'tiles/roof_right_top.png', 9: 'tiles/roof_left_bottom.png',
10: 'tiles/roof_left_middle1.png', 11: 'tiles/roof_left_middle2.png',
12: 'tiles/roof_left_top.png', 13: 'tiles/roof_top1.png', 14: 'tiles/roof_top2.png',
15: 'tiles/top_roof_shadow.png', 16: 'tiles/pink_wall_top.png',
17: 'tiles/white_green_wall_top_rimmed.png',
18: 'tiles/white_green_wall_painting_top.png', 19: 'tiles/white_green_wall_clock.png',
20: 'tiles/lamp.png', 21: 'tiles/white_green_wall_top_left.png',
22: 'tiles/white_green_wall_top.png', 23: 'tiles/white_green_wall_top_right.png',
24: 'tiles/roof_right_edge.png', 25: 'tiles/roof_left_edge.png',
26: 'tiles/tree_top_left.png', 27: 'tiles/tree_top.png', 28: 'tiles/tree_top_right.png',
29: 'tiles/tree_mid_left.png', 30: 'tiles/tree_mid.png', 31: 'tiles/tree_mid_right.png'})
reference_dict = {}
def __init__(self, colliders, tilemap, decomap, loading_zones, lightmap, default_start=(0, 0), name="Unnamed"):
self.colliders = colliders
self.tilemap = tilemap
self.lightmap = lightmap
self.width = 64 * len(tilemap[1])
self.height = 64 * len(tilemap)
if decomap is None:
self.decomap = build_matrix(len(tilemap[0]), len(tilemap))
else:
self.decomap = decomap
self.loading_zones = loading_zones
self.default_start = default_start
if name in Level.reference_dict:
print("WARNING: Multiple levels have the same name '{}'. Overwriting!".format(name))
self.name = name
Level.reference_dict[name] = self
def __repr__(self):
return "<Level Object | Name: {}>".format(self.name)
def jsonify(self):
loading_zones = []
for i, j in self.loading_zones.items():
loading_zones.append({"zone": list(i), "target_level": j[0], "target_pos": j[1]})
export_dict = {"colliders": self.colliders,
"tilemap": self.tilemap,
"decomap": self.decomap,
"loading_zones": loading_zones,
"lightmap": self.lightmap,
"spawn": self.default_start,
"name": self.name
}
return json.dumps(export_dict)
    @classmethod
    def levels_init(cls):
        """Load every '*.json' file under levels/ and construct a Level from
        each one (each Level registers itself in Level.reference_dict)."""
        found_files = [os.path.join('levels', f) for f in os.listdir('levels/') if
                       os.path.isfile(os.path.join('levels', f)) and os.path.splitext(f)[1] == '.json']
        for i in found_files:
            with open(i) as rf:
                data = json.load(rf)
            # JSON stored zones as lists of dicts (see Level.jsonify); rebuild
            # the {(x, y): [target_level, (tx, ty)]} mapping with tuple keys.
            loading_zones = {}
            for j in data["loading_zones"]:
                loading_zones[tuple(j["zone"])] = [j["target_level"], tuple(j["target_pos"])]
            Level(data["colliders"],
                  data["tilemap"],
                  data["decomap"],
                  loading_zones,
                  data["lightmap"],
                  data["spawn"],
                  data["name"])
@classmethod
def darken_imgs(cls, amount=100):
"""Darken all tiles and decos"""
shade = pygame.Surface((64, 64)).convert_alpha()
shade.fill((amount, amount, amount, 100))
for i, j in Level.tile_ids.items():
for k in j.img_list:
k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
for i, j in Level.deco_ids.items():
for k in j.img_list:
k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
@classmethod
def reset_imgs(cls):
"""Reset changes to all tiles and decos"""
for i, j in cls.tile_ids.items():
j.reset()
for i, j in cls.deco_ids.items():
j.reset()
def sprite_setup(start_level):
    """Build all sprites, returning the player sprite"""

    def _walk_cycle(path, name):
        """Four-direction walk animations (stand, step1, stand, step2)."""
        return {d: Animation(('{0}/{1}_{2}.png'.format(path, name, d),
                              '{0}/{1}_{2}_walk1.png'.format(path, name, d),
                              '{0}/{1}_{2}_walk2.png'.format(path, name, d)),
                             (0, 1, 0, 2))
                for d in ("front", "back", "left", "right")}

    def _single_frame(path, name):
        """Four-direction single-frame (standing-only) animations."""
        return {d: Animation(('{0}/{1}_{2}.png'.format(path, name, d),), (0,))
                for d in ("front", "back", "left", "right")}

    # Welcome sign, local to the demo level; interacting shows a dialog.
    blue_sign = Static('assets/sign/blue_sign.png')
    SignSprite(pygame.Rect(16 * 64, 8 * 64, 64, 32), {'blue_sign': blue_sign}, SignSprite.stand, "demo", "local",
               action='speak', action_args='dialogs/demo_welcome.txt')

    # Wandering pig NPC that oinks when spoken to.
    NPCSprite(pygame.Rect(20 * 64, 6 * 64, 64, 32), _walk_cycle('assets/npc/pig', 'pig'), NPCSprite.wander,
              "demo", "global", action='speak', action_args='dialogs/oink.txt')

    # Duck assets are preloaded here, but only the male duck is spawned
    # (female/duckling animations kept for parity with the original preload).
    male_duck_animations = _single_frame('assets/npc/male_duck', 'male_duck')
    female_duck_animations = _single_frame('assets/npc/female_duck', 'female_duck')
    duckling_animations = _single_frame('assets/npc/duckling', 'duckling')
    NPCSprite(pygame.Rect(20 * 64, 14 * 64, 64, 32), male_duck_animations, NPCSprite.wander, "demo", "global")

    # Player setup: spawn tile comes from the starting level's default start.
    spawn = Level.reference_dict[start_level].default_start
    # Bounding box is 48px wide and half of the 112px sprite height.
    hitbox = pygame.Rect(spawn[0] * 64, spawn[1] * 64, 48, int(112 / 2))
    return PlayerSprite(hitbox, _walk_cycle('assets/player', 'player'), PlayerSprite.normal, start_level, health=100)
def minor_sprite_setup(start_level):
    """Build all sprites, returning the player sprite"""
    # Spawn position comes from the starting level's default start tile.
    spawn = Level.reference_dict[start_level].default_start
    # Bounding box is 48px wide and half of the 112px sprite height.
    hitbox = pygame.Rect(spawn[0] * 64, spawn[1] * 64, 48, int(112 / 2))
    # Blue-skinned player variant: one three-frame walk cycle per direction,
    # played as stand, step1, stand, step2.
    frames = {d: Animation(('assets/blue_player/player_{}.png'.format(d),
                            'assets/blue_player/player_{}_walk1.png'.format(d),
                            'assets/blue_player/player_{}_walk2.png'.format(d)),
                           (0, 1, 0, 2))
              for d in ("front", "back", "left", "right")}
    return PlayerSprite(hitbox, frames, PlayerSprite.normal, start_level, health=100)
# # SPRITE MODULE
# Functions for actions
def blank(args):
    """No-op interaction handler: accepts the action args and does nothing."""
    return None
def speak(args):
    """Interaction handler: open a dialog box and display the text file *args*."""
    TextDialog("assets/dialog_box.png").display_dialog(args)
# Maps the string action names stored on sprites (and in sprite JSON) to handlers.
action_lookup = {'blank': blank, 'speak': speak}
class Tracker:
    """Class for variables that keep track of whether they've been changed"""

    def __init__(self, value):
        """Wrap *value* with a dirty flag that starts out clean."""
        self.value = value
        self.changed = False

    def set(self, new_value):
        """Store *new_value*, marking the tracker dirty only if it differs."""
        if self.value != new_value:
            self.changed = True
            self.value = new_value

    def querry(self):
        """Report and consume the dirty flag.

        Returns True exactly once after a change, clearing the flag as a side
        effect.  Previously fell through returning None on the clean path;
        now returns False explicitly (identical under truthiness checks).
        """
        if self.changed:
            self.changed = False
            return True
        return False
class Animation:
    """Class for animations.

    Loads a tuple of image paths once and plays them in the order given by
    *sequence*.  The frame cursor (`tick`) is a float advanced by elapsed
    time divided by `interval`; the integer part selects the current frame.
    `backup_list` holds pristine copies so darkening can be undone.
    """
    def __init__(self, images, sequence):
        # Fractional frame cursor and its previous value (for change detection).
        self.tick = 0
        self.prev_tick = 0
        # Raw constructor args are kept so sprites can be re-serialized (jsonify).
        self.image_tuple = images
        self.image_sequence = sequence
        # One loaded surface per sequence entry (frames may repeat, e.g. 0,1,0,2).
        self.frame_list = tuple(pygame.image.load(os.path.join(images[i])).convert_alpha() for i in sequence)
        # Untouched copies used by draw_norm/draw_clipped/undo after darkening.
        self.backup_list = tuple(i.copy() for i in self.frame_list)
        self.rect_list = tuple(i.get_rect() for i in self.frame_list)
        self.frame_count = len(self.frame_list) - 1
        self.frame = self.frame_list[0]
        self.rect = self.rect_list[0]
        # Time units per frame step — presumably milliseconds (TODO confirm
        # against the caller's time_step source).
        self.interval = 25
        self.running = False
    def update(self, time_step):
        """Update the animation frame"""
        self.running = True
        self.tick += time_step / self.interval
        # Only swap surfaces when the integer frame index actually changes.
        if int(self.tick) != int(self.prev_tick):
            # Wrap back to the first frame past the end of the sequence.
            if int(self.tick) > self.frame_count:
                self.tick = 0
            self.frame = self.frame_list[int(self.tick)]
            self.rect = self.rect_list[int(self.tick)]
            self.prev_tick = float(self.tick)
    def draw(self, window, dest):
        """Draw current frame to window"""
        window.blit(self.frame, dest)
    def draw_norm(self, window, dest):
        """Draw current backup frame to window"""
        window.blit(self.backup_list[int(self.tick)], dest)
    def draw_clipped(self, window, dispx, dispy, disp_reg, litmap_rects):
        """Draw current backup frame with only area outside a light
        source darkened"""
        # First get a list of rectangles that intersect the source
        # Then copy this part of the current backup frame onto the current frame
        # NOTE(review): self.rect is expected to be in world coordinates here
        # (blit_all_night aligns it to the sprite rect before calling); the
        # 1px offset presumably avoids edge artifacts — confirm.
        img_rect = self.rect.move(1, 1)
        img = self.frame.copy()
        # Blit the (possibly darkened) frame, then overlay the undarkened
        # backup over every intersection with a lit rectangle.
        window.blit(img, disp_reg)
        for i in img_rect.collidelistall(litmap_rects):
            dark_rect = img_rect.clip(litmap_rects[i])
            if dark_rect.size != (0, 0):
                dark_rect.move_ip(-1, -1)
                # window.fill((255, 0, 0), dark_rect.move(disp))
                window.blit(self.backup_list[int(self.tick)], dest=dark_rect.move(dispx, dispy), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))
    def reset(self):
        """Reset the animation frame to base"""
        self.running = False
        if self.tick != 0:
            self.tick = 0
            self.prev_tick = 0
            self.frame = self.frame_list[0]
            self.rect = self.rect_list[0]
    def darken(self, amount):
        """Darken the image by an amount"""
        # Subtract a flat grey from every frame surface in place.
        for i in self.frame_list:
            shade = pygame.Surface((i.get_width(), i.get_height()))
            shade.fill((amount, amount, amount))
            i.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)
        self.frame = self.frame_list[int(self.tick)]
    def undo(self):
        """Reset all changes to the animation images"""
        self.frame_list = tuple(i.copy() for i in self.backup_list)
        self.frame = self.frame_list[int(self.tick)]
class Static(Animation):
    """Subclass for static (single-frame) animations.

    Holds one surface plus a pristine backup copy.  Deliberately does not
    call Animation.__init__, so it has no frame_list/backup_list/tick.
    """
    def __init__(self, image):
        self.frame = pygame.image.load(os.path.join(image)).convert_alpha()
        # Untouched copy used by draw_norm/draw_clipped/undo after darkening.
        self.backup = self.frame.copy()
        self.rect = self.frame.get_rect()
        self.running = False

    def update(self, timestep):
        """Handles any calls for frame updates. Helps with
        standardization, but otherwise does nothing"""
        pass

    def draw(self, window, dest):
        """Standard drawing function"""
        window.blit(self.frame, dest)

    def draw_norm(self, window, dest):
        """Draw backup image"""
        window.blit(self.backup, dest)

    def draw_clipped(self, window, dispx, dispy, disp_reg, litmap_rects):
        """Draw current backup frame with only area outside a light
        source darkened"""
        img_rect = self.rect.move(1, 1)
        img = self.frame.copy()
        window.blit(img, (disp_reg))
        for i in img_rect.collidelistall(litmap_rects):
            dark_rect = img_rect.clip(litmap_rects[i])
            if dark_rect.size != (0, 0):
                dark_rect.move_ip(-1, -1)
                # window.fill((255, 0, 0), dark_rect.move(disp))
                # Bug fix: Static has no 'backup_list' attribute (that belongs
                # to Animation) — blitting it raised AttributeError whenever a
                # Static sprite straddled a lit tile.  Use the single 'backup'
                # surface, mirroring Animation.draw_clipped.
                window.blit(self.backup, dest=dark_rect.move(dispx, dispy), area=dark_rect.move(self.rect.topleft[0] * -1, self.rect.topleft[1] * -1))

    def darken(self, amount):
        """Darken the image by an amount"""
        shade = pygame.Surface((self.frame.get_width(), self.frame.get_height()))
        shade.fill((amount, amount, amount))
        self.frame.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)

    def undo(self):
        """Reset all changes to the animation image"""
        self.frame = self.backup.copy()
class Sprite:
    """Main class for sprites.

    Every sprite self-registers in the class-level containers on creation:
    `sprite_dict` (by entity id) plus either `global_sprites` (exists across
    all levels, e.g. players) or `local_sprites` (bound to one level).
    `loaded_locals` is the subset of local sprites in the focus sprite's
    current level (see reload_loaded_locals).  `behavior_args` is a shared
    bag of per-frame data (keys, time_step, ...) filled in by the game loop.
    """
    sprite_dict = {}
    global_sprites = []
    local_sprites = []
    loaded_locals = []
    # The sprite the camera/level loading follows — presumably the local player.
    focus = None
    id_counter = 0
    behavior_args = {}
    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None, health=None, max_health=None):
        self.health = health
        self.max_health = max_health
        self.rect = rect
        # Float-precision position mirrored into the int-based rect each frame.
        self.x = int(rect.x)
        self.y = int(rect.y)
        # Accumulated time spent colliding with other sprites (see check_collision).
        self.collision_time = 0
        self.animation_dict = animation_dict
        # Start with the first animation in the dict (insertion order).
        self.animation = list(animation_dict.items())[0][1]
        # NOTE(review): this instance attribute shadows the classmethod
        # Sprite.freeze on instances; the classmethod is only callable via the
        # class itself.
        self.freeze = False
        self.behavior = behavior
        self.level = Level.reference_dict[level]
        self.scope = scope
        self.action = action_lookup[action]
        self.action_args = action_args
        self.speed_mult = 1
        self.entity_id = Sprite.id_counter
        self.facing = Tracker(None)
        self.direction = Tracker(None)
        Sprite.id_counter += 1
        if scope == "global":
            Sprite.global_sprites.append(self)
        elif scope == "local":
            Sprite.local_sprites.append(self)
        else:
            print("An error occurred while loading a sprite:\n'{}' is an invalid scope".format(scope))
        # Additional data for asking server about levels
        self.inquiring = False
        self.inquire_of = ""
        Sprite.sprite_dict[self.entity_id] = self
    def __repr__(self):
        """Override in subclass"""
        pass
    def __str__(self):
        return self.__repr__()
    def jsonify(self):
        """Converts a sprite into a json representation of itself, override in subclass"""
        pass
    def check_collision(self, dx, dy, tilemap, collider_ids):
        """Return True if moving by (dx, dy) is allowed, False on collision.

        Looks ahead 1.5x the requested movement and tests the four corners of
        the moved rect against collider tile ids.  Sprite-vs-sprite overlaps
        are tolerated after ~180 time units of accumulated contact so sprites
        can unstick themselves.  Out-of-bounds (IndexError) counts as blocked.
        """
        try:
            dx, dy = round(1.5 * dx), round(1.5 * dy)
            new_rect = self.rect.move(dx, dy)
            # Tile coordinates of the moved rect's corners.
            x1 = int(new_rect.x / 64)
            y1 = int(new_rect.y / 64)
            x2 = int((new_rect.x + new_rect.width) / 64)
            y2 = int((new_rect.y + new_rect.height) / 64)
            # Negative indices would silently wrap in Python; treat as blocked.
            if (new_rect.x < 0) or (new_rect.y < 0) or (x2 < 0) or (y2 < 0):
                return False
            if tilemap[y1][x1] in collider_ids:
                return False
            if tilemap[y2][x1] in collider_ids:
                return False
            if tilemap[y1][x2] in collider_ids:
                return False
            if tilemap[y2][x2] in collider_ids:
                return False
            if self.collision_time < 180 and Sprite.check_sprites_collide(self, dx, dy) is not None:
                self.collision_time += Sprite.behavior_args["time_step"]
                return False
            # Decay the contact timer when movement succeeds.
            if self.collision_time > 0:
                self.collision_time -= Sprite.behavior_args["time_step"] / 2
            return True
        except IndexError:
            return False
    def behave(self):
        """Function to execute the behavior of sprites. Override in subclass"""
        pass
    def check_load_zone(self):
        """Check if the sprite is in a loading zone, and if so, send the sprite to the relevant level.
        Return False if nothing happens, return True if successful."""
        x, y = self.rect.center
        x = int(x / 64)
        y = int(y / 64)
        if (x, y) in self.level.loading_zones:
            zone = self.level.loading_zones[(x, y)]
            # Record the destination level name; behave_all turns this into a
            # server inquiry for player sprites.
            self.inquire_of = zone[0]
            self.x = zone[1][0] * 64
            self.y = zone[1][1] * 64
            self.rect.x = zone[1][0] * 64
            self.rect.y = zone[1][1] * 64
            return True
        else:
            return False
    def check_lit_tile(self, litmap):
        """Check if the sprite is partially or fully on tiles that are illuminated.
        -Return 0 if sprite is NOT touching a lit tile.
        -Return 1-3 if the sprite is PARTIALLY touching a lit tile.
        -Return 4 if the sprite is ONLY touching lit tiles.
        -Return 0 if an IndexError occurs (entity is out of bounds).
        """
        try:
            # Count how many of the animation rect's four corners sit on lit tiles.
            r = self.animation.rect
            c = 0
            for x in (r.left, r.right):
                for y in (r.bottom, r.top):
                    if litmap[y // 64][x // 64]: c += 1
            return c
        except IndexError:
            return 0
    @classmethod
    def delete_sprite(cls, entity_id):
        """Remove a sprite from the global list and the sprite dictionary."""
        for i, j in enumerate(Sprite.global_sprites):
            if entity_id == j.entity_id:
                Sprite.global_sprites.pop(i)
                break
        for i, j in cls.sprite_dict.items():
            if entity_id == i:
                del cls.sprite_dict[i]
                break
    @classmethod
    def add_sprite(cls, json_dict):
        """Registers a new sprite sent from the server"""
        # Each branch constructs the sprite normally (which self-assigns a
        # local entity id), then rebinds it to the server-supplied id.
        # NOTE(review): the SignSprite branch reads entity_data["animation"],
        # but SignSprite.jsonify emits "animation_dict" — confirm which key the
        # server protocol actually uses.
        if json_dict["type"] == "SignSprite":
            new = SignSprite(pygame.Rect(json_dict["entity_data"]["rect"]),
                             dict((i, Static(j)) for i, j in json_dict["entity_data"]["animation"].items()),
                             behavior_lookup[json_dict["entity_data"]["behavior"]],
                             json_dict["entity_data"]["level"],
                             json_dict["entity_data"]["scope"],
                             json_dict["entity_data"]["action"],
                             json_dict["entity_data"]["action_args"]
                             )
            del cls.sprite_dict[new.entity_id]
            new.entity_id = json_dict["id"]
            cls.sprite_dict[new.entity_id] = new
            print("Entity ID:", new.entity_id)
            print("After attempting to add SignSprite:", cls.sprite_dict)
        elif json_dict["type"] == "NPCSprite":
            new = NPCSprite(pygame.Rect(json_dict["entity_data"]["rect"]),
                            dict((i, Animation(j[0], j[1])) for i, j in json_dict["entity_data"]["animation_dict"].items()),
                            GuestSprite.normal,
                            json_dict["entity_data"]["level"],
                            json_dict["entity_data"]["scope"],
                            json_dict["entity_data"]["action"],
                            json_dict["entity_data"]["action_args"],
                            json_dict["entity_data"]["health"],
                            json_dict["entity_data"]["max_health"]
                            )
            del cls.sprite_dict[new.entity_id]
            new.entity_id = json_dict["id"]
            cls.sprite_dict[new.entity_id] = new
        elif json_dict["type"] == "GuestSprite" or json_dict["type"] == "PlayerSprite":
            # Remote players become GuestSprites locally.
            new = GuestSprite(json_dict["id"],
                              pygame.Rect(json_dict["entity_data"]["rect"]),
                              dict((i, Animation(j[0], j[1])) for i, j in json_dict["entity_data"]["animation_dict"].items()),
                              json_dict["entity_data"]["level"]
                              )
            del cls.sprite_dict[new.entity_id]
            new.entity_id = json_dict["id"]
            cls.sprite_dict[new.entity_id] = new
        else:
            print("Error: '{}' is not a valid sprite type".format(json_dict["type"]))
    @classmethod
    def freeze(cls, state):
        """Set the freeze state of all sprites"""
        # NOTE(review): shares its name with the per-instance 'freeze' flag set
        # in __init__; only reachable as Sprite.freeze(state) on the class.
        for s in cls.global_sprites + cls.loaded_locals:
            s.freeze = state
    @classmethod
    def check_sprites_collide(cls, entity, dx, dy):
        """Return the rect of the first sprite *entity* would hit when moved by
        (dx, dy), or None if there is no overlap.  (Returns the colliding
        sprite's rect, not the sprite object itself.)"""
        entity_rect = entity.rect.copy()
        sprite_list = cls.local_sprites + [i for i in cls.global_sprites if i.level == entity.level]
        other_rects = list(s.rect for s in sprite_list if s.rect != entity_rect)
        index = entity_rect.move(dx, dy).collidelist(other_rects)
        if index == -1:
            return None
        return other_rects[index]
    @classmethod
    def behave_all(cls):
        """Function to execute the behavior of all sprites"""
        for s in cls.global_sprites + cls.loaded_locals:
            s.behave()
        # After behaviors run, let global sprites enter loading zones; player
        # transitions additionally trigger a server inquiry and a screen wipe.
        for s in cls.global_sprites:
            if s.check_load_zone() and type(s) == PlayerSprite:
                s.inquiring = True
                Sprite.behavior_args["screen_wipe"] = True
    @classmethod
    def blit_all(cls, dispx, dispy, window):
        """Blit all sprites that are currently located in the same level as the focus sprite.
        Does not handle light levels"""
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level.name == cls.focus.level.name]
        # Painter's order by vertical center so lower sprites draw in front.
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            s.animation.draw(window, (s.x + dispx, s.y + dispy - s.rect.height))
            # window.fill((127, 0, 0), s.rect.move((dispx, dispy)))
    @classmethod
    def blit_all_night(cls, dispx, dispy, window, litmap, lit_rects):
        """Blit all sprites that are currently located in the same level as the focus sprite.
        Designed for handling light levels"""
        loaded_sprites = cls.loaded_locals + [i for i in cls.global_sprites if i.level.name == cls.focus.level.name]
        loaded_sprites.sort(key=lambda kv: kv.rect.center[1])
        for s in loaded_sprites:
            # Align the animation rect to world coordinates for draw_clipped.
            s.animation.rect.bottomleft = s.rect.bottomleft
            disp_reg = (s.x + dispx, s.y + dispy - s.rect.height)
            lit_tiles = s.check_lit_tile(litmap)
            # Player is not touching any lit tiles, draw normally.
            if not lit_tiles:
                s.animation.draw(window, disp_reg)
            # Player is only touching lit tiles, draw illuminated backup.
            elif lit_tiles == 4:
                s.animation.draw_norm(window, disp_reg)
            # Player is partially touching lit tiles, draw clipped.
            else:
                s.animation.draw_clipped(window, dispx, dispy, disp_reg, lit_rects)
    @classmethod
    def darken_all(cls, amount=100):
        """Darken every sprite's animation frames (night effect)."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.darken(amount)
            except AttributeError:
                # Sprite has a single animation rather than a direction dict.
                i.animation.darken(amount)
    @classmethod
    def reset_imgs(cls):
        """Undo darkening on every sprite's animation frames."""
        for i in cls.local_sprites + cls.global_sprites:
            try:
                for j, k in i.animation_dict.items():
                    k.undo()
            except AttributeError:
                i.animation.undo()
    @classmethod
    def reload_loaded_locals(cls):
        """Recalculate the list of loaded local sprites"""
        cls.loaded_locals = [i for i in cls.local_sprites if i.level == cls.focus.level]
# Subclass for sign sprites
class SignSprite(Sprite):
    """Stationary sign sprite: always local scope, no health, stands in place.

    Its interaction (default 'speak') fires when the player presses space
    next to it.
    """
    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None):
        # Scope is forced to "local" regardless of the argument passed in.
        Sprite.__init__(self, rect, animation_dict, behavior, level, "local", action, action_args, None, None)

    def __repr__(self):
        # Bug fix: the format string was missing its closing '>' (every other
        # sprite class closes the bracket).
        result = """<SignSprite | ID: {}, Rect: {}, Behavior: {}, Action: {}, Action Args: {}, Level: {}>""".format(
            self.entity_id, self.rect, self.behavior, self.action, self.action_args, self.level)
        return result

    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # NOTE(review): Static animations (as built in sprite_setup) have no
        # 'image_tuple' attribute, and Sprite.add_sprite reads the key
        # "animation" rather than "animation_dict" — confirm the intended
        # wire format before relying on this round-trip.
        result = {"id": self.entity_id, "type": "SignSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, j.image_tuple)
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "local",
            "action": dict((j, i) for i, j in action_lookup.items())[self.action],
            "action_args": self.action_args,
            "health": None,
            "max_health": None
        }
                  }
        return json.dumps(result)

    # Execute the behavior of the sign sprite
    def behave(self):
        self.behavior(self)

    # Normal standing behavior: signs never move.
    def stand(self):
        pass
# Subclass for NPC sprites
class NPCSprite(Sprite):
    """Non-player character sprite.

    Movement is driven by a millisecond-ish `cycle` timer (units match
    Sprite.behavior_args['time_step'] — TODO confirm) and per-axis unit
    direction values dx/dy in {-1, 0, 1}.
    """
    def __init__(self, rect, animation_dict, behavior, level, scope, action='blank', action_args=None, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavior, level, scope, action, action_args, health, max_health)
        self.interval = 25
        # Behavior timer; wander/panic pick new directions when it overflows.
        self.cycle = 0
        self.facing = Tracker("front")
        self.direction = Tracker(None)
        # Current unit movement direction per axis (-1, 0 or 1).
        self.dx = 0
        self.dy = 0
    def __repr__(self):
        result = """<NPCSprite | Rect: {}, Behavior: {}, Action: {}, Action Args: {}, Health: {}, Level: {}>""".format(
            self.rect, self.behavior, self.action, self.action_args, self.health, self.level)
        return result
    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        # NOTE(review): health/max_health are serialized as None even though
        # the instance may carry values — confirm whether this is intentional.
        result = {"id": self.entity_id, "type": "NPCSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": self.scope,
            "action": dict((j, i) for i, j in action_lookup.items())[self.action],
            "action_args": self.action_args,
            "health": None,
            "max_health": None
        }
                  }
        return json.dumps(result)
    # Execute behavior of the NPC sprite
    def behave(self):
        self.behavior(self)
    # Behavior of NPC to stand in place
    def stand(self):
        pass
    # Behavior of NPC to wander aimlessly around the map: every 384 time
    # units pick a random direction, walk for the first 80 units, idle after.
    def wander(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        if self.cycle > 384:
            self.cycle = 0
            self.dx = random.randint(-1, 1)
            self.dy = random.randint(-1, 1)
            # Look a full tile ahead; if blocked, stay put and force an
            # immediate re-roll on the next call.
            if not self.check_collision(self.dx * 64, self.dy * 64, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 385
        time_step = Sprite.behavior_args['time_step']
        self.cycle += time_step
        if self.cycle <= 80:
            self.movement(tilemap, colliders, time_step)
            self.speed_mult = 1
        else:
            # Reset the walk animation shortly after the movement burst ends.
            if self.cycle <= 100:
                self.animation.reset()
    # Behavior of NPC to panic aimlessly: re-roll a non-zero direction every
    # 64 time units and move continuously at 1.5x speed.
    def panic(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        if self.cycle > 64:
            self.cycle = 0
            self.dx, self.dy = 0, 0
            while self.dx == 0 and self.dy == 0:
                self.dx = random.randint(-1, 1)
                self.dy = random.randint(-1, 1)
            # NOTE(review): unlike wander, this only looks one pixel ahead
            # (dx/dy not scaled by 64) — confirm whether that is intended.
            if not self.check_collision(self.dx, self.dy, tilemap, colliders):
                self.dx, self.dy = 0, 0
                self.cycle = 65
        time_step = Sprite.behavior_args['time_step']
        self.cycle += time_step
        if self.cycle <= 64:
            self.movement(tilemap, colliders, 1.5 * time_step)
            self.speed_mult = 1.5
        else:
            self.animation.reset()
    # Behavior of NPC to always move right
    def always_right(self):
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.dx = 1
        self.dy = 0
        time_step = Sprite.behavior_args['time_step']
        self.movement(tilemap, colliders, 1.5 * time_step)
        self.speed_mult = 1.5
    # Standard NPC movement: apply dx/dy with collision checks per axis,
    # update facing/animation, then sync the int rect to the float position.
    def movement(self, tilemap, colliders, time_step):
        dx, dy = 0, 0
        speed = 1.0 * time_step
        moved = False
        if self.dy == -1:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            moved = True
        if self.dy == 1:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            moved = True
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        else:
            # Blocked vertically: cancel and end the movement burst early.
            dy = 0
            self.cycle = 80
        if self.dx == -1:
            dx -= speed
            self.direction.set("left")
            moved = True
        if self.dx == 1:
            dx += speed
            self.direction.set("right")
            moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        else:
            dx = 0
            self.cycle = 80
        if not moved:
            self.animation.reset()
        else:
            # Switch animation only when facing/direction actually changed
            # (Tracker.querry consumes the dirty flag).
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed * 1.5)
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
# Subclass for player sprite
class PlayerSprite(Sprite):
    """The locally controlled player sprite (always global scope)."""
    def __init__(self, rect, animation_dict, behavior, level, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, behavior, level, "global", health=health, max_health=max_health)
        self.facing = Tracker("front")
        self.direction = Tracker(None)
    def __repr__(self):
        return """<PlayerSprite | Rect: {}, Health: {}, Level: {}>""".format(self.rect, self.health, str(self.level))
    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        result = {"id": self.entity_id, "type": "PlayerSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "global",
            "health": None,
            "max_health": None
        }
                  }
        return json.dumps(result)
    def jsonify2(self):
        """Converts necessary information about this sprite into a json format"""
        # Lightweight per-frame state for multiplayer sync (position, facing,
        # animation state) — smaller than the full jsonify payload.
        data = {"id": self.entity_id,
                "x": self.x,
                "y": self.y,
                "facing": self.facing.value,
                "direction": self.direction.value,
                "animated": self.animation.running,
                "speed": self.speed_mult,
                "level": self.level.name
                }
        return json.dumps(data)
    # Execute the behavior of the player sprite
    def behave(self):
        self.behavior(self)
    # Standard player sprite movement, driven by keyboard state.
    # args is a tuple: (keys, time_step, tilemap, colliders).
    def movement(self, args):
        if self.freeze:
            return
        keys = args[0]
        time_step = args[1]
        tilemap = args[2]
        colliders = args[3]
        dx = 0
        dy = 0
        player_moved = False
        self.speed_mult = 1.25
        # Holding left shift gives the slower speed — presumably a walk/sneak
        # modifier, with running as the default (confirm intended).
        if keys[pygame.K_LSHIFT]:
            speed = time_step * 1.25
        else:
            speed = 2.25 * time_step
            self.speed_mult += 1
        if keys[pygame.K_w]:
            dy -= speed
            self.facing.set("back")
            self.direction.set(None)
            player_moved = True
        if keys[pygame.K_s]:
            dy += speed
            self.facing.set("front")
            self.direction.set(None)
            player_moved = True
        # Vertical and horizontal movement are collision-checked per axis so
        # the player can slide along walls.
        if self.check_collision(0, dy, tilemap, colliders):
            self.y += dy
        if keys[pygame.K_a]:
            dx -= speed
            self.direction.set("left")
            player_moved = True
        if keys[pygame.K_d]:
            dx += speed
            self.direction.set("right")
            player_moved = True
        if self.check_collision(dx, 0, tilemap, colliders):
            self.x += dx
        # Trigger call for interact: space fires the action of the first
        # non-player sprite within 8px (rect inflated by 16 total).
        if keys[pygame.K_SPACE]:
            loaded_globals = [i for i in Sprite.global_sprites if i.level == self.level]
            for s in (loaded_globals + Sprite.loaded_locals):
                if type(s) != PlayerSprite:
                    if self.rect.inflate(16, 16).colliderect(s.rect):
                        s.action(s.action_args)
                        break
        if player_moved is False:
            self.animation.reset()
        else:
            # Switch animation only when facing/direction changed (Tracker
            # querry consumes the dirty flag).
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(speed / 2)
        # Sync the int-based rect to the float-precision position.
        self.rect.move_ip(round(self.x - self.rect.x), round(self.y - self.rect.y))
    # Regular player behavior: read shared per-frame inputs and move.
    def normal(self):
        keys = Sprite.behavior_args['keys']
        time_step = Sprite.behavior_args['time_step']
        tilemap = self.level.tilemap
        colliders = self.level.colliders
        self.movement((keys, time_step, tilemap, colliders))
class GuestSprite(Sprite):
    """Class for sprites that represent multiplayer players.

    Its x/y/facing/direction/speed_mult fields are presumably overwritten by
    the network sync layer (see PlayerSprite.jsonify2); `normal` then mirrors
    that state into the rect and animation each frame.
    """
    def __init__(self, entity_id, rect, animation_dict, level, health=None, max_health=None):
        Sprite.__init__(self, rect, animation_dict, GuestSprite.normal, level, "global", health=health, max_health=max_health)
        self.facing = Tracker("front")
        self.direction = Tracker(None)
        # Use the server-assigned id rather than the locally generated one.
        self.entity_id = entity_id
    def __repr__(self):
        return """<GuestSprite | ID: {}, Rect: {}, Health: {}, Level: {}>""".format(
            self.entity_id, self.rect, self.health, str(self.level))
    def jsonify(self):
        """Converts the sprite into a JSON representation of itself"""
        result = {"id": self.entity_id, "type": "GuestSprite", "entity_data": {
            "rect": [self.rect.x, self.rect.y, self.rect.w, self.rect.h],
            "animation_dict": dict((i, [j.image_tuple, j.image_sequence])
                                   for i, j in self.animation_dict.items()),
            "behavior": dict((j, i) for i, j in behavior_lookup.items())[self.behavior],
            "level": self.level.name,
            "scope": "global",
            "health": None,
            "max_health": None
        }
                  }
        return json.dumps(result)
    def behave(self):
        self.behavior(self)
    def normal(self):
        """Regular guest sprite behavior, typically used for syncing internal sprite data"""
        # Mirror the externally updated float position into the draw rect.
        self.rect.x = self.x
        self.rect.y = self.y
        if self.animation.running:
            # Switch animation only when facing/direction changed.
            if self.direction.querry() or self.facing.querry():
                if self.direction.value:
                    self.animation = self.animation_dict[self.direction.value]
                else:
                    self.animation = self.animation_dict[self.facing.value]
            self.animation.update(self.speed_mult * Sprite.behavior_args["time_step"] / 2)
        else:
            self.animation.reset()
# Maps the behavior names used in sprite JSON payloads back to the bound
# behavior functions (the inverse mapping is built on the fly in jsonify).
behavior_lookup = {"SignSprite.stand": SignSprite.stand,
                   "NPCSprite.wander": NPCSprite.wander,
                   "NPCSprite.panic": NPCSprite.panic,
                   "NPCSprite.always_right": NPCSprite.always_right,
                   "PlayerSprite.normal": PlayerSprite.normal,
                   "GuestSprite.normal": GuestSprite.normal
                   }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.