repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
gregzajac/MyRent | MyRent/migrations/0004_tenant_user.py | <filename>MyRent/migrations/0004_tenant_user.py
# Generated by Django 3.0.3 on 2020-03-02 08:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional one-to-one ``user`` link to the ``Tenant`` model.

    Auto-generated by Django 3.0.3 on 2020-03-02.
    """

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('MyRent', '0003_operation_operationdict'),
    ]

    operations = [
        migrations.AddField(
            model_name='tenant',
            name='user',
            # null=True keeps existing Tenant rows valid without a linked user.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
gregzajac/MyRent | MyRent/migrations/0001_initial.py | # Generated by Django 3.0.3 on 2020-03-01 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Flat`` table.

    Auto-generated by Django 3.0.3 on 2020-03-01; reflects an early schema
    (note the ``description`` field) that later migrations evolve.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Flat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street', models.CharField(max_length=128, verbose_name='Ulica')),
                ('block_number', models.CharField(max_length=64, verbose_name='Nr domu')),
                # flat_number is nullable: a house may have no flat subdivision.
                ('flat_number', models.CharField(max_length=64, null=True, verbose_name='Nr mieszkania')),
                ('post_code', models.CharField(max_length=16, verbose_name='Kod pocztowy')),
                ('city', models.CharField(max_length=64, verbose_name='Miasto')),
                ('description', models.TextField(verbose_name='Opis mieszkania')),
            ],
        ),
    ]
|
gregzajac/MyRent | MyRent/models.py | <filename>MyRent/models.py
from django.contrib.auth.models import User
from django.db import models
from datetime import datetime, timedelta
class Landlord(models.Model):
    """A property owner with contact details and an optional Django user login."""

    first_name = models.CharField(max_length=64, verbose_name="Imię")
    last_name = models.CharField(max_length=64, verbose_name="Nazwisko")
    phone = models.CharField(max_length=16, verbose_name="Telefon", null=True)
    email = models.CharField(max_length=64, verbose_name="E-mail", null=True)
    info = models.TextField(verbose_name="Dodatkowe info", null=True, blank=True)
    # null=True so a Landlord record can exist before a login account is linked.
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, verbose_name="Właściciel")

    class Meta:
        verbose_name = u'Właściciel'
        verbose_name_plural = u'Właściciele'

    def __str__(self):
        """Display the landlord as "First Last"."""
        return f"{self.first_name} {self.last_name}"
class Flat(models.Model):
    """A rentable flat belonging to a :class:`Landlord`."""

    street = models.CharField(max_length=128, verbose_name="Ulica")
    block_number = models.CharField(max_length=64, verbose_name="Nr domu")
    # Nullable: a standalone house has no flat number.
    flat_number = models.CharField(max_length=64, verbose_name="Nr mieszkania", null=True)
    post_code = models.CharField(max_length=16, verbose_name="Kod pocztowy")
    city = models.CharField(max_length=64, verbose_name="Miasto")
    info = models.TextField(verbose_name="Dodatkowe info", null=True, blank=True)
    landlord = models.ForeignKey(Landlord, verbose_name="Właściciel", on_delete=models.CASCADE, null=True)
    is_for_rent = models.BooleanField(default=True, verbose_name="Czy jest do wynajęcia")

    class Meta:
        verbose_name = u'Mieszkanie'
        verbose_name_plural = u'Mieszkania'

    def __str__(self):
        """Return the full address, omitting the flat number when absent."""
        if self.flat_number:
            block_flat_number = f"{self.block_number}/{self.flat_number}"
        else:
            block_flat_number = f"{self.block_number}"
        return f"{self.street} {block_flat_number}, {self.post_code} {self.city}"

    def get_active_agreement(self):
        """Return the Agreement covering today, or None if the flat is free.

        A single ``today`` snapshot keeps both range bounds consistent (the
        previous code evaluated ``datetime.now().date()`` twice), and
        ``first()`` fetches in one query instead of the extra COUNT issued
        by the former ``count() > 0`` check.
        """
        today = datetime.now().date()
        return self.agreement_set.filter(date_from__lte=today, date_to__gte=today).first()

    def available_from(self):
        """Date the flat becomes available: the day after the active agreement ends, else today."""
        active_agreement = self.get_active_agreement()
        if active_agreement:
            return active_agreement.date_to + timedelta(days=1)
        return datetime.now().date()
class Tenant(models.Model):
    """A tenant renting a flat, with contact details and an optional login."""

    first_name = models.CharField(max_length=64, verbose_name="Imię")
    last_name = models.CharField(max_length=64, verbose_name="Nazwisko")
    phone = models.CharField(max_length=16, verbose_name="Telefon", null=True)
    email = models.CharField(max_length=64, verbose_name="E-mail", null=True)
    info = models.TextField(verbose_name="Dodatkowe info", null=True)
    # null=True so a Tenant record can exist before a login account is linked
    # (see migration 0004_tenant_user).
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, verbose_name="User login")

    class Meta:
        verbose_name = u'Najemca'
        verbose_name_plural = u'Najemcy'

    def __str__(self):
        """Display the tenant as "First Last"."""
        return f"{self.first_name} {self.last_name}"
class Agreement(models.Model):
    """A rental agreement binding a Tenant to a Flat for a date range."""

    code = models.CharField(max_length=32, verbose_name="Identyfikator umowy", unique=True)
    agreement_date = models.DateField(verbose_name="Data podpisania umowy")
    date_from = models.DateField(verbose_name="Data początku najmu")
    date_to = models.DateField(verbose_name="Data końca najmu")
    # NOTE(review): FloatField for monetary values is lossy; DecimalField would
    # be safer, but changing it requires a schema migration.
    mth_payment_value = models.FloatField(verbose_name="Miesięczny koszt wynajmu")
    # Presumably the day of the month the payment is due -- confirm with callers.
    mth_payment_deadline = models.SmallIntegerField(verbose_name="Termin miesięcznej opłaty")
    tenant = models.ForeignKey(Tenant, on_delete=models.CASCADE, verbose_name="Najemca")
    flat = models.ForeignKey(Flat, on_delete=models.CASCADE, verbose_name="Wynajmowane mieszkanie")
    info = models.TextField(verbose_name="Dodatkowe info", null=True, blank=True)

    class Meta:
        verbose_name = u'Umowa'
        verbose_name_plural = u'Umowy'

    def __str__(self):
        """Display as "CODE, signing-date"."""
        return f"{self.code}, {self.agreement_date}"

    def is_active(self):
        """Return True if today falls within [date_from, date_to], inclusive."""
        return self.date_from <= datetime.now().date() <= self.date_to
class OperationDict(models.Model):
    """Dictionary of financial-operation types.

    ``plus_minus`` records whether an operation of this type increases (PLUS)
    or decreases (MINUS) the settlement balance.
    """

    PLUS_MINUS = (
        (1, "PLUS"),
        (2, "MINUS")
    )
    name = models.CharField(max_length=32, verbose_name="Operacja finansowa")
    plus_minus = models.SmallIntegerField(choices=PLUS_MINUS, verbose_name="Wpływ na saldo rozliczeń")

    class Meta:
        verbose_name = u'Typ operacji finansowej'
        verbose_name_plural = u'Typy operacji finansowych'

    def __str__(self):
        return self.name
class Operation(models.Model):
    """A single financial operation recorded against an Agreement."""

    agreement = models.ForeignKey(Agreement, on_delete=models.CASCADE, verbose_name="Umowa najmu")
    # NOTE(review): the field name 'type' shadows the builtin; renaming would
    # change the DB column, so it is kept as-is.
    type = models.ForeignKey(OperationDict, on_delete=models.CASCADE, verbose_name="Typ operacji finansowej")
    date = models.DateField(verbose_name="Data operacji")
    # NOTE(review): FloatField for money -- see the matching note on Agreement.
    value = models.FloatField(verbose_name="Kwota operacji")
    info = models.TextField(verbose_name="Dodatkowe info", null=True, blank=True)

    class Meta:
        verbose_name = u'Operacja finansowa'
        verbose_name_plural = u'Operacje finansowe'

    def __str__(self):
        """Display as "type | date | value"."""
        return f"{self.type} | {self.date} | {self.value}"
class Image(models.Model):
    """A photo attached to a Flat; defaults to a placeholder image."""

    picture = models.ImageField(default="no-img.png", verbose_name="Zdjęcie")
    info = models.CharField(max_length=128, null=True, blank=True, verbose_name="Opis zdjęcia")
    flat = models.ForeignKey(Flat, on_delete=models.CASCADE, verbose_name="Mieszkanie dot. zdjęcia")

    class Meta:
        verbose_name = u'Zdjęcie'
        verbose_name_plural = u'Zdjęcia'

    def __str__(self):
        """Display as "flat-address | caption"."""
        return f"{self.flat} | {self.info}"
|
mrgloom/Sparse-Autoencoder-Linear | sparseAutoencoderLinear.py | # This piece of software is bound by The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# Code written by : <NAME>
# Email ID : <EMAIL>
import numpy
import math
import time
import scipy.io
import scipy.optimize
import matplotlib.pyplot
###########################################################################################
""" The Sparse Autoencoder Linear class """
class SparseAutoencoderLinear(object):
    """Sparse autoencoder with a linear (identity) output layer.

    The decoder output is linear rather than sigmoid, so inputs need not
    lie in [0, 1] -- suitable for ZCA-whitened data.
    """

    def __init__(self, visible_size, hidden_size, rho, lamda, beta):
        """Store hyperparameters and randomly initialize the parameter vector.

        visible_size : number of input (and output) units
        hidden_size  : number of hidden units
        rho          : desired average activation of hidden units
        lamda        : weight decay (L2) coefficient
        beta         : weight of the sparsity penalty term
        """
        self.visible_size = visible_size
        self.hidden_size = hidden_size
        self.rho = rho
        self.lamda = lamda
        self.beta = beta

        # Index boundaries of W1, W2, b1, b2 inside the flat 'theta' vector.
        self.limit0 = 0
        self.limit1 = hidden_size * visible_size
        self.limit2 = 2 * hidden_size * visible_size
        self.limit3 = 2 * hidden_size * visible_size + hidden_size
        self.limit4 = 2 * hidden_size * visible_size + hidden_size + visible_size

        # Weights drawn uniformly from [-r, r] (fan-in/fan-out based range).
        r = math.sqrt(6) / math.sqrt(visible_size + hidden_size + 1)
        rand = numpy.random.RandomState(int(time.time()))
        W1 = numpy.asarray(rand.uniform(low=-r, high=r, size=(hidden_size, visible_size)))
        W2 = numpy.asarray(rand.uniform(low=-r, high=r, size=(visible_size, hidden_size)))

        # Biases start at zero.
        b1 = numpy.zeros((hidden_size, 1))
        b2 = numpy.zeros((visible_size, 1))

        # Unroll all parameters into one 1-D vector for the optimizer.
        self.theta = numpy.concatenate((W1.flatten(), W2.flatten(),
                                        b1.flatten(), b2.flatten()))

    def sigmoid(self, x):
        """Elementwise logistic sigmoid."""
        return (1 / (1 + numpy.exp(-x)))

    def sparseAutoencoderLinearCost(self, theta, input):
        """Return [cost, gradient] at 'theta' for the batch 'input'.

        'input' must have shape (visible_size, num_examples).  The gradient
        is returned unrolled in the same W1/W2/b1/b2 layout as 'theta'.
        """
        num_examples = input.shape[1]

        # Unpack parameters from the flat vector.
        W1 = theta[self.limit0: self.limit1].reshape(self.hidden_size, self.visible_size)
        W2 = theta[self.limit1: self.limit2].reshape(self.visible_size, self.hidden_size)
        b1 = theta[self.limit2: self.limit3].reshape(self.hidden_size, 1)
        b2 = theta[self.limit3: self.limit4].reshape(self.visible_size, 1)

        # Feedforward pass for all examples at once; the output layer is linear.
        hidden_layer = self.sigmoid(numpy.dot(W1, input) + b1)
        output_layer = numpy.dot(W2, hidden_layer) + b2

        # Average activation of each hidden unit over the batch.
        rho_cap = numpy.sum(hidden_layer, axis=1) / num_examples

        # Cost = reconstruction error + weight decay + sparsity (KL) penalty.
        diff = output_layer - input
        sum_of_squares_error = 0.5 * numpy.sum(diff * diff) / num_examples
        weight_decay = 0.5 * self.lamda * (numpy.sum(W1 * W1) + numpy.sum(W2 * W2))
        KL_divergence = self.beta * numpy.sum(
            self.rho * numpy.log(self.rho / rho_cap) +
            (1 - self.rho) * numpy.log((1 - self.rho) / (1 - rho_cap)))
        cost = sum_of_squares_error + weight_decay + KL_divergence

        # Backpropagation.  reshape(-1, 1) turns the per-hidden-unit sparsity
        # gradient into a column so it broadcasts across examples; this
        # replaces the deprecated numpy.matrix transpose used previously and
        # produces identical values.
        KL_div_grad = self.beta * (-(self.rho / rho_cap) + ((1 - self.rho) / (1 - rho_cap)))
        del_out = diff
        del_hid = (numpy.dot(W2.T, del_out) + KL_div_grad.reshape(-1, 1)) * \
            (hidden_layer * (1 - hidden_layer))

        # Average the partial derivatives over the batch, adding weight decay.
        W1_grad = numpy.dot(del_hid, input.T) / num_examples + self.lamda * W1
        W2_grad = numpy.dot(del_out, hidden_layer.T) / num_examples + self.lamda * W2
        b1_grad = numpy.sum(del_hid, axis=1) / num_examples
        b2_grad = numpy.sum(del_out, axis=1) / num_examples

        # Unroll the gradients in the same order as 'theta'.
        theta_grad = numpy.concatenate((W1_grad.flatten(), W2_grad.flatten(),
                                        b1_grad.flatten(), b2_grad.flatten()))
        return [cost, theta_grad]
###########################################################################################
""" Preprocesses the dataset using ZCA Whitening """
def preprocessDataset(data, num_patches, epsilon):
    """ZCA-whiten the patch dataset.

    data        : array of shape (features, num_patches), one patch per column
    num_patches : number of columns, used to normalize the covariance
    epsilon     : regularizer added to the singular values before rescaling

    Returns (whitened_data, zca_white, mean_patch) so the same transform can
    later be applied to the learned weights.
    """
    # Remove the per-row (per-feature) mean, broadcast over all patches.
    mean_patch = numpy.mean(data, axis=1, keepdims=True)
    data = data - mean_patch

    # Covariance, its eigendecomposition via SVD, and the whitening matrix
    # zca_white = U * diag(1/sqrt(s + eps)) * U^T.
    sigma = numpy.dot(data, data.T) / num_patches
    u, s, _ = numpy.linalg.svd(sigma)
    rescale_factors = numpy.diag(1 / numpy.sqrt(s + epsilon))
    zca_white = numpy.dot(numpy.dot(u, rescale_factors), u.T)

    # Apply ZCA whitening to the centered data.
    data = numpy.dot(zca_white, data)
    return data, zca_white, mean_patch
###########################################################################################
""" Loads the image patches from the mat file """
def loadDataset():
    """Load the sampled STL image patches from 'stlSampledPatches.mat'.

    scipy.io.loadmat returns a dict; the patches live under the 'patches'
    key (presumably one patch per column, matching preprocessDataset --
    confirm against the .mat file).
    """
    images = scipy.io.loadmat('stlSampledPatches.mat')
    images = numpy.array(images['patches'])
    return images
###########################################################################################
""" Visualizes the obtained optimal W1 values as images """
def visualizeW1(opt_W1, vis_patch_side, hid_patch_side):
    """Render each hidden unit's weight vector as a small RGB patch image.

    opt_W1         : (hidden, visible) weight matrix; each row is assumed to
                     hold the R, G, B channel blocks back to back -- confirm
                     against the dataset layout
    vis_patch_side : side length of the input patches
    hid_patch_side : grid side; hid_patch_side**2 must equal the hidden size

    Blocks until the matplotlib window is closed.
    """
    # One subplot per hidden unit, laid out in a square grid.
    figure, axes = matplotlib.pyplot.subplots(nrows = hid_patch_side,
                                              ncols = hid_patch_side)
    # Rescale weight values from [-1, 1] to [0, 1] for display.
    opt_W1 = (opt_W1 + 1) / 2
    # Column ranges of the three colour-channel blocks within each row.
    index = 0
    limit0 = 0
    limit1 = limit0 + vis_patch_side * vis_patch_side
    limit2 = limit1 + vis_patch_side * vis_patch_side
    limit3 = limit2 + vis_patch_side * vis_patch_side
    for axis in axes.flat:
        # Assemble one patch image from the row's channel blocks.
        img = numpy.zeros((vis_patch_side, vis_patch_side, 3))
        img[:, :, 0] = opt_W1[index, limit0 : limit1].reshape(vis_patch_side, vis_patch_side)
        img[:, :, 1] = opt_W1[index, limit1 : limit2].reshape(vis_patch_side, vis_patch_side)
        img[:, :, 2] = opt_W1[index, limit2 : limit3].reshape(vis_patch_side, vis_patch_side)
        image = axis.imshow(img, interpolation = 'nearest')
        axis.set_frame_on(False)
        axis.set_axis_off()
        index += 1
    # Show the assembled grid of feature images.
    matplotlib.pyplot.show()
###########################################################################################
""" Loads data, trains the Autoencoder and visualizes the learned weights """
def executeSparseAutoencoderLinear():
    """Train the sparse linear autoencoder on image patches and plot W1.

    End-to-end driver: load patches, ZCA-whiten them, optimize the
    autoencoder with L-BFGS-B, then visualize the learned features mapped
    back through the whitening matrix.
    """
    image_channels = 3      # number of channels in the image patches
    vis_patch_side = 8      # side length of sampled image patches
    hid_patch_side = 20     # side length of representative image patches
    num_patches = 100000    # number of training examples
    rho = 0.035             # desired average activation of hidden units
    lamda = 0.003           # weight decay parameter
    beta = 5                # weight of sparsity penalty term
    max_iterations = 400    # number of optimization iterations
    epsilon = 0.1           # regularization constant for ZCA Whitening
    visible_size = vis_patch_side * vis_patch_side * image_channels  # number of input units
    hidden_size = hid_patch_side * hid_patch_side                    # number of hidden units
    # Load the dataset and preprocess using ZCA whitening.
    training_data = loadDataset()
    training_data, zca_white, mean_patch = preprocessDataset(training_data, num_patches, epsilon)
    encoder = SparseAutoencoderLinear(visible_size, hidden_size, rho, lamda, beta)
    # jac=True: the cost function returns (cost, gradient) together.
    opt_solution = scipy.optimize.minimize(encoder.sparseAutoencoderLinearCost, encoder.theta,
                                           args = (training_data,), method = 'L-BFGS-B',
                                           jac = True, options = {'maxiter': max_iterations})
    opt_theta = opt_solution.x
    opt_W1 = opt_theta[encoder.limit0 : encoder.limit1].reshape(hidden_size, visible_size)
    # Multiplying by zca_white shows features in the original (unwhitened) space.
    visualizeW1(numpy.dot(opt_W1, zca_white), vis_patch_side, hid_patch_side)
# Run the full training pipeline only when executed as a script, so importing
# this module for its classes/functions does not trigger training.
if __name__ == '__main__':
    executeSparseAutoencoderLinear()
|
AbinavRavi/PredictionAPI-rust | model/train.py | <reponame>AbinavRavi/PredictionAPI-rust<filename>model/train.py
import tensorflow as tf
from network import classifier
from utils import read_config

# Fashion-MNIST: 28x28 grayscale images with integer labels 0-9.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# Human-readable names; index corresponds to the integer label.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0

# Hyperparameters come from the YAML config next to this script.
config = read_config("./model-config.yaml")
num_classes = config["train"]["num_classes"]
learning_rate = config["train"]["lr"]
# NOTE(review): batch_size is read but never passed to model.fit, which
# therefore uses Keras' default batch size -- confirm intended.
batch_size = config["train"]["batch_size"]
epochs = config["train"]["epochs"]
checkpoint_filepath = config["train"]["save_path"]

model = classifier(num_classes)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
# from_logits=True because classifier()'s final Dense layer has no softmax.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True)
model.compile(
    optimizer = optimizer ,
    loss= loss_fn,
    metrics=['accuracy'])
# Save weights after every epoch.
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath = checkpoint_filepath,save_weights_only=True,verbose=1)
model.fit(train_images,train_labels,epochs=epochs,callbacks=[checkpoint_callback])
# NOTE(review): test_images/test_labels are loaded and scaled but never
# evaluated -- consider model.evaluate(test_images, test_labels).
model.save(checkpoint_filepath+"/final_model.h5")
AbinavRavi/PredictionAPI-rust | model/utils.py | <reponame>AbinavRavi/PredictionAPI-rust
import yaml
def read_config(config_path):
    """Load a YAML configuration file and return its parsed contents."""
    with open(config_path, "r") as fh:
        return yaml.full_load(fh)
|
AbinavRavi/PredictionAPI-rust | model/network.py | <filename>model/network.py
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
def classifier(num_classes):
    """Build a simple dense classifier for 28x28 single-channel images.

    The final Dense layer emits raw logits (no softmax), so the training
    loss must be configured with from_logits=True.
    """
    stack = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(num_classes),
    ]
    return tf.keras.Sequential(stack)
|
yangsong158/gooreplacer4chrome | package.py | <filename>package.py<gh_stars>1-10
#!/usr/bin/env python
# coding=utf8
import os
# Operate relative to this script's directory regardless of the caller's CWD.
# NOTE: this chdir happens at import time, a side effect for any importer.
os.chdir(os.path.dirname(os.path.realpath(__file__)))

# Target browsers ("browers" is a historical typo, kept since it is referenced below).
browers = ['chrome', 'firefox']
# Zip everything under src/, excluding macOS .DS_Store files.
package_cmd_template = r'cd src;zip -x *.DS_Store -r %(dest_zip)s *'
# Output archive path template, parameterized by browser name.
zip_file = os.path.expanduser('~/Desktop/%s_gooreplacer.zip')
# sed pattern matching the current "version" line in src/manifest.json.
origin_version = r'"version":.*'
def update_manifest_version(new_version):
    """Rewrite the "version" field of src/manifest.json in place via sed."""
    replacement = r'"version": "%s",' % new_version
    sed_cmd = "sed -i '' 's#%s#%s#' src/manifest.json" % (origin_version, replacement)
    print(sed_cmd)
    os.system(sed_cmd)
def update_online_status_to_false():
    """Flip the default ISREDIRECT_KEY value in db.js from true to false."""
    before = "localStorage.setItem(ISREDIRECT_KEY, true);"
    after = "localStorage.setItem(ISREDIRECT_KEY, false);"
    sed_cmd = "sed -i '' 's#%s#%s#' src/data/js/db.js" % (before, after)
    print(sed_cmd)
    os.system(sed_cmd)
def restore_online_status():
    """Revert the db.js change made by update_online_status_to_false()."""
    before = "localStorage.setItem(ISREDIRECT_KEY, false);"
    after = "localStorage.setItem(ISREDIRECT_KEY, true);"
    sed_cmd = "sed -i '' 's#%s#%s#' src/data/js/db.js" % (before, after)
    print(sed_cmd)
    os.system(sed_cmd)
if __name__ == '__main__':
    for brower in browers:
        # Each browser keeps its version number in <browser>_version.txt.
        with open('%s_version.txt' % brower) as f:
            version_num = f.read().strip()
        # Switch the manifest to this browser's version.
        update_manifest_version(version_num)
        dest_zip = zip_file % brower
        if os.path.isfile(dest_zip):
            print('remove old zip %s' % dest_zip)
            os.remove(dest_zip)
        if brower == 'firefox':
            # Firefox builds ship with the redirect flag disabled by default.
            update_online_status_to_false()
        cmd = package_cmd_template % {
            "dest_zip": dest_zip
        }
        print(cmd)
        os.system(cmd)
        if brower == 'firefox':
            restore_online_status()
    # Restore the manifest to its initial placeholder version.
    update_manifest_version('1.0')
|
RakhulKumar/Handwritten-Digit-Recogniser-using-PyTorch-and-OpenCV | src/Recognition_App.py | import numpy as np
from skimage import img_as_ubyte
from skimage.color import rgb2gray
import cv2
import datetime
import argparse
import imutils
import time
import torch
from time import sleep
from imutils.video import VideoStream
from CNN_NET import CNN_NET
# NOTE(review): hardcoded Raspberry Pi path; despite the .h5 suffix this is a
# torch-serialized model (loaded via torch.load), not a Keras HDF5 file.
path="/home/pi/Desktop/DIGIT RECOGNIZER/weights.h5"
model=torch.load(path)

# --picamera > 0 selects the Raspberry Pi camera module over a USB webcam.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# Start the camera stream and give the sensor time to warm up.
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
def ImagePreProcess(im_orig, fr):
    """Classify the digit in *im_orig* and draw the prediction onto frame *fr*."""
    # Grayscale -> 8-bit -> Otsu binarization.
    gray = rgb2gray(im_orig)
    gray_u8 = img_as_ubyte(gray)
    (thresh, binary) = cv2.threshold(gray_u8, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # The network expects an inverted (white-on-black) 28x28 input.
    small = cv2.resize(binary, (28, 28))
    inverted = 255 - small
    batch = inverted.reshape(1, 1, 28, 28)
    tensor = torch.from_numpy(batch).type('torch.FloatTensor')
    scores = model(tensor)
    # Predicted class = index of the maximum score.
    row = scores[0].tolist()
    digit = row.index(max(row))
    caption = "Predicted digit: " + str(digit)
    cv2.putText(fr, caption, (70, 270), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 225), 2)
    fr = imutils.resize(fr, width=400)
    cv2.imshow('OuTpUt', fr)
def main():
    """Show the camera feed, classify each frame, and quit cleanly on 'q'.

    Cleanup (window teardown and stopping the stream) now runs exactly once
    after the loop exits: the previous version placed it after ``break``
    (dead code) and inside the KeyboardInterrupt handler without ever
    leaving the ``while True`` loop.  The unused ``t0``/``d`` locals were
    removed.
    """
    try:
        while True:
            frame = vs.read()
            frame = imutils.resize(frame, width=400)
            cv2.imshow("Show the digit", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
            # Round-trip through JPEG, matching the original pipeline.
            cv2.imwrite("num.jpg", frame)
            im_orig = cv2.imread("num.jpg")
            ImagePreProcess(im_orig, frame)
    except KeyboardInterrupt:
        # Ctrl-C ends the loop instead of looping forever.
        pass
    cv2.destroyAllWindows()
    vs.stop()

if __name__=="__main__":
    main()
napoler/tkitMarkerFast | json2csv.py | <filename>json2csv.py<gh_stars>1-10
import tkitJson
import csv

# Source: triplet records (name/type/word) produced in an earlier step.
Newjson=tkitJson.Json("../newTriplet.json")
data=[]
# On Python 2, file() could be used in place of open().
with open("data.csv","w") as csvfile:
    writer = csv.writer(csvfile)
    # Write the header row first.
    writer.writerow(["name","type","word"])
    for i,item in enumerate( Newjson.load()):
        # Rows are collected here and written in bulk with writerows below.
        print(item)
        try:
            data.append([item['name'],item['type'],item['word']])
            pass
        except:
            # NOTE(review): bare except silently drops records missing any of
            # the three keys -- appears to be a deliberate best-effort export.
            pass
    writer.writerows(data)
napoler/tkitMarkerFast | test.py | <gh_stars>1-10
import tkitMarkerFast

# Initialize the marker.
model = tkitMarkerFast.MarkerFast()
# Load the trained model.
model.load_model()

# Hand-annotated reference for comparison:
# 【禁忌证】 [@顽固、难治性高血压#禁忌症*]、[@严重的心血管疾病#禁忌症*]及[@甲亢#禁忌症*]患者。
text = "【禁忌证】 顽固、难治性高血压#禁忌症、严重的心血管疾病#禁忌症及甲亢#禁忌症患者。"
text,_,_,_=model.pre(text)
print(text)
# Observed model output:
# 【禁忌证】顽[@固、#禁忌症*]难[@治性高血压#禁忌症、#禁忌症*]严[@重的心血管疾病禁忌症*]禁忌症及甲[@亢#禁忌症患#禁忌症*]者。
|
napoler/tkitMarkerFast | test_mc.py | <gh_stars>1-10
import tkitMarkerFast
import tkitJson
import re
import tqdm

# Initialize the marker.
model = tkitMarkerFast.MarkerFast()
# Load the trained model.
model.load_model()

# Bracketed-annotation format example:
# 【禁忌证】 [@顽固、难治性高血压#禁忌症*]、[@严重的心血管疾病#禁忌症*]及[@甲亢#禁忌症*]患者。
Tjson=tkitJson.Json("../1.json")
for i,item in tqdm.tqdm(enumerate( Tjson.load())):
    if len(item["title"])>20:
        # Skip entries whose titles are too long to use in a filename.
        # break
        continue
    else:
        with open('data/'+str(i)+item["title"]+'.txt','w') as f:  # open the per-item output file
            # f.write(str)
            # model.cut_sent(""item["data"])
            for it in item["data"]:
                # print(it)
                # Only predict on fragments longer than 2 chars; shorter ones
                # are copied through unchanged.
                if len(it)>2:
                    p=model.pre(it)[0]
                    # print(p)
                    f.write(p+"\n")
                    pass
                else:
                    f.write(it)
# text = "【禁忌证】 顽固、难治性高血压#禁忌症、严重的心血管疾病#禁忌症及甲亢#禁忌症患者。"
# text,_,_,_=model.pre(text)
# print(text)
# # 【禁忌证】顽[@固、#禁忌症*]难[@治性高血压#禁忌症、#禁忌症*]严[@重的心血管疾病禁忌症*]禁忌症及甲[@亢#禁忌症患#禁忌症*]者。
|
napoler/tkitMarkerFast | test/test.py |
# encoding=utf-8
from __future__ import unicode_literals
import tkitMarkerFast
import src
import sys
# Switch to the parent directory for imports.
# NOTE(review): this sys.path tweak runs AFTER the imports above, so it cannot
# influence how tkitMarkerFast/src were resolved -- verify intended ordering.
sys.path.append("../")
# Exercise the local library.
Demo = tkitMarkerFast.MarkerFast()
Demo.fun()
|
napoler/tkitMarkerFast | test_mc2data2Triplet.py | <reponame>napoler/tkitMarkerFast<gh_stars>1-10
import tkitMarkerFast
import tkitJson
import re
"""[Converts the predicted data into triplet format]
"""
import tqdm

# Initialize the marker.
model = tkitMarkerFast.MarkerFast()
# Load the trained model.
# NOTE(review): the model is loaded but never used below -- predictions were
# made in an earlier step; confirm whether load_model() is still needed here.
model.load_model()

# Bracketed-annotation format example:
# 【禁忌证】 [@顽固、难治性高血压#禁忌症*]、[@严重的心血管疾病#禁忌症*]及[@甲亢#禁忌症*]患者。
Tjson=tkitJson.Json("../newData.json")
Newjson=tkitJson.Json("../newTriplet.json")
data=[]
for i,item in tqdm.tqdm(enumerate( Tjson.load())):
    # item["prediction"]=[]
    for it in item["prediction"]:
        for line in it["marked"]:
            # print (line)
            # One triplet row per marked entity span.
            data.append({"name":item['title'],"zh":item['zh'],"en":item['en'],"type":line["type"],"word":"".join(line["word"])})
            pass
        # print(it)
        # if len(it)>2:
        #     sent,words,mark,taged=model.pre(it)
        #     # print({"sent":sent,'words':words,"marked":mark,"taged":data})
        #     item["prediction"].append({"sent":sent,'words':words,"marked":mark,"taged":taged})
        #     # f.write(p+"\n")
        #     pass
        # else:
        #     # f.write(it)
        #     pass
        # data.append(item)
print(data)
Newjson.save(data)
# text = "【禁忌证】 顽固、难治性高血压#禁忌症、严重的心血管疾病#禁忌症及甲亢#禁忌症患者。"
# text,_,_,_=model.pre(text)
# print(text)
# # 【禁忌证】顽[@固、#禁忌症*]难[@治性高血压#禁忌症、#禁忌症*]严[@重的心血管疾病禁忌症*]禁忌症及甲[@亢#禁忌症患#禁忌症*]者。
|
napoler/tkitMarkerFast | tkitMarkerFast/MarkerFast.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer
import os
import re
# import tkitFile
import regex
from tqdm import tqdm
import time
# from tkitJson import Config
import tkitJson
import BMESBIO2Data
import difflib
class MarkerFast:
    """[Automatically extracts entities from NER tagging results]
    """
    def __init__(self, model_path="../model", device='cpu',markType='BMES'):
        """[Initialize the automatic marking system]
        Args:
            model_path (str, optional): [path to the model directory]. Defaults to "../model".
            device (str, optional): [compute device, 'cpu' or 'cuda' -- original note was garbled, confirm]. Defaults to 'cpu'.
            markType (str, optional): [tagging scheme passed to BMESBIO2Data]. Defaults to 'BMES'.
        """
        self.model_path = model_path
        self.labels_file = os.path.join(model_path, "labels.txt")
        self.device = device
        self.markType=markType
        pass
    def __del__(self):
        # self.release()
        pass
    def release(self):
        """[Release the model and free GPU memory]
        """
        # print("freeing GPU memory")
        self.model.cpu()
        torch.cuda.empty_cache()
        pass
        # torch.cuda.empty_cache()
        del self.model
        del self.tokenizer
        del self.lablels_dict
        # gc.collect()
    # @profile
    def load_model(self):
        """[Load the model and tokenizer from model_path]
        Returns:
            [type]: [returns model, tokenizer]
        """
        # tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        self.model = AutoModelForTokenClassification.from_pretrained(
            self.model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        Config=tkitJson.Config(os.path.join(self.model_path,"config.json"))
        self.config=Config.read()
        # print(data.get("id2label"))
        # model.to(self.device)
        # Legacy labels.txt parsing, superseded by config.json's id2label:
        # f2 = open(self.labels_file, 'r')
        # lablels_dict = {}
        # for i, line in enumerate(f2):
        #     # l=line.split(" ")
        #     l = line.replace("\n", '')
        #     # print(l)
        #     lablels_dict[i] = l
        # f2.close()
        # Label id -> name mapping now comes from the model's config.json.
        self.lablels_dict = self.config.get("id2label")
        # self.model=model
        # self.tokenizer=tokenizer
        # self.model.eval()
        return self.model, self.tokenizer
    # @profile
    def cut_sent(self,para):
        """[Split a Chinese paragraph into sentences]
        Args:
            para ([type]): [paragraph of text]
        Returns:
            [type]: [list of sentences]
        """
        para = re.sub('([。!?\?])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
        para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
        para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
        para = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', para)
        # If a terminator precedes a closing quote, the quote ends the sentence,
        # so the break goes after the quote; the rules above carefully keep
        # closing quotes attached to their sentence.
        para = para.rstrip()  # drop any surplus trailing \n at the end of the paragraph
        # Many rule sets also treat semicolons, dashes and English double quotes;
        # those are deliberately ignored here and are easy to add if needed.
        return para.split("\n")
    def filterPunctuation(self, x):
        """[Normalize Chinese punctuation to ASCII equivalents and strip spaces]
        Args:
            x ([type]): [input text]
        Returns:
            [type]: [output text]
        """
        x = regex.sub(r'[‘’]', "'", x)
        x = regex.sub(r'[“”]', '"', x)
        x = regex.sub(r'[…]', '...', x)
        x = regex.sub(r'[—]', '-', x)
        x = regex.sub(r" ", "", x)
        return x
    def findDiff(self,cases):
        """[Locate the spans that changed between a text and its tokenization
        https://www.kaggle.com/terrychanorg/unk-ok/]
        Args:
            cases ([list]): [cases=[('使用difflib库来比较两个字符串,并标记出不同的', ['使', '用', 'di', '##ff', '##li', '##b', '库', '来', '比', '较', '两', '个', '字', '符', '串', ',', '并', '标', '记', '出', '[UNK]', '同', '的', '地', '方'])]]
        """
        data=[]
        for a,b in cases:
            print('{} => {}'.format(a,b))
            words=[]
            word={"word":[],"start":None,"end":None,"new":{"word":[],"start":None,"end":None}}
            line=list(difflib.ndiff(a, b))
            for i,s in enumerate(line):
                # print(s)
                if s[0]==' ':
                    # An unchanged position closes any span in progress.
                    if word['start']!=None:
                        # word['word']=line[word["start"]:word["end"]]
                        word['word']="".join(word['word']).replace("##","").replace(" ","")
                        word["new"]['word']="".join(word["new"]['word']).replace("##","").replace(" ","")
                        words.append(word)
                        word={"word":[],"start":None,"end":None,"new":{"word":[],"start":None,"end":None}}
                    continue
                elif s[0]=='-':
                    # print(u'Delete "{}" from position {}'.format(s[-1],i))
                    word["word"].append(s[1:])
                    if word['start']==None:
                        word['start']=i
                        word["end"]=i
                    else:
                        word["end"]=i
                elif s[0]=='+':
                    # print(u'Add "{}" to position {}'.format(s[-1],i))
                    word["new"]["word"].append(s[1:])
                    if word["new"]['start']==None:
                        word["new"]['start']=i
                        word["new"]["end"]=i
                    else:
                        word["new"]["end"]=i
                    pass
            # print("changed spans", words)
            # print()
            data.append(words)
        return data
    def pre(self, text):
        """[Predict the tagging of a text automatically]
        Args:
            text ([type]): [input text; tokenization is truncated to 256 tokens]
        Returns:
            [marked text, words, mark, data]: [re-marked text plus tagging info; 'data' is "token TAG" lines]
        """
        data=[]
        model = self.model
        # text=word+" [SEP] "+text
        # lenth = 500-len(word)
        # all_ms = []
        # n = 0
        with torch.no_grad():
            text = self.filterPunctuation(text)
            ids = self.tokenizer.encode_plus(
                text, None, max_length=256, add_special_tokens=True,truncation=True)
            # print(ids)
            input_ids = torch.tensor(
                ids['input_ids']).unsqueeze(0)  # Batch size 1
            labels = torch.tensor(
                [1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            # print("outputs",outputs)
            tokenWords=self.tokenizer.tokenize(text)
            # cases=[(text,self.tokenizer.tokenize(text))]
            # words=self.findDiff(cases)
            tmp_eval_loss, logits = outputs[:2]
            # print("words",words)
            # print(len(torch.argmax(logits, axis=2).tolist()[0][1:-1]))
            # print(len(words))
            # [1:-1] drops the [CLS]/[SEP] predictions so labels align with tokens.
            for i,(m,wd) in enumerate( zip(torch.argmax(logits, axis=2).tolist()[0][1:-1],tokenWords)):
                # print(m,wd)
                # print(self.lablels_dict)
                # Ids beyond the known label set fall back to "X".
                if m >=len(self.lablels_dict):
                    mark_lable="X"
                else:
                    mark_lable=self.lablels_dict[str(m)]
                # print("w",wd,"m",mark_lable)
                # print(words[i],mark_lable)
                data.append(wd+" "+mark_lable+"")
        M2D=BMESBIO2Data.BMESBIO2Data(markType=self.markType)
        # print(M2D.toData(data))
        # (['【', '禁', '忌', '证', '】', '顽', '固', '、', '难', '治', '性', '高', '血', '压', '#', '禁', '忌', '症', '、', '严', '重', '的', '心', '血', '管', '疾', '病', '#', '禁', '忌', '症', '及', '甲', '亢', '#', '禁', '忌', '症', '患', '者', '。'], [{'type': '禁忌症', 'word': ['固', '、'], 'start': 6, 'end': 7}, {'type': '禁忌症', 'word': ['治', '性', '高', '血', '压', '#', '忌', '症', '、'], 'start': 9, 'end': 18}, {'type': '禁忌症', 'word': ['重', '的', '心', '血', '管', '疾', '病', '#'], 'start': 20, 'end': 27}, {'type': '禁忌症', 'word': ['亢', '#', '禁', '忌', '症', '患'], 'start': 33, 'end': 38}])
        words,mark =M2D.toData(data)
        # print("".join(M2D.data2BMES(words,mark)))
        # Return the marked-up result set.
        return "".join(M2D.data2BMES(words,mark)),tokenWords,mark,data
        # (Removed here: a long commented-out draft of a chunked prediction
        # loop that split long texts with self.cut_text and re-tagged each
        # chunk; it relied on variables no longer defined in this method.)
napoler/tkitMarkerFast | test_mc2data.py | <gh_stars>1-10
import tkitMarkerFast
import tkitJson
import re
"""[Generates prediction results]
"""
import tqdm

# Initialize the marker.
model = tkitMarkerFast.MarkerFast()
# Load the trained model.
model.load_model()

# Bracketed-annotation format example:
# 【禁忌证】 [@顽固、难治性高血压#禁忌症*]、[@严重的心血管疾病#禁忌症*]及[@甲亢#禁忌症*]患者。
Tjson=tkitJson.Json("../data.json")
Newjson=tkitJson.Json("../newData.json")
data=[]
for i,item in tqdm.tqdm(enumerate( Tjson.load())):
    if len(item["title"])>20:
        # Skip entries whose titles are too long.
        # break
        continue
    else:
        # with open('data/'+str(i)+item["title"]+'.txt','w') as f:  # set up the file object
        # f.write(str)
        # model.cut_sent(""item["data"])
        item["prediction"]=[]
        for it in item["data"]:
            print(it)
            # Only predict on fragments longer than 2 characters.
            if len(it)>2:
                sent,words,mark,taged=model.pre(it)
                # print({"sent":sent,'words':words,"marked":mark,"taged":data})
                item["prediction"].append({"sent":sent,'words':words,"marked":mark,"taged":taged})
                # f.write(p+"\n")
                pass
            else:
                # f.write(it)
                pass
        data.append(item)
        # break
Newjson.save(data)
# text = "【禁忌证】 顽固、难治性高血压#禁忌症、严重的心血管疾病#禁忌症及甲亢#禁忌症患者。"
# text,_,_,_=model.pre(text)
# print(text)
# # 【禁忌证】顽[@固、#禁忌症*]难[@治性高血压#禁忌症、#禁忌症*]严[@重的心血管疾病禁忌症*]禁忌症及甲[@亢#禁忌症患#禁忌症*]者。
|
zachary-hawk/dispersion.py | src/main.py | #!/usr/bin/env python
###############################################################
# #
# D I S P E R S I O N . P Y #
# #
###############################################################
'''
ALTERNATIVE CODE FOR PLOTTING BANDSTRUCTURES
FROM A CASTEP .BANDS FILE
'''
# Let us import all the stuff we need, shouldnt require any specialist packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from fractions import Fraction
import sys
import os
from itertools import cycle
import argparse
import ase.io as io
import ase.dft.bz as bz
import warnings
def blockPrint():
    """Discard all subsequent print output by pointing stdout at the null device."""
    sink = open(os.devnull, 'w')
    sys.stdout = sink
# Restore
def enablePrint():
    """Restore the real stdout after a blockPrint() call.

    Also closes the devnull handle that blockPrint() left assigned to
    sys.stdout, so repeated silence/restore cycles do not leak open file
    descriptors (the original implementation only reassigned sys.stdout).
    Safe to call even when stdout was never redirected.
    """
    if sys.stdout is not sys.__stdout__:
        try:
            sys.stdout.close()
        except Exception:
            pass  # cleanup is best-effort; never let it break the restore
    sys.stdout = sys.__stdout__
# Define some constants
hartree = 27.211386245988  # 1 Hartree in eV; .bands eigenvalues are scaled by this before plotting in eV
fracs=np.array([0.5,0.0,0.25,0.75,0.33333333,0.66666667])  # fractional coordinates check_sym() accepts as high-symmetry values
# pdos reader
def pdos_read(seed, species):
    """Read <seed>.pdos_bin (big-endian Fortran unformatted records) and
    return projected-DOS weights of shape (category, band, kpoint, spin).

    When ``species`` is true the per-orbital weights are summed per atomic
    species; otherwise per angular-momentum channel l = 0..3 (s, p, d, f).
    The result is clamped to [0, 1] and rounded to 7 decimals.
    """
    from scipy.io import FortranFile as FF
    # '>' formats: all records are big-endian; '>u4' is the record-marker type.
    f=FF(seed+'.pdos_bin', 'r','>u4')
    version=f.read_reals('>f8')
    header=f.read_record('a80')[0]
    num_kpoints=f.read_ints('>u4')[0]
    num_spins=f.read_ints('>u4')[0]
    num_popn_orb=f.read_ints('>u4')[0]
    max_eigenvalues=f.read_ints('>u4')[0]
    # Per-orbital metadata: owning species index, ion index, angular momentum l.
    orbital_species=f.read_ints('>u4')
    orbital_ion=f.read_ints('>u4')
    orbital_l=f.read_ints('>u4')
    print(orbital_species,orbital_ion,orbital_l)  # NOTE(review): debug print left in
    kpoints=np.zeros((num_kpoints,3))
    pdos_weights=np.zeros((num_popn_orb,max_eigenvalues,num_kpoints,num_spins))
    for nk in range(0,num_kpoints):
        # Each k-point record holds its index plus three fractional coordinates.
        record=f.read_record('>i4','>3f8')
        kpt_index,kpoints[nk,:]=record
        for ns in range(0,num_spins):
            spin_index=f.read_ints('>u4')[0]
            num_eigenvalues=f.read_ints('>u4')[0]
            for nb in range(0,num_eigenvalues):
                pdos_weights[0:num_popn_orb,nb,nk,ns]=f.read_reals('>f8')
                #norm=np.sqrt(np.sum((pdos_weights[0:num_popn_orb,nb,nk,ns])**2))
                # Normalise so each band's orbital weights sum to 1.
                norm=np.sum((pdos_weights[0:num_popn_orb,nb,nk,ns]))
                pdos_weights[0:num_popn_orb,nb,nk,ns]=pdos_weights[0:num_popn_orb,nb,nk,ns]/norm
    if species:
        # Sum orbital weights belonging to each species (1-based indices in file).
        num_species=len(np.unique(orbital_species))
        pdos_weights_sum=np.zeros((num_species,max_eigenvalues,num_kpoints,num_spins))
        for i in range(0,num_species):
            loc=np.where(orbital_species==i+1)[0]
            pdos_weights_sum[i,:,:,:]=np.sum(pdos_weights[loc,:,:,:],axis=0)
    else:
        # Sum orbital weights per angular-momentum channel l = 0..3.
        num_orbitals=4
        pdos_weights_sum=np.zeros((num_orbitals,max_eigenvalues,num_kpoints,num_spins))
        pdos_colours=np.zeros((3,max_eigenvalues,num_kpoints,num_spins))  # NOTE(review): unused below
        r=np.array([1,0,0])
        g=np.array([0,1,0])
        b=np.array([0,0,1])
        k=np.array([0,0,0])
        for i in range(0,num_orbitals):
            loc=np.where(orbital_l==i)[0]
            if len(loc)>0:
                pdos_weights_sum[i,:,:,:]=np.sum(pdos_weights[loc,:,:,:],axis=0)
    #print(kpoints[1])
    #for nb in range(num_eigenvalues):
    #    print(pdos_weights_sum[:,nb,1,0])
    # Clamp to [0, 1] before rounding.
    pdos_weights_sum=np.where(pdos_weights_sum>1,1,pdos_weights_sum)
    pdos_weights_sum=np.where(pdos_weights_sum<0,0,pdos_weights_sum)
    return np.round(pdos_weights_sum,7)
def cart_to_abc(lattice):
    """Convert a 3x3 matrix of Cartesian lattice vectors (one per row) to
    conventional cell parameters (a, b, c, alpha, beta, gamma).

    Angles are in radians: alpha is between rows 1 and 2, beta between
    rows 2 and 0, gamma between rows 0 and 1.
    """
    row_a, row_b, row_c = lattice[0], lattice[1], lattice[2]
    a = np.sqrt(np.dot(row_a, row_a))
    b = np.sqrt(np.dot(row_b, row_b))
    c = np.sqrt(np.dot(row_c, row_c))
    alpha = np.arccos(np.dot(row_b, row_c) / (b * c))
    beta = np.arccos(np.dot(row_c, row_a) / (c * a))
    gamma = np.arccos(np.dot(row_a, row_b) / (a * b))
    return a, b, c, alpha, beta, gamma
def calc_phonons(buff_seed):
    """Parse <buff_seed>.phonon and return the phonon frequencies per q-point.

    Returns (energy_array, sort_array, kpoint_list, kpoint_array,
    no_kpoints, no_ions, lattice), with kpoint_list already reordered by
    sort_array so points follow their file index.
    """
    no_ions = 0
    no_kpoints = 0
    no_branches = 0
    no_electrons = 0
    unit = 0
    # Open the phonon file
    phonon_file=buff_seed+".phonon"
    phonon=open(phonon_file,'r')
    lines=phonon.readlines()
    # Counts are the last whitespace-separated token of fixed header lines.
    no_ions=int(lines[1].split()[-1])
    no_branches=int(lines[2].split()[-1])
    no_kpoints=int(lines[3].split()[-1])
    lattice=np.zeros((3,3))
    lattice[0]=[i for i in lines[8].split()]
    lattice[1]=[i for i in lines[9].split()]
    lattice[2]=[i for i in lines[10].split()]
    #make the arrays
    energy_array=np.empty(shape=(no_kpoints,no_branches))
    kpoint_array=np.empty(shape=(no_kpoints)) # the array holding the number of the kpoint
    kpoint_list=[] # array of the kpoint vectors
    # Every q-point block is (3 + no_branches + no_ions*no_branches) lines;
    # this stride slices out each block's q-point header line.
    kpoint_string=lines[15::no_branches+3+no_ions*no_branches]
    for i in range(len(kpoint_string)):
        kpoint_array[i]=int(kpoint_string[i].split()[1])
        #Empty list for vectors
        vec=[]
        vec.append(float(kpoint_string[i].split()[2]))
        vec.append(float(kpoint_string[i].split()[3]))
        vec.append(float(kpoint_string[i].split()[4]))
        kpoint_list.append(vec)
        # print(vec)
    #Lets get the eigen values into the big array
    for k in range(0,no_kpoints):
        # Frequencies start one line into each block; take the last token per line.
        ind=16 + (k) * (3+no_branches+no_ions*no_branches)
        energy_array[k,:]=np.array([float(i.split()[-1]) for i in lines[ind:ind+no_branches]])
    # Order q-points by their file index so the dispersion plots in path order.
    sort_array=kpoint_array.argsort()
    kpoint_list=np.array(kpoint_list)[sort_array]
    return energy_array,sort_array,kpoint_list,kpoint_array,no_kpoints,no_ions,lattice
# Variables we need from the bands file
def calc_bands(buff_seed,zero,show):
    """Parse <buff_seed>.bands and return eigenvalues plus file metadata.

    Eigenvalues are scaled by ``hartree`` (Hartree -> eV) and, unless
    ``zero`` is true, shifted so the Fermi level sits at 0 eV.  ``show`` is
    accepted for call-site symmetry but is not used in this function.

    Returns (energy_array, energy_array_2, sort_array, kpoint_list,
    kpoint_array, no_spins, no_kpoints, fermi_energy, no_electrons,
    no_electrons_2, no_eigen, no_eigen_2, lattice); energy_array_2 has zero
    columns for non-spin-polarised runs.
    """
    no_spins = 0
    no_kpoints = 0
    fermi_energy = 0
    no_electrons = 0
    no_electrons_2 = 0
    no_eigen = 0
    no_eigen_2 = 0
    # Open the bands file
    bands_file=buff_seed+".bands"
    bands=open(bands_file,'r')
    lines=bands.readlines()
    # Header: counts and Fermi energy are trailing tokens of fixed lines.
    no_spins=int(lines[1].split()[-1])
    no_kpoints=int(lines[0].split()[-1])
    fermi_energy=float(lines[4].split()[-1])
    if no_spins==1:
        fermi_energy=float(lines[4].split()[-1])
        no_electrons =float(lines[2].split()[-1])
        no_eigen = int(lines[3].split()[-1])
    if no_spins==2:
        # Spin-polarised: separate electron/eigenvalue counts per channel.
        spin_polarised=True
        no_electrons=float(lines[2].split()[-2])
        no_electrons_2=float(lines[2].split()[-1])
        no_eigen = int(lines[3].split()[-2])
        no_eigen_2=int(lines[3].split()[-1])
    lattice=np.zeros((3,3))
    lattice[0]=[i for i in lines[6].split()]
    lattice[1]=[i for i in lines[7].split()]
    lattice[2]=[i for i in lines[8].split()]
    # Convert lattice from Bohr to Angstrom (~1.889 Bohr per Angstrom).
    lattice=lattice/1.889
    #make the arrays
    energy_array=np.empty(shape=(no_kpoints,no_eigen))
    energy_array_2=np.empty(shape=(no_kpoints,no_eigen_2))
    kpoint_array=np.empty(shape=(no_kpoints)) # the array holding the number of the kpoint
    kpoint_list=[] # array of the kpoint vectors
    # Slice out the k-point header line of every per-k-point block.
    if no_spins==1:
        kpoint_string=lines[9::no_eigen+2]
    else:
        kpoint_string=lines[9::no_eigen+3+no_eigen_2]
    #loop through the kpoints to split it
    for i in range(len(kpoint_string)):
        kpoint_array[i]=int(kpoint_string[i].split()[1])
        #Empty list for vectors
        vec=[]
        vec.append(float(kpoint_string[i].split()[2]))
        vec.append(float(kpoint_string[i].split()[3]))
        vec.append(float(kpoint_string[i].split()[4]))
        kpoint_list.append(vec)
        # print(vec)
    #Lets get the eigen values into the big array
    for k in range(0,no_kpoints):
        if no_spins==1:
            # Block layout: k-point line + spin-component line + eigenvalues.
            ind=9+k*no_eigen+2*(k+1)
            if not zero:
                energy_array[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind:ind+no_eigen]])
            else:
                energy_array[k,:]=hartree*np.array([float(i) for i in lines[ind:ind+no_eigen]])
        if no_spins==2:
            # Spin 2 eigenvalues follow spin 1's, separated by one header line.
            ind=9+k*(no_eigen+no_eigen_2+1)+2*(k+1)
            if not zero:
                energy_array[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind:ind+no_eigen]])
                energy_array_2[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind+no_eigen+1:ind+no_eigen+1+no_eigen_2]])
            else:
                energy_array[k,:]=hartree*np.array([float(i) for i in lines[ind:ind+no_eigen]])
                energy_array_2[k,:]=hartree*np.array([float(i) for i in lines[ind+no_eigen+1:ind+no_eigen+1+no_eigen_2]])
    # Order k-points by their file index so bands plot in path order.
    sort_array=kpoint_array.argsort()
    kpoint_list=np.array(kpoint_list)[sort_array]
    return energy_array,energy_array_2,sort_array,kpoint_list,kpoint_array,no_spins,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice
def check_sym(vec):
    """Return True when every component of *vec* matches (within np.isclose
    tolerance) one of the high-symmetry fractional coordinates in ``fracs``."""
    for component in vec:
        #frac.append(component.as_integer_ratio()) style checks were replaced
        # by a tolerance comparison against the known special fractions.
        if not any(np.isclose(component, candidate) for candidate in fracs):
            return False
    return True
def main_dispersion():
    """Command-line entry point: parse arguments, read a CASTEP .bands (or
    .phonon) file and plot the dispersion, optionally with an adjoining DOS
    panel, pdos colouring, an overlaid second bandstructure, or band-gap
    markers.  Run with -h for the full option list.
    """
    warnings.filterwarnings("ignore")
    #matplotlib.rcParams['mathtext.fontset'] = 'stix'
    #matplotlib.rcParams['font.family'] = 'STIXGeneral'
    #matplotlib.pyplot.title(r'ABC123 vs $\mathrm{ABC123}^{123}$')
    #matplotlib.use('macOsX')
    matplotlib.rc('text', usetex = True)
    plt.style.use("classic")
    matplotlib.rcParams['mathtext.fontset'] = 'stix'
    matplotlib.rcParams['font.family'] = 'STIXGeneral'
    #Do the parser
    parser = argparse.ArgumentParser(description= "Utillity for plotting bandstructurs from a CASTEP run.")
    parser.add_argument("seed",help="The seed from the CASTEP calculation.")
    parser.add_argument("--save",action="store_true",help="Save DOS as .pdf with name <seed>-dos.pdf.")
    parser.add_argument("-m","--multi",action="store_true",help="Set lines multicoloured.")
    parser.add_argument("-l","--line",help="Set linewidth.",default=0.75)
    parser.add_argument("--lim",help="Provide plotting limits around the Fermi energy.",nargs=2,default=[None,None])
    parser.add_argument("-s","--spin",help="Plot spin-up and spin-down channels.",action="store_true")
    parser.add_argument("-d","--debug",action='store_true',help="Debug flag.")
    #parser.add_argument("--sym",help="Provide crystal symmetry for plot labels.",default=None)
    parser.add_argument("--overlay",help="Seedname of second bands file containing a different bandstructure.",default=None)
    parser.add_argument("--n_up",help="Indices of up bands to be highlighted",nargs="+")
    parser.add_argument("--n_down",help="Indices of down bands to be highlighted",nargs="+")
    parser.add_argument("-f","--flip",action="store_true",help="Plot with a global spin flip")
    parser.add_argument("--fontsize",help="Font size",default=20)
    parser.add_argument("--title",help="Add a title for saving")
    parser.add_argument("--fig",help="add figure caption")
    parser.add_argument("-e","--exe",help="File extension for saving",default="png")
    parser.add_argument("--dos",help="Prodide some data files for DOS plots adjoining bandstructure",nargs="+")
    parser.add_argument("--path",help="Compute a suitable band path for the cell and exit.",nargs="*")
    parser.add_argument("--pdos",help="Use .pdos_bin file to project orbital information",action='store_true')
    parser.add_argument("--species",help="Project pdos onto species rather than orbitals",action='store_true')
    parser.add_argument("--phonon",help="Plot phonon dispersion curve",action='store_true')
    parser.add_argument("-b","--bandgap",help="Indicate bandgap on plots",action="store_true")
    parser.add_argument("--no_plot",help="Supress plotting of dispersions",action="store_true")
    parser.add_argument("--overlay_labels",help="Legend labels for overlay plots",nargs=2,default=[None,None])
    parser.add_argument("-E","--optados",help="Use castep fermi energy if optados error persists",action='store_true')
    parser.add_argument("-as",'--aspect_ratio',help="Specify the aspect ratio of the dispersion plot.",choices=['letter','square'],default='square')
    parser.add_argument('-z','--zero',help='Do not shift the Fermi level to 0 eV.',action='store_true')
    parser.add_argument('--show',help='Supress plotting of spin bands',choices=['up','down','both'],default='both')
    args = parser.parse_args()
    # Unpack the parsed options into local names.
    seed = args.seed
    save = args.save
    multi= args.multi
    # NOTE(review): np.float is removed in NumPy>=1.24; plain float() is safer.
    linewidth=np.float(args.line)
    lim= args.lim
    debug=args.debug
    spin_split=args.spin
    #sym=args.sym
    SOC=args.overlay
    spin_polarised=False
    n_up=args.n_up
    n_down=args.n_down
    flip=args.flip
    text=float(args.fontsize)
    title=args.title
    fig_cap=args.fig
    exe=args.exe
    dos_files=args.dos
    path=args.path
    pdos=args.pdos
    species=args.species
    do_phonons=args.phonon
    bg=args.bandgap
    no_plot=args.no_plot
    overlay_labels=args.overlay_labels
    opt_err=args.optados
    aspect=args.aspect_ratio
    zero=args.zero
    show=args.show
    # Silence stdout while ASE reads the cell file.
    blockPrint()
    def path_finder():
        # Print the lattice's full high-symmetry path as a
        # %BLOCK SPECTRAL_KPOINT_PATH ready for pasting into a .cell file.
        # Open the cell
        path_str=bv_latt.special_path
        path_points=[]
        path_labels=[]
        for L in path_str:
            # Only the first path segment (up to the first comma) is used.
            if L==",":
                break
            path_labels.append(L)
            path_points.append(special_points[L])
        print("%BLOCK SPECTRAL_KPOINT_PATH")
        for i in range(len(path_labels)):
            print("%.5f %.5f %.5f" %(path_points[i][0],path_points[i][1],path_points[i][2]),"#",path_labels[i])
        print("%ENDBLOCK SPECTRAL_KPOINT_PATH")
    # Dothe path and labels
    cell=io.read(seed+".cell")
    bv_latt=cell.cell.get_bravais_lattice()
    special_points=bv_latt.get_special_points()
    atoms=np.unique(cell.get_chemical_symbols())[::-1]
    enablePrint()
    # --path with no arguments: print the suggested path and stop.
    if path==[]:
        path_finder()
        sys.exit()
    else:
        # --path with explicit labels: validate each against the lattice's
        # special points, print the block and stop.
        if path!=None:
            path_points=[]
            path_labels=[]
            for i in path:
                try:
                    path_point=special_points[i]
                    path_points.append(path_point)
                    path_labels.append(i)
                except:
                    print()
                    print("Error: %s has no symmetry point %s"%(bv_latt.name,i))
                    sys.exit()
                    # NOTE(review): these two appends look unreachable after
                    # sys.exit() -- confirm against the original layout.
                    path_points.append(path_point)
                    path_labels.append(i)
            print("%BLOCK SPECTRAL_KPOINT_PATH")
            for j in range(len(path_labels)):
                print("%.5f %.5f %.5f" %(path_points[j][0],path_points[j][1],path_points[j][2]),"#",path_labels[j])
            print("%ENDBLOCK SPECTRAL_KPOINT_PATH")
            sys.exit()
    # Convert 1-based user band indices to 0-based; explicit highlighting
    # disables spin colouring.
    if n_up!=None:
        n_up=np.array(n_up,dtype=int)-1
        spin_split=False
    else:
        n_up=[]
    if n_down!=None:
        n_down=np.array(n_down,dtype=int)-1
        spin_split=False
    else:
        n_down=[]
    if SOC != None:
        doSOC=True
    else :
        doSOC=False
    if dos_files!=None:
        do_dos=True
    else:
        do_dos=False
    bands_file=True
    if multi and spin_split:
        multi=False
    #if doSOC:
    #    multi=False
    #    spin_split=False
    #set the colours
    if spin_split:
        spin_up="r"
        spin_do="b"
    elif flip:
        spin_up="b"
        spin_do="r"
    else :
        spin_up="black"
        spin_do="black"
    #calculate the pdos if needed
    if pdos:
        pdos_weights=pdos_read(seed,species)
    if doSOC:
        energy_array_soc,energy_array_soc2,sort_array_soc,kpoint_list_soc,kpoint_array_soc,no_spins_soc,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice2=calc_bands(SOC,zero,show)
    if not do_phonons:
        energy_array,energy_array_2,sort_array,kpoint_list,kpoint_array,no_spins,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice=calc_bands(seed,zero,show)
        # Report the band gap; spin-resolved when a second channel exists.
        if energy_array_2.shape[1]!=0:
            vb_max_up=np.max(energy_array[:,int(no_electrons)-1])
            vb_max_down=np.max(energy_array_2[:,int(no_electrons_2)-1])
            cb_min_up=np.min(energy_array[:,int(no_electrons)])
            cb_min_down=np.min(energy_array_2[:,int(no_electrons_2)])
            band_gap_up=cb_min_up-vb_max_up
            band_gap_down=cb_min_down-vb_max_down
            print("Band gap (up) : %6.3f eV"%band_gap_up)
            print("Band gap (down) : %6.3f eV"%band_gap_down)
            # Locate the k-points of the band edges for the -b markers.
            vb_max_ind_up=np.where(energy_array[sort_array][:,int(no_electrons)-1]==vb_max_up)[0][-1]
            vb_max_ind_down=np.where(energy_array_2[sort_array][:,int(no_electrons_2)-1]==vb_max_down)[0][-1]
            cb_min_ind_up=np.where(energy_array[sort_array][:,int(no_electrons)]==cb_min_up)[0][-1]
            cb_min_ind_down=np.where(energy_array_2[sort_array][:,int(no_electrons_2)]==cb_min_down)[0][-1]
            k_max_loc_up=kpoint_array[sort_array][vb_max_ind_up]
            k_max_loc_down=kpoint_array[sort_array][vb_max_ind_down]
            k_min_loc_up=kpoint_array[sort_array][cb_min_ind_up]
            k_min_loc_down=kpoint_array[sort_array][cb_min_ind_down]
        else:
            # Non-spin-polarised: bands are doubly occupied (no_electrons/2).
            vb_max=np.max(energy_array[:,int(no_electrons/2)-1])
            cb_min=np.min(energy_array[:,int(no_electrons/2)])
            band_gap=cb_min-vb_max
            print("Band gap : %6.3f eV"%band_gap)
            vb_max_ind=np.where(energy_array[sort_array][:,int(no_electrons/2)-1]==vb_max)[0][-1]
            cb_min_ind=np.where(energy_array[sort_array][:,int(no_electrons/2)]==cb_min)[0][-1]
            k_max_loc=kpoint_array[sort_array][vb_max_ind]
            k_min_loc=kpoint_array[sort_array][cb_min_ind]
    else:
        energy_array,sort_array,kpoint_list,kpoint_array,no_kpoints,no_ions,lattice=calc_phonons(seed)
    # Real- and reciprocal-lattice geometry (b1..b3, angles currently unused).
    a,b,c,alpha,beta,gamma=cart_to_abc(lattice)
    a1,a2,a3=lattice[0],lattice[1],lattice[2]
    b1=2*np.pi*np.cross(a2,a3)/(np.dot(a1,np.cross(a2,a3)))
    b2=2*np.pi*np.cross(a3,a1)/(np.dot(a1,np.cross(a2,a3)))
    b3=2*np.pi*np.cross(a1,a2)/(np.dot(a1,np.cross(a2,a3)))
    kalpha=np.arccos(np.dot(a2,a3)/(np.linalg.norm(a2)*np.linalg.norm(a3)))
    kbeta=np.arccos(np.dot(a1,a3)/(np.linalg.norm(a1)*np.linalg.norm(a3)))
    kgamma=np.arccos(np.dot(a2,a1)/(np.linalg.norm(a2)*np.linalg.norm(a1)))
    #matplotlib.rc('text', usetex = True)
    # Here we do the analysis of the kpoints and the symmetry.. It's going to be horific!
    #define all the greek letters we will use for weird ones
    if no_plot:
        sys.exit()
    k_ticks=[]
    for i,vec in enumerate(kpoint_list):
        if check_sym(vec):
            k_ticks.append(kpoint_array[i])
    tol=1e-5
    tol=[tol,tol,tol]
    # A change in the k-path direction (second difference above tol) marks a
    # high-symmetry point; collect those indices in high_sym.
    kpoint_grad=[]
    for i in range(1,len(kpoint_list)):
        diff=kpoint_list[i]-kpoint_list[i-1]
        kpoint_grad.append(diff)
    kpoint_2grad=[]
    high_sym=[0]
    for i in range(1,len(kpoint_grad)):
        diff=kpoint_grad[i]-kpoint_grad[i-1]
        kpoint_2grad.append(diff)
        #print(diff)
        if any(np.abs(diff)>tol):
            # print(diff)
            high_sym.append(i)
    high_sym.append(len(kpoint_list)-1)
    high_sym=np.array(high_sym)+1
    ##################### SOC ###################
    if doSOC:
        # Repeat the high-symmetry detection for the overlay bandstructure.
        k_ticks_soc=[]
        for i,vec in enumerate(kpoint_list_soc):
            if check_sym(vec):
                k_ticks_soc.append(kpoint_array_soc[i])
        tol=1e-5
        tol=[tol,tol,tol]
        kpoint_grad_soc=[]
        for i in range(1,len(kpoint_list_soc)):
            diff=kpoint_list_soc[i]-kpoint_list_soc[i-1]
            kpoint_grad_soc.append(diff)
        kpoint_2grad_soc=[]
        high_sym_soc=[0]
        for i in range(1,len(kpoint_grad_soc)):
            diff=kpoint_grad_soc[i]-kpoint_grad_soc[i-1]
            kpoint_2grad_soc.append(diff)
            #print(diff)
            if any(np.abs(diff)>tol):
                # print(diff)
                high_sym_soc.append(i)
        high_sym_soc.append(len(kpoint_list_soc)-1)
        high_sym_soc=np.array(high_sym_soc)+1
        #############################################
        if len(high_sym)!=len(high_sym_soc):
            print("Second Bandsstructure Does not match")
            sys.exit()
        # Rescale the overlay's k-axis segment by segment so its
        # high-symmetry points line up with the main plot's.
        for i in range(1,len(high_sym)):
            high_up=int(high_sym[i])
            high_low=int(high_sym[i-1])
            soc_up=int(high_sym_soc[i])
            soc_low=int(high_sym_soc[i-1])
            nsoc=len(kpoint_array_soc[soc_low:soc_up])+1
            nhigh=len(kpoint_array[high_low:high_up])+1
            kpoint_array_soc[soc_low-1:soc_up]=np.linspace(high_low,high_up,nsoc,endpoint=True)
    # Set up the plotting environment
    #plt.rc('text', usetex=True)
    #plt.rc('font', family='serif',weight='bold')
    #Do the fonts
    #matplotlib.rcParams['font.sans-serif'] = "Times New Roman"#Comic Sans MS"
    # Then, "ALWAYS use sans-serif fonts"
    #matplotlib.rcParams['font.family'] = "sans-serif"
    if not do_dos:
        if aspect=='square':
            aspect_r=(7,7)
        else:
            aspect_r=(9,7)
        fig, ax = plt.subplots(figsize=aspect_r)
    else:
        # Bands panel plus a narrower DOS panel sharing the energy axis.
        from matplotlib.ticker import MaxNLocator
        fig, (ax, ax2) = plt.subplots(1, 2,sharey=True, gridspec_kw={'hspace': 0,'wspace': 0,'width_ratios': [2.4, 1]},figsize=(11,7))
        for file in dos_files:
            pdos_dat=np.loadtxt(file)
            shape=pdos_dat.shape[1]
            if opt_err:
                energy = pdos_dat[:,0]-fermi_energy*hartree
            else:
                energy = pdos_dat[:,0]
            if lim[0]!= None:
                if not zero:
                    mask = (energy >= float(lim[0])) & (energy <= float(lim[1]))
                else:
                    mask = (energy >= float(lim[0])+fermi_energy*hartree) & (energy <= float(lim[1])+fermi_energy*hartree)
            else:
                if not zero:
                    ax2.set_ylim(lim[0],lim[1])
                else:
                    # NOTE(review): lim[0] is None on this branch, so this
                    # addition would raise -- confirm intended behaviour.
                    ax2.set_ylim(lim[0]+fermi_energy*hartree,lim[1]+fermi_energy*hartree)
                mask=[True]*len(energy)
                [mask]
            # 3 columns: total DOS; 5 columns: spin-resolved, plot the difference.
            if shape==3:
                ax2.plot(pdos_dat[:,1][mask],energy[mask],linewidth=linewidth,color="black")
            if shape==5:
                ax2.plot(2*(pdos_dat[:,1][mask]-pdos_dat[:,2][mask]),energy[mask],linewidth=linewidth,color="black")
        # Dashed line at the Fermi level of the DOS panel.
        if not zero:
            ax2.axhline(0,color="0.6",dashes=[8, 8],linewidth=1,)
        else:
            ax2.axhline(fermi_energy*hartree,color="0.6",dashes=[8, 8],linewidth=1,)
        ax2.tick_params(axis='both', which='major', labelsize=text,length=7)
        ax2.set_xlabel(r"$\mathit{g}(\mathit{E}$) (states/eV)",fontsize=text)
        ax2.xaxis.set_major_locator(MaxNLocator(4))
        # Drop the first x tick so it does not collide with the bands panel.
        dos_ticks=ax2.get_xticks()
        dos_ticks=np.delete(dos_ticks,0)
        ax2.set_xticks(dos_ticks)
    # Vertical lines and ticks at the detected high-symmetry points.
    for vline in high_sym:
        ax.axvline(vline,color="black",linewidth=1)
    ax.set_xticks(high_sym)
    if not zero:
        ax.axhline(0,color="0.6",dashes=[8, 8],linewidth=1,)
    else:
        ax.axhline(fermi_energy*hartree,color="0.6",dashes=[8, 8],linewidth=1,)
    if not do_phonons:
        if not zero:
            ax.set_ylabel(r'$\mathit{E}$-$\mathit{E}_{\mathrm{F}}$ (eV)',fontsize=text)
        else:
            ax.set_ylabel(r'$\mathit{E}$ (eV)',fontsize=text)
    else:
        ax.set_ylabel(r'$\omega$ (cm$^{-1}$)',fontsize=text)
    ax.set_xlim(1,no_kpoints)
    ax.tick_params(axis='both', which='major', labelsize=text,length=7)
    if lim[0]!= None:
        if not zero:
            ax.set_ylim(float(lim[0]),float(lim[1]))
        else:
            ax.set_ylim(float(lim[0])+fermi_energy*hartree,float(lim[1])+fermi_energy*hartree)
    #set the x labels
    ticks= []
    tol=1e-4
    '''
    if sym==None:
        for vec in kpoint_list[high_sym-1]:
            ticks.append("("+str(Fraction(vec[0]).limit_denominator())+","+str(Fraction(vec[1]).limit_denominator())+","+str(Fraction(vec[2]).limit_denominator())+")")
        ax.set_xticklabels(ticks)
        for tick in ax.get_xticklabels():
            tick.set_rotation(-30)'''
    # Label each high-symmetry point with its ASE special-point letter.
    ticks=[""]*len(high_sym)
    found=False
    for k_count,k in enumerate(kpoint_list[high_sym-1]):
        found=False
        for i in special_points:#sym_dict[sym]:
            #if abs(sym_dict[sym][i][0]-k[0])<tol and abs(sym_dict[sym][i][1]-k[1])<tol and abs(sym_dict[sym][i][2]-k[2])<tol:
            if abs(special_points[i][0]-k[0])<tol and abs(special_points[i][1]-k[1])<tol and abs(special_points[i][2]-k[2])<tol:
                if i=="G":
                    ticks[k_count]="$\Gamma$"
                else:
                    ticks[k_count]=i
                found=True
    #if not found:
    #    ticks.append("")
    ax.set_xticklabels(ticks)
    #plt.gcf().subplots_adjust(bottom=0.2)
    n_colors=cycle(['blue','red','green','black','purple','orange','yellow','cyan'])
    # Draw the band-gap markers (vertical gap plus horizontal offset lines).
    if bg:
        if energy_array_2.shape[1]!=0:
            ax.plot([k_max_loc_up,k_max_loc_up],[vb_max_up,cb_min_up],color='r',linewidth=linewidth*2)
            ax.plot([k_max_loc_down,k_max_loc_down],[vb_max_down,cb_min_down],color='b',linewidth=linewidth*2)
            ax.plot([k_max_loc_up,k_min_loc_up],[cb_min_up,cb_min_up],color='r',linewidth=linewidth*2)
            ax.plot([k_max_loc_down,k_min_loc_down],[cb_min_down,cb_min_down],color='b',linewidth=linewidth*2)
            ax.text(k_max_loc_up*1.05,vb_max_up+(-vb_max_up+cb_min_up)*0.8/2,"%4.2f eV"%band_gap_up,fontsize=text)
            ax.text(k_max_loc_down*1.05,vb_max_down+(-vb_max_down+cb_min_down)*0.8/2,"%4.2f eV"%band_gap_down,fontsize=text)
        else:
            #ax.scatter(k_min_loc,cb_min)
            #ax.scatter(k_max_loc,vb_max)
            ax.plot([k_max_loc,k_max_loc],[vb_max,cb_min],color='k',linewidth=linewidth*2)
            ax.plot([k_max_loc,k_min_loc],[cb_min,cb_min],color='k',linewidth=linewidth*2)
            ax.text(k_max_loc*1.05,vb_max+(-vb_max+cb_min)*0.8/2,"%4.2f eV"%band_gap,fontsize=text)
    if multi:
        # Multicoloured mode: let matplotlib cycle one colour per band.
        if not do_phonons:
            ax.plot(kpoint_array[sort_array],energy_array[sort_array],linewidth=linewidth)
            if no_spins==2:
                if show=='up' or show=='both':
                    ax.plot(kpoint_array[sort_array],energy_array_2[sort_array])
        else:
            # NOTE(review): this else (phonon case) and its show-filter were
            # reconstructed from context -- confirm against original layout.
            if show=='down' or show=='both':
                ax.plot(kpoint_array[sort_array],energy_array[sort_array],linewidth=linewidth)
    elif not do_phonons:
        if pdos:
            # Colour each band segment by its orbital/species projection.
            from matplotlib import colors
            from matplotlib.colors import ListedColormap
            from matplotlib.lines import Line2D
            import matplotlib.collections as mcoll
            import matplotlib.path as mpath
            def make_segments(x, y):
                """
                Create list of line segments from x and y coordinates, in the correct format
                for LineCollection: an array of the form numlines x (points per line) x 2 (x
                and y) array
                """
                points = np.array([x, y]).T.reshape(-1, 1, 2)
                segments = np.concatenate([points[:-1], points[1:]], axis=1)
                return segments
            def colorline(
                    x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
                    linewidth=3, alpha=1.0):
                """
                http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
                http://matplotlib.org/examples/pylab_examples/multicolored_line.html
                Plot a colored line with coordinates x and y
                Optionally specify colors in the array z
                Optionally specify a colormap, a norm function and a line width
                """
                # Default colors equally spaced on [0,1]:
                if z is None:
                    z = np.linspace(0.0, 1.0, len(x))
                z = np.asarray(z)
                segments = make_segments(x, y)
                lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
                                          linewidth=linewidth, alpha=alpha)
                ax.add_collection(lc)
                return lc
            if species:
                n_cat=len(atoms)
            else:
                n_cat=4
            # One base RGBA colour per category (species or s/p/d/f channel).
            basis=[]
            for i in range(n_cat):
                basis.append(np.array(colors.to_rgba(next(n_colors))))
            for nb in range(no_eigen):
                # calculate the colour
                cmap_array=np.zeros((len(kpoint_array),4))
                for i in range(n_cat):
                    cmap_array[:,0]+=pdos_weights[i,nb,:,0]*basis[i][0]#/n_cat
                    cmap_array[:,1]+=pdos_weights[i,nb,:,0]*basis[i][1]#/n_cat
                    cmap_array[:,2]+=pdos_weights[i,nb,:,0]*basis[i][2]#/n_cat
                    cmap_array[:,3]+=pdos_weights[i,nb,:,0]*basis[i][3]#/n_cat
                #cmap_array[:,0:3]=cmap_array[:,0:3]/n_cat
                cmap_array=np.where(cmap_array>1,1,cmap_array)
                cmap = ListedColormap(cmap_array)
                z = np.linspace(0, 1, len(kpoint_array))
                colorline(kpoint_array[sort_array], energy_array[sort_array][:,nb], z, cmap=cmap, linewidth=3)
                # Invisible plot keeps the axes' data limits up to date.
                ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,nb],linewidth=linewidth,alpha=0)
            if no_spins==2:
                for nb in range(no_eigen):
                    # calculate the colour
                    cmap_array=np.zeros((len(kpoint_array),4))
                    for i in range(n_cat):
                        cmap_array[:,0]+=pdos_weights[i,nb,:,1]*basis[i][0]#/n_cat
                        cmap_array[:,1]+=pdos_weights[i,nb,:,1]*basis[i][1]#/n_cat
                        cmap_array[:,2]+=pdos_weights[i,nb,:,1]*basis[i][2]#/n_cat
                        cmap_array[:,3]+=pdos_weights[i,nb,:,1]*basis[i][3]#/n_cat
                    #cmap_array[:,0:3]=cmap_array[:,0:3]/n_cat
                    cmap_array=np.where(cmap_array>1,1,cmap_array)
                    cmap = ListedColormap(cmap_array)
                    z = np.linspace(0, 1, len(kpoint_array))
                    colorline(kpoint_array[sort_array], energy_array_2[sort_array][:,nb], z, cmap=cmap, linewidth=3)
                    # NOTE(review): energy_array (spin 1) is used here rather
                    # than energy_array_2 -- possibly intentional (limits only).
                    ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,nb],linewidth=linewidth,alpha=0)
            custom_lines = []
            labels=[]
            for i in range(n_cat):
                custom_lines.append(Line2D([0], [0], color=basis[i], lw=3))
                if species:
                    labels.append(atoms[i])
                else:
                    labels=["s","p","d","f"]
            #custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
            #        Line2D([0], [0], color=cmap(.5), lw=4),
            #        Line2D([0], [0], color=cmap(1.), lw=4)]
            ax.legend(custom_lines,labels,fontsize=text)
        else:
            # Plain single-colour bands, optionally with highlighted band
            # indices and an overlaid second bandstructure (dashed).
            if show=='up' or show=='both':
                ax.plot(kpoint_array[sort_array],energy_array[sort_array],color=spin_up,label=overlay_labels[0],linewidth=linewidth)
                for i in n_up:
                    ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,i],linewidth=linewidth,color=next(n_colors))
            c=1
            if no_spins==2:
                if show=='down' or show=='both':
                    ax.plot(kpoint_array[sort_array],energy_array_2[sort_array],color=spin_do,label=overlay_labels[0],linewidth=linewidth)
                    for i in n_down:
                        ax.plot(kpoint_array[sort_array],energy_array_2[sort_array][:,i],linewidth=linewidth,color=next(n_colors))
            if doSOC:
                #kpoint_array_soc=1+(kpoint_array[-1]-1)*(kpoint_array_soc-1)/(kpoint_array_soc[-1]-1)
                ax.plot(kpoint_array_soc,energy_array_soc[sort_array_soc],color=spin_up,label=overlay_labels[1],linewidth=linewidth,linestyle="--")
                if no_spins_soc==2:
                    ax.plot(kpoint_array_soc,energy_array_soc2[sort_array_soc],color=spin_do,label=overlay_labels[1],linewidth=linewidth,linestyle="--")
            # De-duplicate legend entries before drawing the legend.
            handles, labels = plt.gca().get_legend_handles_labels()
            by_label = dict(zip(labels, handles))
            if not do_dos and overlay_labels[0]!=None:
                plt.legend(by_label.values(), by_label.keys(),loc="upper right",fontsize=text)
    else: #This is the part where we plot the phonons
        ax.plot(kpoint_array[sort_array],energy_array[sort_array],color=spin_up,label="without SOC",linewidth=linewidth)
    # NOTE(review): spin_polarised is always False at this scope, so this
    # debug branch never runs -- confirm whether that is intended.
    if spin_polarised and debug:
        split_en=np.mean(energy_array-energy_array_2,axis=0)
    # Optional figure caption, placed differently with/without a DOS panel.
    if not do_dos:
        plt.figtext(0.95, 0.96, fig_cap, wrap=True, horizontalalignment='center', fontsize=text)
    else:
        x=ax2.get_xlim()[1]*0.9
        y=ax2.get_ylim()[1]*0.85
        ax2.text(x,y,fig_cap,wrap=True, horizontalalignment='center', fontsize=text)
    title_seed=seed#.replace("_","\_")
    if save:
        if title!=None:
            plt.suptitle(title,fontsize=text)
        # Output filename depends on what was plotted.
        if do_phonons:
            plt.tight_layout()
            fig.savefig(seed+"-phonon."+exe)
        elif doSOC:
            plt.tight_layout()
            fig.savefig(seed+"-SOC-bs."+exe)
        elif do_dos:
            plt.tight_layout()
            fig.savefig(seed+"-SOC-bs-dos."+exe)
        else:
            plt.tight_layout()
            fig.savefig(seed+"-bs."+exe)
    else:
        plt.title(title_seed,fontsize=20)
        plt.tight_layout()
        plt.show()
if __name__=='__main__':
    # CLI entry point: all work happens inside main_dispersion().
    main_dispersion()
|
John-Chan/protobuf-rpc-test | protobuf-rpc-test/protobuf/protobuf-2.6.0/python/google/protobuf/pyext/reflection_cpp2_generated_test.py | <reponame>John-Chan/protobuf-rpc-test<gh_stars>1-10
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which tests the generated C++ implementation."""
__author__ = '<EMAIL> (<NAME>)'
import os
# These must be set before the google.protobuf imports below: they select the
# C++ implementation ('cpp') and its API version (2), which are presumably
# read once at import time -- TODO confirm against api_implementation.
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'
from google.apputils import basetest
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_dynamic_pb2
from google.protobuf.internal import more_extensions_pb2
# Star import pulls in the reflection test cases so they run against the
# implementation selected above.
from google.protobuf.internal.reflection_test import *
class ReflectionCppTest(basetest.TestCase):
    """Tests specific to the C++-backed reflection implementation (selected
    via the PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION environment variables set
    at module import time)."""

    def testImplementationSetting(self):
        # Sanity check: the environment variables must actually have selected
        # the 'cpp' implementation, API version 2.
        self.assertEqual('cpp', api_implementation.Type())
        self.assertEqual(2, api_implementation.Version())

    def testExtensionOfGeneratedTypeInDynamicFile(self):
        """Tests that a file built dynamically can extend a generated C++ type.

        The C++ implementation uses a DescriptorPool that has the generated
        DescriptorPool as an underlay. Typically, a type can only find
        extensions in its own pool. With the python C-extension, the generated C++
        extendee may be available, but not the extension. This tests that the
        C-extension implements the correct special handling to make such extensions
        available.
        """
        pb1 = more_extensions_pb2.ExtendedMessage()
        # Test that basic accessors work.
        self.assertFalse(
            pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
        self.assertFalse(
            pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))
        pb1.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension] = 17
        pb1.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a = 24
        self.assertTrue(
            pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
        self.assertTrue(
            pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))
        # Now serialize the data and parse to a new message.
        pb2 = more_extensions_pb2.ExtendedMessage()
        pb2.MergeFromString(pb1.SerializeToString())
        self.assertTrue(
            pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
        self.assertTrue(
            pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))
        self.assertEqual(
            17, pb2.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension])
        self.assertEqual(
            24,
            pb2.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a)
if __name__ == '__main__':
    # Allow running this test module directly.
    basetest.main()
|
John-Chan/protobuf-rpc-test | protobuf-rpc-test/protobuf/protobuf-2.6.0/python/google/protobuf/internal/descriptor_pool_test.py | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_pool."""
__author__ = '<EMAIL> (<NAME>)'
import os
import unittest
from google.apputils import basetest
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import descriptor_pool_test1_pb2
from google.protobuf.internal import descriptor_pool_test2_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
class DescriptorPoolTest(basetest.TestCase):
  """Tests for the Python DescriptorPool implementation.

  Uses the deprecated `assertEquals` alias nowhere; `assertEqual` is the
  canonical spelling (`assertEquals` was removed in Python 3.12).
  """

  def setUp(self):
    # Build a pool pre-loaded with the two factory test file descriptors.
    self.pool = descriptor_pool.DescriptorPool()
    self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test1_pb2.DESCRIPTOR.serialized_pb)
    self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)
    self.pool.Add(self.factory_test1_fd)
    self.pool.Add(self.factory_test2_fd)

  def testFindFileByName(self):
    name1 = 'google/protobuf/internal/factory_test1.proto'
    file_desc1 = self.pool.FindFileByName(name1)
    self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
    self.assertEqual(name1, file_desc1.name)
    self.assertEqual('google.protobuf.python.internal', file_desc1.package)
    self.assertIn('Factory1Message', file_desc1.message_types_by_name)
    name2 = 'google/protobuf/internal/factory_test2.proto'
    file_desc2 = self.pool.FindFileByName(name2)
    self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
    self.assertEqual(name2, file_desc2.name)
    self.assertEqual('google.protobuf.python.internal', file_desc2.package)
    self.assertIn('Factory2Message', file_desc2.message_types_by_name)

  def testFindFileByNameFailure(self):
    with self.assertRaises(KeyError):
      self.pool.FindFileByName('Does not exist')

  def testFindFileContainingSymbol(self):
    file_desc1 = self.pool.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory1Message')
    self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
    self.assertEqual('google/protobuf/internal/factory_test1.proto',
                     file_desc1.name)
    self.assertEqual('google.protobuf.python.internal', file_desc1.package)
    self.assertIn('Factory1Message', file_desc1.message_types_by_name)
    file_desc2 = self.pool.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message')
    self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
    self.assertEqual('google/protobuf/internal/factory_test2.proto',
                     file_desc2.name)
    self.assertEqual('google.protobuf.python.internal', file_desc2.package)
    self.assertIn('Factory2Message', file_desc2.message_types_by_name)

  def testFindFileContainingSymbolFailure(self):
    with self.assertRaises(KeyError):
      self.pool.FindFileContainingSymbol('Does not exist')

  def testFindMessageTypeByName(self):
    msg1 = self.pool.FindMessageTypeByName(
        'google.protobuf.python.internal.Factory1Message')
    self.assertIsInstance(msg1, descriptor.Descriptor)
    self.assertEqual('Factory1Message', msg1.name)
    self.assertEqual('google.protobuf.python.internal.Factory1Message',
                     msg1.full_name)
    self.assertIsNone(msg1.containing_type)
    nested_msg1 = msg1.nested_types[0]
    self.assertEqual('NestedFactory1Message', nested_msg1.name)
    self.assertEqual(msg1, nested_msg1.containing_type)
    nested_enum1 = msg1.enum_types[0]
    self.assertEqual('NestedFactory1Enum', nested_enum1.name)
    self.assertEqual(msg1, nested_enum1.containing_type)
    self.assertEqual(nested_msg1, msg1.fields_by_name[
        'nested_factory_1_message'].message_type)
    self.assertEqual(nested_enum1, msg1.fields_by_name[
        'nested_factory_1_enum'].enum_type)
    msg2 = self.pool.FindMessageTypeByName(
        'google.protobuf.python.internal.Factory2Message')
    self.assertIsInstance(msg2, descriptor.Descriptor)
    self.assertEqual('Factory2Message', msg2.name)
    self.assertEqual('google.protobuf.python.internal.Factory2Message',
                     msg2.full_name)
    self.assertIsNone(msg2.containing_type)
    nested_msg2 = msg2.nested_types[0]
    self.assertEqual('NestedFactory2Message', nested_msg2.name)
    self.assertEqual(msg2, nested_msg2.containing_type)
    nested_enum2 = msg2.enum_types[0]
    self.assertEqual('NestedFactory2Enum', nested_enum2.name)
    self.assertEqual(msg2, nested_enum2.containing_type)
    self.assertEqual(nested_msg2, msg2.fields_by_name[
        'nested_factory_2_message'].message_type)
    self.assertEqual(nested_enum2, msg2.fields_by_name[
        'nested_factory_2_enum'].enum_type)
    self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
    self.assertEqual(
        1776, msg2.fields_by_name['int_with_default'].default_value)
    self.assertTrue(
        msg2.fields_by_name['double_with_default'].has_default_value)
    self.assertEqual(
        9.99, msg2.fields_by_name['double_with_default'].default_value)
    self.assertTrue(
        msg2.fields_by_name['string_with_default'].has_default_value)
    self.assertEqual(
        'hello world', msg2.fields_by_name['string_with_default'].default_value)
    self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
    self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
    self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
    self.assertEqual(
        1, msg2.fields_by_name['enum_with_default'].default_value)
    msg3 = self.pool.FindMessageTypeByName(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
    self.assertEqual(nested_msg2, msg3)
    self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
    self.assertEqual(
        b'a\xfb\x00c',
        msg2.fields_by_name['bytes_with_default'].default_value)
    self.assertEqual(1, len(msg2.oneofs))
    self.assertEqual(1, len(msg2.oneofs_by_name))
    self.assertEqual(2, len(msg2.oneofs[0].fields))
    for name in ['oneof_int', 'oneof_string']:
      self.assertEqual(msg2.oneofs[0],
                       msg2.fields_by_name[name].containing_oneof)
      self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)

  def testFindMessageTypeByNameFailure(self):
    with self.assertRaises(KeyError):
      self.pool.FindMessageTypeByName('Does not exist')

  def testFindEnumTypeByName(self):
    enum1 = self.pool.FindEnumTypeByName(
        'google.protobuf.python.internal.Factory1Enum')
    self.assertIsInstance(enum1, descriptor.EnumDescriptor)
    self.assertEqual(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
    self.assertEqual(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
    nested_enum1 = self.pool.FindEnumTypeByName(
        'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
    self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
    self.assertEqual(
        0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
    self.assertEqual(
        1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
    enum2 = self.pool.FindEnumTypeByName(
        'google.protobuf.python.internal.Factory2Enum')
    self.assertIsInstance(enum2, descriptor.EnumDescriptor)
    self.assertEqual(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
    self.assertEqual(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
    nested_enum2 = self.pool.FindEnumTypeByName(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
    self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
    self.assertEqual(
        0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
    self.assertEqual(
        1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)

  def testFindEnumTypeByNameFailure(self):
    with self.assertRaises(KeyError):
      self.pool.FindEnumTypeByName('Does not exist')

  def testUserDefinedDB(self):
    # A pool backed by a user-supplied DescriptorDatabase must resolve
    # symbols exactly like the default pool.
    db = descriptor_database.DescriptorDatabase()
    self.pool = descriptor_pool.DescriptorPool(db)
    db.Add(self.factory_test1_fd)
    db.Add(self.factory_test2_fd)
    self.testFindMessageTypeByName()

  def testComplexNesting(self):
    test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
        descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
    test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
        descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
    self.pool.Add(test1_desc)
    self.pool.Add(test2_desc)
    TEST1_FILE.CheckFile(self, self.pool)
    TEST2_FILE.CheckFile(self, self.pool)
class ProtoFile(object):
  """Expected contents of one .proto file, checked against a DescriptorPool.

  Fix: replaced the deprecated `assertEquals` alias (removed in Python 3.12)
  with `assertEqual`, consistent with the assertEqual call already used for
  the dependency list.
  """

  def __init__(self, name, package, messages, dependencies=None):
    # name: file name as registered in the pool.
    # package: proto package of the file.
    # messages: dict mapping message name -> MessageType expectation.
    # dependencies: list of names of imported .proto files (default empty).
    self.name = name
    self.package = package
    self.messages = messages
    self.dependencies = dependencies or []

  def CheckFile(self, test, pool):
    """Asserts that `pool` describes this file exactly as expected."""
    file_desc = pool.FindFileByName(self.name)
    test.assertEqual(self.name, file_desc.name)
    test.assertEqual(self.package, file_desc.package)
    dependencies_names = [f.name for f in file_desc.dependencies]
    test.assertEqual(self.dependencies, dependencies_names)
    for name, msg_type in self.messages.items():
      msg_type.CheckType(test, None, name, file_desc)
class EnumType(object):
  """Expected shape of an enum declared inside a message."""

  def __init__(self, values):
    # values: list of (name, number) pairs in declaration order.
    self.values = values

  def CheckType(self, test, msg_desc, name, file_desc):
    """Verifies the enum descriptor registered under `name` in msg_desc."""
    enum_desc = msg_desc.enum_types_by_name[name]
    test.assertEqual(name, enum_desc.name)
    full_name = '.'.join([msg_desc.full_name, name])
    test.assertEqual(full_name, enum_desc.full_name)
    test.assertEqual(msg_desc, enum_desc.containing_type)
    test.assertEqual(file_desc, enum_desc.file)
    for position, (value_name, value_number) in enumerate(self.values):
      value_desc = enum_desc.values_by_name[value_name]
      test.assertEqual(value_name, value_desc.name)
      test.assertEqual(position, value_desc.index)
      test.assertEqual(value_number, value_desc.number)
      test.assertEqual(enum_desc, value_desc.type)
      test.assertIn(value_name, msg_desc.enum_values_by_name)
class MessageType(object):
  """Expected shape of a message declaration (possibly nested)."""

  def __init__(self, type_dict, field_list, is_extendable=False,
               extensions=None):
    # type_dict: nested type name -> MessageType/EnumType expectation.
    # field_list: list of (field name, field expectation) in order.
    # extensions: list of (extension name, field expectation), default empty.
    self.type_dict = type_dict
    self.field_list = field_list
    self.is_extendable = is_extendable
    self.extensions = extensions or []

  def CheckType(self, test, containing_type_desc, name, file_desc):
    """Verifies the message descriptor registered under `name`."""
    if containing_type_desc is None:
      # Top-level message: registered on the file itself.
      msg_desc = file_desc.message_types_by_name[name]
      full_name = '.'.join([file_desc.package, name])
    else:
      # Nested message: registered on its containing message.
      msg_desc = containing_type_desc.nested_types_by_name[name]
      full_name = '.'.join([containing_type_desc.full_name, name])
    test.assertEqual(name, msg_desc.name)
    test.assertEqual(full_name, msg_desc.full_name)
    test.assertEqual(containing_type_desc, msg_desc.containing_type)
    test.assertEqual(msg_desc.file, file_desc)
    test.assertEqual(self.is_extendable, msg_desc.is_extendable)
    for subtype_name, subtype in self.type_dict.items():
      subtype.CheckType(test, msg_desc, subtype_name, file_desc)
    for field_index, (field_name, field) in enumerate(self.field_list):
      field.CheckField(test, msg_desc, field_name, field_index)
    for ext_index, (ext_name, ext_field) in enumerate(self.extensions):
      ext_field.CheckField(test, msg_desc, ext_name, ext_index)
class EnumField(object):
  """Expected shape of an enum-typed field with an explicit default."""

  def __init__(self, number, type_name, default_value):
    self.number = number
    self.type_name = type_name
    self.default_value = default_value

  def CheckField(self, test, msg_desc, name, index):
    """Verifies the enum field registered under `name` in msg_desc."""
    field_desc = msg_desc.fields_by_name[name]
    enum_desc = msg_desc.enum_types_by_name[self.type_name]
    test.assertEqual(name, field_desc.name)
    test.assertEqual('.'.join([msg_desc.full_name, name]),
                     field_desc.full_name)
    test.assertEqual(index, field_desc.index)
    test.assertEqual(self.number, field_desc.number)
    test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type)
    test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM,
                     field_desc.cpp_type)
    test.assertTrue(field_desc.has_default_value)
    # The default is stored as the enum value's index, not its number.
    test.assertEqual(enum_desc.values_by_name[self.default_value].index,
                     field_desc.default_value)
    test.assertEqual(msg_desc, field_desc.containing_type)
    test.assertEqual(enum_desc, field_desc.enum_type)
class MessageField(object):
  """Expected shape of a field whose type is a sibling nested message."""

  def __init__(self, number, type_name):
    self.number = number
    self.type_name = type_name

  def CheckField(self, test, msg_desc, name, index):
    """Verifies the message field registered under `name` in msg_desc."""
    field_desc = msg_desc.fields_by_name[name]
    field_type_desc = msg_desc.nested_types_by_name[self.type_name]
    test.assertEqual(name, field_desc.name)
    test.assertEqual('.'.join([msg_desc.full_name, name]),
                     field_desc.full_name)
    test.assertEqual(index, field_desc.index)
    test.assertEqual(self.number, field_desc.number)
    test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
    test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
                     field_desc.cpp_type)
    test.assertFalse(field_desc.has_default_value)
    test.assertEqual(msg_desc, field_desc.containing_type)
    test.assertEqual(field_type_desc, field_desc.message_type)
class StringField(object):
  """Expected shape of a string field with an explicit default value."""

  def __init__(self, number, default_value):
    self.number = number
    self.default_value = default_value

  def CheckField(self, test, msg_desc, name, index):
    """Verifies the string field registered under `name` in msg_desc."""
    field_desc = msg_desc.fields_by_name[name]
    test.assertEqual(name, field_desc.name)
    test.assertEqual('.'.join([msg_desc.full_name, name]),
                     field_desc.full_name)
    test.assertEqual(index, field_desc.index)
    test.assertEqual(self.number, field_desc.number)
    test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type)
    test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING,
                     field_desc.cpp_type)
    test.assertTrue(field_desc.has_default_value)
    test.assertEqual(self.default_value, field_desc.default_value)
class ExtensionField(object):
  """Expected shape of a message-typed extension field."""

  def __init__(self, number, extended_type):
    # extended_type: simple name of the message being extended.
    self.number = number
    self.extended_type = extended_type

  def CheckField(self, test, msg_desc, name, index):
    """Verifies the extension registered under `name` in msg_desc."""
    field_desc = msg_desc.extensions_by_name[name]
    test.assertEqual(name, field_desc.name)
    test.assertEqual('.'.join([msg_desc.full_name, name]),
                     field_desc.full_name)
    test.assertEqual(self.number, field_desc.number)
    test.assertEqual(index, field_desc.index)
    test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
    test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
                     field_desc.cpp_type)
    test.assertFalse(field_desc.has_default_value)
    test.assertTrue(field_desc.is_extension)
    test.assertEqual(msg_desc, field_desc.extension_scope)
    test.assertEqual(msg_desc, field_desc.message_type)
    test.assertEqual(self.extended_type, field_desc.containing_type.name)
class AddDescriptorTest(basetest.TestCase):
  """Tests registering existing descriptors with a fresh DescriptorPool.

  Fix: replaced the deprecated `assertEquals` alias (removed in Python 3.12)
  with `assertEqual`.
  """

  def _TestMessage(self, prefix):
    # `prefix` is either '' or '.' -- both lookups must succeed.
    pool = descriptor_pool.DescriptorPool()
    pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR)
    self.assertEqual(
        'protobuf_unittest.TestAllTypes',
        pool.FindMessageTypeByName(
            prefix + 'protobuf_unittest.TestAllTypes').full_name)
    # AddDescriptor is not recursive.
    with self.assertRaises(KeyError):
      pool.FindMessageTypeByName(
          prefix + 'protobuf_unittest.TestAllTypes.NestedMessage')
    pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR)
    self.assertEqual(
        'protobuf_unittest.TestAllTypes.NestedMessage',
        pool.FindMessageTypeByName(
            prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
    # Files are implicitly also indexed when messages are added.
    self.assertEqual(
        'google/protobuf/unittest.proto',
        pool.FindFileByName(
            'google/protobuf/unittest.proto').name)
    self.assertEqual(
        'google/protobuf/unittest.proto',
        pool.FindFileContainingSymbol(
            prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name)

  def testMessage(self):
    self._TestMessage('')
    self._TestMessage('.')

  def _TestEnum(self, prefix):
    pool = descriptor_pool.DescriptorPool()
    pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
    self.assertEqual(
        'protobuf_unittest.ForeignEnum',
        pool.FindEnumTypeByName(
            prefix + 'protobuf_unittest.ForeignEnum').full_name)
    # AddEnumDescriptor is not recursive.
    with self.assertRaises(KeyError):
      pool.FindEnumTypeByName(
          prefix + 'protobuf_unittest.ForeignEnum.NestedEnum')
    pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
    self.assertEqual(
        'protobuf_unittest.TestAllTypes.NestedEnum',
        pool.FindEnumTypeByName(
            prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
    # Files are implicitly also indexed when enums are added.
    self.assertEqual(
        'google/protobuf/unittest.proto',
        pool.FindFileByName(
            'google/protobuf/unittest.proto').name)
    self.assertEqual(
        'google/protobuf/unittest.proto',
        pool.FindFileContainingSymbol(
            prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name)

  def testEnum(self):
    self._TestEnum('')
    self._TestEnum('.')

  def testFile(self):
    pool = descriptor_pool.DescriptorPool()
    pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR)
    self.assertEqual(
        'google/protobuf/unittest.proto',
        pool.FindFileByName(
            'google/protobuf/unittest.proto').name)
    # AddFileDescriptor is not recursive; messages and enums within files must
    # be explicitly registered.
    with self.assertRaises(KeyError):
      pool.FindFileContainingSymbol(
          'protobuf_unittest.TestAllTypes')
# Expected structure of descriptor_pool_test1.proto, verified by
# DescriptorPoolTest.testComplexNesting via ProtoFile.CheckFile.
TEST1_FILE = ProtoFile(
    'google/protobuf/internal/descriptor_pool_test1.proto',
    'google.protobuf.python.internal',
    {
      'DescriptorPoolTest1': MessageType({
        'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]),
        'NestedMessage': MessageType({
          'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]),
          'DeepNestedMessage': MessageType({
            'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]),
          }, [
            ('nested_enum', EnumField(1, 'NestedEnum', 'ETA')),
            ('nested_field', StringField(2, 'theta')),
          ]),
        }, [
          ('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')),
          ('nested_field', StringField(2, 'beta')),
          ('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
        ])
      }, [
        ('nested_enum', EnumField(1, 'NestedEnum', 'BETA')),
        ('nested_message', MessageField(2, 'NestedMessage')),
      ], is_extendable=True),
      'DescriptorPoolTest2': MessageType({
        'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]),
        'NestedMessage': MessageType({
          'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]),
          'DeepNestedMessage': MessageType({
            'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]),
          }, [
            ('nested_enum', EnumField(1, 'NestedEnum', 'MU')),
            ('nested_field', StringField(2, 'lambda')),
          ]),
        }, [
          ('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')),
          ('nested_field', StringField(2, 'delta')),
          ('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
        ])
      }, [
        ('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')),
        ('nested_message', MessageField(2, 'NestedMessage')),
      ]),
    })
# Expected structure of descriptor_pool_test2.proto (which imports and
# extends test1), verified by DescriptorPoolTest.testComplexNesting.
TEST2_FILE = ProtoFile(
    'google/protobuf/internal/descriptor_pool_test2.proto',
    'google.protobuf.python.internal',
    {
      'DescriptorPoolTest3': MessageType({
        'NestedEnum': EnumType([('NU', 13), ('XI', 14)]),
        'NestedMessage': MessageType({
          'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]),
          'DeepNestedMessage': MessageType({
            'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]),
          }, [
            ('nested_enum', EnumField(1, 'NestedEnum', 'RHO')),
            ('nested_field', StringField(2, 'sigma')),
          ]),
        }, [
          ('nested_enum', EnumField(1, 'NestedEnum', 'PI')),
          ('nested_field', StringField(2, 'nu')),
          ('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
        ])
      }, [
        ('nested_enum', EnumField(1, 'NestedEnum', 'XI')),
        ('nested_message', MessageField(2, 'NestedMessage')),
      ], extensions=[
        ('descriptor_pool_test',
         ExtensionField(1001, 'DescriptorPoolTest1')),
      ]),
    },
    dependencies=['google/protobuf/internal/descriptor_pool_test1.proto'])
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  basetest.main()
|
John-Chan/protobuf-rpc-test | protobuf-rpc-test/protobuf/protobuf-2.6.0/python/google/protobuf/internal/encoder.py | <reponame>John-Chan/protobuf-rpc-test
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = '<EMAIL> (<NAME>)'
import struct
import sys ##PY25
_PY2 = sys.version_info[0] < 3 ##PY25
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
# Negating positive infinity yields IEEE-754 negative infinity.
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # Wire type 0 is passed because the type bits never change the tag's size.
  packed_tag = wire_format.PackTag(field_number, 0)
  return _VarintSize(packed_tag)
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
  """A sizer which uses the function compute_value_size to compute the size of
  each value.  Typically compute_value_size is _VarintSize."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # One tag, a varint byte count, then the concatenated payload.
        payload = sum(compute_value_size(item) for item in value)
        return payload + local_VarintSize(payload) + tag_size
      return PackedFieldSize
    if is_repeated:
      def RepeatedFieldSize(value):
        # Every element is written with its own tag.
        return tag_size * len(value) + sum(
            compute_value_size(item) for item in value)
      return RepeatedFieldSize
    def FieldSize(value):
      return tag_size + compute_value_size(value)
    return FieldSize
  return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
  """Like _SimpleSizer, but modify_value is invoked on each value before it is
  passed to compute_value_size.  modify_value is typically ZigZagEncode."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # One tag, a varint byte count, then the concatenated payload.
        payload = sum(
            compute_value_size(modify_value(item)) for item in value)
        return payload + local_VarintSize(payload) + tag_size
      return PackedFieldSize
    if is_repeated:
      def RepeatedFieldSize(value):
        # Every element is written with its own tag.
        return tag_size * len(value) + sum(
            compute_value_size(modify_value(item)) for item in value)
      return RepeatedFieldSize
    def FieldSize(value):
      return tag_size + compute_value_size(modify_value(value))
    return FieldSize
  return SpecificSizer
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field.  The input is the size
  of one value."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        payload = value_size * len(value)
        return payload + local_VarintSize(payload) + tag_size
      return PackedFieldSize
    if is_repeated:
      # Each element costs its tag plus the fixed payload width.
      per_element = value_size + tag_size
      def RepeatedFieldSize(value):
        return per_element * len(value)
      return RepeatedFieldSize
    # A singular fixed field always has the same total size.
    fixed_total = value_size + tag_size
    def FieldSize(value):
      return fixed_total
    return FieldSize
  return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
# int32/int64/enum use the signed-varint size computation; uint32/uint64 the
# unsigned one.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
# sint32/sint64 values are ZigZag-encoded before their varint size is computed.
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)
# Fixed-width wire formats: 4-byte and 8-byte payloads, and 1-byte bools.
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field.

  Strings are length-delimited: tag, varint byte count, then the UTF-8 bytes.
  """
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for item in value:
        encoded_len = local_len(item.encode('utf-8'))
        total += local_VarintSize(encoded_len) + encoded_len
      return total
    return RepeatedFieldSize
  def FieldSize(value):
    encoded_len = local_len(value.encode('utf-8'))
    return tag_size + local_VarintSize(encoded_len) + encoded_len
  return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field.

  Like StringSizer but the value is already raw bytes, so no encoding step.
  """
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for item in value:
        payload_len = local_len(item)
        total += local_VarintSize(payload_len) + payload_len
      return total
    return RepeatedFieldSize
  def FieldSize(value):
    payload_len = local_len(value)
    return tag_size + local_VarintSize(payload_len) + payload_len
  return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field.

  Groups are framed by a start tag and an end tag, hence the doubled tag size.
  """
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      return tag_size * len(value) + sum(
          item.ByteSize() for item in value)
    return RepeatedFieldSize
  def FieldSize(value):
    return tag_size + value.ByteSize()
  return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field.

  Messages are length-delimited: tag, varint byte count, then the payload.
  """
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      total = tag_size * len(value)
      for item in value:
        payload = item.ByteSize()
        total += local_VarintSize(payload) + payload
      return total
    return RepeatedFieldSize
  def FieldSize(value):
    payload = value.ByteSize()
    return tag_size + local_VarintSize(payload) + payload
  return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Group start/end tags, the type_id field (tag + varint field number),
  # and the message field's tag are all constant per extension.
  fixed_overhead = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number)
                    + _TagSize(3))
  local_VarintSize = _VarintSize
  def FieldSize(value):
    payload = value.ByteSize()
    return fixed_overhead + local_VarintSize(payload) + payload
  return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
  """Return an encoder for a basic varint value (does not include tag)."""
  # Under Python 2 a single byte is produced with chr(); under Python 3 a
  # one-element bytes object is built instead.
  local_chr = chr if _PY2 else (lambda x: bytes((x,)))  ##PY25
  def EncodeVarint(write, value):
    low_bits = value & 0x7f
    value >>= 7
    # Emit 7 bits per byte, setting the continuation bit (0x80) on all but
    # the final byte.
    while value:
      write(local_chr(0x80 | low_bits))
      low_bits = value & 0x7f
      value >>= 7
    return write(local_chr(low_bits))
  return EncodeVarint
def _SignedVarintEncoder():
  """Return an encoder for a basic signed varint value (does not include
  tag)."""
  local_chr = chr if _PY2 else (lambda x: bytes((x,)))  ##PY25
  def EncodeSignedVarint(write, value):
    if value < 0:
      # Sign-extend negative values to 64 bits before encoding.
      value += (1 << 64)
    low_bits = value & 0x7f
    value >>= 7
    # Emit 7 bits per byte with the continuation bit set on all but the last.
    while value:
      write(local_chr(0x80 | low_bits))
      low_bits = value & 0x7f
      value >>= 7
    return write(local_chr(low_bits))
  return EncodeSignedVarint
# Instantiate the varint encoders once at import time; they are shared by all
# of the encoder constructors below.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes.  This is only
  called at startup time so it doesn't need to be fast."""
  chunks = []
  _EncodeVarint(chunks.append, value)
  # latin1-encoded empty string doubles as b"" on both Python 2 and 3.
  return "".encode("latin1").join(chunks)  ##PY25
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes.  Only called at startup."""
  packed_tag = wire_format.PackTag(field_number, wire_type)
  return _VarintBytes(packed_tag)
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      encode_value:  A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size:  A function which computes the size of an individual
        value, e.g. _VarintSize().
  """
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        # One tag, the total payload size, then each element back to back.
        write(tag_bytes)
        local_EncodeVarint(
            write, sum(compute_value_size(item) for item in value))
        for item in value:
          encode_value(write, item)
      return EncodePackedField
    if is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        # Each element carries its own tag.
        for item in value:
          write(tag_bytes)
          encode_value(write, item)
      return EncodeRepeatedField
    tag_bytes = TagBytes(field_number, wire_type)
    def EncodeField(write, value):
      write(tag_bytes)
      return encode_value(write, value)
    return EncodeField
  return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
b = _PY2 and (lambda x:x) or (lambda x:x.encode('latin1')) ##PY25
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write(b('\x00\x00\x80\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x80\x7F')
elif value == _NEG_INF:
write(b('\x00\x00\x80\xFF')) ##PY25
##!PY25 write(b'\x00\x00\x80\xFF')
elif value != value: # NaN
write(b('\x00\x00\xC0\x7F')) ##PY25
##!PY25 write(b'\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write(b('\x00\x00\x00\x00\x00\x00\xF0\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write(b('\x00\x00\x00\x00\x00\x00\xF0\xFF')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write(b('\x00\x00\x00\x00\x00\x00\xF8\x7F')) ##PY25
##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
##!PY25 false_byte = b'\x00'
##!PY25 true_byte = b'\x01'
false_byte = '\x00'.encode('latin1') ##PY25
true_byte = '\x01'.encode('latin1') ##PY25
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = "".encode("latin1").join([ ##PY25
##!PY25 start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
|
edvbld/jurov | test/integration/runner.py | import os
import os.path
import sys
import parser.runner
def run():
d = os.path.split(os.path.abspath(__file__))[0]
d += '/sample_programs/'
samples = [os.path.abspath(d + 'Factorial.java')]
cmd = './build/src/jurov'
res = parser.runner.run(cmd, samples)
if res:
sys.exit(0)
else:
sys.exit(1)
if __name__ == '__main__':
run()
|
edvbld/jurov | test/integration/parser/runner.py | <gh_stars>0
import os.path
import os
def run(cmd, files):
res = True;
d = os.path.split(os.path.abspath(__file__))[0]
for p in files:
fname = os.path.split(p)[1]
fname = fname.replace('java', 'syntax')
exp = open((d + '/' + fname), 'r').read()
output = os.popen((cmd + ' ' + p)).read()
res = ((output == exp) and res)
return res
|
segfo/ctfTools | scriptAutogen.py | #!/usr/bin/python3
#coding: utf-8
import sys
import os
import libstrings
from operator import attrgetter
from pwn import *
def printUsage(module):
print("%s <elfFile>"%(module))
argv = sys.argv
argc = len(argv)
elf = None
if argc < 2:
printUsage(argv[0])
exit(1)
fileName = argv[1]
try:
elf = ELF(fileName)
except:
print("%s file not found."%fileName)
exit(0)
pwnPyTemplate = """#!/usr/bin/python3
from pwn import *
e = ELF("%s");
r = remote("localhost",11111)
bofPattern = cyclic(2048)
r.send()
r.recv()
# wait recvuntil
r.recvuntil("1337 input:")
# wait lines
line=r.recvlines(2)
#cyclic_find()
#r.recvall()
#r.interactive()
r.close()
"""%(fileName)
gdbServerTemplate = """#!/bin/sh
gdbserver localhost:22222 %s
"""%(fileName)
gdbCmdTemplate = """target remote localhost:22222
si
ni
b __libc_start_main
c
b *($rdi)
c
"""
runGdbTempleate = """
gdb -x ./gdbCmd %s
"""%(fileName)
runGdbSvrTemplate = """#!/bin/sh
socat tcp-l:11111,reuseaddr,fork exec:./__gdbServer
"""
gdbCmd = open("gdbCmd","w")
gdbserver = open("__gdbServer","w")
pwnPy = open("exploit.py","w")
gdbCmd.write(gdbCmdTemplate)
gdbserver.write(gdbServerTemplate)
pwnPy.write(pwnPyTemplate)
gdbCmd.close()
gdbserver.close()
pwnPy.close()
runGdb = open("runGdb.sh","w")
runGdbSvr = open("runGdbServer.sh","w")
runGdb.write(runGdbTempleate)
runGdbSvr.write(runGdbSvrTemplate)
runGdb.close()
runGdbSvr.close()
#chmod
os.chmod("__gdbServer",0o755)
os.chmod("runGdbServer.sh",0o755)
os.chmod("runGdb.sh",0o755)
os.chmod("exploit.py",0o755)
if fileName[0:2] == "./":
fileName = fileName[2:]
os.chmod(fileName,0o755)
stringsTxt = open("strings.txt","w")
str,maxLen = libstrings.getStrings(elf.file)
# length sort (default : address sort)
str = sorted(str,key=attrgetter('len'))
for s in str:
tab = " "*(1+int(math.log10(maxLen))-int(math.log10(s.len)))
stringsTxt.write("%x(%d)%s: %s\n"%(s.addr+elf.load_addr,s.len,tab,s.data))
stringsTxt.close()
|
segfo/ctfTools | libstrings.py | #!/usr/bin/python3
#coding: utf-8
import string
import math
from pwn import *
from collections import *
stringsData = namedtuple('stringsData','addr len data')
def getStrings(file, min=4):
result = ""
f = file
resultData = []
cnt = 0
maxLen = 0
for c in f.read():
cnt += 1
c = chr(c)
if c in string.printable:
if c == '\n':
result += "\\n"
else:
result += c
continue
if len(result) >= min:
resultData.append(stringsData(cnt-len(result)-1,len(result),result))
if maxLen < len(result):
maxLen = len(result)
result = ""
return resultData,maxLen
|
segfo/ctfTools | str2stk32.py | <reponame>segfo/ctfTools<filename>str2stk32.py
import binascii
import sys
if len(sys.argv) <= 1:
print "missing arguments( given \"path string\" )"
exit(1)
strlen = len(sys.argv[1])
str = sys.argv[1]
cnt = strlen /4
cnt += 1 if strlen%4!=0 else 0
for i in xrange(0,cnt):
s = binascii.hexlify(str[(cnt-i-1)*4:(cnt-i)*4][::-1])
bytes = len(s)/2
s = "0x"+s
if bytes == 1 :
print "mov al,"+s
print "movzx eax,al"
print "push eax"
elif bytes == 2:
print "mov ax,"+s
print "movzx eax,ax"
print "push eax"
else:
print "push "+s |
MatPoliquin/retro | retro/examples/discretizer.py | """
Define discrete action spaces for Gym Retro environments with a limited set of button combos
"""
import gym
import numpy as np
import retro
class Discretizer(gym.ActionWrapper):
"""
Wrap a gym environment and make it use discrete actions.
Args:
combos: ordered list of lists of valid button combinations
"""
def __init__(self, env, combos):
super().__init__(env)
assert isinstance(env.action_space, gym.spaces.MultiBinary)
buttons = env.unwrapped.buttons
self._decode_discrete_action = []
for combo in combos:
arr = np.array([False] * env.action_space.n)
for button in combo:
arr[buttons.index(button)] = True
self._decode_discrete_action.append(arr)
self.action_space = gym.spaces.Discrete(len(self._decode_discrete_action))
def action(self, act):
return self._decode_discrete_action[act].copy()
class SonicDiscretizer(Discretizer):
"""
Use Sonic-specific discrete actions
based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
"""
def __init__(self, env):
super().__init__(env=env, combos=[['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']])
def main():
env = retro.make(game='SonicTheHedgehog-Genesis', use_restricted_actions=retro.Actions.DISCRETE)
print('retro.Actions.DISCRETE action_space', env.action_space)
env.close()
env = retro.make(game='SonicTheHedgehog-Genesis')
env = SonicDiscretizer(env)
print('SonicDiscretizer action_space', env.action_space)
env.close()
if __name__ == '__main__':
main() |
MatPoliquin/retro | retro/cores/gba/src/platform/python/cinema/__init__.py | <filename>retro/cores/gba/src/platform/python/cinema/__init__.py
from PIL.ImageChops import difference
from PIL.ImageOps import autocontrast
from PIL.Image import open as PIOpen
class VideoFrame(object):
def __init__(self, pilImage):
self.image = pilImage.convert('RGB')
@staticmethod
def diff(a, b):
diff = difference(a.image, b.image)
diffNormalized = autocontrast(diff)
return (VideoFrame(diff), VideoFrame(diffNormalized))
@staticmethod
def load(path):
with open(path, 'rb') as f:
image = PIOpen(f)
image.load()
return VideoFrame(image)
def save(self, path):
return self.image.save(path)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/png.py | <reponame>MatPoliquin/retro
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import vfs
MODE_RGB = 0
MODE_RGBA = 1
MODE_INDEX = 2
class PNG:
def __init__(self, f, mode=MODE_RGB):
self.vf = vfs.open(f)
self.mode = mode
def writeHeader(self, image):
self._png = lib.PNGWriteOpen(self.vf.handle)
if self.mode == MODE_RGB:
self._info = lib.PNGWriteHeader(self._png, image.width, image.height)
if self.mode == MODE_RGBA:
self._info = lib.PNGWriteHeaderA(self._png, image.width, image.height)
if self.mode == MODE_INDEX:
self._info = lib.PNGWriteHeader8(self._png, image.width, image.height)
return self._info != ffi.NULL
def writePixels(self, image):
if self.mode == MODE_RGB:
return lib.PNGWritePixels(self._png, image.width, image.height, image.stride, image.buffer)
if self.mode == MODE_RGBA:
return lib.PNGWritePixelsA(self._png, image.width, image.height, image.stride, image.buffer)
if self.mode == MODE_INDEX:
return lib.PNGWritePixels8(self._png, image.width, image.height, image.stride, image.buffer)
def writeClose(self):
lib.PNGWriteClose(self._png, self._info)
del self._png
del self._info
|
MatPoliquin/retro | retro/data/__init__.py | from retro._retro import GameDataGlue, RetroEmulator, data_path as _data_path
import glob
import hashlib
import json
import os
import sys
try:
import enum
from enum import Flag
except ImportError:
# Python < 3.6 doesn't support Flag, so we polyfill it ourself
class Flag(enum.Enum):
def __and__(self, b):
value = self.value & b.value
try:
return Integrations(value)
except ValueError:
return value
def __or__(self, b):
value = self.value | b.value
try:
return Integrations(value)
except ValueError:
return value
__all__ = ['GameData', 'Integrations', 'add_integrations', 'add_custom_integration', 'path', 'get_file_path', 'get_romfile_path', 'list_games', 'list_states', 'merge']
if sys.platform.startswith('linux'):
EXT = 'so'
elif sys.platform == 'darwin':
EXT = 'dylib'
elif sys.platform == 'win32':
EXT = 'dll'
else:
raise RuntimeError('Unrecognized platform')
DATA_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
EMU_CORES = {}
EMU_INFO = {}
EMU_EXTENSIONS = {}
class DefaultIntegrations:
@classmethod
def _init(cls):
if not hasattr(cls, 'DEFAULT'):
cls.reset()
def __or__(self, b):
try:
self._init()
except NameError:
return False
return DefaultIntegrations.DEFAULT.value | b
def __and__(self, b):
try:
self._init()
except NameError:
return False
return DefaultIntegrations.DEFAULT.value & b
@classmethod
def add(cls, extra):
cls._init()
cls.DEFAULT |= extra
@classmethod
def reset(cls):
cls.DEFAULT = Integrations.STABLE
class Integrations(Flag):
STABLE = 1
EXPERIMENTAL_ONLY = 2
CONTRIB_ONLY = 4
CUSTOM_ONLY = 8
EXPERIMENTAL = EXPERIMENTAL_ONLY | STABLE
CONTRIB = CONTRIB_ONLY | STABLE
CUSTOM = CUSTOM_ONLY | STABLE
ALL = STABLE | EXPERIMENTAL_ONLY | CONTRIB_ONLY | CUSTOM_ONLY
DEFAULT = DefaultIntegrations()
@classmethod
def _init(cls):
if not hasattr(cls, 'CUSTOM_PATHS'):
cls.CUSTOM_PATHS = []
@property
def paths(self):
p = []
if self & self.CONTRIB_ONLY:
p.append(str(self.CONTRIB_ONLY))
if self & self.EXPERIMENTAL_ONLY:
p.append(str(self.EXPERIMENTAL_ONLY))
if self & self.CUSTOM_ONLY:
Integrations._init()
p.extend(self.CUSTOM_PATHS)
if self & self.STABLE:
p.append('stable')
return p
@classmethod
def add_custom_path(cls, path):
cls._init()
cls.CUSTOM_PATHS.append(path)
@classmethod
def clear_custom_paths(cls):
cls._init()
del cls.CUSTOM_PATHS[:]
def __str__(self):
if self == self.ALL:
return 'all'
if self == self.STABLE:
return ''
names = []
if self & self.STABLE:
names.append('stable')
if self & self.CONTRIB_ONLY:
names.append('contrib')
if self & self.EXPERIMENTAL_ONLY:
names.append('experimental')
if self & self.CUSTOM_ONLY:
names.append('custom')
return '|'.join(names)
class GameData(GameDataGlue):
def __init__(self, game=None, data=None, scenario=None, inttype=Integrations.DEFAULT):
super(GameData, self).__init__()
if game:
if not data:
data = get_file_path(game, 'data.json', inttype)
if not data.endswith('.json'):
data += '.json'
if not os.path.isabs(data):
data = get_file_path(game, data, inttype)
if not scenario:
scenario = get_file_path(game, 'scenario.json', inttype)
if not scenario.endswith('.json'):
scenario += '.json'
if not os.path.isabs(scenario):
scenario = get_file_path(game, scenario, inttype)
if data:
self.load(data, scenario)
def __getitem__(self, name):
return self.lookup_value(name)
def __setitem__(self, name, value):
return self.set_value(name, value)
@property
def searches(self):
return SearchListHandle(self)
@property
def vars(self):
return Variables(self)
class Variables(object):
def __init__(self, data):
super(Variables, self).__init__()
self.data = data
def __getitem__(self, name):
return self.data.get_variable(name)
def __setitem__(self, name, value):
return self.data.set_variable(name, value)
def __delitem__(self, name):
self.data.remove_variable(name)
def __iter__(self):
variables = self.data.list_variables()
for v in variables.items():
yield v
def __contains__(self, name):
variables = self.data.list_variables()
return name in variables
class SearchListHandle(object):
def __init__(self, data):
self._data = data
def __getitem__(self, name):
return SearchHandle(self._data, name)
def __delitem__(self, name):
self._data.remove_search(name)
def __iter__(self):
searches = self._data.list_searches()
for search in searches.items():
yield search
def __contains__(self, name):
searches = self._data.list_searches()
return name in searches
def load(self, name):
self._data.load_searches(name)
def save(self, name):
self._data.save_searches(name)
class SearchHandle(object):
def __init__(self, data, name):
self._data = data
self._name = name
self._search = None
def search(self, value):
self._data.search(self._name, value)
def delta(self, op, ref):
self._data.delta_search(self._name, op, ref)
def __getattr__(self, attr):
if not self._search:
self._search = self._data.get_search(self._name)
return getattr(self._search, attr)
def add_integrations(integrations):
DefaultIntegrations.add(integrations)
def add_custom_integration(path):
DefaultIntegrations.add(Integrations.CUSTOM_ONLY)
Integrations.add_custom_path(path)
def init_core_info(path):
for fname in glob.glob(os.path.join(path, '*.json')):
with open(fname) as f:
core_info = f.read()
RetroEmulator.load_core_info(core_info)
EMU_INFO.update(json.loads(core_info))
for platform, core in EMU_INFO.items():
EMU_CORES[platform] = core['lib'] + '_libretro.' + EXT
for ext in core['ext']:
EMU_EXTENSIONS['.' + ext] = platform
def path(hint=DATA_PATH):
if hint == DATA_PATH and not os.path.exists(os.path.join(DATA_PATH, 'data', 'stable', 'Airstriker-Genesis')):
# Development installation
hint = os.path.join(hint, '..')
return _data_path(hint)
def get_file_path(game, file, inttype=Integrations.DEFAULT):
"""
Return the path to a given game's directory
"""
base = path()
for t in inttype.paths:
possible_path = os.path.join(base, t, game, file)
if os.path.exists(possible_path):
return possible_path
return None
def get_romfile_path(game, inttype=Integrations.DEFAULT):
"""
Return the path to a given game's romfile
"""
for extension in EMU_EXTENSIONS.keys():
possible_path = get_file_path(game, "rom" + extension, inttype)
if possible_path:
return possible_path
raise FileNotFoundError("No romfiles found for game: %s" % game)
def list_games(inttype=Integrations.DEFAULT):
files = []
for curpath in inttype.paths:
files.extend(os.listdir(os.path.join(path(), curpath)))
possible_games = []
for file in files:
if get_file_path(file, "rom.sha", inttype):
possible_games.append(file)
return sorted(set(possible_games))
def list_states(game, inttype=Integrations.DEFAULT):
paths = []
for curpath in inttype.paths:
paths.append(os.path.join(path(), curpath, game))
states = []
for curpath in paths:
local_states = glob.glob(os.path.join(curpath, "*.state"))
states.extend(os.path.split(local_state)[-1][:-len(".state")]
for local_state in local_states
if not os.path.split(local_state)[-1].startswith("_"))
return sorted(set(states))
def list_scenarios(game, inttype=Integrations.DEFAULT):
paths = []
for curpath in inttype.paths:
paths.append(os.path.join(path(), curpath, game))
scens = []
for curpath in paths:
local_json = glob.glob(os.path.join(curpath, "*.json"))
for j in local_json:
try:
with open(j) as f:
scen = json.load(f)
except (json.JSONDecodeError, IOError):
continue
if scen.get('reward') is not None or scen.get('rewards') is not None or scen.get('done') is not None:
scens.append(os.path.split(j)[-1][:-len(".json")])
return sorted(set(scens))
def parse_smd(header, body):
import numpy as np
try:
if body[0x80] != b'E' or body[0x81] != b'A':
return header + body
body2 = b''
for i in range(len(body) / 0x4000):
block = body[i * 0x4000:(i + 1) * 0x4000]
if not block:
break
nb = np.fromstring(block, dtype=np.uint8)
nb = np.flipud(nb.reshape(2, 0x2000))
nb = nb.flatten(order='F')
body2 += nb.tostring()
except IndexError:
return header + body
return body2
def groom_rom(rom, r):
if rom.lower().endswith('.smd'):
# Read Super Magic Drive header
header = r.read(512)
body = r.read()
body = parse_smd(header, body)
elif rom.lower().endswith('.nes'):
header = r.read(16)
body = r.read()
return header + body, hashlib.sha1(body).hexdigest()
else:
# Don't read more than 32 MiB, the largest game supported
body = r.read(0x2000000)
if r.read(1):
raise ValueError('ROM is too big')
return body, hashlib.sha1(body).hexdigest()
def verify_hash(game, inttype=Integrations.DEFAULT):
import retro
errors = []
rom = get_romfile_path(game, inttype=inttype)
system = retro.get_romfile_system(rom)
with open(retro.data.get_file_path(game, 'rom.sha', inttype=inttype | retro.data.Integrations.STABLE)) as f:
expected_shas = f.read().strip().split('\n')
with open(rom, 'rb') as f:
if system == 'Nes':
# Chop off header for checksum
f.read(16)
real_sha = hashlib.sha1(f.read()).hexdigest()
if real_sha not in expected_shas:
errors.append((game, 'sha mismatch'))
return errors
def get_known_hashes():
known_hashes = {}
for game in list_games(Integrations.ALL):
for curpath in Integrations.ALL.paths:
shafile = os.path.join(path(), curpath, game, 'rom.sha')
try:
with open(shafile) as f:
shas = f.read().strip().split('\n')
except (FileNotFoundError, ValueError):
continue
for ext, platform in EMU_EXTENSIONS.items():
if game.endswith('-' + platform):
break
for sha in shas:
known_hashes[sha] = (game, ext, os.path.join(path(), curpath))
return known_hashes
def merge(*args, quiet=True):
import retro
known_hashes = get_known_hashes()
imported_games = 0
for rom in args:
try:
with open(rom, "rb") as r:
data, hash = groom_rom(rom, r)
except (IOError, ValueError):
continue
if hash in known_hashes:
game, ext, curpath = known_hashes[hash]
if not quiet:
print('Importing', game)
with open(os.path.join(curpath, game, 'rom%s' % ext), 'wb') as f:
f.write(data)
imported_games += 1
if not quiet:
print('Imported %i games' % imported_games)
|
MatPoliquin/retro | retro/examples/interactive.py | <gh_stars>1000+
"""
Interact with Gym environments using the keyboard
An adapter object is defined for each environment to map keyboard commands to actions and extract observations as pixels.
"""
import sys
import ctypes
import argparse
import abc
import time
import numpy as np
import retro
import pyglet
from pyglet import gl
from pyglet.window import key as keycodes
class Interactive(abc.ABC):
"""
Base class for making gym environments interactive for human use
"""
def __init__(self, env, sync=True, tps=60, aspect_ratio=None):
obs = env.reset()
self._image = self.get_image(obs, env)
assert len(self._image.shape) == 3 and self._image.shape[2] == 3, 'must be an RGB image'
image_height, image_width = self._image.shape[:2]
if aspect_ratio is None:
aspect_ratio = image_width / image_height
# guess a screen size that doesn't distort the image too much but also is not tiny or huge
display = pyglet.canvas.get_display()
screen = display.get_default_screen()
max_win_width = screen.width * 0.9
max_win_height = screen.height * 0.9
win_width = image_width
win_height = int(win_width / aspect_ratio)
while win_width > max_win_width or win_height > max_win_height:
win_width //= 2
win_height //= 2
while win_width < max_win_width / 2 and win_height < max_win_height / 2:
win_width *= 2
win_height *= 2
win = pyglet.window.Window(width=win_width, height=win_height)
self._key_handler = pyglet.window.key.KeyStateHandler()
win.push_handlers(self._key_handler)
win.on_close = self._on_close
gl.glEnable(gl.GL_TEXTURE_2D)
self._texture_id = gl.GLuint(0)
gl.glGenTextures(1, ctypes.byref(self._texture_id))
gl.glBindTexture(gl.GL_TEXTURE_2D, self._texture_id)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, image_width, image_height, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, None)
self._env = env
self._win = win
# self._render_human = render_human
self._key_previous_states = {}
self._steps = 0
self._episode_steps = 0
self._episode_returns = 0
self._prev_episode_returns = 0
self._tps = tps
self._sync = sync
self._current_time = 0
self._sim_time = 0
self._max_sim_frames_per_update = 4
def _update(self, dt):
# cap the number of frames rendered so we don't just spend forever trying to catch up on frames
# if rendering is slow
max_dt = self._max_sim_frames_per_update / self._tps
if dt > max_dt:
dt = max_dt
# catch up the simulation to the current time
self._current_time += dt
while self._sim_time < self._current_time:
self._sim_time += 1 / self._tps
keys_clicked = set()
keys_pressed = set()
for key_code, pressed in self._key_handler.items():
if pressed:
keys_pressed.add(key_code)
if not self._key_previous_states.get(key_code, False) and pressed:
keys_clicked.add(key_code)
self._key_previous_states[key_code] = pressed
if keycodes.ESCAPE in keys_pressed:
self._on_close()
# assume that for async environments, we just want to repeat keys for as long as they are held
inputs = keys_pressed
if self._sync:
inputs = keys_clicked
keys = []
for keycode in inputs:
for name in dir(keycodes):
if getattr(keycodes, name) == keycode:
keys.append(name)
act = self.keys_to_act(keys)
if not self._sync or act is not None:
obs, rew, done, _info = self._env.step(act)
self._image = self.get_image(obs, self._env)
self._episode_returns += rew
self._steps += 1
self._episode_steps += 1
np.set_printoptions(precision=2)
if self._sync:
done_int = int(done) # shorter than printing True/False
mess = 'steps={self._steps} episode_steps={self._episode_steps} rew={rew} episode_returns={self._episode_returns} done={done_int}'.format(
**locals()
)
print(mess)
elif self._steps % self._tps == 0 or done:
episode_returns_delta = self._episode_returns - self._prev_episode_returns
self._prev_episode_returns = self._episode_returns
mess = 'steps={self._steps} episode_steps={self._episode_steps} episode_returns_delta={episode_returns_delta} episode_returns={self._episode_returns}'.format(
**locals()
)
print(mess)
if done:
self._env.reset()
self._episode_steps = 0
self._episode_returns = 0
self._prev_episode_returns = 0
    def _draw(self):
        """Upload the current frame to the GL texture and draw a window-filling quad."""
        gl.glBindTexture(gl.GL_TEXTURE_2D, self._texture_id)
        # NOTE(review): the buffer is cast to a c_short pointer but uploaded
        # as GL_UNSIGNED_BYTE; GL only reads raw memory so this appears
        # harmless, but a c_ubyte pointer would match the upload format.
        video_buffer = ctypes.cast(self._image.tobytes(), ctypes.POINTER(ctypes.c_short))
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, self._image.shape[1], self._image.shape[0], gl.GL_RGB, gl.GL_UNSIGNED_BYTE, video_buffer)
        x = 0
        y = 0
        w = self._win.width
        h = self._win.height
        # t2f coordinates flip the texture vertically so row 0 renders at the top.
        pyglet.graphics.draw(
            4,
            pyglet.gl.GL_QUADS,
            ('v2f', [x, y, x + w, y, x + w, y + h, x, y + h]),
            ('t2f', [0, 1, 1, 1, 1, 0, 0, 0]),
        )
    def _on_close(self):
        """Shut down: close the environment first, then exit the process."""
        self._env.close()
        sys.exit(0)
    @abc.abstractmethod
    def get_image(self, obs, venv):
        """
        Given an observation and the Env object, return an rgb array to display to the user

        :param obs: the observation returned by the environment's step()
        :param venv: the environment instance itself
        :return: an RGB array (height x width x 3) to show in the window
        """
        pass
    @abc.abstractmethod
    def keys_to_act(self, keys):
        """
        Given a list of keys that the user has input, produce a gym action to pass to the environment

        For sync environments, keys is a list of keys that have been pressed since the last step
        For async environments, keys is a list of keys currently held down

        :param keys: list of symbolic key names (see the keycodes module)
        :return: an action accepted by the environment's step(), or None in
            sync mode to indicate "no action yet"
        """
        pass
    def run(self):
        """
        Run the interactive window until the user quits
        """
        # pyglet.app.run() has issues like https://bitbucket.org/pyglet/pyglet/issues/199/attempting-to-resize-or-close-pyglet
        # and also involves inverting your code to run inside the pyglet framework
        # avoid both by using a while loop
        prev_frame_time = time.time()
        while True:
            self._win.switch_to()
            self._win.dispatch_events()
            # Pass wall-clock elapsed time so _update can run the simulation
            # at a fixed ticks-per-second rate regardless of render speed.
            now = time.time()
            self._update(now - prev_frame_time)
            prev_frame_time = now
            self._draw()
            self._win.flip()
class RetroInteractive(Interactive):
    """
    Interactive setup for retro games
    """
    def __init__(self, game, state, scenario, record):
        environment = retro.make(game=game, state=state, scenario=scenario, record=record)
        self._buttons = environment.buttons
        super().__init__(env=environment, sync=False, tps=60, aspect_ratio=4 / 3)

    def get_image(self, _obs, env):
        # Render through the emulator rather than using the raw observation.
        return env.render(mode='rgb_array')

    def keys_to_act(self, keys):
        """Map currently-held keyboard keys onto the console's button array."""
        # Which keyboard key drives each console button; None is never pressed.
        key_for_button = {
            None: None,
            'BUTTON': 'Z',
            'A': 'Z',
            'B': 'X',
            'C': 'C',
            'X': 'A',
            'Y': 'S',
            'Z': 'D',
            'L': 'Q',
            'R': 'W',
            'UP': 'UP',
            'DOWN': 'DOWN',
            'LEFT': 'LEFT',
            'RIGHT': 'RIGHT',
            'MODE': 'TAB',
            'SELECT': 'TAB',
            'RESET': 'ENTER',
            'START': 'ENTER',
        }
        inputs = {button: key in keys for button, key in key_for_button.items()}
        return [inputs[b] for b in self._buttons]
def main():
    """Parse command-line options and launch an interactive retro session."""
    parser = argparse.ArgumentParser()
    for flag, options in (
        ('--game', {'default': 'Airstriker-Genesis'}),
        ('--state', {'default': retro.State.DEFAULT}),
        ('--scenario', {'default': None}),
        ('--record', {'default': None, 'nargs': '?', 'const': True}),
    ):
        parser.add_argument(flag, **options)
    opts = parser.parse_args()
    session = RetroInteractive(game=opts.game, state=opts.state,
                               scenario=opts.scenario, record=opts.record)
    session.run()
if __name__ == '__main__':
main()
|
MatPoliquin/retro | tests/data/test_load.py | import retro
import pytest
import gc
import gzip
import os
import zlib
from retro.testing import game, handle
from concurrent.futures import ProcessPoolExecutor, TimeoutError
from concurrent.futures.process import BrokenProcessPool
pool = ProcessPoolExecutor(1)
@pytest.fixture(scope="module")
def processpool():
    """Yield a runner that executes a callable in a single-worker subprocess.

    The subprocess isolates emulator crashes from the test runner; a crashed
    or hung task is converted into an error tuple instead of killing pytest.
    """
    def run(fn, *args):
        global pool
        try:
            future = pool.submit(fn, *args)
            # 2-second budget per task; emulator loads should be fast.
            return future.result(2)
        except BrokenProcessPool:
            # Worker died (e.g. segfault in the emulator) -- replace the pool
            # so subsequent tests still have a live worker.
            pool = ProcessPoolExecutor(1)
            return [], [(args[0], 'subprocess crashed')]
        except TimeoutError:
            return [], [(args[0], 'task timed out')]
    yield run
    pool.shutdown()
def load(game, inttype):
    """Boot *game*'s ROM, run a single frame, then tear the emulator down.

    Returns an empty (warnings, errors) pair; failures surface as exceptions
    in the subprocess running this function.
    """
    failures = []
    rom_path = retro.data.get_romfile_path(game, inttype)
    emulator = retro.RetroEmulator(rom_path)
    emulator.step()
    del emulator
    gc.collect()
    return [], failures
def state(game, inttype):
    """Load every saved state for *game* and step one frame from each.

    Returns (warnings, errors); undecodable state files are reported as
    errors rather than aborting the whole game's check.
    """
    errors = []
    states = retro.data.list_states(game, inttype)
    if not states:
        return [], []
    rom = retro.data.get_romfile_path(game, inttype | retro.data.Integrations.STABLE)
    emu = retro.RetroEmulator(rom)
    for statefile in states:
        try:
            # States are stored gzip-compressed next to the game data.
            with gzip.open(retro.data.get_file_path(game, statefile + '.state', inttype), 'rb') as fh:
                state = fh.read()
        except (IOError, zlib.error):
            errors.append((game, 'state failed to decode: %s' % statefile))
            continue
        emu.set_state(state)
        emu.step()
    del emu
    gc.collect()
    return [], errors
def test_load(game, processpool):
    """Each integrated game's ROM must load and step in a subprocess."""
    warnings, errors = processpool(load, *game)
    handle(warnings, errors)
def test_state(game, processpool):
    """Each integrated game's saved states must decode and step in a subprocess."""
    warnings, errors = processpool(state, *game)
    handle(warnings, errors)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/cinema/util.py | def dictMerge(a, b):
for key, value in b.items():
if isinstance(value, dict):
if key in a:
dictMerge(a[key], value)
else:
a[key] = dict(value)
else:
a[key] = value
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/gba.py | # Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from .arm import ARMCore
from .core import Core, needsReset
from .tile import Sprite
from .memory import Memory
from . import createCallback
class GBA(Core):
    """Python wrapper around a native mGBA GBA core instance."""
    # Key bitmask constants mirrored from the C library.
    KEY_A = lib.GBA_KEY_A
    KEY_B = lib.GBA_KEY_B
    KEY_SELECT = lib.GBA_KEY_SELECT
    KEY_START = lib.GBA_KEY_START
    KEY_DOWN = lib.GBA_KEY_DOWN
    KEY_UP = lib.GBA_KEY_UP
    KEY_LEFT = lib.GBA_KEY_LEFT
    KEY_RIGHT = lib.GBA_KEY_RIGHT
    KEY_L = lib.GBA_KEY_L
    KEY_R = lib.GBA_KEY_R

    # Serial I/O (link cable) modes mirrored from the C library.
    SIO_NORMAL_8 = lib.SIO_NORMAL_8
    SIO_NORMAL_32 = lib.SIO_NORMAL_32
    SIO_MULTI = lib.SIO_MULTI
    SIO_UART = lib.SIO_UART
    SIO_JOYBUS = lib.SIO_JOYBUS
    SIO_GPIO = lib.SIO_GPIO

    def __init__(self, native):
        """Wrap *native*, an mCore pointer whose board is a struct GBA."""
        super(GBA, self).__init__(native)
        self._native = ffi.cast("struct GBA*", native.board)
        self.sprites = GBAObjs(self)
        self.cpu = ARMCore(self._core.cpu)
        # Modes that have had a SIO driver attached; detached in __del__.
        self._sio = set()

    @needsReset
    def _initCache(self, cache):
        # Associate a tile/video cache with this core's video subsystem.
        lib.GBAVideoCacheInit(cache)
        lib.GBAVideoCacheAssociate(cache, ffi.addressof(self._native.video))

    def _deinitCache(self, cache):
        lib.mCacheSetDeinit(cache)
        if self._wasReset:
            self._native.video.renderer.cache = ffi.NULL

    def _load(self):
        super(GBA, self)._load()
        self.memory = GBAMemory(self._core, self._native.memory.romSize)

    def attachSIO(self, link, mode=lib.SIO_MULTI):
        """Attach *link* (a SIO driver wrapper) to the given SIO mode."""
        self._sio.add(mode)
        lib.GBASIOSetDriver(ffi.addressof(self._native.sio), link._native, mode)

    def __del__(self):
        # Detach any drivers we attached so the C side does not call back
        # into freed Python objects.
        for mode in self._sio:
            lib.GBASIOSetDriver(ffi.addressof(self._native.sio), ffi.NULL, mode)
# Register the C-side trampolines that forward GBASIOPythonDriver lifecycle
# events (init/deinit/load/unload/writeRegister) to the Python driver
# objects defined below.
createCallback("GBASIOPythonDriver", "init")
createCallback("GBASIOPythonDriver", "deinit")
createCallback("GBASIOPythonDriver", "load")
createCallback("GBASIOPythonDriver", "unload")
createCallback("GBASIOPythonDriver", "writeRegister")
class GBASIODriver(object):
    """Base class for Python-implemented GBA serial I/O drivers.

    Subclasses override the lifecycle hooks; the defaults accept every
    operation and pass register writes through unchanged.
    """
    def __init__(self):
        # Keep a handle alive so the C callbacks can find this object;
        # the native driver struct is freed when garbage-collected.
        self._handle = ffi.new_handle(self)
        self._native = ffi.gc(lib.GBASIOPythonDriverCreate(self._handle), lib.free)

    def init(self):
        return True

    def deinit(self):
        pass

    def load(self):
        return True

    def unload(self):
        return True

    def writeRegister(self, address, value):
        # Default: do not modify the written value.
        return value
class GBASIOJOYDriver(GBASIODriver):
    """SIO driver speaking the JOY bus protocol."""
    # Command constants mirrored from the C library.
    RESET = lib.JOY_RESET
    POLL = lib.JOY_POLL
    TRANS = lib.JOY_TRANS
    RECV = lib.JOY_RECV

    def __init__(self):
        self._handle = ffi.new_handle(self)
        self._native = ffi.gc(lib.GBASIOJOYPythonDriverCreate(self._handle), lib.free)

    def sendCommand(self, cmd, data):
        """Send a JOY-bus command with up to five payload bytes.

        Returns the response bytes, or None when the native call reports
        no (or an invalid) response length.
        """
        buf = ffi.new('uint8_t[5]')
        # A payload shorter than five bytes is fine -- copy what there is.
        for i, octet in enumerate(data[:5]):
            buf[i] = octet
        length = lib.GBASIOJOYSendCommand(self._native, cmd, buf)
        if 0 < length <= 5:
            return bytes(buf[0:length])
        return None
class GBAMemory(Memory):
    """Full GBA address space plus named views of each memory region."""
    def __init__(self, core, romSize=lib.SIZE_CART0):
        # The whole 32-bit address space.
        super(GBAMemory, self).__init__(core, 0x100000000)
        self.bios = Memory(core, lib.SIZE_BIOS, lib.BASE_BIOS)
        self.wram = Memory(core, lib.SIZE_WORKING_RAM, lib.BASE_WORKING_RAM)
        self.iwram = Memory(core, lib.SIZE_WORKING_IRAM, lib.BASE_WORKING_IRAM)
        self.io = Memory(core, lib.SIZE_IO, lib.BASE_IO)
        self.palette = Memory(core, lib.SIZE_PALETTE_RAM, lib.BASE_PALETTE_RAM)
        self.vram = Memory(core, lib.SIZE_VRAM, lib.BASE_VRAM)
        self.oam = Memory(core, lib.SIZE_OAM, lib.BASE_OAM)
        # The cartridge appears at three mirrored wait-state bases; all use
        # the loaded ROM's size.
        self.cart0 = Memory(core, romSize, lib.BASE_CART0)
        self.cart1 = Memory(core, romSize, lib.BASE_CART1)
        self.cart2 = Memory(core, romSize, lib.BASE_CART2)
        # Convenience aliases for the primary cartridge mapping.
        self.cart = self.cart0
        self.rom = self.cart0
        self.sram = Memory(core, lib.SIZE_CART_SRAM, lib.BASE_CART_SRAM)
class GBASprite(Sprite):
    """A hardware sprite decoded from the three GBA OAM attribute words."""
    TILE_BASE = 0x800, 0x400
    PALETTE_BASE = 0x10, 1

    def __init__(self, obj):
        # obj.a/b/c are the raw 16-bit OAM attribute words.
        self._a = obj.a
        self._b = obj.b
        self._c = obj.c
        self.x = self._b & 0x1FF
        self.y = self._a & 0xFF
        self._shape = self._a >> 14
        self._size = self._b >> 14
        # Bit 13 of attribute 0 selects 256-color (8bpp) mode.
        self._256Color = bool(self._a & 0x2000)
        # Shape and size together index the hardware dimension table.
        self.width, self.height = lib.GBAVideoObjSizes[self._shape * 4 + self._size]
        self.tile = self._c & 0x3FF
        if self._256Color:
            # 8bpp tiles are double-width in VRAM, so halve the tile index;
            # the palette index is unused in this mode.
            self.paletteId = 0
            self.tile >>= 1
        else:
            self.paletteId = self._c >> 12
class GBAObjs:
    """Sequence-like view over the GBA's 128 OAM sprite slots."""
    def __init__(self, core):
        self._core = core
        self._obj = core._native.video.oam.obj

    def __len__(self):
        # The GBA hardware always has exactly 128 OAM entries.
        return 128

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError()
        sprite = GBASprite(self._obj[index])
        # Pick the 8bpp or 4bpp OBJ tile cache depending on the color mode.
        tiles = self._core.tiles[3 if sprite._256Color else 2]
        # Bit 6 of the first I/O register (DISPCNT) selects 1D tile mapping.
        map1D = bool(self._core._native.memory.io[0] & 0x40)
        sprite.constitute(tiles, 0 if map1D else 0x20)
        return sprite
|
MatPoliquin/retro | docker/linux/build_scripts/python-tag-abi-tag.py | # Utility script to print the python tag + the abi tag for a Python
# See PEP 425 for exactly what these are, but an example would be:
# cp27-cp27mu
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
print("{0}{1}-{2}".format(get_abbr_impl(), get_impl_ver(), get_abi_tag())) |
MatPoliquin/retro | conftest.py | <filename>conftest.py<gh_stars>1000+
import pytest
import retro.data
inttypes = {
'exp': retro.data.Integrations.EXPERIMENTAL_ONLY,
'contrib': retro.data.Integrations.CONTRIB_ONLY,
}
def pytest_collection_modifyitems(items):
    """Skip per-game tests whose ROM is not present locally.

    Parametrized ids look like ``test_load[Game_Platform]`` or
    ``test_load[Game_Platform_inttype]``; the bracketed keyword encodes the
    game name, platform, and optional integration directory.  When the
    corresponding ROM cannot be found, the test is marked skipped.

    The original defined an unused inner ``test`` helper; it has been
    removed, and the per-item logic flattened with guard clauses.
    """
    for item in items:
        if item.originalname not in ('test_load', 'test_rom', 'test_state', 'test_hash'):
            continue
        for key in item.keywords:
            # Only the keyword that forms this item's parametrization id
            # matters; other keywords/markers are ignored.
            if '[' + key + ']' not in item.nodeid:
                continue
            game = key.split('_')
            gamename = '%s-%s' % (game[0], game[1])
            try:
                retro.data.get_romfile_path(gamename, inttypes[game[2]] if len(game) > 2 else retro.data.Integrations.STABLE)
            except (FileNotFoundError, KeyError):
                item.add_marker(pytest.mark.skip)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/conftest.py | import errno
import itertools
import os
import os.path
import pytest
import yaml
def pytest_addoption(parser):
    """Register the cinema-test command-line options with pytest."""
    parser.addoption("--rebaseline", action="store_true", help="output a new baseline instead of testing")
    parser.addoption("--mark-failing", action="store_true", help="mark all failing tests as failing")
    parser.addoption("--mark-succeeding", action="store_true", help="unmark all succeeding tests marked as failing")
    parser.addoption("--output-diff", help="output diffs for failed tests to directory")
EXPECTED = 'expected_%04u.png'
RESULT = 'result_%04u.png'
DIFF = 'diff_%04u.png'
DIFF_NORM = 'diff_norm_%04u.png'
def pytest_exception_interact(node, call, report):
    """On a failing video test, optionally dump diff images and update manifests.

    With --output-diff, writes the expected/result/diff frames for the failed
    test; with --mark-failing, records ``fail: true`` in the test's manifest.
    """
    outroot = node.config.getoption("--output-diff")
    if report.failed and hasattr(node, 'funcargs'):
        vtest = node.funcargs.get('vtest')
        if outroot:
            if not vtest:
                return
            outdir = os.path.join(outroot, *vtest.fullPath)
            try:
                os.makedirs(outdir)
            except OSError as e:
                # Pre-exist is fine; anything else is a real error.
                if e.errno == errno.EEXIST and os.path.isdir(outdir):
                    pass
                else:
                    raise
            # vtest.diffs is a sequence of (diff, diffNorm) pairs; zip(*...)
            # transposes it into two parallel streams.
            for i, expected, result, diff, diffNorm in zip(itertools.count(), vtest.baseline, vtest.frames, *zip(*vtest.diffs)):
                result.save(os.path.join(outdir, RESULT % i))
                if expected:
                    expected.save(os.path.join(outdir, EXPECTED % i))
                    diff.save(os.path.join(outdir, DIFF % i))
                    diffNorm.save(os.path.join(outdir, DIFF_NORM % i))
        if node.config.getoption("--mark-failing"):
            try:
                with open(os.path.join(vtest.path, 'manifest.yml'), 'r') as f:
                    settings = yaml.safe_load(f)
            except IOError:
                settings = {}
            settings['fail'] = True
            with open(os.path.join(vtest.path, 'manifest.yml'), 'w') as f:
                yaml.dump(settings, f, default_flow_style=False)
|
MatPoliquin/retro | retro/cores/gba/tools/deploy-mac.py | <reponame>MatPoliquin/retro
#!/usr/bin/env python
from __future__ import print_function
import argparse
import errno
import os
import re
import shutil
import subprocess
qtPath = None
verbose = False
def splitPath(path):
    """Split *path* into an ordered list of components.

    Repeatedly applies os.path.split so a leading root ('/') is preserved
    as the first component; an empty path yields an empty list.
    """
    parts = []
    while True:
        remainder, tail = os.path.split(path)
        if tail:
            parts.append(tail)
            path = remainder
        else:
            if remainder:
                parts.append(remainder)
            break
    parts.reverse()
    return parts
def joinPath(path):
    """Join a list of components (as produced by splitPath) into one path.

    The original used the bare name ``reduce``, which is a builtin only on
    Python 2; on Python 3 it raises NameError.  os.path.join performs the
    same left-fold directly.  An empty list yields ''.
    """
    return os.path.join('', *path)
def findFramework(path):
    """Trim *path* (a component list) back to its '.framework' directory.

    Mutates *path* in place: components below the framework bundle are
    popped off and returned separately in their original order.
    Returns (framework_path, inner_components); if no framework component
    exists the first element is the emptied list.
    """
    inner = []
    while path:
        if path[-1].endswith('.framework'):
            break
        inner.append(path.pop())
    inner.reverse()
    return path, inner
def findQtPath(path):
    """Given a path to a Qt framework binary, return the Qt install root.

    Strips the framework-internal components, then drops the last two
    components (the 'lib'-style directory and the .framework bundle).
    """
    parent, child = findFramework(splitPath(path))
    return joinPath(parent[:-2])
def makedirs(path):
    """Create *path* and any missing parent directories.

    An already-existing directory is not an error.  The original re-derived
    each intermediate directory via splitPath/joinPath (and joinPath's
    ``reduce`` breaks on Python 3); os.makedirs does the same job, with the
    EEXIST check matching this file's existing errno-handling style.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def parseOtoolLine(line, execPath, root):
    """Parse one ``otool -L`` dependency line into copy/rewrite paths.

    Returns (source, destination, old_install_name, new_install_name), or
    four Nones for lines that are not dependencies or that reference system
    libraries which should not be bundled.
    """
    # Dependency lines are tab-indented; anything else is a header.
    if not line.startswith('\t'):
        return None, None, None, None
    line = line[1:]
    match = re.match(r'(\S.*) \(compatibility version.*\)', line)
    path = match.group(1)
    split = splitPath(path)
    newExecPath = ['@executable_path', '..', 'Frameworks']
    newPath = execPath[:-1]
    newPath.append('Frameworks')
    # System libraries stay where they are.
    if split[:3] == ['/', 'usr', 'lib'] or split[:2] == ['/', 'System']:
        return None, None, None, None
    if split[0] == '@executable_path':
        split[:1] = execPath
    # Absolute paths that do not exist are retried under the search root.
    if split[0] == '/' and not os.access(joinPath(split), os.F_OK):
        split[:1] = root
    oldPath = os.path.realpath(joinPath(split))
    split = splitPath(oldPath)
    # Frameworks keep their internal directory layout; dylibs are flat.
    isFramework = False
    if not split[-1].endswith('.dylib'):
        isFramework = True
        split, framework = findFramework(split)
    newPath.append(split[-1])
    newExecPath.append(split[-1])
    if isFramework:
        newPath.extend(framework)
        newExecPath.extend(framework)
        split.extend(framework)
    newPath = joinPath(newPath)
    newExecPath = joinPath(newExecPath)
    return joinPath(split), newPath, path, newExecPath
def updateMachO(bin, execPath, root):
    """Copy *bin*'s non-system dependencies into the bundle and rewrite its
    Mach-O load commands (recursively) to @executable_path-relative names.

    NOTE(review): subprocess.check_output returns bytes on Python 3, so
    ``otoolOutput.split('\\n')`` assumes a Python 2 str -- confirm the target
    interpreter, or decode the output before splitting.
    """
    global qtPath
    otoolOutput = subprocess.check_output([otool, '-L', bin])
    toUpdate = []
    for line in otoolOutput.split('\n'):
        oldPath, newPath, oldExecPath, newExecPath = parseOtoolLine(line, execPath, root)
        if not newPath:
            continue
        if os.access(newPath, os.F_OK):
            if verbose:
                print('Skipping copying {}, already done.'.format(oldPath))
            # Already bundled: still rewrite the load command below.
            newPath = None
        elif os.path.abspath(oldPath) != os.path.abspath(newPath):
            if verbose:
                print('Copying {} to {}...'.format(oldPath, newPath))
            parent, child = os.path.split(newPath)
            makedirs(parent)
            shutil.copy2(oldPath, newPath)
            # Ensure the copy is writable enough for install_name_tool.
            os.chmod(newPath, 0o644)
        toUpdate.append((newPath, oldExecPath, newExecPath))
        # Remember where Qt lives so plugins can be located later.
        if not qtPath and 'Qt' in oldPath:
            qtPath = findQtPath(oldPath)
            if verbose:
                print('Found Qt path at {}.'.format(qtPath))
    args = [installNameTool]
    for path, oldExecPath, newExecPath in toUpdate:
        if path != bin:
            if path:
                # Freshly-copied dependency: fix its own dependencies too.
                updateMachO(path, execPath, root)
            if verbose:
                print('Updating Mach-O load from {} to {}...'.format(oldExecPath, newExecPath))
            args.extend(['-change', oldExecPath, newExecPath])
        else:
            if verbose:
                print('Updating Mach-O id from {} to {}...'.format(oldExecPath, newExecPath))
            args.extend(['-id', newExecPath])
    args.append(bin)
    subprocess.check_call(args)
if __name__ == '__main__':
    # Command-line entry point: bundle a macOS .app's dependencies.
    parser = argparse.ArgumentParser()
    parser.add_argument('-R', '--root', metavar='ROOT', default='/', help='root directory to search')
    parser.add_argument('-I', '--install-name-tool', metavar='INSTALL_NAME_TOOL', default='install_name_tool', help='path to install_name_tool')
    parser.add_argument('-O', '--otool', metavar='OTOOL', default='otool', help='path to otool')
    parser.add_argument('-p', '--qt-plugins', metavar='PLUGINS', default='', help='Qt plugins to include (comma-separated)')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='output more information')
    parser.add_argument('bundle', help='application bundle to deploy')
    args = parser.parse_args()
    # Module-level settings consumed by updateMachO.
    otool = args.otool
    installNameTool = args.install_name_tool
    verbose = args.verbose
    # Start from a clean Frameworks directory; absence is fine.
    try:
        shutil.rmtree(os.path.join(args.bundle, 'Contents/Frameworks/'))
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    for executable in os.listdir(os.path.join(args.bundle, 'Contents/MacOS')):
        if executable.endswith('.dSYM'):
            continue
        fullPath = os.path.join(args.bundle, 'Contents/MacOS/', executable)
        updateMachO(fullPath, splitPath(os.path.join(args.bundle, 'Contents/MacOS')), splitPath(args.root))
    if args.qt_plugins:
        # Rebuild the PlugIns tree and point Qt at it via qt.conf.
        try:
            shutil.rmtree(os.path.join(args.bundle, 'Contents/PlugIns/'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        makedirs(os.path.join(args.bundle, 'Contents/PlugIns'))
        makedirs(os.path.join(args.bundle, 'Contents/Resources'))
        with open(os.path.join(args.bundle, 'Contents/Resources/qt.conf'), 'w') as conf:
            conf.write('[Paths]\nPlugins = PlugIns\n')
        plugins = args.qt_plugins.split(',')
        for plugin in plugins:
            plugin = plugin.strip()
            kind, plug = os.path.split(plugin)
            newDir = os.path.join(args.bundle, 'Contents/PlugIns/', kind)
            makedirs(newDir)
            newPath = os.path.join(newDir, plug)
            # qtPath was discovered while bundling the main executables.
            shutil.copy2(os.path.join(qtPath, 'plugins', plugin), newPath)
            updateMachO(newPath, splitPath(os.path.join(args.bundle, 'Contents/MacOS')), splitPath(args.root))
MatPoliquin/retro | retro/examples/retro_interactive.py | import argparse
import retro
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from .interactive import Interactive
class RetroInteractive(Interactive):
    """
    Interactive setup for retro games
    """
    def __init__(self, game, state, scenario):
        def make_env():
            return retro.make(game=game, state=state, scenario=scenario)
        # Make one throwaway env just to read its button layout; the env
        # actually used for stepping lives inside the subprocess vec env.
        env = make_env()
        self._buttons = env.buttons
        env.close()
        venv = SubprocVecEnv([make_env])
        super().__init__(venv=venv, sync=False, tps=60, aspect_ratio=4/3)

    def get_screen(self, _obs, venv):
        # Render through the emulator rather than using the raw observation.
        return venv.render(mode='rgb_array')

    def keys_to_act(self, keys):
        """Translate currently-held key names into per-button booleans."""
        # Button name -> whether its mapped keyboard key is held right now.
        inputs = {
            None: False,
            'BUTTON': 'Z' in keys,
            'A': 'Z' in keys,
            'B': 'X' in keys,
            'C': 'C' in keys,
            'X': 'A' in keys,
            'Y': 'S' in keys,
            'Z': 'D' in keys,
            'L': 'Q' in keys,
            'R': 'W' in keys,
            'UP': 'UP' in keys,
            'DOWN': 'DOWN' in keys,
            'LEFT': 'LEFT' in keys,
            'RIGHT': 'RIGHT' in keys,
            'MODE': 'TAB' in keys,
            'SELECT': 'TAB' in keys,
            'RESET': 'ENTER' in keys,
            'START': 'ENTER' in keys,
        }
        return [inputs[b] for b in self._buttons]
def main():
    """Parse command-line options and launch the interactive session."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', default='SonicTheHedgehog-Genesis')
    parser.add_argument('--state', default=retro.State.DEFAULT)
    parser.add_argument('--scenario', default='scenario')
    args = parser.parse_args()
    ia = RetroInteractive(game=args.game, state=args.state, scenario=args.scenario)
    ia.run()
if __name__ == '__main__':
main()
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/tests/mgba/test_core.py | import pytest
def test_core_import():
try:
import mgba.core
except:
raise AssertionError
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/image.py | <reponame>MatPoliquin/retro
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import png
try:
import PIL.Image as PImage
except ImportError:
pass
class Image:
    """A width x height pixel buffer backed by a native color_t array."""
    def __init__(self, width, height, stride=0, alpha=False):
        self.width = width
        self.height = height
        self.stride = stride
        self.alpha = alpha
        self.constitute()

    def constitute(self):
        """(Re)allocate the backing buffer; a stride <= 0 defaults to width."""
        if self.stride <= 0:
            self.stride = self.width
        self.buffer = ffi.new("color_t[{}]".format(self.stride * self.height))

    def savePNG(self, f):
        """Write the image as PNG to file object *f*; return True on success."""
        p = png.PNG(f, mode=png.MODE_RGBA if self.alpha else png.MODE_RGB)
        success = p.writeHeader(self)
        success = success and p.writePixels(self)
        p.writeClose()
        return success

    # toPIL only exists when PIL was importable at module load time.
    if 'PImage' in globals():
        def toPIL(self):
            """Convert to a PIL image without copying the underlying buffer."""
            type = "RGBA" if self.alpha else "RGBX"
            return PImage.frombytes(type, (self.width, self.height), ffi.buffer(self.buffer), "raw",
                                    type, self.stride * 4)
def u16ToU32(c):
    """Expand a 16-bit ABGR1555 color to 32-bit ABGR8888.

    Each 5-bit channel shifts into the low bits of its 8-bit slot; the
    single alpha bit becomes 0x00 or 0xFF.
    """
    red = (c & 0x1F) << 3
    green = ((c >> 5) & 0x1F) << 11
    blue = ((c >> 10) & 0x1F) << 19
    alpha = ((c >> 15) & 1) * 0xFF << 24
    return red | green | blue | alpha
def u32ToU16(c):
    """Pack a 32-bit ABGR8888 color down to 16-bit ABGR1555.

    Keeps the top 5 bits of each channel and the top bit of alpha.
    """
    packed = (c >> 3) & 0x1F
    packed |= ((c >> 11) & 0x1F) << 5
    packed |= ((c >> 19) & 0x1F) << 10
    packed |= (c >> 31) << 15
    return packed
# Pick color conversion aliases based on the build's native color_t width:
# on 16-bit builds colors are already ABGR1555; on 32-bit builds they are
# already ABGR8888, so one direction of each pair is the identity.
if ffi.sizeof("color_t") == 2:
    def colorToU16(c):
        return c
    colorToU32 = u16ToU32
    def u16ToColor(c):
        return c
    u32ToColor = u32ToU16
else:
    def colorToU32(c):
        return c
    colorToU16 = u32ToU16
    def u32ToColor(c):
        return c
    u16ToColor = u16ToU32
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/cinema/test.py | <gh_stars>1000+
import os, os.path
import mgba.core, mgba.image
import cinema.movie
import itertools
import glob
import re
import yaml
from copy import deepcopy
from cinema import VideoFrame
from cinema.util import dictMerge
class CinemaTest(object):
    """One cinema regression test rooted at a directory on disk.

    Settings come from the optional ``manifest.yml`` in the test directory,
    merged over whatever *settings* the caller supplies.

    The original declared mutable default arguments (``settings={}``), so
    every instance created without explicit settings shared -- and mutated --
    the same dict; ``None`` sentinels fix that.
    """
    # Pattern matching the test's input file (movie or bare ROM).
    TEST = 'test.(mvl|gb|gba|nds)'

    def __init__(self, path, root, settings=None):
        self.fullPath = path or []
        self.path = os.path.abspath(os.path.join(root, *self.fullPath))
        self.root = root
        # Use fullPath so a None path yields '' instead of raising.
        self.name = '.'.join(self.fullPath)
        self.settings = {} if settings is None else settings
        try:
            with open(os.path.join(self.path, 'manifest.yml'), 'r') as f:
                dictMerge(self.settings, yaml.safe_load(f))
        except IOError:
            # No manifest is fine; the supplied settings stand.
            pass
        self.tests = {}

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)

    def setUp(self):
        """Locate the test input, load it into an mGBA core, and reset."""
        results = [f for f in glob.glob(os.path.join(self.path, 'test.*')) if re.search(self.TEST, f)]
        self.core = mgba.core.loadPath(results[0])
        if 'config' in self.settings:
            self.config = mgba.core.Config(defaults=self.settings['config'])
            self.core.loadConfig(self.config)
        self.core.reset()

    def addTest(self, name, cls=None, settings=None):
        """Create and register a child test named *name*; return it."""
        cls = cls or self.__class__
        newSettings = deepcopy(self.settings)
        dictMerge(newSettings, settings or {})
        self.tests[name] = cls(self.fullPath + [name], self.root, newSettings)
        return self.tests[name]

    def outputSettings(self):
        """Translate manifest keys into the tracer's keyword arguments."""
        outputSettings = {}
        if 'frames' in self.settings:
            outputSettings['limit'] = self.settings['frames']
        if 'skip' in self.settings:
            outputSettings['skip'] = self.settings['skip']
        return outputSettings

    def __lt__(self, other):
        # Order tests by their on-disk location.
        return self.path < other.path
class VideoTest(CinemaTest):
    """A cinema test that compares emitted video frames against baselines."""
    BASELINE = 'baseline_%04u.png'

    def setUp(self):
        super(VideoTest, self).setUp();
        self.tracer = cinema.movie.Tracer(self.core)

    def generateFrames(self):
        """Yield (baseline, frame, (diff, diffNorm)) per emitted frame.

        A missing baseline image yields (None, frame, (None, None)) so new
        frames can still be inspected or rebaselined.
        """
        for i, frame in zip(itertools.count(), self.tracer.video(**self.outputSettings())):
            try:
                baseline = VideoFrame.load(os.path.join(self.path, self.BASELINE % i))
                yield baseline, frame, VideoFrame.diff(baseline, frame)
            except IOError:
                yield None, frame, (None, None)

    def test(self):
        """Fail if any diff image contains a non-zero pixel."""
        self.baseline, self.frames, self.diffs = zip(*self.generateFrames())
        assert not any(any(diffs[0].image.convert("L").point(bool).getdata()) for diffs in self.diffs)

    def generateBaseline(self):
        """Write the current frames out as the new baseline images."""
        for i, frame in zip(itertools.count(), self.tracer.video(**self.outputSettings())):
            frame.save(os.path.join(self.path, self.BASELINE % i))
def gatherTests(root=os.getcwd()):
    """Walk *root* for test input files and build the CinemaTest tree.

    Each directory containing a file matching CinemaTest.TEST becomes a
    VideoTest leaf; intermediate directories become plain CinemaTest nodes.
    NOTE: the default ``root`` is evaluated once at import time.
    """
    tests = CinemaTest([], root)
    for path, _, files in os.walk(root):
        test = [f for f in files if re.match(CinemaTest.TEST, f)]
        if not test:
            continue
        # Convert the directory's path relative to root into tree components.
        prefix = os.path.commonprefix([path, root])
        suffix = path[len(prefix)+1:]
        testPath = suffix.split(os.sep)
        testRoot = tests
        for component in testPath[:-1]:
            newTest = testRoot.tests.get(component)
            if not newTest:
                newTest = testRoot.addTest(component)
            testRoot = newTest
        testRoot.addTest(testPath[-1], VideoTest)
    return tests
|
MatPoliquin/retro | tests/test_paths.py | <reponame>MatPoliquin/retro
import retro
import os
import pytest
@pytest.fixture
def custom_cleanup():
    """Guarantee the custom integration path list is empty around each test.

    ``pytest.yield_fixture`` is deprecated (and removed in modern pytest);
    plain ``pytest.fixture`` has supported yield-style fixtures since
    pytest 3.0.
    """
    retro.data.Integrations.clear_custom_paths()
    assert not retro.data.Integrations.CUSTOM_ONLY.paths
    yield
    retro.data.Integrations.clear_custom_paths()
    assert not retro.data.Integrations.CUSTOM_ONLY.paths
def test_basic_paths():
    """Each Integrations constant exposes its expected search-path list."""
    assert retro.data.Integrations.STABLE.paths == ['stable']
    assert retro.data.Integrations.CONTRIB_ONLY.paths == ['contrib']
    assert retro.data.Integrations.EXPERIMENTAL_ONLY.paths == ['experimental']
    assert not retro.data.Integrations.CUSTOM_ONLY.paths
    assert retro.data.Integrations.CONTRIB.paths == ['contrib', 'stable']
    assert retro.data.Integrations.EXPERIMENTAL.paths == ['experimental', 'stable']
    assert retro.data.Integrations.CUSTOM.paths == ['stable']
    assert retro.data.Integrations.ALL.paths == ['contrib', 'experimental', 'stable']
def test_custom_path(custom_cleanup):
    """Custom paths accumulate in order and prefix the CUSTOM search list."""
    assert not retro.data.Integrations.CUSTOM_ONLY.paths
    assert retro.data.Integrations.CUSTOM.paths == ['stable']
    retro.data.Integrations.add_custom_path('a')
    assert retro.data.Integrations.CUSTOM_ONLY.paths == ['a']
    assert retro.data.Integrations.CUSTOM.paths == ['a', 'stable']
    retro.data.Integrations.add_custom_path('b')
    assert retro.data.Integrations.CUSTOM_ONLY.paths == ['a', 'b']
    assert retro.data.Integrations.CUSTOM.paths == ['a', 'b', 'stable']
def test_custom_path_default(custom_cleanup):
    """add_custom_integration affects DEFAULT; reset() restores DEFAULT only."""
    assert not retro.data.Integrations.CUSTOM_ONLY.paths
    assert retro.data.Integrations.CUSTOM.paths == ['stable']
    assert retro.data.Integrations.DEFAULT.paths == ['stable']
    retro.data.add_custom_integration('a')
    assert retro.data.Integrations.CUSTOM_ONLY.paths == ['a']
    assert retro.data.Integrations.CUSTOM.paths == ['a', 'stable']
    assert retro.data.Integrations.DEFAULT.paths == ['a', 'stable']
    retro.data.DefaultIntegrations.reset()
    # Custom paths survive the reset; only DEFAULT reverts.
    assert retro.data.Integrations.CUSTOM_ONLY.paths == ['a']
    assert retro.data.Integrations.CUSTOM.paths == ['a', 'stable']
    assert retro.data.Integrations.DEFAULT.paths == ['stable']
def test_custom_path_absolute(custom_cleanup):
    """An absolute custom path resolves files directly under that directory."""
    assert not retro.data.get_file_path('', 'Dekadence-Dekadrive.md', inttype=retro.data.Integrations.CUSTOM_ONLY)
    test_rom_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'roms')
    retro.data.Integrations.add_custom_path(test_rom_dir)
    # get_file_path returns the full path of the file itself.
    assert retro.data.get_file_path('', 'Dekadence-Dekadrive.md', inttype=retro.data.Integrations.CUSTOM_ONLY) == \
        os.path.join(test_rom_dir, 'Dekadence-Dekadrive.md')
def test_custom_path_relative(custom_cleanup):
    """A relative custom path resolves the same files as the matching builtin."""
    assert not retro.data.get_file_path('Airstriker-Genesis', 'rom.md', inttype=retro.data.Integrations.CUSTOM_ONLY)
    retro.data.Integrations.add_custom_path(retro.data.Integrations.STABLE.paths[0])
    assert retro.data.get_file_path('Airstriker-Genesis', 'rom.md', inttype=retro.data.Integrations.CUSTOM_ONLY) == \
        retro.data.get_file_path('Airstriker-Genesis', 'rom.md', inttype=retro.data.Integrations.STABLE)
|
MatPoliquin/retro | retro/testing/tools.py | import glob
import hashlib
import json
import os
import re
import retro.data
def load_whitelist(game, inttype):
    """Load the warning whitelist from a game's metadata.json.

    Returns (whitelist, errors); on failure the whitelist is None and
    errors identifies the offending file.

    The original error paths referenced an undefined name
    (``metadata_file``), so a decode/IO failure raised NameError instead of
    reporting; the identifier below mirrors the other verify_* helpers.
    """
    file = os.path.join(str(inttype), game, 'metadata.json')
    try:
        with open(retro.data.get_file_path(game, 'metadata.json', inttype | retro.data.Integrations.STABLE)) as f:
            whitelist = json.load(f).get('whitelist', {})
    except json.JSONDecodeError:
        return None, [(file, 'fail decode')]
    except IOError:
        return None, [(file, 'fail I/O')]
    return whitelist, []
def scan_missing():
    """Return (game, filename) pairs for required integration files that are absent.

    Checks each integrated game for its JSON metadata files, at least one
    saved state, and the ROM checksum file.
    """
    missing = []
    for game in retro.data.list_games(retro.data.Integrations.ALL):
        for required in ('data.json', 'scenario.json', 'metadata.json'):
            if not retro.data.get_file_path(game, required, retro.data.Integrations.ALL):
                missing.append((game, required))
        if not retro.data.list_states(game, retro.data.Integrations.ALL):
            missing.append((game, '*.state'))
        if not retro.data.get_file_path(game, 'rom.sha', retro.data.Integrations.ALL):
            missing.append((game, 'rom.sha'))
    return missing
def verify_data(game, inttype, raw=None):
    """Validate a game's data.json variable definitions.

    Returns (warnings, errors).  Warnings already whitelisted in
    metadata.json are suppressed; whitelisted warnings that no longer fire
    become errors so stale whitelist entries get cleaned up.
    """
    file = os.path.join(str(inttype), game, 'data.json')
    path = retro.data.get_file_path(game, 'data.json', inttype)
    if not path:
        return [], []
    try:
        if not raw:
            with open(path) as f:
                data = json.load(f)
        else:
            data = json.loads(raw)
    except json.JSONDecodeError:
        return [], [(file, 'fail decode')]
    except IOError:
        return [], [(file, 'fail I/O')]
    whitelist, errors = load_whitelist(game, inttype)
    if errors:
        return [], errors
    warnings = []
    data = data.get('info')
    if not data:
        return [], [(file, 'missing info')]
    for variable, definition in data.items():
        if 'address' not in definition:
            errors.append((file, 'missing address for %s' % variable))
        if 'type' not in definition:
            errors.append((file, 'missing type for %s' % variable))
        else:
            # Type strings encode endianness/ordering (|, >, <, =) plus a
            # signedness/format letter (d/i/n/u) and a byte width.
            if not re.match(r'\|[dinu]1|(>[<=]?|<[>=]?|=[><]?)[dinu][2-8]', definition['type']):
                errors.append((file, 'invalid type %s for %s' % (definition['type'], variable)))
            elif re.match(r'([><=]{2}|=[><]|<[>=]|>[<=])[dinu][2-8]|[><=]{1,2}d[5-8]', definition['type']):
                warnings.append((file, 'suspicious type %s for %s' % (definition['type'], variable)))
    # Heuristics: lives should be a single byte; scores are rarely tiny or signed.
    if 'lives' in data and data['lives'].get('type', '') not in ('|u1', '|i1', '|d1'):
        warnings.append((file, 'suspicious type %s for lives' % data['lives']['type']))
    if 'score' in data and (data['score'].get('type', '??')[1:] in ('u1', 'd1', 'n1', 'n2') or 'i' in data['score'].get('type', '')):
        warnings.append((file, 'suspicious type %s for score' % data['score']['type']))
    # Apply the whitelist, and flag whitelist entries that no longer match.
    whitelist = {(file, w) for w in whitelist.get('data.json', [])}
    all_warnings = {(file, w) for (file, w) in warnings}
    warnings = list(all_warnings - whitelist)
    errors.extend(('metadata.json', 'missing warning "%s: %s"' % (file, w)) for (file, w) in whitelist - all_warnings)
    return warnings, errors
def verify_scenario(game, inttype, scenario='scenario', raw=None, dataraw=None):
    """Validate a game's scenario JSON against its data.json variables.

    Returns (warnings, errors).  Whitelisted warnings are suppressed, and
    whitelisted warnings that no longer fire become errors.

    Bug fixed: the done-condition op checks previously compared the literal
    string ``'op'`` (e.g. ``'op' == 'not-equal'``), which is always False,
    so the zero-reference op warnings could never fire; they now inspect
    ``definition['op']``.
    """
    file = os.path.join(str(inttype), game, '%s.json' % scenario)
    path = retro.data.get_file_path(game, '%s.json' % scenario, inttype)
    if not path:
        return [], []
    try:
        if not raw:
            with open(path) as f:
                scen = json.load(f)
        else:
            scen = json.loads(raw)
    except json.JSONDecodeError:
        return [], [(file, 'fail decode')]
    except IOError:
        return [], [(file, 'fail I/O')]
    whitelist, errors = load_whitelist(game, inttype)
    if errors:
        return [], errors
    warnings = []
    # Reward definitions: either a list of per-player rewards or a single one.
    if 'rewards' in scen:
        for i, r in enumerate(scen['rewards']):
            if 'variables' not in r and 'script' not in r:
                warnings.append((file, 'missing reward in rewards[%d]' % i))
            elif 'variables' in r and 'script' in r:
                warnings.append((file, 'both variables and script present in rewards[%d]' % i))
        if 'reward' in scen:
            warnings.append((file, 'reward and rewards both present'))
    elif 'reward' not in scen or ('variables' not in scen['reward'] and 'script' not in scen['reward']):
        warnings.append((file, 'missing reward'))
    elif 'variables' in scen['reward'] and 'script' in scen['reward']:
        warnings.append((file, 'both variables and script present in reward'))
    if 'done' not in scen or ('variables' not in scen['done'] and 'script' not in scen['done'] and 'nodes' not in scen['done']):
        warnings.append((file, 'missing done'))
    # Cross-check referenced variables against data.json, if it decodes.
    try:
        if not dataraw:
            datafile = retro.data.get_file_path(game, 'data.json', inttype=inttype | retro.data.Integrations.STABLE)
            with open(datafile) as f:
                data = json.load(f)
        else:
            data = json.loads(dataraw)
        data = data.get('info')
        reward = scen.get('reward')
        done = scen.get('done')
        if reward and 'variables' in reward:
            for variable, definition in reward['variables'].items():
                if variable not in data:
                    errors.append((file, 'invalid variable %s' % variable))
                if not definition:
                    errors.append((file, 'invalid definition %s' % variable))
                    continue
                if 'reward' not in definition and 'penalty' not in definition:
                    errors.append((file, 'blank reward %s' % variable))
        if done and 'variables' in done:
            if 'score' in done['variables']:
                warnings.append((file, 'suspicious variable in done condition: score'))
            if 'health' in done['variables'] and 'lives' in done['variables'] and 'condition' not in done:
                warnings.append((file, 'suspicious done condition: health OR lives'))
            if done.get('condition', 'any') == 'all' and (len(done['variables']) + len(done.get('nodes', {}))) < 2:
                errors.append((file, 'incorrect done condition all with only 1 check'))
            if done.get('condition', 'any') == 'any' and (len(done['variables']) + len(done.get('nodes', {}))) > 2:
                warnings.append((file, 'suspicious done condition any with more than 2 checks'))
            for variable, definition in done['variables'].items():
                if 'op' not in definition:
                    errors.append((file, 'invalid done condition %s' % variable))
                elif definition.get('reference', 0) == 0:
                    # A zero reference usually wants the dedicated zero/nonzero/
                    # sign ops rather than an explicit comparison against 0.
                    op = definition['op']
                    if op in ('equal', 'negative-equal'):
                        warnings.append((file, 'incorrect op: zero for %s' % variable))
                    elif op == 'not-equal':
                        warnings.append((file, 'incorrect op: nonzero for %s' % variable))
                    elif op == 'less-than':
                        warnings.append((file, 'incorrect op: negative for %s' % variable))
                    elif op == 'greater-than':
                        warnings.append((file, 'incorrect op: positive for %s' % variable))
                if data:
                    if variable not in data:
                        errors.append((file, 'invalid variable %s' % variable))
                    else:
                        if 'i' not in data[variable].get('type', '') and definition.get('op', '') == 'negative' and definition.get('measurement') != 'delta':
                            errors.append((file, 'op: negative on unsigned %s' % variable))
    except (json.JSONDecodeError, IOError):
        pass
    # Apply the whitelist, and flag whitelist entries that no longer match.
    whitelist = {(file, w) for w in whitelist.get(os.path.split(file)[-1], [])}
    all_warnings = {(file, w) for (file, w) in warnings}
    warnings = list(all_warnings - whitelist)
    errors.extend(('metadata.json', 'missing warning "%s: %s"' % (file, w)) for (file, w) in whitelist - all_warnings)
    return warnings, errors
def verify_default_state(game, inttype, raw=None):
    """Check that a game's metadata declares a valid default save state.

    Reads metadata.json (or parses *raw* if given) and verifies the
    declared default_state exists among the game's stable states.
    Returns a (warnings, errors) pair of (file, message) tuple lists.
    """
    file = os.path.join(str(inttype), game, 'metadata.json')
    path = retro.data.get_file_path(game, 'metadata.json', inttype)
    if not path:
        return [], []
    try:
        if raw:
            metadata = json.loads(raw)
        else:
            with open(path) as f:
                metadata = json.load(f)
    except json.JSONDecodeError:
        return [], [(file, 'fail decode')]
    except IOError:
        return [], []

    state = metadata.get('default_state')
    if not state:
        return [], [(file, 'default state missing')]
    errors = []
    if state not in retro.data.list_states(game, inttype | retro.data.Integrations.STABLE):
        errors.append((file, 'invalid default state %s' % state))
    return [], errors
def verify_hash_collisions():
    """Report games whose rom.sha entries collide with another game's.

    Returns ([], errors) where errors lists a (game, 'sha duplicate')
    tuple for every game sharing a SHA with at least one other game.
    """
    games_by_sha = {}
    for game in retro.data.list_games(retro.data.Integrations.ALL):
        shafile = retro.data.get_file_path(game, 'rom.sha', retro.data.Integrations.ALL)
        try:
            with open(os.path.join(shafile, 'rom.sha')) as f:
                shas = f.read().strip().split('\n')
        except IOError:
            continue
        for sha in shas:
            games_by_sha.setdefault(sha, []).append(game)
    errors = [
        (game, 'sha duplicate')
        for games in games_by_sha.values() if len(games) >= 2
        for game in games
    ]
    return [], errors
def verify_genesis(game, inttype):
    """Validate a Genesis ROM: extension must be .md and the header must
    carry the SEGA magic at offset 0x100.

    Returns a (warnings, errors) pair of (game, message) tuple lists.
    """
    whitelist, errors = load_whitelist(game, inttype)
    if errors:
        return [], errors
    warnings = []
    rom = retro.data.get_romfile_path(game, inttype=inttype)
    # Extension is checked before the whitelist; a whitelisted 'rom.md'
    # entry below discards any error collected here.
    if not rom.endswith('.md'):
        errors.append((game, 'invalid extension for %s' % rom))
    if 'rom.md' in whitelist:
        return [], []
    with open(rom, 'rb') as f:
        header = f.read(512)
    # Genesis headers contain 'SEGA' at 0x100, sometimes shifted by a space.
    if header[0x100:0x105] not in (b'SEGA ', b' SEGA'):
        errors.append((game, 'invalid genesis rom'))
    return warnings, errors
def verify_extension(game, inttype):
    """Check that the ROM extension maps to the platform suffix embedded in
    the game name (via retro.data.EMU_EXTENSIONS).

    Returns a (warnings, errors) pair of (game, message) tuple lists.
    """
    whitelist, errors = load_whitelist(game, inttype)
    if errors:
        return [], errors
    warnings = []
    rom = os.path.split(retro.data.get_romfile_path(game, inttype=inttype))[-1]
    platform = retro.data.EMU_EXTENSIONS.get(os.path.splitext(rom)[-1])
    if not platform or not game.endswith('-%s' % platform):
        errors.append((game, 'invalid extension for %s' % rom))
    # A whitelisted ROM filename discards any error collected above.
    if rom in whitelist:
        return [], []
    return warnings, errors
def verify_rom(game, inttype):
    """Dispatch ROM validation: Genesis games get header checks, everything
    else just an extension check.

    Returns a (warnings, errors) pair of (game, message) tuple lists.
    """
    try:
        retro.data.get_romfile_path(game, inttype=inttype)
    except FileNotFoundError:
        return [], [(game, 'ROM file missing')]
    if game.endswith('-Genesis'):
        return verify_genesis(game, inttype)
    return verify_extension(game, inttype)
|
MatPoliquin/retro | scripts/import_sega_classics.py | <filename>scripts/import_sega_classics.py
#!/usr/bin/env python
"""Thin launcher for the Sega Classics import script."""
from retro.scripts.import_sega_classics import main

# Guard the call so merely importing this module (e.g. by tooling) does not
# kick off the import process; behavior when executed as a script is unchanged.
if __name__ == '__main__':
    main()
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/arm.py | # Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
class _ARMRegisters:
    """Sequence-style view over an ARM core's general-purpose registers."""
    def __init__(self, cpu):
        self._cpu = cpu

    def __getitem__(self, r):
        # Reading allows registers up to and including ARM_PC (r15).
        if r > lib.ARM_PC:
            raise IndexError("Register out of range")
        return self._cpu._native.gprs[r]

    def __setitem__(self, r, value):
        # Writing excludes ARM_PC. NOTE(review): asymmetric with __getitem__
        # — presumably intentional (writing PC needs special handling in the
        # native core); confirm before unifying the bounds.
        if r >= lib.ARM_PC:
            raise IndexError("Register out of range")
        self._cpu._native.gprs[r] = value
class ARMCore:
    """Python wrapper around a native `struct ARMCore*`."""
    def __init__(self, native):
        self._native = ffi.cast("struct ARMCore*", native)
        self.gprs = _ARMRegisters(self)
        # NOTE(review): cpsr/spsr are copied once at construction, not live
        # views — they will not track later native changes; confirm intended.
        self.cpsr = self._native.cpsr
        self.spsr = self._native.spsr
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/gb.py | # Copyright (c) 2013-2017 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from .lr35902 import LR35902Core
from .core import Core, needsReset
from .memory import Memory
from .tile import Sprite
from . import createCallback
class GB(Core):
    """Core subclass wrapping a native Game Boy (`struct GB*`) instance."""
    # NOTE(review): these reuse the GBA_KEY_* enum values for GB input —
    # presumably the two layouts share numbering in lib; confirm.
    KEY_A = lib.GBA_KEY_A
    KEY_B = lib.GBA_KEY_B
    KEY_SELECT = lib.GBA_KEY_SELECT
    KEY_START = lib.GBA_KEY_START
    KEY_DOWN = lib.GBA_KEY_DOWN
    KEY_UP = lib.GBA_KEY_UP
    KEY_LEFT = lib.GBA_KEY_LEFT
    KEY_RIGHT = lib.GBA_KEY_RIGHT

    def __init__(self, native):
        super(GB, self).__init__(native)
        self._native = ffi.cast("struct GB*", native.board)
        self.sprites = GBObjs(self)
        self.cpu = LR35902Core(self._core.cpu)

    @needsReset
    def _initCache(self, cache):
        # Associate a fresh video cache with this core's video unit.
        lib.GBVideoCacheInit(cache)
        lib.GBVideoCacheAssociate(cache, ffi.addressof(self._native.video))

    def _deinitCache(self, cache):
        lib.mCacheSetDeinit(cache)
        if self._wasReset:
            self._native.video.renderer.cache = ffi.NULL

    def _load(self):
        super(GB, self)._load()
        self.memory = GBMemory(self._core)

    def attachSIO(self, link):
        # Install a link-cable (SIO) driver on the native core.
        lib.GBSIOSetDriver(ffi.addressof(self._native.sio), link._native)

    def __del__(self):
        # Detach any SIO driver so the native side doesn't call into a
        # collected Python object.
        lib.GBSIOSetDriver(ffi.addressof(self._native.sio), ffi.NULL)
# Register the Python-side trampolines for the GBSIOPythonDriver vtable so
# the native code can call back into GBSIODriver instances.
createCallback("GBSIOPythonDriver", "init")
createCallback("GBSIOPythonDriver", "deinit")
createCallback("GBSIOPythonDriver", "writeSB")
createCallback("GBSIOPythonDriver", "writeSC")
class GBSIODriver(object):
    """Base class for Python link-cable (SIO) drivers.

    Owns a native GBSIOPythonDriver whose vtable dispatches to the
    methods below.
    """
    def __init__(self):
        self._handle = ffi.new_handle(self)
        self._native = ffi.gc(lib.GBSIOPythonDriverCreate(self._handle), lib.free)

    def init(self):
        return True

    def deinit(self):
        pass

    def writeSB(self, value):
        # Called when the game writes the serial data register.
        pass

    def writeSC(self, value):
        # Called when the game writes the serial control register.
        return value
class GBSIOSimpleDriver(GBSIODriver):
    """SIO driver that exchanges single bytes on a fixed clock period."""
    def __init__(self, period=0x100):
        super(GBSIOSimpleDriver, self).__init__()
        self.rx = 0x00  # last byte received from the link partner
        self._period = period

    def init(self):
        self._native.p.period = self._period
        return True

    def writeSB(self, value):
        self.rx = value

    def writeSC(self, value):
        self._native.p.period = self._period
        if value & 0x80:
            # Transfer-start bit set: (re)schedule the SIO timing event.
            lib.mTimingDeschedule(ffi.addressof(self._native.p.p.timing), ffi.addressof(self._native.p.event))
            lib.mTimingSchedule(ffi.addressof(self._native.p.p.timing), ffi.addressof(self._native.p.event), self._native.p.period)
        return value

    def isReady(self):
        # Ready when no bits remain in flight.
        return not self._native.p.remainingBits

    @property
    def tx(self):
        # BUG FIX: the getter previously evaluated pendingSB without
        # returning it, so `driver.tx` always yielded None.
        return self._native.p.pendingSB

    @tx.setter
    def tx(self, newTx):
        self._native.p.pendingSB = newTx
        self._native.p.remainingBits = 8

    @property
    def period(self):
        return self._native.p.period

    @period.setter
    def period(self, newPeriod):
        self._period = newPeriod
        if self._native.p:
            self._native.p.period = newPeriod
class GBMemory(Memory):
    """Memory views over the GB address space, one region per attribute."""
    def __init__(self, core):
        super(GBMemory, self).__init__(core, 0x10000)
        self.cart = Memory(core, lib.GB_SIZE_CART_BANK0 * 2, lib.GB_BASE_CART_BANK0)
        self.vram = Memory(core, lib.GB_SIZE_VRAM, lib.GB_BASE_VRAM)
        self.sram = Memory(core, lib.GB_SIZE_EXTERNAL_RAM, lib.GB_REGION_EXTERNAL_RAM)
        self.iwram = Memory(core, lib.GB_SIZE_WORKING_RAM_BANK0, lib.GB_BASE_WORKING_RAM_BANK0)
        self.oam = Memory(core, lib.GB_SIZE_OAM, lib.GB_BASE_OAM)
        self.io = Memory(core, lib.GB_SIZE_IO, lib.GB_BASE_IO)
        self.hram = Memory(core, lib.GB_SIZE_HRAM, lib.GB_BASE_HRAM)
class GBSprite(Sprite):
    """Snapshot of one OAM sprite's attributes on a GB/CGB core."""
    # NOTE(review): the trailing comma makes this a 1-tuple — presumably
    # matching how Sprite consumes it; confirm before "fixing".
    PALETTE_BASE = 8,

    def __init__(self, obj, core):
        self.x = obj.x
        self.y = obj.y
        self.tile = obj.tile
        self._attr = obj.attr
        self.width = 8
        lcdc = core._native.memory.io[0x40]
        # LCDC bit 2 selects 8x16 sprites.
        self.height = 16 if lcdc & 4 else 8
        if core._native.model >= lib.GB_MODEL_CGB:
            # CGB: attr bit 3 selects the second tile bank, low 3 bits pick
            # one of the color palettes.
            if self._attr & 8:
                self.tile += 512
            self.paletteId = self._attr & 7
        else:
            # DMG: attr bit 4 picks OBP0/OBP1.
            self.paletteId = (self._attr >> 4) & 1
        self.paletteId += 8
class GBObjs:
    """Indexable collection of the 40 OAM sprites on a GB core."""
    def __init__(self, core):
        self._core = core
        self._obj = core._native.video.oam.obj

    def __len__(self):
        return 40

    def __getitem__(self, index):
        if not index < len(self):
            raise IndexError()
        # Build a sprite snapshot and bind it to the core's tile cache.
        entry = GBSprite(self._obj[index], self._core)
        entry.constitute(self._core.tiles[0], 0)
        return entry
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/__init__.py | # Copyright (c) 2013-2017 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from collections import namedtuple
def createCallback(structName, cbName, funcName=None):
    """Register a cffi extern trampoline forwarding a native callback to the
    Python object stashed in the struct's `pyobj` handle.

    funcName defaults to `_py<Struct><Cb>` to match the cffi cdef
    declarations.
    """
    funcName = funcName or "_py{}{}".format(structName, cbName[0].upper() + cbName[1:])
    fullStruct = "struct {}*".format(structName)
    def cb(handle, *args):
        h = ffi.cast(fullStruct, handle)
        # Recover the owning Python object and invoke its bound method.
        return getattr(ffi.from_handle(h.pyobj), cbName)(*args)
    return ffi.def_extern(name=funcName)(cb)
# Library version string, e.g. '0.6.0'.
version = ffi.string(lib.projectVersion).decode('utf-8')

GitInfo = namedtuple("GitInfo", "commit commitShort branch revision")

git = {}
# NOTE(review): these compare cffi cdata char* values against a Python str;
# in cffi that comparison never matches, so `!= "(unknown)"` is effectively
# always true — confirm whether ffi.string() should be applied first.
# NOTE(review): GitInfo has no field defaults, so a missing key here would
# make GitInfo(**git) raise TypeError — confirm all fields are always set.
if lib.gitCommit and lib.gitCommit != "(unknown)":
    git['commit'] = ffi.string(lib.gitCommit).decode('utf-8')
if lib.gitCommitShort and lib.gitCommitShort != "(unknown)":
    git['commitShort'] = ffi.string(lib.gitCommitShort).decode('utf-8')
if lib.gitBranch and lib.gitBranch != "(unknown)":
    git['branch'] = ffi.string(lib.gitBranch).decode('utf-8')
if lib.gitRevision > 0:
    git['revision'] = lib.gitRevision
git = GitInfo(**git)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/gamedata.py | <reponame>MatPoliquin/retro<gh_stars>1000+
# Copyright (c) 2013-2017 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
try:
import mgba_gamedata
except ImportError:
pass
def search(core):
    """Look up gamedata for *core* by platform and ROM CRC32.

    Returns an instance of the matching gamedata class bound to the core's
    memory, or None when the platform is unsupported, no entry matches, or
    the optional mgba_gamedata package is unavailable.
    """
    platform = None
    crc32 = None
    if hasattr(core, 'PLATFORM_GBA') and core.platform() == core.PLATFORM_GBA:
        platform = 'GBA'
        crc32 = core.crc32
    elif hasattr(core, 'PLATFORM_GB') and core.platform() == core.PLATFORM_GB:
        platform = 'GB'
        crc32 = core.crc32
    # BUG FIX: `platform` was previously unbound for unknown platforms and
    # a missing mgba_gamedata import raised NameError below; both now
    # simply yield None.
    if platform is None:
        return None
    try:
        cls = mgba_gamedata.registry.search(platform, {'crc32': crc32})
    except NameError:
        return None
    if not cls:
        return None
    return cls(core.memory.u8)
|
MatPoliquin/retro | retro/examples/determinism.py | """
Example wrapper to improve determinism of Retro environments
"""
import retro
import numpy as np
import argparse
import gym
import multiprocessing as mp
CHUNK_LENGTH = 128
class MoreDeterministicRetroState(gym.Wrapper):
    """
    Save/restore state on each step to avoid de-sync

    It's likely that reward and done will not be correct if they
    depend on lua state (e.g. Sonic "contest" scenario)

    For most emulated systems this is 10%-50% slower, for Atari2600 it is
    60x slower. It's unclear why stella is so slow to save/load a state.

    This also fails on GameBoy games due to https://github.com/openai/retro/issues/116

    If other wrappers have state (such as Timelimit), they would need to be extended
    to support get_state() and reset(state=state), and then this class would need
    to make sure parent methods are called.
    """
    def __init__(self, *args, reset_on_step=True, **kwargs):
        super().__init__(*args, **kwargs)
        self._last_obs = None
        self._done = False
        # if retro were more deterministic, this would not be necessary
        self._reset_on_step = reset_on_step

    def reset(self, state=None):
        """Reset the env; when *state* is an (emulator state, obs) pair from
        get_state(), restore it instead of doing a full reset."""
        self._done = False
        if state is not None:
            em_state, self._last_obs = state
            self.unwrapped.em.set_state(em_state)
            self.unwrapped.data.reset()
            self.unwrapped.data.update_ram()
        else:
            self._last_obs = self.env.reset()
        return self._last_obs

    def step(self, act):
        # Round-trip the emulator state before stepping to flush any
        # nondeterministic internal state.
        if self._reset_on_step:
            self.reset(state=self.get_state())
        self._last_obs, rew, self._done, info = self.env.step(act)
        return self._last_obs, rew, self._done, info

    def get_state(self):
        """Return an opaque (emulator state, last observation) pair."""
        assert not self._done, "cannot store a terminal state"
        return (self.unwrapped.em.get_state(), self._last_obs)
def rollout(env, acts):
    """Play *acts* through *env*, stopping early on done; return the summed
    reward as a float."""
    total = 0.0
    for action in acts:
        _, reward, finished, _ = env.step(action)
        total += reward
        if finished:
            break
    return total
def chunk(L, length):
    """Split sequence *L* into consecutive pieces of at most *length* items;
    the final piece may be shorter."""
    pieces = []
    remaining = L
    while True:
        piece = remaining[:length]
        if not piece:
            break
        remaining = remaining[length:]
        pieces.append(piece)
    return pieces
def partition(L, pieces):
    """Split *L* into at most *pieces* equally-sized chunks (last may be
    shorter)."""
    per_chunk = len(L) // pieces + 1
    return chunk(L, per_chunk)
def check_env_helper(make_env, all_acts, verbose, out_success):
    """Subprocess body for check_env: replay action chunks from saved states
    and verify rewards and RAM match a reference rollout.

    Writes the boolean result into *out_success* (a multiprocessing Value).
    """
    # do rollouts and get reference values
    env = make_env()
    env.reset()
    # truncate actions to end before done
    valid_acts = []
    for act in all_acts:
        _obs, _rew, done, _info = env.step(act)
        if done:
            break
        valid_acts.append(act)
    env.reset()
    in_states = [env.get_state()]
    in_acts = chunk(valid_acts, CHUNK_LENGTH)
    out_rews = []
    out_rams = []
    # Reference pass: record each chunk's reward, the RAM afterwards, and
    # the state before the next chunk.
    for acts in in_acts:
        out_rews.append(rollout(env, acts))
        out_rams.append(env.get_ram())
        in_states.append(env.get_state())
    in_states.pop()  # remove extra final state since there are no actions after it
    success = True
    # Verification pass: restart from every recorded state and confirm the
    # remaining chunks reproduce the reference rewards and RAM exactly.
    for start_idx in range(len(in_states)):
        if verbose:
            print(start_idx + 1, len(in_states))
        env.reset(state=in_states[start_idx])
        for offset, acts in enumerate(in_acts[start_idx:]):
            if not np.array_equal(rollout(env, acts), out_rews[start_idx + offset]):
                print('failed rew')
                success = False
            if not np.array_equal(env.get_ram(), out_rams[start_idx + offset]):
                print('failed ram')
                success = False
    env.close()
    out_success.value = success
def check_env(make_env, acts, verbose=False, timeout=None):
    """Run the determinism check in a daemon subprocess; return True on
    success. A *timeout* (seconds) kills the child and counts as failure."""
    result = mp.Value('b', False)
    proc = mp.Process(
        target=check_env_helper,
        args=(make_env, acts, verbose, result),
        daemon=True,
    )
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        print('failed to finish in time')
        proc.terminate()
        proc.join()
        return False
    return bool(result.value)
def main():
    """CLI entry point: check determinism across games matching --suffix, or
    replay a recorded movie given via --movie-file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--deterministic', action='store_true', help='use deterministic wrapper')
    parser.add_argument('--suffix', default='', help='run against games matching this suffix')
    parser.add_argument('--movie-file', help='load a bk2 and use states obtained from replaying actions from the bk2')
    args = parser.parse_args()

    # The two branches below cover every case (--suffix defaults to '', which
    # matches all games), so the old unreachable "must specify" error is gone.
    if args.movie_file is None:
        games = [g for g in sorted(retro.data.list_games()) if g.endswith(args.suffix)]
        failed_games = []
        for game in games:
            print(game)

            def make_env():
                env = retro.make(game=game)
                # Always wrap so state save/load behaves the same; only the
                # per-step reset differs.
                if args.deterministic:
                    env = MoreDeterministicRetroState(env)
                else:
                    env = MoreDeterministicRetroState(env, reset_on_step=False)
                return env

            env = make_env()
            env.action_space.seed(0)
            acts = [env.action_space.sample() for _ in range(CHUNK_LENGTH * 2)]
            env.close()
            if not check_env(make_env, acts, timeout=128):
                failed_games.append(game)
        for game in failed_games:
            print('failed:', game)
    else:
        movie = retro.Movie(args.movie_file)
        movie.step()

        def make_env():
            env = retro.make(movie.get_game(), state=retro.State.DEFAULT, use_restricted_actions=retro.Actions.ALL)
            env.initial_state = movie.get_state()
            if args.deterministic:
                env = MoreDeterministicRetroState(env)
            else:
                # BUG FIX: this branch referenced an undefined name
                # `RetroState`; wrap without per-step resets instead.
                env = MoreDeterministicRetroState(env, reset_on_step=False)
            return env

        env = make_env()
        acts = []
        # Reconstruct the per-step multi-player action arrays from the movie.
        while movie.step():
            act = []
            for p in range(movie.players):
                for i in range(env.num_buttons):
                    act.append(movie.get_key(i, p))
            acts.append(act)
        env.close()
        check_env(make_env, acts, verbose=True)
if __name__ == '__main__':
main() |
MatPoliquin/retro | retro/cores/gba/tools/perf.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import csv
import os
import shlex
import signal
import socket
import subprocess
import sys
import time
class PerfTest(object):
    """Base class: run the mgba-perf binary against one ROM and parse its
    CSV output into self.results."""
    EXECUTABLE = 'mgba-perf'

    def __init__(self, rom, renderer='software'):
        self.rom = rom
        self.renderer = renderer
        self.results = None  # dict of the first CSV row, or None on crash
        self.name = 'Perf Test: {}'.format(rom)

    def get_args(self):
        # Extra CLI arguments; overridden by subclasses.
        return []

    def wait(self, proc):
        # Hook for subclasses to control how long the child runs.
        pass

    def run(self, cwd):
        """Launch the perf binary for this test and capture its CSV output."""
        args = [os.path.join(os.getcwd(), self.EXECUTABLE), '-P']
        args.extend(self.get_args())
        if self.renderer != 'software':
            args.append('-N')  # disable the software video renderer
        args.append(self.rom)
        env = {}
        # NOTE(review): passing env={} wipes the child's environment apart
        # from the library path below — confirm this is intentional.
        if 'LD_LIBRARY_PATH' in os.environ:
            env['LD_LIBRARY_PATH'] = os.path.abspath(os.environ['LD_LIBRARY_PATH'])
            env['DYLD_LIBRARY_PATH'] = env['LD_LIBRARY_PATH']  # Fake it on OS X
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd, universal_newlines=True, env=env)
        try:
            self.wait(proc)
            proc.wait()
        except:
            # Make sure the child doesn't outlive us (incl. KeyboardInterrupt).
            proc.kill()
            raise
        if proc.returncode:
            print('Game crashed!', file=sys.stderr)
            return
        reader = csv.DictReader(proc.stdout)
        self.results = next(reader)
class WallClockTest(PerfTest):
    """Perf test bounded by wall-clock time; interrupts the child when done."""
    def __init__(self, rom, duration, renderer='software'):
        super(WallClockTest, self).__init__(rom, renderer)
        self.duration = duration
        label = 'Wall-Clock Test ({} seconds, {} renderer): {}'
        self.name = label.format(duration, renderer, rom)

    def wait(self, proc):
        # Give the emulator its time slice, then ask it to stop and emit
        # its CSV results.
        time.sleep(self.duration)
        proc.send_signal(signal.SIGINT)
class GameClockTest(PerfTest):
    """Perf test bounded by a fixed number of emulated frames."""
    def __init__(self, rom, frames, renderer='software'):
        super(GameClockTest, self).__init__(rom, renderer)
        self.frames = frames
        label = 'Game-Clock Test ({} frames, {} renderer): {}'
        self.name = label.format(frames, renderer, rom)

    def get_args(self):
        # -F tells mgba-perf to exit after this many frames.
        return ['-F', str(self.frames)]
class PerfServer(object):
    """Drives perf tests against a spawned mgba perf server over TCP.

    The server is restarted every ITERATIONS_PER_INSTANCE tests.
    """
    ITERATIONS_PER_INSTANCE = 50

    def __init__(self, address, command=None):
        """*address* is 'host' or 'host:port'; *command* optionally overrides
        the server launch command line (parsed with shlex)."""
        parts = address.rsplit(':', 1)
        if len(parts) == 1:
            self.address = (parts[0], 7216)
        else:
            # BUG FIX: the port must be an int for socket.create_connection.
            self.address = (parts[0], int(parts[1]))
        # BUG FIX: self.command was previously left unset when no command was
        # given, crashing _start with AttributeError.
        self.command = shlex.split(command) if command else None
        self.iterations = self.ITERATIONS_PER_INSTANCE
        self.socket = None
        self.results = []
        self.reader = None

    def _start(self, test):
        """Launch the server process and connect a CSV reader to it."""
        if self.command:
            server_command = list(self.command)
        else:
            server_command = [os.path.join(os.getcwd(), PerfTest.EXECUTABLE)]
        server_command.extend(['--', '-PD', '0'])
        if hasattr(test, "frames"):
            server_command.extend(['-F', str(test.frames)])
        if test.renderer != "software":
            server_command.append('-N')
        subprocess.check_call(server_command)
        time.sleep(4)  # give the server time to come up before connecting
        self.socket = socket.create_connection(self.address, timeout=1000)
        self.reader = csv.DictReader(self.socket.makefile())

    def run(self, test):
        """Send one ROM path to the server and collect its CSV result row."""
        if not self.socket:
            self._start(test)
        # BUG FIX: sockets take bytes on Python 3, not str.
        self.socket.send(os.path.join("/perfroms", test.rom).encode("utf-8"))
        self.results.append(next(self.reader))
        self.iterations -= 1
        if self.iterations == 0:
            # Cycle the server instance to keep runs comparable.
            self.finish()
            self.iterations = self.ITERATIONS_PER_INSTANCE

    def finish(self):
        """Tell the server we're done and tear down the connection."""
        self.socket.send(b"\n")
        self.reader = None
        self.socket.close()
        time.sleep(5)
        self.socket = None
class Suite(object):
    """Collects and runs perf tests for every ROM in a directory."""
    def __init__(self, cwd, wall=None, game=None, renderer='software'):
        self.cwd = cwd
        self.tests = []
        self.wall = wall    # wall-clock seconds per test, or falsy to skip
        self.game = game    # game-clock frames per test, or falsy to skip
        self.renderer = renderer
        self.server = None

    def set_server(self, server):
        self.server = server

    def collect_tests(self):
        """Scan cwd for ROM files and queue tests for each, sorted by name."""
        # Idiom: endswith accepts a tuple of suffixes in one call.
        roms = [f for f in os.listdir(self.cwd)
                if f.endswith(('.gba', '.zip', '.gbc', '.gb'))]
        roms.sort()
        for rom in roms:
            self.add_tests(rom)

    def add_tests(self, rom):
        if self.wall:
            self.tests.append(WallClockTest(rom, self.wall, renderer=self.renderer))
        if self.game:
            self.tests.append(GameClockTest(rom, self.game, renderer=self.renderer))

    def run(self):
        """Run all collected tests; return a list of CSV result dicts.

        Server results are gathered at the end; local results as they
        complete. Ctrl-C returns whatever finished so far.
        """
        results = []
        # (removed an unused local `sock = None` left over here)
        for test in self.tests:
            print('Running test {}'.format(test.name), file=sys.stderr)
            if self.server:
                self.server.run(test)
            else:
                try:
                    test.run(self.cwd)
                except KeyboardInterrupt:
                    print('Interrupted, returning early...', file=sys.stderr)
                    return results
                if test.results:
                    results.append(test.results)
        if self.server:
            self.server.finish()
            results.extend(self.server.results)
        return results
if __name__ == '__main__':
    # CLI driver: build a Suite from the arguments, optionally attach a
    # remote perf server, run everything, and emit the results as CSV.
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', '--wall-time', type=float, default=0, metavar='TIME', help='wall-clock time')
    parser.add_argument('-g', '--game-frames', type=int, default=0, metavar='FRAMES', help='game-clock frames')
    parser.add_argument('-N', '--disable-renderer', action='store_const', const=True, help='disable video rendering')
    parser.add_argument('-s', '--server', metavar='ADDRESS', help='run on server')
    parser.add_argument('-S', '--server-command', metavar='COMMAND', help='command to launch server')
    parser.add_argument('-o', '--out', metavar='FILE', help='output file path')
    parser.add_argument('directory', help='directory containing ROM files')
    args = parser.parse_args()

    s = Suite(args.directory, wall=args.wall_time, game=args.game_frames, renderer=None if args.disable_renderer else 'software')
    if args.server:
        if args.server_command:
            server = PerfServer(args.server, args.server_command)
        else:
            server = PerfServer(args.server)
        s.set_server(server)
    s.collect_tests()
    results = s.run()
    fout = sys.stdout
    if args.out:
        fout = open(args.out, 'w')
    # NOTE(review): results[0] raises IndexError when no test produced
    # output (e.g. empty directory) — confirm whether that's acceptable.
    writer = csv.DictWriter(fout, results[0].keys())
    writer.writeheader()
    writer.writerows(results)
    if fout is not sys.stdout:
        fout.close()
|
MatPoliquin/retro | retro/examples/trivial_random_agent_multiplayer.py | import retro
def main():
    """Random two-player agent for Pong-Atari2600; renders until killed."""
    env = retro.make(game='Pong-Atari2600', players=2)
    obs = env.reset()
    while True:
        # action_space will be MultiBinary(16) now instead of MultiBinary(8)
        # the bottom half of the actions will be for player 1 and the top half for player 2
        obs, rew, done, info = env.step(env.action_space.sample())
        # rew will be a list of [player_1_rew, player_2_rew]
        # done and info will remain the same
        env.render()
        if done:
            obs = env.reset()
    # NOTE(review): unreachable — the loop above never exits normally.
    env.close()
if __name__ == "__main__":
main()
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/thread.py | # Copyright (c) 2013-2017 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from .core import IRunner, ICoreOwner, Core
class ThreadCoreOwner(ICoreOwner):
    """ICoreOwner that interrupts a running core thread while its core is in
    use and resumes it on release."""
    def __init__(self, thread):
        self.thread = thread

    def claim(self):
        if not self.thread.isRunning():
            raise ValueError
        # Pause emulation so the core can be inspected/mutated safely.
        lib.mCoreThreadInterrupt(self.thread._native)
        return self.thread._core

    def release(self):
        lib.mCoreThreadContinue(self.thread._native)
class Thread(IRunner):
    """IRunner that drives a Core on a native mCoreThread."""
    def __init__(self, native=None):
        if native:
            # Wrap an already-existing native thread.
            self._native = native
            self._core = Core(native.core)
            self._core._wasReset = lib.mCoreThreadHasStarted(self._native)
        else:
            self._native = ffi.new("struct mCoreThread*")

    def start(self, core):
        """Start running *core* on this thread; raises ValueError if the
        thread was already started."""
        if lib.mCoreThreadHasStarted(self._native):
            raise ValueError
        self._core = core
        self._native.core = core._core
        lib.mCoreThreadStart(self._native)
        self._core._wasReset = lib.mCoreThreadHasStarted(self._native)

    def end(self):
        """Stop the thread and wait for it to exit; raises ValueError if it
        was never started."""
        if not lib.mCoreThreadHasStarted(self._native):
            raise ValueError
        lib.mCoreThreadEnd(self._native)
        lib.mCoreThreadJoin(self._native)

    def pause(self):
        lib.mCoreThreadPause(self._native)

    def unpause(self):
        lib.mCoreThreadUnpause(self._native)

    def isRunning(self):
        return bool(lib.mCoreThreadIsActive(self._native))

    def isPaused(self):
        return bool(lib.mCoreThreadIsPaused(self._native))

    def useCore(self):
        # Context manager granting exclusive access to the core.
        return ThreadCoreOwner(self)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/lr35902.py | <gh_stars>1000+
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
class LR35902Core:
    """Register-level view of a native `struct LR35902Core*` (the GB CPU).

    Exposes the 8-bit registers a/b/c/d/e/f/h/l, the 16-bit sp/pc, and the
    combined 16-bit pairs af/bc/de/hl.
    """
    def __init__(self, native):
        self._native = ffi.cast("struct LR35902Core*", native)

    # --- 8-bit registers -------------------------------------------------
    @property
    def a(self):
        return self._native.a

    @property
    def b(self):
        return self._native.b

    @property
    def c(self):
        return self._native.c

    @property
    def d(self):
        return self._native.d

    @property
    def e(self):
        return self._native.e

    @property
    def f(self):
        return self._native.f

    @property
    def h(self):
        return self._native.h

    @property
    def l(self):
        return self._native.l

    # --- 16-bit registers ------------------------------------------------
    @property
    def sp(self):
        return self._native.sp

    @property
    def pc(self):
        return self._native.pc

    # --- combined register pairs (high byte << 8 | low byte) -------------
    @property
    def af(self):
        return (self.a << 8) | self.f

    @property
    def bc(self):
        return (self.b << 8) | self.c

    @property
    def de(self):
        return (self.d << 8) | self.e

    @property
    def hl(self):
        return (self.h << 8) | self.l

    # --- setters ----------------------------------------------------------
    @a.setter
    def a(self, value):
        self._native.a = value

    @b.setter
    def b(self, value):
        self._native.b = value

    @c.setter
    def c(self, value):
        self._native.c = value

    @d.setter
    def d(self, value):
        self._native.d = value

    @e.setter
    def e(self, value):
        self._native.e = value

    @f.setter
    def f(self, value):
        # F is a bitfield struct; clear the bits the struct marks unused.
        self._native.f.packed = value
        self._native.f.unused = 0

    @h.setter
    def h(self, value):
        self._native.h = value

    @l.setter
    def l(self, value):
        self._native.l = value

    @sp.setter
    def sp(self, value):
        self._native.sp = value

    # Pair setters split the 16-bit value into the two 8-bit registers.
    @af.setter
    def af(self, value):
        self.a = value >> 8
        self.f = value & 0xFF

    @bc.setter
    def bc(self, value):
        self.b = value >> 8
        self.c = value & 0xFF

    @de.setter
    def de(self, value):
        self.d = value >> 8
        self.e = value & 0xFF

    @hl.setter
    def hl(self, value):
        self.h = value >> 8
        self.l = value & 0xFF
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/vfs.py | <reponame>MatPoliquin/retro<filename>retro/cores/gba/src/platform/python/mgba/vfs.py
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
import mmap
import os
# cffi trampolines backing a native VFile whose storage is a Python file
# object (stashed in the struct's `fileobj` handle).
@ffi.def_extern()
def _vfpClose(vf):
    # VFile.close: close the wrapped Python file object.
    vfp = ffi.cast("struct VFilePy*", vf)
    ffi.from_handle(vfp.fileobj).close()
    return True

@ffi.def_extern()
def _vfpSeek(vf, offset, whence):
    # VFile.seek: delegate, then return the new absolute position.
    vfp = ffi.cast("struct VFilePy*", vf)
    f = ffi.from_handle(vfp.fileobj)
    f.seek(offset, whence)
    return f.tell()

@ffi.def_extern()
def _vfpRead(vf, buffer, size):
    # VFile.read: fill the native buffer from the file object.
    # NOTE(review): returns `size` even on a short read — confirm callers
    # tolerate that.
    vfp = ffi.cast("struct VFilePy*", vf)
    pybuf = ffi.buffer(buffer, size)
    ffi.from_handle(vfp.fileobj).readinto(pybuf)
    return size

@ffi.def_extern()
def _vfpWrite(vf, buffer, size):
    # VFile.write: copy the native buffer into the file object.
    vfp = ffi.cast("struct VFilePy*", vf)
    pybuf = ffi.buffer(buffer, size)
    ffi.from_handle(vfp.fileobj).write(pybuf)
    return size

@ffi.def_extern()
def _vfpMap(vf, size, flags):
    # Memory mapping is not supported for Python-backed VFiles.
    pass

@ffi.def_extern()
def _vfpUnmap(vf, memory, size):
    pass

@ffi.def_extern()
def _vfpTruncate(vf, size):
    vfp = ffi.cast("struct VFilePy*", vf)
    ffi.from_handle(vfp.fileobj).truncate(size)

@ffi.def_extern()
def _vfpSize(vf):
    # VFile.size: measure by seeking to the end, then restore the position.
    vfp = ffi.cast("struct VFilePy*", vf)
    f = ffi.from_handle(vfp.fileobj)
    pos = f.tell()
    f.seek(0, os.SEEK_END)
    size = f.tell()
    f.seek(pos, os.SEEK_SET)
    return size
@ffi.def_extern()
def _vfpSync(vf, buffer, size):
    """VFile.sync: optionally rewrite the first *size* bytes from *buffer*,
    then flush the Python file object and fsync it to disk."""
    vfp = ffi.cast("struct VFilePy*", vf)
    f = ffi.from_handle(vfp.fileobj)
    if buffer and size:
        # Rewrite the head of the file, preserving the current position.
        pos = f.tell()
        f.seek(0, os.SEEK_SET)
        _vfpWrite(vf, buffer, size)
        f.seek(pos, os.SEEK_SET)
    f.flush()
    # BUG FIX: os.fsync() requires a file descriptor; calling it with no
    # arguments raised TypeError, so sync never actually reached the disk.
    os.fsync(f.fileno())
    return True
def open(f):
    """Wrap an already-open Python file object in a native-backed VFile.

    NOTE(review): shadows the builtin `open` within this module.
    """
    handle = ffi.new_handle(f)
    vf = VFile(lib.VFileFromPython(handle))
    # Prevent garbage collection
    vf._fileobj = f
    vf._handle = handle
    return vf
def openPath(path, mode="r"):
    """Open *path* with the native VFileOpen using fopen-style *mode*
    ('r'/'w'/'a', optionally with '+' and/or 'x').

    Returns a VFile, or None on an unknown mode or open failure.
    """
    flags = 0
    if mode.startswith("r"):
        flags |= os.O_RDONLY
    elif mode.startswith("w"):
        flags |= os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    elif mode.startswith("a"):
        flags |= os.O_WRONLY | os.O_CREAT | os.O_APPEND
    else:
        return None
    if "+" in mode[1:]:
        flags |= os.O_RDWR
    if "x" in mode[1:]:
        flags |= os.O_EXCL
    vf = lib.VFileOpen(path.encode("UTF-8"), flags);
    if vf == ffi.NULL:
        return None
    return VFile(vf)
class VFile:
    """Thin Python wrapper over a native VFile vtable handle; each method
    delegates to the corresponding native function pointer."""
    def __init__(self, vf):
        self.handle = vf

    def close(self):
        return bool(self.handle.close(self.handle))

    def seek(self, offset, whence):
        return self.handle.seek(self.handle, offset, whence)

    def read(self, buffer, size):
        return self.handle.read(self.handle, buffer, size)

    def readAll(self, size=0):
        """Read *size* bytes (defaulting to the whole file) and return them
        as a Python bytes object."""
        if not size:
            size = self.size()
        buffer = ffi.new("char[%i]" % size)
        size = self.handle.read(self.handle, buffer, size)
        return ffi.unpack(buffer, size)

    def readline(self, buffer, size):
        return self.handle.readline(self.handle, buffer, size)

    def write(self, buffer, size):
        return self.handle.write(self.handle, buffer, size)

    def map(self, size, flags):
        return self.handle.map(self.handle, size, flags)

    def unmap(self, memory, size):
        self.handle.unmap(self.handle, memory, size)

    def truncate(self, size):
        self.handle.truncate(self.handle, size)

    def size(self):
        return self.handle.size(self.handle)

    def sync(self, buffer, size):
        return self.handle.sync(self.handle, buffer, size)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/core.py | # Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import tile, createCallback
from cached_property import cached_property
def find(path):
    """Return a Core for the ROM at *path*, or None if no core claims it."""
    core = lib.mCoreFind(path.encode('UTF-8'))
    if core == ffi.NULL:
        return None
    return Core._init(core)
def findVF(vf):
    """Return a Core for the ROM behind VFile *vf*, or None if none claims
    it."""
    core = lib.mCoreFindVF(vf.handle)
    if core == ffi.NULL:
        return None
    return Core._init(core)
def loadPath(path):
    """Find a core for *path* and load the ROM into it; None on failure."""
    core = find(path)
    if not core or not core.loadFile(path):
        return None
    return core
def loadVF(vf):
    """Find a core for VFile *vf* and load the ROM into it; None on
    failure."""
    core = findVF(vf)
    if not core or not core.loadROM(vf):
        return None
    return core
def needsReset(f):
    """Method decorator: raise RuntimeError unless the core was reset.

    Guards native calls that are only valid after Core.reset().
    """
    from functools import wraps

    # functools.wraps preserves the wrapped method's name/docstring so
    # introspection and error messages stay useful.
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self._wasReset:
            raise RuntimeError("Core must be reset first")
        return f(self, *args, **kwargs)
    return wrapper
def protected(f):
    """Method decorator: raise RuntimeError while the core is protected.

    Guards methods that must not run while another owner (see ICoreOwner)
    holds exclusive access to the core.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped method's name/docstring.
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self._protected:
            raise RuntimeError("Core is protected")
        return f(self, *args, **kwargs)
    return wrapper
# cffi trampolines: recover the CoreCallbacks object from its handle and fan
# out to the registered Python callbacks.
@ffi.def_extern()
def _mCorePythonCallbacksVideoFrameStarted(user):
    context = ffi.from_handle(user)
    context._videoFrameStarted()

@ffi.def_extern()
def _mCorePythonCallbacksVideoFrameEnded(user):
    context = ffi.from_handle(user)
    context._videoFrameEnded()

@ffi.def_extern()
def _mCorePythonCallbacksCoreCrashed(user):
    context = ffi.from_handle(user)
    context._coreCrashed()

@ffi.def_extern()
def _mCorePythonCallbacksSleep(user):
    context = ffi.from_handle(user)
    context._sleep()
class CoreCallbacks(object):
    """Fan-out container for core event callbacks (frame start/end, crash,
    sleep); the native side dispatches through `context`."""
    def __init__(self):
        self._handle = ffi.new_handle(self)
        self.videoFrameStarted = []
        self.videoFrameEnded = []
        self.coreCrashed = []
        self.sleep = []
        self.context = lib.mCorePythonCallbackCreate(self._handle)

    def _videoFrameStarted(self):
        for cb in self.videoFrameStarted:
            cb()

    def _videoFrameEnded(self):
        for cb in self.videoFrameEnded:
            cb()

    def _coreCrashed(self):
        for cb in self.coreCrashed:
            cb()

    def _sleep(self):
        for cb in self.sleep:
            cb()
class Core(object):
if hasattr(lib, 'PLATFORM_GBA'):
PLATFORM_GBA = lib.PLATFORM_GBA
if hasattr(lib, 'PLATFORM_GB'):
PLATFORM_GB = lib.PLATFORM_GB
def __init__(self, native):
self._core = native
self._wasReset = False
self._protected = False
self._callbacks = CoreCallbacks()
self._core.addCoreCallbacks(self._core, self._callbacks.context)
self.config = Config(ffi.addressof(native.config))
def __del__(self):
self._wasReset = False
@cached_property
def graphicsCache(self):
if not self._wasReset:
raise RuntimeError("Core must be reset first")
return tile.CacheSet(self)
@cached_property
def tiles(self):
t = []
ts = ffi.addressof(self.graphicsCache.cache.tiles)
for i in range(lib.mTileCacheSetSize(ts)):
t.append(tile.TileView(lib.mTileCacheSetGetPointer(ts, i)))
return t
@cached_property
def maps(self):
m = []
ms = ffi.addressof(self.graphicsCache.cache.maps)
for i in range(lib.mMapCacheSetSize(ms)):
m.append(tile.MapView(lib.mMapCacheSetGetPointer(ms, i)))
return m
@classmethod
def _init(cls, native):
core = ffi.gc(native, native.deinit)
success = bool(core.init(core))
lib.mCoreInitConfig(core, ffi.NULL)
if not success:
raise RuntimeError("Failed to initialize core")
return cls._detect(core)
@classmethod
def _detect(cls, core):
if hasattr(cls, 'PLATFORM_GBA') and core.platform(core) == cls.PLATFORM_GBA:
from .gba import GBA
return GBA(core)
if hasattr(cls, 'PLATFORM_GB') and core.platform(core) == cls.PLATFORM_GB:
from .gb import GB
return GB(core)
return Core(core)
def _load(self):
self._wasReset = True
def loadFile(self, path):
return bool(lib.mCoreLoadFile(self._core, path.encode('UTF-8')))
def isROM(self, vf):
return bool(self._core.isROM(vf.handle))
def loadROM(self, vf):
return bool(self._core.loadROM(self._core, vf.handle))
def loadBIOS(self, vf, id=0):
return bool(self._core.loadBIOS(self._core, vf.handle, id))
def loadSave(self, vf):
return bool(self._core.loadSave(self._core, vf.handle))
def loadTemporarySave(self, vf):
return bool(self._core.loadTemporarySave(self._core, vf.handle))
def loadPatch(self, vf):
return bool(self._core.loadPatch(self._core, vf.handle))
def loadConfig(self, config):
lib.mCoreLoadForeignConfig(self._core, config._native)
def autoloadSave(self):
return bool(lib.mCoreAutoloadSave(self._core))
def autoloadPatch(self):
return bool(lib.mCoreAutoloadPatch(self._core))
def autoloadCheats(self):
return bool(lib.mCoreAutoloadCheats(self._core))
def platform(self):
    """Return the native platform identifier for this core."""
    return self._core.platform(self._core)

def desiredVideoDimensions(self):
    """Return (width, height) the core wants for its video buffer."""
    width = ffi.new("unsigned*")
    height = ffi.new("unsigned*")
    self._core.desiredVideoDimensions(self._core, width, height)
    return width[0], height[0]

def setVideoBuffer(self, image):
    """Point the core's video output at an Image's pixel buffer."""
    self._core.setVideoBuffer(self._core, image.buffer, image.stride)

def reset(self):
    """Reset the emulated system and mark the core as runnable."""
    self._core.reset(self._core)
    self._load()

@needsReset
@protected
def runFrame(self):
    """Emulate a single video frame (native runFrame)."""
    self._core.runFrame(self._core)

@needsReset
@protected
def runLoop(self):
    """Invoke the native runLoop entry point on this core."""
    self._core.runLoop(self._core)

@needsReset
def step(self):
    """Invoke the native single-step entry point on this core."""
    self._core.step(self._core)
@staticmethod
def _keysToInt(*args, **kwargs):
keys = 0
if 'raw' in kwargs:
keys = kwargs['raw']
for key in args:
keys |= 1 << key
return keys
def setKeys(self, *args, **kwargs):
    """Replace the currently held keys with the given indices/raw mask."""
    self._core.setKeys(self._core, self._keysToInt(*args, **kwargs))

def addKeys(self, *args, **kwargs):
    """Press additional keys on top of the currently held ones."""
    self._core.addKeys(self._core, self._keysToInt(*args, **kwargs))

def clearKeys(self, *args, **kwargs):
    """Release the given keys."""
    self._core.clearKeys(self._core, self._keysToInt(*args, **kwargs))
@property
@needsReset
def frameCounter(self):
    """Number of frames emulated since the last reset."""
    return self._core.frameCounter(self._core)

@property
def frameCycles(self):
    """Cycles per frame, as reported by the native core."""
    return self._core.frameCycles(self._core)

@property
def frequency(self):
    """Emulated clock frequency, as reported by the native core."""
    return self._core.frequency(self._core)

@property
def gameTitle(self):
    """Game title from the loaded ROM header (up to 16 ASCII chars)."""
    title = ffi.new("char[16]")
    self._core.getGameTitle(self._core, title)
    return ffi.string(title, 16).decode("ascii")

@property
def gameCode(self):
    """Game code from the loaded ROM header (up to 12 ASCII chars)."""
    code = ffi.new("char[12]")
    self._core.getGameCode(self._core, code)
    return ffi.string(code, 12).decode("ascii")

def addFrameCallback(self, cb):
    """Register a callable invoked at the end of every video frame."""
    self._callbacks.videoFrameEnded.append(cb)

@property
def crc32(self):
    """CRC32 checksum of the loaded ROM."""
    return self._native.romCrc32
class ICoreOwner(object):
    """Abstract owner handing out exclusive access to a core via ``with``.

    Subclasses implement claim()/release(); the context manager flips the
    core's ``_protected`` flag for the duration of the block.
    """

    def claim(self):
        """Acquire and return the core; subclasses must override."""
        raise NotImplementedError

    def release(self):
        """Release the core acquired by claim(); subclasses must override."""
        raise NotImplementedError

    def __enter__(self):
        core = self.claim()
        core._protected = True
        self.core = core
        return core

    def __exit__(self, type, value, traceback):
        self.core._protected = False
        self.release()
class IRunner(object):
    """Interface for objects that drive a core's execution state."""

    def pause(self):
        """Pause execution; subclasses must override."""
        raise NotImplementedError

    def unpause(self):
        """Resume execution; subclasses must override."""
        raise NotImplementedError

    def useCore(self):
        """Return an ICoreOwner granting access to the underlying core."""
        raise NotImplementedError

    def isRunning(self):
        """Return True while the runner is actively executing."""
        raise NotImplementedError

    def isPaused(self):
        """Return True while the runner is paused."""
        raise NotImplementedError
class Config(object):
    """Wrapper around a native mCoreConfig with dict-style access.

    Values are stored as strings on the native side; booleans are coerced
    to ints first so they round-trip as "0"/"1".
    """

    def __init__(self, native=None, port=None, defaults=None):
        # BUG FIX: `defaults` was a mutable default argument ({}), which is
        # shared between calls; use None and substitute an empty dict.
        if not native:
            self._port = ffi.NULL
            if port:
                self._port = ffi.new("char[]", port.encode("UTF-8"))
            native = ffi.gc(ffi.new("struct mCoreConfig*"), lib.mCoreConfigDeinit)
            lib.mCoreConfigInit(native, self._port)
        self._native = native
        for key, value in (defaults or {}).items():
            if isinstance(value, bool):
                value = int(value)
            lib.mCoreConfigSetDefaultValue(self._native,
                                           ffi.new("char[]", key.encode("UTF-8")),
                                           ffi.new("char[]", str(value).encode("UTF-8")))

    def __getitem__(self, key):
        """Return the configured value for ``key`` as bytes, or None if unset."""
        string = lib.mCoreConfigGetValue(self._native, ffi.new("char[]", key.encode("UTF-8")))
        if not string:
            return None
        return ffi.string(string)

    def __setitem__(self, key, value):
        """Set ``key`` to ``value`` (booleans become 0/1, others str())."""
        if isinstance(value, bool):
            value = int(value)
        lib.mCoreConfigSetValue(self._native,
                                ffi.new("char[]", key.encode("UTF-8")),
                                ffi.new("char[]", str(value).encode("UTF-8")))
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/log.py | <filename>retro/cores/gba/src/platform/python/mgba/log.py
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import createCallback
# Register the C->Python trampoline for mLoggerPy.log.
createCallback("mLoggerPy", "log", "_pyLog")

# Module-global reference to the installed default logger; kept so the
# Python object (and its native wrapper) is not garbage collected.
defaultLogger = None
def installDefault(logger):
    """Install ``logger`` as the process-wide default mGBA logger.

    A module-global reference is kept so the logger outlives this call.
    """
    global defaultLogger
    defaultLogger = logger
    lib.mLogSetDefaultLogger(logger._native)
class Logger(object):
    """Python-side mGBA logger; subclass and override log() to customize."""

    # Log level constants mirrored from the native library.
    FATAL = lib.mLOG_FATAL
    DEBUG = lib.mLOG_DEBUG
    INFO = lib.mLOG_INFO
    WARN = lib.mLOG_WARN
    ERROR = lib.mLOG_ERROR
    STUB = lib.mLOG_STUB
    GAME_ERROR = lib.mLOG_GAME_ERROR

    def __init__(self):
        # The handle lets native callbacks find this Python instance; the
        # native logger is freed when this object is collected.
        self._handle = ffi.new_handle(self)
        self._native = ffi.gc(lib.mLoggerPythonCreate(self._handle), lib.free)

    @staticmethod
    def categoryName(category):
        """Return the human-readable name of a native log category."""
        return ffi.string(lib.mLogCategoryName(category)).decode('UTF-8')

    def log(self, category, level, message):
        """Default sink: print "<category>: <message>" to stdout."""
        print("{}: {}".format(self.categoryName(category), message))
class NullLogger(Logger):
    """Logger that discards all messages (useful to silence test output)."""

    def log(self, category, level, message):
        pass
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/_builder.py | <filename>retro/cores/gba/src/platform/python/_builder.py
import cffi
import os, os.path
import shlex
import subprocess
import sys
# cffi build script state for the mgba._pylib extension module.
ffi = cffi.FFI()
pydir = os.path.dirname(os.path.abspath(__file__))
# This file lives in src/platform/python; srcdir/incdir point at the tree.
srcdir = os.path.join(pydir, "..", "..")
incdir = os.path.join(pydir, "..", "..", "..", "include")
# BINDIR should be the build directory (contains flags.h and libmgba).
bindir = os.environ.get("BINDIR", os.path.join(os.getcwd(), ".."))

# Preprocessor command and flags, overridable via the environment.
cpp = shlex.split(os.environ.get("CPP", "cc -E"))
cppflags = shlex.split(os.environ.get("CPPFLAGS", ""))
if __name__ == "__main__":
    # Extra preprocessor flags may be passed on the command line.
    cppflags.extend(sys.argv[1:])
cppflags.extend(["-I" + incdir, "-I" + srcdir, "-I" + bindir])
# Compile the bound C sources. `static` and `inline` are defined away here
# (presumably so symbols stay visible to the cdef declarations — confirm).
ffi.set_source("mgba._pylib", """
#define static
#define inline
#include "flags.h"
#define OPAQUE_THREADING
#include <mgba/core/cache-set.h>
#include <mgba-util/common.h>
#include <mgba/core/core.h>
#include <mgba/core/map-cache.h>
#include <mgba/core/log.h>
#include <mgba/core/mem-search.h>
#include <mgba/core/thread.h>
#include <mgba/core/version.h>
#include <mgba/debugger/debugger.h>
#include <mgba/gba/interface.h>
#include <mgba/internal/arm/arm.h>
#include <mgba/internal/debugger/cli-debugger.h>
#include <mgba/internal/gba/gba.h>
#include <mgba/internal/gba/input.h>
#include <mgba/internal/gba/renderers/cache-set.h>
#include <mgba/internal/lr35902/lr35902.h>
#include <mgba/internal/gb/gb.h>
#include <mgba/internal/gb/renderers/cache-set.h>
#include <mgba-util/png-io.h>
#include <mgba-util/vfs.h>
#define PYEXPORT
#include "platform/python/core.h"
#include "platform/python/log.h"
#include "platform/python/sio.h"
#include "platform/python/vfs-py.h"
#undef PYEXPORT
""", include_dirs=[incdir, srcdir],
    extra_compile_args=cppflags,
    libraries=["mgba"],
    library_dirs=[bindir],
    sources=[os.path.join(pydir, path) for path in ["vfs-py.c", "core.c", "log.c", "sio.c"]])
def _preprocess(header):
    """Run the C preprocessor over ``header`` (relative to pydir)."""
    return subprocess.check_output(
        cpp + ["-fno-inline", "-P"] + cppflags + [os.path.join(pydir, header)],
        universal_newlines=True)

def _strip_directives(source):
    """Strip whitespace and drop remaining '#' directive lines."""
    lines = []
    for line in source.splitlines():
        line = line.strip()
        if line.startswith('#'):
            continue
        lines.append(line)
    return '\n'.join(lines)

# The cdef and embedding-API declarations were previously produced by two
# duplicated preprocess/strip loops; they now share the helpers above.
ffi.cdef(_strip_directives(_preprocess("_builder.h")))
ffi.embedding_api(_strip_directives(_preprocess("lib.h")))
ffi.embedding_init_code("""
import os, os.path
venv = os.getenv('VIRTUAL_ENV')
if venv:
activate = os.path.join(venv, 'bin', 'activate_this.py')
exec(compile(open(activate, "rb").read(), activate, 'exec'), dict(__file__=activate))
from mgba._pylib import ffi, lib
symbols = {}
globalSyms = {
'symbols': symbols
}
pendingCode = []
@ffi.def_extern()
def mPythonSetDebugger(debugger):
from mgba.debugger import NativeDebugger, CLIDebugger
oldDebugger = globalSyms.get('debugger')
if oldDebugger and oldDebugger._native == debugger:
return
if oldDebugger and not debugger:
del globalSyms['debugger']
return
if debugger.type == lib.DEBUGGER_CLI:
debugger = CLIDebugger(debugger)
else:
debugger = NativeDebugger(debugger)
globalSyms['debugger'] = debugger
@ffi.def_extern()
def mPythonLoadScript(name, vf):
from mgba.vfs import VFile
vf = VFile(vf)
name = ffi.string(name)
source = vf.readAll().decode('utf-8')
try:
code = compile(source, name, 'exec')
pendingCode.append(code)
except:
return False
return True
@ffi.def_extern()
def mPythonRunPending():
global pendingCode
for code in pendingCode:
exec(code, globalSyms, {})
pendingCode = []
@ffi.def_extern()
def mPythonDebuggerEntered(reason, info):
debugger = globalSyms['debugger']
if not debugger:
return
if info == ffi.NULL:
info = None
for cb in debugger._cbs:
cb(reason, info)
@ffi.def_extern()
def mPythonLookupSymbol(name, outptr):
name = ffi.string(name).decode('utf-8')
if name not in symbols:
return False
sym = symbols[name]
val = None
try:
val = int(sym)
except:
try:
val = sym()
except:
pass
if val is None:
return False
try:
outptr[0] = ffi.cast('int32_t', val)
return True
except:
return False
""")
# When run as a script, emit the C source for the embedding library.
if __name__ == "__main__":
    ffi.emit_c_code("lib.c")
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/debugger.py | # Copyright (c) 2013-2017 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from .core import IRunner, ICoreOwner, Core
import io
import sys
class DebuggerCoreOwner(ICoreOwner):
    """Core owner that pauses a running debugger while the core is borrowed.

    If the debugger was running at claim() time it is resumed on release();
    otherwise it is left untouched.
    """

    def __init__(self, debugger):
        self.debugger = debugger
        self.wasPaused = False

    def claim(self):
        if not self.debugger.isRunning():
            return self.debugger._core
        self.wasPaused = True
        self.debugger.pause()
        return self.debugger._core

    def release(self):
        if not self.wasPaused:
            return
        self.debugger.unpause()
class NativeDebugger(IRunner):
    """Wrapper around a native mDebugger, controlling its run state,
    breakpoints and watchpoints."""

    WATCHPOINT_WRITE = lib.WATCHPOINT_WRITE
    WATCHPOINT_READ = lib.WATCHPOINT_READ
    WATCHPOINT_RW = lib.WATCHPOINT_RW

    BREAKPOINT_HARDWARE = lib.BREAKPOINT_HARDWARE
    BREAKPOINT_SOFTWARE = lib.BREAKPOINT_SOFTWARE

    # Reasons the debugger can be entered, mirrored from the native enum.
    ENTER_MANUAL = lib.DEBUGGER_ENTER_MANUAL
    ENTER_ATTACHED = lib.DEBUGGER_ENTER_ATTACHED
    ENTER_BREAKPOINT = lib.DEBUGGER_ENTER_BREAKPOINT
    ENTER_WATCHPOINT = lib.DEBUGGER_ENTER_WATCHPOINT
    ENTER_ILLEGAL_OP = lib.DEBUGGER_ENTER_ILLEGAL_OP

    def __init__(self, native):
        self._native = native
        self._cbs = []  # callbacks invoked when the debugger is entered
        self._core = Core._detect(native.core)
        self._core._load()

    def pause(self):
        lib.mDebuggerEnter(self._native, lib.DEBUGGER_ENTER_MANUAL, ffi.NULL)

    def unpause(self):
        self._native.state = lib.DEBUGGER_RUNNING

    def isRunning(self):
        return self._native.state == lib.DEBUGGER_RUNNING

    def isPaused(self):
        return self._native.state in (lib.DEBUGGER_PAUSED, lib.DEBUGGER_CUSTOM)

    def useCore(self):
        """Return a context manager that pauses this debugger while the
        core is in use."""
        return DebuggerCoreOwner(self)

    def setBreakpoint(self, address):
        if not self._native.platform.setBreakpoint:
            raise RuntimeError("Platform does not support breakpoints")
        self._native.platform.setBreakpoint(self._native.platform, address)

    def clearBreakpoint(self, address):
        # BUG FIX: previously tested platform.setBreakpoint before calling
        # clearBreakpoint; check the capability that is actually used
        # (matching set/clearWatchpoint below).
        if not self._native.platform.clearBreakpoint:
            raise RuntimeError("Platform does not support breakpoints")
        self._native.platform.clearBreakpoint(self._native.platform, address)

    def setWatchpoint(self, address):
        if not self._native.platform.setWatchpoint:
            raise RuntimeError("Platform does not support watchpoints")
        self._native.platform.setWatchpoint(self._native.platform, address)

    def clearWatchpoint(self, address):
        if not self._native.platform.clearWatchpoint:
            raise RuntimeError("Platform does not support watchpoints")
        self._native.platform.clearWatchpoint(self._native.platform, address)

    def addCallback(self, cb):
        """Register ``cb(reason, info)`` to run when the debugger is entered."""
        self._cbs.append(cb)
class CLIBackend(object):
    """File-like adapter that forwards write() calls to a CLI debugger's
    printf(), so it can stand in for sys.stdout."""

    def __init__(self, backend):
        self.backend = backend

    def write(self, string):
        printf = self.backend.printf
        printf(string)
class CLIDebugger(NativeDebugger):
    """NativeDebugger specialization for the command-line frontend."""

    def __init__(self, native):
        super(CLIDebugger, self).__init__(native)
        # Same native pointer viewed as the CLI-specific struct.
        self._cli = ffi.cast("struct CLIDebugger*", native)

    def printf(self, message, *args, **kwargs):
        """Format ``message`` with str.format and emit it via the CLI backend."""
        message = message.format(*args, **kwargs)
        self._cli.backend.printf(ffi.new("char []", b"%s"), ffi.new("char []", message.encode('utf-8')))

    def installPrint(self):
        """Redirect sys.stdout into this debugger's output."""
        sys.stdout = CLIBackend(self)
|
MatPoliquin/retro | tests/data/test_roms.py | <reponame>MatPoliquin/retro<filename>tests/data/test_roms.py
from retro.testing import game, handle
import retro.data
import retro.testing.tools
def test_hash(game):
    """Verify the game's ROM hash against the recorded value."""
    errors = retro.data.verify_hash(*game)
    handle([], errors)

def test_hash_collisions():
    """Check that no two games share a ROM hash."""
    warnings, errors = retro.testing.tools.verify_hash_collisions()
    handle(warnings, errors)

def test_rom(game):
    """Run the ROM verification checks for one game."""
    warnings, errors = retro.testing.tools.verify_rom(*game)
    handle(warnings, errors)
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/test_cinema.py | import pytest
import cinema.test
import mgba.log
import os.path
import yaml
# Silence mGBA's default logging for the whole test session.
mgba.log.installDefault(mgba.log.NullLogger())
def flatten(d):
    """Return a sorted list of leaf tests from a (possibly nested) test tree."""
    leaves = []
    for child in d.tests.values():
        if child.tests:
            leaves.extend(flatten(child))
        else:
            leaves.append(child)
    leaves.sort()
    return leaves
def pytest_generate_tests(metafunc):
    """Parametrize the ``vtest`` fixture with every cinema test found on disk."""
    if 'vtest' in metafunc.fixturenames:
        tests = cinema.test.gatherTests(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'cinema'))
        testList = flatten(tests)
        params = []
        for test in testList:
            marks = []
            # Tests flagged with a 'fail' setting are expected failures; a
            # string value is used as the xfail reason.
            xfail = test.settings.get('fail')
            if xfail:
                marks = pytest.mark.xfail(reason=xfail if isinstance(xfail, str) else None)
            params.append(pytest.param(test, id=test.name, marks=marks))
        metafunc.parametrize('vtest', params, indirect=True)

@pytest.fixture
def vtest(request):
    # Indirect parametrization: the param is the cinema test object itself.
    return request.param
def test_video(vtest, pytestconfig):
    """Run one cinema video test.

    ``--rebaseline`` regenerates the baseline instead of comparing;
    ``--mark-succeeding`` updates manifest.yml for tests that were marked
    failing but now pass.
    """
    vtest.setUp()
    if pytestconfig.getoption('--rebaseline'):
        vtest.generateBaseline()
    else:
        # FIX: a no-op ``try/except IOError: raise`` wrapper was removed;
        # any exception propagates identically without it.
        vtest.test()
        if pytestconfig.getoption('--mark-succeeding') and 'fail' in vtest.settings:
            # TODO: This can fail if an entire directory is marked as failing
            settings = {}
            try:
                with open(os.path.join(vtest.path, 'manifest.yml'), 'r') as f:
                    # FIX: an empty manifest makes safe_load return None;
                    # fall back to {} (matches update.py's handling).
                    settings = yaml.safe_load(f) or {}
            except IOError:
                pass
            if 'fail' in settings:
                del settings['fail']
            else:
                settings['fail'] = False
            if settings:
                with open(os.path.join(vtest.path, 'manifest.yml'), 'w') as f:
                    yaml.dump(settings, f, default_flow_style=False)
            else:
                os.remove(os.path.join(vtest.path, 'manifest.yml'))
|
MatPoliquin/retro | retro/cores/gba/cinema/gb/mooneye-gb/update.py | #!/usr/bin/env python
import os
import os.path
import shutil
import yaml
from cinema.util import dictMerge
# Maps filename suffix tokens (e.g. "foo-C.gb", "foo-sgb.gb") to the Game
# Boy model name written into the test's manifest as gb.model.
suffixes = {
    'C': 'CGB',
    'S': 'SGB',
    'A': 'AGB',
    'mgb': 'MGB',
    'sgb': 'SGB',
    'sgb2': 'SGB2',
    'cgb': 'CGB',
    'agb': 'AGB',
    'ags': 'AGB',
}
def ingestDirectory(path, dest):
    """Copy mooneye-gb test ROMs and symbol files from ``path`` into ``dest``.

    Each test lands in its own directory as ``test.gb``/``test.sym``; when
    the filename carries a model suffix (see ``suffixes``), the matching
    ``gb.model`` config is merged into that test's manifest.yml.
    """
    for root, _, files in os.walk(path, topdown=False):
        # Make root relative to the source directory.
        root = root[len(os.path.commonprefix([root, path])):]
        if root.startswith('utils'):
            continue
        for file in files:
            fname, ext = os.path.splitext(file)
            if ext not in ('.gb', '.sym'):
                continue
            try:
                os.makedirs(os.path.join(dest, root, fname))
            except OSError:
                pass  # directory already exists
            # FIX: a redundant re-check of ``ext in ('.gb', '.sym')`` —
            # always true after the filter above — was removed.
            shutil.copy(os.path.join(path, root, file), os.path.join(dest, root, fname, 'test' + ext))
            for suffix, model in suffixes.items():
                if fname.endswith('-' + suffix):
                    manifest = {}
                    try:
                        with open(os.path.join(dest, root, fname, 'manifest.yml'), 'r') as f:
                            manifest = yaml.safe_load(f) or {}
                    except IOError:
                        pass
                    dictMerge(manifest, {
                        'config': {
                            'gb.model': model
                        }
                    })
                    with open(os.path.join(dest, root, fname, 'manifest.yml'), 'w') as f:
                        yaml.dump(manifest, f)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Update mooneye-gb test suite')
    parser.add_argument('source', type=str, help='directory containing built tests')
    # Default destination is the directory this script lives in.
    parser.add_argument('dest', type=str, nargs='?', default=os.path.dirname(__file__), help='directory to contain ingested tests')
    args = parser.parse_args()
    ingestDirectory(args.source, args.dest)
|
MatPoliquin/retro | scripts/playback_movie.py | #!/usr/bin/env python
# Thin launcher for the movie-playback tool.
from retro.scripts.playback_movie import main

# FIX: guard the call so merely importing this script does not run it
# (matches the other entry points in this package).
if __name__ == '__main__':
    main()
|
MatPoliquin/retro | retro/enums.py | from enum import Enum
class State(Enum):
    """
    Special values for setting the restart state of the environment. You can
    also specify a string that is the name of the ``.state`` file
    """

    #: Start the game at the default savestate from ``metadata.json``
    DEFAULT = -1
    #: Start the game at the power on screen for the emulator
    NONE = 0
class Observations(Enum):
    """
    Different settings for the observation space of the environment
    """

    #: Use RGB image observations
    IMAGE = 0
    #: Use RAM observations where you can see the memory of the game instead of the screen
    RAM = 1
class Actions(Enum):
    """
    Different settings for the action space of the environment
    """

    #: MultiBinary action space with no filtered actions
    ALL = 0
    #: MultiBinary action space with invalid or not allowed actions filtered out
    FILTERED = 1
    #: Discrete action space for filtered actions
    DISCRETE = 2
    #: MultiDiscete action space for filtered actions
    MULTI_DISCRETE = 3
MatPoliquin/retro | retro/cores/gba/src/platform/python/tests/mgba/test_vfs.py | <filename>retro/cores/gba/src/platform/python/tests/mgba/test_vfs.py
import pytest
import os
import mgba.vfs as vfs
from mgba._pylib import ffi
def test_vfs_open():
    """A VFile can wrap an already-open Python file object."""
    with open(__file__) as f:
        vf = vfs.open(f)
        assert vf
        assert vf.close()

def test_vfs_openPath():
    """A VFile can be opened directly from a filesystem path."""
    vf = vfs.openPath(__file__)
    assert vf
    assert vf.close()

def test_vfs_read():
    """read() fills a caller-supplied buffer and returns the byte count."""
    vf = vfs.openPath(__file__)
    buffer = ffi.new('char[13]')
    assert vf.read(buffer, 13) == 13
    assert ffi.string(buffer) == b'import pytest'
    vf.close()

def test_vfs_readline():
    """readline() reads one line including its newline (LF or CRLF)."""
    vf = vfs.openPath(__file__)
    buffer = ffi.new('char[16]')
    linelen = vf.readline(buffer, 16)
    # Length depends on the checkout's line-ending convention.
    assert linelen in (14, 15)
    if linelen == 14:
        assert ffi.string(buffer) == b'import pytest\n'
    elif linelen == 15:
        assert ffi.string(buffer) == b'import pytest\r\n'
    vf.close()

def test_vfs_readAllSize():
    """readAll() returns the whole file and its length matches size()."""
    vf = vfs.openPath(__file__)
    buffer = vf.readAll()
    assert buffer
    assert len(buffer)
    assert len(buffer) == vf.size()
    vf.close()

def test_vfs_seek():
    """seek() honours SEEK_SET/SEEK_CUR/SEEK_END semantics."""
    vf = vfs.openPath(__file__)
    assert vf.seek(0, os.SEEK_SET) == 0
    assert vf.seek(1, os.SEEK_SET) == 1
    assert vf.seek(1, os.SEEK_CUR) == 2
    assert vf.seek(-1, os.SEEK_CUR) == 1
    assert vf.seek(0, os.SEEK_CUR) == 1
    assert vf.seek(0, os.SEEK_END) == vf.size()
    assert vf.seek(-1, os.SEEK_END) == vf.size() - 1
    vf.close()

def test_vfs_openPath_invalid():
    """Opening a nonexistent path yields a falsy VFile."""
    vf = vfs.openPath('.invalid')
    assert not vf
|
MatPoliquin/retro | scripts/import.py | <gh_stars>1000+
#!/usr/bin/env python
# Thin launcher for the ROM importer.
from retro.scripts.import_path import main

# FIX: guard the call so merely importing this script has no side effects
# (matches the guard in retro/scripts/import_path.py itself).
if __name__ == '__main__':
    main()
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/tile.py | # Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import image
class Tile:
    """An 8x8 tile backed by a native color buffer."""

    def __init__(self, data):
        self.buffer = data

    def toImage(self):
        """Render this tile into a new 8x8 Image."""
        i = image.Image(8, 8)
        self.composite(i, 0, 0)
        return i

    def composite(self, i, x, y):
        """Blit this tile into image ``i`` at pixel offset (x, y), row by row."""
        for iy in range(8):
            ffi.memmove(ffi.addressof(i.buffer, x + (iy + y) * i.stride), ffi.addressof(self.buffer, iy * 8), 8 * ffi.sizeof("color_t"))
class CacheSet:
    """Owns a native mCacheSet for a core; freed via the core's _deinitCache."""

    def __init__(self, core):
        self.core = core
        self.cache = ffi.gc(ffi.new("struct mCacheSet*"), core._deinitCache)
        core._initCache(self.cache)
class TileView:
    """Read-only view over a native tile cache."""

    def __init__(self, cache):
        self.cache = cache

    def getTile(self, tile, palette):
        """Return the Tile at index ``tile`` rendered with palette ``palette``."""
        return Tile(lib.mTileCacheGetTile(self.cache, tile, palette))
class MapView:
    """Read-only view over a native map cache."""

    def __init__(self, cache):
        self.cache = cache

    @property
    def width(self):
        """Map width in tiles."""
        return 1 << lib.mMapCacheSystemInfoGetTilesWide(self.cache.sysConfig)

    @property
    def height(self):
        """Map height in tiles."""
        return 1 << lib.mMapCacheSystemInfoGetTilesHigh(self.cache.sysConfig)

    @property
    def image(self):
        """Render the whole map into a new Image (8 pixels per tile)."""
        i = image.Image(self.width * 8, self.height * 8, alpha=True)
        for y in range(self.height * 8):
            # Refresh the cache once per 8-pixel tile row.
            if not y & 7:
                lib.mMapCacheCleanRow(self.cache, y >> 3)
            row = lib.mMapCacheGetRow(self.cache, y)
            ffi.memmove(ffi.addressof(i.buffer, i.stride * y), row, self.width * 8 * ffi.sizeof("color_t"))
        return i
class Sprite(object):
    """Mixin for sprite-like objects; expects width, height, tile and
    paletteId attributes to be provided by the concrete class."""

    def constitute(self, tileView, tilePitch):
        """Assemble this sprite's tiles into ``self.image``.

        ``tilePitch`` appears to be the tile-index stride between sprite
        rows in the cache, 0 meaning consecutive tiles — TODO confirm.
        """
        i = image.Image(self.width, self.height, alpha=True)
        tileId = self.tile
        for y in range(self.height // 8):
            for x in range(self.width // 8):
                tile = tileView.getTile(tileId, self.paletteId)
                tile.composite(i, x * 8, y * 8)
                tileId += 1
            if tilePitch:
                # Skip to the start of the next row of tiles.
                tileId += tilePitch - self.width // 8
        self.image = i
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/mgba/memory.py | <reponame>MatPoliquin/retro
# Copyright (c) 2013-2016 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
class MemoryView(object):
    """Bus-mapped view of emulated memory with list-like indexing.

    ``width`` is the access size in bytes (1, 2 or 4); ``sign`` selects
    whether reads are interpreted as unsigned ("u") or signed ("i"/"s").
    """

    def __init__(self, core, width, size, base=0, sign="u"):
        self._core = core
        self._width = width
        self._size = size
        self._base = base
        self._busRead = getattr(self._core, "busRead" + str(width * 8))
        self._busWrite = getattr(self._core, "busWrite" + str(width * 8))
        self._rawRead = getattr(self._core, "rawRead" + str(width * 8))
        self._rawWrite = getattr(self._core, "rawWrite" + str(width * 8))
        self._mask = (1 << (width * 8)) - 1  # Used to force values to fit within range so that negative values work
        if sign == "u" or sign == "unsigned":
            self._type = "uint{}_t".format(width * 8)
        elif sign == "i" or sign == "s" or sign == "signed":
            self._type = "int{}_t".format(width * 8)
        else:
            raise ValueError("Invalid sign type: '{}'".format(sign))

    def _addrCheck(self, address):
        """Raise IndexError unless ``address`` (int or slice) is in bounds."""
        if isinstance(address, slice):
            start = address.start or 0
            stop = self._size - self._width if address.stop is None else address.stop
        else:
            start = address
            stop = address + self._width
        if start >= self._size or stop > self._size:
            raise IndexError()
        if start < 0 or stop < 0:
            raise IndexError()

    def __len__(self):
        return self._size

    def __getitem__(self, address):
        self._addrCheck(address)
        if isinstance(address, slice):
            start = address.start or 0
            stop = self._size - self._width if address.stop is None else address.stop
            step = address.step or self._width
            return [int(ffi.cast(self._type, self._busRead(self._core, self._base + a))) for a in range(start, stop, step)]
        else:
            return int(ffi.cast(self._type, self._busRead(self._core, self._base + address)))

    def __setitem__(self, address, value):
        self._addrCheck(address)
        if isinstance(address, slice):
            start = address.start or 0
            stop = self._size - self._width if address.stop is None else address.stop
            step = address.step or self._width
            # BUG FIX: values were previously indexed by the memory address
            # (value[a]) rather than by position, so a slice not starting at
            # 0 (or with width > 1) read wrong elements or raised IndexError.
            # Pair each target address with the next value; extra addresses
            # are left untouched if fewer values are supplied.
            for a, v in zip(range(start, stop, step), value):
                self._busWrite(self._core, self._base + a, v & self._mask)
        else:
            self._busWrite(self._core, self._base + address, value & self._mask)

    def rawRead(self, address, segment=-1):
        """Read via the native raw accessor, optionally from ``segment``."""
        self._addrCheck(address)
        return int(ffi.cast(self._type, self._rawRead(self._core, self._base + address, segment)))

    def rawWrite(self, address, value, segment=-1):
        """Write via the native raw accessor, optionally into ``segment``."""
        self._addrCheck(address)
        self._rawWrite(self._core, self._base + address, segment, value & self._mask)
class MemorySearchResult(object):
    """One hit from Memory.search(), with read/write access to the value."""

    def __init__(self, memory, result):
        self.address = result.address
        self.segment = result.segment
        self.guessDivisor = result.guessDivisor
        self.type = result.type
        # Pick the memory view matching the result's element width.
        # NOTE(review): Memory in this file defines no SEARCH_8/16/32
        # attributes, so this lookup would raise AttributeError at runtime —
        # confirm against the native search-type enum.
        if result.type == Memory.SEARCH_8:
            self._memory = memory.u8
        elif result.type == Memory.SEARCH_16:
            self._memory = memory.u16
        elif result.type == Memory.SEARCH_32:
            self._memory = memory.u32
        elif result.type == Memory.SEARCH_STRING:
            self._memory = memory.u8
        else:
            raise ValueError("Unknown type: %X" % result.type)

    @property
    def value(self):
        """Current value at the matched address (unavailable for string hits)."""
        if self.type == Memory.SEARCH_STRING:
            raise ValueError
        return self._memory[self.address] * self.guessDivisor

    @value.setter
    def value(self, v):
        if self.type == Memory.SEARCH_STRING:
            raise IndexError
        self._memory[self.address] = v // self.guessDivisor
class Memory(object):
    """Handle to a core's bus memory: typed views plus value search."""

    SEARCH_INT = lib.mCORE_MEMORY_SEARCH_INT
    SEARCH_STRING = lib.mCORE_MEMORY_SEARCH_STRING
    SEARCH_GUESS = lib.mCORE_MEMORY_SEARCH_GUESS
    SEARCH_EQUAL = lib.mCORE_MEMORY_SEARCH_EQUAL

    READ = lib.mCORE_MEMORY_READ
    # BUG FIX: WRITE previously aliased mCORE_MEMORY_READ (copy-paste error).
    WRITE = lib.mCORE_MEMORY_WRITE
    RW = lib.mCORE_MEMORY_RW

    def __init__(self, core, size, base=0):
        self.size = size
        self.base = base
        self._core = core

        # Unsigned and signed typed views over the same address range.
        self.u8 = MemoryView(core, 1, size, base, "u")
        self.u16 = MemoryView(core, 2, size, base, "u")
        self.u32 = MemoryView(core, 4, size, base, "u")
        self.s8 = MemoryView(core, 1, size, base, "s")
        self.s16 = MemoryView(core, 2, size, base, "s")
        self.s32 = MemoryView(core, 4, size, base, "s")

    def __len__(self):
        # BUG FIX: previously returned self._size, which is never assigned
        # (the constructor sets self.size) and always raised AttributeError.
        return self.size

    def search(self, value, type=SEARCH_GUESS, flags=RW, limit=10000, old_results=None):
        """Search memory for ``value`` and return MemorySearchResult hits.

        Pass a previous result list as ``old_results`` to refine the search
        instead of starting from scratch. (``old_results`` previously used a
        mutable default []; it is only iterated, but None is safer.)
        """
        old_results = old_results or []
        results = ffi.new("struct mCoreMemorySearchResults*")
        lib.mCoreMemorySearchResultsInit(results, len(old_results))
        params = ffi.new("struct mCoreMemorySearchParams*")
        params.memoryFlags = flags
        params.type = type
        params.op = self.SEARCH_EQUAL
        if type == self.SEARCH_INT:
            params.valueInt = int(value)
        else:
            params.valueStr = ffi.new("char[]", str(value).encode("ascii"))
        for result in old_results:
            r = lib.mCoreMemorySearchResultsAppend(results)
            r.address = result.address
            r.segment = result.segment
            r.guessDivisor = result.guessDivisor
            r.type = result.type
        if old_results:
            lib.mCoreMemorySearchRepeat(self._core, params, results)
        else:
            lib.mCoreMemorySearch(self._core, params, results, limit)
        new_results = [MemorySearchResult(self, lib.mCoreMemorySearchResultsGetPointer(results, i)) for i in range(lib.mCoreMemorySearchResultsSize(results))]
        lib.mCoreMemorySearchResultsDeinit(results)
        return new_results

    def __getitem__(self, address):
        """Byte access: int -> int, slice -> bytearray."""
        if isinstance(address, slice):
            return bytearray(self.u8[address])
        else:
            return self.u8[address]
|
MatPoliquin/retro | retro/scripts/import_path.py | #!/usr/bin/env python
import retro.data
import os
import sys
import zipfile
def _check_zipfile(f, process_f):
with zipfile.ZipFile(f) as zf:
for entry in zf.infolist():
_root, ext = os.path.splitext(entry.filename)
with zf.open(entry) as innerf:
if ext == ".zip":
_check_zipfile(innerf, process_f)
else:
process_f(entry.filename, innerf)
def main():
    """Scan the given paths (default: cwd) for ROM files whose hash matches
    a known game and copy each into that game's data directory."""
    paths = sys.argv[1:] or ['.']
    known_hashes = retro.data.get_known_hashes()
    imported_games = 0

    def save_if_matches(filename, f):
        # Writes the groomed ROM into the game's data dir when its hash is known.
        nonlocal imported_games
        try:
            data, hash = retro.data.groom_rom(filename, f)
        except (IOError, ValueError):
            # Unreadable or unrecognized file: skip silently.
            return
        if hash in known_hashes:
            game, ext, curpath = known_hashes[hash]
            print('Importing', game)
            with open(os.path.join(curpath, game, 'rom%s' % ext), 'wb') as f:
                f.write(data)
            imported_games += 1

    for path in paths:
        for root, dirs, files in os.walk(path):
            for filename in files:
                filepath = os.path.join(root, filename)
                with open(filepath, "rb") as f:
                    _root, ext = os.path.splitext(filename)
                    if ext == ".zip":
                        # Zip archives (possibly nested) are searched member by member.
                        try:
                            _check_zipfile(f, save_if_matches)
                        except zipfile.BadZipFile:
                            pass
                    else:
                        save_if_matches(filename, f)

    print('Imported %i games' % imported_games)

if __name__ == '__main__':
    main()
|
MatPoliquin/retro | retro/examples/brute.py | <filename>retro/examples/brute.py
"""
Implementation of the Brute from "Revisiting the Arcade Learning Environment:
Evaluation Protocols and Open Problems for General Agents" by Machado et al.
https://arxiv.org/abs/1709.06009
This is an agent that uses the determinism of the environment in order to do
pretty well at a number of retro games. It does not save emulator state but
does rely on the same sequence of actions producing the same result when played
back.
"""
import random
import argparse
import numpy as np
import retro
import gym
EXPLORATION_PARAM = 0.005
class Frameskip(gym.Wrapper):
    """Repeat each action for up to ``skip`` frames, summing the rewards and
    stopping early if the episode ends."""

    def __init__(self, env, skip=4):
        super().__init__(env)
        self._skip = skip

    def reset(self):
        return self.env.reset()

    def step(self, act):
        total = 0.0
        done = None
        for _ in range(self._skip):
            obs, rew, done, info = self.env.step(act)
            total += rew
            if done:
                break
        return obs, total, done, info
class TimeLimit(gym.Wrapper):
    """End episodes after ``max_episode_steps`` steps, flagging the
    truncation in ``info['TimeLimit.truncated']``."""

    def __init__(self, env, max_episode_steps=None):
        super().__init__(env)
        self._max_episode_steps = max_episode_steps
        self._elapsed_steps = 0

    def step(self, ac):
        observation, reward, done, info = self.env.step(ac)
        self._elapsed_steps += 1
        # BUG FIX: guard against max_episode_steps=None (the constructor's
        # default), which previously raised TypeError on the comparison.
        if self._max_episode_steps is not None and self._elapsed_steps >= self._max_episode_steps:
            done = True
            info['TimeLimit.truncated'] = True
        return observation, reward, done, info

    def reset(self, **kwargs):
        self._elapsed_steps = 0
        return self.env.reset(**kwargs)
class Node:
    """Node in the Brute's action tree: best reward seen beneath it, a visit
    count, and children keyed by action."""

    def __init__(self, value=-np.inf, children=None):
        self.value = value
        self.visits = 0
        self.children = children if children is not None else {}

    def __repr__(self):
        return "<Node value=%f visits=%d len(children)=%d>" % (
            self.value,
            self.visits,
            len(self.children),
        )
def select_actions(root, action_space, max_episode_steps):
    """
    Select actions from the tree

    Normally we select the greedy action that has the highest reward
    associated with that subtree. We have a small chance to select a
    random action based on the exploration param and visit count of the
    current node at each step.

    We select actions for the longest possible episode, but normally these
    will not all be used. They will instead be truncated to the length
    of the actual episode and then used to update the tree.
    """
    node = root

    acts = []
    steps = 0
    while steps < max_episode_steps:
        if node is None:
            # we've fallen off the explored area of the tree, just select random actions
            act = action_space.sample()
        else:
            # Exploration probability shrinks as this node is visited more.
            epsilon = EXPLORATION_PARAM / np.log(node.visits + 2)
            if random.random() < epsilon:
                # random action
                act = action_space.sample()
            else:
                # greedy action
                act_value = {}
                for act in range(action_space.n):
                    if node is not None and act in node.children:
                        act_value[act] = node.children[act].value
                    else:
                        act_value[act] = -np.inf
                best_value = max(act_value.values())
                best_acts = [
                    act for act, value in act_value.items() if value == best_value
                ]
                # Break ties between equally good actions at random.
                act = random.choice(best_acts)

            if act in node.children:
                node = node.children[act]
            else:
                node = None

        acts.append(act)
        steps += 1

    return acts
def rollout(env, acts):
    """
    Perform a rollout using a preset collection of actions
    """
    env.reset()
    total_rew = 0
    executed = 0
    for act in acts:
        _obs, rew, done, _info = env.step(act)
        executed += 1
        total_rew += rew
        if done:
            break
    return executed, total_rew
def update_tree(root, executed_acts, total_rew):
    """
    Given the tree, a list of actions that were executed before the game ended, and a reward, update the tree
    so that the path formed by the executed actions are all updated to the new reward.
    """
    root.value = max(total_rew, root.value)
    root.visits += 1
    created = 0
    node = root
    for act in executed_acts:
        child = node.children.get(act)
        if child is None:
            child = Node()
            node.children[act] = child
            created += 1
        node = child
        node.value = max(total_rew, node.value)
        node.visits += 1
    return created
class Brute:
    """
    Implementation of the Brute

    Creates and manages the tree storing game actions and rewards
    """

    def __init__(self, env, max_episode_steps):
        self.node_count = 1
        self._root = Node()
        self._env = env
        self._max_episode_steps = max_episode_steps

    def run(self):
        """Play one tree-guided episode, update the tree with its reward, and
        return (executed_actions, total_reward)."""
        acts = select_actions(self._root, self._env.action_space, self._max_episode_steps)
        steps, total_rew = rollout(self._env, acts)
        # Only the actions actually executed before the episode ended count.
        executed_acts = acts[:steps]
        self.node_count += update_tree(self._root, executed_acts, total_rew)
        return executed_acts, total_rew
def brute_retro(
    game,
    max_episode_steps=4500,
    timestep_limit=1e8,
    state=retro.State.DEFAULT,
    scenario=None,
):
    """Run the Brute on a retro game until ``timestep_limit`` steps have been
    played, recording each new best episode to best.bk2."""
    env = retro.make(game, state, use_restricted_actions=retro.Actions.DISCRETE, scenario=scenario)
    env = Frameskip(env)
    env = TimeLimit(env, max_episode_steps=max_episode_steps)

    brute = Brute(env, max_episode_steps=max_episode_steps)
    timesteps = 0
    best_rew = float('-inf')
    while True:
        acts, rew = brute.run()
        timesteps += len(acts)

        if rew > best_rew:
            print("new best reward {} => {}".format(best_rew, rew))
            best_rew = rew
            # Replay the best action sequence while recording a movie.
            env.unwrapped.record_movie("best.bk2")
            env.reset()
            for act in acts:
                env.step(act)
            env.unwrapped.stop_record()

        if timesteps > timestep_limit:
            print("timestep limit exceeded")
            break
def main():
    """CLI entry point: pick game/state/scenario and run the Brute."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', default='Airstriker-Genesis')
    parser.add_argument('--state', default=retro.State.DEFAULT)
    parser.add_argument('--scenario', default=None)
    args = parser.parse_args()

    brute_retro(game=args.game, state=args.state, scenario=args.scenario)

if __name__ == "__main__":
    main()
|
MatPoliquin/retro | retro/cores/gba/src/platform/python/cinema/movie.py | <reponame>MatPoliquin/retro
from mgba.image import Image
from collections import namedtuple
from . import VideoFrame
# Streams produced by Tracer.output(); currently only a video frame iterator.
Output = namedtuple('Output', ['video'])
class Tracer(object):
    """Drives a core frame by frame, capturing each rendered frame."""

    def __init__(self, core):
        self.core = core
        self.fb = Image(*core.desiredVideoDimensions())
        self.core.setVideoBuffer(self.fb)
        self._videoFifo = []

    def yieldFrames(self, skip=0, limit=None):
        """Reset the core and yield frame numbers, buffering a VideoFrame
        for each frame run; ``skip`` frames are run without capturing and
        ``limit`` bounds how many frames are captured (None = unbounded)."""
        self.core.reset()
        skip = (skip or 0) + 1
        while skip > 0:
            frame = self.core.frameCounter
            self.core.runFrame()
            skip -= 1
        while frame <= self.core.frameCounter and limit != 0:
            self._videoFifo.append(VideoFrame(self.fb.toPIL()))
            yield frame
            frame = self.core.frameCounter
            self.core.runFrame()
            if limit is not None:
                assert limit >= 0
                limit -= 1

    def video(self, generator=None, **kwargs):
        """Yield buffered VideoFrames, pulling from ``generator`` as needed."""
        if not generator:
            generator = self.yieldFrames(**kwargs)
        try:
            while True:
                if self._videoFifo:
                    result = self._videoFifo[0]
                    self._videoFifo = self._videoFifo[1:]
                    yield result
                else:
                    next(generator)
        except StopIteration:
            return

    def output(self, **kwargs):
        """Return an Output namedtuple whose ``video`` member streams frames."""
        generator = self.yieldFrames(**kwargs)
        # BUG FIX: this returned mCoreOutput(...), a name that is not defined
        # anywhere in this module; the namedtuple declared above is Output.
        return Output(video=self.video(generator=generator, **kwargs))
|
MatPoliquin/retro | retro/testing/verify_changes.py | #!/usr/bin/env python
import pytest
import retro.testing as testdata
import subprocess
import sys

# Usage: verify_changes.py [base-branch [head-branch]]
# With no arguments, changed tests are computed against 'master'.
# NOTE(review): `subprocess` is imported but not used in this script.
if len(sys.argv) == 2:
    branches = [sys.argv[1]]
elif len(sys.argv) == 3:
    branches = [sys.argv[1], sys.argv[2]]
else:
    branches = ['master']

# Names of the test cases affected by the branch diff.
check = testdata.branch_new(*branches)
if check:
    # Run only the affected tests, quietly, via pytest's -k expression.
    args = ['-q', '--tb=no', '--disable-warnings', '-k', ' or '.join(check)]
    pytest.main(args)

# ANSI-coloured summary: red for errors, yellow for warnings.
for context, error in testdata.errors:
    print('\33[31mE: %s: %s\33[0m' % (context, error))
for context, warning in testdata.warnings:
    print('\33[33mW: %s: %s\33[0m' % (context, warning))

# Non-zero exit when any data error was recorded.
if testdata.errors:
    sys.exit(1)
|
wieden-kennedy/haikus | haikus/haikutext.py | <reponame>wieden-kennedy/haikus
"""
Classes and utilities for extracting haiku from arbitrary text and evaluating them based on some programmatically
defined criteria
"""
import nltk
import string
from nltk.corpus import cmudict
from nltk_util import syllables_en
from haikus.evaluators import DEFAULT_HAIKU_EVALUATORS
# Shared CMU pronouncing dictionary used for syllable lookups; the corpus
# is downloaded on first use if it is not already installed.
# FIX: removed the preceding `global WORD_DICT` statement -- `global` is a
# no-op at module scope.
try:
    WORD_DICT = cmudict.dict()
except LookupError:
    nltk.download('cmudict')
    WORD_DICT = cmudict.dict()
class NonwordError(Exception):
    """Raised when a token yields no countable syllables."""
    pass
class HaikuText(object):
    """
    A wrapper around some sequence of text, able to locate 5-7-5 haikus
    in it by counting syllables word by word.
    """
    def __init__(self, text=None):
        self._text = text

    def get_text(self):
        """Return the wrapped text (may be None)."""
        return self._text

    def set_text(self, text):
        """Replace the wrapped text."""
        self._text = text

    def filtered_text(self):
        """
        Strip punctuation from this text.

        Apostrophes are preserved so contractions ("don't") survive.
        """
        exclude = set(string.punctuation).difference(set("'"))
        s = ''.join(ch for ch in self.get_text() if ch not in exclude)
        return s

    def filtered_word(self, word):
        """
        Strip punctuation from the given token so we can look it up in
        our word dictionary (apostrophes preserved).
        """
        exclude = set(string.punctuation).difference(set("'"))
        filtered = ''.join(ch for ch in word if ch not in exclude)
        return filtered

    def word_syllables(self, word, override_word=None):
        """
        Get the (syllable_count, word) tuple for `word` according to
        WORD_DICT, falling back to unknown_word_handler() for words the
        dictionary does not know (which may raise NonwordError).

        NOTE(review): `override_word` is unused; kept for backward
        compatibility with any existing callers.
        """
        # BUGFIX: on Python 3, .encode() yields bytes, which never match
        # the str keys of WORD_DICT; decode back after dropping non-ASCII.
        # Harmless on Python 2.
        word = word.encode('ascii', 'ignore').decode('ascii').strip().lower()
        try:
            matches = WORD_DICT[word]
            # Use the first pronunciation only; stressed phonemes end in a
            # digit, so counting them counts syllables.
            for tree in matches:
                return (len([phoneme for phoneme in tree if phoneme[-1].isdigit()]), word)
        except KeyError:
            return self.unknown_word_handler(word)

    def syllable_map(self):
        """
        Map each word in this text to a (syllable_count, word) tuple.

        Returns [] if any word turns out to have no syllables at all.
        """
        s = self.filtered_text()
        try:
            # BUGFIX: build the list eagerly. The previous lazy `map` (on
            # Python 3) deferred NonwordError past this try/except and the
            # returned map object could not be sliced by get_haikus().
            return [self.word_syllables(word) for word in s.split()]
        except NonwordError:
            return []

    def syllable_count(self):
        """
        Sum the syllable counts for all words in this text
        """
        return sum([t[0] for t in self.syllable_map()])

    def get_haiku(self):
        """
        Find a haiku at the beginning of the text; returns a Haiku or False.
        """
        syllable_map = self.syllable_map()
        return self.find_haiku(syllable_map)

    def get_haikus(self):
        """
        Find all haikus in the text, starting from each word offset that
        still has at least 17 syllables remaining.
        """
        haikus = []
        syllable_map = self.syllable_map()
        for i in range(len(syllable_map)):
            portion = syllable_map[i:]
            if (sum(word[0] for word in portion) >= 17):
                haiku = self.find_haiku(portion)
                if haiku:
                    haikus.append(haiku)
            else:
                break
        return haikus

    def find_haiku(self, syllable_map):
        """
        Find a haiku at the start of `syllable_map`.

        The running syllable total must pass exactly through 5, 12 and 17
        (5 + 7 + 5) for the leading words to split into three haiku lines.
        Returns a Haiku instance, or False when no haiku is present.
        """
        targets = [5, 12, 17]
        cumulative = [0]
        for w in syllable_map:
            cumulative.append(cumulative[-1] + w[0])
        cumulative = cumulative[1:]
        is_haiku = set(cumulative).intersection(targets) == set(targets)
        if is_haiku:
            # Map each cumulative total back to the index of the word that
            # produced it, so each line can be sliced out.
            lookup = dict((v, k) for k, v in enumerate(cumulative))
            enum_lookup = list(enumerate(lookup))
            start = 0
            lines = []
            for line in targets:
                section = syllable_map[start:lookup[line] + 1]
                words = [s[1] for s in section]
                lines.append(' '.join(words))
                try:
                    # Advance to the word after this line's last word.
                    start = enum_lookup[lookup[line] + 1][0]
                except IndexError:
                    # Ran off the end after the final line; harmless.
                    pass
            haiku = Haiku()
            haiku.set_lines(lines)
            return haiku
        else:
            return False

    def has_haiku(self):
        """
        Return True if this text contains a haiku
        """
        return self.get_haiku() is not False

    def unknown_word_handler(self, word):
        """
        Handle words outside of cmudict by estimating their syllable count;
        raises NonwordError when no syllables can be found.
        """
        syllable_count = syllables_en.count(self.filtered_word(word))
        if syllable_count > 0:
            return (syllable_count, word)
        else:
            raise NonwordError("%s has no syllables" % word)
class Haiku(object):
    """
    A simple wrapper for a haiku's three lines
    """
    def get_lines(self):
        """Return the stored list of lines."""
        return self._lines

    def set_lines(self, lines):
        """Store the haiku's lines (a list of strings)."""
        self._lines = lines

    def calculate_quality(self, evaluators=None):
        """
        Calculate this haiku's quality as the weighted average of the
        given (evaluator_class, weight) pairs' scores.

        When `evaluators` is omitted, the module-level
        DEFAULT_HAIKU_EVALUATORS list is used.
        """
        # BUGFIX: the previous default of None was iterated directly and
        # raised TypeError; fall back to the default evaluator list that
        # is imported (and was previously unused) at module level.
        if evaluators is None:
            evaluators = DEFAULT_HAIKU_EVALUATORS
        score = 0
        for evaluator_class, weight in evaluators:
            evaluator = evaluator_class(weight=weight)
            score += evaluator(self)
        try:
            score /= sum([weight for evaluator, weight in evaluators])
        except ZeroDivisionError:
            # No evaluators supplied: leave the score at 0.
            pass
        return score

    def line_end_bigrams(self):
        """
        Find the bigrams that occur across any two lines in this text's
        haiku
        """
        bigrams = ()
        lines = [line.split(" ") for line in self.get_lines()]
        try:
            bigrams = ((lines[0][-1], lines[1][0]),
                       (lines[1][-1], lines[2][0]))
        except IndexError:
            # Fewer than three lines: return empty placeholder pairs.
            return (['', ''], ['', ''])
        return bigrams

    def flattened_lines(self):
        """Return the haiku joined into a single space-separated string."""
        return ' '.join(self.get_lines())
|
wieden-kennedy/haikus | haikus/tests.py | <reponame>wieden-kennedy/haikus
import math
from unittest import TestCase
from haikus import HaikuText
from haikus.evaluators import HaikuEvaluator, NounVerbAdjectiveLineEndingEvaluator, \
JoiningWordLineEndingEvaluator, EndsInNounEvaluator, PrepositionCountEvaluator
class TestHaiku(TestCase):
    """Tests for Haiku.calculate_quality score aggregation."""
    def test_calculate_quality(self):
        # This sentence is a well-formed 5-7-5 haiku, so get_haiku()
        # returns a Haiku instance (not False).
        haiku = HaikuText(text="An old silent pond... A frog jumps into the pond. Splash! Silence again.").get_haiku()
        #some 'dummy' evaluators
        class MediocreHaikuEvaluator(HaikuEvaluator):
            def evaluate(self, haiku):
                return 50
        default = (HaikuEvaluator, 1)
        mediocre = (MediocreHaikuEvaluator, 1)
        #It's a haiku, check its quality
        self.assertEqual(haiku.calculate_quality(evaluators=[default,]), 100)
        #Evaluators are averaged
        self.assertEqual(haiku.calculate_quality(evaluators=[default, mediocre]), 150/2)
class EvaluatorsTest(TestCase):
    """Tests for the individual part-of-speech based haiku evaluators.

    NOTE(review): the expected values (e.g. 2*100/3 == 66) assume Python 2
    integer division semantics inside the evaluators -- verify when
    running under Python 3.
    """
    def test_line_ending_nva_evaluator(self):
        """
        Test that the line noun/verb/adjective ending part-of-speech evaluator gives the expected scores to haikus
        """
        pos_evaluator = NounVerbAdjectiveLineEndingEvaluator()
        #comment with 2 lines that end in noun/verbs
        text = HaikuText(text="An old silent pond... A frog jumps into the pond. Splash! Silence again.")
        haiku = text.get_haiku()
        #should score 66
        self.assertEqual(pos_evaluator(haiku), 66)

        # 1 verb, 1 noun, 1 pronoun
        text.set_text("Application is the most wonderful artist that man can show us")
        haiku = text.get_haiku()
        #should score 66
        self.assertEqual(pos_evaluator(haiku), 2*100/3)

        #No verbs/nouns at line ends,
        text.set_text("They jumped ship on us the boat is very never that man can show us")
        haiku = text.get_haiku()
        self.assertEqual(pos_evaluator(haiku), 0)

    def test_joining_words_line_ending_evaluator(self):
        """
        Test that the joining words line ending evaluator give the correct scores to haikus
        with and without "joining" words at the end of lines.
        """
        join_evaluator = JoiningWordLineEndingEvaluator()
        #comment with 2 lines that end in noun/verbs
        text = HaikuText(text="An old silent pond... A frog jumps into the pond. Splash! Silence again.")
        haiku = text.get_haiku()
        #should score 66
        self.assertEqual(join_evaluator(haiku), 100)

        # 2 good lines, one ending in is
        text.set_text("Application and the most wonderful artist that man can show us")
        haiku = text.get_haiku()
        #should score 66
        self.assertEqual(join_evaluator(haiku), 2*100/3)

        #No verbs/nouns at line ends,
        text.set_text("They jumped right on in the boat is never sunk and that man can show of")
        haiku = text.get_haiku()
        self.assertEqual(join_evaluator(haiku), 0)

    def test_ends_in_noun_evaluator(self):
        """
        Test that the EndsInNounEvaluator boosts the score of haikus that end in a noun
        """
        noun_evaluator = EndsInNounEvaluator()
        #Doesn't end in a noun
        text = HaikuText(text="An old silent pond... A frog jumps into the pond. Splash! Silence shopping")
        haiku = text.get_haiku()
        #should score 0
        self.assertEqual(noun_evaluator(haiku), 0)

        #Ends in a pronoun
        text.set_text("Application is the most wonderful artist that man can show us")
        haiku = text.get_haiku()
        #should score 100
        self.assertEqual(noun_evaluator(haiku), 100)

        #Ends in a noun
        text.set_text("Application is the most wonderful artist that man can show god")
        haiku = text.get_haiku()
        #should score 100
        self.assertEqual(noun_evaluator(haiku), 100)
class PrepositionalCountEvaluatorTest(TestCase):
    """
    Test the preposition count evaluator.
    """
    def setUp(self):
        self.comment_a = HaikuText(text="Dog in the floor mat, one onto the home for it, jump into the pool")
        self.comment_b = HaikuText(text="this is a new vogue, she always has a new vogue, she never repeats")

    def test_preposition_count(self):
        """
        Test A:
        Dog in the floor mat, one onto the home for it, jump into the pool
            **                    ****          ***          ****
        4 prepositions (in, onto, for, into)
        15 words
        """
        # FIX: use assertEqual -- the assertEquals alias is deprecated and
        # removed in Python 3.12.  Docstring count corrected to match the
        # asserted exp(4).
        assert self.comment_a.has_haiku() is True
        score = self.comment_a.get_haiku().calculate_quality(evaluators=[(PrepositionCountEvaluator, 1)])
        self.assertEqual(score, 100 - math.exp(4))
        """
        Test B:
        this is a new vogue, she always has a new vogue, she never repeats
        0 prepositions
        15 words
        """
        assert self.comment_b.has_haiku() is True
        score = self.comment_b.get_haiku().calculate_quality(evaluators=[(PrepositionCountEvaluator, 1)])
        self.assertEqual(score, 99)
class BigramExtraction(TestCase):
    """Test extraction of the word pairs straddling haiku line breaks."""
    def setUp(self):
        self.comment = HaikuText(text="Dog in the floor at, one onto the home for it, jump into the pool")
        self.haiku = self.comment.get_haiku()

    def test_bigram_extraction(self):
        # FIX: assertEquals is a deprecated alias, removed in Python 3.12.
        bigrams = self.haiku.line_end_bigrams()
        self.assertEqual((('at', 'one'), ('it', 'jump')), bigrams)
class UnknownWordHandling(TestCase):
    """Test syllable counting for words absent from cmudict."""
    def test_handle_unknown(self):
        haiku = HaikuText(text="this is a new vogue, she always has a new vogue, she never foobaz")
        #foobar is not in cmudict!
        from haikus.haikutext import WORD_DICT
        self.assertEqual(WORD_DICT.get("foobaz"), None)
        #however, we can count 2 syllables in it anyhow
        self.assertTrue((2, "foobaz") in haiku.syllable_map())
|
wieden-kennedy/haikus | haikus/evaluators.py | <filename>haikus/evaluators.py
"""
Simple haiku evaluators. Callables that give a score (out of 100) to a
haiku based on some criteria.
"""
import re, math
import nltk
import nltk.collocations
from nltk.classify import NaiveBayesClassifier
class HaikuEvaluator(object):
    """
    Base HaikuEvaluator -- simply a callable class
    with weight and an evaluate method
    """
    def __init__(self, weight=1):
        self.weight = weight
        self.pre_evaluate()

    def __call__(self, haiku):
        # The final contribution is the raw score scaled by this
        # evaluator's weight.
        return self.evaluate(haiku) * self.weight

    def pre_evaluate(self):
        """Hook for subclasses to set up state before evaluation; no-op here."""
        pass

    def evaluate(self, haiku):
        """
        Evaluate a comment. Override this in
        subclasses.
        """
        return 100
class NounVerbAdjectiveLineEndingEvaluator(HaikuEvaluator):
    """
    Analyze the part of speech of each line ending,
    boost lines ending in nouns or verbs.

    Returns 0 - 100
    """
    def evaluate(self, haiku):
        """Score 100 per line ending in a noun/verb/adjective, averaged over the lines."""
        score = 0
        # Penn Treebank tags: N* = nouns, V* = verbs, J* = adjectives.
        nv_regex = re.compile(r"(^N.*|^V.*|^J.*)")
        for line in haiku.get_lines():
            tagged_words = nltk.pos_tag(line.split())
            if nv_regex.match(tagged_words[-1][1]) is not None:
                score += 100
        # BUGFIX: floor division keeps the original Python 2 integer
        # semantics (repo tests expect e.g. 66, not 66.67) under Python 3.
        score = score // len(haiku.get_lines())
        return score
class JoiningWordLineEndingEvaluator(HaikuEvaluator):
    """
    If the line doesn't end in a preposition, in, and, or other
    joining words, boost its score
    """
    def evaluate(self, haiku):
        """Score 100 per line NOT ending in a joining-word tag, averaged over the lines."""
        score = 0
        # FIX: raw string -- "\$" is an invalid escape sequence (a
        # DeprecationWarning on Python 3.6+); the pattern bytes are
        # unchanged.  Tags: wh-words, prepositions, determiners,
        # conjunctions, possessive pronouns, "to".
        join_regex = re.compile(r"(^W.*$|IN|DT|CC|PRP\$|TO)")
        for line in haiku.get_lines():
            tagged_words = nltk.pos_tag(line.split())
            if join_regex.match(tagged_words[-1][1]) is None:
                score += 100
        # BUGFIX: floor division keeps the original Python 2 integer
        # semantics under Python 3.
        score = score // len(haiku.get_lines())
        return score
class EndsInNounEvaluator(HaikuEvaluator):
    """
    If the entire haiku ends in a noun, boost its score.
    """
    def evaluate(self, haiku):
        """Return 100 when the haiku's final word is tagged as a noun or pronoun, else 0."""
        noun_regex = re.compile("(^N.*$|PRP.*$)")
        final_line = haiku.get_lines()[-1]
        tagged = nltk.pos_tag(final_line.split())
        last_tag = tagged[-1][1]
        return 100 if noun_regex.match(last_tag) is not None else 0
class PrepositionCountEvaluator(HaikuEvaluator):
    """
    Penalize the haiku exponentially for each preposition it contains.

    FIX: the previous docstring was copy-pasted from EndsInNounEvaluator
    and described the wrong behavior.
    """
    def evaluate(self, haiku):
        """Return max(0, 100 - e**count) where count is the number of 'IN'-tagged words."""
        # 'IN' is the Penn Treebank tag for prepositions / subordinating
        # conjunctions.
        seeking = ['IN']
        # FIX: build the tag list directly instead of a list comprehension
        # used only for its append() side effects.
        tags = [tag for word, tag in nltk.pos_tag(haiku.flattened_lines().split())]
        found = [tag for tag in tags if tag in seeking]
        score = 100 - math.exp(len(found))
        if score < 0:
            return 0
        else:
            return score
# Default (evaluator_class, weight) pairs used by callers that do not
# supply their own evaluator list.
DEFAULT_HAIKU_EVALUATORS = [
    (NounVerbAdjectiveLineEndingEvaluator, 1),
    (JoiningWordLineEndingEvaluator, 1),
    (EndsInNounEvaluator, 1),
    (PrepositionCountEvaluator, 1),
]

# All available evaluator classes, without weights.
HAIKU_EVALUATORS = [
    NounVerbAdjectiveLineEndingEvaluator,
    JoiningWordLineEndingEvaluator,
    EndsInNounEvaluator,
    PrepositionCountEvaluator,
]
|
wieden-kennedy/haikus | setup.py | <reponame>wieden-kennedy/haikus<filename>setup.py<gh_stars>1-10
#!/usr/bin/env python
# BUGFIX: shebang was missing the '!' ('#/usr/bin/env python'), so the
# script could not be executed directly.
import os
from setuptools import setup, find_packages

# NOTE(review): SOURCE_DIR is computed but not used below; kept in case
# external tooling imports it.
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)

setup(
    name="haikus",
    description="Some classes for finding haikus in text",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/wieden-kennedy/haikus",
    version="0.0.1",
    install_requires=["nltk"],
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
wieden-kennedy/haikus | haikus/__init__.py | <gh_stars>1-10
"""
Haiku module -- for finding haiku in some arbitary piece of text
"""
from haikus.haikutext import HaikuText, Haiku
|
stoffus/frigate | benchmark.py | import os
from statistics import mean
import multiprocessing as mp
import numpy as np
import datetime
from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
# Constant all-ones RGB test frame with a batch axis, shape (1, 300, 300, 3).
my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
# Index -> label mapping; assumes /labelmap.txt exists in the container.
labels = load_labels('/labelmap.txt')
######
# Minimal same process runner
######
# object_detector = ObjectDetector()
# tensor_input = np.expand_dims(np.full((300,300,3), 0, np.uint8), axis=0)
# start = datetime.datetime.now().timestamp()
# frame_times = []
# for x in range(0, 1000):
# start_frame = datetime.datetime.now().timestamp()
# tensor_input[:] = my_frame
# detections = object_detector.detect_raw(tensor_input)
# parsed_detections = []
# for d in detections:
# if d[1] < 0.4:
# break
# parsed_detections.append((
# labels[int(d[0])],
# float(d[1]),
# (d[2], d[3], d[4], d[5])
# ))
# frame_times.append(datetime.datetime.now().timestamp()-start_frame)
# duration = datetime.datetime.now().timestamp()-start
# print(f"Processed for {duration:.2f} seconds.")
# print(f"Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######
# Separate process runner
######
def start(id, num_detections, detection_queue):
    """Benchmark worker: run `num_detections` detections against the shared
    EdgeTPU process and print total and per-frame timing.

    NOTE(review): `id` shadows the builtin, and the local `start` shadows
    this function's own module-level name -- works, but fragile.
    """
    object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue)
    start = datetime.datetime.now().timestamp()
    frame_times = []
    for x in range(0, num_detections):
        start_frame = datetime.datetime.now().timestamp()
        detections = object_detector.detect(my_frame)
        frame_times.append(datetime.datetime.now().timestamp()-start_frame)
    duration = datetime.datetime.now().timestamp()-start
    print(f"{id} - Processed for {duration:.2f} seconds.")
    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
# Single shared detection process that all benchmark workers feed.
edgetpu_process = EdgeTPUProcess()
# start(1, 1000, edgetpu_process.detect_lock, edgetpu_process.detect_ready, edgetpu_process.frame_ready)
####
# Multiple camera processes
####
# Spawn 10 worker processes, each running 100 detections.
camera_processes = []
for x in range(0, 10):
    camera_process = mp.Process(target=start, args=(x, 100, edgetpu_process.detection_queue))
    camera_process.daemon = True
    camera_processes.append(camera_process)
# NOTE(review): rebinding `start` here shadows the worker function above;
# safe only because all Process objects were already constructed.
start = datetime.datetime.now().timestamp()
for p in camera_processes:
    p.start()
for p in camera_processes:
    p.join()
duration = datetime.datetime.now().timestamp()-start
print(f"Total - Processed for {duration:.2f} seconds.")
stoffus/frigate | frigate/edgetpu.py | <reponame>stoffus/frigate
import os
import datetime
import hashlib
import multiprocessing as mp
import numpy as np
import pyarrow.plasma as plasma
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond, listen
def load_labels(path, encoding='utf-8'):
    """Loads labels from file (with or without index numbers).
    Args:
      path: path to label file.
      encoding: label file encoding.
    Returns:
      Dictionary mapping indices to labels.
    """
    with open(path, 'r', encoding=encoding) as f:
        raw_lines = f.readlines()
    if not raw_lines:
        return {}
    # A leading numeric token means the file carries explicit indices
    # ("0 person"); otherwise line position is the index.
    first_token = raw_lines[0].split(' ', maxsplit=1)[0]
    if first_token.isdigit():
        split_pairs = (line.split(' ', maxsplit=1) for line in raw_lines)
        return {int(idx): name.strip() for idx, name in split_pairs}
    return dict(enumerate(line.strip() for line in raw_lines))
class ObjectDetector():
    """Wraps a TFLite interpreter, preferring an EdgeTPU delegate and
    falling back to a CPU model when no EdgeTPU is present."""
    def __init__(self):
        edge_tpu_delegate = None
        try:
            edge_tpu_delegate = load_delegate('libedgetpu.so.1.0')
        except ValueError:
            # load_delegate raises ValueError when the EdgeTPU library or
            # device is unavailable.
            print("No EdgeTPU detected. Falling back to CPU.")

        if edge_tpu_delegate is None:
            self.interpreter = tflite.Interpreter(
                model_path='/cpu_model.tflite')
        else:
            self.interpreter = tflite.Interpreter(
                model_path='/edgetpu_model.tflite',
                experimental_delegates=[edge_tpu_delegate])

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect_raw(self, tensor_input):
        """Run one inference; returns a (20, 6) float32 array of
        [label, score, y1, x1, y2, x2] rows, zero-padded.

        NOTE(review): assumes the model emits at most 20 detections --
        more would raise IndexError on the fixed-size array; confirm
        against the model's output spec.
        """
        self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
        self.interpreter.invoke()
        boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
        label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
        scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))

        detections = np.zeros((20,6), np.float32)
        for i, score in enumerate(scores):
            detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]

        return detections
def run_detector(detection_queue, avg_speed, start):
    """Detection-process entry point: pull frame ids from the queue, fetch
    the frame from the plasma store, run inference, and publish the result
    under an "out-" prefixed object id.

    avg_speed -- shared double, exponential moving average of inference time
    start     -- shared double, timestamp of the in-flight inference
                 (0.0 when idle; the watchdog uses this to detect hangs)
    """
    print(f"Starting detection process: {os.getpid()}")
    listen()
    plasma_client = plasma.connect("/tmp/plasma")
    object_detector = ObjectDetector()

    while True:
        object_id_str = detection_queue.get()
        # Plasma object ids are 20 bytes; derive them from SHA-1 of the
        # string key so producer and consumer agree without coordination.
        object_id_hash = hashlib.sha1(str.encode(object_id_str))
        object_id = plasma.ObjectID(object_id_hash.digest())
        object_id_out = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{object_id_str}")).digest())
        input_frame = plasma_client.get(object_id, timeout_ms=0)

        if input_frame is plasma.ObjectNotAvailable:
            continue

        # detect and put the output in the plasma store
        start.value = datetime.datetime.now().timestamp()
        plasma_client.put(object_detector.detect_raw(input_frame), object_id_out)
        duration = datetime.datetime.now().timestamp()-start.value
        start.value = 0.0

        avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
    """Owns the single shared detection subprocess and the queue / shared
    values used to communicate with it."""
    def __init__(self):
        self.detection_queue = mp.SimpleQueue()
        # Exponential moving average of inference time, in seconds.
        self.avg_inference_speed = mp.Value('d', 0.01)
        # Timestamp of the inference currently in flight (0.0 when idle).
        self.detection_start = mp.Value('d', 0.0)
        self.detect_process = None
        self.start_or_restart()

    def start_or_restart(self):
        """(Re)spawn the detector subprocess, terminating any live one first."""
        self.detection_start.value = 0.0
        if (not self.detect_process is None) and self.detect_process.is_alive():
            self.detect_process.terminate()
            print("Waiting for detection process to exit gracefully...")
            self.detect_process.join(timeout=30)
            if self.detect_process.exitcode is None:
                # Still running after the grace period: hard-kill.
                print("Detection process didnt exit. Force killing...")
                self.detect_process.kill()
                self.detect_process.join()
        self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.avg_inference_speed, self.detection_start))
        self.detect_process.daemon = True
        self.detect_process.start()
class RemoteObjectDetector():
    """Client side of the shared detector: ships frames through the plasma
    store and a queue to the EdgeTPU process, and reads back results."""
    def __init__(self, name, labels, detection_queue):
        self.labels = load_labels(labels)
        self.name = name
        self.fps = EventsPerSecond()
        self.plasma_client = plasma.connect("/tmp/plasma")
        self.detection_queue = detection_queue

    def detect(self, tensor_input, threshold=.4):
        """Run one remote detection; returns a list of
        (label, score, (y1, x1, y2, x2)) tuples with score >= threshold.
        Returns [] if the detector does not answer within 10 seconds."""
        detections = []

        # Unique key per frame: detector name + wall-clock timestamp.
        now = f"{self.name}-{str(datetime.datetime.now().timestamp())}"
        object_id_frame = plasma.ObjectID(hashlib.sha1(str.encode(now)).digest())
        object_id_detections = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{now}")).digest())
        self.plasma_client.put(tensor_input, object_id_frame)
        self.detection_queue.put(now)
        raw_detections = self.plasma_client.get(object_id_detections, timeout_ms=10000)

        if raw_detections is plasma.ObjectNotAvailable:
            self.plasma_client.delete([object_id_frame])
            return detections

        for d in raw_detections:
            # Rows are sorted by score; stop at the first one below threshold.
            if d[1] < threshold:
                break
            detections.append((
                self.labels[int(d[0])],
                float(d[1]),
                (d[2], d[3], d[4], d[5])
            ))
        self.plasma_client.delete([object_id_frame, object_id_detections])
        self.fps.update()
        return detections
stoffus/frigate | detect_objects.py | <filename>detect_objects.py
import os
import sys
import traceback
import signal
import cv2
import time
import datetime
import queue
import yaml
import threading
import multiprocessing as mp
import subprocess as sp
import numpy as np
import logging
from flask import Flask, Response, make_response, jsonify, request
import paho.mqtt.client as mqtt
from frigate.video import track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
from frigate.object_processing import TrackedObjectProcessor
from frigate.util import EventsPerSecond
from frigate.edgetpu import EdgeTPUProcess
# Environment variables prefixed FRIGATE_ may be interpolated into config
# values (e.g. the MQTT password) via str.format.
FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}

with open('/config/config.yml') as f:
    CONFIG = yaml.safe_load(f)

MQTT_HOST = CONFIG['mqtt']['host']
MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
# Allow "{FRIGATE_...}" placeholders in the configured password.
if not MQTT_PASS is None:
    MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')

# Set the default FFmpeg config
FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
FFMPEG_DEFAULT_CONFIG = {
    'global_args': FFMPEG_CONFIG.get('global_args',
        ['-hide_banner','-loglevel','panic']),
    'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
        []),
    'input_args': FFMPEG_CONFIG.get('input_args',
        ['-avoid_negative_ts', 'make_zero',
         '-fflags', 'nobuffer',
         '-flags', 'low_delay',
         '-strict', 'experimental',
         '-fflags', '+genpts+discardcorrupt',
         '-vsync', 'drop',
         '-rtsp_transport', 'tcp',
         '-stimeout', '5000000',
         '-use_wallclock_as_timestamps', '1']),
    'output_args': FFMPEG_CONFIG.get('output_args',
        ['-f', 'rawvideo',
         '-pix_fmt', 'rgb24'])
}

GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})

WEB_PORT = CONFIG.get('web_port', 5000)
DEBUG = (CONFIG.get('debug', '0') == '1')
def start_plasma_store():
    """Launch the Arrow plasma object store (400 MB at /tmp/plasma).

    Returns the Popen handle, or None if the store died within a second
    of starting.
    """
    plasma_cmd = ['plasma_store', '-m', '400000000', '-s', '/tmp/plasma']
    store = sp.Popen(plasma_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    # Give it a moment, then make sure it is still alive.
    time.sleep(1)
    if store.poll() is not None:
        return None
    return store
class CameraWatchdog(threading.Thread):
    """Background thread that every 10s restarts any dead piece of the
    pipeline: the plasma store, the detection process, per-camera track
    processes, and stalled ffmpeg captures."""
    def __init__(self, camera_processes, config, tflite_process, tracked_objects_queue, plasma_process):
        threading.Thread.__init__(self)
        self.camera_processes = camera_processes
        self.config = config
        self.tflite_process = tflite_process
        self.tracked_objects_queue = tracked_objects_queue
        self.plasma_process = plasma_process

    def run(self):
        time.sleep(10)
        while True:
            # wait a bit before checking
            time.sleep(10)
            now = datetime.datetime.now().timestamp()

            # check the plasma process
            rc = self.plasma_process.poll()
            if rc != None:
                print(f"plasma_process exited unexpectedly with {rc}")
                self.plasma_process = start_plasma_store()

            # check the detection process
            # detection_start > 0 means an inference is in flight; if it
            # has been in flight for more than 10s, the detector is hung.
            detection_start = self.tflite_process.detection_start.value
            if (detection_start > 0.0 and
                now - detection_start > 10):
                print("Detection appears to be stuck. Restarting detection process")
                self.tflite_process.start_or_restart()
            elif not self.tflite_process.detect_process.is_alive():
                print("Detection appears to have stopped. Restarting detection process")
                self.tflite_process.start_or_restart()

            # check the camera processes
            for name, camera_process in self.camera_processes.items():
                process = camera_process['process']
                if not process.is_alive():
                    print(f"Track process for {name} is not alive. Starting again...")
                    # Reset the shared counters before respawning.
                    camera_process['process_fps'].value = 0.0
                    camera_process['detection_fps'].value = 0.0
                    camera_process['read_start'].value = 0.0
                    process = mp.Process(target=track_camera, args=(name, self.config[name], GLOBAL_OBJECT_CONFIG, camera_process['frame_queue'],
                        camera_process['frame_shape'], self.tflite_process.detection_queue, self.tracked_objects_queue,
                        camera_process['process_fps'], camera_process['detection_fps'],
                        camera_process['read_start'], camera_process['detection_frame']))
                    process.daemon = True
                    camera_process['process'] = process
                    process.start()
                    print(f"Track process started for {name}: {process.pid}")

                if not camera_process['capture_thread'].is_alive():
                    # Capture thread died: restart ffmpeg and the reader.
                    frame_shape = camera_process['frame_shape']
                    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
                    ffmpeg_process = start_or_restart_ffmpeg(camera_process['ffmpeg_cmd'], frame_size)
                    camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, camera_process['frame_queue'],
                        camera_process['take_frame'], camera_process['camera_fps'], camera_process['detection_frame'])
                    camera_capture.start()
                    camera_process['ffmpeg_process'] = ffmpeg_process
                    camera_process['capture_thread'] = camera_capture
                elif now - camera_process['capture_thread'].current_frame > 5:
                    # Capture thread is alive but frames stopped flowing:
                    # kill ffmpeg; the branch above restarts it next pass.
                    print(f"No frames received from {name} in 5 seconds. Exiting ffmpeg...")
                    ffmpeg_process = camera_process['ffmpeg_process']
                    ffmpeg_process.terminate()
                    try:
                        print("Waiting for ffmpeg to exit gracefully...")
                        ffmpeg_process.communicate(timeout=30)
                    except sp.TimeoutExpired:
                        print("FFmpeg didnt exit. Force killing...")
                        ffmpeg_process.kill()
                        ffmpeg_process.communicate()
def main():
# connect to mqtt and setup last will
def on_connect(client, userdata, flags, rc):
print("On connect called")
if rc != 0:
if rc == 3:
print ("MQTT Server unavailable")
elif rc == 4:
print ("MQTT Bad username or password")
elif rc == 5:
print ("MQTT Not authorized")
else:
print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
# publish a message to signal that the service is running
client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
client = mqtt.Client(client_id=MQTT_CLIENT_ID)
client.on_connect = on_connect
client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
if not MQTT_USER is None:
client.username_pw_set(MQTT_USER, password=<PASSWORD>)
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start()
plasma_process = start_plasma_store()
##
# Setup config defaults for cameras
##
for name, config in CONFIG['cameras'].items():
config['snapshots'] = {
'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True)
}
# Queue for cameras to push tracked objects to
tracked_objects_queue = mp.SimpleQueue()
# Start the shared tflite process
tflite_process = EdgeTPUProcess()
# start the camera processes
camera_processes = {}
for name, config in CONFIG['cameras'].items():
# Merge the ffmpeg config with the global config
ffmpeg = config.get('ffmpeg', {})
ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
ffmpeg_cmd = (['ffmpeg'] +
ffmpeg_global_args +
ffmpeg_hwaccel_args +
ffmpeg_input_args +
['-i', ffmpeg_input] +
ffmpeg_output_args +
['pipe:'])
if 'width' in config and 'height' in config:
frame_shape = (config['height'], config['width'], 3)
else:
frame_shape = get_frame_shape(ffmpeg_input)
frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
take_frame = config.get('take_frame', 1)
detection_frame = mp.Value('d', 0.0)
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
frame_queue = mp.SimpleQueue()
camera_fps = EventsPerSecond()
camera_fps.start()
camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, frame_queue, take_frame, camera_fps, detection_frame)
camera_capture.start()
camera_processes[name] = {
'camera_fps': camera_fps,
'take_frame': take_frame,
'process_fps': mp.Value('d', 0.0),
'detection_fps': mp.Value('d', 0.0),
'detection_frame': detection_frame,
'read_start': mp.Value('d', 0.0),
'ffmpeg_process': ffmpeg_process,
'ffmpeg_cmd': ffmpeg_cmd,
'frame_queue': frame_queue,
'frame_shape': frame_shape,
'capture_thread': camera_capture
}
camera_process = mp.Process(target=track_camera, args=(name, config, GLOBAL_OBJECT_CONFIG, frame_queue, frame_shape,
tflite_process.detection_queue, tracked_objects_queue, camera_processes[name]['process_fps'],
camera_processes[name]['detection_fps'],
camera_processes[name]['read_start'], camera_processes[name]['detection_frame']))
camera_process.daemon = True
camera_processes[name]['process'] = camera_process
for name, camera_process in camera_processes.items():
camera_process['process'].start()
print(f"Camera_process started for {name}: {camera_process['process'].pid}")
object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue)
object_processor.start()
camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process, tracked_objects_queue, plasma_process)
camera_watchdog.start()
# create a flask app that encodes frames a mjpeg on demand
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/')
def ishealthy():
# return a healh
return "Frigate is running. Alive and healthy!"
@app.route('/debug/stack')
def processor_stack():
frame = sys._current_frames().get(object_processor.ident, None)
if frame:
return "<br>".join(traceback.format_stack(frame)), 200
else:
return "no frame found", 200
@app.route('/debug/print_stack')
def print_stack():
pid = int(request.args.get('pid', 0))
if pid == 0:
return "missing pid", 200
else:
os.kill(pid, signal.SIGUSR1)
return "check logs", 200
@app.route('/debug/stats')
def stats():
stats = {}
total_detection_fps = 0
for name, camera_stats in camera_processes.items():
total_detection_fps += camera_stats['detection_fps'].value
capture_thread = camera_stats['capture_thread']
stats[name] = {
'camera_fps': round(capture_thread.fps.eps(), 2),
'process_fps': round(camera_stats['process_fps'].value, 2),
'skipped_fps': round(capture_thread.skipped_fps.eps(), 2),
'detection_fps': round(camera_stats['detection_fps'].value, 2),
'read_start': camera_stats['read_start'].value,
'pid': camera_stats['process'].pid,
'ffmpeg_pid': camera_stats['ffmpeg_process'].pid,
'frame_info': {
'read': capture_thread.current_frame,
'detect': camera_stats['detection_frame'].value,
'process': object_processor.camera_data[name]['current_frame_time']
}
}
stats['coral'] = {
'fps': round(total_detection_fps, 2),
'inference_speed': round(tflite_process.avg_inference_speed.value*1000, 2),
'detection_start': tflite_process.detection_start.value,
'pid': tflite_process.detect_process.pid
}
rc = camera_watchdog.plasma_process.poll()
stats['plasma_store_rc'] = rc
return jsonify(stats)
@app.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
    """Serve the best recent snapshot of *label* on *camera_name* as a JPEG."""
    if camera_name not in CONFIG['cameras']:
        return "Camera named {} not found".format(camera_name), 404
    best_frame = object_processor.get_best(camera_name, label)
    if best_frame is None:
        # no snapshot captured yet: fall back to a black 720p image
        best_frame = np.zeros((720, 1280, 3), np.uint8)
    ret, jpg = cv2.imencode('.jpg', cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR))
    response = make_response(jpg.tobytes())
    response.headers['Content-Type'] = 'image/jpg'
    return response
@app.route('/<camera_name>')
def mjpeg_feed(camera_name):
    """Stream camera frames as multipart MJPEG; ?fps= and ?h= tune the feed."""
    fps = int(request.args.get('fps', '3'))
    height = int(request.args.get('h', '360'))
    if camera_name not in CONFIG['cameras']:
        return "Camera named {} not found".format(camera_name), 404
    # hand the client an endless multipart response fed by the generator
    return Response(imagestream(camera_name, fps, height),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def imagestream(camera_name, fps, height):
    """Generator yielding JPEG frames capped at *fps*, resized to *height* px."""
    while True:
        time.sleep(1 / fps)  # throttle to the requested frame rate
        frame = object_processor.get_current_frame(camera_name)
        if frame is None:
            # no frame available yet: substitute a black 16:9 placeholder
            frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
        new_width = int(height * frame.shape[1] / frame.shape[0])
        resized = cv2.resize(frame, dsize=(new_width, height), interpolation=cv2.INTER_LINEAR)
        ret, jpg = cv2.imencode('.jpg', cv2.cvtColor(resized, cv2.COLOR_RGB2BGR))
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
app.run(host='0.0.0.0', port=WEB_PORT, debug=False)  # blocks until the server exits
object_processor.join()
# clean up the plasma store process on shutdown
plasma_process.terminate()
if __name__ == '__main__':
    main()
|
stoffus/frigate | frigate/video.py | <reponame>stoffus/frigate<filename>frigate/video.py<gh_stars>1-10
import os
import time
import datetime
import cv2
import queue
import threading
import ctypes
import pyarrow.plasma as plasma
import multiprocessing as mp
import subprocess as sp
import numpy as np
import copy
import itertools
import json
from collections import defaultdict
from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, PlasmaManager
from frigate.objects import ObjectTracker
from frigate.edgetpu import RemoteObjectDetector
from frigate.motion import MotionDetector
def get_frame_shape(source):
    """Probe *source* with ffprobe and return its (height, width, 3) shape.

    Falls back to decoding one frame with OpenCV when ffprobe does not
    report usable video stream dimensions.

    Raises:
        RuntimeError: if neither ffprobe nor OpenCV can determine the shape.
    """
    # Pass the command as an argv list with shell=False: the original built a
    # shell string with '"'+source+'"', which was vulnerable to shell
    # injection and broke on sources containing quotes.
    ffprobe_cmd = [
        'ffprobe',
        '-v',
        'panic',
        '-show_error',
        '-show_streams',
        '-of',
        'json',
        source
    ]
    print(" ".join(ffprobe_cmd))
    p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE)
    (output, err) = p.communicate()  # waits for the process to finish
    info = json.loads(output)
    print(info)
    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
    if video_info['height'] != 0 and video_info['width'] != 0:
        return (video_info['height'], video_info['width'], 3)
    # fallback to using opencv if ffprobe didnt succeed
    video = cv2.VideoCapture(source)
    ret, frame = video.read()
    video.release()
    # the original dereferenced frame.shape unconditionally and crashed with
    # AttributeError when the read failed; fail with a clear error instead
    if not ret or frame is None:
        raise RuntimeError("Unable to determine frame shape for {}".format(source))
    return frame.shape
def get_ffmpeg_input(ffmpeg_input):
    """Expand {FRIGATE_*} placeholders in an ffmpeg input string from the env."""
    env_vars = {
        name: value
        for name, value in os.environ.items()
        if name.startswith('FRIGATE_')
    }
    return ffmpeg_input.format(**env_vars)
def filtered(obj, objects_to_track, object_filters, mask):
    """Return True when a detection should be discarded.

    *obj* is a (label, score, box, area, region) tuple.  A detection is
    dropped when its label is untracked, or — when the label has filters —
    when it violates the label's area/score thresholds or its anchor point
    lands on a masked-out pixel.
    """
    label = obj[0]
    score = obj[1]
    box = obj[2]
    area = obj[3]
    if label not in objects_to_track:
        return True
    if label in object_filters:
        settings = object_filters[label]
        # drop detections smaller than min_area or larger than max_area
        if area < settings.get('min_area', -1):
            return True
        if area > settings.get('max_area', 24000000):
            return True
        # drop detections scored below the confidence threshold
        if score < settings.get('threshold', 0):
            return True
        # anchor point: bottom edge, horizontal center, clamped into the mask
        row = min(int(box[3]), len(mask) - 1)
        col = min(int((box[2] - box[0]) / 2.0) + box[0], len(mask[0]) - 1)
        # masked-out location => discard
        if mask[row][col] == [0]:
            return True
    return False
def create_tensor_input(frame, region):
    """Crop *frame* to *region* and shape it as a 1x300x300x3 model input."""
    x_min, y_min, x_max, y_max = region[0], region[1], region[2], region[3]
    cropped_frame = frame[y_min:y_max, x_min:x_max]
    # the detector expects exactly 300x300 RGB; resize only when needed
    if cropped_frame.shape != (300, 300, 3):
        cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
    # add the leading batch dimension: [1, 300, 300, 3]
    return np.expand_dims(cropped_frame, axis=0)
def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
    """Start ffmpeg, first shutting down *ffmpeg_process* if one is running.

    Tries a graceful terminate with a 30s wait before force-killing.
    Returns the new subprocess handle.
    """
    if not ffmpeg_process is None:
        print("Terminating the existing ffmpeg process...")
        ffmpeg_process.terminate()
        try:
            print("Waiting for ffmpeg to exit gracefully...")
            ffmpeg_process.communicate(timeout=30)
        except sp.TimeoutExpired:
            print("FFmpeg didnt exit. Force killing...")
            ffmpeg_process.kill()
            ffmpeg_process.communicate()
        ffmpeg_process = None
    print("Creating ffmpeg process...")
    print(" ".join(ffmpeg_cmd))
    # bufsize holds ~10 raw frames on the pipe so the reader can fall behind briefly
    process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
    return process
class CameraCapture(threading.Thread):
    """Thread that reads raw frames from an ffmpeg stdout pipe.

    Each decoded frame is timestamped, stored in the plasma object store
    keyed by '<camera><timestamp>', and its timestamp pushed onto
    frame_queue for the detection process to consume.
    """
    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame):
        threading.Thread.__init__(self)
        self.name = name
        self.frame_shape = frame_shape
        # bytes per raw frame read from the pipe
        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        self.frame_queue = frame_queue
        self.take_frame = take_frame
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.plasma_client = PlasmaManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = 0
        self.last_frame = 0
        self.detection_frame = detection_frame
    def run(self):
        """Read frames until ffmpeg exits, skipping frames per take_frame/backlog."""
        frame_num = 0
        self.skipped_fps.start()
        while True:
            if self.ffmpeg_process.poll() != None:
                print(f"{self.name}: ffmpeg process is not running. exiting capture thread...")
                break
            frame_bytes = self.ffmpeg_process.stdout.read(self.frame_size)
            self.current_frame = datetime.datetime.now().timestamp()
            if len(frame_bytes) == 0:
                print(f"{self.name}: ffmpeg didnt return a frame. something is wrong.")
                continue
            self.fps.update()
            frame_num += 1
            # honor take_frame by keeping only every Nth frame
            if (frame_num % self.take_frame) != 0:
                self.skipped_fps.update()
                continue
            # if the detection process is more than 1 second behind, skip this frame
            if self.detection_frame.value > 0.0 and (self.last_frame - self.detection_frame.value) > 1:
                self.skipped_fps.update()
                continue
            # put the frame in the plasma store
            self.plasma_client.put(f"{self.name}{self.current_frame}",
                np
                .frombuffer(frame_bytes, np.uint8)
                .reshape(self.frame_shape)
            )
            # add to the queue
            self.frame_queue.put(self.current_frame)
            self.last_frame = self.current_frame
def track_camera(name, config, global_objects_config, frame_queue, frame_shape, detection_queue, detected_objects_queue, fps, detection_fps, read_start, detection_frame):
    """Per-camera worker process: consume frames, detect and track objects.

    Pulls frame timestamps from frame_queue, fetches the pixels from the
    plasma store, runs motion detection to choose regions of interest, runs
    the remote object detector on those regions (refining clipped detections
    up to 4 times), and publishes tracked objects to detected_objects_queue.
    """
    print(f"Starting process for {name}: {os.getpid()}")
    listen()
    detection_frame.value = 0.0
    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(), camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
    frame = np.zeros(frame_shape, np.uint8)
    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
    else:
        mask = None
    if mask is None:
        # no mask configured/readable: treat the whole frame as unmasked
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255
    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue)
    object_tracker = ObjectTracker(10)
    plasma_client = PlasmaManager()
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    fps_tracker.start()
    object_detector.fps.start()
    while True:
        read_start.value = datetime.datetime.now().timestamp()
        frame_time = frame_queue.get()
        duration = datetime.datetime.now().timestamp()-read_start.value
        read_start.value = 0.0
        # exponential moving average of time spent waiting for frames
        avg_wait = (avg_wait*99+duration)/100
        detection_frame.value = frame_time
        # Get frame from plasma store
        frame = plasma_client.get(f"{name}{frame_time}")
        if frame is plasma.ObjectNotAvailable:
            continue
        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()
        # look for motion
        motion_boxes = motion_detector.detect(frame)
        tracked_objects = object_tracker.tracked_objects.values()
        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if intersection_over_union(motion_box, obj['box']) > .2:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(0, len(motion_boxes))).difference(used_motion_boxes)
        # compute motion regions
        motion_regions = [calculate_region(frame_shape, motion_boxes[i][0], motion_boxes[i][1], motion_boxes[i][2], motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes]
        # compute tracked object regions
        object_regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest]
        # merge regions with high IOU
        merged_regions = motion_regions+object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a], merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                # replace the most-overlapping pair with their combined region
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(calculate_region(frame_shape,
                    min(a[0], b[0]),
                    min(a[1], b[1]),
                    max(a[2], b[2]),
                    max(a[3], b[3]),
                    1
                ))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break
        # resize regions and detect
        detections = []
        for region in merged_regions:
            tensor_input = create_tensor_input(frame, region)
            region_detections = object_detector.detect(tensor_input)
            for d in region_detections:
                # map the model's relative box back to frame coordinates
                box = d[2]
                size = region[2]-region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0],
                    d[1],
                    (x_min, y_min, x_max, y_max),
                    (x_max-x_min)*(y_max-y_min),
                    region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)
        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False
            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)
            selected_objects = []
            for group in detected_object_groups.values():
                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2]-o[2][0], o[2][3]-o[2][1])
                    for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape,
                            box[0], box[1],
                            box[2], box[3])
                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2]-region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0],
                                d[1],
                                (x_min, y_min, x_max, y_max),
                                (x_max-x_min)*(y_max-y_min),
                                region)
                            if filtered(det, objects_to_track, object_filters, mask):
                                continue
                            selected_objects.append(det)
                        refining = True
                    else:
                        selected_objects.append(obj)
            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects
            if refining:
                refine_count += 1
        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)
        # add to the queue
        detected_objects_queue.put((name, frame_time, object_tracker.tracked_objects))
print(f"{name}: exiting subprocess") |
advpro4/Project-solution-C122 | take_screenshot.py | <filename>take_screenshot.py
import numpy as np
import pyautogui
import imutils
import cv2
import mediapipe as mp
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils
cap = cv2.VideoCapture(0)
# landmark ids of the four non-thumb fingertips (index..pinky)
finger_tips =[8, 12, 16, 20]
thumb_tip= 4
while True:
    ret,img = cap.read()
    img = cv2.flip(img, 1)
    h,w,c = img.shape
    # NOTE(review): img is BGR from OpenCV but mediapipe documents RGB input;
    # confirm whether a cv2.cvtColor conversion is missing before changing behavior.
    results = hands.process(img)
    if results.multi_hand_landmarks:
        for hand_landmark in results.multi_hand_landmarks:
            #accessing the landmarks by their position
            lm_list=[]
            for id ,lm in enumerate(hand_landmark.landmark):
                lm_list.append(lm)
            #array to hold true or false if finger is folded
            finger_fold_status =[]
            for tip in finger_tips:
                #getting the landmark tip position and drawing blue circle
                x,y = int(lm_list[tip].x*w), int(lm_list[tip].y*h)
                cv2.circle(img, (x,y), 15, (255, 0, 0), cv2.FILLED)
                #writing condition to check if finger is folded i.e checking if finger tip starting value is smaller than finger starting position which is inner landmark. for index finger
                #if finger folded changing color to green
                if lm_list[tip].x < lm_list[tip - 3].x:
                    cv2.circle(img, (x,y), 15, (0, 255, 0), cv2.FILLED)
                    finger_fold_status.append(True)
                else:
                    finger_fold_status.append(False)
                print(finger_fold_status)
            #checking if all fingers are folded
            if all(finger_fold_status):
                # take a screenshot of the screen and store it in memory, then
                # convert the PIL/Pillow image to an OpenCV compatible NumPy array
                # and finally write the image to disk
                image = pyautogui.screenshot()
                image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
                cv2.imwrite("in_memory_to_disk.png", image)
                # this time take a screenshot directly to disk
                pyautogui.screenshot("straight_to_disk.png")
                # we can then load our screenshot from disk in OpenCV format
                image = cv2.imread("straight_to_disk.png")
                cv2.imshow("Screenshot", imutils.resize(image, width=600))
            mp_draw.draw_landmarks(img, hand_landmark,
            mp_hands.HAND_CONNECTIONS, mp_draw.DrawingSpec((0,0,255),2,2),
            mp_draw.DrawingSpec((0,255,0),4,2))
    cv2.imshow("hand tracking", img)
    cv2.waitKey(1)
|
mpatek/runcalc | runcalc/cli.py | <reponame>mpatek/runcalc
import datetime
import click
import re
_multipliers = {
    's': 1,
    'm': 60,
    'h': 3600,
}
# Raw string: '\d' inside a plain string literal is an invalid escape
# sequence (DeprecationWarning today, a SyntaxError in future Pythons).
_pattern = re.compile(
    r'(?:(?:(?P<h>\d+):)?(?P<m>\d+):)?(?P<s>\d+(?:\.\d+)?)'
)
def time_str_to_seconds(s):
    """
    Convert a string representation of a time to number of seconds.
    Args:
        s (str): A string representation of a time ('[[H:]M:]S[.ss]').
    Returns:
        float: The number of seconds represented by the time string.
    Raises:
        ValueError: If the time string is in an unrecognized format.
    Examples:
        >>> time_str_to_seconds('123.45')
        123.45
        >>> time_str_to_seconds('7:15.45')
        435.45
        >>> time_str_to_seconds('1:07:15.45')
        4035.45
    """
    match = _pattern.match(s)
    if match:
        # sum each matched component (h/m/s) scaled to seconds
        return sum(
            _multipliers[k] * float(v)
            for k, v in match.groupdict().items()
            if v and k in _multipliers
        )
    raise ValueError('Unknown time format: "{}"'.format(s))
def format_timedelta(td):
    """
    Format a timedelta as human-readable text.
    Args:
        td (datetime.timedelta): A timedelta
    Returns:
        str: A string which represents the timedelta
    Examples:
        >>> import datetime
        >>> format_timedelta(datetime.timedelta(days=3))
        '3 days'
        >>> format_timedelta(datetime.timedelta(days=1))
        '1 day'
        >>> format_timedelta(datetime.timedelta(seconds=14.2567))
        '14.26 seconds'
        >>> format_timedelta(datetime.timedelta(seconds=64.6734))
        '1 minute 4.67 seconds'
        >>> format_timedelta(datetime.timedelta(seconds=3600))
        '1 hour'
        >>> format_timedelta(datetime.timedelta(seconds=3673.123))
        '1 hour 1 minute 13.12 seconds'
        >>> format_timedelta(datetime.timedelta(seconds=.878))
        '0.88 seconds'
        >>> format_timedelta(datetime.timedelta(seconds=0))
        '0 seconds'
        >>> format_timedelta(datetime.timedelta(seconds=1))
        '1 second'
        >>> format_timedelta(datetime.timedelta(seconds=1.234))
        '1.23 seconds'
    """
    if not td:
        return '0 seconds'

    def plural(n):
        # pluralize for any count other than exactly 1
        return 's' if n > 1 else ''

    parts = []
    if td.days:
        parts.append('{} day{}'.format(td.days, plural(td.days)))
    if td.seconds or td.microseconds:
        hours, remainder = divmod(td.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        if hours:
            parts.append('{} hour{}'.format(hours, plural(hours)))
        if minutes:
            parts.append('{} minute{}'.format(minutes, plural(minutes)))
        if seconds or td.microseconds:
            # show hundredths only when non-zero
            hundredths = int(round(td.microseconds / 10000.))
            fraction = '.{}'.format(hundredths) if hundredths else ''
            parts.append('{}{} second{}'.format(
                seconds,
                fraction,
                '' if (seconds == 1 and not fraction) else 's',
            ))
    return ' '.join(parts)
class TimeType(click.ParamType):
    """Click parameter type that parses '[[H:]M:]S' strings into float seconds."""
    name = 'time'
    def convert(self, value, param, ctx):
        # Surface parse errors through click's standard failure mechanism.
        try:
            return time_str_to_seconds(value)
        except ValueError as e:
            self.fail(e, param, ctx)
# shared singleton used by the --time option below
TIME_PARAM = TimeType()
@click.command()
@click.option('--time', '-t', type=TIME_PARAM)
@click.option('--distance', '-d', type=float)
@click.option('--unit', '-u', default='mile')
def cli(time, distance, unit):
    """ Calculate running pace. """
    # Prompt interactively for anything not supplied on the command line.
    if not time:
        time = time_str_to_seconds(
            str(input('Enter the run time: '))
        )
    if not distance:
        distance = float(
            input('Enter the run distance: ')
        )
    # pace is seconds per unit of distance
    pace = time / distance
    td = datetime.timedelta(seconds=pace)
    print('Pace: {} per {}'.format(format_timedelta(td), unit))
if __name__ == '__main__':
    cli()
|
mpatek/runcalc | setup.py | <filename>setup.py
from setuptools import setup
# Packaging metadata for runcalc; installs the `runcalc` console script
# that dispatches to runcalc.cli:cli.
setup(
    name='runcalc',
    version='0.1.1',
    description='Running pace calculator',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/mpatek/runcalc',
    download_url='https://github.com/mpatek/runcalc/tarball/0.1.1',
    packages=['runcalc'],
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'runcalc=runcalc.cli:cli'
        ]
    },
    install_requires=['click'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    keywords=['running', 'exercise', 'cli'],
)
|
dbt-labs/fullcontact-stitch | lambda/__init__.py | <reponame>dbt-labs/fullcontact-stitch
import os
import sys
# Lambda bundles dependencies under LAMBDA_TASK_ROOT; make them importable.
sys.path.append(os.getenv('LAMBDA_TASK_ROOT'))
import fullcontact  # noqa
if __name__ == "__main__":
    # Allow running the fanout locally for debugging.
    fullcontact.handle_fanout(None, None)
|
dbt-labs/fullcontact-stitch | lambda/common.py | <gh_stars>1-10
import base64
import boto3
import json
import os
import psycopg2
import time
import traceback
# Shared Kinesis client, created once per Lambda container and reused.
kinesis_client = boto3.client('kinesis')
def log(s):
    """Print *s* prefixed with a UTC 'YYYY-MM-DD HH:MM:SS' timestamp."""
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    print('{} | {}'.format(timestamp, s))
def enqueue_records(records):
    """Write up to 500 records to the Kinesis stream named by KINESIS_STREAM_NAME.

    Retries the whole batch after 5s on throughput throttling; raises
    RuntimeError on oversized batches or other per-record errors.
    Returns the number of records written.
    """
    if len(records) > 500:
        log("ERROR: a max of 500 records can be queued at once")
        raise RuntimeError
    s = time.time()
    log('Writing {} records to kinesis.'.format(len(records)))
    response = kinesis_client.put_records(
        Records=[{
            'Data': json.dumps(record).encode('utf-8'),
            'PartitionKey': json.dumps(record)
        } for record in records],
        StreamName=os.getenv('KINESIS_STREAM_NAME'))
    e = time.time()
    if response.get('FailedRecordCount'):
        # NOTE(review): put_records reports per-record errors under
        # response['Records'][i]['ErrorCode'], but this loop reads
        # 'ErrorCode' from the top-level response — verify against the
        # boto3 docs; as written the retry branch likely never triggers.
        for record in records:
            if response.get('ErrorCode'):
                if response.get('ErrorCode') == 'ProvisionedThroughputExceededException': # noqa
                    log('Throughput exceeded, trying again in 5 seconds.')
                    time.sleep(5)
                    return enqueue_records(records)
                else:
                    log('Error: {}'.format(response.get('ErrorMessage')))
                    raise RuntimeError
    log('Wrote {} records to kinesis in {} seconds.'.format(
        len(records),
        round(e-s, 2)))
    return len(records)
def handle_fanout(event, context, sql_generation_fn):
    """Run sql_generation_fn()'s query against Postgres and fan the result
    rows out to Kinesis in batches of 500.

    Connection parameters come from POSTGRES_* environment variables.
    Returns the total row count; re-raises any failure after logging it.
    """
    try:
        connection = psycopg2.connect(
            host=os.getenv('POSTGRES_HOST'),
            user=os.getenv('POSTGRES_USER'),
            password=os.getenv('POSTGRES_PASSWORD'),
            port=os.getenv('POSTGRES_PORT'),
            dbname=os.getenv('POSTGRES_DBNAME'))
        sql = sql_generation_fn()
        cursor = connection.cursor()
        cursor.execute(sql)
        total = cursor.rowcount
        log("Enqueuing {} records.".format(total))
        while True:
            records = cursor.fetchmany(500)
            if len(records) == 0:
                break
            enqueue_records(records)
            # brief pause between batches to stay under stream write limits
            time.sleep(0.5)
        cursor.close()
        log("Done.")
        return total
    except Exception as e:
        print(traceback.format_exc())
        raise e
def handle_worker(event, context, worker_fn):
    """Decode each Kinesis record's base64 JSON payload and hand it to worker_fn.

    Returns the number of records processed.
    """
    records = event.get('Records', {})
    for record in records:
        payload = record.get('kinesis', {}).get('data')
        data = json.loads(base64.b64decode(payload))
        worker_fn(data)
    return len(records)
|
dbt-labs/fullcontact-stitch | lambda/fullcontact.py | import os
import requests
import time
import common
def persist_to_stitch(data):
    """POST one record to the Stitch import API; returns the requests Response.

    Authenticates with the STITCH_API_KEY environment variable; Stitch
    expects a JSON array of messages, hence json=[data].
    """
    url = 'https://api.stitchdata.com/v2/import/push'
    api_key = os.getenv('STITCH_API_KEY')
    return requests.post(
        url,
        headers={'Content-Type': 'application/json',
                 'Authorization': 'Bearer {}'.format(api_key)},
        json=[data])
def get_select():
    """Build the SELECT pulling the email-address column to enrich.

    Column, schema, and table names come from FULLCONTACT_INPUT_*
    environment variables.
    """
    column = os.getenv('FULLCONTACT_INPUT_EMAIL_ADDRESS_FIELD')
    schema = os.getenv('FULLCONTACT_INPUT_SCHEMA')
    table = os.getenv('FULLCONTACT_INPUT_TABLE')
    return 'SELECT "{}" from "{}"."{}"'.format(column, schema, table)
def workon_record(record):
    """Enrich one email address via the FullContact person API and push the
    result (or an attempt marker) to Stitch.

    *record* is a 1-tuple of (email_address,) as fetched from Postgres.
    Raises RuntimeError on unexpected FullContact or Stitch failures.
    """
    (email_address,) = record
    url = "https://api.fullcontact.com/v2/person.json"
    api_key = os.getenv('FULLCONTACT_API_KEY')
    response = requests.get(
        url,
        headers={'X-FullContact-APIKey': api_key},
        params={'email': email_address})
    requested_at = int(round(time.time()))
    success_at = None
    unavailable_at = None
    to_persist = {}
    if response.status_code == 200:
        success_at = int(round(time.time()))
        to_persist = response.json().copy()
        # keep only the profile sections we model downstream
        desired_keys = set(['photos', 'contactInfo', 'organizations',
                            'demographics', 'socialProfiles'])
        all_keys = set(to_persist.keys())
        unwanted_keys = all_keys - desired_keys
        for k in unwanted_keys:
            del to_persist[k]
    elif response.status_code == 202:
        # we tried, but there's no data yet. just tell stitch that we tried.
        pass
    elif (response.status_code == 404 and
            'No results found for this Id.' in response.text):
        unavailable_at = int(round(time.time()))
    else:
        common.log(
            "WARNING: Fullcontact request failed with {}."
            .format(response.status_code))
        common.log(response.text)
        raise RuntimeError
    # bookkeeping fields recorded on every attempt, success or not
    to_persist['email_address'] = email_address
    to_persist['requested_at'] = requested_at
    to_persist['success_at'] = success_at
    to_persist['unavailable_at'] = unavailable_at
    result = persist_to_stitch({
        'client_id': int(os.getenv('STITCH_CLIENT_ID')),
        'table_name': 'fullcontact_person',
        'sequence': int(round(time.time())),
        'action': 'upsert',
        'key_names': ['email_address'],
        'data': to_persist,
    })
    if result.status_code >= 400:
        common.log(
            "WARNING: Stitch request failed with {}."
            .format(result.status_code))
        common.log(result.text)
        raise RuntimeError
    else:
        common.log("Persisted to Stitch successfully.")
def handle_fanout(event, context):
    # Lambda entry point: fan out one Kinesis record per input email address.
    return common.handle_fanout(event, context, get_select)
def handle_worker(event, context):
    # Lambda entry point: enrich each queued email address via FullContact.
    return common.handle_worker(event, context, workon_record)
|
StarlightHunter/qitime | qitime.py | #!/usr/bin/python3
##
## qitime - Quality Imaging Time
## (C) 2020 <NAME> <<EMAIL>>
##
## Calculates the Quality Imaging Time (dark hours) for a given date.
## Based on a concept developed by <NAME>:
## https://digitalstars.wordpress.com/
##
import argparse
import datetime
import ephem
def get_lunar_phase(lunation):
    """Map a lunation percentage (0-100, 50 = full moon) to a phase emoji."""
    # within 6.25% of a new moon on either side
    if lunation < 6.25 or lunation > 93.75:
        return "🌑"
    # 12.5%-wide bins from waxing crescent through waning gibbous
    bins = (
        (18.75, "🌒"),
        (31.25, "🌓"),
        (43.75, "🌔"),
        (56.25, "🌕"),
        (68.75, "🌖"),
        (81.25, "🌗"),
    )
    for upper, phase in bins:
        if lunation < upper:
            return phase
    # remaining range: 81.25 <= lunation <= 93.75 (waning crescent)
    return "🌘"
def get_total_dark_hours(dusk, dawn):
    """Return the duration of darkness between *dusk* and the following *dawn*.

    Args:
        dusk (datetime.datetime): when quality darkness begins.
        dawn (datetime.datetime): when it ends (after dusk).

    Returns:
        datetime.timedelta: dawn - dusk.

    The original implementation split the night at midnight using a
    23:59:59 stand-in for the end of the dusk day, which dropped one second
    from every total and silently assumed dawn fell on the very next
    calendar day.  Direct subtraction is exact and works for any
    dusk < dawn pair.
    """
    return dawn - dusk
def quality_time(
    date_time,
    latitude,
    longitude,
    moon_display=False,
    debug=False,
    header=False,
):
    """Print the quality-imaging (dark-sky) timeline for one night.

    Args:
        date_time: local date/time string understood by ephem.
        latitude / longitude: observer coordinates as degree strings.
        moon_display: when True, half-hours with the Moon above the horizon
            show its phase emoji instead of the dark-sky marker.
        debug: print intermediate ephemeris calculations.
        header: accepted for API compatibility; currently unused here.
    """
    ## Observer data
    observer = ephem.Observer()
    # BUG FIX: the original assigned observer.lon = latitude and
    # observer.lat = longitude, swapping the coordinates and computing the
    # whole night for a mirrored location.
    observer.lon = longitude
    observer.lat = latitude
    observer.elevation = 0
    observer.pressure = 1013 # USNO
    observer.temp = 10
    observer.horizon = '-0:34' # USNO
    observer.date = date_time # Local time
    if debug:
        print("= Observer")
        print(" Date:{}\tLon:{}\tLat:{}".format(
            observer.date,
            observer.lon,
            observer.lat
        ))
    ## Objects
    sun = ephem.Sun()
    moon = ephem.Moon()
    # Compute
    sun.compute(observer)
    moon.compute(observer)
    # Calculate moon phase
    next_new_moon = ephem.next_new_moon(observer.date)
    prev_new_moon = ephem.previous_new_moon(observer.date)
    # 50 = full moon, 0 = new moon
    lunation = (observer.date - prev_new_moon) / (next_new_moon - prev_new_moon) * 100
    objects = { 'Sun': sun, 'Moon': moon }
    times = {}
    if debug: print("= Rise/Transit/Set")
    for target in objects:
        t = objects[target]
        times[target] = {
            'rise' : None,
            'transit' : None,
            'set' : None,
            'always_up': False,
            'never_up': False,
        }
        try:
            times[target]['rise'] = ephem.localtime(observer.next_rising(t, use_center=True))
            times[target]['transit'] = ephem.localtime(observer.next_transit(t))
            times[target]['set'] = ephem.localtime(observer.next_setting(t, use_center=True))
            if debug:
                print(" {}\tRise:{}\tTransit:{}\tSet:{}".format(
                    target,
                    times[target]['rise'],
                    times[target]['transit'],
                    times[target]['set']
                ))
        except ephem.AlwaysUpError:
            if debug: print(" {} always up".format(target))
            times[target]['always_up'] = True
        except ephem.NeverUpError:
            if debug: print(" {} never up".format(target))
            times[target]['never_up'] = True
    ## Twilight
    # https://stackoverflow.com/questions/2637293/calculating-dawn-and-sunset-times-using-pyephem
    # fred.horizon = '-6' #-6=civil twilight, -12=nautical, -18=astronomical
    if debug: print("= Twilight")
    twilight = {
        #'Civil': '-6',
        #'Nautical': '-12',
        'Quality': '-15',
        #'Astronomical': '-18'
    }
    for twilight_type in twilight:
        observer.horizon = twilight[twilight_type]
        dawn_t = "{}_dawn".format(twilight_type)
        dusk_t = "{}_dusk".format(twilight_type)
        always_t = "{}_always".format(twilight_type)
        never_t = "{}_never".format(twilight_type)
        times[dawn_t] = None
        times[dusk_t] = None
        times[always_t] = False
        times[never_t] = False
        try:
            # Calculate twilight times
            times[dusk_t] = ephem.localtime(observer.next_setting(sun, use_center=True))
            times[dawn_t] = ephem.localtime(observer.next_rising(sun, use_center=True))
            if debug:
                print(" {}\tDawn:{}\tDusk:{}".format(
                    twilight_type,
                    times[dusk_t],
                    times[dawn_t]
                ))
        except ephem.AlwaysUpError:
            times[always_t] = True
            if debug: print(" There is not {} night".format(twilight_type))
        except ephem.NeverUpError:
            times[never_t] = True
            if debug: print(" There is not {} night".format(twilight_type))
    ## Dark Night
    if debug: print("= Dark night (without any Moon)")
    # Calculate limits
    # NOTE: dawn_t/dusk_t/always_t/never_t intentionally leak out of this
    # loop for reuse by the timeline rendering below; with the single
    # 'Quality' entry that is well defined.
    for twilight_type in twilight:
        dawn_t = "{}_dawn".format(twilight_type)
        dusk_t = "{}_dusk".format(twilight_type)
        always_t = "{}_always".format(twilight_type)
        never_t = "{}_never".format(twilight_type)
        if debug:
            print(" Darkness ({})\tStart:{}\tEnd:{}".format(
                twilight_type,
                times[dusk_t],
                times[dawn_t]
            ))
        total_dark_hours = get_total_dark_hours(times[dusk_t], times[dawn_t])
        print(" Total dark hours: {}".format(total_dark_hours))
    dt = observer.date.datetime()
    if debug:
        print(" ", end='')
        for i in range(0,24):
            print("{:02} ".format(i), end='')
        print(" Moon phase")
        print(" ", end='')
    # Get lunar phase
    phase = get_lunar_phase(lunation)
    # Render one marker per half hour: daylight, dark sky, or Moon phase.
    for h in range(0,24):
        for m in [0, 30]:
            current_date = ephem.localtime(ephem.Date("{}-{}-{} {:02d}:{:02d}:00".format(
                dt.year,
                dt.month,
                dt.day,
                h,
                m
            )))
            if times[always_t]:
                print("🌞", end='')
            elif not times[never_t] \
                and times[dawn_t] < current_date < times[dusk_t]:
                print("🌞", end='')
            elif moon_display:
                observer.horizon = "0"
                observer.date = current_date
                moon.compute(observer)
                if moon.alt > 0:
                    print(phase, end='')
                else:
                    print("🌌", end='')
            else:
                print("🌌", end='')
    print(" {}".format(phase))
if (__name__ == '__main__'):
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--lat", help="Observer latitude", required=True)
    parser.add_argument("--lon", help="Observer longitude", required=True)
    parser.add_argument("--date", help="Date to calculate ephemeris", required=True)
    args = parser.parse_args()
    # Display header
    print("Quality Imaging Time")
    # Calculate and display Quality Imaging ephemeris
    # (coordinates are passed through as strings, which ephem accepts)
    quality_time(
        args.date,
        latitude=args.lat,
        longitude=args.lon,
        debug=True,
        moon_display=True,
        header=True,
    )
# TODO: Calendar
"""
header_display = True
for week in range(0,52):
for day in [4,5,6]:
# Friday, Saturday
date = datetime.datetime.strptime('2020 {} {}'.format(week, day), '%Y %U %w')
date_str = "2020-{:02}-{:02} 00:00".format(date.month, date.day)
print("2020-{:02}-{:02}".format(date.month, date.day), end='')
quality_time(
date_str,
latitude=args.lat,
longitude=args.lon,
debug=False,
moon_display=True,
header=header_display
)
if header_display == True:
header_display = False
"""
|
zhangce/elementary | dep/iomln.py |
import sys
# Command-line arguments: predictions file ('~' to skip) and working folder.
PRED_FILE = sys.argv[1]
FOLDER = sys.argv[2]
WEIGHT_FILE = FOLDER + "/factor_weight.tsv"
FACTOR_MEANING = FOLDER + "/factor_meaning.tsv"
VARIABLE_MEANING = FOLDER + "/variable_meaning.tsv"
OUTPUT_WEIGHT = FOLDER + ".prog.txt"
OUTPUT_PRED = FOLDER + ".pred.txt"
# id -> human-readable meaning lookups for variables (vm) and factors (fm)
vm = {}
fm = {}
for l in open(FACTOR_MEANING, 'r'):
    (id, meaning) = l.rstrip().split('\t')
    fm[int(id)] = meaning
for l in open(VARIABLE_MEANING, 'r'):
    (id, meaning) = l.rstrip().split('\t')
    vm[int(id)] = meaning
# annotate each factor weight with its meaning, relying on file order
# matching factor ids 0..N
fo = open(OUTPUT_WEIGHT, 'w')
ct = 0
for l in open(WEIGHT_FILE, 'r'):
    fo.write( l.rstrip() + " " + fm[ct] + "\n" )
    ct = ct + 1
fo.close()
if PRED_FILE != '~':
    fo = open(OUTPUT_PRED, 'w')
    for l in open(PRED_FILE, 'r'):
        ss = l.rstrip().split('\t')
        prob = ""
        if len(ss) == 3:
            prob = ss[2]
        vid = int(ss[0])
        pred = ss[1]
        # only keep positive predictions
        if pred == "0":
            continue
        if prob != "":
            fo.write( prob + " " + vm[vid] + "\n" )
        else:
            fo.write( vm[vid] + "\n" )
    fo.close()
|
zhangce/elementary | examples/elly/LR/to_factor.py | <reponame>zhangce/elementary
# NOTE: Python 2 script (iterkeys, integer division of vid/1000).
# Map variable-group id (vid/1000) -> feature id from the unigram file.
m = {}
for l in open('lr_feat_unigram_nf_1.tsv', 'r'):
    (fid, vid, feature) = l.rstrip().split()
    fid = int(fid)
    vid = int(vid)
    feature = int(feature)
    m[vid/1000] = feature
m[0] = 0
fof = open('unigram.tsv', 'w')
fov = open('__vf.tsv', 'w')
ctf = -1
# emit one factor row and one variable-factor row per group,
# renumbering factors densely from 0 in sorted group order
for v in sorted(m.iterkeys()):
    ctf = ctf + 1
    fof.write('%d\t1\t0\t%d\t%d\n' % (ctf, m[v], v))
    fov.write('%d\t23\t1\t0\t%d\n' % (v, ctf))
fof.close()
fov.close()
|
zhangce/elementary | examples/elly/LDA/view.py | <filename>examples/elly/LDA/view.py
import sys
# variable id -> word lookup built from the vocabulary file (argv[2])
vid2word = {}
for l in open(sys.argv[2], 'r'):
    (vid, word) = l.rstrip().split('\t')
    vid2word[vid] = word
# topic -> {word: count} built from the topic-assignment file (argv[1])
topic2word = {}
for l in open(sys.argv[1], 'r'):
    (vid, topic) = l.rstrip().split('\t')
    word = vid2word[vid]
    if topic not in topic2word:
        topic2word[topic] = {}
    if word not in topic2word[topic]:
        topic2word[topic][word] = 0
    topic2word[topic][word] = topic2word[topic][word] + 1
# sort topic ids numerically for display
topics = []
for topic in topic2word:
    topics.append(int(topic))
topics.sort()
# print the top 19 words per topic, most frequent first
for ntopic in topics:
    topic = '%d' % ntopic
    ct = 0
    sys.stdout.write('TOPIC #')
    sys.stdout.write(topic)
    sys.stdout.write(' \n')
    for word in sorted(topic2word[topic], key=topic2word[topic].get, reverse=True):
        ct = ct + 1
        if ct == 20:
            break
        sys.stdout.write(' ')
        sys.stdout.write(word)
        sys.stdout.write('(')
        sys.stdout.write('%d) ' % topic2word[topic][word])
    sys.stdout.write('\n\n')
|
dyelsey/Juhuri-Keyboard | resources/analysis.py | <filename>resources/analysis.py
#<NAME>
import codecs
from collections import defaultdict, Counter
from operator import itemgetter
import sys
def add_pound(word):
    """Strip trailing whitespace from *word* and wrap it in '#' markers."""
    trimmed = word.rstrip()
    return "#{}#".format(trimmed)
def make_histogram(name):
    """Count lower-cased character frequencies in the UTF-8 file *name*.

    Newline characters are skipped.  Returns a ``defaultdict(int)``
    mapping each character (lower-cased) to its number of occurrences.
    """
    histogram = defaultdict(int)
    ignored = [u'\n']
    f = codecs.open(name, encoding='utf-8')
    for line in f:
        for ch in line:
            ch = ch.lower()
            if ch not in ignored:
                histogram[ch] += 1
    f.close()
    return histogram
def print_hist(histogram):
    # Print the 31 most frequent entries (indices 0..30), one per line,
    # as "<char>  :  <count>", ordered by descending count.
    # NOTE: Python 2 print statement -- not valid Python 3 syntax.
    result = sorted(histogram.items(), key=itemgetter(1), reverse=True)
    for i, item in enumerate(result):
        if i > 30:
            break
        print item[0]," : ",item[1]
def make_trie(name):
    """Build letter-bigram counts from the words of the UTF-8 file *name*.

    Returns a nested ``defaultdict`` mapping each character to a counter
    of its successor characters; the pseudo-character ``'#'`` counts
    word-final positions.  Words are lower-cased before counting.
    """
    bigrams = defaultdict(lambda: defaultdict(int))
    f = codecs.open(name, encoding='utf-8')
    for line in f:
        for word in line.split():
            word = word.lower()
            last = len(word) - 1
            for pos in range(len(word)):
                if pos == last:
                    bigrams[word[pos]]['#'] += 1
                else:
                    bigrams[word[pos]][word[pos + 1]] += 1
    return bigrams
def find_max(trie):
    """Return, for each letter in *trie*, its most frequent successor.

    Args:
        trie: mapping ``letter -> {successor: count}`` as built by
            ``make_trie``.

    Returns:
        dict mapping each letter to a ``(successor, count)`` tuple.
        Letters with no successors map to ``(u'', 0)``.
    """
    max_count = {}
    for item in trie:
        max_l = u''
        max_c = 0
        # BUG FIX: the original stored trie[item].keys()[0] -- an arbitrary
        # first key -- instead of the key that achieved the maximum count,
        # and its inner loop variable shadowed the outer one.  keys()[0]
        # is also invalid on Python 3 (dict views are not indexable).
        for succ in trie[item]:
            if trie[item][succ] > max_c:
                max_l = succ
                max_c = trie[item][succ]
        # ' '.join of a single character is that character; kept for
        # compatibility with the original output format.
        max_count[item] = (u' '.join(max_l), max_c)
    return max_count
def add_to(data):
    """Merge several ``{letter: (successor, count)}`` dicts.

    For each letter keep the entry with the strictly highest count; on
    ties the earliest data set wins.
    """
    merged = {}
    for data_set in data:
        for letter, entry in data_set.items():
            current = merged.get(letter)
            if current is None or current[1] < entry[1]:
                merged[letter] = entry
    return merged
def print_trie(trie):
    # Print each letter with its most likely successor.  Despite the name,
    # *trie* here is the {letter: (successor, count)} dict produced by
    # find_max/add_to, not the raw nested trie.
    # NOTE: Python 2 print statement -- not valid Python 3 syntax.
    print "\nMost likely succeeding letters\n============================="
    for i in trie.keys():
        print i, " ==> ", trie[i][0]
# Command-line entry point.
#   -h : print a combined character histogram of the four corpora.
#   -b : print each letter's most likely successor (bigram analysis).
# NOTE: Python 2 print statement in the usage branch.
if sys.argv[1] == '-h':
    poems = make_histogram('jdt.poems.2017-02-01.txt')
    reviews = make_histogram('jdt.reviews.2017-01-31.txt')
    stories = make_histogram('jdt.stories.2017-01-31.txt')
    stories1 = make_histogram('jdt.stories.2017-02-01.txt')
    # Counter addition merges the four histograms by summing counts.
    histogram = Counter(stories) + Counter(poems) + Counter(reviews) + Counter(stories1)
    print_hist(histogram)
elif sys.argv[1] == '-b':
    histogram = []
    histogram.append(find_max(make_trie('jdt.poems.2017-02-01.txt')))
    histogram.append(find_max(make_trie('jdt.reviews.2017-01-31.txt')))
    histogram.append(find_max(make_trie('jdt.stories.2017-01-31.txt')))
    histogram.append(find_max(make_trie('jdt.stories.2017-02-01.txt')))
    # Keep, per letter, the strongest successor across the four corpora.
    histogram = add_to(histogram)
    print_trie(histogram)
else:
    print "Usage: analysis.py -h [historgram] or -b [bigram]"
|
dyelsey/Juhuri-Keyboard | Linux jdt keyboard/add_rule.py | <gh_stars>0
#<NAME>
#Adds keyboard in evdev.xml right before </layoutList>

# Build the XML fragment (two <layout> entries for the Judeo-Tat
# keyboards) as a list of 16 lines.
# NOTE(review): the exact leading whitespace inside these string literals
# determines the indentation written into evdev.xml -- verify against the
# target file's formatting.
l = []
for i in range(16):
    l.append('')
l[0] = " <layout>\n"
l[1] = " <configItem>\n"
l[2] = " <name>jdt-cyr</name>\n"
l[3] = " <shortDescription>jdt-cyr</shortDescription>\n"
l[4] = " <description>Judeo-Tat (Cyrillic)</description>\n"
l[5] = " <languageList><iso639Id>jdt</iso639Id><iso639Id>jdt-cyr</iso639Id></languageList>\n"
l[6] = " </configItem>\n"
l[7] = " </layout>\n"
l[8] = " <layout>\n"
l[9] = " <configItem>\n"
l[10] = " <name>jdt-cyr-russian</name>\n"
l[11] = " <shortDescription>jdt-cyr-russian</shortDescription>\n"
l[12] = " <description>Judeo-Tat (Russian)</description>\n"
l[13] = " <languageList><iso639Id>jdt</iso639Id><iso639Id>jdt-cyr-russian</iso639Id></languageList>\n"
l[14] = " </configItem>\n"
l[15] = " </layout>\n"

# Concatenate the fragment into a single string.
to_add = ''
for line in l:
    to_add += line

# Read the whole file, then rewrite it with the fragment inserted
# immediately before the (first) </layoutList> closing tag.
buf = []
with open("evdev.xml", "r") as in_file:
    buf = in_file.readlines()
with open("evdev.xml", "w") as out_file:
    for line in buf:
        if "</layoutList>" in line:
            line = to_add + line
        out_file.write(line)
gVallverdu/pychemcurv | pychemcurv/core.py | <reponame>gVallverdu/pychemcurv
# coding: utf-8
"""
Module ``pychemcur.core`` implements several classes in order to represents a vertex of
a molecular squeleton and compute geometrical and chemical indicators related
to the local curvature around this vertex.
A complete and precise definition of all the quantities computed in the
classes of this module can be found in article [JCP2020]_.
.. [JCP2020] <NAME>, <NAME>, <NAME>
and <NAME> *Relating the molecular topology and local geometry:
Haddon’s pyramidalization angle and the Gaussian curvature*, J. Chem. Phys.
**152**, 244310 (2020). https://aip.scitation.org/doi/10.1063/5.0008368
.. [POAV2] <NAME>, <NAME>, <NAME>
and <NAME> *Haddon's POAV2 versus POAV theory for non planar
molecules* (to be published).
"""
import numpy as np
from scipy.linalg import null_space
from .geometry import get_plane, circum_center, center_of_mass, get_dihedral
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
__all__ = ["VertexAtom", "TrivalentVertex", "POAV1", "POAV2"]
class VertexAtom:
    r"""
    This class represents an atom (or a point) associated to a vertex of the
    squeleton of a molecule. The used notations are the following.

    We denote by A a given atom caracterized by its cartesian coordinates
    corresponding to a vector in :math:`\mathbb{R}^3`. This atom A is bonded to
    one or several atoms B. The atoms B, bonded to atoms A belong to
    :math:`\star(A)` and are caracterized by their cartesian coordinates defined
    as vectors in :math:`\mathbb{R}^3`. The geometrical
    object obtained by drawing a segment between bonded atoms is called the
    skeleton of the molecule and is the initial geometrical picture for a molecule.
    This class is defined from the cartesian coordinates of atom A and the atoms
    belonging to :math:`\star(A)`.

    More generally, the classes only considers points in :math:`\mathbb{R}^3`.
    The is not any chemical consideration here. In consequence, the class can be
    used for all cases where a set of point in :math:`\mathbb{R}^3` is relevant.
    """

    def __init__(self, a, star_a):
        r"""
        Args:
            a (np.ndarray): cartesian coordinates of point/atom A in :math:`\mathbb{R}^3`
            star_a (nd.array): (N x 3) cartesian coordinates of points/atoms B in :math:`\star(A)`

        Raises:
            ValueError: if ``a`` or ``star_a`` cannot be converted to float
                arrays of the expected shape, or if :math:`\star(A)` has
                fewer than 3 points.
        """
        # check point/atom A
        try:
            self._a = np.array(a, dtype=np.float64).reshape(3)
        except ValueError:
            print("a = ", a)
            raise ValueError("Cannot convert a in a numpy array of floats.")

        # check points/atoms B in *(A)
        try:
            self._star_a = np.array(star_a, dtype=np.float64)
            self._star_a = self._star_a.reshape(self._star_a.size // 3, 3)
        except ValueError:
            print("*A, star_a = ", star_a)
            raise ValueError("Cannot convert star_a in a numpy array of floats"
                             " with a shape (N, 3).")
        if self._star_a.shape[0] < 3:
            print("*A, star_a = ", star_a)
            raise ValueError("The shape of *(A) is not relevant. Needs at least"
                             " 3 points/atoms in *(A)")

        # compute the regularized coordinates of atoms/points B in *(A):
        # each B is moved along the AB direction to unit distance from A
        u = self._star_a - self._a
        self._distances = np.linalg.norm(u, axis=1)
        u /= self._distances[:, np.newaxis]
        self._reg_star_a = self._a + u

        # center of mass of atoms/points B in *(A)
        self._com = center_of_mass(self._star_a)

        # compute a normal vector of *(A)
        _, _, self._normal = get_plane(self._star_a)

        # compute a normal vector of the plane Reg *(A) using the regularized
        # coordinates of atoms/points B in *(A)
        _, _, self._reg_normal = get_plane(self._reg_star_a)

        # make the direction IA and the normal vectors of *(A) or Reg *(A) the same
        # I is the center of mass of *(A)
        IA = self.a - self.com
        if np.dot(IA, self._normal) < 0:
            self._normal = -self._normal
        if np.dot(IA, self.reg_normal) < 0:
            self._reg_normal = -self.reg_normal

    @staticmethod
    def from_pyramid(length, theta, n_star_A=3, radians=False, perturb=None):
        r"""Set up a VertexAtom from an ideal pyramidal structure.

        Build an ideal pyramidal geometry given the angle theta and randomize
        the positions by adding a noise of a given magnitude. The vertex of the
        pyramid is the point A and :math:`\star(A)`. are the points linked to
        the vertex. The size of :math:`\star(A)`. is at least 3.

        :math:`\theta` is the angle between the normal vector of the plane defined
        from :math:`\star(A)` and the bonds between A and :math:`\star(A)`.
        The pyramidalisation angle is defined from :math:`\theta` such as

        .. math::

            pyrA = \theta - \frac{\pi}{2}

        Args:
            length (float): the bond length
            theta (float): Angle to define the pyramid
            n_star_A (int): number of point bonded to A the vertex of the pyramid.
            radian (bool): True if theta is in radian (default False)
            perturb (float): Give the width of a normal distribution from which
                random numbers are choosen and added to the coordinates.

        Returns:
            A VertexAtom instance
        """
        r_theta = theta if radians else np.radians(theta)
        if n_star_A < 3:
            raise ValueError(
                "n_star_A = {} and must be greater than 3.".format(n_star_A))

        # build an ideal pyramid: A below the origin, B atoms on a circle
        # of radius IB in the z = 0 plane
        IB = length * np.sin(r_theta)
        step_angle = 2 * np.pi / n_star_A
        coords = [[0, 0, -length * np.cos(r_theta)]]
        coords += [[IB * np.cos(iat * step_angle),
                    IB * np.sin(iat * step_angle),
                    0] for iat in range(n_star_A)]
        coords = np.array(coords, dtype=np.float64)

        # randomize positions of the B atoms only (A stays fixed)
        if perturb:
            coords[1:, :] += np.random.normal(0, perturb, size=(n_star_A, 3))

        return VertexAtom(coords[0], coords[1:])

    @property
    def a(self):
        """ Coordinates of atom A """
        return self._a

    @property
    def star_a(self):
        r""" Coordinates of atoms B belonging to :math:`\star(A)` """
        return self._star_a

    @property
    def reg_star_a(self):
        r"""
        Regularized coordinates of atoms/points B in :math:`\star(A)` such as all
        distances between A and points B are equal to unity. This corresponds to
        :math:`Reg_{\epsilon}\star(A)` with :math:`\epsilon` = 1.
        """
        return self._reg_star_a

    @property
    def normal(self):
        r"""
        Unitary vector normal to the plane or the best fitting plane of
        atoms/points Bi in :math:`\star(A)`.
        """
        return self._normal

    @property
    def reg_normal(self):
        r"""
        Unitary vector normal to the plane or the best fitting plane of
        atoms/points :math:`Reg B_i` in :math:`\star(A)`.
        """
        return self._reg_normal

    @property
    def com(self):
        r""" Center of mass of atoms/points B in :math:`\star(A)` """
        return self._com

    @property
    def distances(self):
        r"""
        Return all distances between atom A and atoms B belonging to
        :math:`\star(A)`. Distances are in the same order as the atoms in
        ``vertex.star_a``.
        """
        return self._distances

    def get_angles(self, radians=True):
        r"""
        Compute angles theta_ij between the bonds ABi and ABj, atoms Bi and
        Bj belonging to :math:`\star(A)`. The angle theta_ij is made by the
        vectors ABi and ABj in the affine plane defined by this two vectors and
        atom A. The computed angles are such as bond ABi are in a consecutive
        order.

        Args:
            radians (bool): if True (default) angles are returned in radians

        Returns:
            dict mapping index pairs (i, j) to the angle between bonds
            ABi and ABj.
        """
        if self._star_a.shape[0] == 3:
            # exactly 3 atoms: the three pairs are already consecutive.
            # Vectors u, v are unit vectors by construction of reg_star_a.
            angles = dict()
            for i, j in [(0, 1), (0, 2), (1, 2)]:
                u = self.reg_star_a[i, :] - self._a
                v = self.reg_star_a[j, :] - self._a
                cos = np.dot(u, v)
                if radians:
                    angles[(i, j)] = np.arccos(cos)
                else:
                    angles[(i, j)] = np.degrees(np.arccos(cos))
        else:
            # get P the plane of *(A)
            vecx, vecy, _ = get_plane(self.reg_star_a)

            # compute all angles with vecx in order to sort atoms of *(A)
            com = center_of_mass(self.reg_star_a)
            u = self.reg_star_a - com
            norm = np.linalg.norm(u, axis=1)
            u /= norm[:, np.newaxis]
            cos = np.dot(u, vecx)
            # the sign of the projection on vecy disambiguates the quadrant
            angles = np.where(np.dot(u, vecy) > 0, np.arccos(cos),
                              2 * np.pi - np.arccos(cos))

            # sort points according to angles; append the first index to
            # close the loop around the vertex
            idx = np.arange(angles.size)
            idx = idx[np.argsort(angles)]
            idx = np.append(idx, idx[0])

            # compute the angles between consecutive bonds
            angles = dict()
            for i, j in np.column_stack([idx[:-1], idx[1:]]):
                u = self.reg_star_a[i, :] - self._a
                u /= np.linalg.norm(u)
                v = self.reg_star_a[j, :] - self._a
                v /= np.linalg.norm(v)
                cos = np.dot(u, v)
                if radians:
                    angles[(i, j)] = np.arccos(cos)
                else:
                    angles[(i, j)] = np.degrees(np.arccos(cos))

        return angles

    @property
    def angular_defect(self):
        r"""
        Compute the angular defect in radians as a measure of the discrete
        curvature around the vertex, point A.

        The calculation first looks for the best fitting plane of points
        belonging to :math:`\star(A)` and sorts that points in order to compute
        the angles between the edges connected to the vertex (A). See the
        get_angles method.
        """
        angles = self.get_angles(radians=True)
        ang_defect = 2 * np.pi - sum(angles.values())
        return ang_defect

    @property
    def pyr_distance(self):
        r"""
        Compute the distance of atom A to the plane define by :math:`\star(A)` or
        the best fitting plane of :math:`\star(A)`. The unit of the distance is the
        same as the unit of the coordinates of A and :math:`\star(A)`.
        """
        return np.abs(np.dot(self._a - self.com, self.normal))

    def as_dict(self, radians=True):
        """
        Return a dict version of all the properties that can be computed using
        this class.

        Args:
            radians (bool): if True, angles are returned in radians (default)
        """
        data = {
            "atom_A": self.a,
            "star_A": self.star_a,
            "reg_star_A": self.reg_star_a,
            "distances": self.distances,
            "angles": self.get_angles(radians=radians),
            "n_star_A": len(self.star_a),
            "angular_defect": self.angular_defect if radians else np.degrees(self.angular_defect),
            "pyr_distance": self.pyr_distance,
        }
        return data

    def write_file(self, species="C", filename="vertex.xyz"):
        r"""Write the coordinates of atom A and atoms :math:`\star(A)`
        in a file in xyz format. You can set the name of species or a list but
        the length of the list must be equal to the number of atoms.
        If filename is None, returns the string corresponding to the xyz file.

        Args:
            species (str, list): name of the species or list of the species names
            filename (str): path of the output file or None to get a string

        Returns:
            None if filename is a path, else, the string corresponding to the
            xyz file.
        """
        nat = len(self.star_a) + 1
        # NOTE(review): if len(species) != nat the species silently fall back
        # to all carbon; a multi-character species string whose length happens
        # to equal nat would be indexed per character -- confirm intended.
        if len(species) != nat:
            species = nat * "C"

        lines = "%d\n" % nat
        lines += "xyz file from pychemcurv\n"
        lines += "%2s %12.6f %12.6f %12.6f\n" % (species[0],
                                                 self.a[0], self.a[1], self.a[2])
        for iat in range(1, nat):
            lines += "%2s " % species[iat]
            lines += " ".join(["%12.6f" % x for x in self.star_a[iat - 1]])
            lines += "\n"

        if filename is not None:
            with open(filename, "w", encoding="utf-8") as f:
                f.write(lines)
        else:
            return lines

    def __str__(self):
        """ str representation of the vertex atom """
        s = "angular defect: {:.4f} degrees\n".format(
            np.degrees(self.angular_defect))
        s += "size of *(A): {}\n".format(len(self.star_a))
        s += "Atom A:\n{}\n".format(self.a)
        s += "Atoms B in *(A):\n{}\n".format(self.star_a)
        return s

    def __repr__(self):
        """ representation of the vertex atom """
        return "VertexAtom(a={}, star_a={})".format(self.a, self.star_a)
class TrivalentVertex(VertexAtom):
    r"""
    This object represents an atom (or a point) associated to a vertex of the
    squeleton of a molecule bonded to exactly 3 other atoms (or linked to 3
    other points). This correspond to the trivalent case.

    We denote by A a given atom caracterized by its cartesian coordinates
    corresponding to a vector in :math:`\mathbb{R}^3`. This atom A is bonded to
    3 atoms B. The atoms B, bonded to atom A belong to
    :math:`\star(A)` and are caracterized by their cartesian coordinates defined
    as vectors in :math:`\mathbb{R}^3`. The geometrical
    object obtained by drawing a segment between bonded atoms is called the
    skeleton of the molecule and is the initial geometrical picture for a molecule.
    This class is defined from the cartesian coordinates of atom A and the atoms
    belonging to :math:`\star(A)`.

    More generally, the classes only considers points in :math:`\mathbb{R}^3`.
    The is not any chemical consideration here. In consequence, the class can be
    used for all cases where a set of point in :math:`\mathbb{R}^3` is relevant.

    The following quantities are computed according the reference [JCP2020]_

    pyramidalization angle ``pyrA``
        The pyramidalization angle, **in degrees**. :math:`pyrA = \theta - \pi/2`
        where :math:`\theta` is the angle between the normal vector of the plane
        containing the atoms B of :math:`\star(A)` and a vector along a bond
        between atom A and one B atom.

        An exact definition of pyrA needs that A is bonded to exactly 3 atoms in
        order to be able to define a uniq plane that contains the atoms B
        belonging to :math:`\star(A)`. Nevertheless, pyrA is computed if
        more than 3 atoms are bonded to atom A by computing the best fitting plane
        of atoms belonging to :math:`\star(A)`.

    pyramidalization angle, ``pyrA_r``
        The pyramidalization angle **in radians**.

    improper angle, ``improper``
        The improper angle corresponding to the dihedral angle between the
        planes defined by atoms (i, j, k) and (j, k, l), atom i being atom A and
        atoms j, k and l being atoms of :math:`\star(A)`. In consequence, the
        improper angle is defined only if there are 3 atoms in :math:`\star(A)`.
        The value of the improper angle is returned in radians.

    angular defect, ``angular_defect``
        The angluar defect is defined as

        .. math:

            2\pi - \sum_{F\in\star(A)} \alpha_F

        where :math:`\alpha_F` are the angles at the vertex A of the faces
        :math:`F\in\star(A)`. The angular defect is computed whatever the number
        of atoms in :math:`\star(A)`.
        The value of the angular defect is returned in radians.

    spherical curvature, ``spherical_curvature``
        The spherical curvature is computed as the radius of the osculating
        sphere of atoms A and atoms belonging to :math:`\star(A)`. The
        spherical curvature is computed as

        .. math::

            \kappa(A) = \frac{1}{\sqrt{\ell^2 + \dfrac{(OA^2 - \ell^2)^2}{4z_A^2}}}

        where O is the center of the circumbscribed circle of atoms in
        :math:`\star(A)` ; A the vertex atom ; OA the distance between O and A ;
        :math:`\ell` the distance between O and atoms B of :math:`\star(A)` ;
        :math:`z_A` the distance of atom A to the plane defined by
        :math:`\star(A)`. The spherical curvature is defined only if there are
        3 atoms in :math:`\star(A)`.

    pyramidalization distance ``pyr_distance``
        Distance of atom A to the plane define by :math:`\star(A)` or
        the best fitting plane of :math:`\star(A)`.
        The value of the distance is in the same unit as the coordinates.

    If the number of atoms B in :math:`\star(A)` is not suitable to compute some
    properties, `np.nan` is returned.

    Note that the plane defined by atoms B belonging to :math:`\star(A)` is exactly
    defined *only* in the case where there are three atoms B in :math:`\star(A)`.
    In the case of pyrA, if there are more than 3 atoms in :math:`\star(A)`, the
    class use the best fitting plane considering all atoms in :math:`\star(A)` and
    compute the geometrical quantities.
    """

    def __init__(self, a, star_a):
        r"""
        Args:
            a (np.ndarray): cartesian coordinates of point/atom A in :math:`\mathbb{R}^3`
            star_a (nd.array): (N x 3) cartesian coordinates of points/atoms B in :math:`\star(A)`

        Raises:
            ValueError: if :math:`\star(A)` does not contain exactly 3 points.
        """
        super().__init__(a, star_a)

        if self._star_a.shape[0] != 3:
            raise ValueError("The number of atoms/points in *(A) must be 3."
                             " star_a.shape is {}".format(self._star_a.shape))

    @staticmethod
    def from_pyramid(length, theta, radians=False, perturb=None):
        r"""Set up a VertexAtom from an ideal pyramidal structure.

        Build an ideal pyramidal geometry given the angle theta and randomize
        the positions by adding a noise of a given magnitude. The vertex of the
        pyramid is the point A and :math:`\star(A)`. are the points linked to
        the vertex. The size of :math:`\star(A)`. is 3.

        :math:`\theta` is the angle between the normal vector of the plane defined
        from :math:`\star(A)` and the bonds between A and :math:`\star(A)`.
        The pyramidalisation angle is defined from :math:`\theta` such as

        .. math::

            pyrA = \theta - \frac{\pi}{2}

        Args:
            length (float): the bond length
            theta (float): Angle to define the pyramid
            radian (bool): True if theta is in radian (default False)
            perturb (float): Give the width of a normal distribution from which
                random numbers are choosen and added to the coordinates.

        Returns:
            A TrivalentVertex instance
        """
        # delegate to the base-class builder with exactly 3 neighbors
        va = VertexAtom.from_pyramid(
            length, theta, n_star_A=3, radians=radians, perturb=perturb
        )
        return TrivalentVertex(a=va.a, star_a=va.star_a)

    @property
    def improper(self):
        r"""
        Compute the improper angle in randians between planes defined by atoms
        (i, j, k) and (j, k, l). Atom A, is atom i and atoms j, k and l belong
        to :math:`\star(A)`.

        ::

                  l
                  |
                  i
                 / \
                j   k

        This quantity is available only if the length of :math:`\star(A)` is
        equal to 3.
        """
        return get_dihedral(np.concatenate((self._a[np.newaxis, :], self._star_a)))

    @property
    def pyrA_r(self):
        """ Return the pyramidalization angle in radians. """
        # angle between one regularized bond AB and the normal of Reg *(A),
        # shifted so that a planar environment gives pyrA = 0
        v = self.reg_star_a[0] - self._a
        v /= np.linalg.norm(v)
        pyrA = np.arccos(np.dot(v, self.reg_normal)) - np.pi / 2
        return pyrA

    @property
    def pyrA(self):
        """ Return the pyramidalization angle in degrees. """
        return np.degrees(self.pyrA_r)

    @property
    def spherical_curvature(self):
        r"""
        Compute the spherical curvature associated to the osculating sphere of
        points A and points B belonging to :math:`\star(A)`.
        Here, we assume that there is exactly 3 atoms B in :math:`\star(A)`.

        Returns ``np.nan`` when A lies in the plane of :math:`\star(A)`
        (the osculating sphere degenerates to a plane).
        """
        # center of the circumscribed circle of *(A)
        point_O = circum_center(self._star_a)

        # needed length
        l = np.linalg.norm(self._star_a[0] - point_O)
        z_A = np.dot(self._a - point_O, self.normal)
        OA = np.linalg.norm(self._a - point_O)

        # spherical curvature
        if np.isclose(z_A, 0, atol=0, rtol=1e-7):
            kappa = np.nan
        else:
            kappa = 1 / np.sqrt(l**2 + (OA**2 - l**2)**2 / (4 * z_A**2))

        return kappa

    def as_dict(self, radians=True):
        """
        Return a dict version of all the properties that can be computed using
        this class.

        Args:
            radians (bool): if True, angles are returned in radians (default)
        """
        data = super().as_dict(radians=radians)
        data.update({
            "pyrA": self.pyrA_r if radians else self.pyrA,
            "spherical_curvature": self.spherical_curvature,
            "improper": self.improper if radians else np.degrees(self.improper),
        })
        return data

    def __str__(self):
        """ str representation of the vertex atom """
        s = "pyrA: {:.4f} degrees\n".format(self.pyrA)
        s += "Atom A:\n{}\n".format(self.a)
        s += "Atoms B in *(A):\n{}\n".format(self.star_a)
        return s

    def __repr__(self):
        """ representation of the vertex atom """
        return "TrivalentVertex(a={}, star_a={})".format(self.a, self.star_a)
class POAV1:
    r"""
    In the case of the POAV1 theory
    the POAV vector has the property to make a constant angle with each bond
    connected to atom A.

    This class computes indicators related to the POAV1 theory of <NAME>
    following the link established between pyrA and the hybridization of a
    trivalent atom in reference [JCP2020]_.

    A chemical picture of the hybridization can be drawn by considering the
    contribution of the :math:`p` atomic oribtals to the system :math:`\sigma`,
    or the contribution of the s atomic orbital to the system :math:`\pi`. This
    is achieved using the m and n quantities. For consistency with POAV2 class,
    the attributes, ``hybridization``, ``sigma_hyb_nbr`` and ``pi_hyb_nbr``
    are also implemented but return the same values.
    """

    def __init__(self, vertex):
        r"""
        POAV1 is defined from the local geometry of an atom at a vertex of the
        molecule's squeleton.

        Args:
            vertex (TrivalentVertex): the trivalent vertex atom

        Raises:
            TypeError: if ``vertex`` is neither a TrivalentVertex nor a
                VertexAtom.
        """
        if isinstance(vertex, TrivalentVertex):
            self.vertex = vertex
        elif isinstance(vertex, VertexAtom):
            # promote a generic vertex; this enforces exactly 3 neighbors
            self.vertex = TrivalentVertex(vertex.a, vertex.star_a)
        else:
            raise TypeError("vertex must be of type VertexAtom or of type"
                            " TrivalentVertex. vertex is {}".format(type(vertex)))

    @property
    def pyrA(self):
        """ Pyramidalization angle in degrees """
        return self.vertex.pyrA

    @property
    def pyrA_r(self):
        """ Pyramidalization angle in radians """
        return self.vertex.pyrA_r

    @property
    def poav(self):
        """ Return a unitary vector along the POAV vector """
        return self.vertex.reg_normal

    @property
    def c_pi(self):
        r"""
        Value of :math:`c_{\pi}` in the ideal case of a :math:`C_{3v}`
        geometry. Equation (22), with :math:`c_{1,2} = \sqrt{2/3}`.

        .. math::

            c_{\pi} = \sqrt{2} \tan Pyr(A)
        """
        return np.sqrt(2) * np.tan(self.pyrA_r)

    @property
    def lambda_pi(self):
        r"""
        value of :math:`\lambda_{\pi}` in the ideal case of a :math:`C_{3v}`
        geometry. Equation (23), with :math:`c^2_{1,2} = 2/3`.

        .. math::

            \lambda_{\pi} = \sqrt{1 - 2 \tan^2 Pyr (A)}

        Raises:
            ValueError: if :math:`1 - 2\tan^2 Pyr(A) < 0`, i.e. outside the
                domain of definition of :math:`\lambda_{\pi}`.
        """
        # check domain definition of lambda_pi
        value = 1 - 2 * np.tan(self.pyrA_r) ** 2
        if value < 0:
            raise ValueError("lambda_pi is not define. "
                             "pyrA (degrees) = {}".format(self.pyrA))
        else:
            return np.sqrt(value)

    @property
    def m(self):
        r"""
        value of hybridization number m, see equation (44)

        .. math::

            m = \left(\frac{c_{\pi}}{\lambda_{\pi}}\right)^2
        """
        return (self.c_pi / self.lambda_pi) ** 2

    @property
    def n(self):
        """
        value of hybridization number n, see equation (47)

        .. math::

            n = 3m + 2
        """
        return 3 * self.m + 2

    @property
    def pi_hyb_nbr(self):
        r""" This quantity measure the weight of the s atomic orbital with
        respect to the p atomic orbital in the :math:`h_{\pi}` hybrid orbital
        along the POAV vector.
        This is equal to m.
        """
        return self.m

    @property
    def sigma_hyb_nbr(self):
        """ This quantity measure the weight of the p atomic orbitals with
        respect to s in the hi hybrid orbitals along the bonds with atom A.
        This is equal to n
        """
        return self.n

    @property
    def hybridization(self):
        r""" Compute the hybridization such as

        .. math::

            s p^{(2 + c_{\pi}^2) / (1 - c_{\pi}^2)}

        This quantity corresponds to the amount of p AO in the system
        :math:`\sigma`. This is equal to n and corresponds to the
        :math:`\tilde{n}` value defined by Haddon.

        TODO: check whether this quantity is equal to n only in the
        C3v case.
        """
        # return self.n
        return (2 + self.c_pi ** 2) / (1 - self.c_pi ** 2)

    def as_dict(self, radians=True, include_vertex=False):
        r""" Return a dict version of all the properties that can be
        computed with this class. Note that in the case of
        :math:`\lambda_{\pi}` and :math:`c_{\pi}` the squared values are
        returned as as they are more meaningfull.

        Args:
            radians (bool): if True, angles from the vertex data are in radians
            include_vertex (bool): if True, also merge the vertex's as_dict data
        """
        data = {
            "hybridization": self.hybridization,
            "n": self.n,
            "m": self.m,
            # "lambda_pi": self.lambda_pi,
            # "c_pi": self.c_pi,
            "c_pi^2": self.c_pi ** 2,
            "lambda_pi^2": self.lambda_pi ** 2,
            "poav": self.poav.tolist(),
        }
        if include_vertex:
            data.update(self.vertex.as_dict(radians=radians))

        return data
class POAV2:
    r""" In the case of the POAV2 theory the POAV2 vector on atom A is
    such as the set of hybrid molecular orbitals :math:`{h_{\pi}, h_1, h_2, h_3}`
    is orthogonal ; where the orbitals :math:`h_i` are hybrid orbitals
    along the bonds with atoms linked to atom A and :math:`h_{\pi}` is
    the orbital along the POAV2 :math:`\vec{u}_{\pi}` vector.

    This class computes indicators related to the POAV2 theory of
    <NAME> following the demonstrations in the reference [POAV2]_.
    """

    def __init__(self, vertex):
        r""" POAV2 is defined from the local geometry of an atom at a
        vertex of the molecule's squeleton.

        Args:
            vertex (TrivalentVertex): the trivalent vertex atom

        Raises:
            TypeError: if ``vertex`` is neither a TrivalentVertex nor a
                VertexAtom.
        """
        if isinstance(vertex, TrivalentVertex):
            self.vertex = vertex
        elif isinstance(vertex, VertexAtom):
            # promote a generic vertex; this enforces exactly 3 neighbors
            self.vertex = TrivalentVertex(vertex.a, vertex.star_a)
        else:
            raise TypeError("vertex must be of type VertexAtom or of type"
                            " TrivalentVertex. vertex is {}".format(type(vertex)))

        # bond angles (radians) between pairs of bonds ABi, ABj
        self.angles = self.vertex.get_angles(radians=True)

    @property
    def matrix(self):
        r""" Return the (3 x 3) matrix M built from the bond unit vectors and
        the bond-angle cosines. The null space of this matrix supports the
        POAV2 vector :math:`\vec{u}_{\pi}` (see the ``u_pi`` property). """
        cos_01 = np.cos(self.angles[(0, 1)])
        cos_02 = np.cos(self.angles[(0, 2)])
        cos_12 = np.cos(self.angles[(1, 2)])

        # unit vectors along the bonds A-Bi (regularized coordinates)
        ui = self.vertex.reg_star_a - self.vertex.a

        M = np.array([
            [ui[2, 0] * cos_01 - ui[1, 0] * cos_02,
             ui[2, 1] * cos_01 - ui[1, 1] * cos_02,
             ui[2, 2] * cos_01 - ui[1, 2] * cos_02],
            [ui[0, 0] * cos_12 - ui[2, 0] * cos_01,
             ui[0, 1] * cos_12 - ui[2, 1] * cos_01,
             ui[0, 2] * cos_12 - ui[2, 2] * cos_01],
            [ui[1, 0] * cos_02 - ui[0, 0] * cos_12,
             ui[1, 1] * cos_02 - ui[0, 1] * cos_12,
             ui[1, 2] * cos_02 - ui[0, 2] * cos_12]
        ])

        return M

    @property
    def u_pi(self):
        r"""
        Return vector :math:`u_{\pi}` as the basis of the zero space of the
        matrix M. This unitary vector support the POAV2 vector.

        Raises:
            ValueError: if the null space of M is not one dimensional.
        """
        u = null_space(self.matrix)
        rank = u.shape[1]
        if rank != 1:
            raise ValueError("The rank of the null space is not equal to 1. "
                             "The POAV2 u_pi vector may not exist. "
                             "rank = %d" % rank)
        u = u.ravel()

        # make the direction of u_pi the same as IA (and thus reg_normal)
        # I is the center of mass of *(A)
        IA = self.vertex.a - self.vertex.com
        if np.dot(IA, u) < 0:
            u *= -1

        return u

    @property
    def sigma_hyb_nbrs(self):
        r"""
        Compute and return the sigma-orbital hybridization numbers n1, n2 and n3.
        These quantities measure the weight of the p atomic orbitals with
        respect to s in each of the :math:`h_i` hybrid orbitals along the bonds
        with atom A.
        """
        cos_01 = np.cos(self.angles[(0, 1)])
        cos_02 = np.cos(self.angles[(0, 2)])
        cos_12 = np.cos(self.angles[(1, 2)])

        n1 = - cos_12 / cos_01 / cos_02
        n2 = - cos_02 / cos_12 / cos_01
        n3 = - cos_01 / cos_02 / cos_12

        return n1, n2, n3

    @property
    def pi_hyb_nbr(self):
        r"""
        This quantity measure the weight of the s atomic orbital with
        respect to the p atomic orbital in the :math:`h_{\pi}` hybrid orbital
        along the POAV2 vector.
        """
        n = self.sigma_hyb_nbrs
        # total s weight carried by the three sigma hybrids
        w_sigma = sum([1 / (1 + ni) for ni in n])
        m = 1 / w_sigma - 1
        return m

    @property
    def pyrA_r(self):
        r"""
        Compute the angles **in radians** between vector :math:`u_{\pi}` and
        all the bonds between atom A and atoms B in :math:`\star(A)`.
        Returns one angle per bond (array of length 3).
        """
        ui = self.vertex.reg_star_a - self.vertex.a
        scal = np.dot(ui, self.u_pi)
        return np.arccos(scal)

    @property
    def pyrA(self):
        r""" Same angles as ``pyrA_r`` but in degrees. """
        return np.degrees(self.pyrA_r)

    def as_dict(self, radians=True, include_vertex=False):
        r"""
        Return a dict version of all the properties that can be computed with
        this class.

        Args:
            radians (bool): if True, angles from the vertex data are in radians
            include_vertex (bool): if True, also merge the vertex's as_dict data
        """
        data = {
            "pi_hyb_nbr": self.pi_hyb_nbr,
            "u_pi": self.u_pi.tolist(),
            "matrix": self.matrix.tolist(),
        }
        data.update({"n_%d" % i: ni
                     for i, ni in enumerate(self.sigma_hyb_nbrs, 1)})
        if include_vertex:
            data.update(self.vertex.as_dict(radians=radians))

        return data
|
gVallverdu/pychemcurv | pychemcurv/geometry.py | # coding: utf-8
"""
This module implements utility functions to compute several geometric
properties.
"""
import numpy as np
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
__all__ = ["center_of_mass", "circum_center", "get_plane", "get_dihedral"]
def center_of_mass(coords, masses=None):
    r"""Compute the center of mass of the points at coordinates `coords` with
    masses `masses`.

    Args:
        coords (np.ndarray): (N, 3) matrix of the points in :math:`\mathbb{R}^3`
        masses (np.ndarray): vector of length N with the masses

    Returns:
        The center of mass as a vector in :math:`\mathbb{R}^3`

    Raises:
        ValueError: if ``coords`` or ``masses`` cannot be converted to float
            arrays of the expected shapes.
    """
    # check coord array
    try:
        coords = np.array(coords, dtype=np.float64)
        coords = coords.reshape(coords.size // 3, 3)
    except ValueError:
        print("coords = ", coords)
        raise ValueError("Cannot convert coords in a numpy array of floats"
                         " with a shape (N, 3).")

    # check masses; default is unit masses, i.e. the geometric centroid.
    # (The original code repeated the `masses is None` check after this
    # block -- that second check was dead code and has been removed.)
    if masses is None:
        masses = np.ones(coords.shape[0])
    else:
        try:
            masses = np.array(masses, dtype=np.float64)
            masses = masses.reshape(coords.shape[0])
        except ValueError:
            print("masses = ", masses)
            raise ValueError("Cannot convert masses in a numpy array of "
                             "floats with length coords.shape[0].")

    # mass-weighted average of the coordinates
    return np.sum(coords * masses[:, np.newaxis], axis=0) / masses.sum()
def circum_center(coords):
    r"""Compute the coordinates of the center of the circumscribed circle from
    three points A, B and C in :math:`\mathbb{R}^3`.

    Args:
        coords (ndarray): (3x3) cartesian coordinates of points A, B and C.

    Returns
        The coordinates of the center of the cicumscribed circle

    Raises:
        ValueError: if ``coords`` cannot be converted to a (3, 3) float array.
    """
    try:
        coords = np.array(coords, dtype=np.float64).reshape(3, 3)
    except ValueError:
        print("coords = ", coords)
        raise ValueError("Cannot convert coords in a numpy array of floats"
                         " with a shape (3, 3).")

    # get coords of poins A, B and C
    a, b, c = coords

    # normal vector to ABC plane
    ABvAC = np.cross(b - a, c - a)

    # The center O is constrained by the perpendicular-bisector conditions
    # of AB and AC (first two rows) and by lying in the ABC plane (last row).
    M = np.array([b - a, c - a, ABvAC])
    B = np.array([np.dot(b - a, (b + a) / 2),
                  np.dot(c - a, (c + a) / 2),
                  np.dot(ABvAC, a)])

    # Solve the linear system directly; np.linalg.solve is more accurate
    # and cheaper than forming the explicit inverse (inv(M) @ B) as the
    # original code did.
    return np.linalg.solve(M, B)
def get_plane(coords, masses=None):
    r"""Given a set of N points in :math:`\mathbb{R}^3`, compute an orthonormal
    basis of vectors, the first two belonging to the plane and the third one
    being normal to the plane. In the particular case where N equal 3, there is
    an exact definition of the plane as the three points define an unique plan.
    If N = 3, use a gram-schmidt orthonormalization to compute the vectors. If
    N > 3, the orthonormal basis is obtained from SVD.

    Args:
        coords (np.ndarray): (N, 3) matrix of the points in :math:`\mathbb{R}^3`
        masses (np.ndarray): vector of length N with the masses

    Returns:
        Returns the orthonormal basis (vecx, vecy, n_a), vector n_a being
        normal to the plane.
    """
    # check and convert the coordinate array
    try:
        coords = np.array(coords, dtype=np.float64)
        coords = coords.reshape(coords.size // 3, 3)
    except ValueError as err:
        print("coords = ", coords)
        raise ValueError("Cannot convert coords in a numpy array of floats"
                         " with a shape (N, 3).") from err

    # check and convert the masses; default to unit masses
    if masses is None:
        masses = np.ones(coords.shape[0])
    else:
        try:
            masses = np.array(masses, dtype=np.float64)
            masses = masses.reshape(coords.shape[0])
        except ValueError as err:
            print("masses = ", masses)
            raise ValueError("Cannot convert masses in a numpy array of "
                             "floats with length coords.shape[0].") from err

    if coords.shape == (3, 3):
        # the plane is exactly defined from 3 points: Gram-Schmidt
        vecx = coords[1] - coords[0]
        vecx /= np.linalg.norm(vecx)
        # vecy, orthonormal with vecx
        vecy = coords[2] - coords[0]
        vecy -= np.dot(vecy, vecx) * vecx
        vecy /= np.linalg.norm(vecy)
        # normal vector
        n_a = np.cross(vecx, vecy)
    else:
        # get the best fitting plane from SVD of the centered coordinates.
        # The center of mass is only needed here, so it is computed in this
        # branch only (the original computed it unconditionally).
        com = np.sum(coords * masses[:, np.newaxis], axis=0) / masses.sum()
        _, _, (vecx, vecy, n_a) = np.linalg.svd(coords - com)

    return vecx, vecy, n_a
def get_dihedral(coords):
    r"""
    Compute the improper angle in radians between the planes defined by the
    points (0, 1, 2) and (1, 2, 3). The returned angle is a dihedral angle
    when points 0, 1, 2 and 3 form a chain of bonded atoms in this order.
    ::

        0          3
         \        /
          1 -- 2

    The returned angle is an improper angle when point 0 is at the center and
    linked to the other points.
    ::

          3
          |
          0
         / \
        1   2

    Args:
        coords (ndarray): numpy array of the cartesian coordinates with shape (4, 3)

    Returns
        The dihedral angle value in radians.
    """
    # bond vectors along the chain i(0) - j(1) - k(2) - l(3)
    b_ij = coords[1] - coords[0]
    b_jk = coords[2] - coords[1]
    b_lk = coords[2] - coords[3]

    # normals to the (i, j, k) and (j, k, l) planes
    n_ijk = np.cross(b_ij, b_jk)
    n_jkl = np.cross(b_lk, b_jk)

    # signed angle between the two planes: arctan2 keeps the sign, which an
    # arccos of the normalized dot product would lose
    return np.arctan2(np.dot(b_ij, n_jkl) * np.linalg.norm(b_jk),
                      np.dot(n_ijk, n_jkl))
|
gVallverdu/pychemcurv | setup.py | # coding: utf-8
import setuptools

# Package metadata, reused below in the setup() call.
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
__version__ = "2020.6.3"

# The long description shown on PyPI is read from the README file.
with open("README.rst", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pychemcurv",
    version=__version__,
    author=__author__,
    author_email=__email__,
    url="https://github.com/gVallverdu/pychemcurv",
    # A short description
    description="Discrete and local curvature applied to chemistry and chemical reactivity",
    # long description
    long_description=long_description,
    long_description_content_type="text/x-rst",
    # requirements
    install_requires=[
        "numpy", "pandas", "pymatgen", "matplotlib", "scipy",
    ],
    # extra requirements
    extras_require={
        # for nglview visualization in jupyter notebook
        "viz": ["jupyter", "ase", "nglview"],
        # to run the dash app locally
        "app": ["dash", "dash-bio"],
    },
    # find_packages() discovers the package; the data folder is excluded
    packages=setuptools.find_packages(exclude=["pychemcurv-data"]),
    # PyPI trove classifiers
    classifiers=[
        "Programming Language :: Python :: 3",
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Chemistry",
    ],
    python_requires='>=3.6',
)
gVallverdu/pychemcurv | pychemcurv/analysis.py | <reponame>gVallverdu/pychemcurv
# coding: utf-8
"""
This module implements the `CurvatureAnalyze` class to perform curvature
analyses on molecular or periodic structures.
"""
import numpy as np
import pandas as pd
from pymatgen.core import Molecule, Structure
from pymatgen.core.bonds import obtain_all_bond_lengths
from .core import VertexAtom, TrivalentVertex, POAV1, POAV2
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
__all__ = ["CurvatureAnalyzer"]
class CurvatureAnalyzer:
    """ This class provides helpful methods to analyze the local curvature
    on all atoms of a given structure. The structure is either a molecule or
    a periodic structure. Once the structure is read, the class determines the
    connectivity of the structure in order to define all vertices. The
    connectivity is defined on a distance criterion.
    """

    def __init__(self, structure, bond_tol=0.2, rcut=2.5, bond_order=None):
        """ The class needs a pymatgen.Structure or pymatgen.Molecule object as
        first argument. The other arguments are used to define if two atoms are
        bonded or not.

        Args:
            structure (Structure, Molecule): A Structure or Molecule pymatgen
                object
            bond_tol (float): Tolerance used to determine if two atoms are
                bonded. Look at `pymatgen.core.CovalentBond.is_bonded`.
            rcut (float): Cutoff distance in case the bond is not known
            bond_order (dict): Not yet implemented

        Raises:
            TypeError: if structure is neither a Molecule nor a Structure.
        """
        if isinstance(structure, (Molecule, Structure)):
            self.structure = structure
        else:
            # fixed grammar of the error message ("must a" -> "must be a")
            raise TypeError("structure must be a Molecule or Structure pymatgen"
                            " object. type(structure) is: " + str(type(structure)))
        self.bond_tol = bond_tol
        self.rcut = rcut
        self.bond_order = bond_order

        # compute the distance matrix only once: structure.distance_matrix is
        # expensive and must not be called repeatedly
        self._distance_matrix = self.structure.distance_matrix

        # look for bonds and set vertices
        self._vertices = []
        self._bonds = set()
        self._vertices_idx = []
        self._get_vertex()

        # fill a DataFrame with the data
        self._data = pd.DataFrame([])
        self._compute_data()

    @property
    def vertices(self):
        """ List of vertices associated to each atom of the molecule """
        return self._vertices

    @property
    def bonds(self):
        """ Set of tuples of bonded atom index """
        return self._bonds

    @property
    def vertices_idx(self):
        r""" List of tuples of the indexes of atoms in each vertex. The first
        index is atom A, the following are atoms of :math:`\star(A)`. """
        return self._vertices_idx

    @property
    def data(self):
        """
        Return a Data Frame that contains all the geometric and hybridization
        data.
        """
        return self._data

    @property
    def distance_matrix(self):
        """ Returns the distance matrix between all atoms. For periodic
        structures, this returns the nearest image distances. """
        return self._distance_matrix

    def _get_vertex(self):
        """ Look for all vertex defined as atoms bonded to at least
        3 neighbors and set up a list of VertexAtom object."""
        vertices = list()
        vertices_idx = list()
        bonds = set()
        errors = set()
        for isite, site_i in enumerate(self.structure):
            atom_A = site_i.coords
            star_a = list()
            vertex_idx = [isite]

            for jsite, site_j in enumerate(self.structure):
                if isite == jsite:
                    continue

                # check if i and j are bonded
                distance = self._distance_matrix[isite, jsite]
                bonded = False
                try:
                    # look for the bond length database of pymatgen;
                    # equivalent to CovalentBonds.is_bonded but avoids
                    # computing the bond length twice
                    ref_distances = obtain_all_bond_lengths(site_i.specie,
                                                            site_j.specie)
                    # TODO: use ref_distances from a bond order
                    for rcut in ref_distances.values():
                        if distance < (1 + self.bond_tol) * rcut:
                            bonded = True
                except ValueError as e:
                    # bond unknown to pymatgen: fall back to the rcut cutoff
                    errors.add(str(e))
                    bonded = distance <= self.rcut

                # increment *(A) if i and j are bonded
                if bonded:
                    star_a.append(site_j.coords)
                    vertex_idx.append(jsite)
                    bonds.add(tuple(sorted([isite, jsite])))

            # set up VertexAtom objects; atoms with fewer than 3 neighbors
            # cannot define a vertex and are stored as None
            if len(star_a) >= 3:
                vertices.append(VertexAtom(atom_A, star_a))
            else:
                vertices.append(None)
            vertices_idx.append(tuple(vertex_idx))

        self._vertices = vertices
        self._vertices_idx = vertices_idx
        self._bonds = bonds

        if errors:
            print("errors\n", "\n".join(errors))
            print("Default cutoff of {} was used for the above bond".format(self.rcut))

    def _compute_data(self):
        """ Compute geometric and hybridization data for all vertices in the
        structure and store them in a DataFrame. """
        data = list()
        for vertex, vertex_idx in zip(self.vertices, self.vertices_idx):
            if vertex is None:
                # atom with fewer than 3 neighbors: no vertex data
                vdata = {}
                vdata["n_star_A"] = 0
            else:
                if len(vertex.star_a) == 3:
                    vertex = TrivalentVertex(vertex.a, vertex.star_a)
                    try:
                        poav1 = POAV1(vertex=vertex)
                        poav2 = POAV2(vertex=vertex)
                        vdata = {
                            **poav1.as_dict(radians=False, include_vertex=True),
                            **poav2.as_dict(radians=False)
                        }
                    except ValueError as e:
                        # BUGFIX: the original code only printed here and then
                        # fell through to use poav1/poav2, raising a NameError
                        # when the POAV construction failed. Fall back to the
                        # plain vertex data instead.
                        print("Unable to compute all data.",
                              vertex.as_dict(radians=False))
                        print(e)
                        vdata = vertex.as_dict(radians=False)
                else:
                    vdata = vertex.as_dict(radians=False)

            # add atom identity and the average neighbor distance
            ia = vertex_idx[0]
            vdata.update(atom_idx=ia, species=self.structure[ia].specie.symbol)
            distances = [self.distance_matrix[ia, j] for j in vertex_idx[1:]]
            vdata.update({"ave. neighb. dist.": np.mean(distances)})

            data.append(vdata)

        self._data = pd.DataFrame(data)

    @staticmethod
    def from_file(path, periodic=None):
        """ Returns a CurvatureAnalyzer object from the structure at the
        given path. This method relies on the file formats supported by the
        pymatgen Molecule and Structure classes.

        Supported formats for periodic structures include CIF, POSCAR/CONTCAR,
        CHGCAR, LOCPOT, vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized
        structures.

        Supported formats for molecules include xyz, gaussian input
        (gjf|g03|g09|com|inp), Gaussian output (.out) and pymatgen's JSON
        serialized molecules.

        Args:
            path (str): Path to the structure file
            periodic (bool): if True, assume that the file corresponds to a
                periodic structure. Default is None. The method tries to read
                the file, first from the Molecule class and second from the
                Structure class of pymatgen.

        Raises:
            ValueError: if the file cannot be read as a molecule nor as a
                periodic structure.
        """
        if periodic is None:
            # try to read as a molecule first
            try:
                structure = Molecule.from_file(path)
            except ValueError as e1:
                print("Cannot read file as a molecule.")
                # Try to read as a periodic structure
                try:
                    structure = Structure.from_file(path)
                except ValueError as e2:
                    print("Cannot read file as a periodic structure.")
                    print("Try as a molecule, error:", e1)
                    print("Try as a structure, error:", e2)
                    raise ValueError(
                        "Unable to load structure from file '%s'" % path)
        elif periodic:
            # Structure object
            structure = Structure.from_file(path)
        else:
            # Molecule object
            structure = Molecule.from_file(path)
        print("Read structure, done.")

        return CurvatureAnalyzer(structure)

    def get_molecular_data(self):
        """
        Set up a model data dictionary that contains species, coordinates and
        bonds of the structure. This dictionary can be used as model data for
        further visualization in dash-bio.
        """
        # set up the dict consumed by dash_bio.Molecule3dViewer
        model_data = {"atoms": [], "bonds": []}

        # structure part
        for iat, site in enumerate(self.structure):
            name = "%s%d" % (site.specie.symbol, iat + 1)
            model_data["atoms"].append({"name": name,
                                        "serial": iat,
                                        "element": site.specie.symbol,
                                        "positions": site.coords.tolist()})

        # bonds part
        for bond in self.bonds:
            iat, jat = bond
            model_data["bonds"].append(
                {"atom1_index": iat, "atom2_index": jat}
            )

        return model_data
gVallverdu/pychemcurv | tests/test_core.py | #!/usr/bin/env python
# coding: utf-8
# import sys
# sys.path.append("../")
from pytest import approx
from pychemcurv import VertexAtom, TrivalentVertex, POAV1, POAV2
import numpy as np
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
class TestVertexAtom:
    """ Test for class pychemcurv.core.VertexAtom """

    def setup_method(self):
        # reference apex angles: ideal sp3 (arccos(-1/3) ~ 109.47 deg) and
        # sp2 (90 deg, planar); all bond lengths equal self.l
        self.theta_sp3 = np.arccos(-1 / 3)
        self.theta_sp2 = np.pi / 2
        self.l = 1.3

        # sp3 pyramid: apex on the z axis, three neighbors in the z=0 plane
        coords = [[0, 0, -self.l * np.cos(self.theta_sp3)]]
        IB = self.l * np.sin(self.theta_sp3)
        for angle in [0, 2 * np.pi / 3, 4 * np.pi / 3]:
            coords.append([IB * np.cos(angle), IB * np.sin(angle), 0])
        coords = np.array(coords, dtype=np.float64)
        self.va_sp3 = VertexAtom(coords[0], coords[1:])

        # squared pyramid: four neighbors, apex angle of 100 degrees
        theta = np.radians(100.0)
        coords = [[0, 0, -self.l * np.cos(theta)]]
        IB = self.l * np.sin(theta)
        for i in range(4):
            angle = i * np.pi / 2
            coords.append([IB * np.cos(angle), IB * np.sin(angle), 0])
        coords = np.array(coords, dtype=np.float64)
        self.va_sq = VertexAtom(coords[0], coords[1:])

        # random case: arbitrary geometry; expected values below are
        # regression references
        coords = [[-2.62985741, 6.99670582, -2.89817324],
                  [-2.32058737, 5.49122664, -3.13957301],
                  [-2.92519373, 6.96241176, -1.65009278],
                  [-1.62640146, 7.93539179, -3.17337668]]
        self.va_rand = VertexAtom(coords[0], coords[1:])

    def test_a_star_a_shape(self):
        # a is a single point; star_a has one row per neighbor
        assert self.va_sp3.a.shape == (3, )
        assert self.va_sq.a.shape == (3, )
        assert self.va_sp3.star_a.shape == (3, 3)
        assert self.va_sq.star_a.shape == (4, 3)

    def test_a_star_a_values(self):
        # input coordinates must be stored unchanged
        assert self.va_rand.a == approx(
            [-2.62985741, 6.99670582, -2.89817324])
        star_a = np.array([[-2.32058737, 5.49122664, -3.13957301],
                           [-2.92519373, 6.96241176, -1.65009278],
                           [-1.62640146, 7.93539179, -3.17337668]])
        assert self.va_rand.star_a.flatten() == approx(star_a.flatten())

    def test_reg_star_a(self):
        # regularized neighbors (regression reference values)
        reg = np.array([[-2.43106709, 6.02902499, -3.05333841],
                        [-2.86004832, 6.96997636, -1.92539491],
                        [-1.91379553, 7.66654812, -3.09455724]]).flatten()
        assert self.va_rand.reg_star_a.flatten() == approx(reg)

    def test_reg_normal(self):
        # normal of the regularized star (regression reference values)
        assert self.va_rand.reg_normal == approx(
            [-0.81987531, 0.24597365, -0.51701203])

    def test_angles(self):
        # sp3 pyramid: all angles equal arccos(-1/3), in radians by default
        angles = self.va_sp3.get_angles()
        for a in angles.values():
            assert a == approx(np.arccos(-1/3))
        # radians=False returns degrees
        angles = self.va_sp3.get_angles(radians=False)
        for a in angles.values():
            assert a == approx(np.degrees(np.arccos(-1/3)))

    def test_distances(self):
        # all bond lengths were built equal to self.l
        for d in self.va_sp3.distances:
            assert d == approx(self.l)
        for d in self.va_sq.distances:
            assert d == approx(self.l)

    def test_normal(self):
        # sp3 pyramid was built with its normal along +z
        assert self.va_sp3.normal == approx([0., 0., 1.])
        assert self.va_rand.normal == approx(
            [-0.80739002, 0.22175107, -0.54676121])

    def test_pyr_distance(self):
        # distance of the apex to the neighbor plane (built in z=0)
        assert self.va_sp3.pyr_distance == approx(
            -self.l * np.cos(self.theta_sp3))
        assert self.va_sq.pyr_distance == approx(
            -self.l * np.cos(np.radians(100)))

    def test_from_pyramid(self):
        # the factory must reproduce the pyramids built by hand above
        # sp3 pyramid
        va = VertexAtom.from_pyramid(self.l, self.theta_sp3, radians=True)
        assert va.a.shape == (3, )
        assert self.va_sp3.a == approx(va.a)
        assert va.star_a.shape == (3, 3)
        assert self.va_sp3.star_a.flatten() == approx(va.star_a.flatten())
        # squared pyramid, angle given in degrees
        va = VertexAtom.from_pyramid(self.l, 100, 4, radians=False)
        assert va.a.shape == (3,)
        assert va.star_a.shape == (4, 3)
        assert self.va_sq.a == approx(va.a)
        assert self.va_sq.star_a.flatten() == approx(va.star_a.flatten())
class TestTrivalentVertex:
    """ Tests about the pychemcurv.TrivalentVertex class """

    def setup_method(self):
        # reference apex angles: ideal sp3 (arccos(-1/3)) and sp2 (planar)
        self.theta_sp3 = np.arccos(-1 / 3)
        self.theta_sp2 = np.pi / 2
        self.l = 1.3

        # sp3 pyramid: apex on the z axis, three neighbors in the z=0 plane
        coords = [[0, 0, -self.l * np.cos(self.theta_sp3)]]
        IB = self.l * np.sin(self.theta_sp3)
        for angle in [0, 2 * np.pi / 3, 4 * np.pi / 3]:
            coords.append([IB * np.cos(angle), IB * np.sin(angle), 0])
        coords = np.array(coords, dtype=np.float64)
        self.va_sp3 = TrivalentVertex(coords[0], coords[1:])

        # sp2 case: planar, atom A at the centroid of its neighbors
        coords = [[0, 0, 0]]
        for angle in [0, 2 * np.pi / 3, 4 * np.pi / 3]:
            coords.append([self.l * np.cos(angle), self.l * np.sin(angle), 0])
        coords = np.array(coords, dtype=np.float64)
        self.va_sp2 = TrivalentVertex(coords[0], coords[1:])

        # random case: arbitrary geometry; expected values below are
        # regression references
        coords = [[-2.62985741, 6.99670582, -2.89817324],
                  [-2.32058737, 5.49122664, -3.13957301],
                  [-2.92519373, 6.96241176, -1.65009278],
                  [-1.62640146, 7.93539179, -3.17337668]]
        self.va_rand = TrivalentVertex(coords[0], coords[1:])

    def test_pyrA(self):
        # pyramidalization angle: theta - 90 deg; pyrA in degrees, pyrA_r in
        # radians; zero for the planar sp2 case
        assert self.va_sp3.pyrA == approx(np.degrees(self.theta_sp3) - 90.)
        assert self.va_sp3.pyrA_r == approx(self.theta_sp3 - np.pi / 2)
        assert self.va_sp2.pyrA == approx(0)
        assert self.va_rand.pyrA == approx(18.7104053164)
        assert self.va_rand.pyrA_r == approx(np.radians(18.7104053164))

    def test_angular_defect(self):
        # 2 pi minus the sum of the three angles around atom A
        assert self.va_sp3.angular_defect == approx(
            2 * np.pi - 3 * np.arccos(- 1 / 3))
        assert self.va_sp2.angular_defect == approx(0)
        assert np.degrees(self.va_rand.angular_defect) == approx(29.83127456)

    def test_spherical_curvature(self):
        # undefined (NaN) for the planar sp2 case
        assert self.va_sp3.spherical_curvature == approx(0.5128205128205)
        assert np.isnan(self.va_sp2.spherical_curvature)
        assert self.va_rand.spherical_curvature == approx(0.4523719038)

    def test_improper(self):
        # improper angle in radians, zero in the planar case
        assert self.va_sp3.improper == approx(np.radians(-35.2643896828))
        assert self.va_sp2.improper == approx(0.)
        assert self.va_rand.improper == approx(np.radians(-30.021240733))

    def test_pyr_distance(self):
        # distance of atom A from the neighbor plane; zero in the planar case
        assert self.va_sp2.pyr_distance == approx(0.)
        dist = self.l * np.sin(self.theta_sp3 - np.pi / 2)
        assert self.va_sp3.pyr_distance == approx(dist)
        assert self.va_rand.pyr_distance == approx(0.4515551342307116)

    def test_from_pyramid(self):
        # the factory must reproduce the pyramids built by hand above
        # sp3 pyramid
        va = VertexAtom.from_pyramid(self.l, self.theta_sp3, radians=True)
        assert self.va_sp3.a.shape == (3, )
        assert self.va_sp3.a == approx(va.a)
        assert self.va_sp3.star_a.shape == (3, 3)
        assert self.va_sp3.star_a.flatten() == approx(va.star_a.flatten())
        # sp2 (planar) case
        va = VertexAtom.from_pyramid(self.l, self.theta_sp2, radians=True)
        assert self.va_sp2.a.shape == (3, )
        assert self.va_sp2.a == approx(va.a)
        assert self.va_sp2.star_a.shape == (3, 3)
        assert self.va_sp2.star_a.flatten() == approx(va.star_a.flatten())
class TestPOAV1:
    """ Tests about the pychemcurv.core.POAV1 class """

    def setup_method(self):
        # build POAV1 objects for the ideal sp3, planar sp2 and an
        # intermediate (108.2 deg) pyramid
        theta = np.degrees(np.arccos(-1 / 3))
        self.pyrA_sp3 = theta - 90
        v_sp3 = TrivalentVertex.from_pyramid(1.3, theta)
        v_sp2 = TrivalentVertex.from_pyramid(1.3, 90.)
        v_r = TrivalentVertex.from_pyramid(1.3, 108.2)
        self.poav_sp3 = POAV1(v_sp3)
        self.poav_sp2 = POAV1(v_sp2)
        self.poav_a = POAV1(v_r)
        # random coordinates of a pyramid with pyrA = 0.326558177 radians
        coords = [[-2.62985741, 6.99670582, -2.89817324],
                  [-2.32058737, 5.49122664, -3.13957301],
                  [-2.92519373, 6.96241176, -1.65009278],
                  [-1.62640146, 7.93539179, -3.17337668]]
        self.poav_b1 = POAV1(VertexAtom(coords[0], coords[1:]))

    def test_pyrA(self):
        # pyrA in degrees, pyrA_r in radians; zero for the planar sp2 case
        assert self.poav_sp3.pyrA == approx(self.pyrA_sp3)
        assert self.poav_sp2.pyrA == approx(0.)
        assert self.poav_a.pyrA == approx(18.2)
        assert self.poav_sp3.pyrA_r == approx(np.arccos(-1 / 3) - np.pi / 2)
        assert self.poav_sp2.pyrA_r == approx(0.)
        assert self.poav_a.pyrA_r == approx(np.radians(18.2))
        assert self.poav_b1.pyrA_r == approx(0.326558177)
        assert self.poav_b1.pyrA == approx(18.71040530758611)

    def test_coeffs(self):
        # hybridization coefficients: analytic sp3/sp2 limits plus a
        # regression value for the intermediate pyramid
        poavs = [self.poav_sp3, self.poav_sp2, self.poav_a]
        c_pis = [1/2, 0., 0.46496976]
        for poav, c_pi in zip(poavs, c_pis):
            assert poav.c_pi == approx(c_pi)
        lambda_pi2s = [3/4, 1., 0.78380312]
        for poav, l_pi2 in zip(poavs, lambda_pi2s):
            assert poav.lambda_pi**2 == approx(l_pi2)
        # hybridization numbers: n = 3 (sp3) and n = 2 (sp2) limits
        ns = [3, 2, 2.82749177]
        ms = [1/3, 0, 0.27583059]
        for poav, n, m in zip(poavs, ns, ms):
            assert poav.n == approx(n)
            assert poav.m == approx(m)

    def test_POAV(self):
        # POAV vector: along +z for the symmetric sp3 pyramid built above
        assert self.poav_sp3.poav == approx([0., 0., 1.])
        assert self.poav_b1.poav == approx(
            [-0.81987531, 0.24597365, -0.51701203])
class TestPOAV2:
    """ Tests about the pychemcurv.core.POAV2 class """

    def setup_method(self):
        # random coordinates of a pyramid with pyrA = 0.326558177 radians;
        # all expected values below are regression references
        coords = [[-2.62985741, 6.99670582, -2.89817324],
                  [-2.32058737, 5.49122664, -3.13957301],
                  [-2.92519373, 6.96241176, -1.65009278],
                  [-1.62640146, 7.93539179, -3.17337668]]
        self.poav = POAV2(TrivalentVertex(coords[0], coords[1:]))

    def test_matrix(self):
        # POAV2 system matrix
        m = np.array([[-0.23175585, -0.12713934, 0.49598426],
                      [0.04802619, 0.47612631, 0.02444729],
                      [0.18372966, -0.34898698, -0.52043155]])
        assert self.poav.matrix.flatten() == approx(m.flatten())

    def test_pyrA(self):
        # one pyramidalization angle per sigma bond: radians and degrees
        assert self.poav.pyrA_r == approx([1.80097239, 1.75115937, 2.09343774])
        assert self.poav.pyrA == approx(
            [103.18811722, 100.33404089, 119.94514689])

    def test_u_pi(self):
        # unit vector along the pi orbital
        assert self.poav.u_pi == approx(
            [-0.91097524, 0.11226749, -0.39688806])

    def test_sigma_hyb_nbrs(self):
        # hybridization numbers of the three sigma orbitals
        assert self.poav.sigma_hyb_nbrs == approx((4.602500609983161,
                                                   7.444750673988855,
                                                   0.961463263653626))

    def test_pi_hyb_nbr(self):
        # hybridization number of the pi orbital
        assert self.poav.pi_hyb_nbr == approx(0.23956910356339622)
|
gVallverdu/pychemcurv | app/app.py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
## Documentation
This application aims to visualize local geometric informations about a
molecular structure using the
[`pychemcurv`](https://pychemcurv.readthedocs.io/) package. In particular,
the application computes geometric quantities which provide an insight of the
discrete curvature of a molecular or periodic structure.
Custom data can be visualized by editing manually the table.
### Global overview
The dashed box on the top of the page
allows you to upload an xyz file. Click into the box or drag and
drop your file there. The page is splitted in three parts. On the left, you can
visualize your structure, on the rigth, the selected data are plotted and below
a that gathers the data.
#### On the left
* The *"Select data"* dropdown, allows to select the data you want to map on
the structure.
* The *"Select colormap"* dropdown, allows you to select a colormap. The `_r`
label corresponds to colormap in reverse order.
* The *"bounds"* inputs change the min and max values used to
compute the colors associated to the data.
* The *"Nan color"* input, can be used to set a color to atoms for which the
selected data does not exist.
#### On the right
By default, the right panel displays an histogram of the selected data, used
on the structure visualization. On top of this plot, a box plot presents an
overview of the distribution. Below the plot, a table gathers statistical
information of the data. In that case, the slider allows you to change
the number of bins of the histogram.
The dropdown menu *"Histogram or abscissa"* allows you to plot either an
histogram of the selected data (default) or to plot the selected data as
a function of another data. In that case, a trend line is also plotted.
Statistical information of both data are then
displayed in the table below the plot.
#### Data table
Below the visualization part, a table displays all the data provided by the
`pychemcurv` package. Select the columns you want to see using the dropdown
menu. All the table is editable, manually, and the
visualization is updated each time you modify a value.
If you want to add custom data manually, you can add the `custom` column to the
table and fill it with your values. You can copy and paste data from a
spreadsheet or a text file.
The whole data can be downloaded in csv format from the `export` button at the
top.
**Warning:** If you edit the data in the table, you have first to refresh the
application before uploading a new molecule.
### Geometrical data
All the definitions of the atomic quantities available in this application are
defined in details in
[this publication](https://hal.archives-ouvertes.fr/hal-02490358/document)
or are briefly described in the
[pychemcurv documentation](https://pychemcurv.readthedocs.io/en/latest/).
Hereafter is a quick list that gives the units of the quantities:
`atom_idx`
: index of the atom in the system, starting from 0
`species`
: chemical element as provided in the xyz file
`pyrA`
: pyramidalization angle in degrees
`angular_defect`
: angular defect in degrees
`n_star_A`
: number of atoms bonded to this atom
`spherical_curvature`
: spherical curvature, no unit
`improper`
: improper angle in degrees
`pyr_distance`
: distance of atom A from the plane defined from atoms bonded to A
`atom_A`
: coordinates of atom A
`star_A`
: coordinates of atom bonded to A
`hybridization`
: hybridization as defined by Haddon et al., n tilde, the amount of pz AO in
the sigma system
`m`
: `m = (c_pi / lambda_pi)^2`
`n`
: `n = 3m + 2`
`c_pi^2`
: c_pi is the coefficient of the s AO in the h_pi hybrid orbital
`lambda_pi^2`
: lambda_pi is the coefficient of the p_pi AO in the h_pi hybrid orbital
`ave. neighb. dist.`
: Average distance with neighbors of atom A.
### File and data upload
The application accepts standard xyz files.
Such a file is supposed to display the number of atoms on the first line,
followed by a title line and then by the structure in cartesian
coordinates. Each line contains the element in the first column and the
cartesian coordinates in the 2nd, 3rd and 4th columns, for example:
3
H2O molecule
O -0.111056 0.033897 0.043165
H 0.966057 0.959148 -1.089095
H 0.796629 -1.497157 0.403985
Coordinates have to be in angstrom to determine correctly the connectivity.
"""
import os
import base64
import re
import yaml
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import plotly.express as px
import dash_bio
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pymatgen as mg
from pychemcurv import CurvatureAnalyzer
__author__ = "<NAME>"
__title__ = "Pychemcurv data viewer"
__subtitle__ = "Part of the Mosaica project"
HEX_COLOR_PATT = re.compile(r"^#[A-Fa-f0-9]{6}$")
# ---- Set up App ----
ext_css = ["https://use.fontawesome.com/releases/v5.8.1/css/all.css"]
app = dash.Dash(__name__,
external_stylesheets=ext_css,
url_base_pathname="/mosaica/",
suppress_callback_exceptions=True)
server = app.server
# with open(app.get_asset_url("data/elementColors.yml"), "r") as fyml:
with open("assets/data/elementColors.yml", "r") as fyml:
ELEMENT_COLORS = yaml.load(fyml, Loader=yaml.SafeLoader)["jmol"]
#
# Layout
# ------------------------------------------------------------------------------
# --- define tab style
tab_style_header = {
'backgroundColor': 'white',
"padding": "5px",
'fontWeight': 'bold',
"textAlign": "center",
"borderBottom": "2px solid rgb(60, 93, 130)",
"borderTop": "2px solid rgb(60, 93, 130)",
"fontFamily": "sans-serif"
}
style_data_conditional = [
{'if': {'row_index': 'odd'}, 'backgroundColor': 'rgba(60, 93, 130, .05)'}
]
# --- header ---
header = html.Div(className="head", children=[
html.Div(className="container", children=[
html.H1(children=[html.Span(className="fas fa-atom"), " ", __title__]),
# html.H2(__subtitle__)
html.A(
id="github-link",
href="https://github.com/gVallverdu/pychemcurv",
children=[
"View on GitHub",
]
),
html.Span(id="github-icon", className="fab fa-github fa-2x"),
])
])
# --- Footer ---
footer = html.Div(className="foot", children=[
html.Div(className="container", children=[
html.Div(className="row", children=[
html.Div(className="eight columns", children=[
html.H5("About:"),
html.A("<NAME>",
href="https://gsalvatovallverdu.gitlab.io/"),
html.Br(),
html.A("University of Pau & Pays Adour",
href="https://www.univ-pau.fr")
]),
html.Div(className="four columns", children=[
html.A(href="https://www.univ-pau.fr", children=[
html.Img(
src=app.get_asset_url("img/LogoUPPAblanc.png"),
)
])
])
]),
])
])
# --- Body: main part of the app ---
body = html.Div(className="container", children=[
# --- store components for the data
dcc.Store(id="data-storage", storage_type="memory"),
# --- upload part
html.Div(className="row", id="top-panel", children=[
# --- upload xyz file
html.Div(className="four columns", children=[
dcc.Upload(
id='file-upload',
children=html.Div(
className="upload-area control",
children="Upload xyz file here"
),
),
]),
# --- intro text
html.Div(className="eight columns", children=[
dcc.Markdown("""
The [documentation is available at the bottom of this page](#documentation).
Upload an xyz file on the left. The structure will appear
on the left and the data can be plotted on the right.
A [data table is available below](#data-table-title).
""")
])
]),
html.Div(className="row", children=[
# --- left panel: 3D visualization
html.Div(id="left-panel", children=[
# --- dash bio Molecule 3D Viewer
html.Div(id="dash-bio-viewer"),
# --- color bar
dcc.Graph(id='colorbar', config=dict(displayModeBar=False)),
# --- controls of mapped data
html.Div(className="row", children=[
# --- select data
html.Div(className="six columns", children=[
html.Span("Select data", className="control-label"),
dcc.Dropdown(id='dropdown-data',
placeholder="Select data"),
]),
# --- colormap
html.Div(className="six columns", children=[
html.Span("Colormap", className="control-label"),
dcc.Dropdown(
id='dropdown-colormap',
options=[{"label": cm, "value": cm}
for cm in plt.cm.cmap_d],
value="cividis"
),
# --- colormap boundaries
html.Div(className="row", children=[
html.Div(className="four columns", children=[
html.Span(
"bounds", className="control-label",
style={"lineHeight": "38px"}
)
]),
html.Div(className="four columns", children=[
dcc.Input(
id="cm-min-value", type="number", debounce=True,
placeholder="min", style={"width": "100%"}),
]),
html.Div(className="four columns", children=[
dcc.Input(
id="cm-max-value", type="number", debounce=True,
placeholder="max", style={"width": "100%"}),
]),
], style={"marginTop": "10px"}),
# --- nan color selector
html.Div(className="row", children=[
html.Div(className="four columns", children=[
html.Span(
"Nan color", className="control-label",
style={"lineHeight": "38px"}
)
]),
html.Div(className="eight columns", children=[
dcc.Input(
id="nan-color-value",
debounce=True,
placeholder="#000000",
type="text",
pattern=u"^#[A-Fa-f0-9]{6}$",
style={"width": "100%"},
),
]),
], style={"marginTop": "10px"})
]),
]),
]),
# --- right panel: plot data
html.Div(id="right-panel", children=[
# --- plot figure
dcc.Graph(id='plot-data'),
# --- a table of the selected data
dash_table.DataTable(
id="plot-data-table",
style_header=tab_style_header,
style_data_conditional=style_data_conditional,
),
html.Div(className="row", children=[
# --- select histogram or absicssa
html.Div(className="six columns", children=[
html.Span("Histogram or abscissa", className="control-label"),
dcc.Dropdown(
id="plot-data-selector",
options=[{"label": "histogram", "value": "histogram"}],
value="histogram",
placeholder="Plot more data",
),
]),
# --- number of bins for histogram
html.Div(className="six columns", children=[
html.Span("# bins", className="control-label"),
dcc.Slider(
id="nbins-slider",
min=5, max=50, step=1,
value=30,
marks={i: "%d" % i for i in range(5, 51, 5)},
),
]),
], style={"marginTop": "10px"}),
]),
]),
# --- Data table
html.Div(id="data-table-container", children=[
html.H4("Data Table", id="data-table-title"),
html.Div(className="column-selector-label",
children="Select the columns of the table:"),
dcc.Dropdown(
id="data-column-selector",
multi=True,
clearable=False,
),
html.Div(children=[
dash_table.DataTable(
id="data-table",
export_format='csv',
export_columns="all",
editable=True,
style_header=tab_style_header,
style_data_conditional=style_data_conditional,
)
]),
]),
# --- Documentation
html.Div(className="documentation", children=[
dcc.Markdown(__doc__, id="documentation")
])
])
app.layout = html.Div([header, body, footer])
#
# callbacks
# ------------------------------------------------------------------------------
@app.callback(
    [Output("data-storage", "data"),
     Output("dash-bio-viewer", "children"),
     Output("dropdown-data", "options"),
     Output("data-column-selector", "options"),
     Output("data-column-selector", "value"),
     Output("plot-data-selector", "options")],
    [Input("file-upload", "contents"),
     Input("file-upload", "filename"),
     Input('data-table', 'data_timestamp')],
    [State("data-storage", "data"),
     State("data-table", "data"),
     State("data-column-selector", "value"),
     State("dash-bio-viewer", "children")
     ]
)
def upload_data(content, filename, table_ts, stored_data, table_data,
                selected_columns, dbviewer):
    """
    Upload the data from a structure file and store them in the store
    component. Then set up the dropdowns, the table and the molecule viewer.

    Two situations trigger this callback:

    * the data table was edited (``table_ts`` is set): the stored data are
      updated from the current content of the table;
    * a file was uploaded, or the app just started: the structure is read,
      the curvature data are computed and all selectors are (re)filled.

    Returns:
        tuple: (stored records, Molecule3dViewer component, data dropdown
        options, table column options, selected table columns, abscissa
        selector options).
    """
    if table_ts is not None:
        # update stored data from current data in the table
        df = pd.DataFrame(stored_data)
        try:
            table_df = pd.DataFrame(table_data)
            # cast every column except the species back to floats; the table
            # returns strings after a manual edit.
            # FIX: use the builtin float, `np.float` was removed in NumPy 1.24
            table_df = table_df.astype({col: float for col in table_df
                                        if col != "species"})
            df.update(table_df)
        except ValueError:
            # a non-numeric value was typed in the table: keep previous data
            print("No update of data")
        all_data = df.to_dict("records")
        # FIX: these outputs were previously only computed in the else branch,
        # so editing the table raised a NameError at the return statement.
        # Rebuild them from the stored data frame.
        options = [{"label": name, "value": name} for name in df
                   if name not in ["atom_idx", "species", "atom_A", "star_A"]]
        options2 = [{"label": "histogram", "value": "histogram"}] + options
        tab_options = [{"label": name, "value": name} for name in df]
    else:
        # Initial set up: read a default structure first
        default_file = "assets/data/C28-D2.xyz"
        mol = mg.Molecule.from_file(default_file)
        if content:
            content_type, content_str = content.split(",")
            # FIX: use the *uploaded* filename to guess the format; it was
            # previously overwritten by the default path so the extension was
            # always '.xyz' whatever the uploaded file.
            _, ext = os.path.splitext(filename)
            decoded = base64.b64decode(content_str).decode("utf-8")
            try:
                mol = mg.Molecule.from_str(decoded, fmt=ext[1:])
            except (NameError, ValueError):
                # TODO: Manage format error and report it in the UI
                print("Unable to read format")
        # compute curvature data
        ca = CurvatureAnalyzer(mol)
        # add a custom column for manual editing
        ca.data["custom"] = 0.0
        # all data for the store component
        all_data = ca.data.to_dict("records")
        # Set the molecule 3D Viewer component
        dbviewer = dash_bio.Molecule3dViewer(
            id='molecule-viewer',
            backgroundColor="#FFFFFF",
            modelData=ca.get_molecular_data(),
            atomLabelsShown=True,
            selectionType='atom'
        )
        # default columns displayed in the data table
        selected_columns = ["atom_idx", "species", "angular_defect",
                            "pyrA", "n_star_A"]
        # options to select data mapped on atoms
        options = [{"label": name, "value": name} for name in ca.data
                   if name not in ["atom_idx", "species", "atom_A", "star_A"]]
        options2 = [{"label": "histogram", "value": "histogram"}] + options
        # checklist options to select table columns
        tab_options = [{"label": name, "value": name} for name in ca.data]
    return all_data, dbviewer, options, tab_options, selected_columns, options2
@app.callback(
    [Output("data-table", "data"),
     Output("data-table", "columns")],
    [Input("data-storage", "modified_timestamp"),
     Input("data-column-selector", "value")],
    [State("data-storage", "data")]
)
def select_table_columns(ts, values, data):
    """
    Select columns displayed in the table. A custom column is available and
    filled with zero by default.

    Args:
        ts: modified timestamp of the data store (callback trigger only).
        values (list of str or None): column names selected in the dropdown.
        data (list of dict): records stored in the "data-storage" component.

    Returns:
        tuple: (table records, table column specifications).
    """
    # get data from the Store component
    df = pd.DataFrame(data)
    if values is None:
        # initial set up: nothing selected yet, show an empty table
        return [], []
    else:
        # fill the table with the selected columns
        tab_df = df[values]
        data = tab_df.to_dict("records")
        # add format
        columns = list()
        for column in tab_df:
            # BUG FIX: "custom" was also part of this set, making the
            # `elif column == "custom"` branch below unreachable, so the
            # custom column was never flagged editable.
            if column in {"atom_idx", "species", "neighbors"}:
                # plain text columns, no numeric formatting
                columns.append({"name": column, "id": column})
            elif column == "custom":
                # user-editable column
                columns.append(
                    {"name": column, "id": column, "editable": True})
            else:
                # numeric columns displayed with 4 fixed decimal places
                columns.append({
                    "name": column, "id": column, "type": "numeric",
                    "format": Format(
                        precision=4,
                        scheme=Scheme.fixed,
                    )
                })
        return data, columns
@app.callback(
    Output('molecule-viewer', 'styles'),
    [Input('dropdown-data', 'value'),
     Input('dropdown-colormap', "value"),
     Input("data-storage", "modified_timestamp"),
     Input("cm-min-value", "value"),
     Input("cm-max-value", "value"),
     Input("nan-color-value", "value")],
    [State("data-storage", "data")]
)
def map_data_on_atoms(selected_data, cm_name, ts, cm_min, cm_max, nan_color, data):
    """
    Style the atoms of the 3D molecule viewer.

    When a data column is selected, its values are converted to hex colors
    through the chosen matplotlib colormap; otherwise each atom is colored
    according to its chemical element (black for unknown elements). Returns
    the styles dict expected by dash_bio.Molecule3dViewer.
    """
    frame = pd.DataFrame(data)
    natoms = len(frame)

    if not selected_data:
        # no data selected: plain element-based coloring
        return {
            str(idx): {
                "color": ELEMENT_COLORS.get(frame.species[idx], "#000000"),
                "visualization_type": "stick"
            }
            for idx in range(natoms)
        }

    column = frame[selected_data].values
    lower, upper = np.nanmin(column), np.nanmax(column)
    # user-defined colormap boundaries take precedence over the data extrema
    if cm_min is not None:
        lower = cm_min
    if cm_max is not None:
        upper = cm_max
    # fall back to black when the NaN color is missing or not a hex code
    if nan_color is None or not HEX_COLOR_PATT.match(nan_color):
        nan_color = "#000000"

    norm = mpl.colors.Normalize(lower, upper)
    cmap = plt.cm.get_cmap(cm_name)
    hex_colors = [
        nan_color if np.isnan(value)
        else mpl.colors.rgb2hex(cmap(X=norm(value), alpha=1))
        for value in column
    ]
    return {
        str(idx): {"color": hex_colors[idx], "visualization_type": "stick"}
        for idx in range(natoms)
    }
@app.callback(
    Output("colorbar", "figure"),
    [Input('dropdown-data', 'value'),
     Input('dropdown-colormap', 'value'),
     Input("data-storage", "modified_timestamp"),
     Input("cm-min-value", "value"),
     Input("cm-max-value", "value")],
    [State("data-storage", "data")]
)
def plot_colorbar(selected_data, cm_name, data_ts, cm_min, cm_max, data):
    """
    Display a colorbar according to the selected data mapped on to the structure.

    The colorbar is drawn as a plotly Contour trace filled with a linear ramp
    of values between the data boundaries, so its colors match those put on
    the atoms by the structure-coloring callback.

    Args:
        selected_data (str): name of the data column mapped on the atoms;
            falsy when nothing is selected.
        cm_name (str): name of a matplotlib colormap.
        data_ts: modified timestamp of the data store (callback trigger only).
        cm_min, cm_max (float or None): optional user-supplied boundaries
            overriding the data min/max.
        data (list of dict): records stored in the "data-storage" component.

    Returns:
        A plotly ``go.Figure`` with the colorbar, or an empty placeholder
        figure when no data is selected.
    """
    if selected_data:
        # get data and boundaries (NaN values ignored)
        values = pd.DataFrame(data)[selected_data].values
        minval, maxval = np.nanmin(values), np.nanmax(values)
        # get cm boundaries values from inputs if they exist
        if cm_min is not None:
            minval = cm_min
        if cm_max is not None:
            maxval = cm_max
        # set up fake data and compute corresponding colors
        npts = 100
        values = np.linspace(minval, maxval, npts)
        normalize = mpl.colors.Normalize(minval, maxval)
        cm = plt.cm.get_cmap(cm_name)
        # RGBA values in [0, 1] scaled to [0, 255] for plotly "rgb()" strings
        cm_RGBA = cm(X=normalize(values), alpha=1) * 255
        cm_rgb = ["rgb(%d, %d, %d)" % (int(r), int(g), int(b))
                  for r, g, b, a in cm_RGBA]
        # plotly colorscale: list of [fraction, color] pairs
        colors = [[x, c] for x, c in zip(np.linspace(0, 1, npts), cm_rgb)]
        trace = [
            go.Contour(
                z=[values, values],
                x0=values.min(),
                dx=(values.max() - values.min()) / (npts - 1),
                colorscale=colors,
                autocontour=False,
                showscale=False,
                # flat heatmap coloring, no contour lines
                contours=go.contour.Contours(coloring="heatmap"),
                line=go.contour.Line(width=0),
                hoverinfo="skip",
            ),
        ]
        figure = go.Figure(
            data=trace,
            layout=go.Layout(
                width=600, height=100,
                xaxis=dict(showgrid=False, title=selected_data),
                yaxis=dict(ticks="", showticklabels=False),
                margin=dict(t=0, b=0, l=0, r=0)
            )
        )
    else:
        # nothing selected: return an empty, axis-less placeholder figure
        figure = go.Figure(
            data=[],
            layout=go.Layout(
                width=600, height=100,
                xaxis=dict(ticks="", showticklabels=False, showgrid=False,
                           title=selected_data, zeroline=False),
                yaxis=dict(ticks="", showticklabels=False, showgrid=False,
                           title=selected_data, zeroline=False),
                margin=dict(l=0, t=0, b=0, r=0, pad=0)
            )
        )
    return figure
@app.callback(
    [Output("plot-data", "figure"),
     Output("plot-data-table", "data"),
     Output("plot-data-table", "columns")],
    [Input('dropdown-data', 'value'),
     Input("data-storage", "modified_timestamp"),
     Input("plot-data-selector", "value"),
     Input("nbins-slider", "value")],
    [State("data-storage", "data")]
)
def plot_data(selected_data1, data_ts, selected_data2, nbins, data):
    """
    Make a plot according to the data mapped on the structure. By default
    a histogram of the data is plotted. If another abscissa is chosen the
    data are plotted against this abscissa.
    Statistical descriptors of these data are displayed on a table.

    Args:
        selected_data1 (str): data column mapped on the structure (ordinate).
        data_ts: modified timestamp of the data store (callback trigger only).
        selected_data2 (str): "histogram" or the name of the column used as
            abscissa.
        nbins (int): number of bins of the histogram.
        data (list of dict): records stored in the "data-storage" component.

    Returns:
        tuple: (plotly figure, statistics table records, table column spec).
    """
    # defaults when nothing is selected: empty figure and empty table
    figure = go.Figure(data=[],
                       layout=go.Layout(template="plotly_white", height=600))
    tabdata = list()
    columns = list()
    if selected_data1:
        # drop atoms with missing (NaN) values before plotting or fitting
        df = pd.DataFrame(data).dropna()
        # plot a histogram
        if selected_data2 == "histogram":
            figure = px.histogram(
                data_frame=df,
                x=selected_data1,
                histnorm="probability",
                marginal="box",
                nbins=nbins,
                height=600,
                color_discrete_sequence=["#2980b9"],
                title=selected_data1,
                template="plotly_white",
            )
            figure.layout.update(
                yaxis=dict(showgrid=False),
                xaxis=dict(showgrid=False),
            )
        # scatter plot with trend line
        else:
            figure = px.scatter(
                data_frame=df,
                x=selected_data2, y=selected_data1,
                symbol_sequence=["circle-open"],
                color_discrete_sequence=["#2980b9"],
                template="plotly_white",
            )
            figure.update_traces(marker=dict(size=10, line=dict(width=3)))
            # add a polynomial trend line to the plot, extending the x range
            # by 5% of the data range on each side
            xmin = np.nanmin(df[selected_data2])
            xmax = np.nanmax(df[selected_data2])
            xmin -= .05 * (xmax - xmin)
            xmax += .05 * (xmax - xmin)
            x = np.linspace(xmin, xmax, 100)
            # 2nd degree least-squares fit of selected_data1 vs selected_data2
            p = np.poly1d(np.polyfit(
                df[selected_data2], df[selected_data1], deg=2))
            figure.add_trace(
                go.Scatter(
                    x=x, y=p(x),
                    mode="lines", showlegend=False,
                    line=dict(color="#2980b9", width=1),
                )
            )
        # set up table of plotted data with statistical descriptors
        if selected_data2 == "histogram":
            tabdata = df[selected_data1].describe().to_frame(
                name=selected_data1)
        else:
            tabdata = df[[selected_data1, selected_data2]].describe()
        # one row per plotted column after transposition
        tabdata = tabdata.transpose()
        tabdata.index.name = "data"
        tabdata.reset_index(inplace=True)
        # keep the usual descriptors; the 'count' column is dropped
        columns = ["data", 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
        tabdata = tabdata[columns].to_dict("records")
        # display 4 fixed decimal places in the statistics table
        fformat = Format(precision=4, scheme=Scheme.fixed)
        columns = [{"name": c, "id": c, "type": "numeric", "format": fformat}
                   for c in columns]
    return figure, tabdata, columns
# start the Dash development server when the module is run as a script
if __name__ == '__main__':
    app.run_server(debug=True)
|
gVallverdu/pychemcurv | pychemcurv/__init__.py | <filename>pychemcurv/__init__.py
# coding: utf-8
"""
This python packages provides classes in order to compute the local curvature
in a molecule or a material at the atomic scale and the hybridization of the
molecular orbitals of the atoms. The `utils` module allows to compute all
the quantities for all the atoms of a molecule or a unit cell.
"""
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__version__ = "2020.4.22"
__email__ = "<EMAIL>"
from .core import VertexAtom, TrivalentVertex, POAV1, POAV2
from .analysis import CurvatureAnalyzer
from .vis import CurvatureViewer
# import convenient object from pymatgen
from pymatgen.core import Molecule, Structure |
gVallverdu/pychemcurv | pychemcurv/vis.py | # coding: utf-8
"""
The ``pychemcurv.vis`` module implements the ``CurvatureViewer``
class in order to visualize a molecule or a periodic structure in a jupyter
notebook and map a given properties on the atoms using a color scale.
This class needs, `nglview <https://github.com/arose/nglview>`_ and uses
ipywidgets in a jupyter notebook to display the visualization. Run the
following instructions to install nglview and achieve the configuration
in order to be able to use nglview in a jupyter notebook
::
conda install nglview -c conda-forge
jupyter-nbextension enable nglview --py --sys-prefix
or
::
pip install nglview
jupyter-nbextension enable nglview --py --sys-prefix
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pymatgen.core import Molecule, Structure
from .analysis import CurvatureAnalyzer
__author__ = "<NAME>"
__copyright__ = "University of Pau and Pays Adour"
__email__ = "<EMAIL>"
__all__ = ["CurvatureViewer"]
class CurvatureViewer:
    """ This class provides a constructor for a NGLView widget in order to
    visualize the wanted properties using a color scale mapped on the 3D structure
    of the molecule or the structure.
    """

    def __init__(self, structure, bond_tol=0.2, rcut=2.5, bond_order=None):
        """ The class needs a pymatgen.Structure or pymatgen.Molecule object as
        first argument. The other arguments are used to define if two atoms
        are bonded or not.

        Args:
            structure (Structure, Molecule): A Structure or Molecule pymatgen
                object
            bond_tol (float): Tolerance used to determine if two atoms are
                bonded. Look at `pymatgen.core.CovalentBond.is_bonded`.
            rcut (float): Cutoff distance used when the bond is not known
            bond_order (dict): Not yet implemented

        Raises:
            TypeError: if structure is neither a Molecule nor a Structure.
        """
        if isinstance(structure, (Molecule, Structure)):
            self.structure = structure
        else:
            raise TypeError("structure must be a Molecule or Structure pymatgen"
                            " object. type(structure) is: " + str(type(structure)))

        # compute per-atom curvature data once; self.data is the DataFrame
        # produced by CurvatureAnalyzer
        self.data = CurvatureAnalyzer(
            structure, bond_tol, rcut, bond_order).data

    def get_view(self, representation="ball+stick", radius=0.25, aspect_ratio=2,
                 unitcell=False, width="700px", height="500px"):
        """ Set up a simple NGLView widget with the ball and stick or
        licorice representation of the structure.

        Args:
            representation (str): representation: 'ball+stick' or 'licorice'.
                Any other value falls back to 'ball+stick'.
            radius (float): bond (stick) radius
            aspect_ratio (float): ratio between the balls and stick radiuses
            unitcell (bool): If True and structure is periodic, show the unitcell.
            width (str): width of the nglview widget, default '700px'
            height (str): height of the nglview widget, default '500px'

        Returns:
            Return a ``NGLWidget`` object, or None when nglview is missing.
        """
        # nglview is an optional dependency: fail gracefully when missing
        try:
            import nglview as nv
        except ImportError as e:
            print("WARNING: You need to install ase and nglview to perform "
                  "visualization.")
            print(e)
            return None

        if representation not in ["ball+stick", "licorice"]:
            # BUG FIX: the fallback was announced but never actually applied
            print("Switch representation to 'ball+stick'")
            representation = "ball+stick"

        view = nv.show_pymatgen(self.structure)
        view.clear()
        view.center()
        view.add_representation(
            representation,
            radius=radius,
            aspect_ratio=aspect_ratio,
        )

        # show the unit cell only for periodic structures
        if isinstance(self.structure, Structure) and unitcell:
            view.add_unitcell()

        # resize nglview widget (FIX: the keyword is 'target', was misspelled
        # 'targe' which nglview silently ignored)
        view._remote_call("setSize", target="Widget", args=[width, height])

        return view

    def map_view(self, prop, radius=0.25, aspect_ratio=2, unitcell=False,
                 cm="viridis", minval=None, maxval=None, orientation="vertical",
                 label=None, width="700px", height="500px"):
        """ Map the given property on a color scale on to the molecule using
        a ball and stick representation. The property can be either the name
        of a column of the data computed using the CurvatureAnalyzer class, or,
        an array of values of a custom property. In the last case, the size of
        the array must be consistent with the number of atoms in the system.

        Args:
            prop (str or array): name of the property or values you want to map
            radius (float): bond (stick) radius
            aspect_ratio (float): ratio between the balls and stick radiuses
            unitcell (bool): If True and structure is periodic, show the unitcell.
            cm (str): colormap from ``matplotlib.cm``.
            minval (float): minimum value to consider for the color scale
            maxval (float): maximum value to consider for the color scale
            orientation (str): orientation of the colorbar ``'horizontal'``
                or ``'vertical'``
            label (str): Name of the colorbar. If None, use prop.
            width (str): width of the nglview widget, default '700px'
            height (str): height of the nglview widget, default '500px'

        Returns:
            Returns an ipywidgets ``HBox`` or ``VBox`` with the ``NGLWidget``
            and a color bar associated to the mapped property. The
            ``NGLWidget`` is the first element of the children, the colorbar
            is the second one. Returns None when ipywidgets is missing.

        Raises:
            ValueError: if prop is an unknown column name or cannot be cast
                to a float array with one value per atom.
        """
        # ipywidgets is required to lay out the view next to the colorbar
        try:
            from ipywidgets import HBox, VBox, Output
        except ImportError as e:
            print("You need ipywidgets available with jupyter notebook.")
            print(e)
            return None

        # check property data: either a column name or an explicit array
        if isinstance(prop, str):
            if prop in self.data.columns:
                prop_vals = self.data[prop].values
                label = prop if label is None else label
            else:
                # BUG FIX: 'data' was an undefined name here (NameError);
                # the computed data live in self.data
                print("Available data are", self.data.columns)
                raise ValueError("prop %s not found in data." % prop)
        else:
            try:
                # reshape also validates the number of values against the
                # number of atoms
                prop_vals = np.array(prop, dtype=np.float64).reshape(
                    len(self.structure))
            except ValueError:
                print("property = ", prop)
                raise ValueError(
                    "Cannot convert prop in a numpy array of floats.")

        # colorbar label
        label = "" if label is None else label

        # check orientation
        if orientation not in ["vertical", "horizontal"]:
            orientation = "horizontal"

        # find property boundaries, ignoring NaN values
        if minval is None:
            minval = np.nanmin(prop_vals)
        if maxval is None:
            maxval = np.nanmax(prop_vals)

        # normalize colors
        normalize = mpl.colors.Normalize(minval, maxval)
        # NOTE(review): mpl.cm.get_cmap is deprecated in matplotlib >= 3.7;
        # switch to mpl.colormaps[cm] once older matplotlib support is dropped
        cmap = mpl.cm.get_cmap(cm)

        # set up a matplotlib figure holding only the colorbar
        if orientation == "horizontal":
            _, ax = plt.subplots(figsize=(8, 1))
        else:
            _, ax = plt.subplots(figsize=(1, 8))
        mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=normalize,
                                  orientation=orientation)
        ax.set_title(label)

        # set up the visualization
        view = self.get_view(representation="ball+stick", radius=radius,
                             aspect_ratio=aspect_ratio, unitcell=unitcell,
                             width=width, height=height)

        # color each atom; atoms with NaN values keep the default color
        for iat, val in enumerate(prop_vals):
            if np.isnan(val):
                continue
            color = mpl.colors.rgb2hex(cmap(X=normalize(val), alpha=1))
            view.add_representation('ball+stick', selection=[iat], color=color,
                                    radius=1.05 * radius,
                                    aspect_ratio=aspect_ratio)

        # resize nglview widget once after all representations are added
        # (FIX: keyword is 'target', was 'targe'; the duplicated resize call
        # before the coloring loop was removed as redundant)
        view._remote_call("setSize", target="Widget", args=[width, height])

        # place the colorbar in an Output() widget
        out = Output()
        with out:
            plt.show()

        # gather the view and colorbar in a vbox or hbox depending on
        # the orientation
        if orientation == "vertical":
            box = HBox(children=[view, out])
        else:
            box = VBox(children=[view, out])

        return box
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.