# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models import BaseStartupLabel
from accelerator_abstract.models.label_model import LabelModel
class StartupLabel(BaseStartupLabel):
class Meta(LabelModel.Meta):
swappable = swapper.swappable_setting(BaseStartupLabel.Meta.app_label,
"StartupLabel")
|
import deep_ga
import tensorflow as tf
from tensorflow import keras
import os
import matplotlib.pyplot as plt
class AssertWeightsFinite(keras.callbacks.Callback):
def __init__(self):
super(AssertWeightsFinite, self).__init__()
def on_epoch_end(self, epoch, logs=None):
for weight in self.model.weights:
if not tf.math.reduce_all(tf.math.is_finite(weight)):
print(f"Weight {weight.name} not finite, stop training")
self.model.stop_training = True
break
class PlotPrediction(keras.callbacks.Callback):
def __init__(self, folder, tgen, vgen, save=True):
self.folder = folder
self.tgen = tgen
self.vgen = vgen
self.save = save
if self.save:
os.makedirs(folder, exist_ok=True)
super(PlotPrediction, self).__init__()
def on_epoch_end(self, epoch, logs=None):
ylabel = ("Match Probability"
if self.model.loss.__name__ == "BCE"
else "Predicted distance")
tx, ty = self.tgen.get_batch(200)
typ = self.model.predict(tx)[:, 0]
plt.clf()
plt.scatter(ty, typ, 2)
plt.title("Training Data")
plt.xlabel("Distance between local and global patch")
plt.xlim(0, plt.xlim()[1])
plt.ylim(0, plt.ylim()[1])
if self.model.loss.__name__ == "BCE":
plt.ylim(0, 1)
plt.ylabel(ylabel)
if self.save:
plt.savefig(os.path.join(self.folder, f"train_{epoch:03}.pdf"))
else:
plt.show()
vx, vy = self.vgen.get_batch(200)
vyp = self.model.predict(vx)[:, 0]
plt.clf()
plt.scatter(vy, vyp, 2)
plt.title("Validation Data")
plt.xlabel("Distance between local and global patch")
plt.xlim(0, plt.xlim()[1])
plt.ylim(0, plt.ylim()[1])
if self.model.loss.__name__ == "BCE":
plt.ylim(0, 1)
plt.ylabel(ylabel)
if self.save:
plt.savefig(os.path.join(self.folder, f"valid_{epoch:03}v.pdf"))
else:
plt.show()
class ValidationProgbar(keras.callbacks.Callback):
def __init__(self, vgen):
self.pbar = None
self.N = len(vgen)
super(ValidationProgbar, self).__init__()
def on_test_batch_end(self, batch, logs=None):
if batch == 0:
print("\nValidation:")
self.pbar = keras.utils.Progbar(
self.N, stateful_metrics=logs.keys())
self.pbar.add(1, values=logs.items())
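# Illustrative usage sketch (not part of the original module): the callbacks above
# are meant to be passed to model.fit(); "model", "tgen", "vgen" and the data/folder
# names are assumptions about objects created elsewhere in the project.
#
#     model.fit(train_data,
#               validation_data=valid_data,
#               epochs=50,
#               callbacks=[AssertWeightsFinite(),
#                          PlotPrediction("plots", tgen, vgen, save=True),
#                          ValidationProgbar(vgen)])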
|
"""Tests for retrieving license information."""
from unittest import TestCase, mock
from flask import Flask
from ....domain.submission import License
from .. import models, get_licenses, current_session
from .util import in_memory_db
class TestGetLicenses(TestCase):
"""Test :func:`.get_licenses`."""
def test_get_all_active_licenses(self):
"""Return a :class:`.domain.License` for each active license."""
# mock_util.json_factory.return_value = SQLiteJSON
with in_memory_db():
session = current_session()
session.add(models.License(
name="http://arxiv.org/licenses/assumed-1991-2003",
sequence=9,
label="Assumed arXiv.org perpetual, non-exclusive license to",
active=0
))
session.add(models.License(
name="http://creativecommons.org/licenses/publicdomain/",
sequence=4,
label="Creative Commons Public Domain Declaration",
active=1
))
session.commit()
licenses = get_licenses()
self.assertEqual(len(licenses), 1,
"Only the active license should be returned.")
self.assertIsInstance(licenses[0], License,
"Should return License instances.")
self.assertEqual(licenses[0].uri,
"http://creativecommons.org/licenses/publicdomain/",
"Should use name column to populate License.uri")
self.assertEqual(licenses[0].name,
"Creative Commons Public Domain Declaration",
"Should use label column to populate License.name")
|
import enum
from pydantic import BaseModel, EmailStr
from typing import Optional, List
# from stripe.api_resources import payment_method
class YesNoNone(enum.Enum):
YES = "yes"
NO = "no"
NONE = "null"
class ClientBase(BaseModel):
class Config:
orm_mode = True
class ClientInfo(ClientBase):
id: Optional[int]
api_key: Optional[str]
firstName: Optional[str]
lastName: Optional[str]
birthday: Optional[str]
address: Optional[str]
city: Optional[str]
state: Optional[str]
zip: Optional[int]
phone: Optional[str]
email: Optional[EmailStr]
referring: Optional[str]
conditions: List[str]
otherCondition: Optional[str]
diseases: List[str]
medications: str
covidTestedPositive: Optional[YesNoNone]
covidVaccine: Optional[YesNoNone]
stressfulLevel: Optional[int]
consentMinorChild: Optional[bool]
diagnosticProcedures: Optional[bool]
# relationshipChild: Optional[str]
visits: Optional[List]
class Client(ClientBase):
id: Optional[int]
api_key: Optional[str]
first_name: str
last_name: str
phone: Optional[str]
email: Optional[EmailStr]
# rougue_mode: Optional[bool]
req_date: Optional[str]
visits: Optional[List]
class ClientQueue(ClientBase):
id: Optional[int]
api_key: Optional[str]
email: Optional[EmailStr]
first_name: str
last_name: str
phone: Optional[str]
req_date: Optional[str]
tests: Optional[List]
visits: Optional[List]
progress_date: Optional[str]
place_in_queue: Optional[int]
class ClientInTake(ClientBase):
api_key: str
rougue_mode: Optional[bool]
place_in_queue: Optional[int]
class EditedClientData(ClientBase):
api_key: Optional[str]
first_name: str
last_name: str
birthday: str
address: str
city: str
state: str
zip: str
phone: str
email: str
class ClientPhone(ClientBase):
phone: Optional[str]
class ClientInfoStripe(ClientBase):
id: str
description: Optional[str]
amount: int
api_key: str
email: str
name: str
class ClientStripeSubscription(ClientBase):
payment_method: str
description: str
api_key: str
amount: Optional[int]
interval: Optional[str]
interval_count: Optional[str]
email: Optional[str]
name: Optional[str]
number: Optional[str]
exp_month: Optional[int]
exp_year: Optional[int]
cvc: Optional[int]
class ClientCarePlan(ClientBase):
api_key: str
start_time: Optional[str]
end_time: Optional[str]
class ClientCarePlanDelete(ClientBase):
id: int
api_key: Optional[str]
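# Illustrative sketch (not part of the original module): constructing one of the
# schemas above with made-up values; Optional fields may simply be omitted.
if __name__ == "__main__":
    info = ClientInfo(
        firstName="Jane",
        lastName="Doe",
        email="jane@example.com",
        conditions=["headache"],
        diseases=[],
        medications="none",
        covidVaccine=YesNoNone.YES,
    )
    print(info.dict(exclude_none=True))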
|
from fuzzywuzzy import fuzz
class ActionsHandler:
"""
actions object function:
There are 3 different cases that a text could match.
1:
A text contains exactly the text specified in a exactly attribute.
2:
A text matches with a text specified in a match attribute, by more or exactly the
percentage specified in the accuracy attribute.
3:
A text contains all words specified in the tags attribute.
If the case matches the action is returned.
"""
@staticmethod
def check_and_sort_actions(actions):
"""
Check the content of an actions object and sort its entries into exactly, match and tags lists.
:param actions:
:return:
"""
exactly = []
match = []
tags = []
for a in actions:
keys = a.keys()
if 'exactly' not in keys and 'match' not in keys and 'tags' not in keys:
raise ValueError('The actions dict is missing either the exactly, match or tags key.', str(a))
elif 'exactly' in keys and not a['exactly']:
raise ValueError('Found exactly key, but it has no content.', str(a))
elif 'match' in keys and not a['match']:
raise ValueError('Found match key, but it has no content.', str(a))
elif 'tags' in keys:
if len(a['tags']) == 0:
raise ValueError('Found tags key, but it has no content.', str(a))
else:
for item in a['tags']:
if not item or type(item) != str:
raise ValueError('Tags key has no or wrong content (each tag has to be a str).', str(a))
if 'action' not in keys or not a['action']:
raise ValueError('Missing an action key.', str(a))
elif 'action' in keys:
ac = a['action']
if type(ac) == str:
if ac == 'reply':
if 'text' not in keys or not a['text']:
raise ValueError('Missing a text key for the reply action.', str(a))
elif ac != 'retweet' and ac != 'favor' and ac != 'follow':
raise ValueError(
'Found an action key, but it has no or wrong content (not reply, retweet, favor or follow).',
str(a)
)
elif type(ac) == list:
act_list = ['reply', 'favor', 'retweet', 'follow']
for item in act_list:
if ac.count(item) > 1:
raise ValueError('Only one of every type of action should be in the action list.', str(a))
if 'reply' in ac:
if 'text' not in keys or not a['text']:
raise ValueError('Missing a text key for the reply action.', str(a))
for item in a['action']:
if item not in act_list:
raise ValueError(
'Found an action key, but the action list contains wrong content (not reply, '
'retweet, favor or follow).',
str(a)
)
else:
raise ValueError('The action has to be of type str or list. Found:', type(a['action']))
if 'match' in keys and 'accuracy' not in keys:
raise ValueError('Found match key, missing an accuracy key.', str(a))
elif 'accuracy' in keys and (not a['accuracy'] or a['accuracy'] > 1):
raise ValueError('Found accuracy key, but it has no or wrong content (> 1).', str(a))
if 'options' in keys:
opt_keys = a['options'].keys()
if 'case_sensitive' not in opt_keys:
raise ValueError('Options must contain a case_sensitive key.', str(opt_keys), str(a))
elif type(a['options']['case_sensitive']) != bool:
raise ValueError('Case_sensitive value has to be of type bool.', str(a))
if 'exactly' in keys and 'action' in keys:
exactly.append(a)
elif 'match' in keys and 'accuracy' in keys and 'action' in keys:
match.append(a)
elif 'tags' in keys and len(a['tags']) > 0 and 'action' in keys:
tags.append(a)
return {
'exactly': exactly,
'match': match,
'tags': tags
}
@staticmethod
def find_action(f_actions, text):
"""
Finds the first action that matches the given text, checking exactly, then match, then tags entries.
:param f_actions:
:param text:
:return:
"""
exactly_list = f_actions['exactly']
match_list = f_actions['match']
tags_list = f_actions['tags']
matched_action = None
for entry in exactly_list:
if 'options' in entry and 'case_sensitive' in entry['options'] and entry['options']['case_sensitive']:
if text == entry['exactly']:
matched_action = entry
break
else:
if text.lower() == entry['exactly'].lower():
matched_action = entry
break
if not matched_action:
for entry in match_list:
if 'options' in entry and 'case_sensitive' in entry['options'] and entry['options']['case_sensitive']:
if (fuzz.ratio(text, entry['match']) / 100) > entry['accuracy']:
matched_action = entry
break
else:
if ((fuzz.ratio(text.lower(), entry['match'].lower()) / 100) >
entry['accuracy']):
matched_action = entry
break
if not matched_action:
for entry in tags_list:
every_tag = True
if 'options' in entry and 'case_sensitive' in entry['options'] and entry['options']['case_sensitive']:
for tag in entry['tags']:
if tag not in text:
every_tag = False
break
else:
for tag in entry['tags']:
if tag.lower() not in text.lower():
every_tag = False
break
if every_tag:
matched_action = entry
break
return matched_action
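# Illustrative usage sketch (not part of the original module); the actions below are
# made up and show the three matching styles described in the class docstring.
if __name__ == "__main__":
    example_actions = [
        {'exactly': 'hello bot', 'action': 'reply', 'text': 'hi!'},
        {'match': 'good morning', 'accuracy': 0.8, 'action': 'favor'},
        {'tags': ['python', 'bot'], 'action': ['retweet', 'follow']},
    ]
    sorted_actions = ActionsHandler.check_and_sort_actions(example_actions)
    # Matches the first entry case-insensitively and returns it.
    print(ActionsHandler.find_action(sorted_actions, 'Hello Bot'))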
|
"""
Given the index of an image, feed that image to a trained autoencoder,
then plot the original image alongside the reconstructed image.
This script is used to visually verify the correctness of the autoencoder.
"""
import torch
import argparse
import numpy as np
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch import nn
from os import listdir
import random
import os.path
from os.path import isfile, join, isdir
from utils import UnsuperviseDataset, inference
from utils import DenseAutoencoder
from utils import ConvAutoencoder
from utils import ConvAutoencoder_dense_out
from utils import ConvAutoencoder_conv1x1
from utils import ConvAutoencoder_conv1x1_layertest
from utils import ConvAutoencoder_deeper1
from dataset_statistics import dataSetStatistics
def getArgs():
parser = argparse.ArgumentParser('python')
parser.add_argument('-data_dir',
default='../bae-data-images/',
required=False,
help='directory of training images')
parser.add_argument('-style',
default='conv_1x1',
required=False,
choices=['conv', 'dense', 'conv_dense_out', 'conv_1x1', 'conv_deeper', 'conv_1x1_test'],
help='style of autoencoder')
parser.add_argument('-feature_size',
default=1024,
type=int,
required=False,
help='size of output feature of the autoencoder')
parser.add_argument('-normalize',
default=True,
required=False,
help='whether to normalize dataset')
parser.add_argument('-model',
default='./log/conv_1x1.pt',
required=False,
help='trained model location')
parser.add_argument('-num_data',
type=int,
default=50000,
required=False,
help='number of images used when computing dataset statistics.')
parser.add_argument('-gpu_to_cpu',
default=False,
required=False,
help='whether to reconstruct image using model made with gpu on a cpu')
parser.add_argument('-img_count',
type=int,
default=10,
required=False,
help='how many reconstructed images to save and show')
return parser.parse_args()
if __name__ == "__main__":
args = getArgs()
data_dir = args.data_dir
style = args.style
feature_size = args.feature_size
normalize = args.normalize
model_file = args.model
num_data = args.num_data
gpu_to_cpu = args.gpu_to_cpu
img_count = args.img_count
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Current device: '+str(device))
# Normalizing data and transforming images to tensors
statistics = dataSetStatistics(data_dir, 128, num_data)
data_mean = statistics[0].tolist()
data_std = statistics[1].tolist()
if normalize == True:
print('normalizing data:')
print('mean:', data_mean)
print('std:', data_std)
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((data_mean[0], data_mean[1], data_mean[2]),
(data_std[0], data_std[1], data_std[2]))])
else:
transform = transforms.Compose([transforms.ToTensor()])
# Put images into dataset
img_list = []
for item in listdir(data_dir):
if isfile(join(data_dir, item)):
img_list.append(item)
elif isdir(join(data_dir, item)):
update_data_dir = join(data_dir, item)
for f in listdir( update_data_dir):
if isfile(join(update_data_dir, f)):
img_list.append(item + '/' + f)
elif isdir(join(update_data_dir, f)):
deeper_data_dir = join(update_data_dir, f)
for y in listdir(deeper_data_dir):
if isfile(join(deeper_data_dir, y)):
img_list.append(item + '/' + f + '/' + y)
dataset = UnsuperviseDataset(data_dir, img_list, transform=transform)
# Instantiate and load model
if style == 'conv':
model = ConvAutoencoder()
elif style == 'dense':
model = DenseAutoencoder(input_size, feature_size)
elif style == 'conv_dense_out':
model = ConvAutoencoder_dense_out(feature_size)
elif style == 'conv_1x1':
model = ConvAutoencoder_conv1x1()
elif style == 'conv_1x1_test':
model = ConvAutoencoder_conv1x1_layertest()
elif style == 'conv_deeper':
model = ConvAutoencoder_deeper1()
# Loading the trained model
# Converting the trained model if trained to be usable on the cpu
# if gpu is unavailable and the model was trained using gpus
if gpu_to_cpu == True:
# original saved file with DataParallel
state_dict = torch.load(model_file, map_location='cpu')
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
else:
# if there are multiple GPUs, split the batch to different GPUs
if torch.cuda.device_count() > 1:
print("Using "+str(torch.cuda.device_count())+" GPUs...")
model = nn.DataParallel(model)
model.load_state_dict(torch.load(model_file))
def reconstruction_creation(index, dataset):
'''create reconstructed image and calculate loss between original and reconstructed image'''
image, name = dataset[index]
reconstruct_image = inference(device, image.unsqueeze(0), model) # create reconstructed image using model
recon_detach = reconstruct_image.detach()
recon_cpu = recon_detach.cpu() # send to cpu
recon_numpy = recon_cpu.numpy() # convert image to numpy array for easier calculations
recon_numpy = np.squeeze(recon_numpy, axis=0)
criterion = nn.MSELoss()
loss = str(criterion(image.unsqueeze(0).cpu(), reconstruct_image.cpu()).item()) # calculate loss
return recon_numpy, loss
def original_image_extraction(index, dataset):
'''extract the original image and file name from the dataset to plot alongside the reconstructed image'''
image, name = dataset[index]
og_img = image.numpy() # create list of original images as numpy arrays
file_name = name # create list of file names
return og_img, file_name
# Reconstructing images and plotting the root mean square error
print('Calculating root mean squared error...')
# only calculating rmse for half of dataset due to the size.
rmse_lst = []
for index in range(int(dataset.__len__()/2.)): # remove '/2.' to calculate rmse for every img in dataset
recon_img = reconstruction_creation(index, dataset)[0]
og_image = original_image_extraction(index, dataset)[0]
N = 1
for dim in og_image.shape: # determine N for root mean square error
N *= dim
rmse = ((np.sum((og_image - recon_img) ** 2) / N) ** .5) # calculate rmse
rmse_lst.append(rmse)
# Plot histogram of root mean squared errors between reconstructed images and original images
print('Plotting root mean squared error...')
plt.hist(np.asarray(rmse_lst), bins=30)
plt.ylabel('Number of Image Pairs')
plt.xlabel('Root Mean Squared')
plt.title('RMSE — ' + model_file)
plt.savefig('./images/' + 'rmse.png')
# plt.show()
# Plot images before and after reconstruction
print('Constructing figures before and after reconstruction...')
# create a random list of indices to select images from dataset
# to compare to their respective reconstructed images
random_index_lst = []
for i in range(img_count):
random_index_lst.append(random.randint(0, dataset.__len__()-1))
original_lst = []
reconstruction_lst = []
file_name_lst = []
loss_lst = []
for index in random_index_lst: # extract and create corresponding original and reconstructed images for visual tests
recon_img, loss = reconstruction_creation(index, dataset)
og_image, file_name = original_image_extraction(index, dataset)
reconstruction_lst.append(recon_img)
original_lst.append(og_image)
file_name_lst.append(file_name)
loss_lst.append(loss)
for index in range(len(file_name_lst)): # plotting the original image next to the reconstructed image with loss value
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(14, 7))
ax1.imshow(np.transpose(original_lst[index], (1, 2, 0)))
ax1.set_title('Original Normalized Image — ' + file_name_lst[index])
ax2.imshow(np.transpose(reconstruction_lst[index], (1, 2, 0)))
ax2.set_title('Reconstructed Image — ' + file_name_lst[index])
txt = 'Loss between before and after: ' + loss_lst[index]
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12)
# show both figures
plt.savefig('./images/' + 'reconstructed_' + file_name_lst[index].split('.')[0] + '.png')
plt.imshow(np.transpose(original_lst[index], (1, 2, 0)))
plt.imshow(np.transpose(reconstruction_lst[index], (1, 2, 0)))
# plt.show()
|
# ***** merge code for NPOL data *****
# Author: Stacy Brodzik, University of Washington
# Date: October 24, 2016
# Description:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging as log
import sys
# ------------------------------------ START INPUTS ------------------------------------
## input and output directories
inDir = '/home/disk/bob/olympex/raw/npol_qc2/'
outDir = inDir
binDir = '/home/disk/meso-home/meso/build/bin/'
date = ['20151113']
#date = ['20151105','20151112','20151113','20151114','20151115','20151116',
# '20151117','20151118','20151119','20151120','20151121','20151122',
# '20151123','20151124','20151125','20151126','20151130','20151201',
# '20151202','20151203','20151204','20151205','20151206','20151207',
# '20151208','20151209','20151210','20151211','20151212','20151213',
# '20151214','20151215','20151216','20151217','20151218','20151219',
# '20160103','20160104','20160105','20160106','20160108','20160110',
# '20160111','20160112','20160113','20160114','20160115']
## RadxConvert param file
paramFile = '/home/disk/meso-home/meso/projDir/ingest/params/RadxConvert.npol_qc.rhi'
# ------------------------------------- END INPUTS -------------------------------------
# set up logging
log.basicConfig(format='%(levelname)s:%(message)s',level=log.INFO)
# process data for each date
for i,idate in enumerate(date):
log.info('i = {} and idate = {}'.format(i,idate) )
os.chdir(inDir+idate+'/rhi_a')
for file in os.listdir(inDir+idate+'/rhi_a'):
log.info('file = {}'.format(file) )
if '00-20' in file:
# determine if there is a matching 20-40 file
file2 = file[0:file.find('00-20')]+'20-40.uf'
if os.path.isfile(file2):
command = binDir+'RadxConvert -v -param '+paramFile+' -f '+file+' '+ file2
else:
command = binDir+'RadxConvert -v -param '+paramFile+' -f '+file
log.info(' command = {}'.format(command) )
|
from functools import wraps
import traceback
import copy
import socket
def catch_exception(origin_func):
@wraps(origin_func)
def wrapper(self, *args, **kwargs):
try:
u = origin_func(self, *args, **kwargs)
return u
except Exception as e:
# print(origin_func.__name__ + ": " + e.__str__())
self.log(str(origin_func.__name__ + ": " + e.__str__()))
traceback.print_exc()
return 'an Exception raised.'
return wrapper
def message_log(origin_func):
@wraps(origin_func)
def message_log_wrapper(self, *args, **kwargs):
# print(origin_func.__name__)
origin_res = origin_func(self, *args, **kwargs)
res = copy.deepcopy(origin_res)
if isinstance(res, tuple) or isinstance(res, list):
res = [str(each) for each in res]
res = " ".join(res)
elif isinstance(res, bytes):
res = res.decode()
self.log(origin_func.__name__ + ": " + str(res))
return origin_res
return message_log_wrapper
# need_data_conn flow: set transfer mode -> send command -> open data connection -> transfer data.
# The decorated method must be a generator that yields once after sending the
# command and once after the data has been sent or received.
def need_data_conn(origin_func):
@wraps(origin_func)
def need_data_conn_wrapper(self, *args, **kwargs):
assert(self.mode != self.MODE_NONE)
# set mode
if self.mode == self.MODE_PASV:
self.handlePASV()
elif self.mode == self.MODE_PORT:
self.handlePORT()
# send cmd
step_by = origin_func(self, *args, **kwargs)
next(step_by)
# create conn
if self.mode == self.MODE_PASV:
self.dataSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dataSocket.connect((self.pasvDataIp, self.pasvDataPort))
code, content = self.recvMsg()
if code != 150:
return
elif self.mode == self.MODE_PORT:
assert(self.portListenSocket)
self.dataSocket, _ = self.portListenSocket.accept()
code, content = self.recvMsg()
if code != 150:
return
self.guiTransStatus(False)
# recv/send data and do sth.
res = next(step_by)
self.guiTransStatus(True)
# close data session
if self.dataSocket:
self.dataSocket.close()
self.dataSocket = None
if self.portListenSocket:
self.portListenSocket.close()
self.portListenSocket = None
self.recvMsg()
return res
return need_data_conn_wrapper
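# Illustrative sketch (hypothetical, not part of this module): a client method
# decorated with need_data_conn is written as a generator that yields once after
# sending the control command and once after moving the data; sendMsg and the
# buffer size below are assumptions about the surrounding client class.
#
#     @catch_exception
#     @message_log
#     @need_data_conn
#     def retrieveFile(self, path):
#         self.sendMsg('RETR ' + path)   # hypothetical helper that sends the command
#         yield                          # the decorator opens the data connection here
#         data = self.dataSocket.recv(8192)
#         yield data                     # the decorator closes the data connection after this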
|
from django import forms
from . import models
class FileForm(forms.ModelForm):
class Meta:
model = models.File
fields = '__all__'
class ImageForm(forms.ModelForm):
class Meta:
model = models.Image
fields = '__all__'
|
from ctypes import *
import ctypes.util
import _ctypes
from os import system
def generate_getter(user_fun_name):
file = open("casadi_fun_ptr_getter.c", "w")
file.write("int "+user_fun_name+"(const double** arg, double** res, int* iw, double* w, void* mem);\n")
file.write("int "+user_fun_name+"_work(int *sz_arg, int* sz_res, int *sz_iw, int *sz_w);\n")
file.write("const int* "+user_fun_name+"_sparsity_in(int i);\n")
file.write("const int* "+user_fun_name+"_sparsity_out(int i);\n")
file.write("int "+user_fun_name+"_n_in(void);\n")
file.write("int "+user_fun_name+"_n_out(void);\n")
file.write("\n")
# file.write("void *get_fun_fun() { return &"+user_fun_name+"; }\n")
file.write("void *get_fun_fun() { return &"+user_fun_name+"; }\n")
file.write("void *get_fun_work() { return &"+user_fun_name+"_work; }\n")
file.write("void *get_fun_sparsity_in() { return &"+user_fun_name+"_sparsity_in; }\n")
file.write("void *get_fun_sparsity_out() { return &"+user_fun_name+"_sparsity_out; }\n")
file.write("void *get_fun_n_in() { return &"+user_fun_name+"_n_in; }\n")
file.write("void *get_fun_n_out() { return &"+user_fun_name+"_n_out; }\n")
file.close()
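# The generated C file above just exposes the addresses of the CasADi-generated
# functions; set_function_pointers() below compiles it together with the model
# library, loads it via ctypes and hands the raw function pointers to the acados
# external_function_casadi_set_* setters.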
def set_function_pointers(acados, model_name, user_fun_name, acados_ext_fun):
# print(user_fun_name)
# generate function pointers getter
generate_getter(user_fun_name)
# compile and load shared library
model_getter_name = user_fun_name + '_getter.so'
# model_getter_name = 'fun_getter.so' # XXX it needs a unique name !!!
system('gcc -fPIC -shared casadi_fun_ptr_getter.c -o ' + model_getter_name + ' -L. ' + model_name)
# print(model_name)
# system('nm ' + model_name)
model_getter = CDLL(model_getter_name)
# set up function pointers
model_getter.get_fun_fun.restype = c_void_p
tmp_ptr = model_getter.get_fun_fun()
# print(tmp_ptr)
acados.external_function_casadi_set_fun.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_fun(acados_ext_fun, tmp_ptr)
model_getter.get_fun_work.restype = c_void_p
tmp_ptr = model_getter.get_fun_work()
acados.external_function_casadi_set_work.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_work(acados_ext_fun, tmp_ptr)
model_getter.get_fun_sparsity_in.restype = c_void_p
tmp_ptr = model_getter.get_fun_sparsity_in()
acados.external_function_casadi_set_sparsity_in.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_sparsity_in(acados_ext_fun, tmp_ptr)
model_getter.get_fun_sparsity_out.restype = c_void_p
tmp_ptr = model_getter.get_fun_sparsity_out()
acados.external_function_casadi_set_sparsity_out.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_sparsity_out(acados_ext_fun, tmp_ptr)
model_getter.get_fun_n_in.restype = c_void_p
tmp_ptr = model_getter.get_fun_n_in()
acados.external_function_casadi_set_n_in.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_n_in(acados_ext_fun, tmp_ptr)
model_getter.get_fun_n_out.restype = c_void_p
tmp_ptr = model_getter.get_fun_n_out()
acados.external_function_casadi_set_n_out.argtypes = [c_void_p, c_void_p]
acados.external_function_casadi_set_n_out(acados_ext_fun, tmp_ptr)
_ctypes.dlclose(model_getter._handle)
system('rm casadi_fun_ptr_getter.c')
system('rm ' + model_getter_name)
|
import CaboCha
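# Kongming wraps the CaboCha dependency parser: collect() parses a Japanese
# sentence and returns one {"modifier", "function", "head"} dict per dependency
# arrow, where "modifier" is the content part of the modifying chunk, "function"
# is its trailing functional part with punctuation (読点/句点) stripped, and
# "head" is the content of the chunk it points to (verbs reduced to their base
# form, stopwords skipped).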
class Kongming:
def __init__(self, stopwords=None):
self.cabocha = CaboCha.Parser()
self.stopwords = stopwords
def _get_modifier(self, tree, chunk):
surface = ''
for i in range(chunk.token_pos, chunk.token_pos + chunk.head_pos + 1):
token = tree.token(i)
features = token.feature.split(',')
surface += token.surface
return surface
def _get_function(self, tree, chunk):
surface = ''
for i in range(chunk.token_pos + chunk.head_pos + 1, chunk.token_pos + chunk.token_size):
token = tree.token(i)
features = token.feature.split(',')
if not features[1] in ['読点', '句点']:
surface += token.surface
return surface
def _get_head(self, tree, chunk):
start = chunk.token_pos
end = start + chunk.head_pos + 1 if chunk.head_pos != chunk.func_pos else start + chunk.token_size
surface = ''
for i in range(start, end):
token = tree.token(i)
features = token.feature.split(',')
if self.stopwords and token.surface in self.stopwords:
continue
if features[0] in ['名詞', '形容詞', '記号']:
surface += token.surface
elif features[0] == '動詞':
surface += features[6]
break
return surface
def _extract_arrows(self, tree):
chunks = {}
for i in range(0, tree.chunk_size()):
chunks[i] = tree.chunk(i)
arrows = []
for chunk_id, chunk in chunks.items():
if not chunk.link > 0:
continue
modifier = self._get_modifier(tree, chunk)
function = self._get_function(tree, chunk)
head_chunk = chunks[chunk.link]
head = self._get_head(tree, head_chunk)
arrow = {"modifier": modifier, "function": function, "head": head}
arrows.append(arrow)
return arrows
def collect(self, text):
tree = self.cabocha.parse(text)
dependencies = self._extract_arrows(tree)
return dependencies
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
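# f_gold(x, y) checks whether y is a power of x (i.e. y == x**k for some k >= 0);
# the harness below compares a candidate f_filled (inserted at #TOFILL) against it.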
def f_gold ( x , y ) :
if ( x == 1 ) :
return ( y == 1 )
pow = 1
while ( pow < y ) :
pow = pow * x
return ( pow == y )
#TOFILL
if __name__ == '__main__':
param = [
(57,1,),
(3,9,),
(10,101,),
(10,10000,),
(6,46656,),
(2,2048,),
(1,40,),
(20,79,),
(96,98,),
(25,5,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dj.choices import Choices
from django.contrib.auth.models import AbstractUser
from django.db import models as db
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from rest_framework.authtoken.models import Token
class ScroogeUser(AbstractUser):
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
app_label = 'ralph_scrooge'
ordering = ['username']
def _full_name_or_username(self):
full_name = self.get_full_name().strip()
return full_name or self.username
def __unicode__(self):
return '{}'.format(self._full_name_or_username())
@property
def api_token_key(self):
try:
return self.auth_token.key
except Token.DoesNotExist:
return None
@receiver(post_save, sender=ScroogeUser)
def create_auth_token(sender, instance=None, created=False, **kwargs):
"""
Create token for newly created user.
"""
if not instance.api_token_key:
Token.objects.create(user=instance)
class OwnershipType(Choices):
_ = Choices.Choice
technical = _("Technical owner")
business = _("Business owner")
class ServiceOwnership(db.Model):
service = db.ForeignKey(
'Service',
verbose_name=_("service"),
null=False,
blank=False,
)
owner = db.ForeignKey(
ScroogeUser,
verbose_name=_("owner"),
null=False,
blank=False,
)
type = db.PositiveIntegerField(
null=False,
blank=False,
default=1,
choices=OwnershipType(),
verbose_name=_("Type"),
)
class Meta:
app_label = 'ralph_scrooge'
unique_together = ('owner', 'service', 'type')
def __unicode__(self):
return '{} / {}'.format(self.service, self.owner)
class TeamManager(db.Model):
team = db.ForeignKey(
'Team',
verbose_name=_("team"),
null=False,
blank=False,
)
manager = db.ForeignKey(
ScroogeUser,
verbose_name=_("manager"),
null=False,
blank=False,
)
class Meta:
app_label = 'ralph_scrooge'
unique_together = ('manager', 'team')
def __unicode__(self):
return '{} / {}'.format(self.team, self.manager)
|
'''-------------------------------------------------------------------------------
Tool Name: UpdateDischargeMap
Source Name: UpdateDischargeMap.py
Version: ArcGIS 10.2
License: Apache 2.0
Author: Environmental Systems Research Institute Inc.
Updated by: Environmental Systems Research Institute Inc.
Description: Update a discharge map document.
History: Initial coding - 05/26/2015, version 1.0
Updated: Version 1.0, 06/02/2015 Bug fix: uses arcpy.mapping.UpdateLayer instead of Apply Symbology From Layer
Version 1.1, 06/24/2015 Adapted to the group layer in the map document
Version 1.1, 04/01/2016 deleted the lines for importing unnecessary modules
-------------------------------------------------------------------------------'''
import os
import arcpy
import time
class UpdateDischargeMap(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Update Discharge Map"
self.description = "Update a discharge map document for stream flow visualization based on \
the .mxd file and a new discharge table with the same name"
self.GDBtemplate_layer = os.path.join(os.path.dirname(__file__), "templates", "FGDB_TimeEnabled.lyr")
self.SQLtemplate_layer = os.path.join(os.path.dirname(__file__), "templates", "SQL_TimeEnabled.lyr")
self.errorMessages = ["Incorrect map document"]
self.canRunInBackground = False
self.category = "Postprocessing"
def getParameterInfo(self):
"""Define parameter definitions"""
param0 = arcpy.Parameter(name = "in_discharge_map",
displayName = "Input Discharge Map",
direction = "Input",
parameterType = "Required",
datatype = "DEMapDocument"
)
param1 = arcpy.Parameter(name = "out_discharge_map",
displayName = "Output Discharge Map",
direction = "Output",
parameterType = "Derived",
datatype = "DEMapDocument"
)
params = [param0, param1]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
'''Check if .mxd is the suffix of the input map document name'''
if parameters[0].altered:
(dirnm, basenm) = os.path.split(parameters[0].valueAsText)
if not basenm.endswith(".mxd"):
parameters[0].setErrorMessage(self.errorMessages[0])
return
def execute(self, parameters, messages):
"""The source code of the tool."""
arcpy.env.overwriteOutput = True
in_map_document = parameters[0].valueAsText
'''Update symbology for each layer in the map document'''
mxd = arcpy.mapping.MapDocument(in_map_document)
df = arcpy.mapping.ListDataFrames(mxd)[0]
for lyr in arcpy.mapping.ListLayers(mxd):
if not lyr.isGroupLayer:
(dirnm, basenm) = os.path.split(lyr.dataSource)
template_lyr = self.GDBtemplate_layer
if not dirnm.endswith('.gdb'):
template_lyr = self.SQLtemplate_layer
# Update symbology from template
templateLayer = arcpy.mapping.Layer(template_lyr)
arcpy.mapping.UpdateLayer(df, lyr, templateLayer, True)
mxd.save()
del mxd, df, templateLayer
return
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import pdb
# This is a PyTorch data augmentation library that takes a PyTorch Tensor as input.
# The functions can be applied in a Dataset's __getitem__ to do augmentation on the fly during training,
# and are easily parallelized by setting 'num_workers' in the PyTorch DataLoader (see the usage sketch at the end of this file).
# tensor_img: 1, C, (D), H, W
def gaussian_noise(tensor_img, std, mean=0):
return tensor_img + torch.randn(tensor_img.shape).to(tensor_img.device) * std + mean
def brightness_additive(tensor_img, std, mean=0, per_channel=False):
if per_channel:
C = tensor_img.shape[1]
else:
C = 1
if len(tensor_img.shape) == 5:
rand_brightness = torch.normal(mean, std, size=(1, C, 1, 1, 1)).to(tensor_img.device)
elif len(tensor_img.shape) == 4:
rand_brightness = torch.normal(mean, std, size=(1, C, 1, 1)).to(tensor_img.device)
else:
raise ValueError('Invalid input tensor dimension, should be 5d for volume image or 4d for 2d image')
return tensor_img + rand_brightness
def brightness_multiply(tensor_img, multiply_range=[0.7, 1.3], per_channel=False):
if per_channel:
C = tensor_img.shape[1]
else:
C = 1
assert multiply_range[1] > multiply_range[0], 'Invalid range'
span = multiply_range[1] - multiply_range[0]
if len(tensor_img.shape) == 5:
rand_brightness = torch.rand(size=(1, C, 1, 1, 1)).to(tensor_img.device) * span + multiply_range[0]
elif len(tensor_img.shape) == 4:
rand_brightness = torch.rand(size=(1, C, 1, 1)).to(tensor_img.device) * span + multiply_range[0]
else:
raise ValueError('Invalid input tensor dimension, should be 5d for volume image or 4d for 2d image')
return tensor_img * rand_brightness
def gamma(tensor_img, gamma_range=(0.5, 2), per_channel=False, retain_stats=False):
if len(tensor_img.shape) == 5:
dim = '3d'
_, C, D, H, W = tensor_img.shape
elif len(tensor_img.shape) == 4:
dim = '2d'
_, C, H, W = tensor_img.shape
else:
raise ValueError('Invalid input tensor dimension, should be 5d for volume image or 4d for 2d image')
tmp_C = C if per_channel else 1
tensor_img = tensor_img.view(tmp_C, -1)
minm, _ = tensor_img.min(dim=1)
maxm, _ = tensor_img.max(dim=1)
minm, maxm = minm.unsqueeze(1), maxm.unsqueeze(1) # unsqueeze for broadcast mechanism
rng = maxm - minm
mean = tensor_img.mean(dim=1).unsqueeze(1)
std = tensor_img.std(dim=1).unsqueeze(1)
gamma = torch.rand(tmp_C, 1) * (gamma_range[1] - gamma_range[0]) + gamma_range[0] # tmp_C so it broadcasts against the (tmp_C, -1) view
tensor_img = torch.pow((tensor_img - minm) / rng, gamma) * rng + minm
if retain_stats:
tensor_img -= tensor_img.mean(dim=1).unsqueeze(1)
tensor_img = tensor_img / tensor_img.std(dim=1).unsqueeze(1) * std + mean
if dim == '3d':
return tensor_img.view(1, C, D, H, W)
else:
return tensor_img.view(1, C, H, W)
def contrast(tensor_img, contrast_range=(0.65, 1.5), per_channel=False, preserve_range=True):
if len(tensor_img.shape) == 5:
dim = '3d'
_, C, D, H, W = tensor_img.shape
elif len(tensor_img.shape) == 4:
dim = '2d'
_, C, H, W = tensor_img.shape
else:
raise ValueError('Invalid input tensor dimension, should be 5d for volume image or 4d for 2d image')
tmp_C = C if per_channel else 1
tensor_img = tensor_img.view(tmp_C, -1)
minm, _ = tensor_img.min(dim=1)
maxm, _ = tensor_img.max(dim=1)
minm, maxm = minm.unsqueeze(1), maxm.unsqueeze(1) # unsqueeze for broadcast mechanism
mean = tensor_img.mean(dim=1).unsqueeze(1)
factor = torch.rand(tmp_C, 1) * (contrast_range[1] - contrast_range[0]) + contrast_range[0] # tmp_C so it broadcasts against the (tmp_C, -1) view
tensor_img = (tensor_img - mean) * factor + mean
if preserve_range:
tensor_img = torch.clamp(tensor_img, min=minm, max=maxm)
if dim == '3d':
return tensor_img.view(1, C, D, H, W)
else:
return tensor_img.view(1, C, H, W)
def mirror(tensor_img, axis=0):
'''
Args:
tensor_img: an image with format of pytorch tensor
axis: the axis for mirroring. 0 for the first image axis, 1 for the second, 2 for the third (if volume image)
'''
if len(tensor_img.shape) == 5:
dim = '3d'
assert axis in [0, 1, 2], "axis should be either 0, 1 or 2 for volume images"
elif len(tensor_img.shape) == 4:
dim = '2d'
assert axis in [0, 1], "axis should be either 0 or 1 for 2D images"
else:
raise ValueError('Invalid input tensor dimension, should be 5d for volume image or 4d for 2d image')
return torch.flip(tensor_img, dims=[2+axis])
def random_scale_rotate_translate_2d(tensor_img, tensor_lab, scale, rotate, translate):
# implemented with affine transformation
if isinstance(scale, float) or isinstance(scale, int):
scale = [scale] * 2
if isinstance(translate, float) or isinstance(translate, int):
translate = [translate] * 2
scale_x = 1 - scale[0] + np.random.random() * 2*scale[0]
scale_y = 1 - scale[1] + np.random.random() * 2*scale[1]
shear_x = np.random.random() * 2*scale[0] - scale[0]
shear_y = np.random.random() * 2*scale[1] - scale[1]
translate_x = np.random.random() * 2*translate[0] - translate[0]
translate_y = np.random.random() * 2*translate[1] - translate[1]
theta_scale = torch.tensor([[scale_x, shear_x, translate_x],
[shear_y, scale_y, translate_y],
[0, 0, 1]]).float()
angle = (float(np.random.randint(-rotate, max(rotate, 1))) / 180.) * math.pi
theta_rotate = torch.tensor([[math.cos(angle), -math.sin(angle), 0],
[math.sin(angle), math.cos(angle), 0],
[0, 0, 1]]).float()
theta = torch.mm(theta_scale, theta_rotate)[0:2, :]
grid = F.affine_grid(theta.unsqueeze(0), tensor_img.size(), align_corners=True)
tensor_img = F.grid_sample(tensor_img, grid, mode='bilinear', padding_mode='zeros', align_corners=True)
tensor_lab = F.grid_sample(tensor_lab.float(), grid, mode='nearest', padding_mode='zeros', align_corners=True).long()
return tensor_img, tensor_lab
def random_scale_rotate_translate_3d(tensor_img, tensor_lab, scale, rotate, translate, noshear=True):
if isinstance(scale, float) or isinstance(scale, int):
scale = [scale] * 3
if isinstance(translate, float) or isinstance(translate, int):
translate = [translate] * 3
if isinstance(rotate, float) or isinstance(rotate, int):
rotate = [rotate] * 3
scale_z = 1 - scale[0] + np.random.random() * 2*scale[0]
scale_x = 1 - scale[1] + np.random.random() * 2*scale[1]
scale_y = 1 - scale[2] + np.random.random() * 2*scale[2]
shear_xz = 0 if noshear else np.random.random() * 2*scale[0] - scale[0]
shear_yz = 0 if noshear else np.random.random() * 2*scale[0] - scale[0]
shear_zx = 0 if noshear else np.random.random() * 2*scale[1] - scale[1]
shear_yx = 0 if noshear else np.random.random() * 2*scale[1] - scale[1]
shear_zy = 0 if noshear else np.random.random() * 2*scale[2] - scale[2]
shear_xy = 0 if noshear else np.random.random() * 2*scale[2] - scale[2]
translate_z = np.random.random() * 2*translate[0] - translate[0]
translate_x = np.random.random() * 2*translate[1] - translate[1]
translate_y = np.random.random() * 2*translate[2] - translate[2]
theta_scale = torch.tensor([[scale_y, shear_xy, shear_zy, translate_y],
[shear_yx, scale_x, shear_zx, translate_x],
[shear_yz, shear_xz, scale_z, translate_z],
[0, 0, 0, 1]]).float()
angle_xy = (float(np.random.randint(-rotate[0], max(rotate[0], 1))) / 180.) * math.pi
angle_xz = (float(np.random.randint(-rotate[1], max(rotate[1], 1))) / 180.) * math.pi
angle_yz = (float(np.random.randint(-rotate[2], max(rotate[2], 1))) / 180.) * math.pi
theta_rotate_xz = torch.tensor([[1, 0, 0, 0],
[0, math.cos(angle_xz), -math.sin(angle_xz), 0],
[0, math.sin(angle_xz), math.cos(angle_xz), 0],
[0, 0, 0, 1]]).float()
theta_rotate_xy = torch.tensor([[math.cos(angle_xy), -math.sin(angle_xy), 0, 0],
[math.sin(angle_xy), math.cos(angle_xy), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]).float()
theta_rotate_yz = torch.tensor([[math.cos(angle_yz), 0, -math.sin(angle_yz), 0],
[0, 1, 0, 0],
[math.sin(angle_yz), 0, math.cos(angle_yz), 0],
[0, 0, 0, 1]]).float()
theta = torch.mm(theta_rotate_xy, theta_rotate_xz)
theta = torch.mm(theta, theta_rotate_yz)
theta = torch.mm(theta, theta_scale)[0:3, :].unsqueeze(0)
grid = F.affine_grid(theta, tensor_img.size(), align_corners=True)
tensor_img = F.grid_sample(tensor_img, grid, mode='bilinear', padding_mode='zeros', align_corners=True)
tensor_lab = F.grid_sample(tensor_lab.float(), grid, mode='nearest', padding_mode='zeros', align_corners=True).long()
return tensor_img, tensor_lab
def crop_2d(tensor_img, tensor_lab, crop_size, mode):
assert mode in ['random', 'center'], "Invalid Mode, should be \'random\' or \'center\'"
if isinstance(crop_size, int):
crop_size = [crop_size] * 2
_, _, H, W = tensor_img.shape
diff_H = H - crop_size[0]
diff_W = W - crop_size[1]
if mode == 'random':
rand_x = np.random.randint(0, max(diff_H, 1))
rand_y = np.random.randint(0, max(diff_W, 1))
else:
rand_x = diff_H // 2
rand_y = diff_W // 2
cropped_img = tensor_img[:, :, rand_x:rand_x+crop_size[0], rand_y:rand_y+crop_size[1]]
cropped_lab = tensor_lab[:, :, rand_x:rand_x+crop_size[0], rand_y:rand_y+crop_size[1]]
return cropped_img, cropped_lab
def crop_3d(tensor_img, tensor_lab, crop_size, mode):
assert mode in ['random', 'center'], "Invalid Mode, should be \'random\' or \'center\'"
if isinstance(crop_size, int):
crop_size = [crop_size] * 3
_, _, D, H, W = tensor_img.shape
diff_D = D - crop_size[0]
diff_H = H - crop_size[1]
diff_W = W - crop_size[2]
if mode == 'random':
rand_z = np.random.randint(0, max(diff_D, 1))
rand_x = np.random.randint(0, max(diff_H, 1))
rand_y = np.random.randint(0, max(diff_W, 1))
else:
rand_z = diff_D // 2
rand_x = diff_H // 2
rand_y = diff_W // 2
cropped_img = tensor_img[:, :, rand_z:rand_z+crop_size[0], rand_x:rand_x+crop_size[1], rand_y:rand_y+crop_size[2]]
cropped_lab = tensor_lab[:, :, rand_z:rand_z+crop_size[0], rand_x:rand_x+crop_size[1], rand_y:rand_y+crop_size[2]]
return cropped_img, cropped_lab
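# Illustrative usage sketch (not part of the original module): a Dataset's
# __getitem__ might chain a few of these augmentations. Shapes and parameter
# values below are made up for demonstration.
if __name__ == "__main__":
    img = torch.rand(1, 1, 64, 64)                    # 1, C, H, W
    lab = torch.randint(0, 2, (1, 1, 64, 64))         # matching label map
    img = gaussian_noise(img, std=0.05)
    img = brightness_multiply(img, multiply_range=[0.9, 1.1])
    img, lab = random_scale_rotate_translate_2d(img, lab, scale=0.1, rotate=10, translate=0.1)
    img, lab = crop_2d(img, lab, crop_size=48, mode='random')
    print(img.shape, lab.shape)                       # torch.Size([1, 1, 48, 48]) twice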
|
import dataclasses as dc
@dc.dataclass(unsafe_hash=True)
class Demand:
route: str
origin: str
destination: str
demand_per_second: float
|
import os
from hendrix.resources import DjangoStaticResource
from django.conf import settings
from django.contrib.staticfiles import finders
class DjangoStaticsFinder:
"""
finds all static resources for this Django installation
and creates a static resource for each one's base directory
"""
namespace = settings.STATIC_URL
@staticmethod
def get_resources():
ignore_patterns = [
'*.less',
'*.scss',
'*.styl',
]
existing = []
for finder in finders.get_finders():
for staticfile, storage in finder.list([]):
dirname = os.path.dirname(staticfile)
path = os.path.join(storage.base_location, dirname)
if not path in existing and dirname:
yield DjangoStaticResource(
path,
settings.STATIC_URL + '%s/' % dirname
)
existing.append(path)
"""
The rest is for compatibility with existing code based on the deprecated
classes below.
"""
try:
DefaultDjangoStaticResource = DjangoStaticResource(
settings.STATIC_ROOT, settings.STATIC_URL
)
except AttributeError:
raise AttributeError(
"Please make sure you have assigned your STATIC_ROOT and STATIC_URL"
" settings"
)
try:
from django.contrib import admin
admin_media_path = os.path.join(admin.__path__[0], 'static/admin/')
DjangoAdminStaticResource = DjangoStaticResource(
admin_media_path, settings.STATIC_URL+'admin/'
)
except:
raise
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
proj_details = get_project_details()
pr_item_map = get_purchased_items_cost()
se_item_map = get_issued_items_cost()
dn_item_map = get_delivered_items_cost()
data = []
for project in proj_details:
data.append([project.name, pr_item_map.get(project.name, 0),
se_item_map.get(project.name, 0), dn_item_map.get(project.name, 0),
project.project_name, project.status, project.company,
project.customer, project.estimated_costing, project.expected_start_date,
project.expected_end_date])
return columns, data
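# Frappe report columns are declared below as "Label:Fieldtype/Options:Width" strings.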
def get_columns():
return [_("Project Id") + ":Link/Project:140", _("Cost of Purchased Items") + ":Currency:160",
_("Cost of Issued Items") + ":Currency:160", _("Cost of Delivered Items") + ":Currency:160",
_("Project Name") + "::120", _("Project Status") + "::120", _("Company") + ":Link/Company:100",
_("Customer") + ":Link/Customer:140", _("Project Value") + ":Currency:120",
_("Project Start Date") + ":Date:120", _("Completion Date") + ":Date:120"]
def get_project_details():
return frappe.db.sql(""" select name, project_name, status, company, customer, estimated_costing,
expected_start_date, expected_end_date from tabProject where docstatus < 2""", as_dict=1)
def get_purchased_items_cost():
pr_items = frappe.db.sql("""select project, sum(base_net_amount) as amount
from `tabPurchase Receipt Item` where ifnull(project, '') != ''
and docstatus = 1 group by project""", as_dict=1)
pr_item_map = {}
for item in pr_items:
pr_item_map.setdefault(item.project, item.amount)
return pr_item_map
def get_issued_items_cost():
se_items = frappe.db.sql("""select se.project, sum(se_item.amount) as amount
from `tabStock Entry` se, `tabStock Entry Detail` se_item
where se.name = se_item.parent and se.docstatus = 1 and ifnull(se_item.t_warehouse, '') = ''
and ifnull(se.project, '') != '' group by se.project""", as_dict=1)
se_item_map = {}
for item in se_items:
se_item_map.setdefault(item.project, item.amount)
return se_item_map
def get_delivered_items_cost():
dn_items = frappe.db.sql("""select dn.project, sum(dn_item.base_net_amount) as amount
from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''
group by dn.project""", as_dict=1)
si_items = frappe.db.sql("""select si.project, sum(si_item.base_net_amount) as amount
from `tabSales Invoice` si, `tabSales Invoice Item` si_item
where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1
and si.is_pos = 1 and ifnull(si.project, '') != ''
group by si.project""", as_dict=1)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items:
dn_item_map.setdefault(item.project, item.amount)
return dn_item_map
|
"""DNA design container classes"""
import string
# Global DNA nt groups
group = {"A": "A", "T": "T", "C": "C", "G": "G",
"W": "AT", "S": "CG", "M": "AC", "K": "GT",
"B": "CGT", "V": "ACG", "D": "AGT", "H": "ACT",
"N": "ACGT"} # SpuriousC group codes
rev_group = dict([(v, k) for (k, v) in list(group.items())]) # A reverse lookup for group.
complement = {"A": "T", "T": "A", "C": "G", "G": "C",
"W": "W", "S": "S", "M": "K", "K": "M",
"B": "V", "V": "B", "D": "H", "H": "D",
"N": "N"} # Should satisfy set(group[complement[X]]) == set(seq_comp(group[X]))
def seq_comp(seq):
"""Returns the WC complement of a nucleotide sequence."""
return "".join(complement[nt] for nt in reversed(seq))
class Sequence(object):
"""Container for sequences"""
def __init__(self, name, template):
self.name = name
self.template = template # The template of constraints on sequence
self.seq = None # Sequence has not been designed yet
self.length = len(self.template)
self.reversed = False
# Build the dummy sequence for the W-C complement
self.wc = ReverseSequence(self)
def set_seq(self, seq):
"""Set the sequence."""
if self.seq: # If this sequence is already defined
assert self.seq == seq, "Sequence %s was designed with 2 different sequences: %s and %s" % (self.name, self.seq, seq)
else: # If it's not defined yet
self.seq = seq
self.wc.seq = seq_comp(seq)
def get_seq(self):
"""Return designed sequence or template as default."""
if self.seq:
return self.seq
else:
return self.template
def __repr__(self):
return "Sequence(%(name)r, %(template)r)" % self.__dict__
class ReverseSequence(Sequence):
"""Complements of defined sequences"""
def __init__(self, wc):
self.name = wc.name + "*"
self.template = seq_comp(wc.template)
self.seq = None
self.length = wc.length
self.reversed = True
self.wc = wc
class SuperSequence(object):
"""Logical grouping of sequences"""
def __init__(self, name, sub_seqs):
self.name = name
self.seq = None # Stores the sequence once it has been designed
self.seqs = sub_seqs
# Find length and base_seqs
self.base_seqs = [] # The atomic Sequence's that constrain this SuperSequence
self.length = 0
for sub_seq in self.seqs:
if isinstance(sub_seq, Sequence):
self.base_seqs.append(sub_seq)
else:
assert isinstance(sub_seq, SuperSequence)
self.base_seqs += sub_seq.base_seqs
self.length += sub_seq.length
self.reversed = False
self.wc = ReverseSuperSequence(self)
def set_seq(self, seq):
"""Set the sequence and apply it to all subsequences."""
assert len(seq) == self.length, "Designed sequence length mismatch. %d != %d" % (len(seq), self.length)
if self.seq: # If this sequence is already defined
assert self.seq == seq, "Sequence %s was designed with 2 different sequences: %s and %s" % (self.name, self.seqs, seqs)
else: # If it's not defined yet
self.seq = seq
self.wc.seq = seq_comp(seq)
i = 0
for sub_seq in self.seqs:
sub_seq.set_seq(seq[i:i+sub_seq.length])
i += sub_seq.length
def get_seq(self):
"""Return the sequence (or reconstruct it from subsequences)"""
if self.seq:
return self.seq
else:
return "".join([sub_seq.get_seq() for sub_seq in self.seqs])
def __repr__(self):
return "SuperSequence(%(name)r, %(seqs)r)" % self.__dict__
class ReverseSuperSequence(SuperSequence):
def __init__(self, wc):
self.name = wc.name + "*"
self.seq = None # Stores the sequence once it has been designed
self.seqs = [seq.wc for seq in reversed(wc.seqs)]
self.base_seqs = [seq.wc for seq in reversed(wc.base_seqs)]
self.length = wc.length
self.reversed = True
self.wc = wc
class Strand(SuperSequence):
"""Container for strands. Inherits from SuperSequence for convinience."""
def __init__(self, name, seqs, dummy):
SuperSequence.__init__(self, name, seqs)
self.dummy = dummy
def __repr__(self):
return "Strand(%(name)r, %(seqs)r, %(dummy)r)" % self.__dict__
def get_bonds(struct):
"""Get a list of bonds in a dot-paren structure."""
struct = struct.replace("+", "") # Positions do NOT include strand breaks
bonds = [] # Will be a list of pairs (open_position, close_position)
open_pos = [] # A FILO of open parentheses positions
for pos, symb in enumerate(struct):
if symb == "(":
open_pos.append(pos)
elif symb == ")":
assert len(open_pos) != 0
start = open_pos.pop() # The most recent open-paren
bonds.append( (start, pos) )
else:
assert symb == ".", "Structure '%s' not in dot-paren form" % struct
return bonds
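# Quick illustration (not part of the original module): each ")" is paired with the
# most recent unmatched "(", so the innermost bond is reported first, e.g.
# get_bonds("((..))") == [(1, 4), (0, 5)] and get_bonds("(.)+(.)") == [(0, 2), (3, 5)].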
class Structure(object):
"""Container for structures/complexes"""
def __init__(self, name, strands, struct, params):
self.name = name
self.params = params # Optimization parameters, TODO: actually deal with these
self.struct = struct
self.bonds = get_bonds(struct)
self.strands = strands
self.seq = None # Stores the sequence once it has been defined.
# Find length and base_seqs
self.base_seqs = []
self.length = 0
sub_structs = [strand_struct for strand_struct in self.struct.split("+")] # Check that lengths match up
assert len(strands) == len(sub_structs), "Mismatch: Structure %s is defined by %d strands, but secondary structure has %d strands" % (name, len(strands), len(sub_structs))
for strand, sub_struct in zip(strands, sub_structs):
assert isinstance(strand, Strand), "Structure %s must get strands" % name
assert strand.length == len(sub_struct), "Mismatch: Strand %s in structure %s has length %d, but sub-structure %s implies %d" % (strand.name, name, strand.length, sub_struct, len(sub_struct))
self.base_seqs += strand.base_seqs
self.length += strand.length
def get_seq(self):
"""Get sequence from strands which have been set."""
self.seq = "+".join(strand.seq for strand in self.strands)
def __repr__(self):
return "Structure(%(name)r, %(strands)r, %(struct)r, %(params)r)" % self.__dict__
|
# position/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
# Diagrams here: https://docs.google.com/drawings/d/1DsPnl97GKe9f14h41RPeZDssDUztRETGkXGaolXCeyo/edit
from django.db import models
from election_office_measure.models import CandidateCampaign, MeasureCampaign
from exception.models import handle_exception, handle_exception_silently, handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from organization.models import Organization
from twitter.models import TwitterUser
from django.contrib.auth.models import User
# from voter.models import Voter # Replace User with this once we have figured out User -> Voter object linking
SUPPORT = 'SUPPORT'
STILL_DECIDING = 'STILL_DECIDING'
NO_STANCE = 'NO_STANCE'
INFORMATION_ONLY = 'INFO_ONLY'
OPPOSE = 'OPPOSE'
POSITION_CHOICES = (
# ('SUPPORT_STRONG', 'Strong Supports'), # I do not believe we will be offering 'SUPPORT_STRONG' as an option
(SUPPORT, 'Supports'),
(STILL_DECIDING, 'Still deciding'),
(NO_STANCE, 'No stance'),
(INFORMATION_ONLY, 'Information only'),
(OPPOSE, 'Opposes'),
# ('OPPOSE_STRONG', 'Strongly Opposes'), # I do not believe we will be offering 'OPPOSE_STRONG' as an option
)
class PositionEntered(models.Model):
"""
Any position entered by any person gets its own PositionEntered entry. We then
generate Position entries that get used to display a particular org's position.
"""
# We are relying on built-in Python id field
# The id for the generated position that this PositionEntered entry influences
position_id = models.BigIntegerField(null=True, blank=True)
date_entered = models.DateTimeField(verbose_name='date entered', null=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
# The voter expressing the opinion
voter_id = models.BigIntegerField(null=True, blank=True)
# The election this position is for
election_id = models.BigIntegerField(verbose_name='election id', null=True, blank=True)
# The unique We Vote id of the tweet that is the source of the position
tweet_source_id = models.BigIntegerField(null=True, blank=True)
# This is the voter / authenticated user who entered the position for an organization
# (NOT the voter expressing opinion)
voter_entering_position = models.ForeignKey(
User, verbose_name='authenticated user who entered position', null=True, blank=True)
# The Twitter user account that generated this position
twitter_user_entered_position = models.ForeignKey(TwitterUser, null=True, verbose_name='')
# This is the candidate/politician that the position refers to.
# Either candidate_campaign is filled OR measure_campaign, but not both
# candidate_campaign = models.ForeignKey(
# CandidateCampaign, verbose_name='candidate campaign', null=True, blank=True,
# related_name='positionentered_candidate')
candidate_campaign_id = models.BigIntegerField(verbose_name='id of candidate_campaign', null=True, blank=True)
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
# This is the measure/initiative/proposition that the position refers to.
# Either measure_campaign is filled OR candidate_campaign, but not both
# measure_campaign = models.ForeignKey(
# MeasureCampaign, verbose_name='measure campaign', null=True, blank=True, related_name='positionentered_measure')
measure_campaign_id = models.BigIntegerField(verbose_name='id of measure_campaign', null=True, blank=True)
# Strategic denormalization - this is redundant but will make generating the voter guide easier.
# geo = models.ForeignKey(Geo, null=True, related_name='pos_geo')
# issue = models.ForeignKey(Issue, null=True, blank=True, related_name='')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES, default=NO_STANCE) # supporting/opposing
statement_text = models.TextField(null=True, blank=True,)
statement_html = models.TextField(null=True, blank=True,)
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
# Did this position come from a web scraper?
from_scraper = models.BooleanField(default=False)
# Was this position certified by an official with the organization?
organization_certified = models.BooleanField(default=False)
# Was this position certified by an official We Vote volunteer?
volunteer_certified = models.BooleanField(default=False)
# link = models.URLField(null=True, blank=True,)
# link_title = models.TextField(null=True, blank=True, max_length=128)
# link_site = models.TextField(null=True, blank=True, max_length=64)
# link_txt = models.TextField(null=True, blank=True)
# link_img = models.URLField(null=True, blank=True)
# Set this to True after getting all the link details (title, txt, img etc)
# details_loaded = models.BooleanField(default=False)
# video_embed = models.URLField(null=True, blank=True)
# spam_flag = models.BooleanField(default=False)
# abuse_flag = models.BooleanField(default=False)
# orig_json = models.TextField(blank=True)
def __unicode__(self):
return self.stance
class Meta:
ordering = ('date_entered',)
    def is_support(self):
        return self.stance == SUPPORT
    def is_oppose(self):
        return self.stance == OPPOSE
    def is_no_stance(self):
        return self.stance == NO_STANCE
    def is_information_only(self):
        return self.stance == INFORMATION_ONLY
    def is_still_deciding(self):
        return self.stance == STILL_DECIDING
def candidate_campaign(self):
try:
candidate_campaign = CandidateCampaign.objects.get(id=self.candidate_campaign_id)
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e)
print "position.candidate_campaign Found multiple"
return None
except CandidateCampaign.DoesNotExist as e:
handle_exception_silently(e)
print "position.candidate_campaign did not find"
return None
return candidate_campaign
def organization(self):
try:
organization = Organization.objects.get(id=self.organization_id)
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e)
print "position.candidate_campaign Found multiple"
return None
except Organization.DoesNotExist as e:
handle_exception_silently(e)
print "position.candidate_campaign did not find"
return None
return organization
class Position(models.Model):
"""
This is a table of data generated from PositionEntered. Not all fields copied over from PositionEntered
"""
    # We are relying on the built-in Django id field
# The PositionEntered entry that was copied into this entry based on verification rules
position_entered_id = models.BigIntegerField(null=True, blank=True)
date_entered = models.DateTimeField(verbose_name='date entered', null=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
# The election this position is for
election_id = models.BigIntegerField(verbose_name='election id', null=True, blank=True)
candidate_campaign = models.ForeignKey(
CandidateCampaign, verbose_name='candidate campaign', null=True, blank=True, related_name='position_candidate')
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
# This is the measure/initiative/proposition that the position refers to.
# Either measure_campaign is filled OR candidate_campaign, but not both
measure_campaign = models.ForeignKey(
MeasureCampaign, verbose_name='measure campaign', null=True, blank=True, related_name='position_measure')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES) # supporting/opposing
statement_text = models.TextField(null=True, blank=True,)
statement_html = models.TextField(null=True, blank=True,)
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
def __unicode__(self):
        return self.stance
class Meta:
ordering = ('date_entered',)
# def display_ballot_item_name(self):
# """
# Organization supports 'ballot_item_name' (which could be a campaign name, or measure name
# :return:
# """
# # Try to retrieve the candidate_campaign
# if candidate_campaign.id:
class PositionListForCandidateCampaign(models.Model):
"""
A way to retrieve all of the positions stated about this CandidateCampaign
"""
# candidate_campaign = models.ForeignKey(
# CandidateCampaign, null=False, blank=False, verbose_name='candidate campaign')
# position = models.ForeignKey(
# PositionEntered, null=False, blank=False, verbose_name='position about candidate')
def retrieve_all_positions_for_candidate_campaign(self, candidate_campaign_id, stance_we_are_looking_for):
# TODO Error check stance_we_are_looking_for
# Retrieve the support positions for this candidate_campaign_id
organization_position_list_found = False
try:
organization_position_list = PositionEntered.objects.order_by('date_entered')
organization_position_list = organization_position_list.filter(candidate_campaign_id=candidate_campaign_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE
organization_position_list = organization_position_list.filter(stance=stance_we_are_looking_for)
# organization_position_list = organization_position_list.filter(election_id=election_id)
if len(organization_position_list):
organization_position_list_found = True
except Exception as e:
handle_record_not_found_exception(e)
if organization_position_list_found:
return organization_position_list
else:
organization_position_list = {}
return organization_position_list
def calculate_positions_followed_by_voter(
self, all_positions_list_for_candidate_campaign, organizations_followed_by_voter):
"""
We need a list of positions that were made by an organization that this voter follows
:param all_positions_list_for_candidate_campaign:
:param organizations_followed_by_voter:
:return:
"""
this_voter_id = 1
positions_followed_by_voter = []
# Only return the positions if they are from organizations the voter follows
for position in all_positions_list_for_candidate_campaign:
if position.voter_id == this_voter_id: # TODO DALE Is this the right way to do this?
positions_followed_by_voter.append(position)
elif position.organization_id in organizations_followed_by_voter:
print "position {position_id} followed by voter (org {org_id})".format(
position_id=position.id, org_id=position.organization_id)
positions_followed_by_voter.append(position)
return positions_followed_by_voter
def calculate_positions_not_followed_by_voter(
self, all_positions_list_for_candidate_campaign, organizations_followed_by_voter):
"""
We need a list of positions that were made by an organization that this voter follows
:param all_positions_list_for_candidate_campaign:
:param organizations_followed_by_voter:
:return:
"""
positions_not_followed_by_voter = []
# Only return the positions if they are from organizations the voter follows
for position in all_positions_list_for_candidate_campaign:
# Some positions are for individual voters, so we want to filter those out
if position.organization_id \
and position.organization_id not in organizations_followed_by_voter:
print "position {position_id} NOT followed by voter (org {org_id})".format(
position_id=position.id, org_id=position.organization_id)
positions_not_followed_by_voter.append(position)
return positions_not_followed_by_voter
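# A minimal usage sketch for PositionListForCandidateCampaign (the candidate_campaign_id 1
# and the followed organization ids [3, 7, 12] are hypothetical example values):
#
#     position_list_manager = PositionListForCandidateCampaign()
#     support_positions = position_list_manager.retrieve_all_positions_for_candidate_campaign(1, SUPPORT)
#     followed = position_list_manager.calculate_positions_followed_by_voter(support_positions, [3, 7, 12])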
class PositionEnteredManager(models.Model):
def __unicode__(self):
return "PositionEnteredManager"
def retrieve_organization_candidate_campaign_position(self, organization_id, candidate_campaign_id):
"""
Find a position based on the organization_id & candidate_campaign_id
:param organization_id:
:param candidate_campaign_id:
:return:
"""
position_id = 0
voter_id = 0
measure_campaign_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position(
position_id, organization_id, voter_id, candidate_campaign_id, measure_campaign_id)
def retrieve_voter_candidate_campaign_position(self, voter_id, candidate_campaign_id):
organization_id = 0
position_id = 0
measure_campaign_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position(
position_id, organization_id, voter_id, candidate_campaign_id, measure_campaign_id)
def retrieve_position_from_id(self, position_id):
organization_id = 0
voter_id = 0
candidate_campaign_id = 0
measure_campaign_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position(
position_id, organization_id, voter_id, candidate_campaign_id, measure_campaign_id)
def retrieve_position(self, position_id, organization_id, voter_id, candidate_campaign_id, measure_campaign_id):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
position_on_stage = PositionEntered()
try:
if position_id > 0:
position_on_stage = PositionEntered.objects.get(id=position_id)
position_id = position_on_stage.id
elif organization_id > 0 and candidate_campaign_id > 0:
position_on_stage = PositionEntered.objects.get(
organization_id=organization_id, candidate_campaign_id=candidate_campaign_id)
# If still here, we found an existing position
position_id = position_on_stage.id
elif organization_id > 0 and measure_campaign_id > 0:
position_on_stage = PositionEntered.objects.get(
organization_id=organization_id, measure_campaign_id=measure_campaign_id)
position_id = position_on_stage.id
elif voter_id > 0 and candidate_campaign_id > 0:
position_on_stage = PositionEntered.objects.get(
voter_id=voter_id, candidate_campaign_id=candidate_campaign_id)
position_id = position_on_stage.id
elif voter_id > 0 and measure_campaign_id > 0:
position_on_stage = PositionEntered.objects.get(
voter_id=voter_id, measure_campaign_id=measure_campaign_id)
position_id = position_on_stage.id
except PositionEntered.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e)
error_result = True
exception_multiple_object_returned = True
except PositionEntered.DoesNotExist as e:
handle_exception_silently(e)
error_result = True
exception_does_not_exist = True
results = {
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'position_found': True if position_id > 0 else False,
'position_id': position_id,
'position': position_on_stage,
'is_support': position_on_stage.is_support(),
'is_oppose': position_on_stage.is_oppose(),
'is_no_stance': position_on_stage.is_no_stance(),
'is_information_only': position_on_stage.is_information_only(),
'is_still_deciding': position_on_stage.is_still_deciding(),
}
return results
def toggle_on_voter_support_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = SUPPORT
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_off_voter_support_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_on_voter_oppose_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = OPPOSE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_off_voter_oppose_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_on_voter_position_for_candidate_campaign(self, voter_id, candidate_campaign_id, stance):
# Does a position from this voter already exist?
position_entered_manager = PositionEnteredManager()
results = position_entered_manager.retrieve_voter_candidate_campaign_position(voter_id, candidate_campaign_id)
        voter_position_on_stage_found = False
        # Make sure this exists for the results dict below, even if we hit the MultipleObjectsReturned branch
        voter_position_on_stage = PositionEntered()
        position_id = 0
        if results['position_found']:
            print "toggle_on_voter_position_for_candidate_campaign: existing position found"
voter_position_on_stage = results['position']
# Update this position with new values
try:
voter_position_on_stage.stance = stance
# voter_position_on_stage.statement_text = statement_text
voter_position_on_stage.save()
position_id = voter_position_on_stage.id
voter_position_on_stage_found = True
except Exception as e:
handle_record_not_saved_exception(e)
elif results['MultipleObjectsReturned']:
print "delete all but one and take it over?"
elif results['DoesNotExist']:
try:
# Create new
voter_position_on_stage = PositionEntered(
voter_id=voter_id,
candidate_campaign_id=candidate_campaign_id,
stance=stance,
# statement_text=statement_text,
)
voter_position_on_stage.save()
position_id = voter_position_on_stage.id
voter_position_on_stage_found = True
except Exception as e:
handle_record_not_saved_exception(e)
results = {
'success': True if voter_position_on_stage_found else False,
'position_id': position_id,
'position': voter_position_on_stage,
}
return results
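# A minimal usage sketch for PositionEnteredManager (the ids 42 and 1 are hypothetical
# example values, not real voter or candidate campaign ids):
#
#     position_entered_manager = PositionEnteredManager()
#     results = position_entered_manager.toggle_on_voter_support_for_candidate_campaign(42, 1)
#     if results['success']:
#         print "voter 42 now supports candidate campaign 1 (position id %d)" % results['position_id']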
|
import streamlit as st
def show():
st.markdown("The termination criterion is a preset number of generations.")
max_gens = st.slider("Number of Generations", min_value=1, max_value=100, value=50)
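# A minimal usage sketch: if this module lives in, say, termination_page.py (hypothetical
# name), a Streamlit app can import it and call show() to render the explanation text and
# the generations slider; note that show() does not currently return the slider value.
#
#     import termination_page
#     termination_page.show()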
|
lado = float(input("What is the side length of the square? "))
area = lado * lado
area_aoquadrado = area * 2
print("The area of the square is {} and twice the area is {}".format(area, area_aoquadrado))
|
from django.views.generic import View
from django.conf import settings
from django.http import HttpResponseRedirect
from . import forms
class IndexView(View):
def get(self, request):
next = "/"
if "next" in request.GET:
next = request.GET.get("next")
response = HttpResponseRedirect(next)
if not request.GET:
return response
form = forms.LanguageCodeForm(data=request.GET)
if not form.is_valid():
return response
language = form.cleaned_data["language"]
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME,
language,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
secure=settings.LANGUAGE_COOKIE_SECURE,
httponly=settings.LANGUAGE_COOKIE_HTTPONLY,
samesite=settings.LANGUAGE_COOKIE_SAMESITE,
)
return response
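# A minimal usage sketch (the URL pattern below is a hypothetical example of how this view
# could be wired up in urls.py; the cookie parameters all come from the Django
# LANGUAGE_COOKIE_* settings used above):
#
#     # urls.py
#     # urlpatterns = [path("set-language/", IndexView.as_view())]
#
# A request such as GET /set-language/?language=de&next=/about/ then redirects to /about/
# and, if "de" passes LanguageCodeForm validation, sets the language cookie on the response.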
|
#! /usr/bin/env python
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the LICENSE file in the
root directory of this source tree.
Trains a simple GPT-2 based disambiguation model.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import collections
import json
import os
import torch
import torch.nn as nn
from transformers import (
GPT2Tokenizer,
get_linear_schedule_with_warmup,
AdamW,
)
from tqdm import tqdm as progressbar
from dataloader import Dataloader
from disambiguator import Disambiguator
def evaluate_model(model, loader, batch_size, save_path=None):
num_matches = 0
results = collections.defaultdict(list)
with torch.no_grad():
for batch in progressbar(loader.get_entire_batch(batch_size)):
output = model(batch)
predictions = torch.argmax(output, dim=1)
num_matches += (predictions == batch["gt_label"]).sum().item()
# Save results if need be.
if save_path:
for ii in range(predictions.shape[0]):
new_instance = {
"turn_id": batch["turn_id"][ii],
"disambiguation_label": predictions[ii].cpu().item(),
}
results[batch["dialog_id"][ii]].append(new_instance)
# Restructure results JSON and save.
if save_path:
results = [
{"dialog_id": dialog_id, "predictions": predictions,}
for dialog_id, predictions in results.items()
]
print(f"Saving: {save_path}")
with open(save_path, "w") as file_id:
json.dump(results, file_id)
accuracy = num_matches / loader.num_instances * 100
return accuracy
def main(args):
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
    num_added_tokens = tokenizer.add_special_tokens(
        {"additional_special_tokens": ["<USER>", "<SYS>"]}
    )
# Dataloader.
train_loader = Dataloader(tokenizer, args["train_file"], args)
val_loader = Dataloader(tokenizer, args["dev_file"], args)
test_loader = Dataloader(tokenizer, args["devtest_file"], args)
# Model.
model = Disambiguator(tokenizer, args)
model.train()
# loss function.
criterion = nn.CrossEntropyLoss()
# Prepare optimizer and schedule (linear warmup and decay)
optimizer = AdamW(
model.parameters(), lr=args["learning_rate"], eps=args["adam_epsilon"]
)
total_steps = (
int(train_loader.num_instances / args["batch_size"] * args["num_epochs"]) + 1
)
num_iters_epoch = train_loader.num_instances // args["batch_size"]
num_iters = 0
total_loss = None
# batch = train_loader.get_random_batch(args["batch_size"])
while True:
epoch = num_iters / (float(train_loader.num_instances) / args["batch_size"])
batch = train_loader.get_random_batch(args["batch_size"])
output = model(batch)
loss = criterion(output, batch["gt_label"])
if total_loss:
total_loss = 0.95 * total_loss + 0.05 * loss.item()
else:
total_loss = loss.item()
if num_iters % 100 == 0:
print("[Ep: {:.2f}][Loss: {:.2f}]".format(epoch, total_loss))
loss.backward()
optimizer.step()
model.zero_grad()
        # Evaluate the model every 1000 iterations.
if num_iters % 1000 == 0:
model.eval()
accuracy = evaluate_model(model, val_loader, args["batch_size"] * 5)
print("Accuracy [dev]: {}".format(accuracy))
# Save devtest results.
if args["result_save_path"]:
save_path = os.path.join(
args["result_save_path"], f"results_devtest_{num_iters}.json"
)
else:
save_path = None
accuracy = evaluate_model(
model, test_loader, args["batch_size"] * 5, save_path
)
print("Accuracy [devtest]: {}".format(accuracy))
model.train()
num_iters += 1
if epoch > args["num_epochs"]:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--train_file", required=True, help="Path to the training file")
parser.add_argument("--dev_file", required=True, help="Path to the dev file")
parser.add_argument(
"--devtest_file", required=True, help="Path to the devtest file"
)
parser.add_argument(
"--result_save_path", default=None, help="Path to save devtest results"
)
parser.add_argument(
"--max_turns", type=int, default=3, help="Number of turns in history"
)
parser.add_argument("--batch_size", type=int, default=128, help="Batch Size")
parser.add_argument(
"--max_length", type=int, default=512, help="Maximum length in utterance"
)
parser.add_argument(
"--num_epochs", type=int, default=10, help="Maximum number of epochs"
)
parser.add_argument(
"--learning_rate", type=float, default=5e-5, help="Learning rate"
)
parser.add_argument(
"--warmup_steps", type=int, default=0, help="Linear warmup over warmup_steps"
)
parser.add_argument(
"--adam_epsilon", type=float, default=1e-8, help="Eps for Adam optimizer"
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay")
parser.add_argument("--use_gpu", dest="use_gpu", action="store_true", default=False)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
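# A minimal invocation sketch (the script name and the data paths are hypothetical
# placeholders; only flags defined by the argparse parser above are used):
#
#     python train_disambiguator.py \
#         --train_file data/train.json \
#         --dev_file data/dev.json \
#         --devtest_file data/devtest.json \
#         --batch_size 32 --num_epochs 5 --use_gpu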
|
from django.contrib import admin
from lists.models import List
@admin.register(List)
class ListAdmin(admin.ModelAdmin):
"""Register List model at admin panel
Search by:
name : icontains
"""
list_display = ("name", "user", "count_rooms")
search_fields = ("name",)
filter_horizontal = ("rooms",)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
m = hashlib.sha256()
m.update('Passw0rd!')
secret_hash = map(ord, m.digest())
def var():
env.append(None)
return len(env) - 1
def instr(fn, arg1, arg2, result=None, lhs = None):
if lhs is None:
lhs = len(env)
env.append(result)
else:
env[lhs] = result
code.append([lhs, fn, arg1, arg2])
print_instr(**locals())
return lhs
def print_instr(fn, arg1, arg2, result, lhs):
def var(index):
if env[index] is None:
return 'x%d' % index
else:
return 'x%s=%s' % (index, repr(env[index]))
lhs = 'x%d' % lhs
fn = ['get_attr', 'apply', 'add', 'chr'][fn] if fn < 4 else var(fn)
arg1 = var(arg1)
arg2 = var(arg2)
result = '?' if result is None else result
print '%s = %s(%s, %s) = %s' % (lhs, fn, arg1, arg2, result)
def number(n):
if n in env:
return env.index(n)
else:
if (n&(n-1)) == 0:
return instr(add, number(n/2), number(n/2), n)
for i, c in enumerate(bin(n)[2:][::-1]):
if c == '1':
return instr(add, number(n - (2**i)), number(2**i), n)
def zero(var):
to_zero.append(var)
return var
def string(s):
if s in env or s in map(str,[0,1,2,3,4,5,6,7,8,9]):
return env.index(s if s in env else int(s))
else:
if len(s) == 1:
n = number(ord(s))
return instr(chr, n, 0, s)
else:
cached = s[:-1] in env
prefix = string(s[:-1])
return zero(instr(add, prefix, string(s[-1]), s, None if cached else prefix))
def get(obj, prop):
return zero(instr(get_attr, obj, string(prop)))
def fn(code):
return zero(instr(get(get_attr, 'constructor'), string('x'), string(code)))
def pair(a, b):
return zero(instr(array, a, b))
def hash(text):
crypto = get(window, 'crypto')
subtle = get(crypto, 'subtle')
digest = get(subtle, 'digest')
hash_method = string('sha-256')
digest_args = pair(hash_method, password)
apply_args = pair(subtle, digest_args)
result = zero(instr(apply, digest, apply_args))
return result
alphabet = map(
unichr,
range(ord(u'a'), ord(u'z')) +
range(ord(u'A'), ord(u'Z')) +
range(ord(u'\u0400'), ord(u'\u04FF'))
)
def final_code():
result = u''
for line in code:
for c in line:
result += alphabet[c]
return result
env = []
code = []
get_attr = var()
apply = var()
add = var()
chr = var()
env.append(0)
env.append(1)
password = var()
out = var()
#log = var()
to_zero = []
# Pre-warm string and number cache
for i in range(len(secret_hash)):
number(i)
for n in secret_hash:
number(n)
zero(string('return '))
window = zero(instr(fn('return window'), string('x'), string('x')))
array = get(window, 'Array')
xor = fn('return x[0]^x[1]')
bit_or = fn('return x[0]|x[1]')
h = hash(password)
h = zero(instr(get(window, 'Uint8Array'), h, string('x')))
n = instr(add, env.index(0), env.index(0), 0)
pair_to_compare = zero(instr(add, env.index(0), env.index(0), 0))
pair_to_or = zero(instr(add, env.index(0), env.index(0), 0))
equal = instr(add, env.index(0), env.index(0), 0)
#instr(log, string('hash:'), h)
for i, x in enumerate(secret_hash):
instr(get_attr, h, number(i), None, n)
instr(array, n, number(secret_hash[i]), None, pair_to_compare)
#instr(log, string('compare:'), pair_to_compare)
instr(xor, pair_to_compare, string('x'), None, equal)
instr(array, out, equal, None, pair_to_or)
instr(bit_or, pair_to_or, string('x'), None, out)
for x in set(to_zero):
instr(add, env.index(0), env.index(0), 0, x)
print 'out', out, alphabet[out]
print 'array', array, alphabet[array]
print 'xor', xor, alphabet[xor]
print 'bit_or', bit_or, alphabet[bit_or]
print 'h', h, alphabet[h]
print env
print code
c = final_code()
print len(c), repr(final_code())
|
import os
import tensorflow as tf
from config import cfg
class Model(object):
def __init__(self, img_channels=3, num_label=2, call_type='training'):
        '''
        Args:
            img_channels: Integer, the number of channels of the input image.
            num_label: Integer, the number of categories.
            call_type: String, 'training' uses cfg.patch_size; any other value uses cfg.test_patch_size.
        '''
self.batch_size = cfg.batch_size
if call_type == 'training':
self.height = cfg.patch_size
self.width = cfg.patch_size
else:
self.height = cfg.test_patch_size
self.width = cfg.test_patch_size
self.img_channels = img_channels
self.num_label = num_label
from ictnet import build_fc_densenet as build_model
self.graph = tf.Graph()
with self.graph.as_default():
self.image = tf.placeholder(tf.float32, shape=(self.batch_size, self.height, self.width, self.img_channels))
self.labels = tf.placeholder(tf.uint8, shape=(self.batch_size, self.height, self.width, 1))
if cfg.is_training and call_type=='training':
if cfg.is_one_hot:
self.mask = tf.one_hot(self.labels, depth=self.num_label, axis=-1, dtype=tf.float32)
else:
self.mask = self.labels
print("Model creation start.")
self.network, self.prediction, self.probabilities = build_model(self.image, num_classes=self.num_label, preset_model='FC-DenseNet103', dropout_p=0.0)
print("Model creation complete.")
self.loss()
self._train_summary()
self.global_step = tf.Variable(1, name='global_step', trainable=False)
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001, decay=0.995)
self.train_op = self.optimizer.minimize(self.loss, var_list=[var for var in tf.trainable_variables()], global_step=self.global_step)
else:
self.network, self.prediction, self.probabilities = build_model(self.image, num_classes=self.num_label, preset_model='FC-DenseNet103', dropout_p=0.0)
def loss(self):
# 1. Cross-entropy loss
self.cross_entropy_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=self.network, labels=self.mask))
# 2. Total loss
self.loss = self.cross_entropy_loss
# Accuracy
correct_prediction = tf.equal(tf.to_int32(self.labels), tf.reshape(self.prediction, shape=[self.batch_size, self.height, self.width, 1]))
self.accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
# Summary
def _train_summary(self):
train_summary = []
train_summary.append(tf.summary.scalar('train/cross_entropy_loss', self.loss))
train_summary.append(tf.summary.scalar('train/accuracy', self.accuracy))
train_summary.append(tf.summary.image('train/original', self.image))
train_summary.append(tf.summary.image('train/ground_truth', tf.to_float(self.labels)))
train_summary.append(tf.summary.image('train/building_prob', tf.reshape(self.probabilities[:, :, :, 1], shape=[self.batch_size, self.height, self.width, 1])))
train_summary.append(tf.summary.image('train/building_pred', tf.reshape(tf.to_float(self.prediction), shape=[self.batch_size, self.height, self.width, 1])))
#train_summary.append(tf.summary.histogram('train/activation', self.activation))
self.train_summary = tf.summary.merge(train_summary)
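# A minimal TF1-style training sketch (assumes cfg.is_training is True and that "images"
# and "masks" are numpy batches shaped to match the placeholders defined above; both names
# are hypothetical placeholders):
#
#     model = Model(img_channels=3, num_label=2, call_type='training')
#     with model.graph.as_default():
#         with tf.Session() as sess:
#             sess.run(tf.global_variables_initializer())
#             _, loss_value = sess.run([model.train_op, model.loss],
#                                      feed_dict={model.image: images, model.labels: masks})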
|
class Scene(object):
    def enter(self):
        pass
class Engine(object):
    def __init__(self, scene_map):
        pass
    def play(self):
        pass
class Death(Scene):
    def enter(self):
        pass
class CentralCorridor(Scene):
    def enter(self):
        pass
class LaserWeaponArmory(Scene):
    def enter(self):
        pass
class TheBridge(Scene):
    def enter(self):
        pass
class EscapePod(Scene):
    def enter(self):
        pass
class Map(object):
    def __init__(self, start_scene):
        pass
    def next_scene(self, scene_name):
        pass
    def opening_scene(self):
        pass
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
"""[summary]
Init module
[description]
The init module creates Flask object, databases, and logging handler
"""
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import sqlite3
from config import Config
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import event
import os
import csv
import logging
from logging.handlers import RotatingFileHandler
from logging.handlers import SMTPHandler
# create application object of class Flask
app = Flask(__name__)
app.config.from_object(Config) # retrieve database configuration from the class Config
db = SQLAlchemy(app)
from app import dbmodels
from app import routes
#if not app.debug:
# initialize the log handler: The handler used is RotatingFileHandler which rotates the log file when the size of the file exceeds a certain limit.
logHandler = RotatingFileHandler('info.log', maxBytes=1000, backupCount=1)
# set the log handler level
logHandler.setLevel(logging.INFO)
# create formatter and add it to the handlers: date time - name of package - file name (module name) - function name - line number - level (error, info, ...) - message
formatter = logging.Formatter('%(asctime)s - %(name)s - %(module)s - %(funcName)s - %(lineno)d- %(levelname)s - %(message)s')
logHandler.setFormatter(formatter)
# set the app logger level: ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'). See http://flask.pocoo.org/docs/0.12/errorhandling/
app.logger.setLevel(logging.INFO)
app.logger.addHandler(logHandler)
#provision initial values from csv and remove file on success
def insert_initial_values(*args, **kwargs):
if os.access(app.config['PROVISION_FILE'], os.R_OK):
app.logger.info("Provisioning file found, loading intial user data; provision_file='%s'" % app.config['PROVISION_FILE'])
with open(app.config['PROVISION_FILE'], 'r') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
app.logger.info("Provisioning user; username='%s'" % (row['Username'],))
dbmodels.add_user(uname=row['Username'], passwd=row['Password'], email=row['Email'], role=dbmodels.ADMIN_ROLE)
f.close()
db.session.commit()
os.unlink(app.config['PROVISION_FILE'])
else:
app.logger.info("Could not read provisioning file, skipping initial user addition; provision_file='%s'" % app.config['PROVISION_FILE'])
event.listen(dbmodels.User.__table__, 'after_create', insert_initial_values)
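# A minimal sketch of the provisioning CSV consumed by insert_initial_values above (the
# path comes from Config.PROVISION_FILE; the row below shows hypothetical example values):
#
#     Username,Password,Email
#     admin,ChangeMe123,admin@example.com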
# create all databases from dbmodels
db.create_all()
db.session.commit()
#if not app.debug:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='System Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR) # change to CRITICAL later
app.logger.addHandler(mail_handler)
|
# footer html
from libraries.string_processes import pcom_create_html_from_array
from libraries import constants as ct
from libraries import schematics as sch
# adds footer
def polimorf_add_footer(footer_data,meta_present):
out_html = ct.PCOM_NO_ENTRY
if meta_present:
add_footer = pcom_create_html_from_array(footer_data)
if (add_footer):
out_html = (sch.PM_FOOTER_WRAP_OPEN + ct.NL
+ sch.PM_FOOTER_OUTER_OPEN + ct.NL
+ sch.PM_FOOTER_INNER_OPEN + ct.NL
+ add_footer
+ sch.PM_FOOTER_INNER_CLOSE + ct.NL
+ sch.PM_FOOTER_OUTER_CLOSE + ct.NL
+ sch.PM_FOOTER_WRAP_CLOSE)
return out_html
|
from math import *
from sympy import *
global Y,deriv,dydx,count
count=0
h=5
global Vgb,Vfb,q,Es,Cox,Po,No,St,NA,ND
def func( Y ):
global Vgb,Vfb,q,Es,Cox,Po,No,St,NA,ND
try:
p=Vfb + Y - (sqrt(2*q*Es)/Cox) *(sqrt( Po*St*( e**(-Y/St )-1) +( NA-ND )*Y + No*St*( e**(Y/St )-1) ) ) -Vgb
return p
except ZeroDivisionError:
print("Error!!!!!!!!!!!", Y)
return 0
def derivFunc( Y ):
k= deriv.doit().subs({t:Y})
if k==0:
return 1
else:
return k
# Function to find the root
def newtonRaphson( Y ):
global count
global Vgb,Vfb,q,Es,Cox,Po,No,St,NA,ND,h
if derivFunc(Y)!=0:
h = func(Y) / derivFunc(Y)
while abs(h) >= 0.01:
count=count+1
try:
h = func(Y)/derivFunc(Y)
except ZeroDivisionError:
print("Error! - derivative zero for x = ", Y)
# x(i+1) = x(i) - f(x) / f'(x)
Y = Y - h
print("The value of the root is : ",
"%.4f"% Y)
print("the no of iterations is ",count)
t= Symbol('t')
# Vfb= Symbol('Vfb')
# q= Symbol('q')
# Es= Symbol('Es')
# Cox= Symbol('Cox')
# Vgb= Symbol('Vgb')
# St= Symbol('St')
# Po= Symbol('Po')
# No= Symbol('No')
# NA= Symbol('NA')
# ND= Symbol('ND')
Vgb=1
Vfb=-1
NA=10**24
ND=10**15
St=0.026
q=1.6*10**(-19)
Es=1.05*10**(-10)
Cox=1.726*10**(-2)
No=10**15
Po=10**24
f=Vfb + t - (sqrt(2*q*Es)/Cox) *(sqrt( Po*St*( e**(-t/St )-1) +( NA-ND )*t + No*St*( e**(t/St )-1) ) ) -Vgb
deriv= Derivative(f, t)
print(deriv.doit().subs({t:-5}))
dydx=deriv.doit()
#deriv.doit()
#newtonRaphson(x0)
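# A minimal usage sketch (the starting guess -5.0 is a hypothetical example value chosen
# to match the derivative check above; whether the iteration converges depends on the
# starting point and the parameter values):
#
#     newtonRaphson(-5.0)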
|
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import tensorflow as tf
from .. import volume
@pytest.mark.parametrize("shape", [(10, 10, 10), (10, 10, 10, 3)])
@pytest.mark.parametrize("scalar_labels", [True, False])
def test_apply_random_transform(shape, scalar_labels):
x = np.ones(shape).astype(np.float32)
if scalar_labels:
transform_func = volume.apply_random_transform_scalar_labels
y_shape = (1,)
else:
transform_func = volume.apply_random_transform
y_shape = shape
y_in = np.random.randint(0, 2, size=y_shape).astype(np.float32)
x, y = transform_func(x, y_in)
x = x.numpy()
y = y.numpy()
# Test that values were not changed in the labels.
if scalar_labels:
assert_array_equal(y, y_in)
else:
assert_array_equal(np.unique(y), [0, 1])
assert x.shape == shape
assert y.shape == y_shape
with pytest.raises(ValueError):
inconsistent_shape = tuple([sh + 1 for sh in shape])
x, y = transform_func(np.ones(shape), np.ones(inconsistent_shape))
with pytest.raises(ValueError):
y_shape = (1,) if scalar_labels else (10, 10)
x, y = transform_func(np.ones((10, 10)), np.ones(y_shape))
x = np.random.randn(*shape).astype(np.float32)
y_shape = (1,) if scalar_labels else shape
y = np.random.randint(0, 2, size=y_shape).astype(np.float32)
x0, y0 = transform_func(x, y)
x1, y1 = transform_func(x, y)
assert not np.array_equal(x, x0)
assert not np.array_equal(x, x1)
assert not np.array_equal(x0, x1)
if scalar_labels:
assert np.array_equal(y, y0)
assert np.array_equal(y, y1)
assert np.array_equal(y0, y1)
else:
assert not np.array_equal(y, y0)
assert not np.array_equal(y, y1)
assert not np.array_equal(y0, y1)
# Test that new iterations yield different augmentations.
x = np.arange(64).reshape(1, 4, 4, 4).astype(np.float32)
y_shape = (1, 1) if scalar_labels else x.shape
y = np.random.randint(0, 2, size=y_shape).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
# sanity check
x0, y0 = next(iter(dataset))
x1, y1 = next(iter(dataset))
assert_array_equal(x[0], x0)
assert_array_equal(x0, x1)
assert_array_equal(y[0], y0)
assert_array_equal(y0, y1)
# Need to reset the seed, because it is set in other tests.
tf.random.set_seed(None)
dataset = dataset.map(transform_func)
x0, y0 = next(iter(dataset))
x1, y1 = next(iter(dataset))
assert not np.array_equal(x0, x1)
if scalar_labels:
assert_array_equal(y0, y1)
else:
assert not np.array_equal(y0, y1)
assert_array_equal(np.unique(y0), [0, 1])
assert_array_equal(np.unique(y1), [0, 1])
# Naive test that features were interpolated without nearest neighbor.
assert np.any(x0 % 1)
assert np.any(x1 % 1)
def test_binarize():
x = [
0.49671415,
-0.1382643,
0.64768854,
1.52302986,
-0.23415337,
-0.23413696,
1.57921282,
0.76743473,
]
x = np.asarray(x, dtype="float64")
expected = np.array([True, False, True, True, False, False, True, True])
result = volume.binarize(x)
assert_array_equal(expected, result)
assert result.dtype == tf.float64
result = volume.binarize(x.astype(np.float32))
assert_array_equal(expected, result)
assert result.dtype == tf.float32
x = np.asarray([-2, 0, 2, 0, 2, -2, -1, 1], dtype=np.int32)
expected = np.array([False, False, True, False, True, False, False, True])
result = volume.binarize(x)
assert_array_equal(expected, result)
assert result.dtype == tf.int32
result = volume.binarize(x.astype(np.int64))
assert_array_equal(expected, result)
assert result.dtype == tf.int64
@pytest.mark.parametrize("replace_func", [volume.replace, volume.replace_in_numpy])
def test_replace(replace_func):
data = np.arange(5)
mapping = {0: 10, 1: 20, 2: 30, 3: 40, 4: 30}
output = replace_func(data, mapping)
assert_array_equal(output, [10, 20, 30, 40, 30])
# Test that overlapping keys and values gives correct result.
data = np.arange(5)
mapping = {0: 1, 1: 2, 2: 3, 3: 4}
output = replace_func(data, mapping)
assert_array_equal(output, [1, 2, 3, 4, 0])
data = np.arange(8).reshape(2, 2, 2)
mapping = {0: 100, 100: 10, 10: 5, 3: 5}
outputs = replace_func(data, mapping, zero=False)
expected = data.copy()
expected[0, 0, 0] = 100
expected[0, 1, 1] = 5
assert_array_equal(outputs, expected)
# Zero values not in mapping values.
outputs = replace_func(data, mapping, zero=True)
expected = np.zeros_like(data)
expected[0, 0, 0] = 100
expected[0, 1, 1] = 5
assert_array_equal(outputs, expected)
@pytest.mark.parametrize("std_func", [volume.standardize, volume.standardize_numpy])
def test_standardize(std_func):
x = np.random.randn(10, 10, 10).astype(np.float32)
outputs = np.array(std_func(x))
assert np.allclose(outputs.mean(), 0, atol=1e-07)
assert np.allclose(outputs.std(), 1, atol=1e-07)
if std_func == volume.standardize:
x = np.random.randn(10, 10, 10).astype(np.float64)
outputs = np.array(std_func(x))
assert outputs.dtype == np.float32
def _stack_channels(_in):
return np.stack([_in, 2 * _in, 3 * _in], axis=-1)
@pytest.mark.parametrize("multichannel", [True, False])
@pytest.mark.parametrize("to_blocks_func", [volume.to_blocks, volume.to_blocks_numpy])
def test_to_blocks(multichannel, to_blocks_func):
x = np.arange(8).reshape(2, 2, 2)
block_shape = (1, 1, 1)
if multichannel:
x = _stack_channels(x)
block_shape = (1, 1, 1, 3)
outputs = np.array(to_blocks_func(x, block_shape))
expected = np.array(
[[[[0]]], [[[1]]], [[[2]]], [[[3]]], [[[4]]], [[[5]]], [[[6]]], [[[7]]]]
)
if multichannel:
expected = _stack_channels(expected)
assert_array_equal(outputs, expected)
block_shape = 2
if multichannel:
block_shape = (2, 2, 2, 3)
outputs = np.array(to_blocks_func(x, block_shape))
assert_array_equal(outputs, x[None])
block_shape = (3, 3, 3)
if multichannel:
block_shape = (3, 3, 3, 3)
with pytest.raises((tf.errors.InvalidArgumentError, ValueError)):
to_blocks_func(x, block_shape)
block_shape = (3, 3)
with pytest.raises(ValueError):
to_blocks_func(x, block_shape)
@pytest.mark.parametrize("multichannel", [True, False])
@pytest.mark.parametrize(
"from_blocks_func", [volume.from_blocks, volume.from_blocks_numpy]
)
def test_from_blocks(multichannel, from_blocks_func):
x = np.arange(64).reshape(4, 4, 4)
block_shape = (2, 2, 2)
if multichannel:
x = _stack_channels(x)
block_shape = (2, 2, 2, 3)
outputs = from_blocks_func(volume.to_blocks(x, block_shape), x.shape)
assert_array_equal(outputs, x)
with pytest.raises(ValueError):
x = np.arange(80).reshape(10, 2, 2, 2)
outputs = from_blocks_func(x, (4, 4, 4))
def test_blocks_numpy_value_errors():
with pytest.raises(ValueError):
x = np.random.rand(4, 4)
output_shape = (4, 4, 4)
volume.to_blocks_numpy(x, output_shape)
with pytest.raises(ValueError):
x = np.random.rand(4, 4, 4)
output_shape = (4, 4, 4)
volume.from_blocks_numpy(x, output_shape)
with pytest.raises(ValueError):
x = np.random.rand(4, 4, 4, 4)
output_shape = (4, 4)
volume.from_blocks_numpy(x, output_shape)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 11 16:19:39 2014
"""
import os
import sys
import imp
# Put the location of the ODYM modules on the system path.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\modules') # add ODYM module directory to system path
# NOTE: The hidden variable __file__ must be known to the script for the directory structure to work.
# Therefore: when first using the model, run the entire script with F5 so that the __file__ variable is created.
import dynamic_stock_model as dsm # remove and import the class manually if this unit test is run as standalone script
imp.reload(dsm)
import numpy as np
import unittest
###############################################################################
"""My Input for fixed lifetime"""
Time_T_FixedLT = np.arange(0,10)
Inflow_T_FixedLT = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5])}
lifetime_FixedLT0 = {'Type': 'Fixed', 'Mean': np.array([0])}
#lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5,5,5,5,5,5,5,5,5,5])}
lifetime_NormLT = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
lifetime_NormLT0 = {'Type': 'Normal', 'Mean': np.array([0]), 'StdDev': np.array([1.5])}
###############################################################################
"""My Output for fixed lifetime"""
Outflow_T_FixedLT = np.array([0, 0, 0, 0, 0, 1, 2, 3, 4, 5])
Outflow_TC_FixedLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 5, 0, 0, 0, 0, 0]])
Stock_T_FixedLT = np.array([1, 3, 6, 10, 15, 20, 25, 30, 35, 40])
StockChange_T_FixedLT = np.array([1, 2, 3, 4, 5, 5, 5, 5, 5, 5])
Stock_TC_FixedLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0],
[0, 2, 3, 4, 5, 6, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 6, 7, 0, 0, 0],
[0, 0, 0, 4, 5, 6, 7, 8, 0, 0],
[0, 0, 0, 0, 5, 6, 7, 8, 9, 0],
[0, 0, 0, 0, 0, 6, 7, 8, 9, 10]])
Bal = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
"""My Output for normally distributed lifetime"""
Stock_TC_NormLT = np.array([[ 9.99570940e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.96169619e-01, 1.99914188e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.77249868e-01, 1.99233924e+00, 2.99871282e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.08788780e-01, 1.95449974e+00, 2.98850886e+00,
3.99828376e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 7.47507462e-01, 1.81757756e+00, 2.93174960e+00,
3.98467848e+00, 4.99785470e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 5.00000000e-01, 1.49501492e+00, 2.72636634e+00,
3.90899947e+00, 4.98084810e+00, 5.99742564e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.52492538e-01, 1.00000000e+00, 2.24252239e+00,
3.63515512e+00, 4.88624934e+00, 5.97701772e+00,
6.99699658e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.12112197e-02, 5.04985075e-01, 1.50000000e+00,
2.99002985e+00, 4.54394390e+00, 5.86349921e+00,
6.97318734e+00, 7.99656752e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.27501319e-02, 1.82422439e-01, 7.57477613e-01,
2.00000000e+00, 3.73753731e+00, 5.45273268e+00,
6.84074908e+00, 7.96935696e+00, 8.99613846e+00,
0.00000000e+00],
[ 3.83038057e-03, 4.55002639e-02, 2.73633659e-01,
1.00997015e+00, 2.50000000e+00, 4.48504477e+00,
6.36152146e+00, 7.81799894e+00, 8.96552657e+00,
9.99570940e+00]])
Stock_T_NormLT = np.array([ 0.99957094, 2.9953115 , 5.96830193, 9.85008113,
14.4793678 , 19.60865447, 24.99043368, 30.46342411,
35.95916467, 41.45873561])
Outflow_T_NormLT = np.array([ 4.29060333e-04, 4.25944090e-03, 2.70095728e-02,
1.18220793e-01, 3.70713330e-01, 8.70713330e-01,
1.61822079e+00, 2.52700957e+00, 3.50425944e+00,
4.50042906e+00])
Outflow_TC_NormLT = np.array([[ 4.29060333e-04, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 3.40132023e-03, 8.58120666e-04, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.89197514e-02, 6.80264047e-03, 1.28718100e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.78395028e-02, 1.02039607e-02,
1.71624133e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 1.36922176e-01, 5.67592541e-02,
1.36052809e-02, 2.14530167e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 3.22562636e-01, 2.05383263e-01,
7.56790055e-02, 1.70066012e-02, 2.57436200e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 4.95014925e-01, 4.83843953e-01,
2.73844351e-01, 9.45987569e-02, 2.04079214e-02,
3.00342233e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 4.95014925e-01, 7.42522387e-01,
6.45125271e-01, 3.42305439e-01, 1.13518508e-01,
2.38092416e-02, 3.43248267e-03, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.22562636e-01, 7.42522387e-01,
9.90029850e-01, 8.06406589e-01, 4.10766527e-01,
1.32438260e-01, 2.72105619e-02, 3.86154300e-03,
-0.00000000e+00],
[ 1.89197514e-02, 1.36922176e-01, 4.83843953e-01,
9.90029850e-01, 1.23753731e+00, 9.67687907e-01,
4.79227614e-01, 1.51358011e-01, 3.06118821e-02,
4.29060333e-03]])
StockChange_T_NormLT = np.array([ 0.99957094, 1.99574056, 2.97299043, 3.88177921, 4.62928667,
5.12928667, 5.38177921, 5.47299043, 5.49574056, 5.49957094])
"""My Output for Weibull-distributed lifetime"""
Stock_TC_WeibullLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # computed with Excel and taken from there
[0.367879441, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0.100520187, 0.735758882, 3, 0, 0, 0, 0, 0, 0, 0],
[0.023820879, 0.201040373, 1.103638324, 4, 0, 0, 0, 0, 0, 0],
[0.005102464, 0.047641758, 0.30156056, 1.471517765,5, 0, 0, 0, 0, 0],
[0.001009149, 0.010204929, 0.071462637, 0.402080746,1.839397206, 6, 0, 0, 0, 0],
[0.000186736, 0.002018297, 0.015307393, 0.095283516, 0.502600933, 2.207276647, 7, 0, 0, 0],
[3.26256E-05, 0.000373472, 0.003027446, 0.020409858, 0.119104394, 0.60312112, 2.575156088, 8, 0, 0],
[5.41828E-06, 6.52513E-05, 0.000560208, 0.004036594, 0.025512322, 0.142925273, 0.703641306, 2.943035529, 9, 0],
[8.59762E-07, 1.08366E-05, 9.78769E-05, 0.000746944, 0.005045743, 0.030614786, 0.166746152, 0.804161493, 3.310914971, 10]])
Stock_T_WeibullLT = np.array([1,2.367879441,3.836279069,5.328499576,6.825822547,8.324154666,9.822673522,11.321225,12.8197819,14.31833966])
Outflow_T_WeibullLT = np.array([0,0.632120559,1.531600372,2.507779493,3.502677029,4.50166788,5.501481144,6.501448519,7.5014431,8.501442241])
Outflow_TC_WeibullLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.632120559, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.267359255, 1.264241118, 0, 0, 0, 0, 0, 0, 0, 0],
[0.076699308, 0.534718509, 1.896361676, 0, 0, 0, 0, 0, 0, 0],
[0.018718414, 0.153398615, 0.802077764, 2.528482235, 0, 0, 0, 0, 0, 0],
[0.004093316, 0.037436829, 0.230097923, 1.069437018, 3.160602794, 0, 0, 0, 0, 0],
[0.000822413, 0.008186632, 0.056155243, 0.306797231, 1.336796273, 3.792723353, 0, 0, 0, 0],
[0.00015411, 0.001644825, 0.012279947, 0.074873658, 0.383496539, 1.604155527, 4.424843912, 0, 0, 0],
[2.72074E-05, 0.000308221, 0.002467238, 0.016373263, 0.093592072, 0.460195846, 1.871514782, 5.056964471, 0, 0],
[4.55852E-06, 5.44147E-05 , 0.000462331 , 0.00328965, 0.020466579, 0.112310487, 0.536895154, 2.138874037, 5.689085029, 0]])
StockChange_T_WeibullLT = np.array([1,1.367879441,1.468399628,1.492220507,1.497322971,1.49833212,1.498518856,1.498551481,1.4985569,1.498557759])
lifetime_WeibullLT = {'Type': 'Weibull', 'Shape': np.array([1.2]), 'Scale': np.array([1])}
InitialStock_WB = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_WB = np.array([11631.1250671964, 1845.6048709861, 2452.0593141014, 1071.0305279511, 198.1868742385, 391.9674590243, 83.9599583940, 29.8447516023, 10.8731273138, 7.5000000000])
# We need 10 digits after the decimal point to get an overlap of 9 digits after the decimal point with np.testing.
# The total number of significant digits is higher, because there are up to 5 digits before the decimal point.
# For the stock-driven model with initial stock, calculated with Excel
Sc_InitialStock_2_Ref = np.array([[ 3.29968072, 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.28845263, 5.1142035 , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.2259967 , 5.09680099, 2.0068288 , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3. , 5. , 2. , 4. , 0. ,
0. , 0. , 0. , 0. ],
[ 2.46759471, 4.64972578, 1.962015 , 3.98638888, 4.93427563,
0. , 0. , 0. , 0. ],
[ 1.65054855, 3.82454624, 1.82456634, 3.91067739, 4.91748538,
3.8721761 , 0. , 0. , 0. ],
[ 0.83350238, 2.55819937, 1.50076342, 3.63671549, 4.82409004,
3.85899993, 2.78772936, 0. , 0. ],
[ 0.30109709, 1.2918525 , 1.00384511, 2.9913133 , 4.48613916,
3.78570788, 2.77824333, 3.36180162, 0. ],
[ 0.07510039, 0.46667297, 0.5069268 , 2.00085849, 3.68999109,
3.5205007 , 2.72547754, 3.35036215, 3.66410986]])
Sc_InitialStock_2_Ref_Sum = np.array([ 3.29968072, 8.40265614, 10.32962649, 14. ,
18. , 20. , 20. , 20. , 20. ])
Oc_InitialStock_2_Ref = np.array([[ 1.41636982e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 1.12280883e-02, 2.19524375e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 6.24559363e-02, 1.74025106e-02, 8.61420234e-04,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 2.25996698e-01, 9.68009922e-02, 6.82879736e-03,
1.71697802e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 3.50274224e-01, 3.79849998e-02,
1.36111209e-02, 2.11801070e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 8.25179532e-01, 1.37448656e-01,
7.57114903e-02, 1.67902556e-02, 1.66211031e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 1.26634687e+00, 3.23802924e-01,
2.73961897e-01, 9.33953405e-02, 1.31761643e-02,
1.19661751e-03, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 1.26634687e+00, 4.96918311e-01,
6.45402188e-01, 3.37950879e-01, 7.32920558e-02,
9.48603036e-03, 1.44303487e-03, -0.00000000e+00],
[ 2.25996698e-01, 8.25179532e-01, 4.96918311e-01,
9.90454815e-01, 7.96148072e-01, 2.65207178e-01,
5.27657861e-02, 1.14394721e-02, 1.57279902e-03]])
I_InitialStock_2_Ref = np.array([ 3.30109709, 5.11639875, 2.00769022, 4.00171698, 4.93639364, 3.87383821, 2.78892598, 3.36324466, 3.66568266])
""" Test case with fixed lifetime for initial stock"""
Time_T_FixedLT_X = np.arange(1, 9, 1)
lifetime_FixedLT_X = {'Type': 'Fixed', 'Mean': np.array([5])}
InitialStock_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Inflow_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Time_T_FixedLT_XX = np.arange(1, 11, 1)
lifetime_NormLT_X = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
InitialStock_XX = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_XX = np.array([ 2.61070664, 0.43955789, 0.87708508, 0.79210262, 0.4,
2.67555857, 2.20073139, 3.06983925, 4.01538044, 7.50321933])
""" Test case with normally distributed lifetime for initial stock and stock-driven model"""
Time_T_FixedLT_2 = np.arange(1, 10, 1)
lifetime_NormLT_2 = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
InitialStock_2 = np.array([3,5,2,4])
FutureStock_2 = np.array([0,0,0,0,18,20,20,20,20])
ThisSwitchTime = 5 # First year with future stock curve, start counting from 1.
Inflow_2 = np.array([3.541625588, 5.227890554,2.01531097,4])
###############################################################################
"""Create Dynamic Stock Models and hand over the pre-defined values."""
# For zero lifetime: border case
myDSM0 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT0)
# For fixed LT
myDSM = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT)
myDSM2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_FixedLT, lt=lifetime_FixedLT)
myDSMx = dsm.DynamicStockModel(t=Time_T_FixedLT_X, lt=lifetime_FixedLT_X)
TestInflow_X = myDSMx.compute_i_from_s(InitialStock=InitialStock_X)
myDSMxy = dsm.DynamicStockModel(t=Time_T_FixedLT_X, i=TestInflow_X, lt=lifetime_FixedLT_X)
# For zero normally distributed lifetime: border case
myDSM0n = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT0)
# For normally distributed Lt
myDSM3 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT)
myDSM4 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_NormLT, lt=lifetime_NormLT)
myDSMX = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_NormLT_X)
TestInflow_XX = myDSMX.compute_i_from_s(InitialStock=InitialStock_XX)
myDSMXY = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_XX, lt=lifetime_NormLT_X)
# Test compute_stock_driven_model_initialstock:
TestDSM_IntitialStock = dsm.DynamicStockModel(t=Time_T_FixedLT_2, s=FutureStock_2, lt=lifetime_NormLT_2)
Sc_InitialStock_2,Oc_InitialStock_2,I_InitialStock_2 = TestDSM_IntitialStock.compute_stock_driven_model_initialstock(InitialStock = InitialStock_2, SwitchTime = ThisSwitchTime)
# Compute stock back from inflow
TestDSM_IntitialStock_Verify = dsm.DynamicStockModel(t=Time_T_FixedLT_2, i=I_InitialStock_2, lt=lifetime_NormLT_2)
Sc_Stock_2 = TestDSM_IntitialStock_Verify.compute_s_c_inflow_driven()
Sc_Stock_2_Sum = Sc_Stock_2.sum(axis =1)
Sc_Stock_Sum = TestDSM_IntitialStock_Verify.compute_stock_total()
Sc_Outflow_t_c = TestDSM_IntitialStock_Verify.compute_o_c_from_s_c()
# For Weibull-distributed Lt
myDSMWB1 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_WeibullLT)
myDSMWB2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_WeibullLT, lt=lifetime_WeibullLT)
myDSMWB3 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_WeibullLT)
TestInflow_WB = myDSMWB3.compute_i_from_s(InitialStock=InitialStock_XX)
myDSMWB4 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_WB, lt=lifetime_WeibullLT)
# Compute full stock model in correct order
###############################################################################
"""Unit Test Class"""
class KnownResultsTestCase(unittest.TestCase):
def test_inflow_driven_model_fixedLifetime_0(self):
"""Test Inflow Driven Model with Fixed product lifetime of 0."""
np.testing.assert_array_equal(myDSM0.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape))
np.testing.assert_array_equal(myDSM0.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0])))
np.testing.assert_array_equal(myDSM0.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0])))
np.testing.assert_array_equal(myDSM0.compute_outflow_mb(), Inflow_T_FixedLT)
np.testing.assert_array_equal(myDSM0.check_stock_balance(), Bal.transpose())
def test_inflow_driven_model_fixedLifetime(self):
"""Test Inflow Driven Model with Fixed product lifetime."""
np.testing.assert_array_equal(myDSM.compute_s_c_inflow_driven(), Stock_TC_FixedLT)
np.testing.assert_array_equal(myDSM.compute_stock_total(),Stock_T_FixedLT)
np.testing.assert_array_equal(myDSM.compute_o_c_from_s_c(), Outflow_TC_FixedLT)
np.testing.assert_array_equal(myDSM.compute_outflow_total(), Outflow_T_FixedLT)
np.testing.assert_array_equal(myDSM.compute_stock_change(), StockChange_T_FixedLT)
np.testing.assert_array_equal(myDSM.check_stock_balance(), Bal.transpose())
def test_stock_driven_model_fixedLifetime(self):
"""Test Stock Driven Model with Fixed product lifetime."""
np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[0], Stock_TC_FixedLT)
np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[1], Outflow_TC_FixedLT)
np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[2], Inflow_T_FixedLT)
np.testing.assert_array_equal(myDSM2.compute_outflow_total(), Outflow_T_FixedLT)
np.testing.assert_array_equal(myDSM2.compute_stock_change(), StockChange_T_FixedLT)
np.testing.assert_array_equal(myDSM2.check_stock_balance(), Bal.transpose())
def test_inflow_driven_model_normallyDistrLifetime_0(self):
"""Test Inflow Driven Model with Fixed product lifetime of 0."""
np.testing.assert_array_equal(myDSM0n.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape))
np.testing.assert_array_equal(myDSM0n.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0])))
np.testing.assert_array_equal(myDSM0n.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0])))
np.testing.assert_array_equal(myDSM0n.compute_outflow_mb(), Inflow_T_FixedLT)
np.testing.assert_array_equal(myDSM0n.check_stock_balance(), Bal.transpose())
def test_inflow_driven_model_normallyDistLifetime(self):
"""Test Inflow Driven Model with normally distributed product lifetime."""
np.testing.assert_array_almost_equal(myDSM3.compute_s_c_inflow_driven(), Stock_TC_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM3.compute_stock_total(), Stock_T_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM3.compute_o_c_from_s_c(), Outflow_TC_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM3.compute_outflow_total(), Outflow_T_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM3.compute_stock_change(), StockChange_T_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM3.check_stock_balance(), Bal.transpose(), 12)
def test_stock_driven_model_normallyDistLifetime(self):
"""Test Stock Driven Model with normally distributed product lifetime."""
np.testing.assert_array_almost_equal(
myDSM4.compute_stock_driven_model()[0], Stock_TC_NormLT, 8)
np.testing.assert_array_almost_equal(
myDSM4.compute_stock_driven_model()[1], Outflow_TC_NormLT, 8)
np.testing.assert_array_almost_equal(
myDSM4.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8)
np.testing.assert_array_almost_equal(myDSM4.compute_outflow_total(), Outflow_T_NormLT, 8)
np.testing.assert_array_almost_equal(
myDSM4.compute_stock_change(), StockChange_T_NormLT, 8)
np.testing.assert_array_almost_equal(myDSM4.check_stock_balance(), Bal.transpose(), 12)
def test_inflow_driven_model_WeibullDistLifetime(self):
"""Test Inflow Driven Model with Weibull-distributed product lifetime."""
np.testing.assert_array_almost_equal(
myDSMWB1.compute_s_c_inflow_driven(), Stock_TC_WeibullLT, 9)
np.testing.assert_array_almost_equal(myDSMWB1.compute_stock_total(), Stock_T_WeibullLT, 8)
np.testing.assert_array_almost_equal(myDSMWB1.compute_o_c_from_s_c(), Outflow_TC_WeibullLT, 9)
np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9)
np.testing.assert_array_almost_equal(
myDSMWB1.compute_stock_change(), StockChange_T_WeibullLT, 9)
np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12)
def test_stock_driven_model_WeibullDistLifetime(self):
"""Test Stock Driven Model with Weibull-distributed product lifetime."""
np.testing.assert_array_almost_equal(
myDSMWB1.compute_stock_driven_model()[0], Stock_TC_WeibullLT, 8)
np.testing.assert_array_almost_equal(
myDSMWB1.compute_stock_driven_model()[1], Outflow_TC_WeibullLT, 8)
np.testing.assert_array_almost_equal(
myDSMWB1.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8)
np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9)
np.testing.assert_array_almost_equal(
myDSMWB1.compute_stock_change(), StockChange_T_WeibullLT, 8)
np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12)
def test_inflow_from_stock_fixedLifetime(self):
"""Test computation of inflow from stock with Fixed product lifetime."""
np.testing.assert_array_equal(TestInflow_X, Inflow_X)
np.testing.assert_array_equal(myDSMxy.compute_s_c_inflow_driven()[-1, :], InitialStock_X)
def test_inflow_from_stock_normallyDistLifetime(self):
"""Test computation of inflow from stock with normally distributed product lifetime."""
np.testing.assert_array_almost_equal(TestInflow_XX, Inflow_XX, 8)
np.testing.assert_array_almost_equal(myDSMXY.compute_s_c_inflow_driven()[-1, :], InitialStock_XX, 9)
def test_inflow_from_stock_WeibullDistLifetime(self):
"""Test computation of inflow from stock with Weibull-distributed product lifetime."""
np.testing.assert_array_almost_equal(TestInflow_WB, Inflow_WB, 9)
np.testing.assert_array_almost_equal(myDSMWB4.compute_s_c_inflow_driven()[-1, :], InitialStock_WB, 9)
def test_compute_stock_driven_model_initialstock(self):
"""Test stock-driven model with initial stock given."""
np.testing.assert_array_almost_equal(I_InitialStock_2, I_InitialStock_2_Ref, 8)
np.testing.assert_array_almost_equal(Sc_InitialStock_2, Sc_InitialStock_2_Ref, 8)
np.testing.assert_array_almost_equal(Sc_InitialStock_2.sum(axis =1), Sc_InitialStock_2_Ref_Sum, 8)
np.testing.assert_array_almost_equal(Oc_InitialStock_2, Oc_InitialStock_2_Ref, 8)
if __name__ == '__main__':
unittest.main()
|
from common.utils import read_file
from . import food
def main() -> None:
food_list = read_file("d21/data/input.txt", food.parse)
# Part1
result1 = food.count_impossible(food_list)
print(f"Result 1-> {result1} <-1")
# Part2
result2 = food.get_canonical(food_list)
print(f"Result 2-> {result2} <-2")
if __name__ == "__main__":
main()
|
import warnings
import numpy as np
import pandas as pd
from astropy.time import Time
__all__ = [
"UNKNOWN_ID_REGEX",
"preprocessObservations",
]
UNKNOWN_ID_REGEX = "^u[0-9]{12}$"
def preprocessObservations(
observations,
column_mapping,
astrometric_errors=None,
mjd_scale="utc"
):
"""
    Create two separate data frames: one with all observation data needed to run THOR stripped of
object IDs and the other with known object IDs and attempts to attribute unknown observations to
the latest catalog of known objects from the MPC.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing at minimum a column of observation IDs, exposure times in MJD (with scale
set by mjd_scale), RA in degrees, Dec in degrees, 1-sigma error in RA in degrees, 1-sigma error in
Dec in degrees and the observatory code.
column_mapping : dict
Dictionary containing internal column names as keys mapped to column names in the data frame as values.
Should include the following:
{# Internal : # External
"obs_id" : column name or None,
"mjd" : column name,
"RA_deg" : column name,
"Dec_deg" : column name,
"RA_sigma_deg" : column name or None,
"Dec_sigma_deg" : column name or None,
"observatory_code" : column name,
"obj_id" : column name or None,
"mag" : optional, column name or None,
"mag_sigma" : optional, column name or None,
"filter" : optional, column name or None,
"astrometric_catalog" : optional, column name or None,
}
Description of columns and their assumed values:
'obs_id' : column name or None
Observation IDs as type string. If None, THOR will assign
an observation ID to each observation.
'mjd' : column name
Observation time in MJD, the input time scale can be set with the
            'mjd_scale' parameter. The time scale will be converted if not in UTC.
'RA_deg' : column name
Topocentric J2000 Right Ascension in degrees.
'Dec_deg' : column name
Topocentric J2000 Declination in degrees.
'RA_sigma_deg' : column name or None
1-sigma astrometric uncertainty in RA in degrees.
If certain or all observations are missing astrometric errors, use
the 'astrometric_errors' parameter to configure defaults for all observatories
            or for each observatory individually. If None, THOR will use the 'astrometric_errors'
parameter to assign errors.
'Dec_sigma_deg' : column name or None
1-sigma astrometric uncertainty in Dec in degrees.
If certain or all observations are missing astrometric errors, use
the 'astrometric_errors' parameter to configure defaults for all observatories
            or for each observatory individually. If None, THOR will use the 'astrometric_errors'
parameter to assign errors.
'observatory_code' : column name
The MPC observatory code from which each observation was made. THOR currently
only supports ground-based observatories.
'obj_id' : column name or None
If known, the designation in unpacked or packed form. If unknown, object ID should be
set to 'NaN'. If None, THOR will assume no observations have been associated.
'mag' : optional, column name or None
Observed magnitude. Magnitudes are currently unused by THOR but may be convenient to have
for visual inspection of results.
'mag_sigma' : optional, column name or None.
1-sigma photometric uncertainty in magnitudes.
'filter' : optional, column name or None.
The bandpass or filter with which the observation was made.
'astrometric_catalog' : optional, column name or None.
Astrometric catalog with which astrometric measurements were calibrated. Unused by THOR outside of
creating ADES files from recoveries and discoveries.
'night_id' : optional, column_name or None.
            ID representing the night on which an observation was made. Useful for filtering
            observations by night rather than by observation time.
mjd_scale : str, optional
Time scale of the input MJD exposure times ("utc", "tdb", etc...)
Returns
-------
preprocessed_observations : `~pandas.DataFrame`
DataFrame with observations in the format required by THOR.
preprocessed_attributions : `~pandas.DataFrame`
DataFrame containing associations, any observations with no known label
will be assigned a unique unknown ID with regex pattern "^u[0-9]{12}$".
Raises
------
ValueError
If the astrometric_errors parameter is not of type list or dictionary,
or if the errors are not correctly defined.
Warns
-----
UserWarning:
If the observation ID, object_ID, or astrometric error columns are not
present in the column_mapping dictionary.
"""
# Required columns THOR needs
cols = [
"obs_id",
"mjd",
"RA_deg",
"Dec_deg",
"RA_sigma_deg",
"Dec_sigma_deg",
"observatory_code",
"obj_id"
]
# Optional columns that can be used for filtering
# and ADES file production
optional_cols = [
# ADES Columns
"mag",
"mag_sigma",
"filter",
"astrometric_catalog",
# Useful non-ADES columns
"night_id"
]
# Check if observation IDs need to be assigned
assign_obs_ids = False
if column_mapping["obs_id"] == None:
warning = (
"No observation ID column defined in the column_mapping dictionary.\n"
"Assigning observation IDs...\n"
)
warnings.warn(
warning,
UserWarning
)
assign_obs_ids = True
cols.remove("obs_id")
# Check if object IDs need to be assigned
assign_obj_ids = False
if column_mapping["obj_id"] == None:
warning = (
"No object ID column defined in the column_mapping dictionary.\n"
"Assuming no observations have been associated with a known object...\n"
)
warnings.warn(
warning,
UserWarning
)
assign_obj_ids = True
cols.remove("obj_id")
# Check if astrometric errors need to be added
use_astrometric_errors = False
if (column_mapping["RA_sigma_deg"] == None) and (column_mapping["Dec_sigma_deg"] == None):
warning = (
"No astrometric error columns defined in the column_mapping dictionary.\n"
"Using 'astrometric_errors' parameter to assign errors...\n"
)
warnings.warn(
warning,
UserWarning
)
use_astrometric_errors = True
cols.remove("RA_sigma_deg")
cols.remove("Dec_sigma_deg")
# Create a copy of the relevant columns in observations
# Add any optional columns that may have been provided by the user
obs_cols = [column_mapping[c] for c in cols]
added_cols = []
for c in optional_cols:
if c in column_mapping.keys():
obs_cols.append(column_mapping[c])
added_cols.append(c)
preprocessed_observations = observations[obs_cols].copy()
# Rename preprocessed observation columns to those expected by THOR
# (involves inverting the column_mapping dictionary and removing any potential
# None values passed by the user)
column_mapping_inv = {v : k for k, v in column_mapping.items()}
if None in column_mapping_inv.keys():
column_mapping_inv.pop(None)
preprocessed_observations.rename(
columns=column_mapping_inv,
inplace=True
)
if use_astrometric_errors:
        if isinstance(astrometric_errors, list):
            if len(astrometric_errors) != 2:
                err = (
                    "astrometric_errors list is not of length 2."
                )
                raise ValueError(err)
else:
preprocessed_observations.loc[:, "RA_sigma_deg"] = astrometric_errors[0]
preprocessed_observations.loc[:, "Dec_sigma_deg"] = astrometric_errors[1]
        elif isinstance(astrometric_errors, dict):
for code, errors in astrometric_errors.items():
if len(errors) != 2:
err = (
"Astrometric errors for observatory {} should be a list of length 2 with\n"
"the 1-sigma astrometric uncertainty in RA as the first element and the\n"
"1-sigma astrometric uncertainty in Dec as the second element."
)
raise ValueError(err.format(code))
else:
observatory_mask = preprocessed_observations["observatory_code"].isin([code])
preprocessed_observations.loc[observatory_mask, "RA_sigma_deg"] = errors[0]
preprocessed_observations.loc[observatory_mask, "Dec_sigma_deg"] = errors[1]
else:
err = (
"'astrometric_errors' should be one of {None, list, dict}.\n"
"If None, then the given observations must have the ra_sigma_deg\n"
" and dec_sigma_deg columns.\n"
"If a dictionary, then each observatory code present observations in\n"
" the observations must have a corresponding key with a list of length 2\n"
" as their values. The first element in the list is assumed to be the 1-sigma\n"
" astrometric error in RA, while the second is assumed to be the same but in Dec.\n"
"If a list, then the first element in the list is assumed to be the 1-sigma\n"
" astrometric error in RA, while the second is assumed to be the same but in Dec.\n"
" Each observation will be given these errors regardless of if one is present or not.\n"
)
raise ValueError(err)
# Make sure all observations have astrometric errors
missing_codes = preprocessed_observations[(
(preprocessed_observations["RA_sigma_deg"].isna())
| (preprocessed_observations["Dec_sigma_deg"].isna())
)]["observatory_code"].unique()
if len(missing_codes) > 0:
err = (
"Missing astrometric errors for observations from:\n"
" {}\n"
)
raise ValueError(err.format(", ".join(missing_codes)))
# Make sure all observations are given in UTC, if not convert to UTC
if mjd_scale != "utc":
mjds = Time(
preprocessed_observations["mjd"].values,
format="mjd",
scale=mjd_scale
)
preprocessed_observations["mjd"] = mjds.utc.mjd
# Add _utc to mjd column name
preprocessed_observations.rename(
columns={
"mjd" : "mjd_utc"
},
inplace=True
)
# Make sure that the observations are sorted by observation time
preprocessed_observations.sort_values(
by=["mjd_utc"],
inplace=True,
ignore_index=True
)
    # Assign observation IDs if needed
if assign_obs_ids:
preprocessed_observations.loc[:, "obs_id"] = ["obs{:09d}".format(i) for i in range(len(preprocessed_observations))]
else:
if type(preprocessed_observations["obs_id"]) != object:
warn = ("Observation IDs should be of type string, converting...")
warnings.warn(warn)
preprocessed_observations["obs_id"] = preprocessed_observations["obs_id"].astype(str)
# Assign object IDs if needed
if assign_obj_ids:
# This must match UNKNOWN_ID_REGEX
preprocessed_observations.loc[:, "obj_id"] = [f"u{i:012d}" for i in range(len(preprocessed_observations))]
else:
if type(preprocessed_observations["obj_id"]) != object:
warn = ("Object IDs should be of type string, converting...")
warnings.warn(warn)
num_unassociated = len(preprocessed_observations[preprocessed_observations["obj_id"].isna()])
# This must match UNKNOWN_ID_REGEX
preprocessed_observations.loc[preprocessed_observations["obj_id"].isna(), "obj_id"] = [f"u{i:012d}" for i in range(num_unassociated)]
preprocessed_observations["obj_id"] = preprocessed_observations["obj_id"].astype(str)
# Split observations into two dataframes (make THOR run only on completely blind observations)
preprocessed_associations = preprocessed_observations[[
"obs_id",
"obj_id"
]].copy()
cols_sorted = [
"obs_id",
"mjd_utc",
"RA_deg",
"Dec_deg",
"RA_sigma_deg",
"Dec_sigma_deg",
"observatory_code"
]
cols_sorted += added_cols
preprocessed_observations = preprocessed_observations[cols_sorted]
return preprocessed_observations, preprocessed_associations
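# --- Hypothetical usage sketch ---------------------------------------------------------------
# The column names in the toy DataFrame and in `example_column_mapping` below are invented
# purely for illustration and are not part of THOR; the 1-arcsecond default astrometric
# errors are likewise an assumption.
if __name__ == "__main__":
    example_observations = pd.DataFrame({
        "visit_mjd": [59000.2, 59000.1],
        "ra": [10.1, 10.2],
        "dec": [-5.0, -5.1],
        "obscode": ["I11", "I11"],
    })
    example_column_mapping = {
        "obs_id": None,             # let THOR assign observation IDs
        "mjd": "visit_mjd",
        "RA_deg": "ra",
        "Dec_deg": "dec",
        "RA_sigma_deg": None,       # fall back to the astrometric_errors parameter
        "Dec_sigma_deg": None,
        "observatory_code": "obscode",
        "obj_id": None,             # assume nothing has been attributed yet
    }
    obs, assoc = preprocessObservations(
        example_observations,
        example_column_mapping,
        astrometric_errors=[1 / 3600, 1 / 3600],  # 1 arcsecond in degrees for RA and Dec
        mjd_scale="utc",
    )
    print(obs)
    print(assoc)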
|
from evalmate import confusion
from . import evaluator
class EventEvaluation(evaluator.Evaluation):
"""
Result of an evaluation of any event-based alignment.
Arguments:
        utt_to_label_pairs (dict): Dict containing the alignment for every utterance.
            Key is the utterance-id, value is a list of :py:class:`evalmate.alignment.LabelPair`.
Attributes:
ref_outcome (Outcome): The outcome of the ground-truth/reference.
hyp_outcome (Outcome): The outcome of the system-output/hypothesis.
confusion (AggregatedConfusion): Confusion statistics
"""
def __init__(self, ref_outcome, hyp_outcome, utt_to_label_pairs):
super(EventEvaluation, self).__init__(ref_outcome, hyp_outcome)
self.utt_to_label_pairs = utt_to_label_pairs
self.confusion = confusion.create_from_label_pairs(self.label_pairs)
@property
def default_template(self):
return 'event'
@property
def template_data(self):
return {
'evaluation': self,
'ref_outcome': self.ref_outcome,
'hyp_outcome': self.hyp_outcome,
'utt_to_label_pairs': self.utt_to_label_pairs,
'label_pairs': self.label_pairs,
'confusion': self.confusion
}
@property
def label_pairs(self):
"""
Return a list of all label-pairs (from all utterances together).
"""
lp = []
for pairs in self.utt_to_label_pairs.values():
lp.extend(pairs)
return lp
@property
def failing_utterances(self):
"""
Return list of utterance-ids that are not correct.
"""
utt_ids = []
for utt_idx, pairs in self.utt_to_label_pairs.items():
is_ok = True
for pair in pairs:
if pair.ref is None or pair.hyp is None:
is_ok = False
elif pair.ref.value != pair.hyp.value:
is_ok = False
if not is_ok:
utt_ids.append(utt_idx)
return utt_ids
@property
def correct_utterances(self):
"""
Return list of utterance-ids that are correct.
"""
failing = self.failing_utterances
correct = set(self.utt_to_label_pairs.keys()) - set(failing)
return list(correct)
class EventEvaluator(evaluator.Evaluator):
"""
Class to compute evaluation results for any event-based alignment.
Arguments:
aligner (EventAligner): An instance of an event-aligner to use.
"""
def __init__(self, aligner):
self.aligner = aligner
@classmethod
def default_label_list_idx(cls):
return 'word-transcript'
def do_evaluate(self, ref, hyp):
utt_to_label_pairs = self.create_alignment(ref, hyp)
return EventEvaluation(ref, hyp, utt_to_label_pairs)
def create_alignment(self, ref, hyp):
utt_to_label_pairs = {}
for utterance_idx, ll_ref in ref.label_lists.items():
ll_hyp = hyp.label_lists[utterance_idx]
utt_to_label_pairs[utterance_idx] = self.aligner.align(ll_ref.labels, ll_hyp.labels)
return utt_to_label_pairs
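# Minimal illustration of the failing/correct split used by EventEvaluation above. The
# namedtuples are stand-ins invented for this sketch; the real evalmate Label/LabelPair
# classes are richer, but only `ref`, `hyp` and `value` matter here.
if __name__ == "__main__":
    from collections import namedtuple
    Label = namedtuple("Label", ["value"])
    LabelPair = namedtuple("LabelPair", ["ref", "hyp"])
    utt_to_label_pairs = {
        "utt-1": [LabelPair(Label("yes"), Label("yes"))],   # match        -> correct
        "utt-2": [LabelPair(Label("yes"), Label("no"))],    # substitution -> failing
        "utt-3": [LabelPair(Label("yes"), None)],           # deletion     -> failing
    }
    failing = [
        utt for utt, pairs in utt_to_label_pairs.items()
        if any(p.ref is None or p.hyp is None or p.ref.value != p.hyp.value for p in pairs)
    ]
    print(failing)  # ['utt-2', 'utt-3']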
|
# Generated by Django 3.1a1 on 2020-06-03 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20200603_1029'),
]
operations = [
migrations.CreateModel(
name='Programme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('short_name', models.CharField(max_length=10)),
('code', models.CharField(blank=True, max_length=255)),
],
),
migrations.AddField(
model_name='school',
name='programmes',
field=models.ManyToManyField(blank=True, null=True, related_name='schools', to='core.Programme'),
),
]
|
import json
import logging
import pandas as pd
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import bif
from iotfunctions.metadata import EntityType, BaseCustomEntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
EngineLogging.configure_console_logging(logging.DEBUG)
# replace with a credentials dictionary or provide a credentials file
with open('/Users/ryan/watson-iot/functions/scripts/credentials_as_dev.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db = Database(credentials=credentials)
db_schema = None # set if you are not using the default
table = db.get_table('MIKE_ROBOT_JUNE_25')
dim = db.get_table('MIKE_ROBOT_JUNE_25_DIMENSION')
group_by = {'plant_abv': func.left(table.c['plant_code'], 3), 'manufacturer': dim.c['manufacturer']}
aggs = {'avg_speed': (table.c['speed'], func.avg)}
def prepare_aggregate_query(group_by, aggs):
# build a sub query.
sargs = []
for alias, expression in list(group_by.items()):
sargs.append(expression.label(alias))
for alias, (metric, agg) in list(aggs.items()):
sargs.append(metric.label(alias))
db.start_session()
query = db.session.query(*sargs)
return query
def build_aggregate_query(subquery, group_by, aggs):
# turn the subquery into a selectable
subquery = subquery.subquery('a').selectable
# build an aggregation query
args = []
grp = []
for alias, expression in list(group_by.items()):
args.append(subquery.c[alias])
grp.append(subquery.c[alias])
for alias, (metric, agg) in list(aggs.items()):
args.append(agg(subquery.c[alias]).label(alias))
query = db.session.query(*args)
query = query.group_by(*grp)
return query
sub = prepare_aggregate_query(group_by=group_by, aggs=aggs)
sub = sub.join(dim, dim.c['deviceid'] == table.c['deviceid'])
query = build_aggregate_query(sub, group_by, aggs)
print(query)
|
from .Scheduler import Scheduler
from .MongoConn import MongoConn
from .Catalog import Catalog, Course
from .Response import Response
from .SlackConn import SlackConn
from .Output import output
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
import numpy as np
import pandas as pd
import networkx as nx
from copy import deepcopy
import pandapower as pp
from pandapower.shortcircuit import calc_sc
from pandapower.create import _get_index_with_check
from pandapower.topology import create_nxgraph
__all__ = ["detect_power_station_unit", "calc_sc_on_line"]
def detect_power_station_unit(net, mode="auto",
max_gen_voltage_kv=80, max_distance_km=0.01):
"""
Identifies the power station units configuration (gen and trafo) according to IEC 60909.
Updates the power_station_trafo in the gen table
INPUT:
        **net** - pandapower net
        **mode** (str) - one of ("auto", "trafo")
        **max_gen_voltage_kv** (float)
**max_distance_km** (float)
"""
logger.info("This function will overwrites the value 'power_station_trafo' in gen table")
net.gen["power_station_trafo"] = np.nan
required_gen = net.gen.loc[net.bus.loc[net.gen.bus.values, "vn_kv"].values < max_gen_voltage_kv,:]
gen_bus = required_gen.loc[:, "bus"].values
if mode.lower() == "auto":
required_trafo = net.trafo.loc[net.bus.loc[net.trafo.lv_bus.values, "vn_kv"].values < max_gen_voltage_kv, :]
elif mode.lower() == "trafo":
if "power_station_unit" in net.trafo.columns:
required_trafo = net.trafo.loc[net.trafo.power_station_unit, :]
else:
logger.warning("Using mode 'trafo' requires 'power_station_unit' defined for trafo! Using 'auto' mode instead!")
required_trafo = net.trafo.loc[net.bus.loc[net.trafo.lv_bus.values, "vn_kv"].values < max_gen_voltage_kv, :]
else:
raise UserWarning(f"Unsupported modes: {mode}")
trafo_lv_bus = net.trafo.loc[required_trafo.index, "lv_bus"].values
trafo_hv_bus = net.trafo.loc[required_trafo.index, "hv_bus"].values
g = create_nxgraph(net, respect_switches=True,
nogobuses=None, notravbuses=trafo_hv_bus)
for t_ix in required_trafo.index:
t_lv_bus = required_trafo.at[t_ix, "lv_bus"]
bus_dist = pd.Series(nx.single_source_dijkstra_path_length(g, t_lv_bus, weight='weight'))
connected_bus_at_lv_side = bus_dist[bus_dist < max_distance_km].index.values
gen_bus_at_lv_side = np.intersect1d(connected_bus_at_lv_side, gen_bus)
if len(gen_bus_at_lv_side) == 1:
# Check parallel trafo
if not len(np.intersect1d(connected_bus_at_lv_side, trafo_lv_bus)) == 1:
raise UserWarning("Failure in power station units detection! Parallel trafos on generator detected!")
if np.in1d(net.gen.bus.values, gen_bus_at_lv_side).sum() > 1:
raise UserWarning("More than 1 gen detected at the lv side of a power station trafo")
net.gen.loc[np.in1d(net.gen.bus.values, gen_bus_at_lv_side),
"power_station_trafo"] = t_ix
def _create_element_from_exisiting(net, ele_type, ele_ix):
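    # Append a copy of an existing element row (e.g. a line or a switch) to the corresponding
    # element table and return the index of the newly created row.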
net[ele_type] = net[ele_type].append(pd.Series(net[ele_type].loc[ele_ix, :].to_dict(),
name=_get_index_with_check(net, ele_type, None)))
return net[ele_type].index.to_numpy()[-1]
def _create_aux_net(net, line_ix, distance_to_bus0):
if distance_to_bus0 < 0 or distance_to_bus0 > 1:
raise UserWarning("Calculating SC current on line failed! distance_to_bus0 must be between 0-1!")
aux_net = deepcopy(net)
# Create auxiliary bus
aux_bus = pp.create_bus(aux_net, vn_kv=aux_net.bus.at[aux_net.line.at[line_ix, "from_bus"], "vn_kv"],
name="aux_bus_sc_calc")
    # Create auxiliary lines while preserving the original line's index
aux_line0 = _create_element_from_exisiting(aux_net, "line", line_ix)
aux_line1 = _create_element_from_exisiting(aux_net, "line", line_ix)
## Update distance and auxiliary bus
aux_net.line.at[aux_line0, "length_km"] = distance_to_bus0 * aux_net.line.at[line_ix, "length_km"]
aux_net.line.at[aux_line0, "to_bus"] = aux_bus
aux_net.line.at[aux_line0, "name"] += "_aux_line0"
aux_net.line.at[aux_line1, "length_km"] = (1 - distance_to_bus0) * aux_net.line.at[line_ix, "length_km"]
aux_net.line.at[aux_line1, "from_bus"] = aux_bus
aux_net.line.at[aux_line1, "name"] += "_aux_line1"
## Disable original line
aux_net.line.at[line_ix, "in_service"] = False
## Update line switch
for switch_ix in aux_net.switch.query(f" et == 'l' and element == {line_ix}").index:
aux_switch_ix = _create_element_from_exisiting(aux_net, "switch", switch_ix)
if aux_net.switch.at[aux_switch_ix, "bus"] == aux_net.line.at[line_ix, "from_bus"]:
# The from side switch connected to aux_line0
aux_net.switch.at[aux_switch_ix, "element"] = aux_line0
else:
# The to side switch connected to aux_line1
aux_net.switch.at[aux_switch_ix, "element"] = aux_line1
return aux_net, aux_bus
def calc_sc_on_line(net, line_ix, distance_to_bus0, **kwargs):
"""
    Calculate the short circuit at a given position along a line; returns a modified network
    with the short-circuit calculation results and the auxiliary fault bus added
    INPUT:
        **net** - pandapower net
        **line_ix** (int) - The index of the line on which the short circuit is placed
        **distance_to_bus0** (float) - The relative position of the short circuit along the line, between 0 and 1 (measured from the from_bus)
    OPTIONAL:
        **kwargs** - the parameters required for the pandapower calc_sc function
"""
# Update network
aux_net, aux_bus = _create_aux_net(net, line_ix, distance_to_bus0)
pp.rundcpp(aux_net)
calc_sc(aux_net, bus=aux_bus, **kwargs)
# Return the new net and the aux bus
return aux_net, aux_bus
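# Hypothetical usage sketch (not part of pandapower): builds a tiny 20 kV example network and
# places a fault halfway along the line. The std_type and short-circuit parameters below are
# illustrative values only.
if __name__ == "__main__":
    example_net = pp.create_empty_network()
    b0 = pp.create_bus(example_net, vn_kv=20.)
    b1 = pp.create_bus(example_net, vn_kv=20.)
    pp.create_ext_grid(example_net, b0, s_sc_max_mva=100., rx_max=0.1)
    example_line = pp.create_line(example_net, b0, b1, length_km=2.0,
                                  std_type="NA2XS2Y 1x185 RM/25 12/20 kV")
    sc_net, fault_bus = calc_sc_on_line(example_net, example_line, distance_to_bus0=0.5)
    # Initial symmetrical short-circuit current at the fault location
    logger.info(sc_net.res_bus_sc.loc[fault_bus, "ikss_ka"])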
|
import os
# setting for the searchable path
curr_dir = os.getcwd()
curr_dir = curr_dir.replace('tasks', '')
"""
Data dimensions of the dataset in use
Assumption: all of the samples have the same size
Note: data are not down or up-sampled
"""
DATA_DIMENSION = {'height': 256,
'width': 256,
'classes': 5}
# print neural network
SHOW_MODEL = False
# directory for saving weights
weights_dir = curr_dir + '/models/'
|
#!/usr/bin/env python
#Communicate with end devices via LoRa.
#Communicate with server via MQTT(hbmqtt) and HTTP POST.
#Save data in the sqlite database.
#Parse JSON from MQTT and LoRa protocol.
#Communication module: LoRa.
#Communication method with device via LoRa.
#Uart port drive LoRa module.
#Parse JSON between device and gateway via LoRa channel.
#LoRa module: E32-TTL-100
#Pin specification:
#M0 <--> GPIO(OUT) #mode setting, connect to GND is OK! (Low)
#M1 <--> GPIO(OUT) #mode setting, connect to GND is OK! (Low)
#RXD <--> 8(TXD) #ttyS0
#TXD <--> 10(RXD) #ttyS0
#AUX <--> GPIO/INT(IN) #module status detecting
#VCC
#GND
#You need to install pyserial manually, install command is below:
#pip install pyserial
import serial
import time
import json
#ser = serial.Serial("/dev/ttyS0", 9600)
ser = serial.Serial("/dev/ttyS0", 9600, timeout=0.2)
def main():
while True:
#Waiting for LoRa module message from uart port.
count = ser.inWaiting()
if count != 0:
            recv = ser.readline()  #readline() needs a timeout set, otherwise it blocks
ser.flushInput()
print(recv)
json_lora = json.loads(recv)
#Parse JSON
#print(json_lora.get("ID"))
#print(json_lora["ID"])
#if json_lora.get("ID") == '1' : #Device ID-1 existed in gateway database
if int(json_lora.get("ID")) == 1 : #Device ID-1 existed in gateway database
if json_lora.get("CMD") == 'Online':
response = '{"ID":"1", "CMD":"Online", "TYPE":"Light2", "VALUE":"On"}'
print(response)
                    ser.write(response.encode())  #pyserial expects bytes under Python 3
elif json_lora.get("CMD") == 'Env':
if json_lora.get("TYPE") == 'moisture':
if int(json_lora.get("VALUE")) < 2000: # soil moisture is lower than standard
response = '{"ID":"1", "CMD":"irrigate", "TYPE":"Open", "VALUE":"100"}'
                            ser.write(response.encode())  #pyserial expects bytes under Python 3
else:
print('init_device')
#init_device() #Create sqlite table for device 1.
time.sleep(0.1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
if ser != None:
ser.close()
|
# Review of lesson 1
# Printing to the screen: print
print("Moikka")
# Arithmetic operations
print(5 + 7)
# What does this print?
print(9 + 9)
# What does this print?
print("9 + 9")
# Comment
# This is a comment
|
# Generated by Django 2.2.2 on 2019-07-08 19:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_import_data', '0017_auto_20190708_1504'),
]
operations = [
migrations.RemoveField(
model_name='modelimportattempt',
name='row_data',
),
]
|
__all__=["Enigma","Rotor","Reflector"]
|
from selenium import webdriver
from bs4 import BeautifulSoup
from time import sleep
from common.flags import GlobalState
from modules.scrappers.scrapper import Scrapper
class NorwegianScrapper(Scrapper):
def __init__(self):
super().__init__("norwegian")
self.url_base = "https://www.norwegian.com/es/reserva/reserve-su-vuelo/precios-baratos/?A_City={}" \
"&AdultCount=1&ChildCount=0&CurrencyCode=EUR&D_City={}&D_Day=02&D_Month={}" \
"&D_SelectedDay=02&IncludeTransit=true&InfantCount=0&R_Day=18&R_Month={}&TripType=1" \
"&mode=ab#/?origin={}&destination={}&outbound={}&adults=1&oneWay=true¤cy=EUR"
def do_scrapping(self, norweigan_scrap_data):
# --- OBTAIN HTML DATA ---
        month_aux = "%02d" % norweigan_scrap_data.month
date = "{}{}".format(norweigan_scrap_data.year, month_aux)
date2 = "{}-{}".format(norweigan_scrap_data.year, month_aux)
url_for = self.url_base.format(norweigan_scrap_data.dest, norweigan_scrap_data.ori, date, date,
norweigan_scrap_data.ori, norweigan_scrap_data.dest, date2)
driver = webdriver.Firefox()
driver.get(url_for)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "html.parser")
calendar = soup.find("div", {"class": "lowfare-calendar-combo__content"})
if calendar is not None:
# --- EXTRACT PRICES ---
prices = list()
list_row = soup.findAll("tr", {"class": "lowfare-calendar__row lowfare-calendar__row--animate"})
for tr in list_row:
list_td = tr.findAll("td", {"class": "lowfare-calendar__cell"})
for td in list_td:
dummy_list = td.findAll("div", {"class": "lowfare-calendar__item--dummy"})
if len(dummy_list) == 0:
button = td.button
# Get day
span = button.find("span", {"class": "lowfare-calendar__date"})
day = span.get_text().replace(".", "")
price = 0
not_empty = "lowfare-calendar__item--empty" not in button["class"]
if not_empty:
price_element = button.find("strong", {"class": "lowfare-calendar__price"})
if price_element is not None:
price_text = price_element.get_text().strip().replace(".", "").replace(",", ".")
price = float(price_text)
prices.append(price)
# --- STORE INFO ---
for i in range(1, len(prices)+1):
self.store_info_journey(norweigan_scrap_data, i, prices[i - 1])
driver.close()
GlobalState.finished_scrapper()
# if __name__ == "__main__":
#
# if len(sys.argv) != 6:
# print("Usage: python testSky.py ori dest adults year month")
# print("Example: python testSky.py mad krk 2 18 9")
# else:
# airport_ori = sys.argv[1]
# airport_dest = sys.argv[2]
# num_adults = sys.argv[3]
# year = int(sys.argv[4])
# month = int(sys.argv[5])
# skyscanner_scrap_data = SkyscannerScrapData(airport_ori, airport_dest, num_adults, year, month)
# scrap_skyscanner(skyscanner_scrap_data)
|
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from poliastro.atmosphere import COESA76
from poliastro.atmosphere.coesa76 import p_coeff, rho_coeff
coesa76 = COESA76()
def test_outside_altitude_range_coesa76():
with pytest.raises(ValueError) as excinfo:
r0 = 6356.766 * u.km
coesa76._check_altitude(1001 * u.km, r0)
assert (
"ValueError: Geometric altitude must be in range [0.0 km, 1000.0 km]"
in excinfo.exconly()
)
def test_get_index_coesa76():
expected_i = 7
z = 86 * u.km
i = coesa76._get_index(z, coesa76.zb_levels)
assert i == expected_i
def test_coefficients_over_86km():
# Expected pressure coefficients
expected_p = [9.814674e-11, -1.654439e-07, 1.148115e-04, -0.05431334, -2.011365]
expected_rho = [1.140564e-10, -2.130756e-07, 1.570762e-04, -0.07029296, -12.89844]
assert coesa76._get_coefficients_avobe_86(350 * u.km, p_coeff) == expected_p
assert coesa76._get_coefficients_avobe_86(350 * u.km, rho_coeff) == expected_rho
# SOLUTIONS DIRECTLY TAKEN FROM COESA76 REPORT
coesa76_solutions = {
0.5 * u.km: [284.90 * u.K, 9.5461e2 * u.mbar, 1.1673 * u.kg / u.m ** 3],
1.0 * u.km: [281.651 * u.K, 8.9876e2 * u.mbar, 1.1117 * u.kg / u.m ** 3],
10 * u.km: [223.252 * u.K, 2.6499e2 * u.mbar, 4.1351e-1 * u.kg / u.m ** 3],
77 * u.km: [204.493 * u.K, 1.7286e-2 * u.mbar, 2.9448e-5 * u.kg / u.m ** 3],
86 * u.km: [186.87 * u.K, 3.7338e-3 * u.mbar, 6.958e-6 * u.kg / u.m ** 3],
92 * u.km: [186.96 * u.K, 1.2887e-3 * u.mbar, 2.393e-6 * u.kg / u.m ** 3],
230 * u.km: [915.78 * u.K, 3.9276e-7 * u.mbar, 1.029e-10 * u.kg / u.m ** 3],
1000 * u.km: [1000.0 * u.K, 7.5138e-11 * u.mbar, 3.561e-15 * u.kg / u.m ** 3],
}
@pytest.mark.parametrize("z", coesa76_solutions.keys())
def test_properties_coesa76(z):
# Get expected values from official data
expected_T = coesa76_solutions[z][0]
expected_p = coesa76_solutions[z][1]
expected_rho = coesa76_solutions[z][2]
T, p, rho = coesa76.properties(z)
assert_quantity_allclose(T, expected_T, rtol=1e-4)
assert_quantity_allclose(p, expected_p, rtol=1e-4)
assert_quantity_allclose(rho, expected_rho, rtol=1e-3)
|
"""This is the entry point into the Mesh application"""
import argparse
import logging
import sys
from mesh.configuration import get_config
from mesh.interactive import first_run_setup
def setup_args():
"""Initialize the CLI argument parser"""
parser = argparse.ArgumentParser(
prog='Mesh',
description='Two-way synchronization between Plex and Trakt')
parser.add_argument(
'--log_level',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='warning')
return parser.parse_args()
def setup_logging(level):
"""Initialize logging"""
logging.basicConfig(level=logging.getLevelName(level.upper()))
def run():
"""Run the application"""
args = setup_args()
setup_logging(args.log_level)
config = get_config()
if config.is_new:
print('A new config file has been generated. Fill in the missing '
'fields and rerun the application')
# sys.exit()
identifier, token = first_run_setup()
config.plex_serveridentifier = identifier
config.plex_serverownertoken = token
config.save()
run()
|
# MIT License
#
# Copyright (c) [2018] [Victor Manuel Cajes Gonzalez - vcajes@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from decimal import Decimal
from bancardconnectorpython.exceptions import BancardAPIInvalidParameterException
# number of decimals for an amount of a given currency
CURRENCIES_DECIMALS = {"PYG": 0}
def is_python_version_greater_igual_than_3x():
"""
Returns True if the Python version that runs this library is greater or equal than 3.x
    :return: a boolean that states whether the python version is >= 3.x
:rtype bool
"""
return sys.version_info >= (3,)
def merge_dict(first_dict, *next_dicts):
"""
Returns the merge of all the dictionaries received as input parameters.
:param first_dict: one dictionary
:type first_dict: dict
    :param next_dicts: list of dictionaries to be merged with first_dict
:type next_dicts: dict
:return: the merged dictionary
:rtype dict
"""
result_dict = dict()
for curr_dict in (first_dict,) + next_dicts:
result_dict.update(curr_dict)
return result_dict
def currency_decimal_to_string(currency, decimal_value):
"""
Returns the amount in a string format depending on the number of decimals of a given currency.
:param currency: the currency of the decimal_value
:type currency: str
:param decimal_value: the Decimal value
:type decimal_value: Decimal
:return: the string that represents the decimal_value with the proper number of decimals depending on the currency
:rtype str
"""
if currency not in CURRENCIES_DECIMALS:
raise BancardAPIInvalidParameterException("The currency is not allowed.")
if not isinstance(decimal_value, Decimal):
raise BancardAPIInvalidParameterException("The amount is not a Decimal value.")
decimals = CURRENCIES_DECIMALS[currency] if currency in CURRENCIES_DECIMALS else 2
ret = ("%." + str(decimals) + "f") % decimal_value
return ret
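# Illustrative usage of currency_decimal_to_string (PYG is configured above with 0 decimals):
if __name__ == "__main__":
    print(currency_decimal_to_string("PYG", Decimal("150000")))  # -> "150000"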
|
from __future__ import absolute_import, division, print_function
import time
from mmtbx import monomer_library
import mmtbx.refinement.real_space.fit_residue
import iotbx.pdb
from mmtbx.rotamer.rotamer_eval import RotamerEval
import mmtbx.utils
import sys
from libtbx import easy_pickle
import libtbx.load_env
from six.moves import range
from libtbx import easy_mp
from libtbx import group_args
import math
mon_lib_srv = monomer_library.server.server()
rotamer_eval = RotamerEval()
pdb_files = [
"ala.pdb",
"asn.pdb",
"asp.pdb",
"cys.pdb",
"gln.pdb",
"glu.pdb",
"gly.pdb",
"his.pdb",
"ile.pdb",
"leu.pdb",
"met.pdb",
"mse.pdb", # is ignored with rotamer named None
"phe.pdb",
"pro.pdb", # BAD all-rotamers files
"ser.pdb",
"thr.pdb",
"trp.pdb",
"tyr.pdb",
"val.pdb",
"arg.pdb",
"lys.pdb"
]
def get_nested_loop(n, fine, start=0, end=360):
assert n >= 1 and n<=4
result = []
#
if(fine):
if (n in [1,2]): step=1
elif (n == 3): step=3
else:
step = 10
#
if(n==1):
for a1 in range(start,end+step,step):
result.append([a1])
elif(n==2):
for a1 in range(start,end+step,step):
for a2 in range(start,end+step,step):
result.append([a1, a2])
elif(n==3):
for a1 in range(start,end+step,step):
for a2 in range(start,end+step,step):
for a3 in range(start,end+step,step):
result.append([a1, a2, a3])
elif(n==4):
if(fine):
for a1 in range(start,end+7,7):
for a2 in range(start,end+8,8):
for a3 in range(start,end+9,9):
for a4 in range(start,end+10,10):
result.append([a1, a2, a3, a4])
else:
for a1 in range(start,end+step,step):
for a2 in range(start,end+step,step):
for a3 in range(start,end+step,step):
for a4 in range(start,end+step,step):
result.append([a1, a2, a3, a4])
return result
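# Quick sanity check of the coarse grid (fine=False uses a 10-degree step), kept as a comment
# so it adds no work to this script:
#   get_nested_loop(n=2, fine=False) yields 37 * 37 = 1369 [chi1, chi2] pairs covering
#   0, 10, ..., 360 degrees in both dimensions.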
def get_clusters_and_angles(file_name, fine):
path=libtbx.env.find_in_repositories("mmtbx/idealized_aa_residues/data")
pdb_inp = iotbx.pdb.input(file_name=path+"/"+file_name)
pdb_hierarchy = pdb_inp.construct_hierarchy()
xrs = pdb_inp.xray_structure_simple()
residue = pdb_hierarchy.only_residue()
clusters = mmtbx.refinement.real_space.aa_residue_axes_and_clusters(
residue = residue,
mon_lib_srv = mon_lib_srv,
backbone_sample = False).clusters
if(len(clusters)==0): return None,None
nested_loop = get_nested_loop(n=len(clusters), fine=fine)
return clusters, nested_loop
def chunker(x, dim):
return (x[i::dim] for i in range(dim))
def run_one(args):
clusters, chunk, file_name, include, collect_states = args
#
path=libtbx.env.find_in_repositories("mmtbx/idealized_aa_residues/data")
pdb_inp = iotbx.pdb.input(file_name=path+"/"+file_name)
pdb_hierarchy = pdb_inp.construct_hierarchy()
xrs = pdb_inp.xray_structure_simple()
residue = pdb_hierarchy.only_residue()
ri = mmtbx.refinement.real_space.fit_residue.get_rotamer_iterator(
mon_lib_srv = mon_lib_srv,
residue = residue)
if(len(clusters)==0): return
for rotamer, rotamer_sites_cart in ri:
residue.atoms().set_xyz(rotamer_sites_cart)
xrs = xrs.replace_sites_cart(rotamer_sites_cart)
if(collect_states):
states = mmtbx.utils.states(
xray_structure=xrs, pdb_hierarchy=pdb_hierarchy)
else:
states = None # Collecting states with multiprocessing won't work!
good_angles = mmtbx.refinement.real_space.generate_angles_nested(
clusters = clusters,
residue = residue,
rotamer_eval = rotamer_eval,
nested_loop = chunk,
include = include,
states = states)
break
#
good_angles_ = []
for chi in good_angles:
chi_ = []
for ch in chi:
chi_.append(ch*math.pi/180)
good_angles_.append(chi_)
good_angles = good_angles_
#
return group_args(good_angles = good_angles, states = states)
def exercise(file_name, include, NPROC=96):
fine = False
if(len(include)==2): fine = True
suffix = "_".join([s.lower() for s in include])
clusters, nested_loop = get_clusters_and_angles(file_name=file_name, fine=fine)
if(clusters is None): return
chunks = list(chunker(nested_loop, NPROC))
tt = 0
if(NPROC>1):
argss = []
for chunk in chunks:
argss.append([clusters, chunk, file_name, include, False])
stdout_and_results = easy_mp.pool_map(
processes = NPROC,
fixed_func = run_one,
args = argss,
func_wrapper = "buffer_stdout_stderr")
good_angles = []
for result in stdout_and_results:
good_angles.extend(result[1].good_angles)
states = None # Undefined if multiprocessing is used
else:
t0 = time.time()
args = [clusters, nested_loop, file_name, include, True]
result = run_one(args)
good_angles = result.good_angles
states = result.states
tt = time.time()-t0
if(states is not None):
states.write(file_name="%s_%s.pdb"%(file_name[:-4],suffix))
print("file_name, n_clusters, n_good_angles, total:", file_name, \
len(clusters), len(good_angles), len(nested_loop), tt)
sys.stdout.flush()
easy_pickle.dump(
file_name="%s_%s.pkl"%(file_name[:-4], suffix),
obj=good_angles)
if(__name__ == "__main__"):
for it in [["FAVORED","ALLOWED"], ["FAVORED"]]:
print (it, "-"*20)
if(len(sys.argv[1:])==1):
exercise(file_name=sys.argv[1:][0], include=it)
else:
for fn in pdb_files:
exercise(file_name=fn, include=it)
|
"""Setup for deniable."""
import os
import codecs
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""Return multiple read calls to different readable objects as a single
string."""
return codecs.open(os.path.join(HERE, *parts), 'r').read()
LONG_DESCRIPTION = read('README.rst')
setup(name='deniable',
version='v1.0.1',
description='Deniable encryption application of a RSA cryptosystem',
long_description=LONG_DESCRIPTION,
url='https://github.com/victormn/rsa-deniable-encryption',
author='Victor Nunes',
author_email='victor95nunes@gmail.com',
tests_require=['pytest', 'pytest-cov', 'python-coveralls'],
install_requires=['pycrypto>=2.6.1'],
license='MIT',
entry_points={
'console_scripts': [
'deniablecollision = deniable.scripts.deniablecollision:main',
'deniablekeys = deniable.scripts.deniablekeys:main',
'deniablersa = deniable.scripts.deniablersa:main',
],
},
packages=['deniable', 'deniable.scripts'],
include_package_data=True,
platforms='any',
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
import os, functools, math, sys, itertools, re, time, threading, xlwt, configparser
import numpy as np
import tkinter as tk
from xlwt import Workbook
from tkinter import ttk, filedialog, messagebox
#CONSTANTS
# Data from: Pyykko, P. and Atsumi, M., Chem. Eur. J. 2009, 15, 186.
element_radii=[
["None",None],['H' , 32],['He' , 46],['Li' , 133],['Be' , 102],['B' , 85],['C' , 75],
['N' , 71],['O' , 63],['F' , 64],['Ne' , 67],['Na' , 155],['Mg' , 139],['Al' , 126],
['Si' , 116],['P' , 111],['S' , 103],['Cl' , 99],['Ar' , 96],['K' , 196],['Ca' , 171],
['Sc' , 148],['Ti' , 136],['V' , 134],['Cr' , 122],['Mn' , 119],['Fe' , 116],['Co' , 111],
['Ni' , 110],['Cu' , 112],['Zn' , 118],['Ga' , 124],['Ge' , 121],['As' , 121],['Se' , 116],
['Br' , 114],['Kr' , 117],['Rb' , 210],['Sr' , 185],['Y' , 163],['Zr' , 154],['Nb' , 147],
['Mo' , 138],['Tc' , 128],['Ru' , 125],['Rh' , 125],['Pd' , 120],['Ag' , 128],['Cd' , 136],
['In' , 142],['Sn' , 140],['Sb' , 140],['Te' , 136],['I' , 133],['Xe' , 131],['Cs' , 232],
['Ba' , 196],['La' , 180],['Ce' , 163],['Pr' , 176],['Nd' , 174],['Pm' , 173],['Sm' , 172],
['Eu' , 168],['Gd' , 169],['Tb' , 168],['Dy' , 167],['Ho' , 166],['Er' , 165],['Tm' , 164],
['Yb' , 170],['Lu' , 162],['Hf' , 152],['Ta' , 146],['W' , 137],['Re' , 131],['Os' , 129],
['Ir' , 122],['Pt' , 123],['Au' , 124],['Hg' , 133],['Tl' , 144],['Pb' , 144],['Bi' , 151],
['Po' , 145],['At' , 147],['Rn' , 142],['Fr' , 223],['Ra' , 201],['Ac' , 186],['Th' , 175],
['Pa' , 169],['U' , 170],['Np' , 171],['Pu' , 172],['Am' , 166],['Cm' , 166],['Bk' , 168],
['Cf' , 168],['Es' , 165],['Fm' , 167],['Md' , 173],['No' , 176],['Lr' , 161],['Rf' , 157],
['Db' , 149],['Sg' , 143],['Bh' , 141],['Hs' , 134],['Mt' , 129],['Ds' , 128],['Rg' , 121],
['Cn' , 122],['Nh' , 136],['Fl' , 143],['Mc' , 162],['Lv' , 175],['Ts' , 165],['Og' , 157]]
elements = tuple(i[0] for i in element_radii)
keywords = \
['1-bromo-2-methylpropane', '1-bromooctane', '1-bromopentane', '1-bromopropane', '1-butanol',
'1-chlorohexane', '1-chloropentane', '1-chloropropane', '1-decanol', '1-fluorooctane', '1-heptanol',
'1-hexanol', '1-hexene', '1-hexyne', '1-iodobutane', '1-iodohexadecane', '1-iodopentane',
'1-iodopropane', '1-nitropropane', '1-nonanol', '1-pentanol', '1-pentene', '1-propanol',
'1-trichloroethane', '2-bromopropane', '2-butanol', '2-chlorobutane', '2-dibromoethane',
'2-dichloroethene', '2-dimethylcyclohexane', '2-ethanediol', '2-heptanone', '2-hexanone',
'2-methoxyethanol', '2-methyl-1-propanol', '2-methyl-2-propanol', '2-methylpentane',
'2-methylpyridine', '2-nitropropane', '2-octanone', '2-pentanone', '2-propanol', '2-propen-1-ol',
'2-trichloroethane', '2-trifluoroethanol', '3-methylpyridine', '3-pentanone', '4-dimethylpentane',
'4-dimethylpyridine', '4-dioxane', '4-heptanone', '4-methyl-2-pentanone', '4-methylpyridine',
'4-trimethylbenzene', '4-trimethylpentane', '5-nonanone', '6-dimethylpyridine', 'a-chlorotoluene',
'aceticacid', 'acetone', 'acetonitrile', 'acetophenone', 'allcheck', 'aniline', 'anisole', 'apfd',
'argon', 'b1b95', 'b1lyp', 'b3lyp', 'b3p86', 'b3pw91', 'b971', 'b972', 'b97d', 'b97d3', 'benzaldehyde',
'benzene', 'benzonitrile', 'benzylalcohol', 'betanatural', 'bhandh', 'bhandhlyp', 'bromobenzene',
'bromoethane', 'bromoform', 'butanal', 'butanoicacid', 'butanone', 'butanonitrile', 'butylamine',
'butylethanoate', 'calcall', 'calcfc', 'cam-b3lyp', 'carbondisulfide', 'carbontetrachloride',
'cartesian', 'checkpoint', 'chkbasis', 'chlorobenzene', 'chloroform', 'cis-1', 'cis-decalin',
'connectivity', 'counterpoise', 'cyclohexane', 'cyclohexanone', 'cyclopentane', 'cyclopentanol',
'cyclopentanone', 'd95v', 'decalin-mixture', 'def2qzv', 'def2qzvp', 'def2qzvpp', 'def2sv', 'def2svp',
'def2svpp', 'def2tzv', 'def2tzvp', 'def2tzvpp', 'density', 'densityfit', 'dibromomethane',
'dibutylether', 'dichloroethane', 'dichloromethane', 'diethylamine', 'diethylether', 'diethylsulfide',
'diiodomethane', 'diisopropylether', 'dimethyldisulfide', 'dimethylsulfoxide', 'diphenylether',
'dipropylamine', 'e-2-pentene', 'empiricaldispersion', 'ethanethiol', 'ethanol', 'ethylbenzene',
'ethylethanoate', 'ethylmethanoate', 'ethylphenylether', 'extrabasis', 'extradensitybasis', 'finegrid',
'fluorobenzene', 'formamide', 'formicacid', 'freq', 'full', 'gd3bj', 'genecp', 'geom', 'gfinput',
'gfprint', 'hcth', 'hcth147', 'hcth407', 'hcth93', 'heptane', 'hexanoicacid', 'hissbpbe', 'hseh1pbe',
'integral', 'iodobenzene', 'iodoethane', 'iodomethane', 'isopropylbenzene', 'isoquinoline', 'kcis',
'krypton', 'lanl2dz', 'lanl2mb', 'lc-wpbe', 'loose', 'm-cresol', 'm-xylene', 'm062x', 'm06hf', 'm06l',
'm11l', 'maxcycles', 'maxstep', 'mesitylene', 'methanol', 'methylbenzoate', 'methylbutanoate',
'methylcyclohexane', 'methylethanoate', 'methylmethanoate', 'methylpropanoate', 'minimal', 'mn12l',
'mn12sx', 'modredundant', 'mpw1lyp', 'mpw1pbe', 'mpw1pw91', 'mpw3pbe', 'n-butylbenzene', 'n-decane',
'n-dimethylacetamide', 'n-dimethylformamide', 'n-dodecane', 'n-hexadecane', 'n-hexane',
'n-methylaniline', 'n-methylformamide-mixture', 'n-nonane', 'n-octane', 'n-octanol', 'n-pentadecane',
'n-pentane', 'n-undecane', 'n12sx', 'nitrobenzene', 'nitroethane', 'nitromethane', 'noeigentest',
'nofreeze', 'noraman', 'nosymm', 'nprocshared', 'o-chlorotoluene', 'o-cresol', 'o-dichlorobenzene',
'o-nitrotoluene', 'o-xylene', 'o3lyp', 'ohse1pbe', 'ohse2pbe', 'oniom', 'output', 'p-isopropyltoluene',
'p-xylene', 'pbe1pbe', 'pbeh', 'pbeh1pbe', 'pentanal', 'pentanoicacid', 'pentylamine',
'pentylethanoate', 'perfluorobenzene', 'pkzb', 'population', 'propanal', 'propanoicacid',
'propanonitrile', 'propylamine', 'propylethanoate', 'pseudo', 'pw91', 'pyridine', 'qst2', 'qst3',
'quinoline', 'qzvp', 'rdopt', 'read', 'readfc', 'readfreeze', 'readopt', 'readoptimize', 'regular',
'restart', 's-dioxide', 'savemixed', 'savemulliken', 'savenbos', 'savenlmos', 'scrf', 'sddall',
'sec-butylbenzene', 'sogga11', 'sogga11x', 'solvent', 'spinnatural', 'tert-butylbenzene',
'tetrachloroethene', 'tetrahydrofuran', 'tetrahydrothiophene-s', 'tetralin', 'thcth', 'thcthhyb',
'thiophene', 'thiophenol', 'tight', 'toluene', 'tpss', 'tpssh', 'trans-decalin', 'tributylphosphate',
'trichloroethene', 'triethylamine', 'tzvp', 'ultrafine', 'uncharged', 'v5lyp', 'verytight', 'vp86',
'vsxc', 'vwn5', 'water', 'wb97', 'wb97x', 'wb97xd', 'wpbeh', 'x3lyp', 'xalpha', 'xenon',
'xylene-mixture']
#GENERAL PURPOSE FUNCTIONS
def is_str_float(i):
"""Check if a string can be converted into a float"""
try: float(i); return True
except ValueError: return False
except TypeError: return False
def trim_str(string, max_len=40):
assert type(string) == str
assert type(max_len) == int
if len(string) > max_len: return "..." + string[-max_len:]
else: return string
def read_item(file_name):
"""Reads an .xyz, .gjf, .com or .log item and returns a list of its contents ready for class instantiation"""
with open(file_name,"r") as in_file:
in_content = [file_name]
in_content.extend(list(in_file.read().splitlines()))
return in_content
def lock_release(func):
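    """Decorator for GUI callbacks: disables every button/check button of frame_a and frame_b
    and sets a lock flag while the wrapped function runs (blocking re-entrant calls), then
    restores everything once the callback returns."""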
def new_func(*args, **kw):
global frame_a, frame_b
if frame_a.lock or frame_b.lock: return None
frame_a.lock, frame_b.lock = True, True
for a in frame_a.check_buttons: a.config(state=tk.DISABLED)
for a in frame_b.check_buttons: a.config(state=tk.DISABLED)
for a in frame_a.buttons: a.config(state=tk.DISABLED)
for a in frame_b.buttons: a.config(state=tk.DISABLED)
result = func(*args, **kw)
for a in frame_a.check_buttons: a.config(state=tk.NORMAL)
for a in frame_b.check_buttons: a.config(state=tk.NORMAL)
for a in frame_a.buttons: a.config(state=tk.NORMAL)
for a in frame_b.buttons: a.config(state=tk.NORMAL)
frame_a.lock, frame_b.lock = False, False
return result
return new_func
#DATA FILE CLASSES
class LogFile:
calc_types = ["TS","Red","IRC","Opt","SP"]
def __init__(self,file_content,fragment_link_one=False):
self.list = file_content
self.lenght = len(self.list)
self.name = self.list[0].strip()
self.empty_line_idxs = []
self.charge_mult = None
self.input_geom_idx = None
self.start_xyz_idxs = []
self.end_resume_idxs = []
self.start_resume_idxs = []
self.linked_job_idxs = []
self.multi_dash_idxs =[]
self.scf_done = []
####.thermal = ["ZPC","TCE","TCH","TCG","SZPE","STE","STH","STG"]
self.thermal = [None, None, None, None, None , None, None, None]
self.oc_orb_energies = []
self.uno_orb_energies = []
self.hash_line_idxs = []
self.norm_term_idxs = []
self.errors = []
self.irc_points = []
self.scan_points = []
self.opt_points = []
self.force_const_mat = []
self.distance_matrix = []
self.s_squared = []
self.muliken_spin_densities_idxs = []
self.muliken_charge_idxs = []
self.chelpg_charge_idxs = []
self.pop_analysis_idxs = []
self.npa_start_idxs = []
self.npa_end_idxs = []
self.apt_charge_idxs =[]
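        # The loop below walks the log file once and dispatches on the first character of each
        # stripped line, so only a handful of string comparisons are needed per log line.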
for i,a in enumerate(a.strip() for a in self.list):
# i = index
# a = line.strip()
# b = line.split()
# c = len(b)
if a == "": self.empty_line_idxs.append(i); continue
if a[-1] == "@": self.end_resume_idxs.append(i); continue
elif a[0] == "1":
if a.startswith(r"1\1"): self.start_resume_idxs.append(i); continue
if a[0].isdigit() or a[0].islower(): continue
elif a[0] == "-":
if a.startswith("------"): self.multi_dash_idxs.append(i); continue
elif a[0] == "!":
b = a.split(); c = len(b)
if c == 4:
condition_a = all(x in y for x,y in zip(b,("!",["Optimized","Non-Optimized"],"Parameters","!")))
if condition_a: self.scan_points.append([i,b[1]]); continue
elif a[0] == "A":
text_a = "Alpha occ. eigenvalues --"
text_b = "Alpha virt. eigenvalues --"
text_c = "Atom No Natural Electron Configuration"
text_d = "APT charges:"
if a.startswith(text_a): self.oc_orb_energies.append(i); continue
elif a.startswith(text_b): self.uno_orb_energies.append(i); continue
elif a.split() == text_c.split(): self.npa_end_idxs.append(i); continue
elif a.startswith(text_d): self.apt_charge_idxs.append(i); continue
elif a[0] == "C":
b = a.split(); c = len(b)
if all((a.startswith("Charge"),self.charge_mult is None, c == 6)):
pattern = ("Charge", "=", "Multiplicity", "=")
if all(x == b[n] for x,n in zip(pattern,(0,1,3,4))):
self.input_geom_idx = i; self.charge_mult = b[2::3]; continue
elif a[0] == "D":
if a.startswith("Distance matrix (angstroms):"): self.distance_matrix.append(i); continue
elif a[0] == "E":
if a.startswith("Error"): self.errors.append(i); continue
elif a.startswith("ESP charges:"): self.chelpg_charge_idxs.append(i);continue
elif a[0] == "F":
if a.startswith("Full mass-weighted force constant matrix:"): self.force_const_mat.append(i); continue
elif a[0] == "I":
if a == "Input orientation:": self.start_xyz_idxs.append(i + 5); continue
elif a[0] == "L":
if a.startswith("Link1:"):
self.linked_job_idxs.append(i)
if fragment_link_one:
self.lenght = len(self.list[:i])
self.link_one = [self.list[0]]
try:
_ = self.list[i+1]
self.link_one.extend(self.list[i + 1:])
self.link_one = LogFile(self.link_one,fragment_link_one)
self.list = self.list[:i]
break
except IndexError:
pass
elif a[0] == "N":
if a.startswith("Normal termination of Gaussian"): self.norm_term_idxs.append(i); continue
elif a[0] == "M":
if a.startswith("Mulliken charges and spin densities:"):
pass; self.muliken_spin_densities_idxs.append(i); continue
elif a.startswith("Mulliken charges:"):
pass; self.muliken_charge_idxs.append(i);continue
elif a.startswith("Molecular Orbital Coefficients:"):
pass; self.pop_analysis_idxs.append(i);continue
elif a[0] == "P":
b = a.split(); c = len(b)
if c != 6 or any(x != b[n] for x,n in zip(["Point","Number:","Path","Number:"],[0,1,3,4])): continue
if any(not b[n].isnumeric() for n in [2, 5]): continue
else: self.irc_points.append([i, b[5], b[2]]); continue
elif a[0] == "S":
b = a.split(); c = len(b)
if a == "Standard orientation:": self.start_xyz_idxs.append(i + 5); continue
elif a.startswith("SCF Done:") and c > 5: self.scf_done.append([i,b[4]]); continue
elif a.startswith("S**2 before annihil"):self.s_squared.append([i,b[3].replace(",",""),b[-1]]); continue
elif a.startswith("Sum of electronic and zero-point Energies="): self.thermal[4] = b[-1]; continue
elif a.startswith("Sum of electronic and thermal Energies="): self.thermal[5] = b[-1]; continue
elif a.startswith("Sum of electronic and thermal Enthalpies="): self.thermal[6] = b[-1]; continue
elif a.startswith("Sum of electronic and thermal Free Energies="): self.thermal[7] = b[-1]; continue
elif a.startswith("Step") and c == 9:
x = ["Step", "number", "out", "of", "a", "maximum", "of"]
y = [0, 1, 3, 4, 5, 6, 7]
z = all(b[n].isnumeric() for n in [2, 8])
if all(d == b[n] for d,n in zip(x,y)) and z: self.opt_points.append(i); continue
elif a[0] == "T":
b = a.split()
if a.startswith("Thermal correction to Energy="): self.thermal[1] = b[-1]; continue
elif a.startswith("Thermal correction to Enthalpy="): self.thermal[2] = b[-1]; continue
elif a.startswith("Thermal correction to Gibbs Free Energy="): self.thermal[3] = b[-1]; continue
elif a[0] == "Z":
b = a.split()
if a.startswith("Zero-point correction="): self.thermal[0] = b[-2]; continue
elif a[0] == "#": self.hash_line_idxs.append(i); continue
elif a[0] == "*":
if a.replace("*","").startswith("Gaussian NBO Version 3.1"):
                    self.npa_start_idxs.append(i); continue
#--------------------------------------------POST PROCESSING----------------------------------------------------
x = None if self.start_xyz_idxs is None else [min(a for a in self.multi_dash_idxs if a > b) for b in self.start_xyz_idxs]
self.end_xyz_idxs = x
self.scan_end = [min(a for a in self.multi_dash_idxs if a > b[0]) for b in self.scan_points]
try:
x = [self.list[b:min(a for a in self.empty_line_idxs if a > b)] for b in self.force_const_mat]
self.displ_block = x
except Exception as e:
print("Error while finding vibrational frequencies of log file")
print(e)
print(self.name)
self.displ_block = []
# --------------------------------------------------ASSURANCE---------------------------------------------------
self.init_errors = []
#if self.charge_mult is None:
# self.init_errors.append("Charge and multiplicity could not be identified!")
#if len(self.start_resume_idxs) != len(self.end_resume_idxs):
# self.init_errors.append("Inconsistent resumes")
#if len(self.name.split()) != 1:
# self.init_errors.append("Name should not contain empty spaces or be empty")
#if not self.list[1].strip().startswith("Entering Gaussian System"):
# self.init_errors.append("Is this a Gaussian log file?")
#if not self.start_xyz_idxs is None:
# if len(self.start_xyz_idxs) != len(self.end_xyz_idxs):
# self.init_errors.append("Found an inconsistent number of geometries")
#if not any([self.homo is None, self.lumo is None]):
# if self.homo > self.lumo:
# self.init_errors.append("Lumo is lower than homo?")
#if self.init_errors:
# for a in self.init_errors: print(a)
# print("Errors above were found on file\n{}".format(self.name))
@functools.lru_cache(maxsize=1)
def loghelp(self):
for a in vars(self):
if a != "list":
print(a.upper(),"--->",getattr(self,a))
@functools.lru_cache(maxsize=1)
def xyz_cord_block(self,start_idx,end_idx):
data = [a.split() for a in self.list[start_idx:end_idx]]
return [[elements[int(l[1])],*[l[i] for i in [3,4,5]]] for l in data]
@functools.lru_cache(maxsize=1)
def last_cord_block(self):
if not all([self.start_xyz_idxs, self.end_xyz_idxs]):
if self.last_log_abstract:
print("WARNING: Coordinates will be taken from the last job abstract:")
print("lines {} - {} of file:".format(self.start_resume_idxs[-1],self.end_resume_idxs[-1]))
print("{}".format(self.name))
return self.last_log_abstract.xyz_object().cord_block()
else: return None
else:
return self.xyz_cord_block(self.start_xyz_idxs[-1],self.end_xyz_idxs[-1])
@functools.lru_cache(maxsize=1)
def first_cord_block(self):
if not all([self.start_xyz_idxs,self.end_xyz_idxs]):
if self.input_geom_idx:
coordinates = []
for i,a in enumerate(self.list[self.input_geom_idx:]):
if i > 5 and not coordinates: break
a = a.split()
if len(a) == 4:
if a[0] in elements and all(is_str_float(a[n]) for n in [1, 2, 3]):
coordinates.append(a)
elif coordinates: break
elif coordinates: break
return coordinates
else: return None
else:
return self.xyz_cord_block(self.start_xyz_idxs[0],self.end_xyz_idxs[0])
@functools.lru_cache(maxsize=1)
def _n_atoms(self):
if self.last_cord_block():
return len(self.last_cord_block())
elif self.first_cord_block():
return len(self.first_cord_block())
def any_xyz_obj(self,a_idx,b_idx,title=" ",name=False):
if name == False: name = self.name
return XyzFile([name, self.n_atoms, title, *(" ".join(l) for l in self.xyz_cord_block(a_idx,b_idx))])
@functools.lru_cache(maxsize=1)
def last_xyz_obj(self):
if self.last_cord_block():
return XyzFile([self.name,self.n_atoms," ",*(" ".join(l) for l in self.last_cord_block())])
else:
return None
@functools.lru_cache(maxsize=1)
def first_xyz_obj(self):
return XyzFile([self.name,self.n_atoms," ",*(" ".join(l) for l in self.first_cord_block())])
@functools.lru_cache(maxsize=1)
def low_e_xyz_obj(self):
if self.calc_type == "SP": return None
else:
xyzs = {"TS":self.opt,"Red":self.scan_geoms,"IRC":self.irc,"Opt":self.opt}[self.calc_type]()
if len(xyzs) == 0: return None
else: return sorted(xyzs,key= lambda x: float(x.title()) if is_str_float(x.title()) else 1)[0]
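# Note: _calc_type infers the job type from the route keywords extracted by _raw_route_keys:
# ts/qst2/qst3 -> "TS", modredundant/readoptimize/readfreeze -> "Red", irc -> "IRC",
# opt -> "Opt"; anything else is reported as a single point ("SP").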
@functools.lru_cache(maxsize=1)
def _calc_type(self):
if self.raw_route:
r_sect = self.raw_route_keys
if any(a in r_sect for a in ("ts", "qst2","qst3")): return "TS"
elif any(True for a in r_sect if a in ("modredundant", "readoptimize", "readfreeze")): return "Red"
elif "irc" in r_sect: return "IRC"
elif "opt" in r_sect: return "Opt"
else: return "SP"
else: return "No data"
@functools.lru_cache(maxsize=1)
def _normal_termin(self):
return any("Normal termination of Gaussian" in l for l in self.list[-5:])
def _error_msg(self):
error_idxs = [a for a in self.errors if a + 5 > self.lenght]
for n in [-4,-3,-2,-1]:
if self.list[n].strip().startswith("galloc: could not allocate memory."):
error_idxs.append(n)
if error_idxs: return " | ".join([self.list[n] for n in error_idxs])
else: return "No data"
@functools.lru_cache(maxsize=1)
def needs_ref(self):
if self.calc_type == "Opt" and self.last_freq:
if self.last_freq.n_ifreq() == "0": return "No"
else: return "Yes"
elif self.calc_type == "TS" and self.last_freq:
if self.last_freq.n_ifreq() == "1": return "No"
else: return "Yes"
else: return "-"
@functools.lru_cache(maxsize=1)
def irc(self):
if not all([self.start_xyz_idxs,self.end_xyz_idxs,self.irc_points,self.scf_done]): return []
points = self.irc_points
scf = [max(self.scf_done,key=lambda x: x[0] if x[0] < a[0] else 0)[1] for a in points]
a_idx = [max(self.start_xyz_idxs,key=lambda x: x if x < a[0] else 0) for a in points]
b_idx = [max(self.end_xyz_idxs,key=lambda x: x if x < a[0] else 0) for a in points]
points = [[*d[1:],c,self.any_xyz_obj(a,b,title=c)] for a,b,c,d in zip(a_idx,b_idx,scf,points)]
path_a = sorted([a for a in points if a[0] == "1"], key = lambda x: int(x[1]), reverse=True)
path_b = [a for a in points if a[0] == "2"]
return [a[3] for a in [*path_a,*path_b]]
@functools.lru_cache(maxsize=1)
def opt(self):
if not all([self.start_xyz_idxs,self.end_xyz_idxs,self.opt_points,self.scf_done]): return []
points = self.opt_points
scf = [max(self.scf_done,key=lambda x: x[0] if x[0] < a else 0)[1] for a in points]
a_idx = [max(self.start_xyz_idxs,key=lambda x: x if x < a else 0) for a in points]
b_idx = [max(self.end_xyz_idxs,key=lambda x: x if x < a else 0) for a in points]
return [self.any_xyz_obj(a,b,title=c) for a,b,c in zip(a_idx,b_idx,scf)]
@functools.lru_cache(maxsize=1)
def scan_geoms(self):
if not all([self.start_xyz_idxs, self.end_xyz_idxs, self.scan_points, self.scf_done]): return []
geoms = []
all_points = self.scan_points
points = [a for a in all_points if a[0] < self.start_xyz_idxs[-1] and a[0] < self.end_xyz_idxs[-1]]
points_removed = len(all_points) - len(points)
if points_removed != 0:
print(f"WARNING: {points_removed} Scan points have been removed due to inconsistent number o geometries found")
start_idx = [min(i for i in self.start_xyz_idxs if i > b[0]) for b in points]
end_idx = [min(i for i in self.end_xyz_idxs if i > b[0]) for b in points]
scf_idx = [max(i for i in self.scf_done if i[0] < b[0]) for b in points]
for i,(a,b,c,d) in enumerate(zip(start_idx,end_idx,scf_idx,points)):
name = self.name.replace(".log","_" + str(i+1)+".xyz")
if d[1] == "Optimized": print("Optimized geometry found at line {}!".format(d[0]))
elif d[1] == "Non-Optimized": print("Non-Optimized1 geometry found at line {}!".format(d[0]))
geoms.append(self.any_xyz_obj(a,b,title=str(c[1]), name=name))
if len(geoms) == 0:
print("No Optimized geometries found for {} file".format(self.name()))
return geoms
@functools.lru_cache(maxsize=1)
def _last_freq(self):
return LogFreq(self.displ_block[-1]) if self.displ_block else False
@functools.lru_cache(maxsize=1)
def _last_log_abstract(self):
if all([self.start_resume_idxs,self.end_resume_idxs]):
x = ["".join([x.strip() for x in self.list[a:b]]).split("\\") for a,b in zip(self.start_resume_idxs,self.end_resume_idxs)]
return LogAbstract(x[-1]) if x else None
@functools.lru_cache(maxsize=1)
def _xyz_from_dist_matrix(self):
end_idx = lambda x: next(i for i,a in enumerate(self.list[x+1:],start=x+1) if not a.split()[0].isdigit())
return [DistMatrix(self.list[a+1:end_idx(a)]) for a in self.distance_matrix]
@functools.lru_cache(maxsize=1)
def _last_muliken_spin_density(self):
if self.muliken_spin_densities_idxs:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if not a.split()[0].isdigit())
return "\n".join(self.list[self.muliken_spin_densities_idxs[-1]:end_idx(self.muliken_spin_densities_idxs[-1]+1)])
@functools.lru_cache(maxsize=1)
def _last_internal_coord(self):
if self.scan_points:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if not a.strip().startswith("!"))
return "\n".join(self.list[self.scan_points[-1][0]-1:end_idx(self.scan_points[-1][0]+5)+1])
@functools.lru_cache(maxsize=1)
def _last_muliken_charges(self):
if self.muliken_charge_idxs:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if not a.split()[0].isdigit())
return "\n".join(self.list[self.muliken_charge_idxs[-1]:end_idx(self.muliken_charge_idxs[-1]+1)])
@functools.lru_cache(maxsize=1)
def _last_chelpg_charges(self):
if self.chelpg_charge_idxs:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if not a.split()[0].isdigit())
return "\n".join(self.list[self.chelpg_charge_idxs[-1]:end_idx(self.chelpg_charge_idxs[-1] + 1)])
@functools.lru_cache(maxsize=1)
def _pop_analysis(self):
if self.pop_analysis_idxs and self.muliken_charge_idxs:
return "\n".join(self.list[self.pop_analysis_idxs[-1]:self.muliken_charge_idxs[-1]])
@functools.lru_cache(maxsize=1)
def _npa_analysis(self):
if self.npa_start_idxs and self.npa_end_idxs:
if len(self.npa_start_idxs) > 1:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if a.strip() == "")
return "\n".join(self.list[self.npa_start_idxs[-2]:end_idx(self.npa_end_idxs[-1])])
@functools.lru_cache(maxsize=1)
def _last_apt_charges(self):
if self.apt_charge_idxs:
end_idx = lambda x: next(i for i, a in enumerate(self.list[x + 1:], start=x + 1) if not a.split()[0].isdigit())
return "\n".join(self.list[self.apt_charge_idxs[-1]:end_idx(self.apt_charge_idxs[-1]+1)])
@functools.lru_cache(maxsize=1)
def _raw_route(self):
try:
raw_route =None
x = None if self.hash_line_idxs is None else min(a for a in self.multi_dash_idxs if a > self.hash_line_idxs[0])
x = None if self.hash_line_idxs is None else "".join([a.lstrip() for a in self.list[self.hash_line_idxs[0]:x]])
raw_route = " ".join(x.split())
except IndexError as e:
raw_route = None
print("Error while finding route section of log file")
print(e)
print(self.name)
finally:
return raw_route
@functools.lru_cache(maxsize=1)
def _raw_route_keys(self):
if not self.raw_route: return
r_sect = [self.raw_route]
for x in [None, "/", "(", ")", ",", "=", "%", ":"]:
r_sect = [a for a in itertools.chain(*[i.split(x) for i in r_sect]) if len(a) > 1]
r_sect = [a.lower() for a in r_sect]
return r_sect
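# sep_conseq groups a list of line indexes into runs of consecutive integers. _homo/_lumo use
# it to bundle the "Alpha occ./virt. eigenvalues" lines that belong to the same orbital
# printout, so one HOMO/LUMO value is returned per printed structure.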
def sep_conseq(self,mixed_list):
x, group, new_list = None, [], []
for a in mixed_list:
if x is None and group == []: group.append(a); x = a
elif x + 1 == a: group.append(a); x = a
else: new_list.append(group); x = a; group = [a]
new_list.append(group)
return new_list
@functools.lru_cache(maxsize=1)
def _homo(self):
try:
error_a = f"Inconsisten orbitals on file\n{self.name}"
assert all(min(a) == max(b) + 1 for a, b in zip(self.sep_conseq(self.uno_orb_energies), self.sep_conseq(self.oc_orb_energies))), error_a
homo = []
for structure in self.sep_conseq(self.oc_orb_energies):
orbitals =[]
for i in structure:
error_b = f"Could not identify occupied orbital on line {i}\nFile:{self.name}"
assert self.list[i].lstrip().startswith("Alpha occ. eigenvalues --"), error_b
for a in self.list[i].replace("Alpha occ. eigenvalues --"," ").replace("-"," -").split():
orbitals.append(float(a))
homo.append(max(orbitals))
return homo
except AssertionError:
return None
except ValueError as e:
print(f"Error while looking for homo energy on file {self.name}\n{e}")
@functools.lru_cache(maxsize=1)
def _lumo(self):
try:
error_a = f"Inconsisten orbitals on file\n{self.name}"
assert all(min(a) == max(b) + 1 for a, b in zip(self.sep_conseq(self.uno_orb_energies), self.sep_conseq(self.oc_orb_energies))), error_a
lumo = []
for structure in self.sep_conseq(self.uno_orb_energies):
orbitals = []
for i in structure:
error_b = f"Could not identify unoccupied orbital on line {i}\nFile:{self.name}"
assert self.list[i].lstrip().startswith("Alpha virt. eigenvalues --"), error_b
for a in self.list[i].replace("Alpha virt. eigenvalues --", " ").replace("-", " -").split():
orbitals.append(float(a))
lumo.append(min(orbitals))
return lumo
except AssertionError:
return None
except ValueError as e:
print(f"Error while looking for lumo energy on file {self.name}\n{e}")
@functools.lru_cache(maxsize=1)
def _homolumo(self):
if self.homo and self.lumo:
try:
assert len(self.homo) == len(self.lumo), f"Inconsistent orbitals on file:\n{self.name}"
return [a-b for a,b in zip(self.homo,self.lumo)]
except AssertionError:
return None
homo = property(_homo)
lumo = property(_lumo)
homolumo = property(_homolumo)
raw_route_keys = property(_raw_route_keys)
raw_route = property(_raw_route)
n_atoms = property(_n_atoms)
normal_termin = property(_normal_termin)
calc_type = property(_calc_type)
error_msg = property(_error_msg)
last_log_abstract = property(_last_log_abstract)
last_freq = property(_last_freq)
xyz_from_dist_matrix = property(_xyz_from_dist_matrix)
last_muliken_spin_density = property(_last_muliken_spin_density)
last_internal_coord = property(_last_internal_coord)
last_muliken_charges = property(_last_muliken_charges)
last_chelpg_charges = property(_last_chelpg_charges)
pop_analysis = property(_pop_analysis)
npa_analysis = property(_npa_analysis)
last_apt_charges = property(_last_apt_charges)
class LogAbstract:
def __init__(self,content):
assert type(content) is list
self.list = content
self.version = None
self.dipole = None
self.img_freq = None
self.hash_line = None
for i,a in enumerate(self.list):
a = a.lstrip()
if a.lstrip == "": print("Empty!");continue
elif a.startswith("Version="): self.version = a.replace("Version=","")
elif a.startswith("#"): self.hash_line = i
elif a.startswith("NImag="): self.img_freq = a.replace("NImag=0","")
elif a.startswith("DipoleDeriv="): self.img_freq = a.replace("DipoleDeriv=","")
else: continue
def __str__(self):
return "\n".join(self.list)
def read_strucure(self):
charge_mult = None
title = None
coordinates = []
for i,a in enumerate(self.list[self.hash_line:]):
if i > 5 and not coordinates: break
a = a.split(",")
if len(a) == 2 and not coordinates: charge_mult = a; continue
if len(a) == 4:
if a[0] in elements and all(is_str_float(a[n]) for n in [1,2,3]):
coordinates.append(" ".join(a))
elif coordinates: break
elif coordinates: break
return charge_mult, XyzFile([self.list[0],str(len(coordinates)),title,*coordinates])
def charge_mult(self):
return self.read_strucure()[0]
def xyz_object(self):
return self.read_strucure()[1]
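# LogFreq parses one "Full mass-weighted force constant matrix" block. Each column group
# starts two lines above "Frequencies --", followed by "Red. masses --", "Frc consts --",
# "IR Inten --", a header row and one displacement row per atom, so the block repeats with
# a period of n_atoms + 7 lines (hence the strided slicing below).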
class LogFreq:
def __init__(self, content):
assert type(content) is list
self.list = content
self.rows = []
for i,a in enumerate(self.list):
if a.lstrip().startswith("Frequencies --"):
try:
assert self.list[i + 1].lstrip().startswith("Red. masses --")
assert self.list[i + 2].lstrip().startswith("Frc consts --")
assert self.list[i + 3].lstrip().startswith("IR Inten --")
self.rows.append(i-2)
except AssertionError:
continue
if not self.rows:
self.n_atoms = 1
self.block = []
else:
self.n_atoms = len(self.list) - self.rows[-1] - 7
self.block = self.list[self.rows[0]:]
def __str__(self):
return "\n".join(self.list)
@functools.lru_cache(maxsize=1)
def frequencies(self):
return list(itertools.chain(*[i.split()[2:] for i in self.block[2::self.n_atoms+7]]))
@functools.lru_cache(maxsize=1)
def ir_intensities(self):
return list(itertools.chain(*[i.split()[3:] for i in self.block[5::self.n_atoms+7]]))
@functools.lru_cache(maxsize=1)
def displ_for_freq_idx(self,freq_idx):
displ = []
for num in range(self.n_atoms):
displ.append(list(itertools.chain(*[i.split()[2:] for i in self.block[7+num::self.n_atoms+7]])))
displ_for_freq_str = [a[freq_idx*3:freq_idx*3+3] for a in displ]
displ_for_freq_float = [[float(i) for i in b] for b in displ_for_freq_str]
return displ_for_freq_float
@functools.lru_cache(maxsize=1)
def n_ifreq(self):
return str(len([b for b in self.frequencies() if float(b) < 0])) if self.frequencies() else "No data"
def ir_spectra(self,threshold = 20):
pairs = []
for a,b in zip(self.frequencies(), self.ir_intensities()):
if is_str_float(a) and is_str_float(b):
pairs.append([float(a),float(b)])
for a,b in zip(sorted(pairs,key=lambda x: x[1], reverse=True), range(threshold)):
print("{:>10.1f}{:>10.1f}".format(float(a[0]),float(a[1])))
print("---------")
class DistMatrix:
def __init__(self,text):
self.element = {}
for a in [b.split() for b in text]:
idx = "".join(a[0:2])
if len(a) > 2 and a[1] in elements:
if idx in self.element:
self.element[idx].extend([float(c) for c in a[2:]])
continue
else:
self.element[idx] = [float(c) for c in a[2:]]
for a in self.element:
print(a,self.element[a])
self.dist_matrix = sorted(self.element.values(),key=lambda x: len(x))
self.elem_vector = sorted(self.element.keys(), key=lambda x: len(self.element[x]))
self.xyz_ent_a = []
self.xyz_ent_b = []
for i,a in enumerate(self.dist_matrix):
if i == 0:
self.xyz_ent_a.append([0, 0, 0])
self.xyz_ent_b.append([0, 0, 0])
if i == 1:
self.xyz_ent_a.append([a[0], 0, 0])
self.xyz_ent_b.append([a[0], 0, 0])
if i == 2:
x = (self.dist_matrix[i-1][0]**2+a[0]**2-a[1]**2)/(2*self.dist_matrix[i-1][0])
y = math.sqrt(a[1]**2-x**2)
self.xyz_ent_a.append([x, y, 0])
self.xyz_ent_b.append([x, y, 0])
if i > 2:
x = (self.dist_matrix[i-1][0]**2+a[0]**2-a[1]**2)/(2*self.dist_matrix[i-1][0])
y = math.sqrt(a[1]**2-x**2)
#z =
self.xyz_ent_a.append([x, y, 0])
self.xyz_ent_b.append([x, y, 0])
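# GjfFile wraps a Gaussian input file (.gjf/.com): Link0 lines (%mem, %nproc...), a route
# section starting with "#", a blank line, the title, another blank line, the
# charge/multiplicity line and the coordinate block, optionally followed by gen basis-set
# and ECP sections that the *_errors methods sanity-check.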
class GjfFile:
pattern = re.compile(r"[0-9][0-9][Gg]\+")
def __init__(self,file_content):
self.list = file_content
self.list_l = [a.split() for a in file_content]
self.str_l = [a.replace(" ", "") for a in self.list]
self.return_print = "\n".join(self.list[1:])
#########################
#########################
self.empty_line_idxs = [i for i,a in enumerate(self.list) if a.split() == []]
self.asterisk_line_idxs = [idx for idx,line in enumerate(self.list) if line.split() == ["****"]]
self.link_one_idxs = [i for i,l in enumerate(self.list) if "--link1--" in l.lower()]
@functools.lru_cache(maxsize=1)
def name(self):
if len(self.list[0]) == 0: raise Exception(".gjf or .com object has no name")
assert type(self.list[0]) is str, "Name must be string"
return self.list[0]
@functools.lru_cache(maxsize=1)
def charge(self):
return int(self.list[self.c_m_idx()].split()[0])
@functools.lru_cache(maxsize=1)
def multiplicity(self):
return int(self.list[self.c_m_idx()].split()[1])
@functools.lru_cache(maxsize=1)
def n_electrons(self):
return sum(elements.index(e) for e in self.all_elements()) - self.charge()
@functools.lru_cache(maxsize=1)
def n_atoms(self):
return len(self.all_elements())
@functools.lru_cache(maxsize=1)
def all_elements(self):
return [line[0] for line in self.cord_block()]
@functools.lru_cache(maxsize=1)
def elements(self):
return list(dict.fromkeys(self.all_elements()))
@functools.lru_cache(maxsize=1)
def c_m_validate(self):
return not self.n_electrons()%2 == self.multiplicity()%2
@functools.lru_cache(maxsize=1)
def c_m_validate_txt(self):
return "Yes" if self.c_m_validate() else "--NO!--"
@functools.lru_cache(maxsize=1)
def n_proc(self):
for line in self.list:
line = line.lower().replace(" ","")
if "%nprocshared=" in line: return int(line.replace("%nprocshared=",""))
elif "%nproc=" in line: return int(line.replace("%nproc=",""))
@functools.lru_cache(maxsize=1)
def cord_block(self):
coordinates = []
for line in self.list_l[self.c_m_idx()+1:]:
if len(line) == 0: break
if len(line) != 4: continue
if line[0] in elements: coordinates.append(line)
else: coordinates.append([elements[int(line[0])],*line[1:]])
return coordinates
@functools.lru_cache(maxsize=1)
def route_text(self):
flatten = lambda l: [item for sublist in l for item in sublist]
try: return " ".join(flatten([a.split() for a in self.list[self.route_idx():self.title_idx()]]))
except: return "No data"
@functools.lru_cache(maxsize=1)
def c_m_idx(self):
if len(self.list[self.title_idx()+2].split()) < 2:
raise Exception("Did you provide charge and multiplicity data at line {} of file {}?".format(self.title_idx()+1,self.name()))
return self.title_idx()+2
@functools.lru_cache(maxsize=1)
def end_cord_idx(self):
for idx,line in enumerate(self.list):
if idx < self.c_m_idx(): continue
if line.split() == []: return idx+1
#########################
#########################
@functools.lru_cache(maxsize=1)
def route_idx(self):
for idx,line in enumerate(self.list):
if line.strip().startswith("#"):return idx
raise Exception("A route section (#) should be specified for .gjf or .com files")
@functools.lru_cache(maxsize=1)
def title_idx(self):
for idx,line in enumerate(self.list):
if idx > self.route_idx() and line.split() == []: return idx+1
@functools.lru_cache(maxsize=1)
def gen_basis(self):
return any(i in self.route_text().lower() for i in ["/gen", "gen ","genecp"])
@functools.lru_cache(maxsize=1)
def declared_basis_lines(self):
if not self.gen_basis(): return None
idxs = [i+1 for idx,i in enumerate(self.asterisk_line_idxs) if i < self.asterisk_line_idxs[-1]]
idxs.insert(0,max(i+1 for i in self.empty_line_idxs if i < self.asterisk_line_idxs[-1]))
return idxs
@functools.lru_cache(maxsize=1)
def declared_basis(self):
e_w_b = [self.list[i].split()[:-1] for i in self.declared_basis_lines()]
return [j.capitalize() for i in e_w_b for j in i]
@functools.lru_cache(maxsize=1)
def basis_errors(self):
if not self.gen_basis(): return []
#errors
zero_last = any(self.list[i].split()[-1] == "0" for i in self.declared_basis_lines())
miss_basis = [a for a in self.elements() if a not in self.declared_basis()]
surpl_basis = [a for a in self.declared_basis() if a not in self.elements()]
rep_basis = list(dict.fromkeys([a for a in self.declared_basis() if self.declared_basis().count(a) > 1]))
errors = []
for i in [*[a+1 for a in self.declared_basis_lines()],self.route_idx()]:
if GjfFile.pattern.search(self.list[i]):
errors.append("Is the basis set specifications correct?".format(i))
errors.append("{}".format(self.list[i]))
errors.append("Shouldn't '+' appear before the letter 'G'?")
#statements
if not zero_last:errors.append("Missing zero at the end of basis set specification?")
if miss_basis:errors.append("Missing basis for: {} ?".format(" ".join(miss_basis)))
if surpl_basis:errors.append("Surplus basis for: {} ?".format(" ".join(surpl_basis)))
if rep_basis:errors.append("Repeated basis for: {} ?".format(" ".join(rep_basis)))
return errors
@functools.lru_cache(maxsize=1)
def gen_ecp(self):
return any(i in self.route_text().lower() for i in ["pseudo", "genecp"])
@functools.lru_cache(maxsize=1)
def declared_ecp_lines(self):
line_idx = []
if not self.gen_ecp(): return None
if self.gen_basis(): start_idx = self.declared_basis_lines()[-1] + 1
else:start_idx = self.end_cord_idx()
for idx,line in enumerate(self.list):
if idx < start_idx: continue
if len(line.split()) <= 1: continue
if line.split()[-1] != "0": continue
if all(a.capitalize() in elements for a in line.split()[:-1]): line_idx.append(idx)
return line_idx
@functools.lru_cache(maxsize=1)
def declared_ecp(self):
ecps = [self.list[i].split()[:-1] for i in self.declared_ecp_lines()]
return [j.capitalize() for i in ecps for j in i]
@functools.lru_cache(maxsize=1)
def ecp_errors(self,heavy_e = 36):
if not self.gen_ecp(): return []
#errors
zero_last = any(self.list[i].split()[-1] == "0" for i in self.declared_ecp_lines())
miss_ecp = [a for a in self.elements() if a not in self.declared_ecp() and elements.index(a) > heavy_e]
surpl_ecp = [a for a in self.declared_ecp() if a not in self.elements()]
rep_ecp = list(dict.fromkeys([a for a in self.declared_ecp() if self.declared_ecp().count(a) > 1]))
#statements
errors = []
if not zero_last:errors.append("Missing zero at the end of ecp set specification?")
if miss_ecp:errors.append("Missing ecp for: {} ?".format(" ".join(miss_ecp)))
if surpl_ecp:errors.append("Surplus ecp for: {} ?".format(" ".join(surpl_ecp)))
if rep_ecp:errors.append("Repeated ecp for: {} ?".format(" ".join(rep_ecp)))
return errors
@functools.lru_cache(maxsize=1)
def route_errors(self):
errors = []
keywords = self.route_text().lower().split()
if len(keywords) > 1:
if "nosymm" in keywords:
if keywords[0] == "#t" or keywords[0:2] == ["#","t"]:
errors.append("Combination of 'NoSymm' and '#T' might supress geometry output!")
return errors
@functools.lru_cache(maxsize=1)
def mem(self):
for line in self.list:
line = line.lower().replace(" ","")
if line.startswith("%mem=") and line.endswith("mb"): return int(line[5:-2])
elif line.startswith("%mem=") and line.endswith("gb"): return 1000*int(line[5:-2])
return None
#########################
#########################
def replace_cord(self, xyz_obj):
new = []
for line in self.list[0:self.c_m_idx() + 1]: new.append(line)
for line in xyz_obj.form_cord_block(): new.append(line)
for line in self.list[self.end_cord_idx()-1:]: new.append(line)
return GjfFile(new)
def xyz_obj(self):
return XyzFile([self.name(),self.n_atoms()," ",*[" ".join(a) for a in self.cord_block()]])
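# XyzFile stores a geometry as [name, n_atoms, title, "El x y z", ...]; element columns given
# as atomic numbers are mapped to symbols via the global `elements` list. If the second line
# is not a bare atom count, the constructor falls back to scraping any "element x y z"-looking
# lines from the input.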
class XyzFile:
def __init__(self,file_content):
self.list = file_content
if len(self.list) < 2: raise Exception(".xyz Object is empty?")
elif not (str(self.list[1]).strip().isdigit() and len(str(self.list[1]).split()) == 1):
print("{} is not a proper .xyz file\nAttempting to read it anyway!".format(self.list[0]))
try_xyz = []
for line in self.list:
line = line.split()
if len(line) != 4: continue
if not all(is_str_float(line[i]) for i in range(1, 4)): continue
if line[0] in elements:
try_xyz.append(" ".join(line))
continue
try:
line[0] = elements[int(line[0])]
try_xyz.append(" ".join(line))
except:
raise Exception("Could not understand file {}".format(self.list[0]))
try_xyz.insert(0,len(try_xyz))
try_xyz.insert(1," ")
try_xyz.insert(0,self.list[0])
self.list = try_xyz
self.list_l = [str(a).split() for a in self.list]
#self.molecule.print_int_bond_map()
def __add__(self,other):
assert type(self) == type(other), "Operation '+' allowed only for two XYZ objects"
new = [os.path.splitext(self.name())[0]+"_"+other.name(), str(self.n_atoms()+other.n_atoms()),
self.title()+" "+other.title(),*(self.form_cord_block() + other.form_cord_block())]
return XyzFile(new)
def __sub__(self, other):
el_a = self.all_elements()
el_b = other.all_elements()
assert len(el_a) > len (el_b), "Can't subtract a larger structure from a smaller one"
assert type(self) == type(other), "Operation '-' allowed only for two XYZ objects"
idxs_to_rem = []
for n in range(len(el_a) - len(el_b)):
if all(el_a[n+i] == a for i,a in enumerate(el_b)):
idxs_to_rem = [c+n for c in range(len(el_b))]
break
if len(idxs_to_rem) == 0: print("Could not subtract value!")
xyz_cord = [a for idx,a in enumerate(self.form_cord_block()) if idx not in idxs_to_rem]
new = [os.path.splitext(self.name())[0]+"-"+other.name(), str(self.n_atoms()-other.n_atoms()),
self.title()+"-"+other.title(),*xyz_cord]
return XyzFile(new)
def __str__(self):
return "\n".join(self.return_print())
@functools.lru_cache(maxsize=1)
def name(self):
if len(self.list[0]) == 0: raise Exception(".xyz Object has no name")
return self.list[0]
@functools.lru_cache(maxsize=1)
def n_atoms(self):
if any([len(str(self.list[1]).split()) != 1, not str(self.list[1]).isnumeric()]):
raise Exception("First line of {} (.xyz type) file should contain only the number of atoms in the geometry!".format(self.name()))
return int(self.list[1])
@functools.lru_cache(maxsize=1)
def title(self):
return self.list[2]
@functools.lru_cache(maxsize=1)
def cord_block(self):
cordinates = []
for idx,line in enumerate(self.list_l):
if idx <= 2: continue
if idx >= self.n_atoms() + 3: continue
if line[0] in elements: cordinates.append(line)
else: cordinates.append([elements[int(line[0])],*line[1:]])
return cordinates
@functools.lru_cache(maxsize=1)
def form_cord_block(self):
return ["{:<5}{:>20.6f}{:>20.6f}{:>20.6f}".format(x[0], *[float(x[a]) for a in [1, 2, 3]]) for x in self.cord_block()]
@functools.lru_cache(maxsize=1)
def cord_strip(self):
return [line[1:] for line in self.cord_block()]
@functools.lru_cache(maxsize=1)
def all_elements(self):
return [line[0] for line in self.cord_block()]
@functools.lru_cache(maxsize=1)
def elements(self):
return list(dict.fromkeys(self.all_elements()))
@functools.lru_cache(maxsize=1)
def n_electrons(self):
return sum(elements.index(e) for e in self.all_elements())
@functools.lru_cache(maxsize=1)
def return_print(self):
return [str(self.n_atoms()),self.title(),*[l for l in self.form_cord_block()]]
def print_file(self):
print("======={}=======".format(self.name()))
print("=======START=======")
print("\n".join([l for l in self.return_print()]))
print("========END========")
def save_file(self,directory=None):
if directory is None:
file_path = os.path.splitext(os.path.join(os.getcwd(),self.name().replace(" ","")))[0]+".xyz"
else:
file_path = os.path.splitext(os.path.join(directory,self.name().replace(" ","")))[0]+".xyz"
if os.path.exists(file_path):
print("File {} already exists!".format(os.path.splitext(os.path.basename(file_path))[0] + ".xyz"))
return
with open(file_path,"w") as file:
for line in self.return_print():file.write(str(line)+"\n")
print("File {} saved!".format(os.path.splitext(os.path.basename(file_path))[0] + ".xyz"))
def print_all(self):
print("\n".join([l for l in self.list]))
def displace(self,mult,displacement):
cord_block = [[a,*[float(b[n])-c[n]*mult for n in range(3)]] for a,b,c in zip(self.all_elements(),self.cord_strip(),displacement)]
cord_block = [" ".join([str(i) for i in l]) for l in cord_block]
return XyzFile([self.name(),self.n_atoms(),self.title(),*cord_block])
def rotate(self, angle, axis):
"takes xyz object and returns xyz object rotated by angle over axis"
assert axis in ("x", "y", "z"), "Only 'x','y' or 'z' axis are suported"
if axis == "x":
m_mat = [[1., 0., 0.], [0., math.cos(angle), -math.sin(angle)], [0., math.sin(angle), math.cos(angle)]]
if axis == "y":
m_mat = [[math.cos(angle), 0., math.sin(angle)], [0., 1., 0.], [-math.sin(angle), 0., math.cos(angle)]]
if axis == "z":
m_mat = [[math.cos(angle), -math.sin(angle), 0.], [math.sin(angle), math.cos(angle), 0.], [0., 0., 1.]]
m_mat = np.array(m_mat, np.float64)
rotated = np.array([i[1:4] for i in self.cord_block()], np.float64).transpose()
rotated = np.matmul(m_mat,rotated).transpose()
rotated = np.ndarray.tolist(rotated)
rotated = [[b,*[str(n) for n in a]] for b,a in zip(self.all_elements(),rotated)]
xyz_mat = [self.name(), self.n_atoms()," ",*[" ".join(a) for a in rotated]]
return XyzFile(xyz_mat)
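# superimpose() centres both geometries with std_cord(), then greedily tries +/- rotations
# about x, y and z, halving the step size over `conv` iterations and keeping any move that
# lowers the RMSD (or the maximum atom-pair distance when ret == "max_d").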
def superimpose(self, other, num_atoms=0, print_step=False, ret = "geom",conv=18):
"""Takes xyz object and returns xyz object rotated by angle over axis.
Returns either the max_distance 'max_d' or final geometry 'geom' after rotations and superpositions"""
def rotate(xyz,angle,axis):
assert axis in ("x","y","z"), "Only 'x','y' or 'z' axis are suported"
if axis == "x":
m_mat = [[1., 0., 0.], [0., math.cos(angle), -math.sin(angle)], [0., math.sin(angle), math.cos(angle)]]
if axis == "y":
m_mat = [[math.cos(angle), 0., math.sin(angle)], [0., 1., 0.], [-math.sin(angle), 0., math.cos(angle)]]
if axis == "z":
m_mat = [[math.cos(angle), -math.sin(angle), 0.], [math.sin(angle), math.cos(angle), 0.], [0., 0., 1.]]
m_mat = np.array(m_mat, np.float64)
rotated = np.array(xyz, np.float64).transpose()
rotated = np.matmul(m_mat,rotated).transpose()
return np.ndarray.tolist(rotated)
def calc_err(xyz_1, xyz_2, n_atms):
n_atms = len(xyz_1) if n_atms == 0 else n_atms
sq_dist = sum(sum(math.pow(c-d,2) for c,d in zip(a,b)) for a,b in zip(xyz_1[:n_atms],xyz_2))
return math.sqrt(sq_dist / n_atms)
def max_dist(xyz_a, xyz_b):
return max(math.sqrt(sum(pow(c-d,2) for c,d in zip(a,b))) for a,b in zip(xyz_a,xyz_b))
#----------------------
last_error = None
xyz_1 = [[float(a) for a in b] for b in other.std_cord(num_atoms).cord_strip()]
xyz_2 = [[float(a) for a in b] for b in self.std_cord(num_atoms).cord_strip()]
#Check atom correspondence
for a,b,c in zip(range(len(self.all_elements()) if num_atoms == 0 else num_atoms),other.all_elements(),self.all_elements()):
if b != c:
atom_number = 'th' if 11<=a+1<=13 else {1:'st',2:'nd',3:'rd'}.get((a+1)%10, 'th')
print("WARNING: {}{} atom pair doesn't not correspond to an element match: {} & {}".format(a+1,atom_number,b,c))
if print_step: print("======ACTIONS======")
#Start algorithm
for num in range(conv):
step_size = 1 / 2 ** num
while True:
rot = [[1, "x"], [1, "y"], [1, "z"], [-1, "x"], [-1, "y"], [-1, "z"]]
movements = [rotate(xyz_2, step_size * i[0], i[1]) for i in rot]
if ret == "max_d":
last_error = max_dist(xyz_2, xyz_1)
errors = [max_dist(i, xyz_1) for i in movements]
else:
last_error = calc_err(xyz_2, xyz_1, num_atoms)
errors = [calc_err(i, xyz_1, num_atoms) for i in movements]
best_m = errors.index(min(errors))
if min(errors) < last_error:
xyz_2 = movements[best_m]
if print_step:
msg = [step_size * rot[best_m][0], rot[best_m][1], calc_err(xyz_1, xyz_2, num_atoms)]
print("Rotating {:.5f} radian in {}. RMSD = {:.5f}".format(*msg))
continue
else:
if ret == "max_d" and max_dist(xyz_1, xyz_2) < 0.1:
return True
break
if print_step: print("Final RMSD = {:.5f}".format(calc_err(xyz_1, xyz_2, num_atoms)))
if print_step: print("========END========")
if ret == "geom":
cord_block = [" ".join([a,*[str(n) for n in b]]) for a,b in zip(self.all_elements(),xyz_2)]
return XyzFile([self.name(),self.n_atoms(),self.title(),*cord_block])
elif ret == "max_d":
return False
def std_cord(self, n_atoms=0):
pure_cord = self.cord_strip() if n_atoms == 0 else self.cord_strip()[0:n_atoms]
xyz_avg = [[float(n) for n in i] for i in pure_cord]
xyz_avg = [sum([i[n] for i in xyz_avg]) / len(xyz_avg) for n in range(3)]
xyz_avg = [[float(i[n]) - xyz_avg[n] for n in range(3)] for i in self.cord_strip()]
xyz_avg = [[str(n) for n in a] for a in xyz_avg]
xyz_avg = [" ".join([b,*a]) for b,a in zip(self.all_elements(),xyz_avg)]
xyz_mat = [self.name(), self.n_atoms(), " ", *xyz_avg]
return XyzFile(xyz_mat)
def enantiomer(self):
xyz = [" ".join([*a[0:-1],str(-float(a[-1]))]) for a in self.cord_block()]
xyz_mat = [os.path.splitext(self.name())[0]+"_ent.xyz", self.n_atoms(), " ", *xyz]
return XyzFile(xyz_mat)
molecule = property(lambda self: Molecule(self.cord_block()))
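# Molecule builds Atom objects from a coordinate block and derives connectivity from the
# exponential bond orders defined on Atom: pairs with an int_bond_order above 0.85 are treated
# as bonded when building the bond maps and counting molecular entities.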
class Molecule:
def __init__(self,atom_list):
assert type(atom_list) is list
self.atom_list = [Atom(a,i) for i,a in enumerate(atom_list)]
self.abc_angle(0,1,2)
self.n_mol_ent()
def __str__(self):
return "\n".join([str(a) for a in self.atom_list])
def int_bond_map(self):
return [[b.int_bond_order(a) if a != b and b.int_bond_order(a) > 0.85 else None for a in self.atom_list] for b in self.atom_list]
def ts_bond_map(self):
return [[b.ts_bond_order(a) if a != b and b.int_bond_order(a) > 0.85 else None for a in self.atom_list] for b in self.atom_list]
def print_int_bond_map(self):
for a in self.atom_list:
bonded = ["{:>3}{:>2}:{:.1f}".format(b.idx,b.element, b.int_bond_order(a)) for b in self.atom_list if a != b and b.int_bond_order(a) > 0.1]
print("{:>3}{:>2}".format(a.idx,a.element),"-->",", ".join(bonded))
def print_ts_bond_map(self):
for a in self.atom_list:
bonded = ["{:>3}{:>2}:{:.1f}".format(b.idx,b.element, b.ts_bond_order(a)) for b in self.atom_list if a != b and b.ts_bond_order(a) > 0.1]
print("{:>3}{:>2}".format(a.idx,a.element),"-->",", ".join(bonded))
def n_mol_ent(self, map=None):
if map is None: map = self.int_bond_map()
visited = [False for _ in map]
n_entities = 0
entities = []
def check(idx,atoms=[]):
visited[idx] = True
atoms.append(idx)
for ib, b in enumerate(map[idx]):
if b is None: continue
elif visited[ib]: continue
else:
print(f"Leaving {idx+1} to check on {ib+1} because of BO: {b}")
check(ib, atoms)
return atoms
for ia,a in enumerate(map):
if visited[ia]: continue
else:
print(f"Adding new entitie starting from {ia+1}")
n_entities +=1
entities.append(check(ia,[]))
print("Visited\n",visited)
print("n entitites\n", n_entities)
print("entities\n", entities)
def valid_idxs(func):
def wrapper(obj,*list):
assert all([type(n) is int for n in list]), "Atom indexes should be integers"
assert all([n in range(len(obj.atom_list)) for n in list]), "Atom indexes are out of range"
return func(obj,*list)
return wrapper
@valid_idxs
def ab_distance(self,a,b):
return self.atom_list[a].distance(self.atom_list[b])
@valid_idxs
def abc_angle(self,a,b,c):
return self.atom_list[a].angle(self.atom_list[b],self.atom_list[c])
@valid_idxs
def abcd_dihedral(self,a,b,c,d):
return self.atom_list[a].dihedral(self.atom_list[b],self.atom_list[c],self.atom_list[d])
class Atom:
el_radii = dict(element_radii)
def __init__(self,line,idx):
assert type(line) is list
assert len(line) == 4
assert line[0] in elements
assert all(is_str_float(a) for a in line[1:])
self.idx = idx
self.element = line[0]
self.cord = [float(a) for a in line[1:]]
def distance(self,other):
return sum((b - a) ** 2 for a, b in zip(self.cord, other.cord)) ** 0.5
def angle(self,other_a,other_b):
a_a = np.array(self.cord)
b_a = np.array(other_a.cord)
c_a = np.array(other_b.cord)
ba, bc = a_a - b_a, c_a - b_a
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
#print("Angle :",self.idx,other_a.idx,other_b.idx,"is:", "{:.2f}°".format(np.degrees(angle)))
return angle
def dihedral(self,other_a,other_b,other_c):
p = np.array([self.cord,other_a.cord,other_b.cord,other_c.cord])
# From: stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python
b = p[:-1] - p[1:]
b[0] *= -1
v = np.array([np.cross(v, b[1]) for v in [b[0], b[2]]])
# Normalize vectors
v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1, 1)
return np.degrees(np.arccos(v[0].dot(v[1])))
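# Bond orders use a Pauling-style exponential of the deviation from the sum of covalent radii,
# exp((r_A + r_B - d)/b); the el_radii values are presumably tabulated in pm, hence the /100.
# A softer decay constant (0.6) flags forming/breaking TS bonds, while a tighter one (0.3)
# is used for ordinary connectivity.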
def ts_bond_order(self,other):
return math.exp((Atom.el_radii[self.element]/100 + Atom.el_radii[other.element]/100 - self.distance(other))/0.6)
def int_bond_order(self,other):
return math.exp((Atom.el_radii[self.element]/100 + Atom.el_radii[other.element]/100 - self.distance(other))/0.3)
def __str__(self):
return "{}{}".format(self.idx,self.element)
class Var:
conf_dir = os.path.dirname(__file__)
conf_file = os.path.join(conf_dir, "chemxls_preferences.init")
def __init__(self,conf_file=conf_file):
self.ext = ["any", ".xyz", ".gjf", ".com", ".log", ".inp", ".out"]
a = self.ext
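# Each entry below describes one spreadsheet column: "short" is the column header, "uid" the
# identifier stored in the preferences file, "extension" the file type it applies to, "supl"
# whether the column writes a supplementary file, "float" whether the value is numeric, "hyp"
# whether the cell is a hyperlink, and "long" the label shown in the GUI list boxes.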
self.options = [
{"short":"Blank", "uid":"001", "extension":a[0], "supl":False, "float":False, "hyp":False, "long":"Blank column" },
{"short":"Eh to kcal/mol", "uid":"002", "extension":a[0], "supl":False, "float":True , "hyp":False, "long":"Hartree to kcal/mol conversion factor (627.5)" },
{"short":"Eh to kJ/mol", "uid":"003", "extension":a[0], "supl":False, "float":True , "hyp":False, "long":"Hartree to kJ/mol conversion factor (2625.5)" },
{"short":"Filename", "uid":"004", "extension":a[0], "supl":False, "float":False, "hyp":False, "long":"Filename" },
{"short":"Folder", "uid":"005", "extension":a[0], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to corresponding folder" },
{"short":".xyz", "uid":"006", "extension":a[1], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.xyz" },
{"short":".gjf", "uid":"007", "extension":a[2], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.gjf" },
{"short":".gjf_#", "uid":"008", "extension":a[2], "supl":False, "float":False, "hyp":False, "long":"Route section read from Filename.gjf" },
{"short":".com", "uid":"009", "extension":a[3], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.com" },
{"short":".com_#", "uid":"010", "extension":a[3], "supl":False, "float":False, "hyp":False, "long":"Route section read from Filename.com" },
{"short":".log", "uid":"011", "extension":a[4], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.log" },
{"short":".log_#", "uid":"012", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Route section read from Filename.log" },
{"short":"E0", "uid":"013", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Energy from last SCF cycle" },
{"short":"iFreq", "uid":"014", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Number of imaginary frequencies found on Filename.log" },
{"short":"E_ZPE", "uid":"015", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Zero-point correction" },
{"short":"E_tot", "uid":"016", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Thermal correction to Energy" },
{"short":"H_corr", "uid":"017", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Thermal correction to Enthalpy" },
{"short":"G_corr", "uid":"018", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Thermal correction to Gibbs Free Energy" },
{"short":"E0+E_ZPE", "uid":"019", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Sum of electronic and zero-point Energies" },
{"short":"E0+E_tot", "uid":"020", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Sum of electronic and thermal Energies" },
{"short":"E0+H_corr", "uid":"021", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Sum of electronic and thermal Enthalpies" },
{"short":"E0+G_corr", "uid":"022", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Sum of electronic and thermal Free Energies" },
{"short":"Done?", "uid":"023", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Filename.log gaussian normal termination status" },
{"short":"Error", "uid":"024", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Error messages found on Filename.log" },
{"short":"HOMO", "uid":"025", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"HOMO from Alpha occ. eigenvalues of Filename.log" },
{"short":"LUMO", "uid":"026", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"LUMO from Alpha virt. eigenvalues of Filename.log" },
{"short":"HOMO-LUMO", "uid":"027", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"HOMO-LUMO from Alpha occ. & virt. eigenv. of Filename.log" },
{"short":"Charge", "uid":"028", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Charge from Filename.log" },
{"short":"Mult", "uid":"029", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Starting multiplicity from Filename.log" },
{"short":"n_SCF", "uid":"030", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Number of 'SCF Done:' keywords found" },
{"short":"n_atoms", "uid":"031", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Number of atoms on Filename.log" },
{"short":"TYP", "uid":"032", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Filename.log calculation type (This may be unreliable)" },
{"short":"Needs refinement?", "uid":"033", "extension":a[4], "supl":False, "float":False, "hyp":False, "long":"Filename.log calculation type consistency with iFreq" },
{"short":"S**2 BA", "uid":"034", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Filename.log last spin densities before anihilation" },
{"short":"S**2 After", "uid":"035", "extension":a[4], "supl":False, "float":True , "hyp":False, "long":"Filename.log last spin densities after anihilation" },
{"short":"LG", "uid":"036", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last geometry" },
{"short":"MulkSpinDens", "uid":"037", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last Muliken charge and spin density" },
{"short":"LastIntCoord", "uid":"038", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last Internal coordinates" },
{"short":"MulkCharges", "uid":"039", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last Muliken charge" },
{"short":"ESPCharges", "uid":"040", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last ESP charge" },
{"short":"POPAnalysis", "uid":"041", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last population analysis" },
{"short":"NPAAnalysis", "uid":"042", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last NPA analysis" },
{"short":"APTCharges", "uid":"043", "extension":a[4], "supl":True , "float":False, "hyp":True , "long":"Filename.log last APT charges" },
{"short":".inp", "uid":"044", "extension":a[5], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.inp" },
{"short":".out", "uid":"045", "extension":a[6], "supl":False, "float":False, "hyp":True , "long":"Hyperlink to Filename.out" },
]
assert not any(a["hyp"] and a["float"] for a in self.options), "Cannot be float and hyperlink simultaneously"
assert len(set(a["uid"] for a in self.options)) == len(self.options), "UIDs have to be unique"
assert len(set(a["short"] for a in self.options)) == len(self.options), "Short names have to be unique"
assert len(set(a["long"] for a in self.options)) == len(self.options), "Long names have to be unique"
assert set(a["extension"] for a in self.options) == set(self.ext), "Are there unused extensions or typos?"
assert all([a["hyp"] and a["supl"] for a in self.options if a["supl"]]), "Use of suplementary files must be accompanied by corresponding hyperlink"
self.std_config = configparser.ConfigParser()
self.std_config["DEFAULT"] = {"options": "005 004 006 007 011","splitext":"False","splitjobs":"False"}
self.std_config["STARTUP"] = {"options": "005 004 006 007 011","splitext":"False","splitjobs":"False"}
self.std_config["PRESETA"] = {"options": "005 004 006 007" ,"splitext":"False","splitjobs":"False"}
self.std_config["PRESETB"] = {"options": "005 004 006" ,"splitext":"False","splitjobs":"False"}
self.std_config["PRESETC"] = {"options": "005 004" ,"splitext":"False","splitjobs":"False"}
if not os.path.isfile(conf_file):
with open(conf_file, "w") as configfile:
self.std_config.write(configfile)
self.config = self.std_config
else:
self.config = configparser.ConfigParser()
self.config.read(conf_file)
def pick(args,get_type,valid_keys={},default=None,config=self.config,std_config=self.std_config):
try:
if get_type == "str" : result = config.get(*args)
elif get_type == "bool": result = config.getboolean(*args)
elif get_type == "int" : result = config.getint(*args)
except:
if get_type == "str" : result = std_config.get(*args)
elif get_type == "bool": result = std_config.getboolean(*args)
elif get_type == "int" : result = std_config.getint(*args)
finally:
if valid_keys : result = valid_keys.get(result,default)
return result
big_name = ["DEFAULT" ,"STARTUP" ,"PRESETA" ,"PRESETB" ,"PRESETC" ]
opt = ["default_options" ,"startup_options" ,"preset_a_options" ,"preset_b_options" ,"preset_c_options" ]
split = ["default_split" ,"startup_split" ,"preset_a_split" ,"preset_b_split" ,"preset_c_split" ]
jobs = ["default_split_jobs","startup_split_jobs","preset_a_split_jobs","preset_b_split_jobs","preset_c_split_jobs"]
valid_keys = [a["uid"] for a in self.options]
for big,a,b,c in zip(big_name,opt,split,jobs):
setattr(self,a,[n for n in pick((big,"options"),"str").split() if n in valid_keys])
setattr(self,b,pick((big,"splitext" ),"bool", default=False))
setattr(self,c,pick((big,"splitjobs"),"bool", default=False))
def set_variables(self,section,option,value,conf_file=conf_file):
self.config[section][option] = value
with open(conf_file, "w") as configfile:
self.config.write(configfile)
self.__init__()
#GUI CLASSES
class FileFolderSelection(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self,parent)
self.in_folder = None # str
self.in_folder_label = tk.StringVar(value="Please set folder name")
self.supl_folder = None # str
self.supl_folder_label = tk.StringVar(value="Please set folder name")
self.supl_folder_auto = tk.BooleanVar(value=True)
self.xls_path = None # str
self.xls_path_label = tk.StringVar(value="Please set file name")
self.xls_path_auto = tk.BooleanVar(value=True)
self.recursive_analysis = tk.BooleanVar(value=True)
self.str_width = 500
self.grid_columnconfigure(0, weight=1)
self.lock = False
self.check_buttons = []
self.buttons = []
#INPUT FOLDER
box = self.boxify("Analyze this directory:", 0)
label = tk.Label(box, textvariable=self.in_folder_label)
label.config(width=self.str_width, fg="navy")
label.grid(column=0, row=0)
button = tk.Button(box, text="Select", command=self.set_in_folder, padx="1", pady="0")
button.config(fg="navy")
button.grid(column=1, row=0, sticky="e")
self.buttons.append(button)
check_button = tk.Checkbutton(box, text="Recursively",
variable=self.recursive_analysis,
onvalue=True,
offvalue=False,
selectcolor="gold")
check_button.grid(column=2, row=0, sticky="w")
self.check_buttons.append(check_button)
#SUPPLEMENTARY FOLDER
box = self.boxify("Write supplementary files to this directory:", 1)
label = tk.Label(box, textvariable=self.supl_folder_label)
label.config(width=self.str_width)
label.grid(column=0, row=0)
button = tk.Button(box, text="Select", command=self.set_supl_folder, padx="1", pady="0")
button.grid(column=1, row=0, sticky="e")
self.buttons.append(button)
check_button = tk.Checkbutton(box, text="Auto",
variable=self.supl_folder_auto,
onvalue=True, offvalue=False,
command=self.auto_set_supl)
check_button.grid(column=2, row=0, sticky="w")
self.check_buttons.append(check_button)
# XLS file
box = self.boxify("Write xls file here:", 2)
label = tk.Label(box, textvariable=self.xls_path_label)
label.config(width=self.str_width)
label.grid(column=0, row=0)
button = tk.Button(box, text="Select", command=self.set_xls_path, padx="1", pady="0")
button.grid(column=1, row=0, sticky="e")
self.buttons.append(button)
check_button = tk.Checkbutton(box, text="Auto",
variable=self.xls_path_auto,
onvalue=True, offvalue=False,
command=self.auto_set_xls)
check_button.grid(column=2, row=0, sticky="w")
self.check_buttons.append(check_button)
#AUTO SET
if len(sys.argv) > 1 and sys.argv[-1] in ["--cwd","-cwd","cwd"]:
self.in_folder = os.path.normpath(os.getcwd())
self.in_folder_label.set(trim_str(self.in_folder,self.str_width))
if self.xls_path_auto.get(): self.auto_set_xls()
if self.supl_folder_auto.get(): self.auto_set_supl()
def boxify(self,name,row):
box = tk.LabelFrame(self, text=name)
box.grid(column=0, row=row, sticky="news")
box.grid_columnconfigure(2, minsize=90)
box.grid_columnconfigure(0, weight=1)
return box
def set_in_folder(self):
in_folder = filedialog.askdirectory()
assert type(in_folder) == str
if type(in_folder) == str and in_folder.strip() != "":
self.in_folder = os.path.normpath(in_folder)
self.in_folder_label.set(trim_str(self.in_folder,self.str_width))
if self.xls_path_auto.get(): self.auto_set_xls()
if self.supl_folder_auto.get(): self.auto_set_supl()
else:
messagebox.showinfo(title="Folder selection", message="Folder won't be set!")
def set_supl_folder(self):
supl_folder = filedialog.askdirectory()
if type(supl_folder) == str and supl_folder.strip() != "":
self.supl_folder = os.path.normpath(os.path.join(supl_folder, "chemxlslx_supl_files"))
self.supl_folder_auto.set(False)
self.supl_folder_label.set(trim_str(self.supl_folder, self.str_width))
else:
messagebox.showinfo(title="Folder selection", message="Folder won't be set!")
def auto_set_supl(self):
if self.supl_folder_auto.get() and type(self.in_folder) == str:
if not os.path.isdir(self.in_folder): return
supl_folder = os.path.join(self.in_folder, "chemxlslx_supl_files")
supl_folder = os.path.normpath(supl_folder)
self.supl_folder = supl_folder
self.supl_folder_label.set(trim_str(supl_folder,self.str_width))
def set_xls_path(self):
xls_path = filedialog.asksaveasfilename(title = "Save xls file as:",
filetypes = [("Spreadsheet","*.xls")])
assert type(xls_path) == str
if os.path.isdir(os.path.dirname(xls_path)) and xls_path.strip() != "":
if not xls_path.endswith(".xls"): xls_path += ".xls"
self.xls_path = os.path.normpath(xls_path)
self.xls_path_auto.set(False)
self.xls_path_label.set(trim_str(self.xls_path,self.str_width))
else:
messagebox.showinfo(title="File selection", message="File won't be set!")
def auto_set_xls(self):
if self.xls_path_auto.get() and type(self.in_folder) == str:
if not os.path.isdir(self.in_folder): return
self.xls_path = os.path.join(self.in_folder,"chemxls_analysis.xls")
self.xls_path = os.path.normpath(self.xls_path)
self.xls_path_label.set(trim_str(self.xls_path,self.str_width))
class ListBoxFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self,parent)
self.root = parent
self.grid_columnconfigure(0,weight=1)
self.grid_columnconfigure(3,weight=1)
self.columnconfigure(0,uniform="fred")
self.columnconfigure(3,uniform="fred")
self.grid_rowconfigure(1,weight=1)
self.grid_rowconfigure(2,weight=1)
self.lock = False
self.preferences = Var()
self.options = self.preferences.options
self.need_style0 = [a["short"] for a in self.options if a["float"]]
self.need_formula = [a["short"] for a in self.options if a["hyp"]]
self.dict_options = {a["long"]:[a["short"],a["uid"],a["extension"]] for a in self.options}
self.label_dict = {a["short"]:a["long"] for a in self.options}
self.label_dict.update({"Link1":"Job step of 'Filename.log'"})
self.extension_dict = {d["short"]:d["extension"] for d in self.options}
self.extension_dict.update({"Link1":".log"})
#LEFT PANEL
left_label = tk.Label(self,text="Available options")
left_label.grid(column=0,row=0,columnspan=2)
self.listbox_a = tk.Listbox(self)
self.populate_a("any")
self.listbox_a.grid(column=0, row=1,rowspan=4,sticky="news")
scrollbar = tk.Scrollbar(self, orient="vertical")
scrollbar.config(command=self.listbox_a.yview)
scrollbar.grid(column=1, row=1, rowspan=4, sticky="ns")
self.listbox_a.config(yscrollcommand=scrollbar.set)
self.check_buttons = []
self.buttons = []
#BOTTOM LEFT PANEL
frame = tk.Frame(self)
frame.grid(column=0,row=5,columnspan=2)
left_label = tk.Label(frame,text="Filter options by file extension:")
left_label.grid(column=0,row=0)
self.display_ext = tk.StringVar()
self.drop_options = ttk.OptionMenu(frame,self.display_ext,"any",*self.preferences.ext,
command=lambda x=self.display_ext.get():self.populate_a(x))
self.drop_options.configure(width=10)
self.drop_options.grid(column=1,row=0,sticky="e")
#RIGHT PANEL
right_label = tk.Label(self,text="Selected options")
right_label.grid(column=3,row=0,columnspan=2)
self.listbox_b = tk.Listbox(self)
self.listbox_b.grid(column=3, row=1, rowspan=4, sticky="news")
self.populate_b(self.preferences.startup_options)
scrollbar = tk.Scrollbar(self, orient="vertical")
scrollbar.config(command=self.listbox_b.yview)
scrollbar.grid(column=4, row=1,rowspan=4,sticky="ns")
self.listbox_b.config(yscrollcommand=scrollbar.set)
#BOTTOM RIGHT PANEL
frame = tk.Frame(self)
frame.grid(column=3,row=5,columnspan=2,sticky="news")
self.split_xlsx_by_ext = tk.BooleanVar(value=self.preferences.startup_split)
check_button = tk.Checkbutton(frame, text="One extension per Spreadsheet",
variable=self.split_xlsx_by_ext,
onvalue=True,
offvalue=False)
self.split_jobs = tk.BooleanVar(value=self.preferences.startup_split_jobs)
check_button.grid(column=0, row=0, sticky="w")
self.check_buttons.append(check_button)
check_button = tk.Checkbutton(frame, text="Split gaussian jobs (Link1)",
variable=self.split_jobs,
onvalue=True,
offvalue=False)
check_button.grid(column=1, row=0, sticky="w")
self.check_buttons.append(check_button)
for n in range(2):
frame.columnconfigure(n,weight=1, uniform='asdffw')
#CENTER BUTTONS
button = tk.Button(self, text=">", command=self.move_right, padx="3")
button.grid(column=2, row=1, sticky="news")
self.buttons.append(button)
button = tk.Button(self, text="X", command=self.delete_selection, padx="3")
button.grid(column=2, row=2, sticky="news")
self.buttons.append(button)
button = tk.Button(self, text=u'\u2191', command=self.mv_up_selection, padx="3")
button.grid(column=2, row=3, sticky="news")
self.buttons.append(button)
button = tk.Button(self, text=u'\u2193', command=self.mv_down_selection, padx="3")
button.grid(column=2, row=4, sticky="news")
self.buttons.append(button)
for n in range(4):
self.rowconfigure(n+1,weight=1, uniform='buttons_')
#PREFERENCE BUTTONS
frame = tk.Frame(self)
frame.grid(column=0,row=6,columnspan=1,rowspan=2)
top = ["Startup","Preset A","Preset B","Preset C"]
for i,a in enumerate(top):
button = tk.Button(frame, text=a, command=lambda a=a: self.load_pref(a))
button.grid(column=i, row=0,sticky="news",padx="5")
self.buttons.append(button)
button = tk.Button(frame, text="Save", command= lambda a=a: self.save_pref(a))
button.grid(column=i, row=1,sticky="news",padx="5")
self.buttons.append(button)
button = tk.Button(frame, text="Add\nAll", command=self.add_all)
button.grid(column=4, row=0,rowspan=2, sticky="news")
self.buttons.append(button)
button = tk.Button(frame, text="Remove\nAll", command=self.rem_all)
button.grid(column=5, row=0,rowspan=2, sticky="news")
self.buttons.append(button)
for n in range(6):
frame.columnconfigure(n,weight=1, uniform='third')
#PREVIEW AND GENERATE BUTTONS
frame = tk.Frame(self)
frame.grid(column=3,row=6,columnspan=2,rowspan=1,sticky="news")
button = tk.Button(frame, text="PREVIEW FILES", command=self.preview_files, padx="1")
button.grid(column=0, row=0, columnspan=1,sticky="news")
self.buttons.append(button)
button = tk.Button(frame, text="GENERATE XLS FILE!", command=self.threaded_xls, padx="1")
button.grid(column=1, row=0, columnspan=1,sticky="news")
self.buttons.append(button)
for n in range(2):
frame.columnconfigure(n,weight=1, uniform='prev')
#PROGRESS BAR
self.progress = ttk.Progressbar(self, orient=tk.HORIZONTAL, length=100, mode='determinate')
self.progress["value"] = 0
self.progress.grid(column=0,row=8,columnspan=6,sticky="news",pady="5")
#PROGRESS LABEL
self.progress_label = tk.StringVar()
self.progress_label.set("github.com/ricalmang")
label = tk.Label(self, textvariable=self.progress_label)
label.grid(column=0, row=9,columnspan=6,sticky="e")
self.path_sheet = ["abs",
"Set A1 to 'abs' if you want Links to use absolute paths",
"Set A1 to anything else if you want Links to use relative paths",
"ON EXCEL: If Hyperlinks are displayed as text and are not working, try:",
"'Ctrl + F' then, replace '=' by '=' in order to force excel to reinterpret cell data",
"ON LIBRE OFFICE: If Hyperlinks are not working, try:",
"'Ctrl + H' (Find & replace) then, replace 'Path!A1' by 'Path.A1' to adjust cell reference of hyperlinks formulas"]
self.style0 = xlwt.easyxf("", "#.0000000")
def mv_up_selection(self):
for n in self.listbox_b.curselection():
if n == 0: pass
else:
text = self.listbox_b.get(n)
self.listbox_b.delete(n)
self.listbox_b.insert(n-1,text)
self.listbox_b.selection_clear(0, tk.END)
self.listbox_b.selection_set(n - 1)
self.listbox_b.activate(n - 1)
def mv_down_selection(self):
for n in self.listbox_b.curselection():
if n == len(self.listbox_b.get(0,tk.END))-1: pass
else:
text = self.listbox_b.get(n)
self.listbox_b.delete(n)
self.listbox_b.insert(n + 1, text)
self.listbox_b.selection_clear(0, tk.END)
self.listbox_b.selection_set(n + 1)
self.listbox_b.activate(n + 1)
def load_pref(self,name):
option = {"Startup" : ["startup_options", "startup_split" , "startup_split_jobs" ],
"Preset A": ["preset_a_options","preset_a_split" , "preset_a_split_jobs"],
"Preset B": ["preset_b_options","preset_b_split" , "preset_b_split_jobs"],
"Preset C": ["preset_c_options","preset_c_split" , "preset_c_split_jobs"]
}[name]
self.populate_b(getattr(self.preferences,option[0])) #UIDS
self.split_xlsx_by_ext.set(getattr(self.preferences,option[1]))
self.split_jobs.set(getattr(self.preferences,option[2]))
def save_pref(self,name):
uids = " ".join([{a["long"]: a["uid"] for a in self.options}[b] for b in self.listbox_b.get(0,tk.END)])
result = messagebox.askyesno(title=f"Are you sure?",
message=f"This will assign currently selected options to {name} button!",
icon='warning')
if not result: return
name = {"Startup":"STARTUP","Preset A":"PRESETA","Preset B":"PRESETB","Preset C":"PRESETC"}[name]
self.preferences.set_variables(name,"options",uids)
self.preferences.set_variables(name,"splitext", str(self.split_xlsx_by_ext.get()))
self.preferences.set_variables(name,"splitjobs", str(self.split_jobs.get()))
def delete_selection(self):
self.listbox_b.delete(tk.ACTIVE)
def move_right(self):
self.listbox_b.insert(tk.END,self.listbox_a.get(tk.ACTIVE))
def populate_a(self,extension="any"):
self.listbox_a.delete(0,tk.END)
if extension=="any":
for a in self.options:
self.listbox_a.insert(tk.END, a["long"])
else:
for a in self.options:
if a["extension"] == extension:
self.listbox_a.insert(tk.END, a["long"])
def populate_b(self,uids=[]):
self.listbox_b.delete(0,tk.END)
for uid in uids:
self.listbox_b.insert(tk.END, {a["uid"]: a["long"] for a in self.options}[uid])
def rem_all(self):
self.listbox_b.delete(0, tk.END)
def add_all(self):
self.listbox_b.delete(0, tk.END)
for a in self.options: self.listbox_b.insert(tk.END, a["long"])
def evaluate_list(self,folder, recursive=True, extensions=[],errors=set(),files=set(),base_only=False):
try:
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
if recursive:
self.evaluate_list(folder=os.path.join(folder, file),
recursive=recursive,
files=files,
extensions=extensions,
base_only=base_only,
errors=errors)
elif any([file.endswith(extension) for extension in extensions]):
files.add(os.path.join(folder,os.path.splitext(file)[0] if base_only else file))
except PermissionError as error:
errors.add(error)
finally:
return files,errors
@lock_release
def preview_files(self):
global frame_a
folder = frame_a.in_folder
cond_1 = folder is None
cond_2 = type(folder) == str and folder.strip() == ""
if cond_1 or cond_2:
messagebox.showinfo(title="Analysis folder is not yet selected!", message="Analysis won't be performed!")
return
assert type(folder) == str, type(folder)
if not os.path.isdir(folder):
messagebox.showinfo(title="Analysis folder is not a valid path", message="Analysis won't be performed!")
return
recursive = frame_a.recursive_analysis.get()
        extensions = list(set(self.dict_options[a][-1] for a in self.listbox_b.get(0,tk.END) if self.dict_options[a][-1] != "any"))
if extensions:
files,errors = self.evaluate_list(folder=folder, files=set(), recursive=recursive, extensions=extensions)
else:
files, errors = [], []
self.pop_up_files(files,errors)
def threaded_preview(self):
job = threading.Thread(target=self.preview_files)
job.start()
self.refresh_gui()
def pop_up_files(self,files,errors):
top_level = tk.Toplevel()
top_level.wm_geometry("1000x600")
scrollbar = tk.Scrollbar(top_level,orient="vertical")
listbox = tk.Text(top_level,yscrollcommand=scrollbar.set)
text = "=" * 23 + "THE FOLLOWING FILES WERE FOUND ON THIS DIRECTORY" + "=" * 23 + "\n"
listbox.insert(tk.INSERT,text)
text = "\n".join(["{:<4} {}".format(i, trim_str(a, frame_a.str_width)) for i, a in enumerate(files)])
listbox.insert(tk.INSERT,text)
if errors:
text = "\n"*2 + "=" * 10 + "THE FOLLOWING ERRORS WERE RAISED WHILE LOOKING FOR FILES IN THIS DIRECTORY" + "=" * 10 + "\n"
listbox.insert(tk.INSERT,text)
text = "\n".join(["{}".format(a) for i, a in enumerate(errors)])
listbox.insert(tk.INSERT, text)
listbox.grid(column=0,row=0,sticky="news")
top_level.grid_columnconfigure(0,weight=1)
top_level.grid_rowconfigure(0, weight=1)
scrollbar.config(command=listbox.yview)
scrollbar.grid(column=1,row=0,sticky="ns")
def refresh_gui(self):
self.root.update()
self.root.after(1000, self.refresh_gui)
def threaded_xls(self):
job = threading.Thread(target=self.startup_gen_xls)
job.start()
self.refresh_gui()
@lock_release
def startup_gen_xls(self):
global frame_a
folder = frame_a.in_folder
# FOLDER MUST BE STRING
cond_1 = folder is None
cond_2 = type(folder) == str and folder.strip() == ""
if cond_1 or cond_2:
messagebox.showinfo(title="Analysis folder is not yet selected!", message="Analysis won't be performed!")
return
need_supl = any([a["supl"] for a in self.options if a["long"] in self.listbox_b.get(0,tk.END)])
# XLS FOLDER MUST BE PARENT OF IN_FOLDER AND SUPL_FOLDER
parent = os.path.normpath(os.path.dirname(frame_a.xls_path))
child_a = os.path.normpath(frame_a.in_folder)
child_b = os.path.normpath(frame_a.supl_folder)
cond_1 = not child_a.startswith(parent)
cond_2 = not child_b.startswith(parent) and need_supl
if cond_1 and cond_2:
message = ".xls file must be saved on a folder that contains both suplementary files and input files "
message += "(May be in subdirectories)."
message += "\nThis is to ensure that relative path hyperlinks will work properly on the resulting xls file!"
message += "\nAnalysis won't be performed!"
messagebox.showinfo(title="Please chose another path scheme!", message=message)
return
elif cond_1:
message = "The folder being analyzed must be contained on a subdirectory of the folder in wich the .xls file is saved."
message += "\nThis is to ensure that relative path hyperlinks will work properly on the resulting xls file!"
message += "\nAnalysis won't be performed!"
messagebox.showinfo(title="Please chose another path scheme!", message=message)
return
elif cond_2:
message = "The suplementary data folder must be contained on a subdirectory of the folder in wich the .xls file is saved."
message += "\nThis is to ensure that relative path hyperlinks will work properly on the resulting xls file!"
message += "\nAnalysis won't be performed!"
messagebox.showinfo(title="Please chose another path scheme!", message=message)
return
# IN_PATH MUST EXIST
if not os.path.isdir(folder):
messagebox.showinfo(title="Analysis folder does not exist!",
message="Analysis won't be performed!")
return
        # IDEALLY, SUPL_FOLDER SHOULD NOT EXIST
if os.path.isdir(child_b) and need_supl:
message="Do you want to overwrite files on the following directory?\n{}".format(child_b)
result=messagebox.askyesno(title="Suplementary file directory already exists!", message=message,icon='warning')
if not result: return
        # IDEALLY, THE PARENT DIRECTORY OF SUPL_FOLDER SHOULD EXIST
        if not os.path.isdir(os.path.dirname(child_b)) and need_supl:
            message="The supplementary file directory's parent directory does not exist!\nAnalysis won't be performed!\n"
messagebox.showinfo(title="Parent directory does not exist!", message=message)
return
        # IDEALLY, THE .XLS FILE SHOULD NOT EXIST
if os.path.isfile(frame_a.xls_path):
message = "Do you want to overwrite the following file?\n{}".format(frame_a.xls_path)
message += "\nMoreover, if you want to overwrite it, make sure this file is not opened by another program before you proced."
result = messagebox.askyesno(title=".xls file already exists!", message=message,icon='warning')
if not result: return
        # THE .XLS FILE'S PARENT DIRECTORY SHOULD EXIST
if not os.path.isdir(os.path.dirname(frame_a.xls_path)):
messagebox.showinfo(title=".xls parent directory does not exist!",
message="Analysis won't be performed!")
return
recursive = frame_a.recursive_analysis.get()
        extensions = list(set(self.dict_options[a][-1] for a in self.listbox_b.get(0,tk.END) if self.dict_options[a][-1] != "any"))
if extensions:
basenames, _ = self.evaluate_list(folder=folder, recursive=recursive,files=set(), extensions=extensions, base_only=True)
else:
basenames, _ = [], []
self.progress["value"] = 0
self.progress_label.set("Analyzing {} file basenames...".format(len(basenames)))
csv_list = []
csv_generator = self.analysis_generator(basenames,extensions)
aux_files_needed = ["LG", "MulkSpinDens", "LastIntCoord", "MulkCharges",
"ESPCharges", "POPAnalysis", "NPAAnalysis", "APTCharges"]
for file in csv_generator:
for request in self.listbox_b.get(0,tk.END):
key = self.dict_options[request][0]
if key in aux_files_needed:
if not file[key] or file[key] == "-": continue
filename = "_".join([file["Filename"],str(file["Link1"]),key+".txt"])
filename = os.path.join(frame_a.supl_folder,file["rel_path"],filename)
try:
os.makedirs(os.path.dirname(filename), exist_ok=True)
assert type(file[key]) == str
with open(filename, "w") as f:
f.write(file[key])
except FileExistsError:
print("Error while creating the following file:")
print(filename)
print("File already exists!")
finally:
file.update({key: self.mk_hyperlink(filename)})
csv_list.append(file.copy())
self.progress["value"] = 100
self.progress_label.set("Saving xls file...")
time.sleep(0.1)
csv_list.sort(key=lambda x: os.path.join(x["rel_path"], x["Filename"]))
if self.split_xlsx_by_ext.get():
self.gen_xls_single_ext(csv_list)
else:
self.gen_xls_normal(csv_list)
self.progress_label.set("Done! Please look up: {}".format(trim_str(frame_a.xls_path,frame_a.str_width-50)))
time.sleep(0.1)
def gen_xls_single_ext(self,csv_list):
global frame_a
wb = Workbook()
used_extensions = list(dict.fromkeys(self.extension_dict[b] for b in self.used_short_titles))
x = self.used_short_titles
data_sheets = []
if sum(a["Link1"] for a in csv_list) != 0:
x.insert(0, "Link1")
for ext in used_extensions:
if ext =="any":continue
sheet1 = wb.add_sheet("Data {}".format(ext))
for i_b, b in enumerate(y for y in x if self.extension_dict[y] in ["any",ext]):
sheet1.write(0, i_b, b)
filtered_csv_list = csv_list if ext == ".log" else [a for a in csv_list if a["Link1"]==0]
data_sheets.append([sheet1,ext,filtered_csv_list])
sheet2 = wb.add_sheet('Labels')
for i_b, b in enumerate(x):
sheet2.write(i_b, 0, b)
sheet2.write(i_b, 1, self.label_dict[b])
# TODO Exception: Formula: unknown sheet name Path
sheet3 = wb.add_sheet('Path')
for i, a in enumerate(self.path_sheet): sheet3.write(i, 0, a)
for sheet1,ext,filtered_csv_list in data_sheets:
for i_a, a in enumerate(filtered_csv_list, start=1):
for i_b, b in enumerate(y for y in x if self.extension_dict[y] in ["any",ext]):
args = self.sheet_write_args(i_a,i_b,a,b)
sheet1.write(*args)
self.save_xls(wb)
def sheet_write_args(self,i_a,i_b,a,b):
if b =="Link1":
return [i_a, i_b, str(a[b]+1)]
elif b in self.need_style0 and is_str_float(a[b]):
return [i_a, i_b, float(a[b]), self.style0]
elif b in self.need_formula and a[b] not in [None, "-"]:
return [i_a, i_b, xlwt.Formula(a[b])]
elif b in self.need_formula and a[b] in [None, "-"]:
return [i_a, i_b, "-"]
else:
return [i_a, i_b, a[b]]
def gen_xls_normal(self,csv_list):
global frame_a
wb = Workbook()
sheet1 = wb.add_sheet('Data')
sheet2 = wb.add_sheet('Labels')
sheet3 = wb.add_sheet('Path')
for i,a in enumerate(self.path_sheet): sheet3.write(i,0,a)
x = self.used_short_titles
if sum(a["Link1"] for a in csv_list) != 0:
x.insert(0,"Link1")
for i_b, b in enumerate(x):
sheet2.write(i_b, 0, b)
sheet2.write(i_b, 1, self.label_dict[b])
for i_b, b in enumerate(x):
sheet1.write(0, i_b, b)
for i_a, a in enumerate(csv_list, start=1):
for i_b, b in enumerate(x):
args = self.sheet_write_args(i_a, i_b, a, b)
sheet1.write(*args)
self.save_xls(wb)
def save_xls(self,wb):
        global frame_a
while True:
try:
wb.save(frame_a.xls_path)
break
except PermissionError:
result = messagebox.askyesno(title="Error while saving xls file!",
message="It appears the following file is already open:\n{}\nDo you want to retry to overwrite it?\n(Please close the file before retrying)".format(frame_a.xls_path))
if not result: break
def analysis_generator(self,basenames,extensions):
for i,a in enumerate(basenames):
self.progress["value"] = int(i / len(basenames) * 100)
for file_dict in self.evaluate_file(a,extensions):
yield file_dict
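    # The method below builds the spreadsheet HYPERLINK formula that the 'Path' help sheet above
    # describes: cell A1 of the 'Path' sheet toggles every link between its absolute and relative
    # target. Illustrative example (assumed paths, not taken from a real run): for a file
    # C:/runs/job_0_LG.txt with the .xls saved in C:/runs, the emitted formula would look like
    #   HYPERLINK(IF(Path!A1="abs";"C:/runs/job_0_LG.txt";"job_0_LG.txt");"Link")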
def mk_hyperlink(self,x, y="Link"):
global frame_a
xls_path = os.path.dirname(frame_a.xls_path)
return 'HYPERLINK(IF(Path!A1="abs";"{}";"{}");"{}")'.format(x, (os.path.relpath(x, xls_path)), y)
def evaluate_file(self,a,extensions):
global frame_a
x = self.used_short_titles
row = {"Link1":0}
row.update({a: "-" for a in x})
print_exception = lambda e,a: print(f"Error:\n{e}\nOn file:\n{a}")
up = lambda a: row.update(a)
file_isfile = lambda name,ext: [y := os.path.normpath(name+ext),os.path.isfile(y)]
# BLANK
if (n:="Blank" ) in x: up({n:" " })
if (n:="Eh to kcal/mol") in x: up({n:"627.5095" })
if (n:="Eh to kJ/mol" ) in x: up({n:"2625.5002"})
#FILE PROPERTIES
up({"Filename":os.path.basename(a)})
#FOLDER PROPERTIES
fold_name = os.path.dirname(a)
up({"rel_path":os.path.relpath(fold_name, frame_a.in_folder)})
if (n:="Folder") in x: up({n: self.mk_hyperlink(fold_name,row["rel_path"])})
# XYZ PROPERTIES
if (ext:=".xyz") in extensions:
filename, is_file = file_isfile(a,ext)
up({".xyz":self.mk_hyperlink(filename) if is_file else None})
#INPUT PROPERTIES .GJF
if (ext:=".gjf") in extensions:
filename, is_file = file_isfile(a,ext)
if (n:= ".gjf" ) in x: up({n: self.mk_hyperlink(filename) if is_file else None})
if (n:=".gjf_#") in x:
inp = GjfFile(read_item(filename)) if is_file else False
up({n:inp.route_text() if inp else "-"})
#INPUT PROPERTIES .COM
if (ext:=".com") in extensions:
filename, is_file = file_isfile(a, ext)
if (n:=".com") in x: up({n: self.mk_hyperlink(filename) if is_file else None})
if (n:=".com_#") in x:
inp = GjfFile(read_item(filename)) if is_file else False
up({n: inp.route_text() if inp else "-"})
#INP PROPERTIES
if (ext:=".inp") in extensions:
filename, is_file = file_isfile(a, ext)
if (n:=".inp") in x: up({n: self.mk_hyperlink(filename) if is_file else None})
#OUT PROPERTIES
if (ext:=".out") in extensions:
filename, is_file = file_isfile(a, ext)
if (n:=".out") in x: up({n: self.mk_hyperlink(filename) if is_file else None})
#LOG PROPERTIES
if (ext:=".log") in extensions:
filename, is_file = file_isfile(a, ext)
if (n:=".log") in x: up({n: self.mk_hyperlink(filename) if is_file else None})
other_log_properties = []
for a in self.listbox_b.get(0,tk.END):
if self.dict_options[a][-1] == ".log":
if self.dict_options[a][0] != ".log":
other_log_properties.append(self.dict_options[a][0])
if other_log_properties and is_file:
logs = [LogFile(read_item(filename),self.split_jobs.get()) if is_file else False]
while True:
if hasattr(logs[-1],"link_one"): logs.append(getattr(logs[-1],"link_one"))
else: break
#print(logs)
for i,b in enumerate(logs):
if i > 0:
yield row
up({a[1]: "-" for a in other_log_properties})
up({"Link1":i})
#try:
if (n:=".log_#" )in x: up({n: b.raw_route if b.raw_route else "-"})
if (n:="E0" )in x: up({n: b.scf_done[-1][-1] if b.scf_done else "-"})
if (n:="iFreq" )in x: up({n: b.last_freq.n_ifreq() if b.last_freq else "-"})
if (n:="E_ZPE" )in x: up({n: b.thermal[0] if b.thermal[0] else "-"})
if (n:="E_tot" )in x: up({n: b.thermal[1] if b.thermal[1] else "-"})
if (n:="H_corr" )in x: up({n: b.thermal[2] if b.thermal[2] else "-"})
if (n:="G_corr" )in x: up({n: b.thermal[3] if b.thermal[3] else "-"})
if (n:="E0+E_ZPE" )in x: up({n: b.thermal[4] if b.thermal[4] else "-"})
if (n:="E0+E_tot" )in x: up({n: b.thermal[5] if b.thermal[5] else "-"})
if (n:="E0+H_corr" )in x: up({n: b.thermal[6] if b.thermal[6] else "-"})
if (n:="E0+G_corr" )in x: up({n: b.thermal[7] if b.thermal[7] else "-"})
if (n:="Done?" )in x: up({n: "Yes" if b.normal_termin else "No"})
if (n:="Error" )in x: up({n: b.error_msg if b else "-"})
if (n:="HOMO" )in x: up({n: b.homo[-1] if b.homo else "-"})
if (n:="LUMO" )in x: up({n: b.lumo[-1] if b.homo else "-"})
if (n:="HOMO-LUMO" )in x: up({n: b.homolumo[-1] if b.homolumo else "-"})
if (n:="Charge" )in x: up({n: b.charge_mult[0] if b.charge_mult else "-"})
if (n:="Mult" )in x: up({n: b.charge_mult[1] if b.charge_mult else "-"})
if (n:="n_SCF" )in x: up({n: len(b.scf_done) if b.scf_done else "-"})
if (n:="n_atoms" )in x: up({n: b.n_atoms if b.n_atoms else "-"})
if (n:="TYP" )in x: up({n: b.calc_type if b.calc_type else "-"})
if (n:="Needs refinement?")in x: up({n: b.needs_ref()})
if (n:="S**2 BA" )in x: up({n: b.s_squared[-1][1] if b.s_squared else "-"})
if (n:="S**2 After" )in x: up({n: b.s_squared[-1][2] if b.s_squared else "-"})
if (n:="LG" )in x: up({n: "\n".join(b.last_xyz_obj().return_print()) if b.last_xyz_obj() else None})
if (n:="MulkSpinDens" )in x: up({n: b.last_muliken_spin_density})
if (n:="LastIntCoord" )in x: up({n: b.last_internal_coord })
if (n:="MulkCharges" )in x: up({n: b.last_muliken_charges})
if (n:="ESPCharges" )in x: up({n: b.last_chelpg_charges })
if (n:="POPAnalysis" )in x: up({n: b.pop_analysis })
if (n:="NPAAnalysis" )in x: up({n: b.npa_analysis })
if (n:="APTCharges" )in x: up({n: b.last_apt_charges })
#except Exception as e:
# print_exception(e,b)
#finally:
# pass
#print(row)
yield row
def _used_short_titles(self):
return [self.dict_options[a][0] for a in self.listbox_b.get(0, tk.END)]
used_short_titles = property(_used_short_titles)
#GUI CREATION
root = tk.Tk()
root.title("chemxls v0.0.1")
root_row = 0
root.grid_columnconfigure(0,weight=1)
frame_a = FileFolderSelection(root)
frame_a.grid(column=0,row=root_row,sticky="news",padx="5")
root_row += 1
frame_b = ListBoxFrame(root)
frame_b.grid(column=0,row=root_row,sticky="news",padx="5")
root.grid_rowconfigure(root_row,weight=1)
root_row += 1
w, h = 925 if sys.platform == "win32" or os.name == "nt" else 1000, 685
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
root.minsize(w, h)
root.maxsize(ws, hs)
x = int(ws / 2 - w / 2)
y = int(hs / 2 - h / 2)
root.geometry(f"{w}x{h}+{x}+{y}")
root.mainloop()
|
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Drag
# parasite_drag_wing.py
#
# Created: Dec 2013, SUAVE Team
# Modified: Jan 2016, E. Botero
# Apr 2019, T. MacDonald
# Apr 2020, M. Clarke
# May 2021, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# local imports
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Helper_Functions import compressible_mixed_flat_plate
# suave imports
from SUAVE.Core import Data
# package imports
import numpy as np
from SUAVE.Methods.Aerodynamics.Supersonic_Zero.Drag.Cubic_Spline_Blender import Cubic_Spline_Blender
from SUAVE.Methods.Geometry.Two_Dimensional.Planform import segment_properties
# ----------------------------------------------------------------------
# Parasite Drag Wing
# ----------------------------------------------------------------------
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Drag
def parasite_drag_wing(state,settings,geometry):
"""Computes the parasite drag due to wings
Assumptions:
Basic fit
Source:
http://aerodesign.stanford.edu/aircraftdesign/aircraftdesign.html (Stanford AA241 A/B Course Notes)
Inputs:
settings.wing_parasite_drag_form_factor [Unitless]
state.conditions.freestream.
mach_number [Unitless]
temperature [K]
reynolds_number [Unitless]
geometry.
areas.reference [m^2]
chords.mean_aerodynamic [m]
thickness_to_chord [Unitless]
sweeps.quarter_chord [radians]
aspect_ratio [Unitless]
spans.projected [m]
areas.affected [m^2]
areas.wetted [m^2]
transition_x_upper [Unitless]
transition_x_lower [Unitless]
Outputs:
wing_parasite_drag [Unitless]
Properties Used:
N/A
"""
# unpack inputs
C = settings.wing_parasite_drag_form_factor
recalculate_total_wetted_area = settings.recalculate_total_wetted_area
freestream = state.conditions.freestream
# conditions
Mc = freestream.mach_number
Tc = freestream.temperature
re = freestream.reynolds_number
wing = geometry
wing_parasite_drag = 0.0
# Unpack wing
exposed_root_chord_offset = wing.exposed_root_chord_offset
t_c_w = wing.thickness_to_chord
Sref = wing.areas.reference
num_segments = len(wing.Segments.keys())
# if wing has segments, compute and sum parasite drag of each segment
xtu = wing.transition_x_upper
xtl = wing.transition_x_lower
if num_segments>0:
total_segment_parasite_drag = 0
total_segment_k_w = 0
total_segment_cf_w_u = 0
total_segment_cf_w_l = 0
total_segment_k_comp_u = 0
total_segment_k_comp_l = 0
total_k_reyn_u = 0
total_k_reyn_l = 0
if recalculate_total_wetted_area:
wing = segment_properties(wing,update_wet_areas=True)
for i_segs in range(num_segments):
segment = wing.Segments[i_segs]
if i_segs == num_segments-1:
continue
mac_seg = segment.chords.mean_aerodynamic
Sref_seg = segment.areas.reference
Swet_seg = segment.areas.wetted
sweep_seg = segment.sweeps.quarter_chord
# compute parasite drag coef., form factor, skin friction coef., compressibility factor and reynolds number for segments
segment_parasite_drag , segment_k_w, segment_cf_w_u, segment_cf_w_l, segment_k_comp_u, segment_k_comp_l, k_reyn_u ,k_reyn_l = compute_parasite_drag(re,mac_seg,Mc,Tc,xtu,xtl,sweep_seg,t_c_w,Sref_seg,Swet_seg,C)
total_segment_parasite_drag += segment_parasite_drag*Sref_seg
total_segment_k_w += segment_k_w*Sref_seg
total_segment_cf_w_u += segment_cf_w_u*Sref_seg
total_segment_cf_w_l += segment_cf_w_l*Sref_seg
total_segment_k_comp_u += segment_k_comp_u*Sref_seg
total_segment_k_comp_l += segment_k_comp_l*Sref_seg
total_k_reyn_u += k_reyn_u*Sref_seg
total_k_reyn_l += k_reyn_l*Sref_seg
wing_parasite_drag = total_segment_parasite_drag / Sref
k_w = total_segment_k_w / Sref
cf_w_u = total_segment_cf_w_u / Sref
cf_w_l = total_segment_cf_w_l / Sref
k_comp_u = total_segment_k_comp_u / Sref
k_comp_l = total_segment_k_comp_l / Sref
k_reyn_u = total_k_reyn_u / Sref
k_reyn_l = total_k_reyn_l / Sref
# if wing has no segments
else:
# wing
mac_w = wing.chords.mean_aerodynamic
sweep_w = wing.sweeps.quarter_chord
span_w = wing.spans.projected
Sref = wing.areas.reference
chord_root = wing.chords.root
chord_tip = wing.chords.tip
wing_root = chord_root + exposed_root_chord_offset*((chord_tip - chord_root)/span_w)
if recalculate_total_wetted_area or wing.areas.wetted==0.:
# calculate exposed area
if wing.symmetric:
S_exposed_w = wing.areas.reference - (chord_root + wing_root)*exposed_root_chord_offset
else:
S_exposed_w = wing.areas.reference - 0.5*(chord_root + wing_root)*exposed_root_chord_offset
if t_c_w < 0.05:
Swet = 2.003* S_exposed_w
else:
Swet = (1.977 + 0.52*t_c_w) * S_exposed_w
wing.areas.wetted = Swet
else:
Swet = wing.areas.wetted
# compute parasite drag coef., form factor, skin friction coef., compressibility factor and reynolds number for wing
wing_parasite_drag , k_w, cf_w_u, cf_w_l, k_comp_u, k_comp_l, k_reyn_u, k_reyn_l = compute_parasite_drag(re,mac_w,Mc,Tc,xtu,xtl,sweep_w,t_c_w,Sref,Swet,C)
# dump data to conditions
wing_result = Data(
wetted_area = wing.areas.wetted,
reference_area = Sref ,
parasite_drag_coefficient = wing_parasite_drag ,
skin_friction_coefficient = (cf_w_u+cf_w_l)/2. ,
compressibility_factor = (k_comp_u+k_comp_l)/2 ,
reynolds_factor = (k_reyn_u+k_reyn_l)/2 ,
form_factor = k_w ,
)
state.conditions.aerodynamics.drag_breakdown.parasite[wing.tag] = wing_result
return wing_parasite_drag
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Drag
def compute_parasite_drag(re,mac_w,Mc,Tc,xtu,xtl,sweep_w,t_c_w,Sref,Swet,C):
"""Computes the parasite drag due to wings
Assumptions:
Basic fit
Source:
adg.stanford.edu (Stanford AA241 A/B Course Notes)
Inputs:
re (Reynolds Number) [Unitless]
mac_w (Wing MAC) [m]
Mc (Mach Number) [Unitless]
Tc (Temperature) [K]
xtu (Upper Transition) [Unitless] (percent of chord)
xtl (Lower Transition) [Unitless] (percent of chord)
sweep_w (Wing Sweep) [rad]
t_c_w (Wing t/c) [Unitless]
Sref (Wing Ref Area) [m^2]
Swet (Wing Wetted Area) [m^2]
C (Form Factor) [Unitless]
Outputs:
(u is upper, l is lower)
wing_parasite_drag [Unitless]
k_w (Form Factor) [Unitless]
cf_w_u (Skin Friction Coefficient) [Unitless]
cf_w_l [Unitless]
k_comp_u (Compressibility Factor) [Unitless]
k_comp_l [Unitless]
k_reyn_u (Reynolds Factor) [Unitless]
k_reyn_l [Unitless]
Properties Used:
N/A
"""
# reynolds number
Re_w = re*mac_w
# skin friction coefficient, upper
cf_w_u, k_comp_u, k_reyn_u = compressible_mixed_flat_plate(Re_w,Mc,Tc,xtu)
# skin friction coefficient, lower
cf_w_l, k_comp_l, k_reyn_l = compressible_mixed_flat_plate(Re_w,Mc,Tc,xtl)
# correction for airfoils
cos_sweep = np.cos(sweep_w)
cos2 = cos_sweep*cos_sweep
ind = Mc <= 1.
k_w = np.ones_like(Mc)
k_w[ind] = 1. + ( 2.* C * (t_c_w * cos2) ) / ( np.sqrt(1.- Mc[ind]*Mc[ind] * cos2) ) \
+ ( C*C * cos2 * t_c_w*t_c_w * (1. + 5.*(cos2)) ) \
/ (2.*(1.-(Mc[ind]*cos_sweep)**2.))
spline = Cubic_Spline_Blender(.95,1.0)
h00 = lambda M:spline.compute(M)
k_w = k_w*(h00(Mc)) + 1*(1-h00(Mc))
# find the final result
wing_parasite_drag = k_w * cf_w_u * Swet / Sref /2. + k_w * cf_w_l * Swet / Sref /2.
return wing_parasite_drag , k_w, cf_w_u, cf_w_l, k_comp_u, k_comp_l, k_reyn_u, k_reyn_l
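# Hedged usage sketch (not part of SUAVE): evaluates the subsonic form-factor fit used in
# compute_parasite_drag above for a notional wing when this file is run directly. All values
# below are assumed, illustrative inputs rather than data from any SUAVE vehicle.
if __name__ == '__main__':
    Mc_demo    = np.array([0.3, 0.6, 0.8])   # assumed freestream Mach numbers (all subsonic)
    sweep_demo = np.radians(25.0)            # assumed quarter-chord sweep [rad]
    t_c_demo   = 0.12                        # assumed thickness-to-chord ratio
    C_demo     = 1.1                         # assumed wing parasite drag form factor setting
    cos2_demo  = np.cos(sweep_demo)**2
    k_w_demo   = 1. + (2.*C_demo*(t_c_demo*cos2_demo))/np.sqrt(1. - Mc_demo**2*cos2_demo) \
                 + (C_demo*C_demo*cos2_demo*t_c_demo**2*(1. + 5.*cos2_demo)) \
                 / (2.*(1. - (Mc_demo*np.cos(sweep_demo))**2))
    print(k_w_demo)  # the form factor grows with Mach number as compressibility effects build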
|
# Copyright (C) 2007 - 2012 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# SAM 6/12/12: I've moved the document pairer into its own file.
# SAM 5/9/12: Added initial support for doing alignment via Kuhn-Munkres.
from munkres import Munkres, make_cost_matrix
import MAT.Document
from MAT.Annotation import AnnotationAttributeType
#
# Utilities.
#
class PairingError(Exception):
pass
# Track, in a dictionary, whether something is a content tag or not.
def _cacheContentType(contentDict, atype, task, contentAnnotations, tokenAnnotations):
label = atype.lab
if not contentDict.has_key(label):
contentDict[label] = [atype.hasSpan, None]
if task:
contentDict[label][1] = task.getCategoryForLabel(label)
elif contentAnnotations and label in contentAnnotations:
contentDict[label][1] = "content"
elif tokenAnnotations and label in tokenAnnotations:
contentDict[label][1] = "token"
elif contentDict[label][0] != atype.hasSpan:
raise PairingError, ("label '%s' is both spanned and spanless in the label cache" % label)
return contentDict[label][1]
def checkContentTag(contentDict, label, task, contentAnnotations = None, tokenAnnotations = None):
return _cacheContentType(contentDict, label, task, contentAnnotations, tokenAnnotations) == "content"
def checkLexTag(contentDict, label, task, contentAnnotations = None, tokenAnnotations = None):
return _cacheContentType(contentDict, label, task, contentAnnotations, tokenAnnotations) == "token"
#
# PAIRING
#
# I'm going to use the Kuhn-Munkres algorithm as the basis for my annotation pairing.
# It will be enhanced in a number of ways.
# First, we're going to have similarity profiles which describe the
# dimensions which are going to contribute to the similarity, and
# the weights assigned to each value, and the pairing method we'll use.
# Ultimately, this will be XML-declarable in the task.xml file. But right
# now, we'll just ask the task. We'll have declared methods for
# the various attribute types and dimensions, and the possibility
# of defining your own. We'll allow each dimension to return a set
# of error tokens, which will be accumulated to indicate how many times an error
# of that type is encountered. You'll have the possibility of
# multiple errors for a given mismatch. For sets, we'll use (2 x size(intersection)) / (size(set1) + size(set2)),
# and for lists, we'll do the same with the longest common subsequence instead of
# the intersection. We'll have to adjust that for some of the more
# complex equality operations; I may need to do Kuhn-Munkres with the equality
# operation, in fact.
# Second, we're going to stratify the pairing. Because I need to use pairing information between
# annotations during computation of the contribution of annotation-valued attributes,
# the types which are pointed to must be paired before the types that point to them.
# Note that this is a TYPE-level operation, not an instance-level evaluation; if
# the FOO tag is a label restriction on the BAR attribute of the BAZ tag,
# even if no FOOs are ever values of the BAR attribute, FOOs must be paired first.
# This means that we have to have strata defined. We could deduce them, but
# that might not cohere with the user intent, and I want to force the user to
# realize that if FOO and BAZ are in different strata, they CAN'T pair with
# each other, even if they overlap. And you could imagine a scenario where the
# user wishes to force the possible pairing, even if it means using
# unstable scoring (next bullet).
# Third, we're going to have the possibility of unstable scoring. If
# annotation values are compared before they're paired, we'll have a fallback
# scoring mechanism which is unstable - it may reflect similarities contributing
# to the pairing, but between annotations which will ultimately not be paired.
# Fourth, we're going to have to watch explicitly for cycles - if you're
# comparing two annotations, and you encounter that pair again while you're
# comparing it, the second comparison should either contribute a 0 similarity
# or raise an error. Not sure what the right thing to do here is; raise an
# error until we figure it out.
# Fifth, we're going to need some sort of fallback in the case of more than
# two documents. We can't do the n-wise pairing, so we'll need a "pivot" document,
# which can be arbitrarily chosen, I suppose, to which all the other
# documents will be compared. But in the case where the pairs relate to a spurious
# element in the non-pivot, we'll have elements which might be reconcilable with
# EACH OTHER which result in separate pairwise spurious relations. I'm
# not sure what to do about that; should I start iterating through the
# other documents and try to "swallow" the spurious elements? Not sure.
# First step: refactor the scorer to have a separate pairer. The inputs that are
# relevant to the pairer are:
# task
# contentAnnotations
# tokenAnnotations
# equivalenceClasses
# labelsToIgnore
# For each document, when we add it, we need to specify, perhaps, which segments
# are pairable.
# If there's no task, we use the default similarity for the spanned or spanless
# annotation.
# So here's the thing about equivalence classes: you can only use them when
# determining the value of the label dimension - never when you're choosing
# the similarity. The similarity always works on the true label anyway.
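# Hedged illustration (not part of the MAT pairer itself): the set- and list-valued similarity
# described above, written out as standalone helpers. For sets this is the Dice overlap
# 2*|A & B| / (|A| + |B|); for lists, the intersection is replaced by the length of the longest
# common subsequence. The names and placement are illustrative only; the real computation lives
# in the SimilarityEngine used by PairState below.
def _demo_set_similarity(s1, s2):
    # Two empty sets count as a perfect match.
    s1, s2 = set(s1), set(s2)
    if not (s1 or s2):
        return 1.0
    return (2.0 * len(s1 & s2)) / (len(s1) + len(s2))
def _demo_lcs_length(l1, l2):
    # Classic dynamic-programming longest-common-subsequence length.
    prev = [0] * (len(l2) + 1)
    for x in l1:
        cur = [0]
        for j, y in enumerate(l2):
            cur.append(prev[j] + 1 if x == y else max(prev[j + 1], cur[j]))
        prev = cur
    return prev[-1]
def _demo_list_similarity(l1, l2):
    if not (l1 or l2):
        return 1.0
    return (2.0 * _demo_lcs_length(l1, l2)) / (len(l1) + len(l2))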
class PairState:
def __init__(self, task = None, contentAnnotations = None,
tokenAnnotations = None, equivalenceClasses = None,
labelsToIgnore = None, similarityProfile = None,
skipTokens = False):
self.task = task
self.contentAnnotations = contentAnnotations
self.tokenAnnotations = tokenAnnotations
self.equivalenceClasses = equivalenceClasses
self.labelsToIgnore = (labelsToIgnore and set(labelsToIgnore)) or None
if labelsToIgnore and equivalenceClasses:
for lab in labelsToIgnore:
if equivalenceClasses.has_key(lab):
print "Ignoring label '%s' in equivalence classes" % lab
del equivalenceClasses[lab]
self.contentTags = {}
self.simEngine = SimilarityEngine(self, similarityProfile = similarityProfile)
# For each document tuple added in addDocumentTuples (minus the
# ones that are skipped because the signals don't match), we
# create a final entry. If there are more than two docs in each
# tuple, we'll do the postprocessing to unify the pairs.
self.numDocs = None
self.resultEntries = []
self.skipTokens = skipTokens
# See _computeImpliedSpan.
self.impliedSpans = {}
# Each element in the list of tuples is a n-ary list of
# pairs: each pair is (filename, doc). Each entry in the list must
# be the same length. If segFilters is not None, it must be the same length
# as the entries, and must be a function which filters the segments in the
# corresponding doc.
# We also assume that the "pivot" document is the first document in each
# tuple.
def addDocumentTuples(self, tupleList, segFilters = None):
# Not sure who'd pass an empty tuple list, but it shouldn't break.
if not tupleList:
return
if self.numDocs is None:
self.numDocs = len(tupleList[0])
# Temporary blockage.
if self.numDocs > 2:
raise PairingError, "Pairer doesn't work yet with more than two documents; stay tuned."
for elt in tupleList:
if len(elt) != self.numDocs:
raise PairingError, "Not all document comparisons have the same number of documents"
if (segFilters is not None) and (len(segFilters) != self.numDocs):
raise PairingError, "Number of segment filters differs from the number of documents in the comparisons"
# 8/11/08: We need a very, very different algorithm for computing
# this information, because of how we need to align the overlapping
# but not matching elements. We also need to collect token-level
# numbers. So once we determine it's the same document, we need to
# separate lex tags from content tags, sort both, and number the lex tags,
# indexed by start and end indexes (so we can figure out how many
# tokens are covered by an annotation). Then we need to walk through
        # the content tags for one document, comparing them point by point
# with annotations from the other document, classifying them according
# to one of the six bins described above.
# Of course, sometimes we don't have tokens, in which case
# (a) we can't report token-level scores, and (b) we can't use
# the token indices to root the content tags.
# Finally, if there's a task, we should really use the zone annotations
# in the reference document as a filter on what we should score.
if self.task:
annotLabeler = lambda ann: self.task.getEffectiveAnnotationLabel(ann, useExtraDistinguishingAttributes = True,
restrictToCategory = "content")
else:
annotLabeler = lambda ann: ann.atype.lab
if self.equivalenceClasses:
coreAnnotLabeler = annotLabeler
def annotLabeler(ann):
lab = coreAnnotLabeler(ann)
return self.equivalenceClasses.get(lab, lab)
# Check for signals matching.
finalTupleList = []
for tpl in tupleList:
sig = tpl[0][1].signal
sigsMatch = True
for file, doc in tpl:
if doc.signal != sig:
                    sigsMatch = False
break
if not sigsMatch:
print "Signals don't match among documents %s; skipping." % ", ".join([t[0] for t in tuple])
else:
finalTupleList.append(tpl)
# For some reason, I think because I only want to compute the true task zone
# info once, I call processableRegions on whole bunches of documents.
# Now, if there are no segFilters above, then we don't need to filter the annotations.
# If there ARE seg filters, we should get the processable regions
# of the first doc if there WERE no seg filters, and in that case, see if each region list
# for each doc is the same as the bare one. If it is, then every doc is zoned the same
# way and everything's used; otherwise, we can't pair spanless annotations, because we
# don't know which ones "appear" in those zones.
        # Well, nothing is ever THAT simple. I STILL have to filter by the zones,
        # in case there are, perversely, annotations outside the zones which should be
# ignored. But I DO need to know whether regions were limited or not.
firstFilterList = MAT.Document.AnnotatedDoc.processableRegions([tpl[0][1] for tpl in finalTupleList],
task = self.task)
# I need this for spanless annotations, maybe. We'll use the
# same maxRegion for each document in each tuple.
maxRegionHash = {}
for i in range(self.numDocs):
maxRegionHash.update(dict([(d, (firstFilter[0][0], firstFilter[-1][1]))
for (d, firstFilter)
in zip([tpl[i][1] for tpl in finalTupleList], firstFilterList)]))
if segFilters is None:
filterRegionList = [(f, False) for f in firstFilterList]
else:
# Slice this by columns, so we can pass in a single segment filter function.
regionColumnLists = [MAT.Document.AnnotatedDoc.processableRegions([tpl[i][1] for tpl in finalTupleList], task = self.task,
segmentFilterFn = segFilters[i])
for i in range(self.numDocs)]
filterRegionList = []
for j in range(len(finalTupleList)):
tpl = finalTupleList[j]
firstFilter = firstFilterList[j]
regionFilters = [elt[j] for elt in regionColumnLists]
changedFilter = self._reduceFilters(firstFilter, regionFilters)
if changedFilter:
filterRegionList.append((changedFilter, True))
else:
filterRegionList.append((firstFilter, False))
        for j in range(len(finalTupleList)):
            regionFilters, filterChanged = filterRegionList[j]
            self._addDocumentTuple(finalTupleList[j], annotLabeler, regionFilters, filterChanged, maxRegionHash)
# Here, I see if the region filters are all identical to the first
# filter, and if they are, I return None, because we don't need to
# filter. If at any point, the lists differ, I return the actual region list,
# which is the intersection of all the region filters.
def _reduceFilters(self, firstFilter, regionFilters):
filterChanged = False
pivotRegions = firstFilter
for otherRegions in regionFilters:
if pivotRegions == otherRegions:
continue
else:
filterRegions = []
filterChanged = True
# Merge the ref and hyp regions. The algorithm is pretty simple. Both the lists
# will be in order. In each case, we loop while pivotRegions and otherRegions
# are both present. If one ends before the other, discard it.
# If the start of one precedes the other, move the
# earlier one forward. If they start at the same point, find the
# earliest end and add that as a region. Discard the shorter one
# and move the start index of the longer one.
while pivotRegions and otherRegions:
if pivotRegions[0][1] <= otherRegions[0][0]:
pivotRegions[0:1] = []
elif otherRegions[0][1] <= pivotRegions[0][0]:
otherRegions[0:1] = []
elif otherRegions[0][0] < pivotRegions[0][0]:
otherRegions[0][0] = pivotRegions[0][0]
elif pivotRegions[0][0] < otherRegions[0][0]:
pivotRegions[0][0] = otherRegions[0][0]
elif pivotRegions[0][1] < otherRegions[0][1]:
# They start at the same point, but ref ends earlier.
filterRegions.append((pivotRegions[0][0], pivotRegions[0][1]))
otherRegions[0][0] = pivotRegions[0][1]
pivotRegions[0:1] = []
elif otherRegions[0][1] < pivotRegions[0][1]:
# They start at the same point, but hyp ends earlier.
filterRegions.append((otherRegions[0][0], otherRegions[0][1]))
pivotRegions[0][0] = otherRegions[0][1]
otherRegions[0:1] = []
else:
# They start and end at the same point.
filterRegions.append((pivotRegions[0][0], pivotRegions[0][1]))
pivotRegions[0:1] = []
otherRegions[0:1] = []
pivotRegions = filterRegions
if filterChanged:
return pivotRegions
else:
# We don't need a filter, because the firstFilter is the filter
# without segfilters, and represents the whole doc.
return None
def _addDocumentTuple(self, tpl, annotLabeler, filterRegions, filterChanged, maxRegionHash):
ref, rDoc = tpl[0]
# This will be a list of pairs (spannedList, spanlessList) of annotations
# from the doc. This will
# be assured of being the same length as the hypStrata below;
# if there's no strata declared, there will be one stratum, and
# if there are strata declared, they'll be applied to both docs.
refStrata = self.simEngine.getDocStrata(rDoc, filterChanged)
# You can't filter spanless annotations. Actually, you can, by computing their
# implied regions.
filteredSpanned, filteredSpanless = self.filterByRegions(filterRegions, spannedLists = [p[0] for p in refStrata],
spanlessLists = [p[1] for p in refStrata],
maxRegionHash = maxRegionHash)
refStrata = zip(filteredSpanned, filteredSpanless)
finalRefStrata = []
for rSpanContent, rSpanlessContent in refStrata:
rSpanContent = [(annotLabeler(ann), ann) for ann in rSpanContent]
rSpanlessContent = [(annotLabeler(ann), ann) for ann in rSpanlessContent]
if self.labelsToIgnore:
rSpanContent = [(lab, ann) for (lab, ann) in rSpanContent if lab not in self.labelsToIgnore]
rSpanlessContent = [(lab, ann) for (lab, ann) in rSpanlessContent if lab not in self.labelsToIgnore]
finalRefStrata.append((rSpanContent, rSpanlessContent))
finalPairLists = []
finalTokPairLists = []
for target, tDoc in tpl[1:]:
pairs = []
finalPairLists.append(pairs)
pairsTokLevel = []
finalTokPairLists.append(pairsTokLevel)
hStrata = self.simEngine.getDocStrata(tDoc, filterChanged)
# Use the new processableRegions method to retrieve all the
# useable annotations. By default, everything is used, but we should
# be able to filter on gold annotations.
filteredSpanned, filteredSpanless = self.filterByRegions(filterRegions, spannedLists = [p[0] for p in hStrata],
spanlessLists = [p[1] for p in hStrata],
maxRegionHash = maxRegionHash)
hStrata = zip(filteredSpanned, filteredSpanless)
for (rSpanContent, rSpanlessContent), (hSpanContent, hSpanlessContent) in zip(finalRefStrata, hStrata):
hSpanContent = [(annotLabeler(ann), ann) for ann in hSpanContent]
hSpanlessContent = [(annotLabeler(ann), ann) for ann in hSpanlessContent]
if self.labelsToIgnore:
hSpanContent = [(lab, ann) for (lab, ann) in hSpanContent if lab not in self.labelsToIgnore]
hSpanlessContent = [(lab, ann) for (lab, ann) in hSpanlessContent if lab not in self.labelsToIgnore]
self._addDocumentTupleStratum(rSpanContent, hSpanContent, True, pairs, pairsTokLevel)
self._addDocumentTupleStratum(rSpanlessContent, hSpanlessContent, False, pairs, pairsTokLevel)
self._integratePairs(tpl, filterRegions, finalPairLists, finalTokPairLists)
def _addDocumentTupleStratum(self, rContent, hContent, isSpan, pairs, pairsTokLevel):
if not (rContent or hContent):
return
# What about overlaps and multiple spans on each side? The original
# algorithm didn't take that into account. In fact, the way it's sorted
# in multiple places clearly shows that all sorts of things would
# break.
# Tokens are going to be the same in both docs, so
# I only need to analyze one of them. But I only need to
# do this if the tokens are being collected. And if either
# the reference or the hypothesis doesn't have tokens, we
# shouldn't try, because it'll break and we don't
# have tokens.
# GAAAA. I have to make sure that whatever pairing I
# apply for the tags applies to the tokens as well. So
# the token algorithm has to change, completely. Ditto
# for the pseudo-tokens and characters. EVERYTHING
# starts with the annotation pairings.
# We'll collect triples of (ref, hyp, status),
# where status is one of "match", "tagclash", "undermark", "overmark",
# "tagplusundermark", "tagplusovermark", "overlap",
# "missing", "spurious". We'll loop through the ref, since
# we have no reason to pick one or the other. In some cases, we have to
# do this from the point of view of one side or the other.
# updateTagDetail() does it from the point of view of the hypothesis.
# In order to do this by tag, I have to subdivide the
# results by tag.
# We're going to collect both character counts and pseudo-tokens (see below).
thesePairs = self._pairAnnotations(rContent, hContent, isSpan = isSpan)
pairs += thesePairs
# print [(label, (ann and ann.start), (ann and ann.end), hLabel,
# (hAnn and hAnn.start), (hAnn and hAnn.end), refMatchStatus, hypMatchStatus)
# for [label, ann, refMatchStatus, hLabel, hAnn, hypMatchStatus] in pairs]
if isSpan and (not self.skipTokens):
self._pairTokenSpans(thesePairs, pairsTokLevel)
def _pairTokenSpans(self, pairs, pairsTokLevel):
# OK. Easy case first. Let's process the tag-level pairs. And, at the same time,
# collect the appropriate intervals for the token level. I need to do
# another round of pairing up on the non-overlapping sections, because
# things like this should be fine at the token level:
# ref: <TAG>a b c d</TAG>
# hyp: <TAG>a b</TAG> <TAG>c d</TAG>
# Don't forget: only the spanned annotations should be processed on
# the token level. Let's collect the misbehaving partial spans for
# later computation.
missingTokLevel = []
spuriousTokLevel = []
# This is a list where the entries are even longer than the pairs:
# [rLab, rAnn, rStatus, hLab, hAnn, hStatus, start, end]
# I'll need this as I move to more elaborate scoring.
remainingTokIndexes = set()
for [label, ann, refMatchStatus, hLabel, hAnn, hypMatchStatus] in pairs:
hEntry = (hLabel, hAnn)
if refMatchStatus == "missing":
missingTokLevel.append((label, ann, ann.start, ann.end))
remainingTokIndexes.update([ann.start, ann.end])
elif hypMatchStatus == "spurious":
spuriousTokLevel.append((hLabel, hAnn, hAnn.start, hAnn.end))
remainingTokIndexes.update([hAnn.start, hAnn.end])
else:
# Ultimately, we want to be able to report all counts. But what
# should the report look like for the clash cases? I need
# a couple of different categories: "refclash" and "hypclash".
# "spanclash" gets one of each on the given tag;
# the other clashes get one of each on the corresponding
# tag.
if ann.start < hAnn.start:
missingTokLevel.append((label, ann, ann.start, hAnn.start))
remainingTokIndexes.update([ann.start, hAnn.start])
elif ann.start > hAnn.start:
spuriousTokLevel.append((hLabel, hAnn, hAnn.start, ann.start))
remainingTokIndexes.update([hAnn.start, ann.start])
if ann.end > hAnn.end:
missingTokLevel.append((label, ann, hAnn.end, ann.end))
remainingTokIndexes.update([hAnn.end, ann.end])
elif ann.end < hAnn.end:
spuriousTokLevel.append((hLabel, hAnn, ann.end, hAnn.end))
remainingTokIndexes.update([ann.end, hAnn.end])
# I used to be able to make some assertion about the
# match status of the spans in the overlap, but no
# longer, with the new profiles - they have to
# go through the same matching as all the others.
pairsTokLevel += self._pairRemainingTokSpans(max(hAnn.start, ann.start), min(hAnn.end, ann.end), [[label, ann]], [[hLabel, hAnn]])
# Now, let's do the tokens. The way we do this is we create a
# mapping from the token starts and ends to its interval. Then, we can
# figure out what to add for each pair.
# At the same time, we can do the pseudo-tokens and the characters,
# because they're all span-free.
# But I also need to do the accumulations for pseudotag and character counts. Note the
# comment above; these:
# ref: <TAG>a b c d</TAG>
# hyp: <TAG>a b</TAG> <TAG>c d</TAG>
# should score perfectly. Because the phrase-level loop pairs annotations,
# I have to do collection ONLY during the loop, and then update the accumulation dictionaries
# afterward. The collection can be the same for both pseudotag and character, and is just
# the relevant indices and then what label was found in the ref and content.
# What this means is that I can't just go through the pairs as is, because
# they will fail in the case above. I need to maintain the label pairs,
# because they need to be consistent with the tag level scoring; but the span
# boundaries need to be erased before anything else happens.
# What this amounts to is that I need to regenerate the intervals, but
# I need to take spurious and missing, AFTER I'VE FIGURED OUT THE PORTIONS
# THAT CORRESPOND TO THEM, and pair THEM up.
# Do these need to be paired differently for tokens and pseudo tokens and
# characters? At one point, I recognized that I needed to strip whitespace
# from the edges of pseudo tokens to pair them up correctly, but not from
# characters.
# Yeah. Here's the problem:
# ref: <TAG>a b c d</TAG>
# hyp: <TAG>a b</TAG> <TAG>c d e</TAG>
# If I pair them, the second ref is missing - but if I work
# through the pairs, rather than the annotation indices, I'll never be able
# to match the remainders. I thought maybe I'd have to do tokens separately,
# but I think if at any point, I can't find a token boundary for the annotation
# boundary, I'll bail on token scoring.
# So we have to do another round of pairing. Do I try to use _pairAnnotations?
# I think not. The fit is not great. I need first to segment the regions
# I've collected based on the available indices. In each region, I perform
# my pairing algorithm.
remainingTokIndexes = list(remainingTokIndexes)
remainingTokIndexes.sort()
# Can't use enumerate() because the mapping needs to be reversed.
tokIdxMapping = {}
j = 0
for i in remainingTokIndexes:
tokIdxMapping[i] = j
j += 1
# Mapping from intervals to label-annotation pairs.
intervals = {}
for lab, ann, start, end in missingTokLevel:
allIndexes = remainingTokIndexes[tokIdxMapping[start]:tokIdxMapping[end] + 1]
i = 1
while i < len(allIndexes):
intvl = (allIndexes[i - 1], allIndexes[i])
try:
intervals[intvl][0].append((lab, ann))
except KeyError:
intervals[intvl] = [(lab, ann)], []
i += 1
for lab, ann, start, end in spuriousTokLevel:
allIndexes = remainingTokIndexes[tokIdxMapping[start]:tokIdxMapping[end] + 1]
i = 1
while i < len(allIndexes):
intvl = (allIndexes[i - 1], allIndexes[i])
try:
intervals[intvl][1].append((lab, ann))
except KeyError:
intervals[intvl] = [], [(lab, ann)]
i += 1
# Now, pair the remaining spans.
for (start, end), (rEntries, hEntries) in intervals.items():
pairsTokLevel += self._pairRemainingTokSpans(start, end, rEntries, hEntries)
def _integratePairs(self, t, filterRegions, finalPairLists, finalTokPairLists):
if (len(finalPairLists) == 1) and (len(finalTokPairLists) == 1):
finalPairs = finalPairLists[0]
finalTokPairs = finalTokPairLists[0]
else:
raise PairingError, "Can't integrate pairs from more than two documents"
# The filter regions are needed downstream in the scorer.
self.resultEntries.append({"pairs": finalPairs, "tokenPairs": finalTokPairs, "tuple": t, "filterRegions": filterRegions})
# entries are refstart, refend, hstart, hend
@staticmethod
def _buildIndexTable(rContent, hContent, startEndTbl):
indexTbl = {}
for label, ann in rContent:
start, end = startEndTbl[ann]
try:
indexTbl[start][0].append((label, ann))
except KeyError:
indexTbl[start] = [[(label, ann)], [], [], []]
try:
indexTbl[end][1].append((label, ann))
except KeyError:
indexTbl[end] = [[], [(label, ann)], [], []]
for label, ann in hContent:
start, end = startEndTbl[ann]
try:
indexTbl[start][2].append((label, ann))
except KeyError:
indexTbl[start] = [[], [], [(label, ann)], []]
try:
indexTbl[end][3].append((label, ann))
except KeyError:
indexTbl[end] = [[], [], [], [(label, ann)]]
# OK, all sorted by indexes.
allIndices = indexTbl.keys()
allIndices.sort()
return indexTbl, allIndices
# Kuhn-Munkres algorithm. The idea is that we find all the annotations
# which are overlap chunks: contiguous spans where the "cover count" remains > 0.
# Then if the r dimension is empty, they're missing; if h is empty, they're spurious;
# if it's one-to-one, pair them; otherwise, run Kuhn-Munkres.
# If they're spanless, we compute the start and end spans by
# finding the implied extent of the annotation, by looking at the
# first start and last end of the annotations they point to.
# If that fails, they have to match by label, and THOSE
# are paired.
def _pairAnnotations(self, rContent, hContent, isSpan = True):
pairs = []
if isSpan:
self._pairSpannedAnnotations(rContent, hContent, pairs,
dict([(a, (a.start, a.end)) for (lab, a) in rContent] + \
[(a, (a.start, a.end)) for (lab, a) in hContent]))
else:
impliedSpanRContent = []
impliedSpanHContent = []
# From labels to accumulated reference and hypothesis.
labDict = {}
startEndTbl = {}
for lab, ann in rContent:
start, end = self._computeImpliedSpan(ann, self.impliedSpans)
if start is None:
try:
labDict[ann.atype.lab][0].append((lab, ann))
except KeyError:
labDict[ann.atype.lab] = ([(lab, ann)], [])
else:
impliedSpanRContent.append((lab, ann))
startEndTbl[ann] = (start, end)
for lab, ann in hContent:
start, end = self._computeImpliedSpan(ann, self.impliedSpans)
if start is None:
try:
labDict[ann.atype.lab][1].append((lab, ann))
except KeyError:
labDict[ann.atype.lab] = ([], [(lab, ann)])
else:
impliedSpanHContent.append((lab, ann))
startEndTbl[ann] = (start, end)
if impliedSpanRContent or impliedSpanHContent:
self._pairSpannedAnnotations(impliedSpanRContent, impliedSpanHContent, pairs, startEndTbl)
for lab, (accumRef, accumHyp) in labDict.items():
pairs += self._pairAnnotationsAtInterval(accumRef, accumHyp)
return pairs
def _pairSpannedAnnotations(self, rContent, hContent, pairs, startEndTbl):
indexTbl, allIndices = self._buildIndexTable(rContent, hContent, startEndTbl)
curRef = set()
curHyp = set()
accumRef = []
accumHyp = []
for i in allIndices:
[rStart, rEnd, hStart, hEnd] = indexTbl[i]
preSize = len(curRef) + len(curHyp)
if rEnd:
curRef -= set(rEnd)
if hEnd:
curHyp -= set(hEnd)
if (not curRef) and (not curHyp) and (preSize > 0):
pairs += self._pairAnnotationsAtInterval(accumRef, accumHyp)
accumRef = []
accumHyp = []
if rStart:
accumRef += rStart
curRef |= set(rStart)
if hStart:
accumHyp += hStart
curHyp |= set(hStart)
# This is used for the spanless annotations.
@classmethod
def _computeImpliedSpan(cls, a, impliedSpans):
if a.atype.hasSpan:
return (a.start, a.end)
else:
try:
return impliedSpans[a]
except KeyError:
start = end = None
for attrObj, val in zip(a.atype.attr_list, a.attrs):
if (attrObj._typename_ == "annotation") and (val is not None):
if not attrObj.aggregation:
thisStart, thisEnd = cls._computeImpliedSpan(val, impliedSpans)
if (start is None) or (thisStart < start):
start = thisStart
if (end is None) or (thisEnd > end):
end = thisEnd
else:
for subval in val:
thisStart, thisEnd = cls._computeImpliedSpan(subval, impliedSpans)
if (start is None) or (thisStart < start):
start = thisStart
if (end is None) or (thisEnd > end):
end = thisEnd
# Might be (None, None)
impliedSpans[a] = (start, end)
return (start, end)
def _pairAnnotationsAtInterval(self, accumRef, accumHyp, duringTokenRemainder = False):
if not accumHyp:
return [[lab, ann, "missing", None, None, None] for (lab, ann) in accumRef]
elif not accumRef:
return [[None, None, None, lab, ann, "spurious"] for (lab, ann) in accumHyp]
else:
if (len(accumHyp) == 1) and (len(accumRef) == 1):
hLab, hAnn = accumHyp[0]
rLab, rAnn = accumRef[0]
# They pair with each other, but we definitely need to compute
# their similarity.
r, dimSim, errToks = self.simEngine.computeSimilarity(accumRef[0], accumHyp[0],
useTokenSimilarity = duringTokenRemainder)
# If their similarity is 0, we don't actually care - these things
# MUST pair with each other. Actually, when we run
# Kuhn-Munkres, we make these missing and spurious, so
# we should do the same here. But we have to be sure that
# in the token case, the span counts as matching.
if r == 0:
return [[rLab, rAnn, "missing", None, None, None],
[None, None, None, hLab, hAnn, "spurious"]]
else:
rStatus, hStatus = self._computePairStatuses(r, errToks)
if not duringTokenRemainder:
self.simEngine.recordPair(rAnn, hAnn)
return [[rLab, rAnn, rStatus, hLab, hAnn, hStatus]]
else:
# Run Kuhn-Munkres.
return self._kuhnMunkres(accumRef, accumHyp,
duringTokenRemainder = duringTokenRemainder)
def _kuhnMunkres(self, accumRef, accumHyp, duringTokenRemainder = False):
# one row for each item in the accumRef, one column for each item in
# accumHyp. Compute similarity for each pair. Note that the computeSimilarity
# method returns a triple, and in the cost matrix, I only want the first value.
computeSimilarity = self.simEngine.computeSimilarity
matrix = make_cost_matrix([[computeSimilarity(r, h,
useTokenSimilarity = duringTokenRemainder)[0] for h in accumHyp]
for r in accumRef],
lambda cost: 1.0 - cost)
newPairs = []
# If accumRef and accumHyp are not the same length, some of them might
# not be matched.
indexPairs = Munkres().compute(matrix)
# print "result", indexPairs, simVals
for row, column in indexPairs:
try:
rLab, rAnn = accumRef[row]
except IndexError:
# hyp matched with nothing. Handle later.
continue
try:
hLab, hAnn = accumHyp[column]
except IndexError:
# ref matched with nothing. Handle later.
continue
r, dimSim, errToks = self.simEngine.similarityCache[(rAnn, hAnn)]
if r == 0:
# Not sure if this can ever happen, but I'm pretty sure it can.
newPairs += [[rLab, rAnn, "missing", None, None, None],
[None, None, None, hLab, hAnn, "spurious"]]
else:
# Compute the status from the errToks. Soon, this will be
# passed right through.
rStatus, hStatus = self._computePairStatuses(r, errToks)
newPairs.append([rLab, rAnn, rStatus, hLab, hAnn, hStatus])
# We need to record the pair so that the similarity engine knows
# later what was paired.
if not duringTokenRemainder:
self.simEngine.recordPair(rAnn, hAnn)
if len(accumHyp) < len(accumRef):
# Some of the refs aren't matched. Collect all possible indices
# of the refs and remove the row indices.
newPairs += [list(accumRef[i]) + ["missing", None, None, None]
for i in set(range(len(accumRef))) - set([p[0] for p in indexPairs])]
elif len(accumRef) < len(accumHyp):
# Some of the hyps aren't matched. Collect all possible indices
# of the hyps and remove the column indices.
newPairs += [[None, None, None] + list(accumHyp[i]) + ["spurious"]
for i in set(range(len(accumHyp))) - set([p[1] for p in indexPairs])]
return newPairs
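    # An illustrative sketch of the assignment step above, using the same munkres
    # calls as the code (the similarity values are made up):
    #     sims = [[0.9, 0.1],
    #             [0.2, 0.8]]
    #     matrix = make_cost_matrix(sims, lambda cost: 1.0 - cost)
    #     Munkres().compute(matrix)  ->  [(0, 0), (1, 1)]
    # i.e. ref 0 pairs with hyp 0 and ref 1 with hyp 1, which maximizes the total
    # similarity (0.9 + 0.8) by minimizing the inverted costs.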
# Unless it's missing or spurious or match, there's going to be a set of
# tokens in the errToks.
def _computePairStatuses(self, r, errToks):
if r == 1.0:
return "match", "match"
elif not errToks:
# These don't match, but we don't know why.
return set(["unknownclash"]), set(["unknownclash"])
else:
rToks, hToks = errToks
return rToks, hToks
# Remember, spans are no longer a candidate comparison, so
# we just compare by label.
def _pairRemainingTokSpans(self, start, end, rEntries, hEntries):
# reminder for pairsTokLevel:
# [rLab, rAnn, rStatus, hLab, hAnn, hStatus, start, end]
return [r + [start, end] for r in self._pairAnnotationsAtInterval(rEntries, hEntries,
duringTokenRemainder = True)]
# In order to deal with spanless annotations via their implied spans,
# I'm going to do some fancy dancing.
@classmethod
def filterByRegions(cls, orderedRegionList, spannedLists = None, spanlessLists = None, maxRegionHash = None):
# Ensure the annot lists are ordered, and collect the things that
# we can get implied spans for. If we can't compute a span,
# use the max region extents for the document.
spannedTuples = []
if spannedLists:
for l in spannedLists:
                l.sort(key=lambda x: x.start)
spannedTuples = [[(a, a.start, a.end) for a in spannedList] for spannedList in spannedLists]
spanlessTuples = []
if spanlessLists:
impliedSpans = {}
for spanlessList in spanlessLists:
tupleList = []
for a in spanlessList:
start, end = cls._computeImpliedSpan(a, impliedSpans)
if start is not None:
tupleList.append((a, start, end))
else:
try:
start, end = maxRegionHash[a.doc]
tupleList.append((a, start, end))
except KeyError:
pass
                tupleList.sort(key=lambda x: x[1])
spanlessTuples.append(tupleList)
annotLists = spannedTuples + spanlessTuples
annotListCount = len(annotLists)
idxList = [0] * annotListCount
finalLists = [[] for l in annotLists]
rI = 0
while rI < len(orderedRegionList):
curRegion = orderedRegionList[rI]
rI += 1
j = 0
while j < annotListCount:
annotTupleList = annotLists[j]
finalList = finalLists[j]
while idxList[j] < len(annotTupleList):
curA, start, end = annotTupleList[idxList[j]]
if start < curRegion[0]:
# discard.
idxList[j] += 1
elif end <= curRegion[1]:
# keep.
finalList.append(curA)
idxList[j] += 1
else:
# wait.
break
j += 1
# So we need to return the spanned tuples and the spanless tuples
# separately.
return tuple(finalLists[:len(spannedTuples)]), tuple(finalLists[len(spannedTuples):])
# Next, we need to deal with the similarity profiles.
# What will be the shape of this? The engine should deal with all the types.
# What gets passed in? The strata are above the level of the individual profile.
# <similarity_profile labels="...">
# <stratum>label,label...</stratum> // if not defined, use labels
# <tag_profile labels="...">
# <dimension name="..." weight="..." method="..."/>
# We should barf if the annotations require stratification but currently
# aren't stratified. The similarity profiles and tag profiles will all be
# by actual label, not effective label. The similarities between non-matching
# labels in a stratum should be the lesser of the two similarities in each
# direction (i.e., assuming any attribute dimensions don't match).
# The default profile, whatever it is, only applies to spanned annotations.
# If there's no task, we have to figure this out on a document-by-document basis.
# And if it's already been determined and it's different in the current doc,
# we should scream.
# If there's no task, there can't be a profile. If there's no profile, there's
# very little you can do; you can basically use the default spanned comparator
# for the spanned annotations, and that's it. Not awful, but not much.
class SimilarityEngine:
def __init__(self, pairer, similarityProfile = None):
self.pairer = pairer
self.strata = None
self.profileName = similarityProfile
# This is a map from string labels to an index into the
        # list of strata. Used to determine whether labels are
# on the same stratum or not.
self.labelToStratumIdx = {}
self.profile = None
# This is a map from pairs of atype labels (NOT effective labels) to
# params to the computeSimilarity method.
self.methodMap = {}
# This is a map from atype labels (NOT effective labels) to
        # params to the computeSimilarity method. It differs from the
# one above because it should (a) lack the attribute annotations
# (which are never paired for labels which don't share a tag profile)
# and (b) have use_dead_weight set.
self.unpairedMethodMap = {}
self._spannedDefault = {"dimensions": [LabelComparisonClass(self, [], "_label", None, .1,
{"true_residue": .5},
None, None),
SpanComparisonClass(self, [], "_span", None, .9,
{}, None, None),
NonAnnotationAttributeRemainderClass(self, [], "_nonannotation_attribute_remainder",
None, .1, {}, None, None),
AnnotationAttributeRemainderClass(self, [], "_annotation_attribute_remainder",
None, .1, {}, None, None)
]}
self._spanlessDefault = {"dimensions": [LabelComparisonClass(self, [], "_label", None, .2,
{"true_residue": .5},
None, None),
NonAnnotationAttributeRemainderClass(self, [], "_nonannotation_attribute_remainder",
None, .2, {}, None, None),
AnnotationAttributeRemainderClass(self, [], "_annotation_attribute_remainder",
None, .6, {}, None, None)
]}
if pairer.task:
self.profile = pairer.task.getSimilarityProfile(name = self.profileName) or {}
self._compileStrata()
self._compileProfile()
else:
self.computeSimilarity = self.computeSimilarityTaskless
# This is a mapping from pairs of annotations to similarities.
self.similarityCache = {}
self.pairCache = set()
def _compileProfile(self):
atp = self.pairer.task.getAnnotationTypeRepository()
recordedLabels = set()
for tp in self.profile.get("tag_profiles", []):
# These are all the labels to which the profile applies.
labs = tp["true_labels"]
attrEquivs = tp["attr_equivalences"] or {}
# Check to make sure that an attribute
# occurs only once.
reverseEquivs = {}
for eqv, names in attrEquivs.items():
for name in names:
if reverseEquivs.has_key(name):
raise PairingError, ("a tag profile in a similarity profile in task '%s' specifies the attribute '%s' in more than one attribute equivalence" % (self.pairer.task.name, name))
reverseEquivs[name] = eqv
allAtypes = []
for l in labs:
recordedLabels.add(l)
atype = atp.get(l)
if not atype:
raise PairingError, ("label '%s' in tag profile for similarity profile in task %s is unknown" % \
(l, self.pairer.task.name))
allAtypes.append(atype)
labEntry = {"dimensions": [], "dead_weight": 0}
totalWeight = 0
if not tp["dimensions"]:
raise PairingError, ("no dimensions in tag profile in task %s" % self.pairer.task.name)
for dim in tp["dimensions"]:
dimName = dim["name"]
dimWeight = float(dim["weight"])
totalWeight += dimWeight
# This has a default.
dimMethod = dim.get("method")
# If this is an attribute, and it's an aggregation, and you don't
# want to use the default aggregation techniques, do this.
dimAggregatorMethod = dim.get("aggregator_method")
dimParams = dim.get("params") or {}
# This can be defined, but is optional. If not present,
# the params are treated as strings, which may not be
# very efficient if your custom method has params which
# should be digested into floats or something.
dimDigester = dim.get("param_digester_method")
self._compileDimensionComparator(labEntry, allAtypes, attrEquivs,
dimName, dimWeight, dimMethod,
dimAggregatorMethod, dimParams, dimDigester)
if totalWeight == 0:
# There won't be any denominator.
raise PairingError, ("tag profile in task '%s' has a total weight of 0" % self.pairer.task.name)
# By now we've confirmed that every one of these labels has the dimensions
# specified, so we record all these pairs. For pairs that do NOT exist,
# if the labels are in the same stratum, we compare them without the
# attributes, using the dead weight, and take the minimum; otherwise,
# we return 0.
for lab in labs:
# Assemble the dead weight from the attributes for the unpairedMethodMap.
self.unpairedMethodMap[lab] = {"dimensions": [d for d in labEntry["dimensions"]
if not isinstance(d, AttributeComparisonClass)],
"use_dead_weight": True,
"dead_weight": sum([d.weight for d in labEntry["dimensions"]
if isinstance(d, AttributeComparisonClass)])}
for otherLab in labs:
self.methodMap[(lab, otherLab)] = labEntry
self.methodMap[(otherLab, lab)] = labEntry
# WHAT ABOUT DEFAULTS? There are two levels: what happens when you have
# no task, and what happens when you have a task but no similarity.
# The "no task" case is handled elsewhere. Here, we should record every
# possible annotation listed in the strata. Use the default comparison
# whenever there's no entry.
for spanned, spanless in self.strata:
spannedSet = set(spanned)
spannedSet -= recordedLabels
spanlessSet = set(spanless)
spanlessSet -= recordedLabels
# And finally, there needs to be an unpaired method
# map for all of these as well.
for l in spannedSet:
entry = self._spannedDefault
self.unpairedMethodMap[l] = {"dimensions": [d for d in entry["dimensions"]
if not isinstance(d, AttributeComparisonClass)],
"use_dead_weight": True,
"dead_weight": sum([d.weight for d in entry["dimensions"]
if isinstance(d, AttributeComparisonClass)])}
for l2 in spannedSet:
# All elements in the same stratum which are unprofiled
# labels can be compared to each other.
self.methodMap[(l, l2)] = entry
self.methodMap[(l2, l)] = entry
for l in spanlessSet:
entry = self._spanlessDefault
self.unpairedMethodMap[l] = {"dimensions": [d for d in entry["dimensions"]
if not isinstance(d, AttributeComparisonClass)],
"use_dead_weight": True,
"dead_weight": sum([d.weight for d in entry["dimensions"]
if isinstance(d, AttributeComparisonClass)])}
for l2 in spanlessSet:
self.methodMap[(l, l2)] = entry
self.methodMap[(l2, l)] = entry
# Each dimMethod is a function(rVal, hVal, **dimParams) -> (sim between 0 and 1, optional list of (error token from ref POV, error token from hyp POV))
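    # For instance, a hypothetical custom method obeying that contract (the error
    # token names here are arbitrary) might be:
    #     def caselessEquality(rVal, hVal, **params):
    #         if (rVal or "").lower() == (hVal or "").lower():
    #             return 1.0, None
    #         return 0.0, [("attrclash", "attrclash")]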
def _compileDimensionComparator(self, labEntry, allAtypes, attrEquivs,
dimName, dimWeight, dimMethod,
dimAggregatorMethod, dimParams, dimDigester):
comparisonClass = _SPECIAL_COMPARISON_TYPES.get(dimName)
if comparisonClass:
c = comparisonClass(self, allAtypes, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester)
else:
if dimName.find(",") > -1:
# Multiple attributes. Our comparison class will be MultiAttributeComparisonClass.
dimNames = [s.strip() for s in dimName.split(",")]
attrAggrTriples = [self._ensureCommonAttrData(allAtypes, attrEquivs, d) for d in dimNames]
c = MultiAttributeComparisonClass(self, dimNames, attrAggrTriples,
dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester)
else:
attrType, aggrType, dimMap = self._ensureCommonAttrData(allAtypes, attrEquivs, dimName)
comparisonClass = _ATTR_COMPARISON_TYPES[attrType]
c = comparisonClass(self, dimName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester)
labEntry["dimensions"].append(c)
def _ensureCommonAttrData(self, allAtypes, attrEquivs, candidateDimName):
# It's gotta be an attribute type.
# Make sure all the types are the same.
# If the dimName appears in the attrEquivs, the atype
# has to have one of the attrs in the equiv.
dimMapRequired = False
try:
dimNames = attrEquivs[candidateDimName]
dimMapRequired = True
except KeyError:
dimNames = [candidateDimName]
attrType = None
# I need to start out with something that isn't None.
aggrType = 0
# From label to true attr.
dimMap = {}
for atype in allAtypes:
dimName = None
for dn in dimNames:
if atype.attr_table.has_key(dn):
dimName = dn
if not dimName:
raise PairingError, ("The attribute '%s' specified as a dimension for label '%s' in the tag profile in a similarity profile for task %s is unknown" % (candidateDimName, atype.lab, self.pairer.task.name))
attr = atype.attr_list[atype.attr_table[dimName]]
if (attrType is not None) and (attrType != attr._typename_):
raise PairerError, ("The attribute '%s' specified as a dimension for label '%s' in the tag profile in a similarity profile for task %s has a different type than it does in another label in that tag_profile" % (dimName, atype.lab, self.pairer.task.name))
if (aggrType is not 0) and (aggrType != attr.aggregation):
raise PairerError, ("The attribute '%s' specified as a dimension for label '%s' in the tag profile in a similarity profile for task %s has a different aggregation type than it does in another label in that tag_profile" % (dimName, atype.lab, self.pairer.task.name))
attrType = attr._typename_
aggrType = attr.aggregation
dimMap[atype.lab] = dimName
return attrType, aggrType, ((dimMapRequired and dimMap) or None)
# If there's no task, I can't check stratification, really, because I have no
# document-independent way of assessing the elements. However, I only need to
# stratify if any of the annotations referenced in the tag profiles have
# annotation-valued attributes which are being compared; and that can't
# happen if there's no task.
# It ought to be a bug to not assign some content annotations to strata.
# You need to pair everything that's pointed to by other things; that's
# part of what's checked in the compilation.
def _compileStrata(self):
try:
strata = self.profile["strata"]
except KeyError:
strata = [self.pairer.task.getAnnotationTypesByCategory("content")]
# Each compiled stratum is a pair of (spanned, spanless). They may never
# pair with each other. The spanned are always done first; that should
# make for a useful default if we ever develop a default comparison
# for spanless annotations.
globalTable = self.pairer.task.getAnnotationTypeRepository()
strataToCheck = []
self.strata = []
self.labelToStratumIdx = {}
for s in strata:
stratum = ([], [])
aLabStratum = ([], [])
for aLab in s:
if self.labelToStratumIdx.has_key(aLab):
raise PairingError, ("label '%s' appears more than once in similarity stratum for task %s" % \
(aLab, self.pairer.task.name))
self.labelToStratumIdx[aLab] = len(self.strata)
atype = globalTable.get(aLab)
if atype is None:
raise PairingError, ("unknown label '%s' in similarity stratum for task %s" % (aLab, self.pairer.task.name))
# Don't need the other arguments. This may be redundant, because if there are no
# strata, we don't need to check this, since we know that it's already
# content. But this will get it into the table, which is also useful.
if not checkContentTag(self.pairer.contentTags, atype, self.pairer.task, None, None):
raise PairingError, ("label '%s' in similarity stratum for task %s is not a content tag" % \
(aLab, self.pairer.task.name))
if atype.hasSpan:
stratum[0].append(atype)
aLabStratum[0].append(aLab)
else:
stratum[1].append(atype)
aLabStratum[1].append(aLab)
strataToCheck.append(stratum)
self.strata.append(aLabStratum)
self._checkStratification(strataToCheck)
# If there are no strata, we have to compile the local strata
# and check the stratification. At one point, we were filtering
# out spanless annotations when we were filtering regions, because
# the spans couldn't be computed, but now that I'm computing
# implied spans, that should work fine. Can we still end up
# in a situation where spanless annotations in stratum n + 1
# point to things that weren't paired because they were filtered
# out in stratum n? No, because implied spans is transitive,
# sort of - if you point to an annotation, your implied span
# is at least the implied span of that annotation. So if
# annotation A is filtered out in stratum n, anything that
# points to it will be filtered out in stratum n + 1.
def getDocStrata(self, doc, filterChanged):
if self.strata is None:
onlyStratum = ([], [])
for atype in doc.atypeDict.keys():
if checkContentTag(self.pairer.contentTags, atype, None,
self.pairer.contentAnnotations, self.pairer.tokenAnnotations):
if atype.hasSpan:
onlyStratum[0].append(atype)
else:
onlyStratum[1].append(atype)
self._checkStratification([onlyStratum])
# If no error is raised, build the annotation strata.
content = ([], [])
for spanned in onlyStratum[0]:
content[0].extend(doc.atypeDict[spanned])
for spanless in onlyStratum[1]:
content[1].extend(doc.atypeDict[spanless])
strata = [content]
else:
strata = []
for spanned, spanless in self.strata:
s = ([], [])
strata.append(s)
for lab in spanned:
atype = doc.anameDict.get(lab)
if atype is not None:
s[0].extend(doc.atypeDict[atype])
for lab in spanless:
atype = doc.anameDict.get(lab)
if atype is not None:
s[1].extend(doc.atypeDict[atype])
return strata
# These strata are lists of lists of atype objects. Each atype
# which points to another atype has to have that atype in a previous
# stratum (stratified) or in the current stratum (unstratified). Otherwise,
# it's an error. For that matter, right now, unstratified won't be handled.
# Also, we're going to always process spanned before spanless, so
# we can take that into account here.
def _checkStratification(self, strata):
alreadyFound = set()
def checkAtypeSetStratification(atypeSet):
atypeLabSet = set([atype.lab for atype in atypeSet])
for atype in atypeSet:
if atype.hasAnnotationValuedAttributes:
for attr in atype.attr_list:
if isinstance(attr, AnnotationAttributeType):
labs = attr.atomicLabelRestrictions
if attr.complexLabelRestrictions:
if not labs:
labs = set([p[0] for p in attr.complexLabelRestrictions])
else:
labs = labs.copy()
labs.update([p[0] for p in attr.complexLabelRestrictions])
if labs:
for l in labs:
if l not in alreadyFound:
if l in atypeLabSet:
raise PairingError, ("label %s is stratified in task %s with an annotation type which refers to it" % (l, self.pairer.task.name))
else:
raise PairingError, ("label %s is referenced in task %s in a stratum before it's paired" % (l, self.pairer.task.name))
alreadyFound.update(atypeLabSet)
for (spanned, spanless) in strata:
checkAtypeSetStratification(spanned)
checkAtypeSetStratification(spanless)
# If the label pair is known, run the method entry. If it's not,
# but the annotations are in the same stratum, then run each one
# with the dead weight enabled, and then take the minimum.
# Otherwise, return 0.
def computeSimilarity(self, rPair, hPair, useTokenSimilarity = False):
rLab, rAnnot = rPair
hLab, hAnnot = hPair
# Token similarity is just like regular similarity, except (a) it
# doesn't compare the span, and (b) it doesn't store the results.
if not useTokenSimilarity:
try:
return self.similarityCache[(rAnnot, hAnnot)]
except KeyError:
pass
labPair = (rAnnot.atype.lab, hAnnot.atype.lab)
try:
labEntry = self.methodMap[labPair]
r = self._computeSimilarity(rLab, rAnnot, hLab, hAnnot,
useTokenSimilarity = useTokenSimilarity, **labEntry)
except KeyError:
# You'd think this should already have been checked, but
# sometimes we need the similarity between elements in
# values of attributes, which won't have been comparison segmented
# necessarily.
rStratum = self.labelToStratumIdx.get(labPair[0])
hStratum = self.labelToStratumIdx.get(labPair[1])
if (rStratum is None) or (hStratum is None):
# One or the other can't be compared.
r = 0, None, None
elif rStratum != hStratum:
r = 0, None, None
elif rAnnot.atype.hasSpan != hAnnot.atype.hasSpan:
r = 0, None, None
else:
# Now, compare the two elements, first from the point of view of
# the reference (using its method map) and then from the point
# of view of the hypothesis (using its method map). Take the minimum.
# The errTokens from _computeSimilarity if not None, is a pair
# of error token sets or lists, from the point of view of each side.
# We glom them together, being careful to reverse the hErrTokens.
# And we have to say SOMEthing about the recorded dimensions.
# Let's take the dimensions from the "winning" comparison.
# So then we DON'T merge the errtokens.
rComp, dimStatus, errTokens = self._computeSimilarity(rLab, rAnnot, hLab, hAnnot,
useTokenSimilarity = useTokenSimilarity,
**self.unpairedMethodMap[labPair[0]])
hComp, hDimStatus, hErrTokens = self._computeSimilarity(hLab, hAnnot, rLab, rAnnot,
useTokenSimilarity = useTokenSimilarity,
**self.unpairedMethodMap[labPair[1]])
if hComp < rComp:
dimStatus = hDimStatus
if hErrTokens:
errTokens = (hErrTokens[1], hErrTokens[0])
else:
errTokens = None
r = min(rComp, hComp), dimStatus, errTokens
if not useTokenSimilarity:
self.similarityCache[(rAnnot, hAnnot)] = r
return r
# Without a task, we'll use the default span comparison.
def computeSimilarityTaskless(self, r, h, useTokenSimilarity = False):
rLab, rAnnot = r
hLab, hAnnot = h
if rAnnot.atype.hasSpan:
dflt = self._spannedDefault
else:
dflt = self._spanlessDefault
# Token similarity is just like regular similarity, except (a) it
# doesn't compare the span, and (b) it doesn't store the results.
if not useTokenSimilarity:
try:
                return self.similarityCache[(rAnnot, hAnnot)]
except KeyError:
pass
res = self._computeSimilarity(rLab, rAnnot, hLab, hAnnot, useTokenSimilarity = useTokenSimilarity, **dflt)
if not useTokenSimilarity:
self.similarityCache[(rAnnot, hAnnot)] = res
return res
# And here's the real magic work. We need to capture the results of
# this comparison.
# So there's one odd thing that can happen here: if the annotations
# are in a set that's induced by sequences of overlaps,
# the rLab and hLab may not overlap at all. In this case, their
# similarity HAS to be 0.
# So the first thing this function has to do is compare the
# implied spans, and if they both have implied spans and they don't
# overlap, the answer is 0. We have to ask the pairer for that information.
def _computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity = False, use_dead_weight = False,
dimensions = None, dead_weight = None):
rStart, rEnd = self.pairer._computeImpliedSpan(rAnnot, self.pairer.impliedSpans)
hStart, hEnd = self.pairer._computeImpliedSpan(hAnnot, self.pairer.impliedSpans)
if (rStart is not None) and (hStart is not None) and \
((rEnd <= hStart) or (rStart >= hEnd)):
# Nope.
return 0, {}, None
dimResults = {}
numerator = 0
denominator = 0
totalErrToks = set()
# I'm not going to futz around with whether .99999999 == 1.0;
# let's track whether the individual dimensions are 1.0 or not.
# And if there are no dimensions, I don't want to be dividing by 0...
itsPerfect = True
if dimensions:
for dimension in dimensions:
if isinstance(dimension, SpanComparisonClass) and useTokenSimilarity:
# Originally, I just skipped this, but the fact of the matter is,
# the span DOES match. This interacts with _pairAnnotationsAtInterval,
# which ought to reject a pair, even if it's the only possible pair,
# if its similarity is zero. So I want to add the weight to the
# numerator.
denominator += dimension.weight
numerator += dimension.weight
continue
if (not isinstance(dimension, AttributeComparisonClass)) or (not use_dead_weight):
# If you use the dead weight, you skip the attributes.
r, weight, errToks = dimension.computeSimilarity(rLab, rAnnot, hLab, hAnnot, useTokenSimilarity)
dimResults[dimension.dimName] = (r, errToks)
if r < 1.0:
itsPerfect = False
denominator += weight
numerator += (r * weight)
if errToks:
totalErrToks.update(errToks)
if use_dead_weight and dead_weight:
itsPerfect = False
denominator += dead_weight
if itsPerfect:
return 1.0, dimResults, None
elif not totalErrToks:
return float(numerator) / float(denominator), dimResults, None
else:
# Accumulate the tokens for each annotation.
return float(numerator) / float(denominator), dimResults, \
(set([t[0] for t in totalErrToks]), set([t[1] for t in totalErrToks]))
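    # A worked example of the weighted average above, using the spanned default
    # dimensions (label weight .1, span weight .9, both remainders contributing
    # weight 0 because the annotations carry no extra attributes): if the labels
    # match (1.0) and the spans overlap at 0.5, then
    #     similarity = ((1.0 * .1) + (0.5 * .9)) / (.1 + .9) = 0.55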
def recordPair(self, rAnn, hAnn):
self.pairCache.add((rAnn, hAnn))
#
# Here are the various magic comparison classes.
#
class SimilarityEngineComparisonClass(object):
def __init__(self, simEngine, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester,
defaultMethod, methodTable):
self.simEngine = simEngine
self.defaultMethod = defaultMethod
self.methodTable = methodTable
self.dimName = dimName
self.weight = dimWeight
self.computedMethod = None
self.computedParams = None
if not dimMethod:
dimMethod = self.methodTable[self.defaultMethod]
dimDigester = None
if type(dimMethod) is tuple:
dimMethod, dimDigester = dimMethod
elif self.methodTable.has_key(dimMethod):
# First, see if the method is a known one.
dimMethod = self.methodTable[dimMethod]
dimDigester = None
if type(dimMethod) is tuple:
dimMethod, dimDigester = dimMethod
else:
# If we don't know it, try to evaluate it.
try:
dimMethod = eval(dimMethod)
except (NameError, AttributeError):
raise PairingError, ("Custom dimension method '%s' in similarity profile for task '%s' is unknown" % (dimMethod, self.simEngine.pairer.task.name))
# We only need digesters for custom methods.
if dimDigester:
try:
dimDigester = eval(dimDigester)
except (NameError, AttributeError):
# OK, try it in the plugin.
import MAT.PluginMgr
try:
dimDigester = MAT.PluginMgr.FindPluginObject(dimDigester, self.simEngine.pairer.task.name)
except MAT.PluginMgr.PluginError:
raise PairingError, ("Custom dimension method parameter digester '%s' in similarity profile for task '%s' is unknown" % (dimDigester, self.simEngine.pairer.task.name))
# Now, if we have the aggregator method, apply it.
if dimAggregatorMethod:
origDimMethod = dimMethod
dimMethod = lambda rVal, hVal, **params: dimAggregatorMethod(rVal, hVal, origDimMethod, **params)
# At this point, we have a mapper and an optional parameter digester for this entry.
if dimDigester:
dimParams = dimDigester(**dimParams)
self.computedMethod = dimMethod
self.computedParams = dimParams
class SpanComparisonClass(SimilarityEngineComparisonClass):
def __init__(self, simEngine, allAtypes, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
if dimAggregatorMethod:
raise PairingError, "Can't define an aggregator method on the _span dimension in a similarity profile"
for atype in allAtypes:
if not atype.hasSpan:
raise PairingError, ("Can't define a _span dimension in a similarity profile on type '%s', because it has no span" % atype.lab)
# Ignore dimDigester.
SimilarityEngineComparisonClass.__init__(self, simEngine, dimName, dimMethod, dimWeight, dimParams,
None, dimDigester, "overlap",
{"overlap": (self._similaritySpanOverlap, self._similarityDimDigester)})
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
r, errToks = self.computedMethod((rAnnot.start, rAnnot.end), (hAnnot.start, hAnnot.end),
**(self.computedParams or {}))
return r, self.weight, errToks
def _similarityDimDigester(self, **params):
if params.get("overlap_match_lower_bound") is not None:
params["overlap_match_lower_bound"] = float(params["overlap_match_lower_bound"])
if params.get("overlap_mismatch_upper_bound") is not None:
params["overlap_mismatch_upper_bound"] = float(params["overlap_mismatch_upper_bound"])
return params
# Here, rVal and hVal are both span pairs.
# overlap_match_lower_bound is the threshold above which
# it counts as 1.0.
# overlap_mismatch_upper_bound is the threshold below which
# it counts as 0.0.
@staticmethod
def _similaritySpanOverlap(rVal, hVal, overlap_match_lower_bound = None, overlap_mismatch_upper_bound = None, **params):
rStart, rEnd = rVal
hStart, hEnd = hVal
overlapPct = float(min(rEnd, hEnd) - max(rStart, hStart))/float(max(rEnd, hEnd) - min(rStart, hStart))
if (overlap_match_lower_bound is not None) and (overlapPct > overlap_match_lower_bound):
overlapPct = 1.0
elif (overlap_mismatch_upper_bound is not None) and (overlapPct < overlap_mismatch_upper_bound):
overlapPct = 0.0
if overlapPct == 1.0:
return 1.0, None
else:
# Figure out what the overlap error is.
if (hStart <= rStart) and \
(hEnd >= rEnd):
# The hyp is larger than the ref (remember, we've factored out equality already)
return overlapPct, [("undermark", "overmark")]
elif (hStart >= rStart) and \
(hEnd <= rEnd):
# The hyp is smaller than the ref
return overlapPct, [("overmark", "undermark")]
else:
return overlapPct, [("overlap", "overlap")]
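    # For example, with rVal = (10, 20) and hVal = (15, 25):
    #     overlapPct = (min(20, 25) - max(10, 15)) / float(max(20, 25) - min(10, 15))
    #                = 5 / 15.0 = 0.333...
    # and since neither span contains the other, the error tokens are
    # [("overlap", "overlap")]. With overlap_match_lower_bound = 0.3, the same
    # pair would instead count as a full match (1.0, None).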
class LabelComparisonClass(SimilarityEngineComparisonClass):
def __init__(self, simEngine, allAtypes, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
if dimAggregatorMethod:
raise PairingError, "Can't define an aggregator method on the _label dimension in a similarity profile"
SimilarityEngineComparisonClass.__init__(self, simEngine, dimName, dimMethod, dimWeight, dimParams,
None, dimDigester,
"label_equality",
{"label_equality": (self._similarityLabelEquality,
self._similarityLabelEqualityParamDigester)})
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
r, errToks = self.computedMethod((rAnnot, rLab), (hAnnot, hLab),
**(self.computedParams or {}))
return r, self.weight, errToks
@staticmethod
def _similarityLabelEqualityParamDigester(**params):
for k, v in params.items():
if k == "true_residue":
params[k] = float(v)
return params
@staticmethod
def _similarityLabelEquality(rPair, hPair, true_residue = None, **params):
rAnnot, rVal = rPair
hAnnot, hVal = hPair
if rVal == hVal:
return 1.0, None
elif rAnnot.atype.lab != hAnnot.atype.lab:
return 0.0, [("tagclash", "tagclash")]
elif true_residue is not None:
return true_residue, [("computedtagclash", "computedtagclash")]
return 0.0, [("tagclash", "tagclash")]
class AttributeComparisonClass(SimilarityEngineComparisonClass):
# aggrType may be 0, to distinguish it from None.
def __init__(self, simEngine, attrName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester,
defaultMethod, methodTable):
if aggrType is not None:
# It's an aggregation, so we need an aggregation function to apply to the
# method.
if dimAggregatorMethod:
try:
dimAggregatorMethod = eval(dimAggregatorMethod)
except (NameError, AttributeError):
raise PairingError, ("Custom dimension aggregator method '%s' in similarity profile for task '%s' is unknown" % (dimAggregatorMethod, simEngine.pairer.task.name))
elif aggrType == "set":
dimAggregatorMethod = self._similaritySetAggregation
elif aggrType == "list":
dimAggregatorMethod = self._similarityListAggregation
elif dimAggregatorMethod:
raise PairingError, ("Can't define an aggregator method for non-aggregating dimension '%s' in a similarity profile for task '%s'" % (dimAggregatorMethod, simEngine.pairer.task.name))
SimilarityEngineComparisonClass.__init__(self, simEngine, attrName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester,
defaultMethod, methodTable)
self.dimMap = dimMap
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
if self.dimMap:
rVal = rAnnot.get(self.dimMap[rAnnot.atype.lab])
hVal = hAnnot.get(self.dimMap[hAnnot.atype.lab])
else:
rVal = rAnnot.get(self.dimName)
hVal = hAnnot.get(self.dimName)
# If they're both None, good on them.
if (rVal is None) and (hVal is None):
return 1.0, self.weight, None
elif (rVal is not None) and (hVal is not None):
r, errToks = self.computedMethod(rVal, hVal, useTokenSimilarity = useTokenSimilarity,
**(self.computedParams or {}))
return r, self.weight, errToks
else:
return 0.0, self.weight, None
@staticmethod
def _similarityEquality(rVal, hVal, **params):
if rVal == hVal:
return 1.0, None
else:
return 0.0, None
@staticmethod
def _similaritySetAggregation(rVal, hVal, itemComp, useTokenSimilarity = False, **params):
if itemComp is AttributeComparisonClass._similarityEquality:
v = len(rVal & hVal)/float(len(rVal | hVal))
# This is wrong, because it only works for exact equality, but what the hell.
if v == 1.0:
return 1.0, None
else:
return v, [("setclash", "setclash")]
else:
# I think the right thing to do is compute the pairwise
# similarity, and then run Kuhn-Munkres.
# the itemComp method returns a pair, and in the cost matrix, I only want the first value.
matrix = make_cost_matrix([[itemComp(r, h, useTokenSimilarity = useTokenSimilarity)[0] for h in hVal]
for r in rVal],
lambda cost: 1.0 - cost)
indexPairs = Munkres().compute(matrix)
# Sum of the similarities. Now, how to get
# some sensible value out of this? First, make sure
# we re-invert the results.
rawSum = sum([1.0 - matrix[row][column] for (row, column) in indexPairs])
maxSum = max(len(hVal), len(rVal))
if rawSum == maxSum:
return 1.0, None
else:
return (rawSum / float(maxSum)), [("setclash", "setclash")]
@staticmethod
def _similarityListAggregation(rVal, hVal, itemComp, **params):
raise PairingError, "haven't implemented list aggregation for pairing yet"
return 0.0, None
def _getDeclaredAttributes(self):
# This is required by the remainder stuff.
if self.dimMap:
return self.dimMap.values()
else:
return [self.dimName]
class NonAnnotationAttributeComparisonClass(AttributeComparisonClass):
def __init__(self, simEngine, attrName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
AttributeComparisonClass.__init__(self, simEngine, attrName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester,
"equality",
{"equality": self._similarityEquality})
class StringAttributeComparisonClass(NonAnnotationAttributeComparisonClass):
pass
class IntAttributeComparisonClass(NonAnnotationAttributeComparisonClass):
pass
class FloatAttributeComparisonClass(NonAnnotationAttributeComparisonClass):
pass
class BooleanAttributeComparisonClass(NonAnnotationAttributeComparisonClass):
pass
class AnnotationAttributeComparisonClass(AttributeComparisonClass):
def __init__(self, simEngine, attrName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
AttributeComparisonClass.__init__(self, simEngine, attrName, aggrType,
dimMap, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester,
"similarity",
{"similarity": self._similarityAnnotationSimilarity})
# This one isn't static, because it has to use the cache.
# Right now, we don't support unstratified, so we'll always
# have an entry for this in the cache. If they're not paired,
# the result is 0; otherwise, it's the contents of the cache.
def _similarityAnnotationSimilarity(self, rVal, hVal, **params):
e = self.simEngine
if (rVal, hVal) not in e.pairCache:
return 0.0, [("annattributenotpaired", "annattributenotpaired")]
else:
# Ignore the dimension results and error tokens.
r = e.similarityCache[(rVal, hVal)][0]
if r == 1.0:
return 1.0, None
else:
return r, [("annclash", "annclash")]
# This has to be an element of AttributeComparisonClass, because
# its dead weight must contribute in the unpaired case.
class MultiAttributeComparisonClass(AttributeComparisonClass):
def __init__(self, simEngine, dimNames, attrAggrTriples,
dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
dimName = ",".join(dimNames)
# There had better be a comparison method.
if dimMethod is None:
raise PairingError, ("The dimension '%s' in the tag profile in a similarity profile for task %s is a multi-attribute comparison, but it has no method specified" % (dimName, simEngine.pairer.task.name))
if dimAggregatorMethod is not None:
raise PairingError, "Can't define an aggregator method on the multi-attribute dimension in a similarity profile"
if len(dimNames) < 2:
raise PairingError, "Can't define a multi-attribute dimension on fewer than two dimensions"
KNOWN_METHODS = {
"_annotation_set_similarity": self._similarityAnnotationSetSimilarity
}
AttributeComparisonClass.__init__(self, simEngine, dimName,
None, None, dimMethod, dimWeight,
dimParams, None, dimDigester,
None, KNOWN_METHODS)
self.dimNames = dimNames
dimMaps = [c for (a, b, c) in attrAggrTriples]
if not [dm for dm in dimMaps if dm is not None]:
# If none of them are not none (i.e., all of them are none),
# don't store the dimMaps.
self.dimMaps = None
else:
self.dimMaps = dimMaps
self.attrAggrPairs = [(a, b) for (a, b, c) in attrAggrTriples]
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
# Get the vals. Make sure to use the dimMaps if appropriate.
# Use the true labels.
if self.dimMaps:
rVals = [rAnnot.get(dmap[rAnnot.atype.lab]) for dmap in self.dimMaps]
hVals = [hAnnot.get(dmap[hAnnot.atype.lab]) for dmap in self.dimMaps]
else:
rVals = [rAnnot.get(d) for d in self.dimNames]
hVals = [hAnnot.get(d) for d in self.dimNames]
if (not [v for v in rVals if v is not None]) and \
(not [v for v in hVals if v is not None]):
# If they're both all Nones, good on them.
return 1.0, self.weight, None
else:
r, errToks = self.computedMethod(rVals, hVals,
useTokenSimilarity = useTokenSimilarity,
**(self.computedParams or {}))
return r, self.weight, errToks
# For this case, I'm going to use the set similarity.
def _similarityAnnotationSetSimilarity(self, rVals, hVals, useTokenSimilarity = False,
**params):
if not getattr(self, "_checkedAggrPairs", False):
for (attr, aggr) in self.attrAggrPairs:
if aggr is not None:
raise PairingError, "can't use _annotation_set_similarity to compare attributes which are aggregates themselves"
if attr != "annotation":
raise PairingError, "can't use _annotation_set_similarity to compare attributes which aren't annotation attributes"
self._checkedAggrPairs = True
self._annotationCompInst = AnnotationAttributeComparisonClass(self.simEngine, self.dimNames[0], None, (self.dimMaps and self.dimMaps[0]) or None, None, 0, None, None, None)
# Now, I know I have an _annotationCompInst, and I can use
# the set similarity from above.
return self._similaritySetAggregation(set([v for v in rVals if v is not None]),
set([v for v in hVals if v is not None]),
self._annotationCompInst._similarityAnnotationSimilarity,
useTokenSimilarity = useTokenSimilarity)
def _getDeclaredAttributes(self):
if self.dimMaps:
res = []
for dm in self.dimMaps:
res += dm.values()
return res
else:
return self.dimNames
# This one is interesting. It collects up all the non-annotation attributes which
# are neither (a) an effective label, or (b) already specified in
# an existing dimension, and aggregates their results. This works
# across tag profiles as well as within them, unlike the explicitly
# declared attributes. I'm going to use this in the defaults, as well
# as allowing it to be created explicitly.
# Oh, and the attributes need to be globally declared.
# Oh, and if it has no attributes to match, its weight should be 0.
# Note, too, that this must be able to be called when there's no task.
class NonAnnotationAttributeRemainderClass(SimilarityEngineComparisonClass):
def __init__(self, simEngine, allAtypes, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
if dimAggregatorMethod:
raise PairingError, ("Can't define an aggregator method on the %s dimension in a similarity profile" % dimName)
SimilarityEngineComparisonClass.__init__(self, simEngine, dimName, dimMethod, dimWeight, dimParams,
None, dimDigester, "overlap",
{"overlap": self._similarityAttributeOverlap})
# I need to partition these by reference label.
# This is a mapping from reference true labels to triples of
# a mapping from attribute names to attribute objects for all
# relevant attributes; a mapping from attribute names to comparison
# classes for those same attributes; and a mapping from
# hypothesis labels to their checkable attributes.
self.checkableCache = {}
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
return self.computedMethod((rLab, rAnnot), (hLab, hAnnot),
useTokenSimilarity = useTokenSimilarity,
**(self.computedParams or {}))
# This can't be a static method, since I need to reach waaaay back to the simEngine.
def _similarityAttributeOverlap(self, rVal, hVal, useTokenSimilarity = False, **params):
rLab, rAnnot = rVal
hLab, hAnnot = hVal
try:
annotRemainder, comparers, checkableDict = self.checkableCache[rAnnot.atype.lab]
except KeyError:
# We haven't computed it yet. If there's a task, filter
# the types by what's declared.
if self.simEngine.pairer.task:
mTable = self.simEngine.methodMap[(rAnnot.atype.lab, rAnnot.atype.lab)]
# Which ones are declared? Remove them from contention.
declared = set()
for d in mTable["dimensions"]:
if isinstance(d, NonAnnotationAttributeComparisonClass) or \
isinstance(d, MultiAttributeComparisonClass):
declared.update(d._getDeclaredAttributes())
globAtype = self.simEngine.pairer.task.getAnnotationTypeRepository()[rAnnot.atype.lab]
else:
declared = set()
globAtype = rAnnot.atype
annotRemainder = dict([(a.name, a) for a in globAtype.attr_list
if (a.name not in declared) and \
(not a.effectiveLabelAttribute) and \
(a._typename_ != "annotation")])
comparers = dict([(a.name, _ATTR_COMPARISON_TYPES[a._typename_](self.simEngine, a.name, a.aggregation, None, None, 1, None, None, None))
for a in annotRemainder.values()])
checkableDict = {}
self.checkableCache[rAnnot.atype.lab] = (annotRemainder, comparers, checkableDict)
if len(annotRemainder.keys()) == 0:
# It weighs nothing
return 1.0, 0.0, None
try:
checkable = checkableDict[hAnnot.atype.lab]
except KeyError:
# So now, for the hAnnot, we need to ensure that the
# other attribute in question is (a) not an effective label attribute,
# (b) not an annotation attribute, (c) matches the type and aggregation
# of the local one. ONLY then do they count toward something that
# can match.
if self.simEngine.pairer.task:
hGlobAtype = self.simEngine.pairer.task.getAnnotationTypeRepository()[hAnnot.atype.lab]
else:
hGlobAtype = hAnnot.atype
checkable = [a.name for a in hGlobAtype.attr_list
if (annotRemainder.has_key(a.name)) and \
(not a.effectiveLabelAttribute) and \
(a._typename_ == annotRemainder[a.name]._typename_) and \
(a.aggregation == annotRemainder[a.name].aggregation)]
checkableDict[hAnnot.atype.lab] = checkable
# Now, look through each of the checkable annotations. VERY similar
# to _computeSimilarity. I thought of bypassing all the boilerplate and
# going directly to the equality method, but if there's an aggregation
# involved, I don't want to have to compute that here too.
numerator = 0
denominator = 0
totalErrToks = set()
itsPerfect = True
for dimensionName in checkable:
dimension = comparers[dimensionName]
r, weight, errToks = dimension.computeSimilarity(rLab, rAnnot, hLab, hAnnot, useTokenSimilarity)
if r < 1.0:
itsPerfect = False
denominator += weight
numerator += (r * weight)
if errToks:
totalErrToks.update(errToks)
# OK, we've done them all. Now, we add the dead weight of
# the ones in the local remainder that couldn't be checked remotely.
localSize = len(annotRemainder.keys())
remainder = localSize - len(checkable)
if remainder > 0:
itsPerfect = False
denominator += remainder
if itsPerfect:
return 1.0, self.weight, None
elif not totalErrToks:
return float(numerator) / float(denominator), self.weight, None
else:
# Accumulate the tokens for each annotation.
return float(numerator) / float(denominator), self.weight, totalErrToks
# For the annotation remainder, we're going to take all the annotation
# attributes and assume the names don't mean anything. So we do the best
# alignment between the annotations which share aggregations.
class AnnotationAttributeRemainderClass(SimilarityEngineComparisonClass):
def __init__(self, simEngine, allAtypes, dimName, dimMethod, dimWeight, dimParams,
dimAggregatorMethod, dimDigester):
SimilarityEngineComparisonClass.__init__(self, simEngine, dimName, dimMethod, dimWeight, dimParams,
None, dimDigester, "overlap",
{"overlap": self._similarityAttributeOverlap})
self.checkableCache = {}
self.aggrs = dict([(aggr, AnnotationAttributeComparisonClass(self.simEngine, "<null>", aggr,
None, None, 1.0, None, None, None))
for aggr in [None, "set", "list"]])
def computeSimilarity(self, rLab, rAnnot, hLab, hAnnot, useTokenSimilarity):
return self.computedMethod((rLab, rAnnot), (hLab, hAnnot),
useTokenSimilarity = useTokenSimilarity,
**(self.computedParams or {}))
def _getCheckableCacheEntry(self, annot):
try:
return self.checkableCache[annot.atype.lab]
except KeyError:
# We haven't computed it yet. If there's a task, filter
# the types by what's declared.
if self.simEngine.pairer.task:
mTable = self.simEngine.methodMap[(annot.atype.lab, annot.atype.lab)]
# Which ones are declared? Remove them from contention.
declared = set()
for d in mTable["dimensions"]:
if isinstance(d, AnnotationAttributeComparisonClass) or \
isinstance(d, MultiAttributeComparisonClass):
declared.update(d._getDeclaredAttributes())
globAtype = self.simEngine.pairer.task.getAnnotationTypeRepository()[annot.atype.lab]
else:
declared = set()
globAtype = annot.atype
# We want a comparer for each aggregation type. So first, we sort them
# into aggregation types.
aggrDict = {None: [], "set": [], "list": []}
valSize = 0
for a in globAtype.attr_list:
if (a.name not in declared) and (a._typename_ == "annotation"):
aggrDict[a.aggregation].append(a)
valSize += 1
self.checkableCache[annot.atype.lab] = (aggrDict, valSize)
return aggrDict, valSize
# This can't be a static method, since I need to reach waaaay back to the simEngine.
# This is very similar, in some ways, to the nonattribute case.
def _similarityAttributeOverlap(self, rVal, hVal, useTokenSimilarity = False, **params):
rLab, rAnnot = rVal
hLab, hAnnot = hVal
rAggrDict, rValSize = self._getCheckableCacheEntry(rAnnot)
if rValSize == 0:
# It weighs nothing.
return 1.0, 0.0, None
hAggrDict, hValSize = self._getCheckableCacheEntry(hAnnot)
# Now, we need to deploy Kuhn-Munkres, I think, in each
# aggregation type. But first, I need to get the values
# for the corresponding annotations, and then run the
# default comparison for the itemComp. None shouldn't
# count as a match here, I don't think - it doesn't
# help anything in any case.
valSize = max(rValSize, hValSize)
simSum = 0
for aggr in [None, "set", "list"]:
rAggr = [v for v in [rAnnot.get(a.name) for a in rAggrDict[aggr]] if v is not None]
hAggr = [v for v in [hAnnot.get(a.name) for a in hAggrDict[aggr]] if v is not None]
if rAggr and hAggr:
itemComp = self.aggrs[aggr]
# the itemComp method returns a triple, and in the cost matrix, I only want the first value.
data = [[itemComp.computedMethod(rVal, hVal, useTokenSimilarity = useTokenSimilarity,
**(itemComp.computedParams or {}))[0] for hVal in hAggr]
for rVal in rAggr]
matrix = make_cost_matrix(data, lambda cost: 1.0 - cost)
indexPairs = Munkres().compute(matrix)
# Note that these need to be picked out of the data, not the
# matrix. The matrix computes the lowest cost, and I want the
# highest, which is why we apply the inverse when we build the matrix.
rawSum = sum([data[row][column] for (row, column) in indexPairs])
simSum += rawSum
# The denominator is valSize.
if simSum == valSize:
return 1.0, self.weight, None
else:
return (simSum / float(valSize)), self.weight, None
_SPECIAL_COMPARISON_TYPES = {
"_span": SpanComparisonClass,
"_label": LabelComparisonClass,
"_nonannotation_attribute_remainder": NonAnnotationAttributeRemainderClass,
"_annotation_attribute_remainder": AnnotationAttributeRemainderClass
}
_ATTR_COMPARISON_TYPES = {
"string": StringAttributeComparisonClass,
"int": IntAttributeComparisonClass,
"float": FloatAttributeComparisonClass,
"boolean": BooleanAttributeComparisonClass,
"annotation": AnnotationAttributeComparisonClass
}
|
from abc import abstractmethod
from copy import deepcopy
from typing import Any
import numpy as np
import pandas as pd
from aistac.handlers.abstract_handlers import HandlerFactory
from aistac.intent.abstract_intent import AbstractIntentModel
from ds_discovery.components.commons import Commons
__author__ = 'Darryl Oatridge'
class AbstractCommonsIntentModel(AbstractIntentModel):
@classmethod
def __dir__(cls):
"""returns the list of available methods associated with the parameterized intent"""
rtn_list = []
for m in dir(cls):
if m.startswith('_get'):
rtn_list.append(m)
elif not m.startswith('_'):
rtn_list.append(m)
return rtn_list
@abstractmethod
def run_intent_pipeline(self, *args, **kwargs) -> [None, tuple]:
""" Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract.
"""
@staticmethod
def select2dict(column: str, condition: str, expect: str=None, logic: str=None, date_format: str=None,
offset: int=None) -> dict:
""" a utility method to help build feature conditions by aligning method parameters with dictionary format.
:param column: the column name to apply the condition to
        :param condition: the condition string (special conditions are 'date.now' for the current date)
:param expect: (optional) the data type to expect. If None then the data type is assumed from the dtype
:param logic: (optional) the logic to provide, see below for options
:param date_format: (optional) a format of the date if only a specific part of the date and time is required
:param offset: (optional) a time delta in days (+/-) from the current date and time (minutes not supported)
:return: dictionary of the parameters
logic:
AND: the intersect of the current state with the condition result (common to both)
NAND: outside the intersect of the current state with the condition result (not common to both)
OR: the union of the current state with the condition result (everything in both)
NOR: outside the union of the current state with the condition result (everything not in both)
NOT: the difference between the current state and the condition result
XOR: the difference between the union and the intersect current state with the condition result
extra logic:
            ALL: the intersect of the whole index with the condition result irrespective of level or current state index
            ANY: the intersect of the level index with the condition result irrespective of current state index
"""
return Commons.param2dict(**locals())
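    # For example (the column name is illustrative), a selection entry that keeps
    # rows whose 'age' value exceeds 30 and intersects them with the current state:
    #     selection = [self.select2dict(column='age', condition='@ > 30', logic='AND')]
    # where '@' stands in for the column reference when the condition is evaluated.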
@staticmethod
def action2dict(method: Any, **kwargs) -> dict:
""" a utility method to help build feature conditions by aligning method parameters with dictionary format.
:param method: the method to execute
:param kwargs: name value pairs associated with the method
:return: dictionary of the parameters
Special method values
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
"""
return Commons.param2dict(method=method, **kwargs)
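    # For example (values are illustrative): reference another column, or inject a constant
    #     self.action2dict(method='@header', header='salary')
    #     self.action2dict(method='@constant', value=0)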
@staticmethod
def canonical2dict(method: Any, **kwargs) -> dict:
""" a utility method to help build feature conditions by aligning method parameters with dictionary format.
        The method parameter can be either a 'model_*' or 'frame_*' method with two special reserved options
Special reserved method values
@empty: returns an empty dataframe, optionally the key values size: int and headers: list
            @generate: generates a dataframe either from_env(task_name) or from a remote repo uri. params are
task_name: the task name of the generator
                repo_uri: (optional) a remote repo to retrieve the domain contract
size: (optional) the generated sample size
seed: (optional) if seeding should be applied the seed value
run_book: (optional) a domain contract runbook to execute as part of the pipeline
:param method: the method to execute
:param kwargs: name value pairs associated with the method
:return: dictionary of the parameters
"""
return Commons.param2dict(method=method, **kwargs)
"""
PRIVATE METHODS SECTION
"""
def _apply_action(self, canonical: pd.DataFrame, action: Any, select_idx: pd.Int64Index=None, seed: int=None):
""" applies an action returning an indexed Series
Special method values
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
:param canonical: a reference canonical
:param action: the action dictionary
:param select_idx: (optional) the index selection of the return Series. if None then canonical index taken
:param seed: a seed to apply to any action
:return: pandas Series with passed index
"""
if not isinstance(select_idx, pd.Int64Index):
select_idx = canonical.index
if isinstance(action, dict):
action = action.copy()
method = action.pop('method', None)
if method is None:
raise ValueError(f"The action dictionary has no 'method' key.")
if f"{method}" in self.__dir__() or f"_{method}" in self.__dir__():
if isinstance(seed, int) and 'seed' not in action.keys():
action.update({'seed': seed})
if str(method).startswith('get_'):
if f"{method}" in self.__dir__():
action.update({'size': select_idx.size, 'save_intent': False})
result = eval(f"self.{method}(**action)", globals(), locals())
else:
action.update({'size': select_idx.size})
result = eval(f"self._{method}(**action)", globals(), locals())
elif str(method).startswith('correlate_'):
action.update({'canonical': canonical.iloc[select_idx], 'save_intent': False})
result = eval(f"self.{method}(**action)", globals(), locals())
else:
raise NotImplementedError(f"The method {method} is not implemented as part of the actions")
dtype = 'object'
if any(isinstance(x, int) for x in result):
dtype = 'int'
elif any(isinstance(x, float) for x in result):
dtype = 'float'
return pd.Series(data=result, index=select_idx, dtype=dtype)
elif str(method).startswith('@header'):
header = action.pop('header', None)
if header is None:
raise ValueError(f"The action '@header' requires a 'header' key.")
if header not in canonical.columns:
raise ValueError(f"When executing the action '@header', the header {header} was not found")
return canonical[header].iloc[select_idx]
elif str(method).startswith('@choice'):
header = action.pop('header', None)
                size = action.pop('size', 1)
                seed = action.pop('seed', seed)
                if header is None:
                    raise ValueError(f"The action '@choice' requires a 'header' key")
                if header not in canonical.columns:
                    raise ValueError(f"When executing the action '@choice', the header {header} was not found")
                generator = np.random.default_rng(seed=seed)
                return pd.Series([generator.choice(x) for x in canonical[header]],
                                 dtype=canonical[header].dtype.name).iloc[select_idx]
elif str(method).startswith('@eval'):
code_str = action.pop('code_str', None)
if code_str is None:
raise ValueError(f"The action '@eval' requires a 'code_str' key.")
e_value = eval(code_str, globals(), action)
return pd.Series(data=([e_value] * select_idx.size), index=select_idx)
elif str(method).startswith('@constant'):
constant = action.pop('value', None)
if constant is None:
raise ValueError(f"The action '@constant' requires a 'value' key.")
return pd.Series(data=([constant] * select_idx.size), index=select_idx)
else:
raise ValueError(f"The 'method' key {method} is not a recognised intent method")
return pd.Series(data=([action] * select_idx.size), index=select_idx)
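    # A minimal sketch of applying an action dictionary (data is illustrative):
    #     canonical = pd.DataFrame({'gender': ['M', 'F', 'F']})
    #     idx = pd.Int64Index([1, 2])
    #     self._apply_action(canonical, action={'method': '@constant', 'value': 'U'}, select_idx=idx)
    # returns pd.Series(['U', 'U'], index=[1, 2]), which the caller can assign
    # back onto the selected rows of the canonical.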
def _selection_index(self, canonical: pd.DataFrame, selection: list, select_idx: pd.Index=None):
""" private method to iterate a list of selections and return the resulting index
:param canonical: a pandas DataFrame to select from
:param selection: the selection list of dictionaries
:param select_idx: a starting index, if None then canonical index used
:return:
"""
select_idx = select_idx if isinstance(select_idx, pd.Index) else canonical.index
sub_select_idx = select_idx
state_idx = None
for condition in selection:
if isinstance(condition, str):
condition = {'logic': condition}
if isinstance(condition, dict):
if not isinstance(state_idx, pd.Index):
state_idx = sub_select_idx
if len(condition) == 1 and 'logic' in condition.keys():
if condition.get('logic') == 'ALL':
condition_idx = canonical.index
elif condition.get('logic') == 'ANY':
condition_idx = sub_select_idx
elif condition.get('logic') == 'NOT':
condition_idx = state_idx
state_idx = sub_select_idx
else:
condition_idx = state_idx
else:
condition_idx = self._condition_index(canonical=canonical.iloc[sub_select_idx], condition=condition,
select_idx=sub_select_idx)
logic = condition.get('logic', 'AND')
state_idx = self._condition_logic(base_idx=canonical.index, sub_select_idx=sub_select_idx,
state_idx=state_idx, condition_idx=condition_idx, logic=logic)
elif isinstance(condition, list):
if not isinstance(state_idx, pd.Index) or len(state_idx) == 0:
state_idx = sub_select_idx
state_idx = self._selection_index(canonical=canonical, selection=condition, select_idx=state_idx)
else:
raise ValueError(f"The subsection of the selection list {condition} is neither a dict or a list")
return state_idx
@staticmethod
def _condition_logic(base_idx: pd.Index, sub_select_idx: pd.Index, state_idx: pd.Index, condition_idx: pd.Index,
logic: str) -> pd.Index:
if str(logic).upper() == 'ALL':
return base_idx.intersection(condition_idx).sort_values()
elif str(logic).upper() == 'ANY':
return sub_select_idx.intersection(condition_idx).sort_values()
elif str(logic).upper() == 'AND':
return state_idx.intersection(condition_idx).sort_values()
elif str(logic).upper() == 'NAND':
return sub_select_idx.drop(state_idx.intersection(condition_idx)).sort_values()
elif str(logic).upper() == 'OR':
return state_idx.append(state_idx.union(condition_idx)).drop_duplicates().sort_values()
elif str(logic).upper() == 'NOR':
result = state_idx.append(state_idx.union(condition_idx)).drop_duplicates().sort_values()
return sub_select_idx.drop(result)
elif str(logic).upper() == 'NOT':
return state_idx.difference(condition_idx)
elif str(logic).upper() == 'XOR':
return state_idx.union(condition_idx).difference(state_idx.intersection(condition_idx))
raise ValueError(f"The logic '{logic}' must be AND, NAND, OR, NOR, NOT, XOR ANY or ALL")
@staticmethod
def _condition_index(canonical: pd.DataFrame, condition: dict, select_idx: pd.Index) -> pd.Index:
_column = condition.get('column')
_condition = condition.get('condition')
if _column == '@':
_condition = str(_condition).replace("@", "canonical.iloc[select_idx]")
elif _column not in canonical.columns:
raise ValueError(f"The column name '{_column}' can not be found in the canonical headers.")
else:
_condition = str(_condition).replace("@", f"canonical['{_column}']")
# find the selection index
return eval(f"canonical[{_condition}].index", globals(), locals())
def _get_canonical(self, data: [pd.DataFrame, pd.Series, list, str, dict, int], header: str=None, size: int=None,
deep_copy: bool=None) -> pd.DataFrame:
""" Used to return or generate a pandas Dataframe from a number of different methods.
The following can be passed and their returns
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use the canonical2dict(...) method to construct a dict with a method and related parameters
methods:
- model_*(...) -> one of the builder model methods and paramters
- *_selection(...) -> one of the builder selection methods (get_, correlate_, frame_) and paramters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
:param data: a dataframe or action event to generate a dataframe
:param header: (optional) header for pd.Series or list
:param size: (optional) a size parameter for @empty of @generate
:param header: (optional) used in conjunction with lists or pd.Series to give a header reference
:return: a pd.Dataframe
"""
deep_copy = deep_copy if isinstance(deep_copy, bool) else True
if isinstance(data, pd.DataFrame):
if deep_copy:
return deepcopy(data)
return data
if isinstance(data, dict):
data = data.copy()
method = data.pop('method', None)
if method is None:
try:
return pd.DataFrame.from_dict(data=data)
except ValueError:
raise ValueError("The canonical data passed was of type 'dict' but did not contain a 'method' key "
"or was not convertible to Dataframe")
if method in self.__dir__():
if str(method).startswith('model_') or method == 'frame_selection':
data.update({'save_intent': False})
return eval(f"self.{method}(**data)", globals(), locals())
if str(method).endswith('_selection'):
if not isinstance(header, str):
raise ValueError(f"The canonical type 'dict' method '{method}' must have a header parameter.")
data.update({'save_intent': False})
if method == 'get_selection':
if not isinstance(size, int):
raise ValueError(f"The canonical type 'dict' method '{method}' must have a size parameter.")
data.update({'size': size})
return pd.DataFrame(data=eval(f"self.{method}(**data)", globals(), locals()), columns=[header])
elif str(method).startswith('@generate'):
task_name = data.pop('task_name', None)
if task_name is None:
raise ValueError(f"The data method '@generate' requires a 'task_name' key.")
uri_pm_repo = data.pop('repo_uri', None)
module = HandlerFactory.get_module(module_name='ds_discovery')
inst = module.SyntheticBuilder.from_env(task_name=task_name, uri_pm_repo=uri_pm_repo,
default_save=False)
size = size if isinstance(size, int) and 'size' not in data.keys() else data.pop('size', None)
seed = data.get('seed', None)
run_book = data.pop('run_book', None)
result = inst.tools.run_intent_pipeline(canonical=size, columns=run_book, seed=seed)
return inst.tools.frame_selection(canonical=result, save_intent=False, **data)
elif str(method).startswith('@empty'):
size = size if isinstance(size, int) and 'size' not in data.keys() else data.pop('size', None)
headers = data.pop('headers', None)
size = range(size) if size else None
return pd.DataFrame(index=size, columns=headers)
else:
raise ValueError(f"The data 'method' key {method} is not a recognised intent method")
elif isinstance(data, (list, pd.Series)):
header = header if isinstance(header, str) else 'default'
if deep_copy:
data = deepcopy(data)
return pd.DataFrame(data=data, columns=[header])
elif isinstance(data, str):
if not self._pm.has_connector(connector_name=data):
if isinstance(size, int):
return pd.DataFrame(index=range(size))
raise ValueError(f"The data connector name '{data}' is not in the connectors catalog")
handler = self._pm.get_connector_handler(data)
canonical = handler.load_canonical()
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
return canonical
elif isinstance(data, int):
return pd.DataFrame(index=range(data)) if data > 0 else pd.DataFrame()
elif not data:
return pd.DataFrame()
raise ValueError(f"The canonical format is not recognised, pd.DataFrame, pd.Series, "
f"str, list or dict expected, {type(data)} passed")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 05:21:49 2019
@author: yibo
"""
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons
import numpy as np
import IPython
from IPython import get_ipython
from matplotlib.patches import Ellipse, Polygon
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
#Add a zoom-in/zoom-out function for mouse scroll
def zoom_factory(ax,base_scale = 2.):
def zoom_fun(event):
cur_xlim = ax.get_xlim()# get the current x and y limits
cur_ylim = ax.get_ylim()
cur_xrange = (cur_xlim[1] - cur_xlim[0])*.5
cur_yrange = (cur_ylim[1] - cur_ylim[0])*.5
xdata = event.xdata # get event location_x and location_y
ydata = event.ydata
if event.button == 'up': # deal with zoom in
scale_factor = 1/base_scale
elif event.button == 'down': # deal with zoom out
scale_factor = base_scale
else:# deal with something that should never happen
scale_factor = 1
print(event.button)
ax.set_xlim([xdata - cur_xrange*scale_factor, xdata + cur_xrange*scale_factor])# set new limits
ax.set_ylim([ydata - cur_yrange*scale_factor, ydata + cur_yrange*scale_factor])
plt.draw() # force re-draw
fig = ax.get_figure() # get the figure of interest
fig.canvas.mpl_connect('scroll_event',zoom_fun) # attach the call back
return zoom_fun#return the function
#Add a hot-key function for keyboard.
def key_factory(ax):
def key_fun(event):
print('keyboard function inside')
print(event.key)
cur_xlim = ax.get_xlim()# get the current x and y limits
cur_ylim = ax.get_ylim()
cur_xrange=(cur_xlim[1]-cur_xlim[0])
cur_yrange=(cur_ylim[1]-cur_ylim[0])
if event.key == "ctrl+c": # deal with zoom in
print(' back to center')
ax.set_xlim([xlim[0],xlim[1]])# set new limits
ax.set_ylim([ylim[0], ylim[1]])
plt.draw() # force re-draw
if event.key=='up':
ax.set_ylim([cur_ylim[0]+cur_yrange*0.1, cur_ylim[1]+cur_yrange*0.1])
plt.draw() # force re-draw
if event.key=='down':
ax.set_ylim([cur_ylim[0]-cur_yrange*0.1, cur_ylim[1]-cur_yrange*0.1])
plt.draw() # force re-draw
if event.key=='left':
ax.set_xlim([cur_xlim[0]-cur_xrange*0.1,cur_xlim[1]-cur_xrange*0.1])# set new limits
plt.draw() # force re-draw
if event.key=='right':
ax.set_xlim([cur_xlim[0]+cur_xrange*0.1,cur_xlim[1]+cur_xrange*0.1])# set new limits
plt.draw() # force re-draw
fig = ax.get_figure() # get the figure of interest
fig.canvas.mpl_connect('key_press_event',key_fun) # attach the call back
return key_fun#return the function
import colorsys
color_cold=(0,0,1) # Select coldest color, in RGB
color_hot=(1,0,0) # Select hottest color, in RGB
(h1,s1,v1)=colorsys.rgb_to_hsv(color_cold[0],color_cold[1],color_cold[2])
(h2,s2,v2)=colorsys.rgb_to_hsv(color_hot[0],color_hot[1],color_hot[2])
def define_color(stress): # map a stress value to a color
#use HSV instead of RGB here; only HSV lets the color change gradually.
#the plotted color of a line segment varies between the selected cold and hot colors.
if (stress>500):
stress=500
if (stress<-500):
stress=-500
(r,g,b)=colorsys.hsv_to_rgb(h1+(h2-h1)*(stress+500)/1000,s1+(s2-s1)*(stress+500)/1000,v1+(v2-v1)*(stress+500)/1000)
return((r,g,b))
def define_color2(current): # map a current density value to a color
if (current>10000):
current=10000
if(current<-10000):
current=-10000
(r,g,b)=colorsys.hsv_to_rgb(h1+(h2-h1)*(current)/25,s1+(s2-s1)*(current)/25,v1+(v2-v1)*(current)/25)
return((r,g,b))
def read_line(line_file):
a = np.loadtxt(line_file).astype(float)
return a
b=read_line("temp2/output.txt")
b2=read_line("temp2/output2.txt")
b3=read_line("temp2/stress.txt")
b4=read_line("temp2/void.txt")
b5=read_line("temp2/curden.txt")
m=b
m2=b2
m3=b3
m4=b4
m5=b5
line_number=np.size(m,0)
branch_number=np.size(m2,0)
stress_number=np.size(m3,0)
void_number=np.size(m4,0)
curden_number=np.size(m5,0)
match=0
for i in range(line_number):
for j in range(branch_number):
if((m[i,4]==m2[j,3])and(m[i,5]==m2[j,4])and(m[i,6]==m2[j,5])and(m[i,7]==m2[j,6])):
m[i,1]=m2[j,1]
m[i,2]=m2[j,2]
for i in range(line_number):
for j in range(stress_number):
if((m[i,0]==m3[j,0])and(m[i,1]==m3[j,1])and(m[i,2]==m3[j,2])):
m[i,11]=1
for i in range(line_number):
for j in range(void_number):
if((m[i,0]==m4[j,0])and(m[i,1]==m4[j,1])and(m[i,2]==m4[j,2])):
m[i,13]=m4[j,5]
match=match+1
if((m[i,4]==m4[j,3])and(m[i,5]==m4[j,4])):
m[i,12]=0
else:
m[i,12]=1
for i in range(line_number):
for j in range(curden_number):
if((m[i,0]==m5[j,0])and(m[i,1]==m5[j,1])and(m[i,2]==m5[j,2])):
m[i,9]=m5[j,3]
m[i,10]=1
#image alignment.
ipython = get_ipython()
ipython.magic("%matplotlib qt5") # inline or qt5
plt.rcParams ['figure.dpi']=100 #This is the default dpi 100.
plt.rcParams['figure.figsize']=(10.0,10.0) #default size is (6.0, 4.0), (6.0,6.0)will keep the correct length/width ratio
# count how many different layers
layer_label=[]
for i in range(line_number):
if(not(m[i,0] in layer_label)):
layer_label.append(m[i,0])
layer_label.sort()
total_layer=len(layer_label)
'''
#Add hatch pattern for layers.
pattern_lib=['/ ', '- ' , '+ ' , ' ' , '/-/- ' ,'/\ ']
pattern_num=len(pattern_lib)
layer_pattern_dic={}
for i in range(total_layer):
layer_pattern_dic.setdefault(layer_label[i],pattern_lib[i])
'''
#plot the figure
fig, ax = plt.subplots()
l=[]
final_match=0
for i in range(line_number):
layer_id=m[i,0]#m[i,0:13] is: layerID-treeID-BranchID-lineID-x1-y1-x2-y2-width-current_density-current_bit-stress_bit-void_bit - void_width
tree_id=m[i,1]
branch_id=m[i,2]
line_id=m[i,3]#not necessary
x1=m[i,4]
y1=m[i,5]
x2=m[i,6]
y2=m[i,7]
w=m[i,8]
cd=m[i,9]
c_bit=m[i,10]
s_bit=m[i,11]
v_bit=m[i,12]
v_width=m[i,13]
#if((c_bit==1)or(s_bit==1)or(v_bit==1)):
'''line_stress = np.loadtxt('temp/'+'stress'+str(layer_id)+'_'+str(tree_id)+'_'+str(branch_id)+'.txt').astype(np.int)'''
#line_stress = np.loadtxt('stress'+str(layer_id)+'_'+str(tree_id)+'_'+str(branch_id)+'.txt').astype(np.int)
'''line_stress=[100, 200, -300, 400, 250, -100, 200]''' #Fake stress
if(not(s_bit==1)):#no stress warning, just plot the branch.
line_stress=[0] #a fake stress value
num_segment = len(line_stress) #number of line segments divided by stress
if(x1==x2):
for j in range(num_segment):
#grid base color or stress color
branch_color=(0.8,0.8,0.8)
x,=ax.fill([x1-w/2,x1+w/2,x2+w/2,x2-w/2],[y1+(y2-y1)/num_segment*j,y1+(y2-y1)/num_segment*j,y1+(y2-y1)/num_segment*(j+1),y1+(y2-y1)/num_segment*(j+1)],color=branch_color,visible=True,label=str(int(layer_id)))
l.append(x) #x is the handle to a polygon
'''x=ax.add_patch(Polygon([[x1-w/2, y1], [x1+w/2, y1], [x2+w/2, y2], [x2-w/2, y2]], closed=True,fill=False, hatch=layer_pattern_dic[layer_id],label=str(int(layer_id))))
l.append(x)'''
else:
for j in range(num_segment):
branch_color=(0.8,0.8,0.8)
x,=ax.fill([x1+(x2-x1)/num_segment*j,x1+(x2-x1)/num_segment*j,x1+(x2-x1)/num_segment*(j+1),x1+(x2-x1)/num_segment*(j+1)],[y1+w/2,y1-w/2,y2-w/2,y2+w/2],color=branch_color,visible=True,label=str(int(layer_id)))
l.append(x)
'''x=ax.add_patch(Polygon([[x1, y1+w/2], [x1, y1-w/2], [x2, y2-w/2], [x2, y2+w/2]], closed=True,fill=False, hatch=layer_pattern_dic[layer_id],label=str(int(layer_id))))
l.append(x)'''
else:#Stress warning, load stress information
pass;
special=[2819,
2821,
2835,
2836,
2855,
2857,
2871,
2872,
2891,
2893,
2907,
2908,
2932,
2933,
2952,
2954]
for i in range(line_number):
layer_id=m[i,0]#m[i,0:13] is: layerID-treeID-BranchID-lineID-x1-y1-x2-y2-width-current_density-current_bit-stress_bit-void_bit - void_width
tree_id=m[i,1]
branch_id=m[i,2]
line_id=m[i,3]#not necessary
x1=m[i,4]
y1=m[i,5]
x2=m[i,6]
y2=m[i,7]
w=m[i,8]
cd=m[i,9]
c_bit=m[i,10]
s_bit=m[i,11]
v_bit=m[i,12]
v_width=m[i,13]
#if((c_bit==1)or(s_bit==1)or(v_bit==1)):
'''line_stress = np.loadtxt('temp/'+'stress'+str(layer_id)+'_'+str(tree_id)+'_'+str(branch_id)+'.txt').astype(np.int)'''
#line_stress = np.loadtxt('stress'+str(layer_id)+'_'+str(tree_id)+'_'+str(branch_id)+'.txt').astype(np.int)
'''line_stress=[100, 200, -300, 400, 250, -100, 200]''' #Fake stress
if(not(s_bit==1)):#no stress warning, just plot the branch.
pass;
else:#Stress warning, load stress information
'''w=w*50'''
line_stress = np.loadtxt('temp2/'+'stress'+str(int(layer_id))+'_'+str(int(tree_id))+'_'+str(int(branch_id))+'.txt').astype(int)
num_segment = len(line_stress) #number of line segments divided by stress
if(x1==x2):
for j in range(num_segment):
#grid base color or stress color
branch_color=define_color(line_stress[j])
x,=ax.fill([x1-w/2,x1+w/2,x2+w/2,x2-w/2],[y1+(y2-y1)/num_segment*j,y1+(y2-y1)/num_segment*j,y1+(y2-y1)/num_segment*(j+1),y1+(y2-y1)/num_segment*(j+1)],color=branch_color,visible=True,label=str(int(layer_id)))
l.append(x) #x is the handle to a polygon
'''x=ax.add_patch(Polygon([[x1-w/2, y1], [x1+w/2, y1], [x2+w/2, y2], [x2-w/2, y2]], closed=True,fill=False, hatch=layer_pattern_dic[layer_id],label=str(int(layer_id))))
l.append(x)'''
else:
for j in range(num_segment):
branch_color=define_color(line_stress[j])
x,=ax.fill([x1+(x2-x1)/num_segment*j,x1+(x2-x1)/num_segment*j,x1+(x2-x1)/num_segment*(j+1),x1+(x2-x1)/num_segment*(j+1)],[y1+w/2,y1-w/2,y2-w/2,y2+w/2],color=branch_color,visible=True,label=str(int(layer_id)))
l.append(x)
'''x=ax.add_patch(Polygon([[x1, y1+w/2], [x1, y1-w/2], [x2, y2-w/2], [x2, y2+w/2]], closed=True,fill=False, hatch=layer_pattern_dic[layer_id],label=str(int(layer_id))))
l.append(x)'''
'''if((v_bit!=-1)and(v_width!=0)):'''
if(i in special):
'''v_width=v_width*10'''
v_width=1
w=w*3
final_match=final_match+1
#Then plot the Void
if(x1==x2):
if(v_bit==0):
y2=y1+v_width
else:
y1=y2-v_width
x,=ax.fill([x1-w/2,x1+w/2,x2+w/2,x2-w/2],[y1,y1,y2,y2],color=(0,0,0),visible=True,label=str(int(layer_id)))
l.append(x)
else:
if(v_bit==0):
x2=x1+v_width
else:
x1=x2-v_width
x,=ax.fill([x1,x1,x2,x2],[y1-w/2,y1+w/2,y2+w/2,y2-w/2],color=(0,0,0),visible=True,label=str(int(layer_id)))
l.append(x)
#ax range data for the key_factory
xlim=ax.get_xlim()
ylim=ax.get_ylim()
#Adjust the following font (this sets the font used by the check buttons)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 10}
matplotlib.rc('font', **font)
# Make checkbuttons with all plotted lines with correct visibility
lines=l
rax = plt.axes([0.9, 0.35, 0.1, 0.3]) #the location of the button bar [left, bottom, width, height]
labels = [str(line.get_label()) for line in lines]#the label of every single polygon.
visibility = [line.get_visible() for line in lines]
# Attention: the labels passed to CheckButtons (e.g. ['25','26','27']) must match the polygon labels collected in labels=[str(line.get_label()) for line in lines]
'''
check = CheckButtons(rax, ['41','25','26','27'], [True,True,True,True])#check = CheckButtons(rax, labels, visibility)
'''
# dynamically decide how many check buttons are needed
button_label_list=[]
button_default_value=[]
for i in range(len(layer_label)):
#button_label_list.append(str(int(layer_label[i]))+' '+layer_pattern_dic[layer_label[i]])
button_label_list.append('L'+str(int(layer_label[i])))
button_default_value.append(True)
check=CheckButtons(rax,button_label_list,button_default_value)#check = CheckButtons(rax, labels, visibility)
'''
#Add corresponding graphic hatch example for checkbuttons
for i in range(total_layer):
rax2 = plt.axes([0.952, 0.35+0.3/(total_layer+1)*(i+0.75), 0.047,0.3/(total_layer+1)*0.5 ]) #the location of button bar [right_down_x,right_down_y,width,height]
rax2.set_axis_off() #Turn the x- and y-axis off.
#rax2.fill([0,2,2,0],[0,0,1,1],hatch=pattern_lib[i])
rax2.add_patch(Polygon([[0,0],[1,0],[1,1],[0,1]],closed=True,fill=False, hatch=layer_pattern_dic[layer_label[total_layer-1-i]]))
'''
def func(label): # the label passed in here is the string shown on the button; use it to look up each polygon's label
label1=label.split('L')
label=label1[1]
'''
index = labels.index(label)
lines[index].set_visible(not lines[index].get_visible())
plt.draw()'''
#now, change index to index[], which means a group of indexes
index=[]
line_num=len(lines)
for j in range(line_num):
if(labels[j]==label):
index.append(j)
#print(index)
for k in range(len(index)):
lines[index[k]].set_visible(not lines[index[k]].get_visible())
plt.draw()
check.on_clicked(func)
#add colorbar
#Attention: rax=plt.axes([bottomleft_x,bottomleft_y,width,height]) can not fully decide the colorbar size.
# The pyplot.colorbar() has a limited height range, which is 0.3 max.
# The pyplot.colorbar() has a fixed width, not adjustable.
rax = plt.axes([-0.182, 0.15, 0.2, 0.6])
sm = plt.cm.ScalarMappable(cmap="jet", norm=plt.Normalize(vmin=-500, vmax=500))
sm.set_array([])
plt.colorbar(sm)
#Call the mouse zoom-in/out function.
scale=2
f=zoom_factory(ax,base_scale=scale)
#Call the keyboard function
f2=key_factory(ax)
#plot the image.
plt.show()
#
#input("Press <enter>")
|
# ----------------------------------------------------------------------------
# Python Wires Tests
# ----------------------------------------------------------------------------
# Copyright (c) Tiago Montes.
# See LICENSE for details.
# ----------------------------------------------------------------------------
"""
Generic usage test driver mixin.
"""
from __future__ import absolute_import
from . import helpers
class TestWiresUsageMixin(helpers.CallTrackerAssertMixin):
"""
Drives Wires usage tests, requiring the class it is mixed into to:
- Have a Wires instance at self.w.
- Allow wiring via self.wire.
- Allow unwiring via self.unwire.
"""
def test_wire_call(self):
"""
Wires the `this` call and calls it.
Checks the wired callable was called once with the correct arguments.
"""
tracker = helpers.CallTracker()
self.w.this.wire(tracker)
self.addCleanup(self.w.this.unwire, tracker)
self.w.this()
self.assert_called(tracker, [
((), {},),
])
def test_double_wire_call(self):
"""
Wires the `this` call twice and calls it.
Checks the wired callable was called twice with the correct arguments.
"""
tracker = helpers.CallTracker()
self.w.this.wire(tracker)
self.w.this.wire(tracker)
self.addCleanup(self.w.this.unwire, tracker)
self.addCleanup(self.w.this.unwire, tracker)
self.w.this()
self.assert_called(tracker, [
((), {},),
((), {},),
])
def test_multi_wired_call(self):
"""
Wires the `this` call to multiple callables and calls it.
Checks that each of the wired callables was called once with the correct
arguments.
"""
num_wirings = 10
trackers = [helpers.CallTracker() for _ in range(num_wirings)]
for tracker in trackers:
self.w.this.wire(tracker)
self.addCleanup(self.w.this.unwire, tracker)
self.w.this()
for tracker in trackers:
self.assert_called(tracker, [
((), {},),
])
def test_wire_call_unwire_call(self):
"""
Wires the `this` call to a callable and calls it.
Checks the wired callable was called once with the correct arguments.
Unwires the `this` callable and calls it again.
Checks the wired callable wasn't called again.
"""
tracker = helpers.CallTracker()
self.w.this.wire(tracker)
self.w.this()
self.assert_single_call_no_args(tracker)
self.w.this.unwire(tracker)
self.w.this()
self.assert_single_call_no_args(tracker)
# ----------------------------------------------------------------------------
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to export a model for batch prediction."""
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils as saved_model_utils
_TOP_PREDICTIONS_IN_OUTPUT = 82
class ModelExporter(object):
def __init__(self, model, reader):
self.model = model
self.reader = reader
with tf.Graph().as_default() as graph:
self.inputs, self.outputs = self.build_inputs_and_outputs()
self.graph = graph
self.saver = tf.train.Saver(tf.global_variables(), sharded=True)
def export_model(self, model_dir, global_step_val, last_checkpoint):
"""Exports the model so that it can used for batch predictions."""
with self.graph.as_default():
with tf.Session() as session:
session.run(tf.global_variables_initializer())
self.saver.restore(session, last_checkpoint)
signature = signature_def_utils.build_signature_def(
inputs=self.inputs,
outputs=self.outputs,
method_name=signature_constants.PREDICT_METHOD_NAME)
signature_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature}
model_builder = saved_model_builder.SavedModelBuilder(model_dir)
model_builder.add_meta_graph_and_variables(session,
tags=[tag_constants.SERVING],
signature_def_map=signature_map,
clear_devices=True)
model_builder.save()
def build_inputs_and_outputs(self):
input_name_list = self.reader.dname_string_list  # model input variable names
input_shape_list = self.reader.data_shape_list  # model input shapes
input_dtype_list = self.reader.dtype_list  # model input dtypes
inputs_dict = {}
for input_name, input_shape, input_dtype in zip(input_name_list, input_shape_list, input_dtype_list):
inputs_dict[input_name] = tf.placeholder(shape=[None] + input_shape, dtype=input_dtype, name=input_name)  # prepend a batch-size dimension
with tf.variable_scope("tower"):
result = self.model(inputs_dict,is_training=False)
predictions = result["tagging_output_fusion"]["predictions"]
video_embedding = result["video_embedding"]
top_predictions, top_indices = tf.nn.top_k(predictions, _TOP_PREDICTIONS_IN_OUTPUT)
#inputs = {"video_input_placeholder": saved_model_utils.build_tensor_info(video_input_placeholder),
# "audio_input_placeholder": saved_model_utils.build_tensor_info(audio_input_placeholder),
# "text_input_placeholder": saved_model_utils.build_tensor_info(text_input_placeholder),
# "num_frames_placeholder": saved_model_utils.build_tensor_info(num_frames_placeholder)}
inputs = {key:saved_model_utils.build_tensor_info(val) for key,val in inputs_dict.items()}
outputs = {
"class_indexes": saved_model_utils.build_tensor_info(top_indices),
"video_embedding": saved_model_utils.build_tensor_info(video_embedding),
"predictions": saved_model_utils.build_tensor_info(top_predictions)}
return inputs, outputs
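# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the exporter above): loading the
# SavedModel written by ModelExporter.export_model with the TF1-style loader
# and looking up the default serving signature. `export_dir` is whatever
# directory was passed as `model_dir` when exporting.
def _example_load_exported_model(export_dir):
    with tf.Session(graph=tf.Graph()) as session:
        meta_graph = tf.saved_model.loader.load(session, [tag_constants.SERVING], export_dir)
        signature = meta_graph.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        # signature.inputs / signature.outputs map the names built in
        # build_inputs_and_outputs to concrete tensor names in the graph.
        return signature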
|
import random
from typing import List
from operator_generator_strategies.distinct_operator_strategies.distinct_window_strategy import \
DistinctWindowGeneratorStrategy
from utils.contracts import Aggregations
from operator_generator_strategies.base_generator_strategy import BaseGeneratorStrategy
from operators.aggregation_operator import AggregationOperator
from utils.contracts import Schema, Operator
from utils.utils import random_list_element, random_name
class DistinctAggregationGeneratorStrategy(BaseGeneratorStrategy):
def __init__(self):
super().__init__()
def generate(self, schema: Schema) -> List[Operator]:
window = DistinctWindowGeneratorStrategy().generate(schema)[0]
fields = schema.get_numerical_fields()
if window._windowKey:
fields.remove(window._windowKey)
_, field = random_list_element(fields)
_, aggregationOperation = random_list_element(
[Aggregations.avg, Aggregations.min, Aggregations.max, Aggregations.sum])
outputField = field
alias = ""
aggregation = f"{aggregationOperation.value}(Attribute(\"{field}\"))"
if bool(random.getrandbits(1)):
alias = random_name()
outputField = alias
schema = Schema(name=schema.name, int_fields=[outputField], double_fields=[], string_fields=[],
timestamp_fields=window.get_output_schema().timestamp_fields,
fieldNameMapping=schema.get_field_name_mapping())
aggregationOperator = AggregationOperator(aggregation=aggregation, alias=alias, window=window, schema=schema)
return [aggregationOperator]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2016 pocsuite developers (https://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
import sys
from pocsuite_cli import pcsInit
from .lib.core.common import banner
from .lib.core.common import dataToStdout
from .lib.core.settings import PCS_OPTIONS
def main():
try:
pocFile, targetUrl = sys.argv[1: 3]
except ValueError:
excMsg = "usage: pcs-attack [pocfile] [url]\n"
excMsg += "pocsuite: error: too few arguments"
dataToStdout(excMsg)
sys.exit(1)
PCS_OPTIONS.update(
{
'url': targetUrl, 'pocFile': pocFile, 'headers': None, 'extra_params': None,
'host': None, 'Mode': 'attack', 'retry': None, 'delay': None, 'dork': None,
'vulKeyword': None,
}
)
pcsInit(PCS_OPTIONS)
if __name__ == "__main__":
main()
|
import io
import pickle
import socket
import psutil
import argparse
import sys
sys.path.append("..")
from ...messaging import network_params
from ...messaging import message
from ...messaging import messageutils
import getpass
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--Kill")
args = parser.parse_args()
# username = getpass.getuser()
while(True):
try:
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_address = ('', network_params.KILL_RECV_PORT)
listen_socket.bind(listen_address)
listen_socket.listen(5)
messageutils.make_and_send_message(msg_type = "KILL_JOB" ,content = args.Kill, to = '127.0.0.1', port = network_params.SERVER_RECV_PORT, file_path =None, msg_socket=None)
print('here')
connection, client_address = listen_socket.accept()
data_list = []
data = connection.recv(network_params.BUFFER_SIZE)
while data:
data_list.append(data)
data = connection.recv(network_params.BUFFER_SIZE)
data = b''.join(data_list)
msg = pickle.loads(data)
assert isinstance(msg, message.Message), "Received object is not a Message instance"
if(msg.msg_type == 'ACK_JOB_KILL_SUCCESS'):
print("Job Killed \n")
print("Job Id : " + str(msg.content))
elif(msg.msg_type == 'ERR_JOB_KILL'):
print("Job could not be killed \n")
break
except BrokenPipeError:
continue
except OSError:
continue
|
import base64
import datetime
import json
import logging
import urllib.parse
import OpenSSL.crypto as crypto
import aiohttp
import pytz
import requests
import esia_client.exceptions
logger = logging.getLogger(__name__)
class FoundLocation(esia_client.exceptions.EsiaError):
def __init__(self, location: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.location = location
def make_request(url: str, method: str = 'GET', **kwargs) -> dict:
"""
Makes a request to the given URL with the supplied parameters and returns a dict parsed from the JSON response
Keyword Args:
headers: Request HTTP Headers
params: URI HTTP request params
data: POST HTTP data
timeout: Timeout requests
Raises:
HttpError: network or web-server error
IncorrectJsonError: failed to parse the JSON response
"""
try:
response = requests.request(method, url, **kwargs)
logger.debug(f'Status {response.status_code} from {method} request to {url} with {kwargs}')
response.raise_for_status()
if response.status_code in (200, 302) and response.headers.get('Location'):
raise FoundLocation(location=response.headers.get('Location'))
elif not response.headers['Content-type'].startswith('application/json'):
logger.error(f'{response.headers["Content-type"]} -> {response.text}')
raise esia_client.exceptions.IncorrectJsonError(
f'Invalid content type -> {response.headers["content-type"]}'
)
return response.json()
except requests.HTTPError as e:
logger.error(e, exc_info=True)
raise esia_client.exceptions.HttpError(e)
except ValueError as e:
logger.error(e, exc_info=True)
raise esia_client.exceptions.IncorrectJsonError(e)
async def make_async_request(
url: str, method: str = 'GET', **kwargs) -> dict:
"""
Makes an asynchronous request to the given URL with the supplied parameters and returns a dict parsed from the JSON response
Keyword Args:
headers: Request HTTP Headers
params: URI HTTP request params
data: POST HTTP data
timeout: Timeout requests
Raises:
HttpError: network or web-server error
IncorrectJsonError: failed to parse the JSON response
"""
try:
async with aiohttp.client.ClientSession() as session:
async with session.request(method, url, **kwargs) as response:
logger.debug(f'Status {response.status} from {method} request to {url} with {kwargs}')
response.raise_for_status()
if response.status in (200, 302) and response.headers.get('Location'):
raise FoundLocation(location=response.headers.get('Location'))
elif not response.content_type.startswith('application/json'):
text = await response.text()
logger.error(f'{response.content_type} -> {text}')
raise esia_client.exceptions.IncorrectJsonError(
f'Invalid content type -> {response.content_type}'
)
return await response.json()
except aiohttp.client.ClientError as e:
logger.error(e, exc_info=True)
raise esia_client.exceptions.HttpError(e)
except ValueError as e:
logger.error(e, exc_info=True)
raise esia_client.exceptions.IncorrectJsonError(e)
def sign(content: str, crt: crypto.X509, pkey: crypto.PKey) -> str:
"""
Signs the request content with a digital signature
Args:
content: the data to sign
crt: the signing certificate (X509 object)
pkey: the private key (PKey object)
"""
bio_in = crypto._new_mem_buf(content.encode())
PKCS7_DETACHED = 0x40
pkcs7 = crypto._lib.PKCS7_sign(crt._x509, pkey._pkey, crypto._ffi.NULL, bio_in, PKCS7_DETACHED)
bio_out = crypto._new_mem_buf()
crypto._lib.i2d_PKCS7_bio(bio_out, pkcs7)
sigbytes = crypto._bio_to_string(bio_out)
return base64.urlsafe_b64encode(sigbytes).decode()
def get_timestamp() -> str:
"""
Returns the current timestamp as a formatted string
"""
return datetime.datetime.now(pytz.utc).strftime('%Y.%m.%d %H:%M:%S %z').strip()
def decode_payload(base64string: str) -> dict:
"""
Decodes the payload of a JWT token
Args:
base64string: JSON encoded as url-safe base64
"""
offset = len(base64string) % 4
base64string += '=' * (4 - offset) if offset else ''
try:
return json.loads(base64.urlsafe_b64decode(base64string))
except (ValueError, Exception) as e:
logger.error(e, exc_info=True)
raise esia_client.exceptions.IncorrectMarkerError(e)
def format_uri_params(params: dict) -> str:
"""
Formats a URI query string from the given parameters
Args:
params: request parameters
"""
a = '&'.join((f'{key}={value}' for key, value in params.items()))
return '&'.join((f'{key}={urllib.parse.quote(str(value).encode())}' for key, value in params.items()))
|
# Import the Flask Framework
from flask import Flask
import os
import sys
# This is necessary so other directories can find the lib folder
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
from os.path import isfile, join
from flask import Blueprint, request, session, g, redirect, url_for, abort, render_template, flash, make_response
app = Flask(__name__)
app.config.from_pyfile('config.py')
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
# Link up modules
from modules.home import home_module
from modules.login import login_module
from modules.search import search_module
from modules.submit import submit_module
app.register_blueprint(home_module)
app.register_blueprint(login_module)
app.register_blueprint(search_module)
app.register_blueprint(submit_module)
|
#!/usr/bin/env python
import astropy.io.fits as pyfits
import numpy as np
import sys,pylab
if len(sys.argv)<3 :
print sys.argv[0],"residuals.fits spots.list"
sys.exit(0)
hdulist=pyfits.open(sys.argv[1])
hdulist.info()
data=hdulist[0].data
model=hdulist["MODEL"].data
ivar=(hdulist["PULL"].data/(hdulist["RESIDUAL"].data+(hdulist["RESIDUAL"].data==0)))**2 # (PULL/RES)**2 = IVAR
var=(hdulist["RESIDUAL"].data/(hdulist["PULL"].data+(hdulist["PULL"].data==0)))**2 # VAR
vals=np.loadtxt(sys.argv[2]).T
ww=vals[0]
xx=vals[3]
yy=vals[4]
ff=vals[5]
hx=7
hy=7
resampling=8
bins=np.linspace(-hx-1,hx+1,(2*hx+3)*resampling)
xbins=bins[:-1]+(bins[1]-bins[0])/2.
data_xprof=np.zeros((bins.size-1))
model_xprof=np.zeros((bins.size-1))
data_yprof=np.zeros((bins.size-1))
model_yprof=np.zeros((bins.size-1))
count=0
for w,x,y,f in zip(ww,xx,yy,ff) :
print(w, x, y)
count += 1
if count %100 == 0 :
print(count, w, x, y, f)
xi=int(x)
yi=int(y)
if 0 : # stack on other dim
data_stamp_xprof = np.sum(data[yi-2:yi+2+1,xi-hx:xi+hx+1],axis=0)
var_stamp_xprof = np.sum(var[yi-2:yi+2+1,xi-hx:xi+hx+1],axis=0)
model_stamp_xprof = np.sum(model[yi-2:yi+2+1,xi-hx:xi+hx+1],axis=0)
data_stamp_yprof = np.sum(data[yi-hy:yi+hy+1,xi-2:xi+2+1],axis=1)
var_stamp_yprof = np.sum(var[yi-2:yi+2+1,xi-hx:xi+hx+1],axis=1)
model_stamp_yprof = np.sum(model[yi-hy:yi+hy+1,xi-2:xi+2+1],axis=1)
else : # middle slice
data_stamp_xprof = data[yi,xi-hx:xi+hx+1]
model_stamp_xprof = model[yi,xi-hx:xi+hx+1]
data_stamp_yprof = data[yi-hy:yi+hy+1,xi]
model_stamp_yprof = model[yi-hy:yi+hy+1,xi]
if np.sum(model_stamp_xprof)<=0 :
continue
stamp_x = np.linspace(-hx-(x-xi+0.5),hx+1-(x-xi+0.5),2*hx+1)
stamp_y = np.linspace(-hy-(y-yi+0.5),hy+1-(y-yi+0.5),2*hy+1)
if stamp_y.size != data_stamp_yprof.size :
continue
if 0 : # interpolation version
data_xprof += np.interp(xbins,stamp_x,data_stamp_xprof)
model_xprof += np.interp(xbins,stamp_x,model_stamp_xprof)
data_yprof += np.interp(xbins,stamp_y,data_stamp_yprof)
model_yprof += np.interp(xbins,stamp_y,model_stamp_yprof)
else : # stacking version
txbins=int(np.floor((xi-x+1.)*resampling))+np.arange(stamp_x.size)*resampling
tybins=int(np.floor((yi-y+1.)*resampling))+np.arange(stamp_y.size)*resampling
for i in range(resampling) :
data_xprof[txbins+i] += data_stamp_xprof
model_xprof[txbins+i] += model_stamp_xprof
data_yprof[tybins+i] += data_stamp_yprof
model_yprof[tybins+i] += model_stamp_yprof
norme=np.max(data_xprof)
a=pylab.subplot(1,2,1)
a.plot(xbins,data_xprof/norme,"-",c="b",lw=2,label="stacked data")
a.plot(xbins,model_xprof/norme,"--",c="r",lw=2,label="stacked PSF model")
a.legend(loc="upper center")
a.set_ylim([-0.05,1.5])
a.set_xlabel("X CCD")
norme=np.max(data_yprof)
a=pylab.subplot(1,2,2)
a.plot(xbins,data_yprof/norme,"-",c="b",lw=2,label="stacked data")
a.plot(xbins,model_yprof/norme,"--",c="r",lw=2,label="stacked PSF model")
a.legend(loc="upper center")
a.set_ylim([-0.05,1.5])
a.set_xlabel("Y CCD")
pylab.show()
sys.exit(0)
pylab.plot(x,y,"o")
pylab.show()
|
# -*- coding: utf-8 -*-
import sympy as sy
from sympy import symbols, Matrix
def inv_sym_3x3(m: Matrix, as_adj_det=False):
P11, P12, P13, P21, P22, P23, P31, P32, P33 = \
symbols('P_{11} P_{12} P_{13} P_{21} P_{22} P_{23} P_{31} \
P_{32} P_{33}', real=True)
Pij = [[P11, P12, P13], [P21, P22, P23], [P31, P32, P33]]
P = sy.Matrix(Pij)
detP = P.det()
adjP = P.adjugate()
invP = adjP / detP
subs = {s: r for s, r in zip(sy.flatten(P), sy.flatten(m))}
if as_adj_det:
return detP.subs(subs), adjP.subs(subs)
else:
return invP.subs(subs)
"""
from sympy.utilities.lambdify import lambdify
import numpy as np
f_det3x3 = lambdify([P_], detP)
f_adj3x3 = lambdify([P_], adjP)
f_inv3x3 = lambdify([P_], invP)
arr = np.eye(3) * 3
f_det3x3(arr)
f_adj3x3(arr)
f_inv3x3(arr)
"""
|
import simplejson as json
import sys
import re
import os
# Uses a customized version of simplejson, because the dump json has entries
# with the same key, and the order of the keys is important. The customized
# version adds a counter to each key name, for uniqueness, and to recover
# order by sorting. Then at pretty print time, this is removed. Hacky but it
# works.
sys.path.insert(0, '.')
class Frame:
def __init__(self, d):
self.d = d
def GetHash(self):
keys = self.d.keys()
keys.sort()
for key in keys:
if key[13:] == 'hash':
return self.d[key]
return None
def GetUpdate(self):
keys = self.d.keys()
keys.sort()
for key in keys:
if key[13:] == 'updates':
return int(self.d[key])
return 0
def PrettyPrint(self):
# Example: "__00000000__-updates": "9951",
s = json.dumps(self.d, sort_keys=True, indent=4)
p = re.compile('^.*(?P<remove>__.*__-).*$')
lines = []
for line in s.splitlines():
line = line.rstrip()
m = p.match(line)
if not m:
lines.append(line)
else:
index = line.find(m.group('remove'))
line2 = line[0:index] + line[index+len(m.group('remove')):]
lines.append(line2)
return '\n'.join(lines)
class Dump:
def __init__(self, filename):
f = file(filename)
self.j = json.loads(f.read().rstrip('\x00'))
f.close()
def GetFrameNumbers(self):
numbers = []
keys = self.j.keys()
keys.sort()
for key in keys:
if key[13:].startswith('frame'):
numbers.append(int(key[18:]))
return numbers
def GetFrame(self, framenumber):
keys = self.j.keys()
keys.sort()
for key in keys:
if key[13:].startswith('frame'):
if framenumber == int(key[18:]):
return Frame(self.j[key])
return None
class Differ:
def __init__(self, file0, file1):
self.badupdate = -1
try:
dump0 = Dump(file0)
dump1 = Dump(file1)
for number in dump0.GetFrameNumbers():
frame0 = dump0.GetFrame(number)
frame1 = dump1.GetFrame(number)
if not frame0:
break
if not frame1:
break
if frame0.GetHash() != frame1.GetHash():
self.badupdate = frame0.GetUpdate()
self.frame0 = frame0
self.frame1 = frame1
break
except:
print 'ERROR loading %s or %s' % (file0, file1)
def GetBadUpdateNumber(self):
return self.badupdate
def Diff(self, frame0file, frame1file, outfile):
f = file(frame0file, 'w')
f.write(self.frame0.PrettyPrint())
f.close()
f = file(frame1file, 'w')
f.write(self.frame1.PrettyPrint())
f.close()
os.system('diff %s %s > %s' % (frame0file, frame1file, outfile))
if __name__ == '__main__':
# Find the frame that is not matching
d = Differ(sys.argv[1], sys.argv[2])
out0 = '/tmp/foo'
if len(sys.argv) > 3:
out0 = sys.argv[3]
out1 = '/tmp/bar'
if len(sys.argv) > 4:
out1 = sys.argv[4]
d.Diff(out0, out1, '/tmp/diff')
f = file('/tmp/diff')
for line in f.read().splitlines():
print line
f.close()
|
import argparse
import zipfile
import yaml
import os
from .peanut import Job
from .mpi_calibration import MPICalibration
from .mpi_saturation import MPISaturation
from .hpl import HPL
from .blas_calibration import BLASCalibration
from .stress_test import StressTest
from .bit_flips import BitFlips
from .version import __version__, __git_version__
from .smpi_hpl import SMPIHPL
from .frequency_get import FrequencyGet
from .sw4lite import SW4lite
from .simdjson import Simdjson
from .mpi_ring import MPIRing
from .memory_calibration import MemoryCalibration
from .empty import Empty
classes = [
MPICalibration,
MPISaturation,
BLASCalibration,
StressTest,
HPL,
SMPIHPL,
FrequencyGet,
SW4lite,
BitFlips,
Simdjson,
MPIRing,
MemoryCalibration,
Empty,
]
entry_points = {cls.__name__: cls.main for cls in classes}
def replay(args):
parser = argparse.ArgumentParser(description='Peanut, the tiny job runner')
parser.add_argument('zip_name', type=str, help='Zip file of the experiment to replay.')
args = parser.parse_args(args)
try:
input_zip = zipfile.ZipFile(args.zip_name)
except FileNotFoundError:
parser.error('File %s does not exist' % args.zip_name)
try:
info = yaml.load(input_zip.read('info.yaml'), Loader=yaml.SafeLoader)
replay_command = info['replay_command']
expfile_name = info['expfile']
expfile_content = input_zip.read(expfile_name)
git_version = info['peanut_git_version']
except KeyError:
parser.error('Wrong format for archive %s' % args.zip_name)
if git_version != __git_version__:
parser.error('Mismatch between the peanut versions. To replay this experiment, checkout %s' % git_version)
with open(expfile_name, 'wb') as f:
f.write(expfile_content)
args = replay_command.split(' ')
assert args[0] == 'peanut'
entry_points[args[1]](args[2:])
os.remove(expfile_name)
entry_points['replay'] = replay
def main():
parser = argparse.ArgumentParser(description='Peanut, the tiny job runner')
parser.add_argument('command', type=str, help='Experiment to run.')
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
parser.add_argument('--git-version', action='version',
version='%(prog)s {version}'.format(version=__git_version__))
args, command_args = parser.parse_known_args()
try:
func = entry_points[args.command]
except KeyError:
if not os.path.isfile(args.command):
parser.error('Either provide your own Python script or choose from %s' % ', '.join(entry_points.keys()))
else: # some black magic to import the provided Python file
import importlib.util
import inspect
spec = importlib.util.spec_from_file_location("my_module", args.command)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
clsmembers = inspect.getmembers(module, inspect.isclass)
clsmembers = [(name, cls) for (name, cls) in clsmembers if issubclass(cls, Job) and cls != Job]
if len(clsmembers) == 0:
parser.error('Did not find any class inheriting from %s in file %s' % (Job.__name__, args.command))
if len(clsmembers) > 1:
parser.error('Found more than one class inheriting from %s in file %s:\n%s' % (Job.__name__,
args.command, ', '.join([name for (name, cls) in clsmembers])))
func = clsmembers[0][1].main
func(command_args)
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.7 on 2021-04-12 12:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0009_tradeagreement'),
('interaction', '0074_update_service_questions_and_answers'),
]
operations = [
migrations.AddField(
model_name='interaction',
name='has_related_trade_agreements',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='interaction',
name='related_trade_agreements',
field=models.ManyToManyField(blank=True, to='metadata.TradeAgreement'),
),
]
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from models import ToBeAnnotated
def index(request):
tbas = ToBeAnnotated.objects.all()
return render_to_response("index.html",RequestContext(request, {"tbas":tbas}))
|
import setuptools
import os
from pathlib import Path
root = Path(os.path.realpath(__file__)).parent
version_file = root / "src" / "sphinx_codeautolink" / "VERSION"
readme_file = root / "readme_pypi.rst"
setuptools.setup(
name="sphinx-codeautolink",
version=version_file.read_text().strip(),
license="MIT",
description="Automatic links from code examples to reference documentation.",
keywords="sphinx extension code link",
long_description=readme_file.read_text(),
long_description_content_type="text/x-rst",
url="https://pypi.org/project/sphinx-codeautolink",
download_url="https://pypi.org/project/sphinx-codeautolink",
project_urls={
"Source": "https://github.com/felix-hilden/sphinx-codeautolink",
"Issues": "https://github.com/felix-hilden/sphinx-codeautolink/issues",
"Documentation": "https://sphinx-codeautolink.rtfd.org",
},
author="Felix Hildén",
author_email="felix.hilden@gmail.com",
maintainer="Felix Hildén",
maintainer_email="felix.hilden@gmail.com",
packages=setuptools.find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
python_requires=">=3.6",
install_requires=[
'sphinx>=3.2.0',
'beautifulsoup4',
'dataclasses;python_version<"3.7"',
],
# Keep extras in sync with requirements manually
extras_require={
"ipython": ["ipython"],
},
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Software Development :: Documentation',
],
)
|
import numpy as np
data_a = np.fromfile('error.dat', dtype=float, count=-1, sep='')
data_a1 = data_a[::2]
data_a2 = data_a[1::2]
import matplotlib as mpl
font = {'family' : 'serif',
'size' : 16}
mpl.rc('font', **font)
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(data_a1, data_a2, label=r'$\eta_E(t)$', linewidth=2)
plt.xlabel(r"$t \, [a.u.]$",fontsize=20)
plt.ylabel(r"$\eta_E(t)$",fontsize=20)
plt.legend(loc='upper right')
plt.savefig('error.pdf', bbox_inches='tight', transparent=True)
|
# -*- coding: utf-8 -*-
class PreviewGeneratorException(Exception):
pass
class UnavailablePreviewType(PreviewGeneratorException):
"""
Exception raised when a preview method is not implemented for the type of
file you are processing
"""
pass
class UnsupportedMimeType(PreviewGeneratorException):
"""
Exception raised when a file mimetype is not found in supported mimetypes
"""
pass
class BuilderNotLoaded(PreviewGeneratorException):
"""
Exception raised when the factory is used but no builder has been loaded
You must call factory.load_builders() before using the factory
"""
pass
class ExecutableNotFound(PreviewGeneratorException):
pass
class BuilderDependencyNotFound(PreviewGeneratorException):
pass
|
# Author: Luka Maletin
from data_structures.stack import Stack
OPERATORS = '&|!'
def infix_to_postfix(expression):
postfix = []
priority = {'(': 1, '&': 2, '|': 2, '!': 2}
operators = Stack()
for token in expression:
if token in OPERATORS:
# Operators are pushed onto the stack, but first any stacked operators
# with higher or equal priority are popped to the result:
while not operators.is_empty() and priority[token] <= priority[operators.top()]:
postfix.append(operators.pop())
operators.push(token)
# A left parenthesis is pushed onto the stack:
elif token == '(':
operators.push(token)
# Operators inside the parentheses are popped from the stack to the result:
elif token == ')':
while operators.top() != '(':
postfix.append(operators.pop())
operators.pop() # Pop the left parenthesis from the stack.
# Operands are added to the result:
else:
postfix.append(token)
while not operators.is_empty():
# The remaining operators are added from the stack to the result:
postfix.append(operators.pop())
return postfix
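# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module): converting two
# small boolean expressions; single-character operands are assumed, matching
# the character-by-character tokenisation above.
if __name__ == '__main__':
    print(infix_to_postfix('a&b|c'))    # -> ['a', 'b', '&', 'c', '|']
    print(infix_to_postfix('(a|b)&c'))  # -> ['a', 'b', '|', 'c', '&']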
|
import pytest
from expackage import my_add
def test_my_add():
# Unit test for my_add function
assert my_add(3, 2) == 5
def test_my_add_first_input_raises():
# Test that my_add raises a TypeError when first input isn't a number
with pytest.raises(TypeError) as excinfo:
my_add('not a number', 5)
error = 'Input to my_add should be either integers or floats'
assert error == str(excinfo.value)
def test_my_add_second_input_raises():
# Test that my_add raises a TypeError when second input isn't a number
with pytest.raises(TypeError) as excinfo:
my_add(2, 'not a number')
error = 'Input to my_add should be either integers or floats'
assert error == str(excinfo.value)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from nycTaxis import core
from nycTaxis import dataHandler
assert len(sys.argv) == 2, "Usage: exactly one argument - the target date for the rolling average"
targetDate = sys.argv[1]
taxiDataFrame = dataHandler.loadDataFrame('/home/tobi/Projects/BlueYonder/etc/data.h5')
result = core.rollingAverageDate(taxiDataFrame, targetDate)
print("the average drive length of the 45 days prior to and including {} was {}".format(targetDate, result))
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent Placement Group Object.
Top-level Placement Group implementation.
Cloud specific implementations of Placement Group needed.
"""
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
FLAGS = flags.FLAGS
PLACEMENT_GROUP_CLUSTER = 'cluster'
PLACEMENT_GROUP_SUPERCLUSTER = 'supercluster'
PLACEMENT_GROUP_SPREAD = 'spread'
PLACEMENT_GROUP_NONE = 'none'
PLACEMENT_GROUP_OPTIONS = frozenset([
PLACEMENT_GROUP_CLUSTER,
PLACEMENT_GROUP_SPREAD,
PLACEMENT_GROUP_NONE
])
# Default placement group style is specified by Cloud Specific Placement Group.
flags.DEFINE_enum(
'placement_group_style', None,
list(PLACEMENT_GROUP_OPTIONS) + [PLACEMENT_GROUP_SUPERCLUSTER],
'The vm placement group option to use. Default set by cloud.')
def GetPlacementGroupSpecClass(cloud):
"""Returns the PlacementGroupSpec class corresponding to 'cloud'."""
return spec.GetSpecClass(BasePlacementGroupSpec, CLOUD=cloud)
def GetPlacementGroupClass(cloud):
"""Returns the PlacementGroup class corresponding to 'cloud'."""
return resource.GetResourceClass(BasePlacementGroup,
CLOUD=cloud)
class BasePlacementGroupSpec(spec.BaseSpec):
"""Storing various data about a placement group.
Attributes:
zone: The zone the in which the placement group will launch.
"""
SPEC_TYPE = 'BasePlacementGroupSpec'
CLOUD = None
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(BasePlacementGroupSpec, cls)._ApplyFlags(config_values, flag_values)
if FLAGS.placement_group_style:
config_values['placement_group_style'] = FLAGS.placement_group_style
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Can be overridden by derived classes to add options or impose additional
requirements on existing options.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(BasePlacementGroupSpec, cls)._GetOptionDecoderConstructions()
result.update({'zone': (option_decoders.StringDecoder, {'none_ok': True})})
return result
class BasePlacementGroup(resource.BaseResource):
"""Base class for Placement Groups.
This class holds Placement Group methods and attributes relating to the
Placement Groups as a cloud
resource.
Attributes:
zone: The zone the Placement Group was launched in.
"""
RESOURCE_TYPE = 'BasePlacementGroup'
def __init__(self, placement_group_spec):
"""Initialize BasePlacementGroup class.
Args:
placement_group_spec: placement_group.BasePlacementGroupSpec object of the
placement group.
"""
super(BasePlacementGroup, self).__init__()
self.zone = placement_group_spec.zone
|
'''
Created on 22/02/2013
@author: victor
'''
import pyRMSD.RMSDCalculator
import numpy
import math
NUMBER_OF_THREADS = 4
if __name__ == '__main__':
# Reading coords
coordsets = numpy.load("data/tmp_amber_long.npy")
number_of_atoms = coordsets.shape[1]
number_of_conformations = coordsets.shape[0]
rmsds = {}
calculators = {"KABSCH":'KABSCH_OMP_CALCULATOR',
"QTRFIT":"QTRFIT_OMP_CALCULATOR",
"QCP":"QCP_OMP_CALCULATOR",
"QCP CUDA":"QCP_CUDA_CALCULATOR"}
for calculator_key in calculators:
tmp_coords = numpy.copy(coordsets)
calculator_type = calculators[calculator_key]
calculator = pyRMSD.RMSDCalculator.RMSDCalculator(tmp_coords, calculator_type)
if "OMP" in calculator_type:
calculator.setNumberOfOpenMPThreads(NUMBER_OF_THREADS)
if "CUDA" in calculator_type:
calculator.setCUDAKernelThreadsPerBlock(2,16)
rmsds[calculator_key] = calculator.oneVsFollowing(0)
#---------------#
rms = {}
calculator_names = rmsds.keys()
for calculator_name_i in calculator_names:
print "* ",calculator_name_i
rms[calculator_name_i] = {}
for calculator_name_j in calculator_names:
print "\t ",calculator_name_j
rmsd_diff = rmsds[calculator_name_i] - rmsds[calculator_name_j]
rms[calculator_name_i][calculator_name_j] = math.sqrt(numpy.sum(rmsd_diff**2))
#---------------#
handler = open("root_mean_square","w")
for calculator_name in calculator_names:
handler.write("%s "%calculator_name)
handler.write("\n")
for calculator_name_i in calculator_names:
handler.write("%s "%calculator_name_i)
for calculator_name_j in calculator_names:
handler.write("%.03e "%(rms[calculator_name_i][calculator_name_j]))
handler.write("\n")
|
# coding:utf8
from kivy.uix.modalview import ModalView
class AuthPopup(ModalView):
def __init__(self, app, **kwargs):
self.show_password_text = 'Показать пароль'
self.hide_password_text = 'Скрыть пароль'
self.app = app
super(AuthPopup, self).__init__(**kwargs)
def log_in(self):
login = self.ids.login_textinput.text
password = self.ids.pass_textinput.text
if login and password:
self.app.osc_service.send_auth_request(login, password)
self.dismiss()
def update_pass_input_status(self, button):
self.ids.pass_textinput.password = not self.ids.pass_textinput.password
if self.ids.pass_textinput.password:
button.text = self.show_password_text
else:
button.text = self.hide_password_text
def on_dismiss(self):
if self.ids.login_textinput.text != '':
self.app._cached_login = self.ids.login_textinput.text
else:
self.app._cached_login = None
if self.ids.pass_textinput.text != '':
self.app._cached_password = self.ids.pass_textinput.text
else:
self.app._cached_password = None
|
from setuptools import setup
setup(
name="gpef",
version="0.0.1",
description="General Purpose Experiment Framework",
author='zhaozhang',
author_email='zz156@georgetown.edu',
packages=['gpef.tools', 'gpef.graphic', 'gpef.stat'],
#install_requires=["matplotlib"],
entry_points="""
[console_scripts]
cdf_plot = gpef.graphic.cdf_plot:main
gpef = gpef.cmd.cmd:main
basic_stat = gpef.stat.basic_stat:main
""",
install_requires=[
"matplotlib",
"pandas",
"paramiko"
]
)
|
from __future__ import print_function
import sys
from armada_command import armada_api
from armada_command.scripts.compat import json
def add_arguments(parser):
parser.add_argument('saved_containers_path',
help='Path to JSON file with saved containers. They are created in /opt/armada. '
'If not provided, containers saved in K/V store will be recovered.',
nargs='?', default='')
def command_recover(args):
if not args.saved_containers_path:
payload = {'recover_from_kv': True}
else:
with open(args.saved_containers_path) as saved_containers_file:
saved_containers = json.load(saved_containers_file)
payload = {'recover_from_kv': False, 'saved_containers': saved_containers}
result = armada_api.post('recover', payload)
if result['status'] != 'ok':
print(result['error'])
sys.exit(1)
else:
print('Containers have been restored.')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-01-20 19:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('djangobin', '0009_auto_20190120_1901'),
]
operations = [
migrations.RemoveField(
model_name='snippet',
name='author',
),
migrations.AddField(
model_name='snippet',
name='user',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from hypothesis import given, assume
from hypothesis.strategies import characters, text
from eli5.lime.textutils import SplitResult, TokenizedText
def test_split_result():
s = SplitResult.fromtext("")
assert list(s.tokens) == []
s = SplitResult.fromtext("hello")
assert list(s.tokens) == ["hello"]
s = SplitResult.fromtext("hello world!")
assert list(s.tokens) == ["hello", "world"]
assert list(s.separators) == ["", " ", "!"]
@given(text())
def test_split_result_combine(text):
assume("\x00" not in text) # fixme
s = SplitResult.fromtext(text)
assert s.text == text
s_copy = s.copy()
assert (s_copy.parts == s.parts).all()
assert s_copy.parts is not s.parts
def test_split_result_masked():
s = SplitResult.fromtext("Hello, world!")
assert s.masked(np.array([False, False], dtype=bool)).text == s.text
assert s.masked(np.array([True, False], dtype=bool)).text == ", world!"
assert s.masked(np.array([False, True], dtype=bool)).text == "Hello, !"
assert s.masked(np.array([True, True], dtype=bool)).text == ", !"
def test_token_spans():
s = SplitResult.fromtext("Hello, world!")
assert s.token_spans == [(0, 5), (7, 12)]
|
from typing import Optional, Dict, Tuple, List
import numpy as np
from konvens2020_summarization.data_classes import Corpus
from konvens2020_summarization.featurizer import TfidfFeaturizer
from konvens2020_summarization.preprocessor import Preprocessor
def get_important_words(corpus: Corpus,
k: Optional[int] = 10,
tfidf_args: Optional[Dict] = None,
preprocess_pipeline: Optional[List] = None) -> Tuple[Dict, Corpus]:
if not tfidf_args:
tfidf_args = {'max_df': 0.9}
important_words = {}
preprocessor = Preprocessor(pipeline=preprocess_pipeline)
corpus_processed = preprocessor.process(corpus=corpus)
featurizer = TfidfFeaturizer(train_documents=corpus_processed.documents,
model_args=tfidf_args)
corpus_featurized = featurizer.featurize(corpus=corpus_processed)
feature_names = featurizer.feature_names
for doc in corpus_featurized.documents:
top_indices = np.argsort(doc.text_vectorized.flatten())[::-1][:k]
important_words[doc.id_] = np.array(feature_names)[top_indices].tolist()
return important_words, corpus_processed
def predict_tfidf_content_score(corpus: Corpus,
name: str,
num_important_words: Optional[int] = 10,
tfidf_args: Optional[Dict] = None,
preprocessing: Optional[List] = None
):
important_words, corpus = get_important_words(corpus=corpus,
k=num_important_words,
tfidf_args=tfidf_args,
preprocess_pipeline=preprocessing)
for doc in corpus.documents:
important_doc_words = important_words[doc.id_]
for gen_sum in doc.gen_summaries:
gen_sum_counts = 0
for imp_word in important_doc_words:
if imp_word in gen_sum.text_processed.split(' '):
gen_sum_counts += 1
try:
gen_sum_score = gen_sum_counts / len(important_doc_words)
except ZeroDivisionError:
gen_sum_score = 0.
gen_sum.predicted_scores[name] = gen_sum_score
return corpus
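if __name__ == "__main__":
    # Minimal, self-contained sketch of the same "top-k tf-idf words per
    # document" idea using scikit-learn directly. It does not exercise the
    # Corpus/TfidfFeaturizer classes above, so treat it only as an analogy.
    from sklearn.feature_extraction.text import TfidfVectorizer
    demo_docs = ["the cat sat on the mat", "dogs and cats are popular pets"]
    vectorizer = TfidfVectorizer()
    matrix = vectorizer.fit_transform(demo_docs).toarray()
    names = np.array(vectorizer.get_feature_names_out())
    top_words = {i: names[np.argsort(row)[::-1][:3]].tolist()
                 for i, row in enumerate(matrix)}
    print(top_words)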
|
import time
import numpy as np
from sklearn.metrics.scorer import balanced_accuracy_scorer
from automlToolkit.utils.logging_utils import get_logger
from automlToolkit.components.evaluators.base_evaluator import _BaseEvaluator
from automlToolkit.components.evaluators.evaluate_func import holdout_validation, cross_validation, partial_validation
def get_estimator(config):
from automlToolkit.components.models.classification import _classifiers, _addons
classifier_type = config['estimator']
config_ = config.copy()
config_.pop('estimator', None)
config_['random_state'] = 1
try:
estimator = _classifiers[classifier_type](**config_)
except:
estimator = _addons.components[classifier_type](**config_)
if hasattr(estimator, 'n_jobs'):
setattr(estimator, 'n_jobs', 4)
return classifier_type, estimator
class ClassificationEvaluator(_BaseEvaluator):
def __init__(self, clf_config, scorer=None, data_node=None, name=None,
resampling_strategy='cv', resampling_params=None, seed=1):
self.resampling_strategy = resampling_strategy
self.resampling_params = resampling_params
self.clf_config = clf_config
self.scorer = scorer if scorer is not None else balanced_accuracy_scorer
self.data_node = data_node
self.name = name
self.seed = seed
self.eval_id = 0
self.logger = get_logger('Evaluator-%s' % self.name)
self.init_params = None
self.fit_params = None
def get_fit_params(self, y, estimator):
from automlToolkit.components.utils.balancing import get_weights
_init_params, _fit_params = get_weights(
y, estimator, None, {}, {})
self.init_params = _init_params
self.fit_params = _fit_params
def __call__(self, config, **kwargs):
start_time = time.time()
if self.name is None:
raise ValueError('This evaluator has no name/type!')
assert self.name in ['hpo', 'fe']
# Prepare configuration.
np.random.seed(self.seed)
config = config if config is not None else self.clf_config
downsample_ratio = kwargs.get('data_subsample_ratio', 1.0)
# Prepare data node.
if 'data_node' in kwargs:
data_node = kwargs['data_node']
else:
data_node = self.data_node
X_train, y_train = data_node.data
# Prepare training and initial params for classifier.
if data_node.enable_balance or True:
if self.init_params is None or self.fit_params is None:
self.get_fit_params(y_train, config['estimator'])
config_dict = config.get_dictionary().copy()
for key, val in self.init_params.items():
config_dict[key] = val
classifier_id, clf = get_estimator(config_dict)
try:
if self.resampling_strategy == 'cv':
if self.resampling_params is None or 'folds' not in self.resampling_params:
folds = 5
else:
folds = self.resampling_params['folds']
score = cross_validation(clf, self.scorer, X_train, y_train,
n_fold=folds,
random_state=self.seed,
if_stratify=True,
fit_params=self.fit_params)
elif self.resampling_strategy == 'holdout':
if self.resampling_params is None or 'test_size' not in self.resampling_params:
test_size = 0.33
else:
test_size = self.resampling_params['test_size']
score = holdout_validation(clf, self.scorer, X_train, y_train,
test_size=test_size,
random_state=self.seed,
if_stratify=True,
fit_params=self.fit_params)
elif self.resampling_strategy == 'partial':
if self.resampling_params is None or 'test_size' not in self.resampling_params:
test_size = 0.33
else:
test_size = self.resampling_params['test_size']
score = partial_validation(clf, self.scorer, X_train, y_train, downsample_ratio,
test_size=test_size,
random_state=self.seed,
if_stratify=True,
fit_params=self.fit_params)
else:
raise ValueError('Invalid resampling strategy: %s!' % self.resampling_strategy)
except Exception as e:
if self.name == 'fe':
raise e
self.logger.info('%s-evaluator: %s' % (self.name, str(e)))
score = 0.
fmt_str = '\n' + ' ' * 5 + '==> '
self.logger.debug('%s%d-Evaluation<%s> | Score: %.4f | Time cost: %.2f seconds | Shape: %s' %
(fmt_str, self.eval_id, classifier_id,
score, time.time() - start_time, X_train.shape))
self.eval_id += 1
if self.name == 'hpo':
# Turn it into a minimization problem.
score = 1. - score
return score
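if __name__ == "__main__":
    # Illustrative sketch only (not automlToolkit's API): the same
    # "config dict -> (name, estimator)" dispatch idea as get_estimator
    # above, using a plain scikit-learn registry so it can run standalone.
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.linear_model import LogisticRegression
    _demo_registry = {
        'random_forest': RandomForestClassifier,
        'logistic_regression': LogisticRegression,
    }
    def _demo_get_estimator(config):
        cfg = dict(config)
        name = cfg.pop('estimator')
        cfg['random_state'] = 1
        return name, _demo_registry[name](**cfg)
    print(_demo_get_estimator({'estimator': 'random_forest',
                               'n_estimators': 50}))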
|
# Copyright 2013 Mark Dickinson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code to annotate edges and objects.
"""
import gc
import types
import weakref
import six
from refcycle.key_transform_dict import KeyTransformDict
# Maximum number of characters to print in a frame filename.
FRAME_FILENAME_LIMIT = 30
def _get_cell_type():
def f(x=None):
return lambda: x
return type(f().__closure__[0])
CellType = _get_cell_type()
def add_attr(obj, attr, references):
if hasattr(obj, attr):
references[getattr(obj, attr)].append(attr)
def add_cell_references(obj, references):
add_attr(obj, "cell_contents", references)
def add_function_references(obj, references):
add_attr(obj, "__defaults__", references)
add_attr(obj, "__closure__", references)
add_attr(obj, "__globals__", references)
add_attr(obj, "__code__", references)
add_attr(obj, "__name__", references)
add_attr(obj, "__module__", references)
add_attr(obj, "__doc__", references)
if six.PY3:
# Assumes version >= 3.3.
add_attr(obj, "__qualname__", references)
add_attr(obj, "__annotations__", references)
add_attr(obj, "__kwdefaults__", references)
def add_sequence_references(obj, references):
for position, item in enumerate(obj):
references[item].append("item[{}]".format(position))
def add_dict_references(obj, references):
for key, value in six.iteritems(obj):
references[key].append("key")
references[value].append("value[{0!r}]".format(key))
def add_set_references(obj, references):
for elt in obj:
references[elt].append("element")
def add_bound_method_references(obj, references):
add_attr(obj, "__self__", references)
add_attr(obj, "__func__", references)
add_attr(obj, "im_class", references)
def add_weakref_references(obj, references):
# For subclasses of weakref, we can't reliably distinguish the
# callback (if any) from other attributes.
if type(obj) is weakref.ref:
referents = gc.get_referents(obj)
if len(referents) == 1:
target = referents[0]
references[target].append("__callback__")
def add_frame_references(obj, references):
add_attr(obj, "f_back", references)
add_attr(obj, "f_code", references)
add_attr(obj, "f_builtins", references)
add_attr(obj, "f_globals", references)
add_attr(obj, "f_trace", references)
# The f_locals dictionary is only created on demand,
# and then cached.
f_locals = obj.f_locals
add_attr(obj, "f_locals", references)
# Some badly-behaved code replaces the f_locals dict with
# something that doesn't support the full dict interface. So we
# only continue with the annotation if f_locals is a Python dict.
if type(f_locals) is dict:
for name, local in six.iteritems(obj.f_locals):
references[local].append("local {!r}".format(name))
def add_getset_descriptor_references(obj, references):
add_attr(obj, "__objclass__", references)
add_attr(obj, "__name__", references)
add_attr(obj, "__doc__", references)
type_based_references = {
tuple: add_sequence_references,
list: add_sequence_references,
dict: add_dict_references,
set: add_set_references,
frozenset: add_set_references,
types.FunctionType: add_function_references,
types.FrameType: add_frame_references,
CellType: add_cell_references,
types.MethodType: add_bound_method_references,
weakref.ref: add_weakref_references,
types.GetSetDescriptorType: add_getset_descriptor_references,
}
def annotated_references(obj):
"""
Return known information about references held by the given object.
Returns a mapping from referents to lists of descriptions. Note that there
may be more than one edge leading to any particular referent; hence the
need for a list. Descriptions are currently strings.
"""
references = KeyTransformDict(transform=id, default_factory=list)
for type_ in type(obj).__mro__:
if type_ in type_based_references:
type_based_references[type_](obj, references)
add_attr(obj, "__dict__", references)
add_attr(obj, "__class__", references)
if isinstance(obj, type):
add_attr(obj, "__mro__", references)
return references
###############################################################################
# Object annotations.
BASE_TYPES = (
six.integer_types +
(float, complex, type(None), six.text_type, six.binary_type)
)
def object_annotation(obj):
"""
Return a string to be used for Graphviz nodes. The string
should be short but as informative as possible.
"""
# For basic types, use the repr.
if isinstance(obj, BASE_TYPES):
return repr(obj)
if type(obj).__name__ == 'function':
return "function\\n{}".format(obj.__name__)
elif isinstance(obj, types.MethodType):
if six.PY2:
im_class = obj.im_class
if im_class is None:
im_class_name = "<None>"
else:
im_class_name = im_class.__name__
try:
func_name = obj.__func__.__name__
except AttributeError:
func_name = "<anonymous>"
return "instancemethod\\n{}.{}".format(
im_class_name,
func_name,
)
else:
try:
func_name = obj.__func__.__qualname__
except AttributeError:
func_name = "<anonymous>"
return "instancemethod\\n{}".format(func_name)
elif isinstance(obj, list):
return "list[{}]".format(len(obj))
elif isinstance(obj, tuple):
return "tuple[{}]".format(len(obj))
elif isinstance(obj, dict):
return "dict[{}]".format(len(obj))
elif isinstance(obj, types.ModuleType):
return "module\\n{}".format(obj.__name__)
elif isinstance(obj, type):
return "type\\n{}".format(obj.__name__)
elif six.PY2 and isinstance(obj, types.InstanceType):
return "instance\\n{}".format(obj.__class__.__name__)
elif isinstance(obj, weakref.ref):
referent = obj()
if referent is None:
return "weakref (dead referent)"
else:
return "weakref to id 0x{:x}".format(id(referent))
elif isinstance(obj, types.FrameType):
filename = obj.f_code.co_filename
if len(filename) > FRAME_FILENAME_LIMIT:
filename = "..." + filename[-(FRAME_FILENAME_LIMIT-3):]
return "frame\\n{}:{}".format(
filename,
obj.f_lineno,
)
else:
return "object\\n{}.{}".format(
type(obj).__module__,
type(obj).__name__,
)
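if __name__ == "__main__":
    # Illustrative demo only (not part of the original module): print the
    # short Graphviz node labels produced by object_annotation for a few
    # common object types, and count the annotated references of a small
    # list. KeyTransformDict is assumed to behave like an ordinary mapping.
    for sample in (42, 3.14, "text", [1, 2, 3], {"a": 1}, object_annotation):
        print(object_annotation(sample))
    refs = annotated_references([1, 2, 3])
    print("annotated references for [1, 2, 3]: {}".format(len(refs)))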
|
from pololu_drv8835_rpi import motors
import math
class MotorControl:
def __init__(self):
self.rightSpeed = 0
self.leftSpeed = 0
def adjustSpeed(self, rightSpeed, leftSpeed):
self.rightSpeed = rightSpeed
self.leftSpeed = leftSpeed
        # Clamp speeds to the [-480, 480] range expected by the driver.
        rightSpeed = 480 if rightSpeed > 480 else rightSpeed
        rightSpeed = -480 if rightSpeed < -480 else rightSpeed
        leftSpeed = 480 if leftSpeed > 480 else leftSpeed
        leftSpeed = -480 if leftSpeed < -480 else leftSpeed
motors.setSpeeds(rightSpeed, leftSpeed)
def changeControllerValueToMotorValue(self, x, y):
r = math.hypot(x, y)
t = math.atan2(y, x)
# rotate by 45 degrees
t += math.pi / 4
# back to cartesian
left = r * math.cos(t)
right = r * math.sin(t)
# rescale the new coords
left = left * math.sqrt(2)
right = right * math.sqrt(2)
# clamp to -1/+1
left = max(-1, min(left, 1))
right = max(-1, min(right, 1))
return int(left * 480), -int(right * 480)
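if __name__ == "__main__":
    # Illustrative check only (no motors are driven in this block): map a
    # few joystick positions through changeControllerValueToMotorValue and
    # print the resulting (left, right) motor commands.
    controller = MotorControl()
    for x, y in [(0.0, 1.0), (1.0, 0.0), (0.7, 0.7), (0.0, 0.0)]:
        print((x, y), "->", controller.changeControllerValueToMotorValue(x, y))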
|
"""
`%hierarchy` and `%%dot` magics for IPython
===========================================
This extension provides two magics.
First magic is ``%hierarchy``. This magic command draws hierarchy of
given class or the class of given instance. For example, the
following shows class hierarchy of currently running IPython shell.::
%hierarchy get_ipython()
Second magic is ``%%dot``. You can write graphiz dot language in a
cell using this magic. Example::
%%dot -- -Kfdp
digraph G {
a->b; b->c; c->d; d->b; d->a;
}
License for ipython-hierarchymagic
----------------------------------
ipython-hierarchymagic is licensed under the term of the Simplified
BSD License (BSD 2-clause license), as follows:
Copyright (c) 2012 Takafumi Arakaki
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
License for Sphinx
------------------
`run_dot` function and `HierarchyMagic._class_name` method in this
extension heavily based on Sphinx code `sphinx.ext.graphviz.render_dot`
and `InheritanceGraph.class_name`.
Copyright notice for Sphinx can be found below.
Copyright (c) 2007-2011 by the Sphinx team (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from IPython.core.magic import Magics, magics_class, line_magic, cell_magic
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
from IPython.core.display import display_png, display_svg
from sphinx.ext.inheritance_diagram import InheritanceGraph
from future.utils import lmap
def run_dot(code, options=[], format='png'): # pylint: disable=redefined-builtin, dangerous-default-value
"""run_dot"""
# mostly copied from sphinx.ext.graphviz.render_dot
import os
from subprocess import Popen, PIPE
from sphinx.util.osutil import EPIPE, EINVAL
dot_args = ['dot'] + options + ['-T', format]
if os.name == 'nt':
# Avoid opening shell window.
# * https://github.com/tkf/ipython-hierarchymagic/issues/1
# * http://stackoverflow.com/a/2935727/727827
process = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
creationflags=0x08000000)
else:
process = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
wentwrong = False
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = process.communicate(code.encode('utf-8'))
except (OSError, IOError) as err:
if err.errno != EPIPE:
raise
wentwrong = True
if wentwrong:
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = process.stdout.read(), process.stderr.read()
process.wait()
if process.returncode != 0:
raise RuntimeError('dot exited with error:\n[stderr]\n{0}'
.format(stderr.decode('utf-8')))
return stdout
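# A possible standalone use of run_dot (illustrative only; requires the
# Graphviz `dot` executable on PATH):
#
#     png_bytes = run_dot("digraph G { a -> b }", format="png")
#     with open("graph.png", "wb") as fh:
#         fh.write(png_bytes)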
@magics_class
class GraphvizMagic(Magics):
"""GraphvizMagic"""
@magic_arguments()
@argument(
'-f', '--format', default='png', choices=('png', 'svg'),
help='output format (png/svg)'
)
@argument(
'options', default=[], nargs='*',
help='options passed to the `dot` command'
)
@cell_magic
def dot(self, line, cell):
"""Draw a figure using Graphviz dot command."""
args = parse_argstring(self.dot, line)
image = run_dot(cell, args.options, format=args.format)
if args.format == 'png':
display_png(image, raw=True)
elif args.format == 'svg':
display_svg(image, raw=True)
class FoldedInheritanceGraph(InheritanceGraph):
"""InheritanceGraph"""
def __init__(self, *args, **kwds):
self._width = kwds.pop('width', 40)
super(FoldedInheritanceGraph, self).__init__(*args, **kwds)
@staticmethod
def _foldclassname(classname, width):
"""Split `classname` in newlines if the width is wider than `width`.
>>> fold = FoldedInheritanceGraph._foldclassname
>>> fold('aaa.bbb.ccc', 7)
'aaa.bbb\\n.ccc'
>>> fold('aaa.bbb.ccc', 3)
'aaa\\n.bbb\\n.ccc'
>>> identity = lambda x, y: ''.join(fold(x, y).split('\\n'))
>>> identity('aaa.bbb.ccc', 7)
'aaa.bbb.ccc'
>>> identity('aaa.bbb.ccc', 3)
'aaa.bbb.ccc'
"""
parts = classname.split('.')
lines = []
chunk = [parts.pop(0)]
for part in parts:
if len('.'.join(chunk + [part])) > width:
lines.append('.'.join(chunk))
chunk = [part]
else:
chunk.append(part)
lines.append('.'.join(chunk))
return '\\n.'.join(lines)
def _class_info(self, *args, **kwds):
class_info = super(FoldedInheritanceGraph, self) \
._class_info(*args, **kwds)
width = self._width
def fold(elem):
"""fold"""
(nodename, fullname, baselist) = elem
nodename = self._foldclassname(nodename, width)
baselist = [self._foldclassname(b, width) for b in baselist]
return (nodename, fullname, baselist)
return lmap(fold, class_info)
@magics_class
class HierarchyMagic(Magics):
"""HierarchyMagic"""
@magic_arguments()
@argument(
'-r', '--rankdir', default='TB',
help='direction of the hierarchy graph (default: %(default)s)'
)
@argument(
'-s', '--size', default='5.0, 12.0',
help='size of the generated figure (default: %(default)s)',
)
@argument(
'-w', '--name-width', default=40, type=int,
help='width of each nodes in character length (default: %(default)s)',
)
@argument(
'object', nargs='+',
help='Class hierarchy of these classes or objects will be drawn',
)
@line_magic
def hierarchy(self, parameter_s=''):
"""Draw hierarchy of a given class."""
args = parse_argstring(self.hierarchy, parameter_s)
objects = lmap(self.shell.ev, args.object)
clslist = lmap(self._object_to_class, objects)
namelist = lmap(self._class_name, clslist)
igraph = FoldedInheritanceGraph(
namelist, '',
width=args.name_width)
code = igraph.generate_dot(
'inheritance_graph',
graph_attrs={'rankdir': args.rankdir,
'size': '"{0}"'.format(args.size)})
stdout = run_dot(code, format='png')
display_png(stdout, raw=True)
@staticmethod
def _object_to_class(obj):
if isinstance(obj, type):
return obj
elif hasattr(obj, "__class__"):
return obj.__class__
else:
raise ValueError(
"Given object {0} is not a class or an instance".format(obj))
@staticmethod
def _class_name(clas, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = clas.__module__
if module == '__builtin__':
fullname = clas.__name__
else:
fullname = '%s.%s' % (module, clas.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
global _loaded # pylint: disable=global-statement, invalid-name
if not _loaded:
ipython.register_magics(HierarchyMagic)
ipython.register_magics(GraphvizMagic)
_loaded = True
_loaded = False # pylint: disable=invalid-name
|
# -*- coding: utf-8 -*-
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('title', models.CharField(max_length=20)),
],
options={
'ordering': ['title'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('title', models.CharField(max_length=20)),
('slug', models.SlugField()),
('description', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['title'],
'verbose_name_plural': 'categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Foodstuff',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('title', models.CharField(unique=True, max_length=100)),
('description', models.TextField(null=True, blank=True)),
('slug', models.SlugField()),
],
options={
'ordering': ['title'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ingredient',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('quantity', models.CharField(max_length=20, null=True, blank=True)),
('modifier', models.CharField(max_length=50, null=True, blank=True)),
('rank', models.IntegerField()),
(
'foodstuff',
models.ForeignKey(
related_name='ingredients', to='food.Foodstuff', on_delete=models.CASCADE
),
),
],
options={
'ordering': ['rank'],
'verbose_name': 'recipe ingredient',
'verbose_name_plural': 'recipe ingredients',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Link',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('title', models.CharField(unique=True, max_length=50)),
('url', models.CharField(max_length=250)),
('rank', models.IntegerField()),
],
options={
'ordering': ['rank'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Recipe',
fields=[
(
'id',
models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True
),
),
('title', models.CharField(unique=True, max_length=50, db_index=True)),
('slug', models.SlugField()),
(
'pub_date',
models.DateField(
default=datetime.date.today, null=True, verbose_name=b'date published'
),
),
('directions', models.TextField(null=True, blank=True)),
('description', models.TextField(null=True, blank=True)),
('teaser', models.CharField(max_length=100, null=True, blank=True)),
('credit', models.TextField(null=True, blank=True)),
(
'rclass',
models.CharField(
default=b'D',
max_length=1,
choices=[(b'D', b'Drink'), (b'E', b'Eat'), (b'I', b'Ingredient')],
),
),
('attributes', models.ManyToManyField(to='food.Attribute')),
('categories', models.ManyToManyField(to='food.Category')),
],
options={
'ordering': ['title'],
},
bases=(models.Model,),
),
migrations.AddField(
model_name='ingredient',
name='recipe',
field=models.ForeignKey(related_name='ingredients', to='food.Recipe', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
# Copyright (c) 2016-2018, University of Idaho
# All rights reserved.
#
# Roger Lew (rogerlew@gmail.com)
#
# The project described was supported by NSF award number IIA-1301792
# from the NSF Idaho EPSCoR Program and by the National Science Foundation.
from os.path import join as _join
from os.path import exists as _exists
from datetime import datetime, timedelta
from glob import glob
from collections import OrderedDict
import numpy as np
import os
from wepppy.all_your_base import try_parse_float
uint_types = ['OFE (#)', 'J', 'Y', 'M', 'D']
vars_collapse_ofe = ['Y', 'J', 'M', 'D', 'Date', 'P (mm)', 'RM (mm)', 'Q (mm)',
'Ep (mm)', 'Es (mm)', 'Er (mm)', 'Dp (mm)', 'UpStrmQ (mm)',
'SubRIn (mm)', 'latqcc (mm)', 'Total-Soil Water (mm)',
'frozwt (mm)', 'Snow-Water (mm)', 'QOFE (mm)', 'Tile (mm)', 'Irr (mm)']
vars_collapse_time = ['Area (m^2)']
_nan = float('nan')
class HillWat:
def __init__(self, fname):
assert _exists(fname), fname
self.fname = fname
# read datafile
lines = []
with open(self.fname) as f:
lines = f.readlines()
lines = [L.strip() for L in lines]
assert len(lines) > 19, fname
# Read header
i0, iend = self._find_headerlines(lines)
header = [L.split() for L in lines[i0:iend]]
header = zip(*header)
header = [' '.join(tup) for tup in header]
header = [h.replace(' -', '')
.replace('#', '(#)')
.replace(' mm', ' (mm)')
.replace('Water(mm)', 'Water (mm)')
.replace('m^2', '(m^2)')
.strip() for h in header]
# iterate through the data
ncols = len(header)
data = dict([(h, []) for h in header])
data['Date'] = []
data['M'] = []
data['D'] = []
for L in lines[iend + 2:]:
L = L.split()
assert len(L) >= ncols
for k, v in zip(header, L[:ncols]):
if k in uint_types:
data[k].append(int(v))
else:
data[k].append(try_parse_float(v, _nan))
assert 'Y' in data, (data, lines)
year = data['Y'][-1]
julday = data['J'][-1]
dt = datetime(year, 1, 1) + timedelta(julday - 1)
data['Date'].append(np.datetime64(dt))
data['M'].append(dt.month)
data['D'].append(dt.day)
# cast data values as np.arrays
for (k, v) in data.items():
dtype = (np.float32, np.int16)[any([k == s for s in uint_types])]
if k == 'Date':
dtype = np.datetime64
data[k] = np.array(v, dtype=dtype)
# reshape depending on number of ofes
num_ofes = len(set(data['OFE (#)']))
days_in_sim = int(len(data['OFE (#)']) / num_ofes)
# pack the table data into numpy arrays
for (k, v) in data.items():
data[k] = np.reshape(data[k], (days_in_sim, num_ofes))
# collapse to reduce redundancy
for k in vars_collapse_ofe:
data[k] = data[k][:, 0]
data[k] = np.reshape(data[k], (days_in_sim, 1))
for k in vars_collapse_time:
data[k] = data[k][0, :]
data[k] = np.reshape(data[k], (1, num_ofes))
# Create array of Area weights
total_area = np.sum(data['Area (m^2)'])
data['Area Weights'] = data['Area (m^2)'] / total_area
self.data = data
self.header = header
self.total_area = total_area
self.num_ofes = num_ofes
def as_dict(self):
data = self.data
header = data.keys()
d = {}
m, n = data['D'].shape
for i in range(m):
row = {k: data[k][i, 0] for k in header if 'Area' not in k}
year = row['Y']
month = row['M']
day = row['D']
d[(year, month, day)] = row
return d
def _find_headerlines(self, lines):
i0 = None
iend = None
for i, L in enumerate(lines):
s = L.strip()
if s == '':
continue
if s[0] == '-':
                if i0 is None:
i0 = i
else:
iend = i
return i0 + 1, iend
def watershed_swe(wd):
wat_fns = glob(_join(wd, 'wepp/output/*.wat.dat'))
total_area = 0.0
cumulative_swe = None
for wat_fn in wat_fns:
wat = HillWat(wat_fn)
area = wat.data['Area (m^2)'][0]
total_area += area
# calc swe in m^3
swe = wat.data['Snow-Water (mm)'] * 0.001 * area
if cumulative_swe is None:
cumulative_swe = swe
else:
cumulative_swe += swe
return cumulative_swe / total_area * 1000
if __name__ == "__main__":
from pprint import pprint
from glob import glob
from os.path import join as _join
import sys
print(watershed_swe('/geodata/weppcloud_runs/srivas42-greatest-ballad'))
sys.exit()
test_wd = '/geodata/weppcloud_runs/srivas42-greatest-ballad/wepp/output'
fns = glob(_join(test_wd, '*.wat.dat'))
for fn in fns:
print(fn)
wat = HillWat(fn)
pprint(wat.data.keys())
input()
|
from protocol import Protocol
from transition import Transition
def name(n):
return "Flock-of-birds 2 ({})".format(n)
# From https://hal.archives-ouvertes.fr/hal-00565090/document
# Example 2, p. 5
def generate(n):
Q = set(range(n + 1))
T = set()
S = {"0", "1"}
I = {"0": 0, "1": 1}
O = {i: (0 if i < n else 1) for i in Q}
for i in range(1, n):
T.add(Transition((i, i), (i, i + 1)))
for i in range(0, n):
T.add(Transition((n, i), (n, n)))
return Protocol(Q, T, S, I, O)
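if __name__ == "__main__":
    # Illustrative smoke test only: build the n = 3 instance of the
    # protocol. No attributes beyond the constructor arguments used above
    # are assumed on the Protocol object, so we only print its repr.
    print(name(3))
    print(generate(3))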
|
import boto3
def get_batch_client(access_key, secret_key, region):
"""
Returns the client object for AWS Batch
Args:
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
obj: AWS Batch Client Obj
"""
return boto3.client(
'batch',
region_name=region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
def get_compute_environments(compute_envs, access_key, secret_key, region):
"""
Returns AWS Batch compute envs list with all details
Args:
compute_envs (list): List of compute env names
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
envs (list): List of all Batch compute envs with all details
"""
client = get_batch_client(access_key, secret_key, region)
response = client.describe_compute_environments(
computeEnvironments=compute_envs
)
return response['computeEnvironments']
def check_compute_env_exists(compute_env, access_key, secret_key, region):
"""
Check whether the given compute env name already exists in AWS account
Args:
compute_env (str): Compute env name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
Boolean: True if env exists else False
"""
if len(get_compute_environments([compute_env], access_key, secret_key, region)):
return True
else:
return False
def get_job_definitions(job_def_name, access_key, secret_key, region):
"""
Get all job definition versions with details
Args:
job_def_name (str): Job definiiton name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
jobDefinitions (list): List of all job definitions with details
"""
    client = get_batch_client(access_key, secret_key, region)
    response = client.describe_job_definitions(
jobDefinitionName=job_def_name,
status='ACTIVE'
)
return response['jobDefinitions']
def check_job_definition_exists(job_def_name, access_key, secret_key, region):
"""
Check whether the given job definiiton exists in AWS Batch
Args:
job_def_name (str): Job definiiton name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
Boolean: True if it already exists else False
"""
client = get_batch_client(access_key, secret_key, region)
try:
job_definitions = get_job_definitions(job_def_name, access_key, secret_key, region)
return True if len(job_definitions) else False
except:
return False
def check_job_queue_exists(job_queue_name, access_key, secret_key, region):
"""
Check whether the given job queue exists in AWS Batch
Args:
job_queue_name (str): Job Queue name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
region (str): AWS Region
Returns:
Boolean: True if it already exists else False
"""
client = get_batch_client(access_key, secret_key, region)
try:
response = client.describe_job_queues(
jobQueues=[job_queue_name],
)
return True if len(response['jobQueues']) else False
except:
return False
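if __name__ == "__main__":
    # Illustrative usage sketch only: every value below is a placeholder,
    # and without valid AWS credentials the underlying boto3 call will fail.
    exists = check_compute_env_exists(
        compute_env="my-compute-env",   # hypothetical environment name
        access_key="AKIA...",           # placeholder access key
        secret_key="...",               # placeholder secret key
        region="us-east-1",
    )
    print("compute environment exists:", exists)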
|
class Environment:
def __init__(self):
self.total_timesteps = 1000
self.timesteps_occurred = 0
self.current_state = 0
self.markov_chain = [self.current_state]
def get_current_state(self):
return self.current_state
def get_actions(self):
return [-1, 1]
def perform_action(self, state, action):
self.timesteps_occurred += 1
next_state = state + action
self.current_state = next_state
self.markov_chain.append(self.current_state)
def check_completion(self):
return self.timesteps_occurred == self.total_timesteps
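if __name__ == "__main__":
    # Illustrative run (not part of the original class): drive the
    # environment with uniformly random +1/-1 actions until it reports
    # completion, then summarise the resulting one-dimensional random walk.
    import random
    env = Environment()
    while not env.check_completion():
        env.perform_action(env.get_current_state(),
                           random.choice(env.get_actions()))
    print("final state:", env.get_current_state())
    print("states visited:", len(env.markov_chain))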
|
import json
import os
import random
from pprint import pprint
def load_data(filename):
if filename is None:
raise ValueError("None is not a valid file name")
with open(os.path.abspath(filename)) as json_file:
#raw_data = open(filename).read()
return json.load(json_file)
def random_choose(available, key, eliminate=False):
candidates = available[key]
    chosen = random.choice(candidates)
    if eliminate:
        candidates.remove(chosen)
        available[key] = candidates
    return chosen, available
players = load_data('players.json')
missions = load_data('missions.json')
deployments = load_data('deployments.json')
while len(players["players"]) > 1:
player1, players = random_choose(players, "players", eliminate=True)
player2, players = random_choose(players, "players", eliminate=True)
deployment, deployments = random_choose(deployments, "deployments")
mission, missions = random_choose(missions, "missions")
print ("Match %(player1)s vs %(player2)s Mission: %(mission)s Map: %(map)s" % \
{
"player1": player1["name"],
"player2": player2["name"],
"mission": mission["name"],
"map": deployment["name"]
})
consolation_deployment, deployments = random_choose(deployments, "deployments")
consolation_mission, missions = random_choose(missions, "missions")
print ("Consolation Match Mission: %(mission)s Map: %(map)s" % \
{
"mission": consolation_mission["name"],
"map": consolation_deployment["name"]
})
final_deployment, deployments = random_choose(deployments, "deployments")
final_mission, missions = random_choose(missions, "missions")
print ("Final Match Mission: %(mission)s Map: %(map)s" % \
{
"mission": final_mission["name"],
"map": final_deployment["name"]
})
|
# coding: utf-8
"""
Characteristics of a digital filter
"""
# Other
from pylab import *
from numpy import arange
from numpy import ones
# App
from visualisers import mfreqz
from visualisers import impz
from iir_models.iir_digital import calc_digital_characteristics
if __name__=='__main__':
mean_params = (5.32255626, 3.07633474, 0.88892465, 2.14692147, 69.83541651)
(T1, T2, t0, K, dy) = mean_params
#K = 1
freq = arange(10000)*0.01 # Hz
freq_sampling = 1 # Hz
""" Model """
b, a, fs = calc_digital_characteristics((T1, T2, t0, K), freq_sampling)
print b, a
#b = ones(1)
from numpy.polynomial import Polynomial as P
a1 = P(b)
a2 = P([0,0,0,1])
b = (a1*a2).coef
""" View """
#plot_normalize_analog(tau, freq, freq_sampling, plot_AFC, plot_PFC)
#impz(b, a)
mfreqz(b, a)
b, a, fs = calc_digital_characteristics((T1, T2, t0, K), freq_sampling)
print b, a
""" View """
#plot_normalize_analog(tau, freq, freq_sampling, plot_AFC, plot_PFC)
#impz(b, a)
mfreqz(b, a)
show()
print 'Done'
|
"""
# author: shiyipaisizuo
# contact: shiyipaisizuo@gmail.com
# file: prediction.py
# time: 2018/8/24 22:18
# license: MIT
"""
import argparse
import os
import torch
import torchvision
from torchvision import transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser("""Image classifical!""")
parser.add_argument('--path', type=str, default='../../data/PASCAL_VOC/2005/',
help="""image dir path default: '../../data/PASCAL_VOC/2005/'.""")
parser.add_argument('--batch_size', type=int, default=128,
help="""Batch_size default:128.""")
parser.add_argument('--num_classes', type=int, default=6,
help="""num classes. Default: 6.""")
parser.add_argument('--model_path', type=str, default='../../../models/pytorch/PASCAL/',
help="""Save model path""")
parser.add_argument('--model_name', type=str, default='2005.pth',
help="""Model name.""")
args = parser.parse_args()
# Create model
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
transform = transforms.Compose([
    transforms.Resize(128),  # Resize the image so its shorter side is 128 px
    transforms.RandomCrop(114),  # Randomly crop a 114 x 114 patch
    transforms.ToTensor(),  # Convert the PIL image / numpy array to a Tensor
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # Normalize each channel to [-1, 1]
])
# Load data
test_datasets = torchvision.datasets.ImageFolder(root=args.path + 'val/',
transform=transform)
test_loader = torch.utils.data.DataLoader(dataset=test_datasets,
batch_size=args.batch_size,
shuffle=True)
def main():
print(f"Test numbers:{len(test_datasets)}")
# Load model
if torch.cuda.is_available():
model = torch.load(args.model_path + args.model_name).to(device)
else:
model = torch.load(args.model_path + args.model_name, map_location='cpu')
model.eval()
correct = 0.
total = 0
for images, labels in test_loader:
# to GPU
images = images.to(device)
labels = labels.to(device)
# print prediction
outputs = model(images)
# equal prediction and acc
_, predicted = torch.max(outputs.data, 1)
# val_loader total
total += labels.size(0)
# add correct
correct += (predicted == labels).sum().item()
print(f"Acc: {100 * correct / total:.4f}.")
if __name__ == '__main__':
main()
|
#-*- encoding: utf-8 -*-
import os
import types
from functools import reduce
_GLOBAL_CONFIG = dict(
user_agent = 'citationdetective',
log_dir = os.path.join(os.path.expanduser('~'), 'cd_logs'),
)
# A base configuration that all languages "inherit" from.
_BASE_LANG_CONFIG = dict(
articles_sampling_fraction = 2e-2,
statement_max_size = 5000,
context_max_size = 5000,
min_sentence_length = 6,
min_citation_need_score = 0.5
)
# Language-specific config, inheriting from the base config above.
_LANG_CODE_TO_CONFIG = dict(
en = dict(
# A friendly name for the language
lang_name = 'English',
# The direction of the language, either ltr or rtl
lang_dir = 'ltr',
# The database to use on Tools Labs
database = 'enwiki_p',
# The domain for Wikipedia in this language
wikipedia_domain = 'en.wikipedia.org',
# These sections which content do not need citations
sections_to_skip = [
'See also',
'References',
'External links',
'Further reading',
'Notes',
'Additional sources',
'Sources',
'Bibliography',
],
# Dictionary of word to vector
vocb_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/word_dict_en.pck'),
# Dictionary of section title to vector
section_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/section_dict_en.pck'),
# Tensorflow models to detect Citation Need for English
model_path = os.path.expanduser('~/citationdetective/citation-needed/models/fa_en_model_rnn_attention_section.h5'),
# Argument for padding word vectors to the same length
# so as to use as the input for the RNN model
word_vector_length = 187,
),
it = dict(
lang_name = 'Italiano',
lang_dir = 'ltr',
database = 'itwiki_p',
wikipedia_domain = 'it.wikipedia.org',
sections_to_skip = [
'Note',
'Bibliografia',
'Voci correlate',
'Altri progetti',
'Collegamenti esterni',
],
vocb_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/word_dict_it.pck'),
section_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/section_dict_it.pck'),
model_path = os.path.expanduser('~/citationdetective/citation-needed/models/fa_it_model_rnn_attention_section.h5'),
word_vector_length = 319,
),
fr = dict(
lang_name = 'Français',
lang_dir = 'ltr',
database = 'frwiki_p',
wikipedia_domain = 'fr.wikipedia.org',
sections_to_skip = [
'Notes et références',
'Références',
'Annexes',
'Voir aussi',
'Liens externes',
],
vocb_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/word_dict_fr.pck'),
section_path = os.path.expanduser('~/citationdetective/citation-needed/embeddings/section_dict_fr.pck'),
model_path = os.path.expanduser('~/citationdetective/citation-needed/models/fa_fr_model_rnn_attention_section.h5'),
word_vector_length = 296,
),
)
Config = types.SimpleNamespace
def _inherit(base, child):
ret = dict(base) # shallow copy
for k, v in child.items():
if k in ret:
if isinstance(v, list):
v = ret[k] + v
elif isinstance(v, dict):
v = dict(ret[k], **v)
ret[k] = v
return ret
LANG_CODES_TO_LANG_NAMES = {
lang_code: _LANG_CODE_TO_CONFIG[lang_code]['lang_name']
for lang_code in _LANG_CODE_TO_CONFIG
}
def get_localized_config(lang_code='en'):
if lang_code is None:
lang_code = os.getenv('CD_LANG')
lang_config = _LANG_CODE_TO_CONFIG[lang_code]
cfg = Config(lang_code = lang_code, **reduce(
_inherit, [_GLOBAL_CONFIG, _BASE_LANG_CONFIG, lang_config]))
cfg.lang_codes_to_lang_names = LANG_CODES_TO_LANG_NAMES
return cfg
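if __name__ == "__main__":
    # Illustrative only: resolve the merged English configuration and print
    # a few of the fields defined above.
    cfg = get_localized_config('en')
    print(cfg.lang_name, cfg.database, cfg.wikipedia_domain)
    print("sections skipped:", cfg.sections_to_skip)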
|
# This is a temporary script that MAY be useful later.
# What it does is creates origin folders based on file names
import os
import shutil
from tqdm import tqdm
WHERE_TO_COPY = r"C:\Users\Aleksei\Desktop\origin_folders".replace("\\", "/")
FROM_COPY = r"F:\_ReachU-defectTypes\__new_2020_06\orthos".replace("\\", "/")
vrts = os.listdir(FROM_COPY)
# Only consider VRT files at this point, though need to copy many types
vrts = [vrt for vrt in vrts if vrt.endswith(".vrt")]
exts_to_copy = ('.jpg', '.mask.png', '.predicted_defects.png', '.vrt')
# Alright, let's go
folder_names = []
# Create necessary dirs and copy files to 'em
print("Copying files...")
for fn in tqdm(vrts):
fbn = fn.split(".")[0]
ffbn = fbn.split("-")[0]
if ffbn not in folder_names:
folder_names.append(ffbn)
os.makedirs(WHERE_TO_COPY + "/" + ffbn)
for ext in exts_to_copy:
fnm = fbn + ext
shutil.copyfile(FROM_COPY + "/" + fnm,
WHERE_TO_COPY + "/" + ffbn + "/" + fnm)
|
# From https://github.com/PyCQA/bandit/blob/master/examples/sql_statements-py36.py
import sqlalchemy
# bad
query = "SELECT * FROM foo WHERE id = '%s'" % identifier
query = "INSERT INTO foo VALUES ('a', 'b', '%s')" % value
query = "DELETE FROM foo WHERE id = '%s'" % identifier
query = "UPDATE foo SET value = 'b' WHERE id = '%s'" % identifier
query = """WITH cte AS (SELECT x FROM foo)
SELECT x FROM cte WHERE x = '%s'""" % identifier
# bad alternate forms
query = "SELECT * FROM foo WHERE id = '" + identifier + "'"
query = "SELECT * FROM foo WHERE id = '{}'".format(identifier)
query = f"SELECT * FROM foo WHERE id = {tmp}"
# bad
cur.execute("SELECT * FROM foo WHERE id = '%s'" % identifier)
cur.execute("INSERT INTO foo VALUES ('a', 'b', '%s')" % value)
cur.execute("DELETE FROM foo WHERE id = '%s'" % identifier)
cur.execute("UPDATE foo SET value = 'b' WHERE id = '%s'" % identifier)
# bad alternate forms
cur.execute("SELECT * FROM foo WHERE id = '" + identifier + "'")
cur.execute("SELECT * FROM foo WHERE id = '{}'".format(identifier))
cur.execute(f"SELECT * FROM foo WHERE id {tmp}")
# good
cur.execute("SELECT * FROM foo WHERE id = '%s'", identifier)
cur.execute("INSERT INTO foo VALUES ('a', 'b', '%s')", value)
cur.execute("DELETE FROM foo WHERE id = '%s'", identifier)
cur.execute("UPDATE foo SET value = 'b' WHERE id = '%s'", identifier)
# bug: https://bugs.launchpad.net/bandit/+bug/1479625
def a():
def b():
pass
return b
a()("SELECT %s FROM foo" % val)
# real world false positives
choices=[('server_list', _("Select from active instances"))]
print("delete from the cache as the first argument")
|
# -*- coding: utf-8 -*-
import os
ConsumerKey = os.environ['ConsumerKey'] # * enter your Consumer Key *
ConsumerSecret = os.environ['ConsumerSecret'] # * enter your Consumer Secret *
AccessToken = os.environ['AccessToken'] # * enter your Access Token *
AccesssTokenSecert = os.environ['AccesssTokenSecert'] # * enter your Accesss Token Secert *
clickType_single = os.environ['clickType_single'] # * To insert line breaks, put \\n in the environment variable and convert it with .replace('\\n', '\n')
clickType_double = os.environ['clickType_double']
clickType_long = os.environ['clickType_long']
UPDATE_URL = 'https://api.twitter.com/1.1/statuses/update.json'
|
"""
A file to create confusion matrix for each classification result
Example : {
# For each protected group
"0": {
# For each ground truth, create confusion matrix
"0": {"TP": 3, "FP": 2, "TN": 5, "FN": 1},
"1": {"TP": 5, "FP": 1, "TN": 3, "FN": 2}
},
"1": {
"0": {"TP": 5, "FP": 1, "TN": 2, "FN": 1},
"1": {"TP": 2, "FP": 1, "TN": 5, "FN": 1}
}
}
"""
from utils import *
def cm(path="."):
"""
A main function to create confusion matrix
:param path: A path of root directory that contains prediction result of all models
"""
# Iterate for each directory
dir_list = get_dir_list(path)
for main_dir in dir_list:
print(main_dir)
# Create directory to store confusion matrices
file_map = get_file_map(main_dir)
if file_map == {}:
print(">>> No CSV directories to map.")
return
copy_directory(file_map, CM_PATH)
copy_directory(file_map, COUNT_PATH)
total_dir = len(file_map)
dir_idx = 1
# Traverse each file to create confusion matrix JSON
for path in file_map:
print("---- %d/%d: %s" % (dir_idx, total_dir, path))
dir_idx += 1
cm_path = get_new_path(path, CM_PATH)
count_path = get_new_path(path, COUNT_PATH)
for f in file_map[path]:
filename = "./prediction" + path[1:] + f
df = pd.read_csv(filename)
cm_filename = (cm_path + f).split(".csv")[0]
count_filename = (count_path + f).split(".csv")[0]
# Get labels of each column
ground_truth = pd.get_dummies(df[GROUND_TRUTH]).columns.tolist()
predicted_label = pd.get_dummies(df[PREDICTED_LABEL]).columns.tolist()
protected_groups = pd.get_dummies(df[PROTECTED_GROUP]).columns.tolist()
# Check label property
sample = df[GROUND_TRUTH][0]
sample_type = type(sample)
if sample_type == str:
df_count, count_res = count_multi(df)
cm_res = calculate_multi(ast.literal_eval(sample), protected_groups, count_res)
elif sample.dtype.kind == "i":
df_count, count_res = count(df)
cm_res = calculate(ground_truth, protected_groups, df_count)
elif sample.dtype == np.floating: # Added to handle CSV with np float
df_changed = df[df.columns].fillna(0.0).astype(int)
ground_truth = pd.get_dummies(df_changed[GROUND_TRUTH]).columns.tolist()
protected_groups = pd.get_dummies(df_changed[PROTECTED_GROUP]).columns.tolist()
df_count, count_res = count(df_changed)
cm_res = calculate(ground_truth, protected_groups, df_count)
else:
print("Wrong format")
return
with open(count_filename + '.json', 'w') as outfile:
json.dump(count_res, outfile, indent=2)
with open(cm_filename + '.json', 'w') as outfile:
json.dump(cm_res, outfile, indent=2)
def count(df):
"""
A function to count numbers for each prediction cases
:param df: A dataframe of raw prediction result
:return:
- df_count
Column: Ground Truth
Row: Predicted label
TP: df_count[ground truth label, ground truth label]
FP: entire row except df_count[ground truth label, ground truth label]
FN: entire column except df_count[ground truth label, ground truth label]
TN: remaining
- count_res
A dictionary format of df_count
"""
# Get labels of each column
ground_truth = pd.get_dummies(df[GROUND_TRUTH]).columns.tolist()
protected_groups = pd.get_dummies(df[PROTECTED_GROUP]).columns.tolist()
# Set dictionary for each protected group
group_inner_dic = {i: 0 for i in ground_truth}
group_dic = {j: copy.deepcopy(group_inner_dic) for j in ground_truth}
df_count = {}
count_res = {}
# Count cases for each protected group label
for group in protected_groups:
matrix = copy.deepcopy(group_dic)
group_df = df[df[PROTECTED_GROUP] == group]
for truth in ground_truth:
ground_df = group_df[group_df[GROUND_TRUTH] == truth]
count = ground_df[PREDICTED_LABEL].value_counts()
for predicted in ground_truth:
if predicted in count:
matrix[truth][predicted] = int(count[predicted])
df_count[group] = pd.DataFrame.from_dict(matrix)
count_res[group] = matrix
df.drop(group_df.index, inplace=True)
return df_count, count_res
def count_multi(df):
"""
A function to count numbers for each prediction cases
This is a case when a model is for multi-label classification.
:param df: A dataframe of raw prediction result
:return:
- df_count
Column: Ground Truth
Row: Predicted label
TP: df_count[ground truth label, ground truth label]
FP: entire row except df_count[ground truth label, ground truth label]
FN: entire column except df_count[ground truth label, ground truth label]
TN: remaining
- count_res
A dictionary format of df_count
"""
# Get labels of each column
ground_truth_len = len(ast.literal_eval(df[GROUND_TRUTH][0]))
protected_groups = pd.get_dummies(df[PROTECTED_GROUP]).columns.tolist()
ground_truth = {}
predicted_label = {}
for idx, row in df.iterrows():
ground_truth[(row[IDX])] = ast.literal_eval(row[GROUND_TRUTH])
predicted_label[(row[IDX])] = ast.literal_eval(row[PREDICTED_LABEL])
ground_truth_df = pd.DataFrame.from_dict(ground_truth, orient="index")
predicted_label_df = pd.DataFrame.from_dict(predicted_label, orient="index")
# Set dictionary for each protected group
group_innermost_dic = {i: 0 for i in range(2)}
group_inner_dic = {i: copy.deepcopy(group_innermost_dic) for i in range(2)}
group_dic = {j: copy.deepcopy(group_inner_dic) for j in range(ground_truth_len)}
df_count = {}
count_res = {}
# Count cases for each protected group label
for group in protected_groups:
matrix = copy.deepcopy(group_dic)
group_df_idx = list(df[df[PROTECTED_GROUP] == group][IDX])
for class_idx in range(ground_truth_len):
for idx in group_df_idx:
ground = ground_truth_df.at[idx, class_idx]
predicted = predicted_label_df.at[idx, class_idx]
matrix[class_idx][ground][predicted] += 1
df_count[group] = pd.DataFrame.from_dict(matrix)
count_res[group] = matrix
return df_count, count_res
def calculate(ground_truth, protected_groups, df_count):
"""
A function to calculate a confusion matrix
:param ground_truth: A list of ground truth value
:param protected_groups: A list of protected group value
:param df_count: A dataframe that contains prediction summary
:return: confusion matrix based on df_count
"""
# Calculate confusion matrix for each protected group label
cm_res = {}
cm_inner_dic = {
"TP": 0,
"FP": 0,
"TN": 0,
"FN": 0
}
cm_dic = {k: copy.deepcopy(cm_inner_dic) for k in ground_truth} ## Added to handle numpy datatype
for group in protected_groups:
cm = copy.deepcopy(cm_dic)
group_count = df_count[group]
total = int(group_count.values.sum())
for truth in ground_truth:
cm[truth]["TP"] = int(group_count.iloc[truth][truth])
cm[truth]["FP"] = int(group_count.iloc[truth].sum()) - cm[truth]["TP"]
cm[truth]["FN"] = int(group_count[truth].sum()) - cm[truth]["TP"]
cm[truth]["TN"] = total - cm[truth]["TP"] - cm[truth]["FP"] - cm[truth]["FN"]
cm_res[group] = cm
return cm_res
def calculate_multi(ground_truth, protected_groups, count_res):
"""
A function to calculate a confusion matrix
This is a case when a model is for multi-label classification.
:param ground_truth: A list of ground truth value
:param protected_groups: A list of protected group value
:param df_count: A dataframe that contains prediction summary
:return: confusion matrix based on df_count
"""
ground_truth_len = len(ground_truth)
# Calculate confusion matrix for each protected group label
cm_res = {}
cm_inner_dic = {
"TP": 0,
"FP": 0,
"TN": 0,
"FN": 0
}
cm_dic = {k: copy.deepcopy(cm_inner_dic) for k in range(ground_truth_len)}
for group in protected_groups:
cm = copy.deepcopy(cm_dic)
group_count = count_res[group]
for label in range(ground_truth_len):
cm[label]["TP"] = int(group_count[label][TRUE][TRUE])
cm[label]["FP"] = int(group_count[label][FALSE][TRUE])
cm[label]["FN"] = int(group_count[label][TRUE][FALSE])
cm[label]["TN"] = int(group_count[label][FALSE][FALSE])
cm_res[group] = cm
return cm_res
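def _demo_calculate():
    """Illustrative helper only (not called by cm()): a tiny hand-built
    example of calculate(). Two integer labels (0/1) and one protected
    group "g"; the count table has ground truth as columns and predicted
    labels as rows, matching the layout documented in count() above.
    pd is assumed to come from the wildcard utils import, as elsewhere here.
    """
    counts = {"g": pd.DataFrame({0: {0: 3, 1: 1},    # ground truth 0
                                 1: {0: 2, 1: 4}})}  # ground truth 1
    # Expected result: label 0 -> TP 3, FP 2, FN 1, TN 4;
    #                  label 1 -> TP 4, FP 1, FN 2, TN 3.
    return calculate([0, 1], ["g"], counts)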
if __name__ == '__main__':
cm()
|