hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8ab93e887dd2f84c46b855e6ed1b15a733a74f5 | 7,248 | py | Python | reconstruction/experiment/experiment_3.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | null | null | null | reconstruction/experiment/experiment_3.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | 15 | 2019-06-17T16:13:39.000Z | 2022-02-27T05:23:59.000Z | reconstruction/experiment/experiment_3.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | null | null | null | import os
import random
import time
import numpy as np
from memory_profiler import memory_usage
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
import traceback
from csv_modules.csv_writer import write_in_file
from experiment.utils_general import remove_get_dirs
from general_utils.pdb_utils import get_ignore_pdbs, get_chains_pdb, get_all_pdb_name
from general_utils.download_utils import download_pdb
from general_utils.list_utils import generate_binary_matrix
from pdb_to_mrc.pdb_2_mrc import pdb_to_mrc_chains
from process_mrc.generate import get_mrc_one
from reconstruction.DLX import solve, gen_y_dicc, gen_x_dicc
from reconstruction.semi_exact_cover import get_semi_exact_s
| 35.356098 | 120 | 0.679912 | import os
import random
import time
import numpy as np
from memory_profiler import memory_usage
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
import traceback
from csv_modules.csv_writer import write_in_file
from experiment.utils_general import remove_get_dirs
from general_utils.pdb_utils import get_ignore_pdbs, get_chains_pdb, get_all_pdb_name
from general_utils.download_utils import download_pdb
from general_utils.list_utils import generate_binary_matrix
from pdb_to_mrc.pdb_2_mrc import pdb_to_mrc_chains
from process_mrc.generate import get_mrc_one
from reconstruction.DLX import solve, gen_y_dicc, gen_x_dicc
from reconstruction.semi_exact_cover import get_semi_exact_s
def do_parallel_test_a(path_data, result_cvs_file, resolution_range=None, can_elements=None,
                       ignore_pdbs=None, error_file='error.txt', add_to_ignore_files=False):
  """Run do_parallel_test_a_aux for every available PDB entry over MPI workers.

  Args:
    path_data: output directory; one subdirectory is created per PDB.
    result_cvs_file: name of the per-PDB result CSV file.
    resolution_range: [min, max] map resolution; one value is drawn uniformly
      per PDB (defaults to [5.0, 5.0]).
    can_elements: cap on how many PDBs to process (default: all of them).
    ignore_pdbs: PDB ids to skip, in addition to already-completed ones.
    error_file: file where per-PDB failures are appended.
    add_to_ignore_files: forwarded to remove_get_dirs.
  """
  # NOTE: the previous defaults `resolution_range=[5.0, 5.0]` / `ignore_pdbs=[]`
  # were mutable; `ignore_pdbs` in particular was extended in place below
  # (`ignore_pdbs += ...`), so skipped ids leaked between calls and into the
  # caller's list.  Normalize/copy here instead.
  if resolution_range is None:
    resolution_range = [5.0, 5.0]
  ignore_pdbs = [] if ignore_pdbs is None else list(ignore_pdbs)

  comm = MPI.COMM_WORLD
  size = comm.Get_size()

  with MPICommExecutor(comm, root=0, worker_size=size) as executor:
    # Only the root rank gets a non-None executor; worker ranks serve jobs.
    if executor is not None:
      all_names = get_all_pdb_name()  # ~169315 entries
      print("Before get pdb names")
      path = os.path.abspath(path_data)
      if not os.path.isdir(path):
        os.mkdir(path)

      # Skip PDBs that already produced results plus any explicitly ignored ones.
      complete_pdb = remove_get_dirs(path_data, can_csv=1, add_to_ignore_files=add_to_ignore_files)
      ignore_pdbs += complete_pdb
      ignore_pdbs += get_ignore_pdbs()

      if can_elements is None:
        can_elements = len(all_names)

      parallel_jobs = []
      all_names = np.setdiff1d(np.array(all_names), np.array(ignore_pdbs)).tolist()[:can_elements]
      print("Do ", len(all_names), flush=True)

      for pdb_name in all_names:
        resolution = random.uniform(resolution_range[0], resolution_range[1])
        parallel_jobs.append([pdb_name,
                              executor.submit(do_parallel_test_a_aux, path, pdb_name, result_cvs_file,
                                              resolution),
                              resolution])

      # Collect results; record any failure with its PDB id, resolution and traceback.
      for job in parallel_jobs:
        try:
          job[1].result()
        except Exception as e:
          with open(error_file, "a+") as myfile:
            myfile.write(job[0])
            myfile.write("\n")
            myfile.write(str(job[2]))
            myfile.write("\n")
            myfile.write(str(type(e).__name__))
            myfile.write("\n")
            myfile.write(str(e))
            myfile.write("\n")
            myfile.write(str(traceback.format_exc()))
            myfile.write("\n\n\n\n")
def do_parallel_test_a_aux(path, pdb_name, result_cvs_file, resolution):
  """Download one PDB, build per-chain MRC maps, run the comparison, clean up.

  Leaves only the result CSV inside `path/pdb_name`; all intermediate
  .pdb/.mrc files are deleted.
  """
  local_path = path + "/" + pdb_name
  if not os.path.exists(local_path):
    os.makedirs(local_path)

  pdb_path = '{0}/{1}.pdb'.format(local_path, pdb_name)
  download_pdb(pdb_name, pdb_path)
  # Maps creation: one MRC per chain via EMAN2, timing it.
  chains = get_chains_pdb(pdb_path)

  start_time = time.time()
  pdb_to_mrc_chains(False, False, resolution, pdb_path, path, chains, len(chains))
  os.remove(pdb_path)
  time_eman = time.time() - start_time

  # Load each chain's map as a segment, timing the loads.
  all_segments = []
  start_time = time.time()
  con_id_segment = 1
  for chain in chains:
    segments_graph_simulate, _ = get_mrc_one('{0}/{1}_{2}.mrc'.format(local_path, pdb_name, chain),
                                             calculate_Z3D=False)
    segment = segments_graph_simulate[0]
    segment.id_segment = con_id_segment
    all_segments.append(segment)
    con_id_segment += 1
  time_load = time.time() - start_time

  headers_csv = ['Pdb', 'Chains', 'Resolution',
                 'Time load', 'Time EMAN2', 'Time our method', 'Time DLX method',
                 'Our memory', 'DLX memory',
                 'Num update our', 'Num update DLX']

  do_test_a(pdb_name, headers_csv, result_cvs_file, all_segments, resolution, local_path,
            time_eman, time_load, chains)

  # Remove everything except the result CSVs.  The original check
  # `directory.split('.')[1] != 'csv'` raised IndexError for names without a
  # dot and inspected the wrong piece for multi-dot names; compare the actual
  # file extension instead.
  for entry in os.listdir(local_path):
    if os.path.splitext(entry)[1] != '.csv':
      os.remove('{0}/{1}'.format(local_path, entry))
def do_our_method(initial_matrix, count_update=[]):
  """Solve the cover problem with the semi-exact-cover method.

  `count_update` is passed through so the solver can report how many
  internal updates it performed.
  """
  binarized = generate_binary_matrix(initial_matrix)
  solutions = get_semi_exact_s(binarized, 1, 1, count_update=count_update)
  return solutions
def do_DLX_method(initial_matrix, count_update=[]):
  """Solve exact cover over the same matrix with the DLX algorithm.

  `count_update` is passed through so the solver can report how many
  internal updates it performed.
  """
  rows = gen_y_dicc(initial_matrix)
  cols = gen_x_dicc(rows, initial_matrix)
  return list(solve(cols, rows, count_update=count_update))
def do_test_a(pdb_name, headers_csv, result_cvs_file, all_segments, resolution, local_path,
              time_eman, time_load, chains):
  """Build an exact-cover instance from the chain masks, solve it with both
  methods, cross-check the solutions, and append timings/memory to the CSV.
  """
  # Flattened occupancy cube tracking which voxels any chain covers.
  aux_cube = np.zeros((all_segments[0].mask.shape[0] * all_segments[0].mask.shape[1] * all_segments[0].mask.shape[2],))
  initial_matrix = []
  for i in range(len(all_segments)):
    initial_matrix.append(all_segments[i].mask.ravel())

  # Binarize each row and remove overlaps: a voxel claimed by row i is zeroed
  # in all later rows so rows become disjoint (exact-cover friendly).
  for i in range(len(initial_matrix)):
    points_i = np.where(initial_matrix[i] > 0)[0]
    initial_matrix[i][points_i] = 1
    aux_cube[points_i] = 1
    for j in range(i + 1, len(initial_matrix)):
      points_j = np.where(initial_matrix[j] > 0)[0]
      change_points = np.intersect1d(points_i, points_j)
      initial_matrix[j][change_points] = 0

  # Assign every voxel covered by no chain to the first row so the union of
  # rows covers the whole cube.
  free_points = np.where(aux_cube == 0)
  initial_matrix[0][free_points] = 1

  # Our method: time one run, then re-run under memory_usage for peak RSS.
  start_time = time.time()
  combinations = do_our_method(initial_matrix)
  our_combination_time = time.time() - start_time
  max_mem_our = max(memory_usage((do_our_method, (initial_matrix,)), interval=.2))

  # DLX method: same measurement protocol.
  start_time = time.time()
  result = do_DLX_method(initial_matrix)
  dlx_combination_time = time.time() - start_time
  max_mem_dlx = max(memory_usage((do_DLX_method, (initial_matrix,)), interval=.2))

  # Extra runs solely to collect the solvers' internal update counters.
  count_update_DLX = [0]
  do_DLX_method(initial_matrix, count_update_DLX)
  count_update_our = [0]
  do_our_method(initial_matrix, count_update_our)

  # Sanity checks: both solvers must select the same rows, and each solution
  # must use every row (one row per chain).
  if not (np.setdiff1d(combinations[0][1], result[0]).tolist() == []):
    raise Exception("The two methods do not produce the same result")

  if not len(combinations[0][1]) == len(initial_matrix):
    raise Exception("Can not get exact cover with our method")

  if not len(result[0]) == len(initial_matrix):
    raise Exception("Can not get exact cover with DLX method")

  data_write = [[pdb_name,
                 chains,
                 resolution,
                 time_load, time_eman,
                 our_combination_time, dlx_combination_time,
                 max_mem_our, max_mem_dlx,
                 count_update_our[0], count_update_DLX[0]]]

  write_in_file('{0}/{1}'.format(local_path, result_cvs_file), headers_csv, data_write)
| 6,427 | 0 | 115 |
0161f2f2a87a9eed2859c4dfb52457b6de7b4adf | 1,659 | py | Python | test_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | 2 | 2020-04-20T04:14:43.000Z | 2020-09-18T02:51:43.000Z | test_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | null | null | null | test_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | null | null | null | import numpy as np
MAX_VOCAB_SIZE = 60702
import pickle
dist_mat_list = np.load('aux_files/sdist_mat_dic_%d.npy' % (MAX_VOCAB_SIZE))
dist_mat_order = np.load('aux_files/sdist_order_%d.npy' % (MAX_VOCAB_SIZE))
with open('aux_files/dataset_%d.pkl' %MAX_VOCAB_SIZE, 'rb') as f:
dataset = pickle.load(f)
# print(np.shape(dist_mat_list))
# print(dist_mat_list[200:205])
# print(np.shape(dist_mat_order))
# print(dist_mat_order[200:205])
# for i in range(60700,60703):
# cnt_i = i
# if i == 0:
# cnt_i = MAX_VOCAB_SIZE
# print(dataset.inv_dict[cnt_i])
# for j in range(101):
# cnt_dist = dist_mat_order[cnt_i][j]
# if dist_mat_order[cnt_i][j] == 0:
# cnt_dist = MAX_VOCAB_SIZE
# print(cnt_dist, dataset.inv_dict[cnt_dist], dist_mat_list[cnt_i][j])
def pick_most_similar_words(src_word, dist_mat_list, dist_mat_order, ret_count=10, threshold=None):
    """
    Return the precomputed nearest neighbours of `src_word`.

    `dist_mat_order[src_word]` holds neighbour indices sorted by distance and
    `dist_mat_list[src_word]` the matching distances; entry 0 is the word
    itself, so the real neighbours are rows 1..ret_count.

    Returns (neighbour_ids, distances).  When `threshold` is given, only
    neighbours with distance < threshold are kept.  Returns ([], []) when the
    row ends in a zero distance (presumably padding for words with too few
    precomputed neighbours — confirm against the table builder).
    """
    dist_order = dist_mat_order[src_word][1:ret_count + 1]
    dist_list = dist_mat_list[src_word][1:ret_count + 1]
    if dist_list[-1] == 0:
        return [], []
    if threshold is not None:
        # (The previous version first built an unused np.ones_like mask; that
        # dead store has been removed.)
        mask = np.where(dist_list < threshold)
        return dist_order[mask], dist_list[mask]
    return dist_order, dist_list
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order))
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order, threshold=1.45)) | 33.857143 | 99 | 0.688969 | import numpy as np
MAX_VOCAB_SIZE = 60702
import pickle
dist_mat_list = np.load('aux_files/sdist_mat_dic_%d.npy' % (MAX_VOCAB_SIZE))
dist_mat_order = np.load('aux_files/sdist_order_%d.npy' % (MAX_VOCAB_SIZE))
with open('aux_files/dataset_%d.pkl' %MAX_VOCAB_SIZE, 'rb') as f:
dataset = pickle.load(f)
# print(np.shape(dist_mat_list))
# print(dist_mat_list[200:205])
# print(np.shape(dist_mat_order))
# print(dist_mat_order[200:205])
# for i in range(60700,60703):
# cnt_i = i
# if i == 0:
# cnt_i = MAX_VOCAB_SIZE
# print(dataset.inv_dict[cnt_i])
# for j in range(101):
# cnt_dist = dist_mat_order[cnt_i][j]
# if dist_mat_order[cnt_i][j] == 0:
# cnt_dist = MAX_VOCAB_SIZE
# print(cnt_dist, dataset.inv_dict[cnt_dist], dist_mat_list[cnt_i][j])
def pick_most_similar_words(src_word, dist_mat_list, dist_mat_order, ret_count=10, threshold=None):
    """
    Look up the `ret_count` precomputed nearest neighbours of `src_word`,
    optionally keeping only those with distance below `threshold`.
    Row 0 of each table is the word itself and is skipped; a trailing zero
    distance yields ([], []).
    """
    neighbours = dist_mat_order[src_word][1:ret_count + 1]
    distances = dist_mat_list[src_word][1:ret_count + 1]
    if distances[-1] == 0:
        return [], []
    if threshold is None:
        return neighbours, distances
    keep = np.where(distances < threshold)
    return neighbours[keep], distances[keep]
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order))
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order, threshold=1.45)) | 0 | 0 | 0 |
104a8a644f054ae7392a7ba557cf5ac417f20648 | 6,121 | py | Python | evaluate/parse_fct_homa.py | eniac/MimicNet | c0790679f8c220c75c33ace67e2735816aac6815 | [
"MIT"
] | 15 | 2021-08-20T08:10:01.000Z | 2022-03-24T21:24:50.000Z | evaluate/parse_fct_homa.py | eniac/MimicNet | c0790679f8c220c75c33ace67e2735816aac6815 | [
"MIT"
] | 1 | 2022-03-30T09:03:39.000Z | 2022-03-30T09:03:39.000Z | evaluate/parse_fct_homa.py | eniac/MimicNet | c0790679f8c220c75c33ace67e2735816aac6815 | [
"MIT"
] | 3 | 2021-08-20T08:10:34.000Z | 2021-12-02T06:15:02.000Z | #!/usr/bin/env python3
import sys
import argparse
import numpy as np
from generate_traffic import *
from HomaPkt import *
if __name__=="__main__":
    # Topology defaults; each may be overridden by the CLI flags parsed below.
    load = 0.70
    numOfSpines = 4
    numOfSubtrees = 2
    numOfToRsPerSubtree = 2
    numOfServersPerRack = 4
    evaluatedPrefix = "1.0."
    linkSpeed = 100e6

    parser = argparse.ArgumentParser()
    parser.add_argument("seed", type=int, help="RNG seed required.")
    parser.add_argument("directory", type=str,
                        help="Directory prefix of pcaps and dumps")
    parser.add_argument("--evaluated_prefix", type=str,
                        help="IP prefix of evaluated region, e.g., 1.0.")
    parser.add_argument("--load", type=float,
                        help="Portion of bisection bandwidth utilized.")
    parser.add_argument("--numClusters", type=int,
                        help="Number clusters to generate traffic for.")
    parser.add_argument("--numToRs", type=int,
                        help="Number of ToR switches/racks per cluster.")
    parser.add_argument("--numServers", type=int,
                        help="Number of servers per rack.")
    parser.add_argument("--linkSpeed", type=float,
                        help="Link speed")
    args = parser.parse_args()

    seed = args.seed
    data_dir = args.directory
    # NOTE(review): truthiness checks mean an explicit 0/0.0 flag value is
    # ignored and the default kept — confirm that is intended.
    if args.evaluated_prefix:
        evaluatedPrefix = args.evaluated_prefix
    if args.load:
        load = args.load
    if args.numClusters:
        numOfSubtrees = args.numClusters
    if args.numToRs:
        numOfToRsPerSubtree = args.numToRs
    if args.numServers:
        numOfServersPerRack = args.numServers
    if args.linkSpeed:
        linkSpeed = args.linkSpeed
    # Spine count is derived from the ToR count, overriding the default above.
    numOfSpines = numOfToRsPerSubtree * numOfToRsPerSubtree

    # Regenerate the same traffic matrix the simulation used (same seed).
    rng = np.random.RandomState(seed=seed)
    emulatedRacks = range(numOfToRsPerSubtree, numOfSubtrees*numOfToRsPerSubtree)
    traffic_matrix = generate_traffic_matrix(rng, load, linkSpeed,
                                             numOfServersPerRack,
                                             numOfToRsPerSubtree, numOfSubtrees,
                                             numOfSpines, emulatedRacks)

    # Parse the raw eval log into flow-completion-time records.
    filename = data_dir + '/eval' + str(numOfSubtrees) + '/eval.raw'
    out_fct = data_dir + '/fct_c' + str(numOfSubtrees) + '.dat'
    with open(filename, 'r') as eval_file, \
         open(out_fct, 'w') as fct_file:
        parse_fct(eval_file, fct_file, traffic_matrix, numOfSubtrees,
                  numOfToRsPerSubtree, numOfServersPerRack, evaluatedPrefix)
| 37.323171 | 98 | 0.569515 | #!/usr/bin/env python3
import sys
import argparse
import numpy as np
from generate_traffic import *
from HomaPkt import *
def parse_fct(eval_file, fct_file, traffic_matrix, numOfSubtrees,
              numOfToRsPerSubtree, numOfServersPerRack, evaluatedPrefix):
    """Match Homa packets from the eval log against the generated traffic
    matrix and write one "src dst start end fct" line per completed flow.
    """
    # Mirror the traffic matrix in both (src, dst) and (dst, src) orientations
    # so flows can be matched regardless of direction.
    echo_tm = dict()
    for src_num in traffic_matrix:
        for dst_num in traffic_matrix[src_num]:
            key1 = (src_num, dst_num)
            key2 = (dst_num, src_num)
            if key1 not in echo_tm:
                echo_tm[key1] = []
            echo_tm[key1].extend(traffic_matrix[src_num][dst_num])
            if key2 not in echo_tm:
                echo_tm[key2] = []
            echo_tm[key2].extend(traffic_matrix[src_num][dst_num])
    # re-sort by time
    for key in echo_tm:
        echo_tm[key] = sorted(echo_tm[key], key=lambda elem: elem[0])

    flows = dict() # msg_id -> (src_num, dst_num, last_time, last_seq)
    count = 0;
    for line in eval_file:
        count+=1
        if "Homa" not in line:
            continue
        toks = line.split()
        pkt = HomaPkt(toks)

        # NOTE: FCT is measured by the time the last packet leaves.
        # TODO: capture pcaps everywhere and measure vs arrival time.
        src = pkt.get("src")
        dst = pkt.get("dst")
        msg_id = pkt.get("msg_id")
        # Flatten the dotted address (cluster.rack.host fields) into a single
        # host index; hosts appear at every 4th address within a rack.
        stoks = src.split(".")
        dtoks = dst.split(".")
        src_num = (int(stoks[3]) // 4) + \
                  numOfServersPerRack * int(stoks[2]) + \
                  numOfToRsPerSubtree * numOfServersPerRack * int(stoks[1])
        dst_num = (int(dtoks[3]) // 4) + \
                  numOfServersPerRack * int(dtoks[2]) + \
                  numOfToRsPerSubtree * numOfServersPerRack * int(dtoks[1])

        if src.startswith(evaluatedPrefix) and dst.startswith(evaluatedPrefix):
            if pkt.get("tor") != stoks[2] \
                    or int(pkt.get("svr")) != int(stoks[3])/4:
                # don't double count local receives
                continue

        # Instantiate the flow record if it's the Request
        if pkt.get("type") == "REQUEST":
            assert int(pkt.get("seq_begin")) == 0
            flows[msg_id] = {'src_num': src_num, 'dst_num': dst_num,
                             'last_time': float(pkt.get("time")),
                             'last_seq': int(pkt.get("seq_end"))}

        # check if this is the end of a flow: any data-carrying packet type
        # advances the flow's last-seen time and sequence number.
        if (pkt.get('type') == "REQUEST" or pkt.get('type') == "SCHED" \
                or pkt.get("type") == "UNSCHED"):
            if msg_id in flows:
                flows[msg_id]['last_time'] = float(pkt.get("time"))
                flows[msg_id]['last_seq'] = int(pkt.get("seq_end"))
            else:
                print("Detected dropped REQUEST for msg_id", msg_id)
            # assert msg_id in flows

    # Pair each observed flow with the earliest traffic-matrix entry of the
    # same size on the same (src, dst); consume entries as they are matched.
    found_count = 0
    for flow_record in flows.values():
        tm_key = (flow_record['src_num'], flow_record['dst_num'])
        found = False
        for i in range(len(echo_tm[tm_key])):
            start_time, flow_size = echo_tm[tm_key][i]
            if flow_size == flow_record['last_seq'] + 1:
                fct_file.write("%d %d %f %f %f\n" %
                               (flow_record['src_num'], flow_record['dst_num'],
                                start_time, flow_record['last_time'],
                                flow_record['last_time'] - start_time))
                del echo_tm[tm_key][i]
                found = True
                found_count += 1
                break
        #if not found:
        #    print("Could not find matching flow record (probably incomplete flow):", flow_record)
    print("Found a total of", found_count, "complete flows")
if __name__=="__main__":
load = 0.70
numOfSpines = 4
numOfSubtrees = 2
numOfToRsPerSubtree = 2
numOfServersPerRack = 4
evaluatedPrefix = "1.0."
linkSpeed = 100e6
parser = argparse.ArgumentParser()
parser.add_argument("seed", type=int, help="RNG seed required.")
parser.add_argument("directory", type=str,
help="Directory prefix of pcaps and dumps")
parser.add_argument("--evaluated_prefix", type=str,
help="IP prefix of evaluated region, e.g., 1.0.")
parser.add_argument("--load", type=float,
help="Portion of bisection bandwidth utilized.")
parser.add_argument("--numClusters", type=int,
help="Number clusters to generate traffic for.")
parser.add_argument("--numToRs", type=int,
help="Number of ToR switches/racks per cluster.")
parser.add_argument("--numServers", type=int,
help="Number of servers per rack.")
parser.add_argument("--linkSpeed", type=float,
help="Link speed")
args = parser.parse_args()
seed = args.seed
data_dir = args.directory
if args.evaluated_prefix:
evaluatedPrefix = args.evaluated_prefix
if args.load:
load = args.load
if args.numClusters:
numOfSubtrees = args.numClusters
if args.numToRs:
numOfToRsPerSubtree = args.numToRs
if args.numServers:
numOfServersPerRack = args.numServers
if args.linkSpeed:
linkSpeed = args.linkSpeed
numOfSpines = numOfToRsPerSubtree * numOfToRsPerSubtree
rng = np.random.RandomState(seed=seed)
emulatedRacks = range(numOfToRsPerSubtree, numOfSubtrees*numOfToRsPerSubtree)
traffic_matrix = generate_traffic_matrix(rng, load, linkSpeed,
numOfServersPerRack,
numOfToRsPerSubtree, numOfSubtrees,
numOfSpines, emulatedRacks)
filename = data_dir + '/eval' + str(numOfSubtrees) + '/eval.raw'
out_fct = data_dir + '/fct_c' + str(numOfSubtrees) + '.dat'
with open(filename, 'r') as eval_file, \
open(out_fct, 'w') as fct_file:
parse_fct(eval_file, fct_file, traffic_matrix, numOfSubtrees,
numOfToRsPerSubtree, numOfServersPerRack, evaluatedPrefix)
| 3,533 | 0 | 23 |
5b1d13ea0e23b3dd4d87b6dc1caada79039cbe47 | 1,950 | py | Python | gen.py | Eadral/OOHelper | 42a58a505a536ad66f7af1c11bda66e6c55fbb48 | [
"MIT"
] | 5 | 2019-03-06T11:08:12.000Z | 2019-04-15T13:07:08.000Z | gen.py | Eadral/OOHelper | 42a58a505a536ad66f7af1c11bda66e6c55fbb48 | [
"MIT"
] | null | null | null | gen.py | Eadral/OOHelper | 42a58a505a536ad66f7af1c11bda66e6c55fbb48 | [
"MIT"
] | null | null | null | import random
from config import *
import time
import os
from utils import datacheck
import threading
id_now = 0
available_level = [-3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# if __name__ == "__main__":
# id_now = 0
# gen_path = os.path.join("test_data", "auto")
# # print(gen(n_batch=5, batch_size=6, time_interval=30))
# # exit(0)
# if not os.path.exists(gen_path):
# os.mkdir(gen_path)
#
# n = 512
# for i in range(n):
# save(os.path.join(gen_path, autoname()), gen(n_batch=40, batch_size=1, time_interval=2.0))
# # gen(n_batch=40, batch_size=1, time_interval=0.1)
| 23.780488 | 101 | 0.617949 | import random
from config import *
import time
import os
from utils import datacheck
import threading
id_now = 0
available_level = [-3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
def get_next_id():
    """Return the next sequential request id (increments module-global id_now)."""
    global id_now
    id_now += 1
    return id_now
def gen_level(diff=-1):
    """Draw a random floor from available_level, rejecting the value `diff`.

    Used to guarantee a request's destination differs from its origin.
    """
    while True:
        candidate = random.choice(available_level)
        if candidate != diff:
            return candidate
def request(time, id_, from_, to):
    """Format one elevator request line, e.g. "[1.0]5-FROM-3-TO-7\\n"."""
    return f"[{float(time):.1f}]{id_}-FROM-{from_}-TO-{to}\n"
def gen_batch(time, n):
    """Generate `n` random requests all stamped with the same time."""
    lines = []
    for _ in range(n):
        origin = gen_level()
        destination = gen_level(origin)  # destination never equals origin
        lines.append(request(time, get_next_id(), origin, destination))
    return lines
def save(filename, lines):
    """Write the generated request lines to `filename` and log the path.

    Uses a context manager so the file handle is flushed and closed
    deterministically (the original left the handle to the garbage collector).
    """
    with open(filename, "w") as out:
        out.writelines(lines)
    print("Generated: {} ".format(filename))
def autoname():
    """Build a (practically) unique input-file name from the current timestamp."""
    stamp = "".join(str(time.time()).split('.'))
    return "auto_{}.in".format(stamp)
def gen(n_batch, batch_size, time_interval=1.0):
    """Generate `n_batch` batches of `batch_size` requests at randomly
    spaced times (spacing uniform in [0, time_interval))."""
    global id_now
    assert n_batch * batch_size <= cfg.MAX_REQUEST
    clock = 0.0
    all_requests = []
    for _ in range(n_batch):
        assert clock < cfg.MAX_TIME
        all_requests.extend(gen_batch(clock, batch_size))
        clock += time_interval * random.random()
    return all_requests
def random_range(range):
    """Return a uniform sample from [1 - range, 1)."""
    # NOTE: the parameter name shadows the builtin `range`; kept for
    # backward compatibility with keyword callers.
    offset = random.random() * range
    return offset + 1 - range
# if __name__ == "__main__":
# id_now = 0
# gen_path = os.path.join("test_data", "auto")
# # print(gen(n_batch=5, batch_size=6, time_interval=30))
# # exit(0)
# if not os.path.exists(gen_path):
# os.mkdir(gen_path)
#
# n = 512
# for i in range(n):
# save(os.path.join(gen_path, autoname()), gen(n_batch=40, batch_size=1, time_interval=2.0))
# # gen(n_batch=40, batch_size=1, time_interval=0.1)
| 1,095 | 0 | 184 |
fb695e03dd81c868318b5c4c114aa635e9e3bb88 | 2,497 | py | Python | device_guids.py | ShannonCanTech/microsoft-authentication-library-for-objc | 649928a6b232bd63082b0a8b6498ccff65369010 | [
"MIT"
] | 1 | 2021-01-13T23:56:09.000Z | 2021-01-13T23:56:09.000Z | device_guids.py | ShannonCanTech/microsoft-authentication-library-for-objc | 649928a6b232bd63082b0a8b6498ccff65369010 | [
"MIT"
] | 1 | 2021-06-20T13:43:07.000Z | 2021-06-20T13:43:07.000Z | device_guids.py | ShannonCanTech/microsoft-authentication-library-for-objc | 649928a6b232bd63082b0a8b6498ccff65369010 | [
"MIT"
] | 2 | 2020-08-30T17:13:28.000Z | 2021-06-20T13:38:02.000Z | #!/usr/bin/env python
import re
import subprocess
import platform
import os
import sys
get_ios.guid = {}
get_mac.guid = None | 26.284211 | 110 | 0.700841 | #!/usr/bin/env python
import re
import subprocess
import platform
import os
import sys
def is_version_higher(orig_version, new_version) :
    """Return True iff new_version is strictly newer than orig_version.

    Both arguments are (major, minor, patch) number sequences; the comparison
    is lexicographic, exactly like Python's tuple ordering.
    """
    return tuple(new_version) > tuple(orig_version)
def get_guid_i(device) :
    # Return the GUID of the newest-OS simulator matching `device`, or None.
    # Parses `instruments -s devices` output lines like "iPhone 8 (12.1) [GUID]".
    # (Python 2 code: print statements, byte-string pipe iteration.)
    device_regex = re.compile("[A-Za-z0-9 ]+ ?(?:\\(([0-9.]+)\\))? \\[([A-F0-9-]+)\\]")
    version_regex = re.compile("([0-9]+)\\.([0-9]+)(?:\\.([0-9]+))?")

    command = "instruments -s devices"
    # travis_fold markers collapse the device dump in Travis CI logs.
    print "travis_fold:start:Devices"
    p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)

    # Sometimes the hostname comes back with the proper casing, sometimes not. Using a
    # case insensitive regex ensures we work either way
    dev_name_regex = re.compile("^" + device + " \\(", re.I)

    latest_os_device = None
    latest_os_version = None

    for line in p.stdout :
        sys.stdout.write(line)
        if (dev_name_regex.match(line) == None) :
            continue

        match = device_regex.match(line)
        # Regex won't match simulators with apple watches...
        if (match == None) :
            continue

        # Normalize "X.Y" to (X, Y, 0) so versions compare as 3-tuples.
        version_match = version_regex.match(match.group(1))
        minor_version = version_match.group(3)
        if (minor_version == None) :
            minor_version = 0
        else :
            minor_version = int(minor_version)
        version_tuple = (int(version_match.group(1)), int(version_match.group(2)), minor_version)

        # Keep the device with the highest OS version seen so far.
        if latest_os_version == None or is_version_higher(latest_os_version, version_tuple) :
            latest_os_device = match.group(2)
            latest_os_version = version_tuple

    print "travis_fold:end:Devices"
    return latest_os_device
def get_guid(device) :
    """Look up the simulator GUID for `device`; abort via print_failure if absent."""
    guid = get_guid_i(device)
    if guid is None :
        print_failure(device)  # raises, so a None guid never propagates
    return guid
def print_failure(device) :
    # Log the miss, dump the full device list to aid debugging, then abort.
    # (Python 2 print statement.)
    print "Failed to find GUID for device : " + device
    subprocess.call("instruments -s devices", shell=True)
    raise Exception("Failed to get device GUID")
def get_ios(device) :
    """Memoized iOS-simulator GUID lookup; cache lives on the function object."""
    try :
        return get_ios.guid[device]
    except KeyError :
        guid = get_guid(device)
        get_ios.guid[device] = guid
        return guid
get_ios.guid = {}
def get_mac() :
    # Return this Mac's hardware UUID, cached on the function after first call.
    if (get_mac.guid != None) :
        return get_mac.guid
    # system_profiler prints "Hardware UUID: <value>"; awk picks the 3rd column.
    guid = subprocess.check_output("system_profiler SPHardwareDataType | awk '/UUID/ { print $3; }'", shell=True)
    guid = guid.strip()
    get_mac.guid = guid
    return guid
get_mac.guid = None
68390a9958cc5e2466c366bfb2b3b4ace7378182 | 2,046 | py | Python | chainer/links/eBNN/link_binary_linear_softmax_layer.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | chainer/links/eBNN/link_binary_linear_softmax_layer.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | chainer/links/eBNN/link_binary_linear_softmax_layer.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from chainer.functions import accuracy
import chainer
import numpy as np
from chainer.links import CLink
from chainer.links.eBNN.link_binary_linear import BinaryLinear
from chainer.links.eBNN.link_softmax_cross_entropy import SoftmaxCrossEntropy
from chainer.utils import binary_util as bu
import math
| 29.652174 | 108 | 0.585044 | from __future__ import absolute_import
from chainer.functions import accuracy
import chainer
import numpy as np
from chainer.links import CLink
from chainer.links.eBNN.link_binary_linear import BinaryLinear
from chainer.links.eBNN.link_softmax_cross_entropy import SoftmaxCrossEntropy
from chainer.utils import binary_util as bu
import math
class BinaryLinearSoftmax(chainer.link.Chain, CLink):
    """Binary fully-connected layer followed by softmax cross-entropy loss,
    with C code generation support for embedded (eBNN) deployment."""

    def __init__(self, in_channels, out_channels):
        super(BinaryLinearSoftmax, self).__init__(
            bl=BinaryLinear(in_channels, out_channels),
            sm=SoftmaxCrossEntropy()
        )
        # cname prefixes the generated C function names for this layer type.
        self.cname = "l_b_linear_softmax"

    def __call__(self, h, t=None):
        # With targets: record accuracy and return the loss.
        # Without targets (inference): return the raw linear outputs.
        h = self.bl(h)
        if t is not None:
            self.accuracy = accuracy(h,t)
            loss = self.sm(h,t)
            return loss
        return h

    def generate_c(self, link_idx, inp_shape):
        # Emit C source for this layer: binarized weights packed into uint8
        # arrays, float biases, and a wrapper calling blinear_layer().
        name = self.cname + str(link_idx)
        text = []

        # BinaryLinear bl
        l = self.bl
        lName = l.name
        lname=name+'_'+lName
        for p in l.params():
            pname=p.name
            if pname == 'W':
                # Weights are binarized and transposed to column-major for the C kernel.
                text += [bu.np_to_uint8C(bu.binarize_real(p.data.T), lname+'_'+pname, 'col_major', pad='1')]
                num_classes = p.data.shape[0]
                fc_size = p.data.shape[1]
            elif pname == 'b':
                text += [bu.np_to_floatC(p.data, lname+'_'+pname, 'row_major')]
        text = "\n".join(text)

        # GEMM dimensions: 1 x fc_size input against fc_size x num_classes weights.
        m = 1
        n = fc_size
        k = num_classes
        ftext = "void {name}(uint8_t* input, uint8_t* output){{\n"
        ftext += " blinear_layer(input, {name}_bl_W, output, {name}_bl_b, {m}, {n}, {k}); \n}}\n\n"
        ftext = ftext.format(name=name, m=m, n=n, k=k)
        text += ftext

        return text

    def is_bin(self):
        # Outputs of this layer are binary (packed bits).
        return True

    def buf_mem(self, inp_shape):
        # No persistent buffer is needed between invocations.
        return 0

    def temp_mem(self, inp_shape):
        # Temporary memory: one packed-bit row (ceil(width/8) bytes) per sample.
        m = inp_shape[0]
        w = np.prod(inp_shape[1:])
        res_w = math.ceil(w/8.)
        return m*res_w
| 1,487 | 32 | 184 |
3db5559ad8c3d3521e15bb759a7f3aa7f2a3a3e8 | 322 | py | Python | src/waldur_core/structure/widgets.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_core/structure/widgets.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_core/structure/widgets.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from django.contrib.admin.widgets import FilteredSelectMultiple
| 40.25 | 79 | 0.748447 | from django.contrib.admin.widgets import FilteredSelectMultiple
class ScrolledSelectMultiple(FilteredSelectMultiple):
    """Admin multi-select widget whose option list scrolls horizontally."""

    def __init__(self, verbose_name, is_stacked=False, attrs=None, choices=()):
        # Default to a horizontally scrollable container unless (truthy) attrs
        # are supplied by the caller.
        attrs = attrs or {'style': 'overflow-x: auto'}
        super().__init__(verbose_name, is_stacked, attrs, choices)
| 176 | 32 | 49 |
488869ced4076e94b7250bdca50f7f784a3df430 | 1,602 | py | Python | source/backend/functions/cleanup-bucket/cleanup_bucket.py | Manny27nyc/aws-perspective | a2ce5275584572abb5916d8d419548189db59075 | [
"Apache-2.0"
] | 569 | 2020-09-21T15:57:51.000Z | 2022-03-31T20:00:24.000Z | source/backend/functions/cleanup-bucket/cleanup_bucket.py | Manny27nyc/aws-perspective | a2ce5275584572abb5916d8d419548189db59075 | [
"Apache-2.0"
] | 208 | 2020-09-21T16:22:31.000Z | 2022-03-29T21:21:10.000Z | source/backend/functions/cleanup-bucket/cleanup_bucket.py | cloudeteer/aws-perspective | 8a75b2e5314f57a22556df51b5dd9191574e68b3 | [
"Apache-2.0"
] | 54 | 2020-09-21T16:28:45.000Z | 2022-03-12T19:43:25.000Z | import boto3
import functools
import logging
import os
from crhelper import CfnResource
helper = CfnResource(json_logging=False, log_level='DEBUG',
boto_level='CRITICAL')
s3 = boto3.resource("s3")
client = boto3.client("s3")
logger = logging.getLogger()
logger.setLevel(os.getenv("LogLevel", logging.INFO))
def with_logging(handler):
"""
Decorator which performs basic logging and makes logger available on context
"""
@functools.wraps(handler)
return wrapper
@with_logging
@helper.create
@helper.update
@with_logging
@helper.delete
| 26.7 | 82 | 0.691011 | import boto3
import functools
import logging
import os
from crhelper import CfnResource
helper = CfnResource(json_logging=False, log_level='DEBUG',
boto_level='CRITICAL')
s3 = boto3.resource("s3")
client = boto3.client("s3")
logger = logging.getLogger()
logger.setLevel(os.getenv("LogLevel", logging.INFO))
def with_logging(handler):
    """
    Decorator which performs basic logging and makes logger available on context
    """
    @functools.wraps(handler)
    def wrapper(event, *args, **kwargs):
        # `json` is not imported at module level in this file, so the original
        # raised NameError whenever DEBUG logging was enabled; import locally.
        import json
        logger.debug('## HANDLER: %s', handler.__name__)
        logger.debug('## ENVIRONMENT VARIABLES')
        logger.debug(json.dumps(os.environ.copy()))
        logger.debug('## EVENT')
        logger.debug('Event: %s', event)
        return handler(event, *args, **kwargs)
    return wrapper
@with_logging
@helper.create
@helper.update
def create(event, context):
    # Nothing to do on stack create/update; the bucket is only emptied on delete.
    return None
@with_logging
@helper.delete
def delete(event, _):
    """
    Empty the S3 bucket named in the custom-resource event so that
    CloudFormation can subsequently delete the bucket itself.
    """
    target = event['ResourceProperties']['Bucket']
    logger.info('Beginning cleanup of ' + target + '...')
    bucket = s3.Bucket(target)
    # Access logging must be switched off first, otherwise the access-log
    # bucket keeps receiving new objects and never empties. DependsOn
    # ordering cannot express this without circular dependencies.
    client.put_bucket_logging(Bucket=target, BucketLoggingStatus={})
    # Delete current objects, then every historical version/delete marker.
    bucket.objects.all().delete()
    bucket.object_versions.all().delete()
    logger.info('Cleanup of ' + target + ' complete.')
    return None
def handler(event, context):
    """Lambda entry point: hand the event to the crhelper dispatcher."""
    helper(event, context)
| 922 | 0 | 93 |
dc18f0491df285d0e7c558d67eca714d291e347f | 87,689 | py | Python | graphics/cycling/Analyses.py | JCSDA/mpas-jedi | e0780d1fd295912ee4cfb758854c52b6764d4ab9 | [
"Apache-2.0"
] | 2 | 2021-09-25T01:20:10.000Z | 2021-12-17T18:44:53.000Z | graphics/cycling/Analyses.py | JCSDA/mpas-jedi | e0780d1fd295912ee4cfb758854c52b6764d4ab9 | [
"Apache-2.0"
] | null | null | null | graphics/cycling/Analyses.py | JCSDA/mpas-jedi | e0780d1fd295912ee4cfb758854c52b6764d4ab9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import basic_plot_functions as bpf
import binning_utils as bu
import predefined_configs as pconf
from collections.abc import Iterable
from collections import defaultdict
from copy import deepcopy
import collections
import datetime as dt
import diag_utils as du
import inspect
import logging
import multiprocessing as mp
import numpy as np
from pathlib import Path
import plot_utils as pu
import re
import os
import stat_utils as su
import StatisticsDatabase as sdb
import var_utils as vu
bootStrapStats = []
for x in su.sampleableAggStats:
if x != 'Count': bootStrapStats.append(x)
## plot settings
figureFileType = 'pdf' #['pdf','png']
interiorLabels = True
###################################
## Base class for all analysisTypes
###################################
def categoryBinValsAttributes(dfw, fullBinVar, binMethod, options):
'''
Utility function for providing an ordered list of
pairs of binVals and associated labels for
category binMethods in the context of a DFWrapper
'''
binVar = vu.varDictAll[fullBinVar][1]
dbSelect1DBinVals = dfw.levels('binVal')
binUnitss = dfw.uniquevals('binUnits')
#if (len(binUnitss) == 0 or
# len(dbSelect1DBinVals) == 1): return None, None
assert (len(binUnitss) != 0 and len(dbSelect1DBinVals) > 0), 'ERROR: categoryBinValsAttributes received invalid binVar/binMethod'
binUnits = binUnitss[0]
# reorder select1DBinVals to match binMethod definition
# TODO(JJG): clean up for readability
tmp = deepcopy(pconf.binVarConfigs.get(
fullBinVar,{}).get(
binMethod,{}).get(
'values', dbSelect1DBinVals))
select1DBinVals = []
if (not isinstance(tmp, Iterable) or
isinstance(tmp, str)):
select1DBinVals += [tmp]
else:
select1DBinVals += tmp
for Bin in dbSelect1DBinVals:
if Bin not in select1DBinVals:
select1DBinVals.append(Bin)
for Bin in list(select1DBinVals):
if Bin not in dbSelect1DBinVals:
select1DBinVals.remove(Bin)
binTitles = []
for binVal in select1DBinVals:
if pu.isfloat(binVal) or pu.isint(binVal):
t = ' @ '+binVar+'='+binVal
if binUnits != vu.miss_s:
t = t+' '+binUnits
else:
t = ' @ '+binVal
binTitles.append(t)
binValsMap = list(zip(select1DBinVals, binTitles))
return binValsMap
#=============
# 1-D figures
#=============
class CategoryBinMethodBase(AnalysisBase):
'''
Base class used to analyze statistics across binMethods with zero-dimensioned or
category binValues, e.g., QC flag, named latitude band, cloudiness regime, surface type
'''
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots):
'''
virtual method
'''
raise NotImplementedError()
class CYAxisExpLines(CategoryBinMethodBase):
'''
Creates a timeseries figure between firstCycleDTime and lastCycleDTime
for each forecast length between fcTDeltaFirst and fcTDeltaLast
- x-axis: cycle initial time
- line: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar, statistic, and FC lead time (if applicable)
'''
class FCAxisExpLines(CategoryBinMethodBase):
'''
Creates a timeseries figure between fcTDeltaFirst and fcTDeltaLast containing
aggregated statistics for the period between firstCycleDTime and lastCycleDTime
- x-axis: forecast duration
- line: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar and statistic
'''
class FCAxisExpLinesDiffCI(CategoryBinMethodBase):
'''
Similar to FCAxisExpLines, except
- shows difference between experiment(s) and control
- control is selected using cntrlExpIndex
- statistics are narrowed down by bootStrapStats
- confidence intervals (CI) are shown at each lead time
- line+shaded region: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar and statistic
'''
class CYAxisFCLines(CategoryBinMethodBase):
'''
Similar to CYAxisExpLines, except
each line is for a different forecast lead time and
each experiment is in a different file
- x-axis: valid time of forecast
- line: per FC lead time
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar, statistic, and experiment
- self.MAX_FC_LINES determines number of FC lead time lines to include
'''
###########################################
## Figures with individual lines per binVal
###########################################
class CYAxisBinValLines(BinValLinesAnalysisType):
'''
Similar to CYAxisExpLines, except
each line is for a different binVal (e.g., latitude band, cloudiness, etc.)
- line: binVals for named bins (e.g., NXTro, Tro, SXTro for latitude)
- subplot: column by experiment, row by DiagSpace variable
- file: combination of statistic and forecast length
'''
# TODO(JJG): implement FCAxisBinValLines similar to FCAxisExpLines
#########################################################
## Figures with binVal on one axis, i.e., 2D and profiles
#########################################################
class OneDimBinMethodBase(AnalysisBase):
'''
Base class used to analyze statistics across binMethods with one-dimensional binValues
that are assigned numerical values, e.g., altitude, pressure, latitude, cloud fraction
'''
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
'''
virtual method
'''
raise NotImplementedError()
class CYandBinValAxes2D(OneDimBinMethodBase):
'''
Creates raster maps with binVar binVals on y-axis
- only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
- subplot: column by experiment, row by DiagSpace variable
- file: combination of binVar, binMethod, statistic, and FC lead time
'''
class FCandBinValAxes2D(OneDimBinMethodBase):
'''
Creates raster maps with binVar binVals on y-axis
- only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
- subplot: column by experiment, row by DiagSpace variable
- file: combination of binVar, binMethod, and statistic
'''
class BinValAxisProfile(OneDimBinMethodBase):
'''
Similar to FCandBinValAxes2D, except
- each vertical column of raster points is plotted as a profile on
a separate set of axes instead of in 2-D color
- therefore this is a valid plot even for a single forecast length (omb)
- line: per experiment
- subplot: column by lead time, row by DiagSpace variable
- file: combination of binVar, binMethod, and statistic
- self.MAX_FC_SUBFIGS determines number of FC lead times to include
'''
class BinValAxisProfileDiffCI(OneDimBinMethodBase):
'''
Similar to BinValAxisProfile, except
shows difference between experiment(s) and control
- control is selected using cntrlExpIndex
- statistics are narrowed down by bootStrapStats
- confidence intervals (CI) are shown at each lead time and binVal
- line+shaded region: per experiment
- subplot: column by lead time, row by DiagSpace variable
- file: combination of binVar, binMethod, and statistic
- self.MAX_FC_SUBFIGS determines number of FC lead times to include
'''
class BinValAxisPDF(AnalysisBase):
'''
Similar to BinValAxisProfile, except
uses Count statistic to analyze a PDF across binVals
- x-axis: binVal
- line: per binMethod
- subplot: combination of FC lead time and DiagSpace variable
- file: per experiment (if applicable)
'''
# TODO: generalize as a sub-class of OneDimBinMethodBase
class BinValAxisStatsComposite(AnalysisBase):
'''
Similar to BinValAxisProfile, except
all statistics (Count, Mean, RMS, STD) are placed on the same axis
- x-axis: binVal
- line: per statistic
- subplot: per DiagSpace variable
- file: combination of FC lead time, experiment, and binMethod (if applicable)
'''
#===========================
# Calculate gross statistics
#===========================
class GrossValues(AnalysisBase):
'''
Calculate gross statistics for specified category binMethods at first forecast length
NOTE: currently only calculates statistics at self.fcTDeltas[0]
adjust minimum forecast length in order to calculate
for non-zero forecast lengths, assuming those lengths
are present in db
'''
AnalysisTypeDict = {
#Derived from CategoryBinMethodBase(AnalysisBase)
'CYAxisExpLines': CYAxisExpLines,
'FCAxisExpLines': FCAxisExpLines,
'FCAxisExpLinesDiffCI': FCAxisExpLinesDiffCI,
'CYAxisFCLines': CYAxisFCLines,
'CYAxisBinValLines': CYAxisBinValLines,
#Derived from OneDimBinMethodBase(AnalysisBase)
'CYandBinValAxes2D': CYandBinValAxes2D,
'FCandBinValAxes2D': FCandBinValAxes2D,
'BinValAxisProfile': BinValAxisProfile,
'BinValAxisProfileDiffCI': BinValAxisProfileDiffCI,
# TODO(JJG): TwoDimBinMethodBase(AnalysisBase)
#'BinValAxes2D': BinValAxes2D,
#Derived from AnalysisBase
'BinValAxisPDF': BinValAxisPDF,
'BinValAxisStatsComposite': BinValAxisStatsComposite,
'GrossValues': GrossValues,
}
# NOTES:
# (1) FCAxis* types require non-zero forecast length
# (2) CYAxis* types require > 1 analysis cycle
# (3) CYAxisFCLines requires (1) and (2)
# (4) *DiffCI types require more than one experiment
| 40.842571 | 192 | 0.561279 | #!/usr/bin/env python3
import basic_plot_functions as bpf
import binning_utils as bu
import predefined_configs as pconf
from collections.abc import Iterable
from collections import defaultdict
from copy import deepcopy
import collections
import datetime as dt
import diag_utils as du
import inspect
import logging
import multiprocessing as mp
import numpy as np
from pathlib import Path
import plot_utils as pu
import re
import os
import stat_utils as su
import StatisticsDatabase as sdb
import var_utils as vu
# Statistics eligible for bootstrap confidence intervals: every sampleable
# aggregate statistic except the raw 'Count'.
bootStrapStats = [s for s in su.sampleableAggStats if s != 'Count']

## plot settings
figureFileType = 'pdf'  # one of: 'pdf', 'png'
interiorLabels = True
def anWorkingDir(DiagSpace):
    '''Return the analyses working-directory name for a DiagSpace.'''
    return f'{DiagSpace}_analyses'
###################################
## Base class for all analysisTypes
###################################
class AnalysisBase():
def __init__(self, db, analysisType, diagnosticGroupings = {}):
self.analysisType = analysisType
self.DiagSpaceName = db.DiagSpaceName
self.diagnosticGroupings = diagnosticGroupings
self.logger = logging.getLogger(__name__+'.'+self.DiagSpaceName+'.'+self.analysisType)
## Extract useful variables from the database
self.db = db
self.diagnosticConfigs = db.diagnosticConfigs
self.availableDiagnostics = list(self.diagnosticConfigs.keys())
self.expNames = self.db.expNames
self.nExp = len(self.expNames)
self.cntrlExpName = self.db.cntrlExpName
self.noncntrlExpNames = self.db.noncntrlExpNames
self.fcTDeltas = self.db.fcTDeltas
self.fcTDeltas_totmin = self.db.fcTDeltas_totmin
self.fcMap = list(zip(self.fcTDeltas, self.fcTDeltas_totmin))
self.nFC = len(self.fcTDeltas)
self.cyDTimes = self.db.cyDTimes
self.nCY = len(self.cyDTimes)
self.varNames = self.db.varNames
self.nVars = len(self.varNames)
varLabels = []
for (varName, varUnits) in zip(self.varNames, self.db.varUnitss):
label = varName
if varUnits != vu.miss_s:
label = label+' ('+varUnits+')'
varLabels.append(label)
self.varMap = list(zip(self.varNames, varLabels))
self.chlist = self.db.chlist
self.allBinVals = self.db.allBinVals
self.binNumVals = self.db.binNumVals
self.binNumVals2DasStr = self.db.binNumVals2DasStr
## Establish default configuration
self.blocking = False
self.parallelism = False
# TODO(JJG): decide if nproc is needed
# nproc could be used to initialize workers within a single
# analysisType to be distributed however they would be most useful.
# That would allow for more granularity of computational effort
# but requires that analysisType to be "blocking" due to the nature
# of multiprocessing Pool's.
# self.nproc = nproc
self.requiredStatistics = []
self.requestAggDFW = False
self.blankBinMethodFile = bu.identityBinMethod
self.subWidth = 2.5
self.subAspect = 1.0
self.MAX_FC_SUBFIGS = 6
self.MAX_FC_LINES = 6
## Setup paths
CWD = os.getcwd()
wd = CWD.split('/')[-1]
DSSubDir = anWorkingDir(self.DiagSpaceName)
self.DiagSpacePath = Path('./')
if wd != DSSubDir:
self.DiagSpacePath = self.DiagSpacePath/DSSubDir
self.myFigPath = self.DiagSpacePath/self.analysisType
self.myFigPath.mkdir(parents=True, exist_ok=True)
def binMethodFile(self, binMethod, before = True):
'''
Format the binMethod for file naming
'''
binMethodFile = ''
if binMethod != self.blankBinMethodFile:
if before:
binMethodFile = '_'+binMethod
else:
binMethodFile = binMethod+'_'
return binMethodFile
def fcName(self, diagnosticGroup):
'''
Format the diagnosticGroup for forecast analysisType's
'''
fcDiagName = diagnosticGroup
if self.fcTDeltas[-1] > dt.timedelta(0):
fcDiagName = fcDiagName.replace('omm','omf')
fcDiagName = fcDiagName.replace('omb','omf')
fcDiagName = fcDiagName.replace('bmo','fmo')
fcDiagName = fcDiagName.replace('mmo','fmo')
fcDiagName = fcDiagName.replace('hmo','fmo')
fcDiagName = fcDiagName.replace('bak','fc')
return fcDiagName
def statPlotAttributes(self, diagnosticGroup, statName,
allDiagnosticNames = None):
'''
Define plotting attributes for the combination of diagnosticGroup and statName
'''
ommDiagnostics = ['omb', 'oma', 'omm', 'omf']
mmoDiagnostics = ['bmo', 'amo', 'mmo', 'fmo', 'mmgfsan']
truncateDiagnostics = ommDiagnostics+mmoDiagnostics
diagnosticGroup_ = diagnosticGroup
for diag in truncateDiagnostics:
if diag in diagnosticGroup_:
diagnosticGroup_ = diag
fcDiagName = self.fcName(diagnosticGroup_)
if statName in ['Count']+du.CRStatNames:
statDiagLabel = statName
fcstatDiagLabel = statName
else:
statDiagLabel = statName+'('+diagnosticGroup_+')'
fcstatDiagLabel = statName+'('+fcDiagName+')'
#These only apply to unbounded quantities (omb, oma, ana/bak for velocity, differences)
signDefinite = True
allDiagnosticNames_ = deepcopy(allDiagnosticNames)
if allDiagnosticNames_ is None:
cntrlDiagnosticName = diagnosticGroup_
allDiagnosticNames_ = [diagnosticGroup_]
else:
cntrlDiagnosticName = allDiagnosticNames_[0]
for diag in truncateDiagnostics:
if diag in cntrlDiagnosticName:
cntrlDiagnosticName = diag
for idiag, adiag in enumerate(allDiagnosticNames_):
if diag in adiag:
allDiagnosticNames_[idiag] = diag
cntrlExpDiagnosticLabel = expDiagnosticLabel(self.cntrlExpName, cntrlDiagnosticName, allDiagnosticNames_)
fcstatDiagDiffLabel = statName+'('+fcDiagName+'): [EXP - '+cntrlExpDiagnosticLabel+']'
if statName == 'Mean':
if diagnosticGroup_ in ommDiagnostics:
signDefinite = False
fcstatDiagDiffLabel = statName+': ['+cntrlExpDiagnosticLabel+' - EXP]'
if diagnosticGroup_ in mmoDiagnostics:
signDefinite = False
fcstatDiagDiffLabel = statName+': [EXP - '+cntrlExpDiagnosticLabel+']'
sciTicks = (statName == 'Count')
return statDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite
def UNIONcntrlANDexpCYDTimes(self, dfw, myLoc = {}):
'''
Determine the union of cyDTimes available between
the control and each other experiment at each fcTDelta
'''
cntrlLoc = deepcopy(myLoc)
cntrlLoc['expName'] = self.cntrlExpName
expsCYDTimes = {}
for fcTDelta in self.fcTDeltas:
cntrlLoc['fcTDelta'] = fcTDelta
cntrlCYDTimes = set(dfw.levels('cyDTime', cntrlLoc))
expLoc = deepcopy(cntrlLoc)
for expName in self.expNames:
expLoc['expName'] = expName
expCYDTimes = set(dfw.levels('cyDTime', expLoc))
expsCYDTimes[(expName, fcTDelta)] = list(cntrlCYDTimes & expCYDTimes)
if len(cntrlCYDTimes) != len(expCYDTimes):
self.logger.warning(self.cntrlExpName+' and '+expName+' have different number of CYDTimes at forecast length ', fcTDelta, ' Only using common CYDTimes for CI calculation.')
return expsCYDTimes
def analyze(self, workers = None):
self.logger.info('analyze()')
if self.blocking:
# analyses with internal blocking
self.analyze_()
elif self.parallelism:
# each analysis determines how to split external workers
self.analyze_(workers)
else:
# divide workers acros analyses without internal parallelism/blocking
workers.apply_async(self.analyze_)
def analyze_(self, workers = None):
'''
virtual method
'''
raise NotImplementedError()
def expDiagnosticLabel(expName, diagnosticName, allDiagnosticNames):
    '''
    Label an experiment line; the diagnostic name is appended only when
    multiple diagnostics are plotted together.
    '''
    if len(allDiagnosticNames) <= 1:
        return expName
    return expName+'-'+diagnosticName
def categoryBinValsAttributes(dfw, fullBinVar, binMethod, options):
    '''
    Utility function for providing an ordered list of
    pairs of binVals and associated labels for
    category binMethods in the context of a DFWrapper
    '''
    binVar = vu.varDictAll[fullBinVar][1]

    availableBinVals = dfw.levels('binVal')
    unitsValues = dfw.uniquevals('binUnits')
    assert (len(unitsValues) != 0 and len(availableBinVals) > 0), \
        'ERROR: categoryBinValsAttributes received invalid binVar/binMethod'
    binUnits = unitsValues[0]

    # start from the configured ordering for this binVar/binMethod,
    # falling back to the order found in the database
    configured = deepcopy(pconf.binVarConfigs.get(
        fullBinVar, {}).get(binMethod, {}).get('values', availableBinVals))

    if isinstance(configured, str) or not isinstance(configured, Iterable):
        orderedBinVals = [configured]
    else:
        orderedBinVals = list(configured)

    # append database values missing from the configured order...
    for val in availableBinVals:
        if val not in orderedBinVals:
            orderedBinVals.append(val)
    # ...then drop configured values absent from the database
    orderedBinVals = [val for val in orderedBinVals if val in availableBinVals]

    # build one title per bin value; numeric bins get "var=value [units]"
    binTitles = []
    for val in orderedBinVals:
        if pu.isfloat(val) or pu.isint(val):
            title = ' @ '+binVar+'='+val
            if binUnits != vu.miss_s:
                title = title+' '+binUnits
        else:
            title = ' @ '+val
        binTitles.append(title)

    return list(zip(orderedBinVals, binTitles))
#=============
# 1-D figures
#=============
class CategoryBinMethodBase(AnalysisBase):
'''
Base class used to analyze statistics across binMethods with zero-dimensioned or
category binValues, e.g., QC flag, named latitude band, cloudiness regime, surface type
'''
def __init__(self, db, analysisType, diagnosticGroupings):
super().__init__(db, analysisType, diagnosticGroupings)
self.parallelism = True
self.maxBinVarTier = 2
# default binVar/binMethod combinations
self.binVarDict = {
(vu.obsVarQC, bu.goodQCMethod): {'binVarTier': 1},
(vu.obsVarLat, bu.latbandsMethod): {'binVarTier': 1},
(vu.obsVarCldFrac, bu.cloudbandsMethod): {'binVarTier': 1},
# (vu.modVarLat, bu.latbandsMethod): {'binVarTier': 1},
(vu.noBinVar, bu.noBinMethod): {'binVarTier': 1},
(vu.obsRegionBinVar, bu.geoirlatlonboxMethod): {'binVarTier': 2},
(vu.modelRegionBinVar, bu.geoirlatlonboxMethod): {'binVarTier': 2},
(vu.obsVarPrs, bu.PjetMethod): {'binVarTier': 3},
(vu.obsVarAlt, bu.altjetMethod): {'binVarTier': 3},
(vu.obsVarLandFrac, bu.surfbandsMethod): {'binVarTier': 3},
}
self.maxDiagnosticsPerAnalysis = 10 // self.nExp
def subplotArrangement(self, binValsMap):
# subplot configuration
if len(binValsMap) > 1:
nxplots = len(binValsMap)
nyplots = self.nVars
nsubplots = nxplots * nyplots
else:
nsubplots = self.nVars
nxplots = np.int(np.ceil(np.sqrt(nsubplots)))
nyplots = np.int(np.ceil(np.true_divide(nsubplots, nxplots)))
return nxplots, nyplots, nsubplots
def analyze_(self, workers = None):
useWorkers = (not self.blocking and self.parallelism and workers is not None)
# TODO(JJG): construct member Diagnostic objects (create new class) from
# diagnosticConfigs instead of referencing dictionary
# entries below.
# TODO(JJG): use same color, vary line style across diagnosticGroupings
diagnosticGrouped = {}
for diag in self.availableDiagnostics:
diagnosticGrouped[diag] = False
diagnosticGroupings = deepcopy(self.diagnosticGroupings)
for group in list(diagnosticGroupings.keys()):
diags = diagnosticGroupings[group]
if (len(diags) > self.maxDiagnosticsPerAnalysis or
not set(diags).issubset(set(list(self.availableDiagnostics)))):
del diagnosticGroupings[group]
continue
for diag in diags: diagnosticGrouped[diag] = True
for diag in self.availableDiagnostics:
if not diagnosticGrouped[diag]:
diagnosticGroupings[diag] = [diag]
for diagnosticGroup, diagnosticNames in diagnosticGroupings.items():
if len(diagnosticNames) > self.maxDiagnosticsPerAnalysis: continue
if len(set(diagnosticNames) & set(self.availableDiagnostics)) == 0: continue
diagnosticConfigs = {}
analysisStatistics = set([])
for diagnosticName in diagnosticNames:
diagnosticConfigs[diagnosticName] = deepcopy(self.diagnosticConfigs[diagnosticName])
analysisStatistics = set(list(analysisStatistics) +
diagnosticConfigs[diagnosticName]['analysisStatistics'])
if not set(self.requiredStatistics).issubset(analysisStatistics): continue
diagLoc = {'diagName': diagnosticNames}
diagBinVars = self.db.dfw.levels('binVar', diagLoc)
diagBinMethods = self.db.dfw.levels('binMethod', diagLoc)
for (fullBinVar, binMethod), options in self.binVarDict.items():
if options.get('binVarTier', 10) > self.maxBinVarTier: continue
binVar = vu.varDictAll[fullBinVar][1]
if (binVar not in diagBinVars or
binMethod not in diagBinMethods): continue
self.logger.info(diagnosticGroup+', '+binVar+', '+binMethod)
if useWorkers:
workers.apply_async(self.innerloopsWrapper,
args = (diagnosticGroup, diagnosticConfigs, fullBinVar, binMethod, analysisStatistics, options))
else:
self.innerloopsWrapper(
diagnosticGroup, diagnosticConfigs, fullBinVar, binMethod, analysisStatistics, options)
def innerloopsWrapper(self,
diagnosticGroup, diagnosticConfigs, fullBinVar, binMethod, analysisStatistics, options):
binVar = vu.varDictAll[fullBinVar][1]
# narrow mydfwDict by binVar and binMethod to reduce run-time and memory
myLoc = {}
myLoc['binVar'] = binVar
myLoc['binMethod'] = binMethod
mydfwDict = {'dfw': self.db.loc(myLoc)}
# aggregate statistics when requested
if self.requestAggDFW:
mydfwDict['agg'] = sdb.DFWrapper.fromAggStats(mydfwDict['dfw'], ['cyDTime'])
sdb.createORreplaceDerivedDiagnostics(mydfwDict['agg'], diagnosticConfigs)
# further narrow mydfwDict by diagName
# NOTE: derived diagnostics may require multiple diagName values;
# can only narrow by diagName after aggregation
myLoc['diagName'] = list(diagnosticConfigs.keys())
for key in mydfwDict.keys():
mydfwDict[key] = sdb.DFWrapper.fromLoc(mydfwDict[key], myLoc)
binValsMap = categoryBinValsAttributes(
mydfwDict['dfw'], fullBinVar, binMethod, options)
nxplots, nyplots, nsubplots = self.subplotArrangement(binValsMap)
for statName in analysisStatistics:
if statName not in options.get('onlyStatNames', analysisStatistics): continue
self.innerloops(
mydfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots)
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots):
'''
virtual method
'''
raise NotImplementedError()
class CYAxisExpLines(CategoryBinMethodBase):
'''
Creates a timeseries figure between firstCycleDTime and lastCycleDTime
for each forecast length between fcTDeltaFirst and fcTDeltaLast
- x-axis: cycle initial time
- line: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar, statistic, and FC lead time (if applicable)
'''
def __init__(self, db, analysisType, diagnosticGroupings):
super().__init__(db, analysisType, diagnosticGroupings)
self.subWidth = 1.9
self.subAspect = 0.75
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots):
if self.nCY < 2: return
bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite = \
self.statPlotAttributes(diagnosticGroup, statName, myLoc['diagName'])
myPath = self.myFigPath/diagnosticGroup
myPath.mkdir(parents=True, exist_ok=True)
lineLoc = {}
axisLimitsLoc = {}
#file loop 1
for (fcTDelta, fcTDelta_totmin) in self.fcMap:
lineLoc['fcTDelta'] = fcTDelta
axisLimitsLoc['fcTDelta'] = fcTDelta
# establish a new figure
fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
iplot = 0
#subplot loop 1
for (varName, varLabel) in self.varMap:
lineLoc['varName'] = varName
axisLimitsLoc['varName'] = varName
#subplot loop 2
for binVal, binTitle in binValsMap:
lineLoc['binVal'] = binVal
axisLimitsLoc['binVal'] = binVal
# use common y-axis limits across axisLimitsLoc database locations
if statName == 'Count':
dmin = 0.
else:
dmin = dfwDict['dfw'].min(axisLimitsLoc, statName)
dmax = dfwDict['dfw'].max(axisLimitsLoc, statName)
# collect statName for all lines on this subplot
linesVals = []
linesLabel = []
linesGroup = []
for expName in self.expNames:
lineLoc['expName'] = expName
for diagnosticName in myLoc['diagName']:
linesGroup.append(expName)
linesLabel.append(expDiagnosticLabel(
expName, diagnosticName, myLoc['diagName']))
lineLoc['diagName'] = diagnosticName
lineCYDTimes = dfwDict['dfw'].levels('cyDTime', lineLoc)
lineVals = np.full(self.nCY, np.NaN)
cyLoc = deepcopy(lineLoc)
for cyDTime in lineCYDTimes:
icy = self.cyDTimes.index(cyDTime)
cyLoc['cyDTime'] = cyDTime
lineVals[icy] = dfwDict['dfw'].loc(cyLoc, statName)
linesVals.append(lineVals)
# define subplot title
title = varLabel+binTitle
# perform subplot agnostic plotting (all expNames)
bpf.plotTimeSeries(
fig,
self.cyDTimes, linesVals, linesLabel,
title, bgstatDiagLabel,
sciTicks, False, signDefinite,
nyplots, nxplots, nsubplots, iplot,
dmin = dmin, dmax = dmax,
interiorLabels = interiorLabels)
iplot = iplot + 1
# end binVal loop
# end varMap loop
# save each figure
filename = myPath/('%s%s_TSeries_%smin_%s_%s_%s'%(
myLoc['binVar'], self.binMethodFile(myLoc['binMethod']), fcTDelta_totmin,
self.DiagSpaceName, diagnosticGroup, statName))
pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)
# end fcMap loop
class FCAxisExpLines(CategoryBinMethodBase):
'''
Creates a timeseries figure between fcTDeltaFirst and fcTDeltaLast containing
aggregated statistics for the period between firstCycleDTime and lastCycleDTime
- x-axis: forecast duration
- line: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar and statistic
'''
def __init__(self, db, analysisType, diagnosticGroupings):
super().__init__(db, analysisType, diagnosticGroupings)
self.requestAggDFW = True
self.subWidth = 1.9
self.subAspect = 0.9
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots):
if self.nFC < 2: return
bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite = \
self.statPlotAttributes(diagnosticGroup, statName, myLoc['diagName'])
fcDiagName = self.fcName(diagnosticGroup)
myPath = self.myFigPath/fcDiagName
myPath.mkdir(parents=True, exist_ok=True)
lineLoc = {}
axisLimitsLoc = {}
# establish a new figure
fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
iplot = 0
#subplot loop 1
for (varName, varLabel) in self.varMap:
lineLoc['varName'] = varName
axisLimitsLoc['varName'] = varName
#subplot loop 2
for binVal, binTitle in binValsMap:
lineLoc['binVal'] = binVal
axisLimitsLoc['binVal'] = binVal
# use common y-axis limits across axisLimitsLoc database locations
if statName == 'Count':
dmin = 0.
else:
dmin = dfwDict['agg'].min(axisLimitsLoc, statName)
dmax = dfwDict['agg'].max(axisLimitsLoc, statName)
#collect aggregated statNames, varying across fcTDelta
linesVals = []
linesLabel = []
linesGroup = []
for expName in self.expNames:
lineLoc['expName'] = expName
for diagnosticName in myLoc['diagName']:
linesGroup.append(expName)
linesLabel.append(expDiagnosticLabel(
expName, diagnosticName, myLoc['diagName']))
lineLoc['diagName'] = diagnosticName
lineFCTDeltas = dfwDict['agg'].levels('fcTDelta', lineLoc)
lineVals = np.full(self.nFC, np.NaN)
fcLoc = deepcopy(lineLoc)
for fcTDelta in lineFCTDeltas:
ifc = self.fcTDeltas.index(fcTDelta)
fcLoc['fcTDelta'] = fcTDelta
lineVals[ifc] = dfwDict['agg'].loc(fcLoc, statName)
linesVals.append(lineVals)
# define subplot title
title = varLabel+binTitle
# perform subplot agnostic plotting (all expNames)
bpf.plotTimeSeries(
fig,
self.fcTDeltas, linesVals, linesLabel,
title, fcstatDiagLabel,
sciTicks, False, signDefinite,
nyplots, nxplots, nsubplots, iplot,
dmin = dmin, dmax = dmax,
interiorLabels = interiorLabels)
iplot = iplot + 1
# end statMap loop
# end varMap loop
# save each figure
filename = myPath/('%s%s_TSeries_%s-%smin_%s_%s_%s'%(
myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
self.DiagSpaceName, fcDiagName, statName))
pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)
class FCAxisExpLinesDiffCI(CategoryBinMethodBase):
'''
Similar to FCAxisExpLines, except
- shows difference between experiment(s) and control
- control is selected using cntrlExpIndex
- statistics are narrowed down by bootStrapStats
- confidence intervals (CI) are shown at each lead time
- line+shaded region: per experiment
- subplot: combination of DiagSpace variable and binVal
- file: combination of binVar and statistic
'''
def __init__(self, db, analysisType, diagnosticGroupings):
super().__init__(db, analysisType, diagnosticGroupings)
# OPTIONAL: implement fine-grained parallelism for bootStrapping
#self.blocking = True
self.subWidth = 1.9
self.subAspect = 0.9
for key in self.binVarDict:
if 'onlyStatNames' in self.binVarDict[key]:
self.binVarDict[key]['onlyStatNames'] += bootStrapStats
else:
self.binVarDict[key]['onlyStatNames'] = bootStrapStats
def innerloops(self,
dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
nsubplots, nxplots, nyplots):
if self.nFC < 2: return
if self.nExp * len(myLoc['diagName']) < 2: return
if self.cntrlExpName not in dfwDict['dfw'].levels('expName'): return
bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite = \
self.statPlotAttributes(diagnosticGroup, statName, myLoc['diagName'])
fcDiagName = self.fcName(diagnosticGroup)
myPath = self.myFigPath/fcDiagName
myPath.mkdir(parents=True, exist_ok=True)
# Only bootstrap over the union of cyDTimes available
# from both experiments at each fcTDelta
myExpsCYDTimes = self.UNIONcntrlANDexpCYDTimes(dfwDict['dfw'])
# establish a new figure
fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
iplot = 0
binValLoc = {}
#subplot loop 1
for (varName, varLabel) in self.varMap:
binValLoc['varName'] = varName
#subplot loop 2
for binVal, binTitle in binValsMap:
binValLoc['binVal'] = binVal
# intermediate tempdfw reduces extraction time in inner loops
tempdfw = sdb.DFWrapper.fromLoc(dfwDict['dfw'], binValLoc)
cntrlLoc = deepcopy(binValLoc)
cntrlLoc['expName'] = self.cntrlExpName
# define subplot title
title = varLabel+binTitle
linesVals = defaultdict(list)
linesLabel = []
linesGroup = []
cntrlLoc['diagName'] = myLoc['diagName'][0]
for expName in self.expNames:
for diagnosticName in myLoc['diagName']:
if (expName == cntrlLoc['expName'] and
diagnosticName == cntrlLoc['diagName']): continue
linesGroup.append(expName)
linesLabel.append(expDiagnosticLabel(
expName, diagnosticName, myLoc['diagName']))
lineVals = defaultdict(list)
for fcTDelta in self.fcTDeltas:
cntrlLoc['cyDTime'] = myExpsCYDTimes[(expName, fcTDelta)]
cntrlLoc['fcTDelta'] = fcTDelta
expLoc = deepcopy(cntrlLoc)
expLoc['diagName'] = diagnosticName
expLoc['expName'] = expName
X = tempdfw.loc(expLoc)
Y = tempdfw.loc(cntrlLoc)
ciVals = su.bootStrapClusterFunc(
X, Y,
n_samples = 10000,
statNames = [statName])
for trait in su.ciTraits:
lineVals[trait] += [ciVals[statName][trait][0]]
for trait in su.ciTraits:
linesVals[trait].append(lineVals[trait])
# use specific y-axis limits for each varName
dmin = np.nanmin(linesVals[su.cimin])
dmax = np.nanmax(linesVals[su.cimax])
# perform subplot agnostic plotting (all expNames)
bpf.plotTimeSeries(
fig,
self.fcTDeltas, linesVals[su.cimean],
linesLabel,
title,
fcstatDiagDiffLabel,
False, False, False,
nyplots, nxplots, nsubplots, iplot,
linesValsMinCI = linesVals[su.cimin],
linesValsMaxCI = linesVals[su.cimax],
dmin = dmin, dmax = dmax,
lineAttribOffset = 1,
interiorLabels = interiorLabels)
iplot = iplot + 1
# end binValsMap loop
# end varName loop
# save each figure
filename = myPath/('%s%s_TSeries_%s-%smin_%s_%s_%s'%(
myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
self.DiagSpaceName, fcDiagName, statName))
pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)
class CYAxisFCLines(CategoryBinMethodBase):
  '''
  Similar to CYAxisExpLines, except
    each line is for a different forecast lead time and
    each experiment is in a different file
    - x-axis: valid time of forecast
    -   line: per FC lead time
    - subplot: combination of DiagSpace variable and binVal
    -    file: combination of binVar, statistic, and experiment
    - self.MAX_FC_LINES determines number of FC lead time lines to include
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # figure geometry
    self.subWidth = 1.9
    self.subAspect = 0.75

    self.maxDiagnosticsPerAnalysis = 1

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
    nsubplots, nxplots, nyplots):
    '''
    Generate one figure per experiment; each subplot (one per
    (varName, binVal) combination) contains one time-series line per
    forecast lead time, plotted against forecast valid time.

    Args:
      dfwDict: dict of DFWrapper objects; 'dfw' holds per-cycle statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot (e.g., 'Count', 'Mean', 'RMS')
      binValsMap: list of (binVal, binTitle) tuples, one subplot each
      options: per-binVar plotting options (unused here)
      nsubplots, nxplots, nyplots: subplot arrangement
    '''
    # time series across lead times require >= 2 FC lengths and >= 2 cycles
    if self.nFC < 2 or self.nCY < 2: return

    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName)

    fcDiagName = self.fcName(diagnosticGroup)
    myPath = self.myFigPath/fcDiagName
    myPath.mkdir(parents=True, exist_ok=True)

    lineLoc = {}
    axisLimitsLoc = {}

    #file loop 1
    for expName in self.expNames:
      lineLoc['expName'] = expName

      # establish a new figure
      fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)

      iplot = 0

      #subplot loop 1
      for (varName, varLabel) in self.varMap:
        lineLoc['varName'] = varName
        axisLimitsLoc['varName'] = varName

        #subplot loop 2
        for binVal, binTitle in binValsMap:
          lineLoc['binVal'] = binVal
          axisLimitsLoc['binVal'] = binVal

          # use common y-axis limits across axisLimitsLoc database locations
          if statName == 'Count':
            dmin = 0.
          else:
            dmin = dfwDict['dfw'].min(axisLimitsLoc, statName)
          dmax = dfwDict['dfw'].max(axisLimitsLoc, statName)

          # collect statName for all lines on this subplot, letting cyDTime vary
          xsVals = []
          linesVals = []
          self.fcTDeltas_labels = []
          for fcTDelta in self.fcTDeltas:
            lineLoc['fcTDelta'] = fcTDelta

            # calculate valid time for x-axis
            xVals = []
            for cyDTime in self.cyDTimes:
              xVals.append(cyDTime+fcTDelta)
            xsVals.append(xVals)

            #Setting to avoid over-crowding
            if self.fcTDeltas.index(fcTDelta) > (self.MAX_FC_LINES-1): continue

            self.fcTDeltas_labels.append(
              pu.timeDeltaTicks(fcTDelta.total_seconds(),0))

            lineCYDTimes = dfwDict['dfw'].levels('cyDTime', lineLoc)

            # pre-fill with NaN so missing cycle times leave gaps in the line
            # NOTE: np.NaN alias was removed in NumPy 2.0; use np.nan
            lineVals = np.full(self.nCY, np.nan)
            cyLoc = deepcopy(lineLoc)
            for cyDTime in lineCYDTimes:
              icy = self.cyDTimes.index(cyDTime)
              cyLoc['cyDTime'] = cyDTime
              lineVals[icy] = dfwDict['dfw'].loc(cyLoc, statName)

            linesVals.append(lineVals)

          # define subplot title
          title = varLabel+binTitle

          # perform subplot agnostic plotting (all expNames)
          bpf.plotTimeSeries(
            fig,
            xsVals, linesVals, self.fcTDeltas_labels,
            title, bgstatDiagLabel,
            sciTicks, False, signDefinite,
            nyplots, nxplots, nsubplots, iplot,
            dmin = dmin, dmax = dmax,
            interiorLabels = interiorLabels)

          iplot = iplot + 1

        # end binValsMap loop

      # end varMap loop

      # sanitize the experiment name for use in the file name
      # (raw strings avoid invalid-escape-sequence warnings in Python 3.12+)
      expFileName = re.sub(r'\.', '', re.sub(r'\s+', '-', expName))
      filename = myPath/('%s%s_TSeries_%s_%s_%s_%s'%(
                 myLoc['binVar'], self.binMethodFile(myLoc['binMethod']), expFileName,
                 self.DiagSpaceName, fcDiagName, statName))

      pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)

    # end expName loop
###########################################
## Figures with individual lines per binVal
###########################################
class BinValLinesAnalysisType(CategoryBinMethodBase):
  '''
  Base class for analyses that draw one line per binVal, providing a
  curated binVarDict and an (nExp x nVars) subplot arrangement.
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # TODO(JJG): allow for multiple binMethods in one figure, such as
    #self.binVarDict = {
    #    (vu.obsVarQC, [bu.goodQCMethod, bu.badQCMethod]): {
    #        'onlyStatNames': ['Count'],
    #    },
    #}
    # OR if this is the only case for which it's needed
    # TODO: replace badQCMethod with all-encompassing QC Method, e.g., allQCMethod

    # QC-based bins are only meaningful for the Count statistic
    countOnlyTier1 = {'onlyStatNames': ['Count'], 'binVarTier': 1}
    self.binVarDict = {
      (vu.obsVarQC, bu.badQCMethod): dict(countOnlyTier1),
      (vu.obsVarQC, bu.allQCMethod): dict(countOnlyTier1),
      (vu.obsVarLat, bu.latbandsMethod): {'binVarTier': 1},
      # (vu.modVarLat, bu.latbandsMethod): {'binVarTier': 1},
      (vu.obsVarCldFrac, bu.cloudbandsMethod): {'binVarTier': 1},
      (vu.obsVarLandFrac, bu.surfbandsMethod): {'binVarTier': 3},
    }

  def subplotArrangement(self, dummy):
    '''Return (nxplots, nyplots, nsubplots) = (nExp, nVars, nExp*nVars).'''
    nx = self.nExp
    ny = self.nVars
    return nx, ny, nx * ny
class CYAxisBinValLines(BinValLinesAnalysisType):
  '''
  Similar to CYAxisExpLines, except
    each line is for a different binVal (e.g., latitude band, cloudiness, etc.)
    -    line: binVals for named bins (e.g., NXTro, Tro, SXTro for latitude)
    - subplot: column by experiment, row by DiagSpace variable
    -    file: combination of statistic and forecast length
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # figure geometry
    self.subWidth = 1.9
    self.subAspect = 0.75

    self.maxDiagnosticsPerAnalysis = 1

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
    nsubplots, nxplots, nyplots):
    '''
    Generate one figure per forecast length; each subplot (column by
    experiment, row by variable) contains one cycle-time series per binVal.

    Args:
      dfwDict: dict of DFWrapper objects; 'dfw' holds per-cycle statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot (e.g., 'Count', 'Mean', 'RMS')
      binValsMap: list of (binVal, binTitle) tuples, one line each
      options: per-binVar plotting options (unused here)
      nsubplots, nxplots, nyplots: subplot arrangement
    '''
    # time series plots require at least 2 cycle times
    if self.nCY < 2: return

    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, sciTicks, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName)

    myPath = self.myFigPath/diagnosticGroup
    myPath.mkdir(parents=True, exist_ok=True)

    lineLoc = {}
    binVals = []
    for binVal, binTitle in binValsMap: binVals.append(binVal)
    # axis limits are computed across all binVals at once
    lineLoc['binVal'] = binVals
    axisLimitsLoc = deepcopy(lineLoc)

    #file loop 1
    for (fcTDelta, fcTDelta_totmin) in self.fcMap:
      lineLoc['fcTDelta'] = fcTDelta
      axisLimitsLoc['fcTDelta'] = fcTDelta

      # establish a new figure
      fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
      iplot = 0

      #subplot loop 1
      for (varName, varLabel) in self.varMap:
        lineLoc['varName'] = varName
        axisLimitsLoc['varName'] = varName

        # use common y-axis limits across axisLimitsLoc database locations
        if statName == 'Count':
          dmin = 0.
        else:
          dmin = dfwDict['dfw'].min(axisLimitsLoc, statName)
        dmax = dfwDict['dfw'].max(axisLimitsLoc, statName)

        #subplot loop 2
        for expName in self.expNames:
          lineLoc['expName'] = expName

          # collect statName for all lines on this subplot, letting cyDTime vary
          linesVals = []
          for binVal in binVals:
            lineLoc['binVal'] = binVal
            lineCYDTimes = dfwDict['dfw'].levels('cyDTime', lineLoc)

            # pre-fill with NaN so missing cycle times leave gaps in the line
            # NOTE: np.NaN alias was removed in NumPy 2.0; use np.nan
            lineVals = np.full(self.nCY, np.nan)
            cyLoc = deepcopy(lineLoc)
            for cyDTime in lineCYDTimes:
              icy = self.cyDTimes.index(cyDTime)
              cyLoc['cyDTime'] = cyDTime
              lineVals[icy] = dfwDict['dfw'].loc(cyLoc, statName)

            linesVals.append(lineVals)

          # end binVal loop

          # define subplot title
          title = expName+'\n'+varLabel

          # perform subplot agnostic plotting (all expNames)
          bpf.plotTimeSeries(
            fig,
            self.cyDTimes, linesVals, binVals,
            title, bgstatDiagLabel,
            sciTicks, False, signDefinite,
            nyplots, nxplots, nsubplots, iplot,
            dmin = dmin, dmax = dmax,
            interiorLabels = interiorLabels)

          iplot = iplot + 1

        # end expName Loop

      # end varMap Loop

      filename = myPath/('%s%s_TSeries_%smin_%s_%s_%s'%(
                 myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
                 fcTDelta_totmin, self.DiagSpaceName,
                 diagnosticGroup, statName))

      pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)

    # end fcMap loop
# TODO(JJG): implement FCAxisBinValLines similar to FCAxisExpLines
#########################################################
## Figures with binVal on one axis, i.e., 2D and profiles
#########################################################
class OneDimBinMethodBase(AnalysisBase):
  '''
  Base class used to analyze statistics across binMethods with one-dimensional binValues
    that are assigned numerical values, e.g., altitude, pressure, latitude, cloud fraction
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)
    self.parallelism = True
    self.maxBinVarTier = 1

    # default binVars; each entry selects a plotting function and a tier
    # that is compared against self.maxBinVarTier in analyze_()
    self.binVarDict = {
      vu.obsVarAlt: {'profilefunc': bpf.plotProfile, 'binVarTier': 1},
      vu.obsVarACI: {'profilefunc': bpf.plotSeries, 'binVarTier': 2},
      vu.obsVarCldFrac: {'profilefunc': bpf.plotSeries, 'binVarTier': 1},
      vu.obsVarLat: {'profilefunc': bpf.plotProfile, 'binVarTier': 1},
      vu.obsVarPrs: {'profilefunc': bpf.plotProfile, 'binVarTier': 1},
      vu.obsVarSCI: {'profilefunc': bpf.plotSeries, 'binVarTier': 2},
      # vu.modVarLat: {'profilefunc': bpf.plotProfile, 'binVarTier': 1},
      vu.modVarLev: {'profilefunc': bpf.plotProfile, 'binVarTier': 1},
      vu.obsVarGlint: {'profilefunc': bpf.plotSeries, 'binVarTier': 3},
      vu.obsVarLandFrac: {'profilefunc': bpf.plotSeries, 'binVarTier': 3},
      vu.obsVarLT: {'profilefunc': bpf.plotSeries, 'binVarTier': 3},
      # BUGFIX: key was misspelled 'binbinVarTier', causing obsVarSenZen to
      # default to tier 10 and always be skipped by the maxBinVarTier check
      vu.obsVarSenZen: {'profilefunc': bpf.plotSeries, 'binVarTier': 3},
    }
    self.maxDiagnosticsPerAnalysis = 10 // self.nExp

  def analyze_(self, workers = None):
    '''
    Loop over diagnostic groupings, binVars, and binMethods, dispatching
    innerloopsWrapper either to the worker pool (when parallelism is
    enabled and not blocking) or synchronously.

    Args:
      workers: optional multiprocessing pool used with apply_async
    '''
    useWorkers = (not self.blocking and self.parallelism and workers is not None)

    # group diagnostics; any diagnostic not covered by a valid grouping
    # becomes its own singleton group
    diagnosticGrouped = {}
    for diag in self.availableDiagnostics:
      diagnosticGrouped[diag] = False

    diagnosticGroupings = deepcopy(self.diagnosticGroupings)
    for group in list(diagnosticGroupings.keys()):
      diags = diagnosticGroupings[group]
      # drop groupings that are too large or reference unavailable diagnostics
      if (len(diags) > self.maxDiagnosticsPerAnalysis or
         not set(diags).issubset(set(list(self.availableDiagnostics)))):
        del diagnosticGroupings[group]
        continue
      for diag in diags: diagnosticGrouped[diag] = True

    for diag in self.availableDiagnostics:
      if not diagnosticGrouped[diag]:
        diagnosticGroupings[diag] = [diag]

    for diagnosticGroup, diagnosticNames in diagnosticGroupings.items():
      if len(diagnosticNames) > self.maxDiagnosticsPerAnalysis: continue
      if len(set(diagnosticNames) & set(self.availableDiagnostics)) == 0: continue
      diagnosticConfigs = {}
      analysisStatistics = set([])
      for diagnosticName in diagnosticNames:
        diagnosticConfigs[diagnosticName] = deepcopy(self.diagnosticConfigs[diagnosticName])
        analysisStatistics = set(list(analysisStatistics) +
                                 diagnosticConfigs[diagnosticName]['analysisStatistics'])
      if not set(self.requiredStatistics).issubset(analysisStatistics): continue

      diagBinVars = self.db.dfw.levels('binVar', {'diagName': diagnosticNames})

      for fullBinVar, options in self.binVarDict.items():
        if options.get('binVarTier', 10) > self.maxBinVarTier: continue
        binVar = vu.varDictAll[fullBinVar][1]
        if (binVar not in diagBinVars): continue

        binVarLoc = {}
        binVarLoc['diagName'] = diagnosticNames
        binVarLoc['binVar'] = binVar
        binVarLoc['binVal'] = self.binNumVals2DasStr

        #Make figures for all binMethods
        binMethods = self.db.dfw.levels('binMethod', binVarLoc)
        for binMethod in binMethods:

          self.logger.info(diagnosticGroup+', '+binVar+', '+binMethod)

          if useWorkers:
            workers.apply_async(self.innerloopsWrapper,
              args = (diagnosticGroup, diagnosticConfigs, binVar, binMethod, analysisStatistics, options))
          else:
            self.innerloopsWrapper(
              diagnosticGroup, diagnosticConfigs, binVar, binMethod, analysisStatistics, options)

  def innerloopsWrapper(self,
    diagnosticGroup, diagnosticConfigs, binVar, binMethod, analysisStatistics, options):
    '''
    Prepare the narrowed database slices and bin metadata for one
    (diagnosticGroup, binVar, binMethod) combination, then call
    self.innerloops once per analysis statistic.
    '''
    myLoc = {}
    myLoc['binVar'] = binVar
    myLoc['binVal'] = self.binNumVals2DasStr
    myLoc['binMethod'] = binMethod

    # narrow mydfwDict by binVar, binVal, and binMethod to reduce run-time and memory
    mydfwDict = {'dfw': self.db.loc(myLoc)}

    # aggregate statistics when requested
    if self.requestAggDFW:
      mydfwDict['agg'] = sdb.DFWrapper.fromAggStats(mydfwDict['dfw'], ['cyDTime'])
      sdb.createORreplaceDerivedDiagnostics(mydfwDict['agg'], diagnosticConfigs)

    # further narrow mydfwDict by diagName
    # NOTE: derived diagnostics may require multiple diagName values;
    # can only narrow by diagName after aggregation
    myLoc['diagName'] = list(diagnosticConfigs.keys())
    for key in mydfwDict.keys():
      mydfwDict[key] = sdb.DFWrapper.fromLoc(mydfwDict[key], myLoc)

    ## Get all float/int binVals associated with binVar
    binVals = mydfwDict['dfw'].levels('binVal')
    binUnits = mydfwDict['dfw'].uniquevals('binUnits')[0]

    # assume all bins represent same variable/units
    indepLabel = binVar
    if binUnits != vu.miss_s:
      indepLabel = indepLabel+' ('+binUnits+')'

    # bin info
    binNumVals = []
    for binVal in binVals:
      ibin = self.allBinVals.index(binVal)
      binNumVals.append(self.binNumVals[ibin])

    # invert independent variable axis for pressure bins
    pressure_dict = vu.varDictAll.get(vu.obsVarPrs,['',''])
    invert_ind_axis = (pressure_dict[1] == binVar)

    # sort bins by numeric value
    indices = list(range(len(binNumVals)))
    indices.sort(key=binNumVals.__getitem__)
    binNumVals = list(map(binNumVals.__getitem__, indices))
    binVals = list(map(binVals.__getitem__, indices))

    myBinConfigs = {
      'str': binVals,
      'values': binNumVals,
      'indepLabel': indepLabel,
      'invert_ind_axis': invert_ind_axis,
    }
    # a profile/series needs at least two bins to be meaningful
    if len(binVals) < 2: return

    # only analyze variables that have non-zero Count when sliced by myLoc
    nVarsLoc = 0
    varMapLoc = []
    for (varName, varLabel) in self.varMap:
      countDF = mydfwDict['dfw'].loc({'varName': varName}, 'Count')
      if countDF.shape[0] > 0:
        counts = countDF.to_numpy()
        if np.nansum(counts) > 0:
          nVarsLoc += 1
          varMapLoc.append((varName, varLabel))

    for statName in analysisStatistics:
      if statName not in options.get('onlyStatNames', analysisStatistics): continue

      self.innerloops(
        mydfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options)

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
    '''
    virtual method; subclasses implement the actual plotting
    '''
    raise NotImplementedError()
class CYandBinValAxes2D(OneDimBinMethodBase):
  '''
  Creates raster maps with binVar binVals on y-axis
    - only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
    - subplot: column by experiment, row by DiagSpace variable
    -    file: combination of binVar, binMethod, statistic, and FC lead time
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # figure geometry
    self.subWidth = 2.4
    self.subAspect = 0.65

    self.maxDiagnosticsPerAnalysis = 1

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
    '''
    Generate one raster figure per forecast length with cycle time on the
    x-axis and binVal on the y-axis; one subplot per (variable, experiment).

    Args:
      dfwDict: dict of DFWrapper objects; 'dfw' holds per-cycle statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot (e.g., 'Count', 'Mean', 'RMS')
      nVarsLoc, varMapLoc: non-empty variables and their labels
      myBinConfigs: bin metadata ('str', 'values', 'indepLabel', 'invert_ind_axis')
      options: per-binVar plotting options (unused here)
    '''
    # time series require at least 2 cycle times
    if self.nCY < 2: return

    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, scilogScale, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName)

    myPath = self.myFigPath/diagnosticGroup
    myPath.mkdir(parents=True, exist_ok=True)

    nxplots = self.nExp
    nyplots = nVarsLoc
    nsubplots = nxplots * nyplots

    planeLoc = {}
    axisLimitsLoc = {}

    #file loop 1
    for (fcTDelta, fcTDelta_totmin) in self.fcMap:
      planeLoc['fcTDelta'] = fcTDelta

      # establish a new figure
      fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)

      iplot = 0

      #subplot loop 1
      for (varName, varLabel) in varMapLoc:
        planeLoc['varName'] = varName
        axisLimitsLoc['varName'] = varName

        # use common c-axis limits across axisLimitsLoc database locations
        if statName == 'Count':
          dmin = 0.
        else:
          dmin = dfwDict['dfw'].min(axisLimitsLoc, statName)
        dmax = dfwDict['dfw'].max(axisLimitsLoc, statName)

        #subplot loop 2
        # letting cyDTime and binVal vary
        for expName in self.expNames:
          planeLoc['expName'] = expName
          planeCYDTimes = dfwDict['dfw'].levels('cyDTime', planeLoc)

          # pre-fill with NaN so missing (bin, cycle) cells remain blank
          # NOTE: np.NaN alias was removed in NumPy 2.0; use np.nan
          planeVals = np.full((len(myBinConfigs['str']), self.nCY), np.nan)
          binLoc = deepcopy(planeLoc)
          for ibin, binVal in enumerate(myBinConfigs['str']):
            binLoc['binVal'] = binVal
            tmp = dfwDict['dfw'].loc(binLoc, statName).to_numpy()
            for jcy, cyDTime in enumerate(planeCYDTimes):
              # guard against fewer stored values than available cycle times
              if jcy > len(tmp)-1: continue
              icy = self.cyDTimes.index(cyDTime)
              planeVals[ibin, icy] = tmp[jcy]

          # define subplot title
          title = expName+'\n'+varLabel

          # perform subplot agnostic plotting (all expNames)
          bpf.plotTimeSeries2D(
            fig,
            self.cyDTimes, myBinConfigs['values'], planeVals,
            title, bgstatDiagLabel,
            scilogScale, scilogScale, signDefinite,
            myBinConfigs['indepLabel'], myBinConfigs['invert_ind_axis'],
            nyplots, nxplots, nsubplots, iplot,
            dmin = dmin, dmax = dmax,
            interiorLabels = interiorLabels)

          iplot = iplot + 1

      filename = myPath/('%s%s_BinValAxisTSeries_%smin_%s_%s_%s'%(
                 myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
                 fcTDelta_totmin, self.DiagSpaceName,
                 diagnosticGroup, statName))

      pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)

    # end fcTDelta loop
class FCandBinValAxes2D(OneDimBinMethodBase):
  '''
  Creates raster maps with binVar binVals on y-axis
    - only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
    - subplot: column by experiment, row by DiagSpace variable
    -    file: combination of binVar, binMethod, and statistic
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # cycle-time-aggregated statistics are plotted against FC lead time
    self.requestAggDFW = True

    # figure geometry
    self.subWidth = 2.4
    self.subAspect = 0.55

    self.maxDiagnosticsPerAnalysis = 1

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
    '''
    Generate one raster figure with FC lead time on the x-axis and binVal
    on the y-axis; one subplot per (variable, experiment).

    Args:
      dfwDict: dict of DFWrapper objects; 'agg' holds aggregated statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot (e.g., 'Count', 'Mean', 'RMS')
      nVarsLoc, varMapLoc: non-empty variables and their labels
      myBinConfigs: bin metadata ('str', 'values', 'indepLabel', 'invert_ind_axis')
      options: per-binVar plotting options (unused here)
    '''
    # a lead-time axis requires at least 2 forecast lengths
    if self.nFC < 2: return

    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, scilogScale, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName)

    fcDiagName = self.fcName(diagnosticGroup)
    myPath = self.myFigPath/fcDiagName
    myPath.mkdir(parents=True, exist_ok=True)

    nxplots = self.nExp
    nyplots = nVarsLoc
    nsubplots = nxplots * nyplots

    planeLoc = {}
    axisLimitsLoc = {}

    # establish a new figure
    fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)

    iplot = 0

    #subplot loop 1
    for (varName, varLabel) in varMapLoc:
      planeLoc['varName'] = varName
      axisLimitsLoc['varName'] = varName

      # use common c-axis limits across axisLimitsLoc database locations
      if statName == 'Count':
        dmin = 0.
      else:
        dmin = dfwDict['agg'].min(axisLimitsLoc, statName)
      dmax = dfwDict['agg'].max(axisLimitsLoc, statName)

      #subplot loop 2
      #collect aggregated statName, varying across fcTDelta+binVal
      for expName in self.expNames:
        planeLoc['expName'] = expName
        planeFCTDeltas = dfwDict['agg'].levels('fcTDelta', planeLoc)

        # pre-fill with NaN so missing (bin, lead time) cells remain blank
        # NOTE: np.NaN alias was removed in NumPy 2.0; use np.nan
        planeVals = np.full((len(myBinConfigs['str']), self.nFC), np.nan)
        binLoc = deepcopy(planeLoc)
        for ibin, binVal in enumerate(myBinConfigs['str']):
          binLoc['binVal'] = binVal
          tmp = dfwDict['agg'].loc(binLoc, statName).to_numpy()
          for jfc, fcTDelta in enumerate(planeFCTDeltas):
            # guard against fewer stored values than available lead times
            if jfc > len(tmp)-1: continue
            ifc = self.fcTDeltas.index(fcTDelta)
            planeVals[ibin, ifc] = tmp[jfc]

        # define subplot title
        title = expName+'\n'+varLabel

        # perform subplot agnostic plotting (all expNames)
        bpf.plotTimeSeries2D(
          fig,
          self.fcTDeltas, myBinConfigs['values'], planeVals,
          title, fcstatDiagLabel,
          scilogScale, scilogScale, signDefinite,
          myBinConfigs['indepLabel'], myBinConfigs['invert_ind_axis'],
          nyplots, nxplots, nsubplots, iplot,
          dmin = dmin, dmax = dmax,
          interiorLabels = interiorLabels)

        iplot = iplot + 1

    # save each figure
    filename = myPath/('%s%s_BinValAxisTSeries_%s-%smin_%s_%s_%s'%(
               myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
               self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
               self.DiagSpaceName, fcDiagName, statName))

    pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels)
class BinValAxisProfile(OneDimBinMethodBase):
  '''
  Similar to FCandBinValAxes2D, except
    - each vertical column of raster points is plotted as a profile on
      a separate set of axes instead of in 2-D color
    - therefore this is a valid plot even for a single forecast length (omb)
    -    line: per experiment
    - subplot: column by lead time, row by DiagSpace variable
    -    file: combination of binVar, binMethod, and statistic
    - self.MAX_FC_SUBFIGS determines number of FC lead times to include
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # cycle-time-aggregated statistics are plotted per lead time
    self.requestAggDFW = True

    # figure geometry
    self.subWidth = 1.2
    self.subAspect = 1.3

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
    '''
    Generate one figure of profiles; one line per (experiment, diagnostic)
    and one subplot per (variable, FC lead time) combination.

    Args:
      dfwDict: dict of DFWrapper objects; 'agg' holds aggregated statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot (e.g., 'Count', 'Mean', 'RMS')
      nVarsLoc, varMapLoc: non-empty variables and their labels
      myBinConfigs: bin metadata ('str', 'values', 'indepLabel', 'invert_ind_axis')
      options: per-binVar plotting options; 'profilefunc' selects the plot type
    '''
    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, scilogScale, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName, myLoc['diagName'])

    fcDiagName = self.fcName(diagnosticGroup)
    myPath = self.myFigPath/fcDiagName
    myPath.mkdir(parents=True, exist_ok=True)

    if self.nFC > 1:
      nxplots = min([self.nFC, self.MAX_FC_SUBFIGS])
      nyplots = nVarsLoc
      nsubplots = nxplots * nyplots
    else:
      # single lead time: arrange subplots in a near-square grid
      # NOTE: np.int alias was removed in NumPy 1.24; use builtin int
      nsubplots = nVarsLoc
      nxplots = int(np.ceil(np.sqrt(nsubplots)))
      nyplots = int(np.ceil(np.true_divide(nsubplots, nxplots)))

    ptLoc = {}
    axisLimitsLoc = {}

    # establish a new figure
    fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
    iplot = 0

    #subplot loop 1
    for (varName, varLabel) in varMapLoc:
      ptLoc['varName'] = varName
      axisLimitsLoc['varName'] = varName

      #subplot loop 2
      for fcTDelta in self.fcTDeltas:
        ptLoc['fcTDelta'] = fcTDelta
        axisLimitsLoc['fcTDelta'] = fcTDelta

        # use common x-axis limits across axisLimitsLoc database locations
        if statName == 'Count':
          dmin = 0.
        else:
          dmin = dfwDict['agg'].min(axisLimitsLoc, statName)
        dmax = dfwDict['agg'].max(axisLimitsLoc, statName)

        #Setting to avoid over-crowding
        if self.fcTDeltas.index(fcTDelta) > (self.MAX_FC_SUBFIGS-1): continue

        #collect aggregated statNames, varying across fcTDelta
        linesVals = []
        linesLabel = []
        linesGroup = []
        for expName in self.expNames:
          ptLoc['expName'] = expName
          for diagnosticName in myLoc['diagName']:
            linesGroup.append(expName)
            linesLabel.append(expDiagnosticLabel(
              expName, diagnosticName, myLoc['diagName']))

            ptLoc['diagName'] = diagnosticName

            lineVals = []
            for binVal in myBinConfigs['str']:
              ptLoc['binVal'] = binVal
              pt = dfwDict['agg'].loc(ptLoc, statName).to_numpy()
              if len(pt) == 1:
                lineVals.append(pt[0])
              else:
                # missing or ambiguous data point => leave a gap
                lineVals.append(np.nan)

            linesVals.append(lineVals)

        # define subplot title
        title = varLabel+' @ '+str(float(fcTDelta.total_seconds()) / 3600.0 / 24.0)+'days'

        # perform subplot agnostic plotting (all expNames)
        options['profilefunc'](
          fig,
          linesVals, myBinConfigs['values'],
          linesLabel,
          title, fcstatDiagLabel,
          scilogScale, scilogScale, signDefinite,
          myBinConfigs['indepLabel'], myBinConfigs['invert_ind_axis'],
          nyplots, nxplots, nsubplots, iplot,
          dmin = dmin, dmax = dmax,
          interiorLabels = interiorLabels)

        iplot = iplot + 1

    # save each figure
    filename = myPath/('%s%s_BinValAxis_%s-%smin_%s_%s_%s'%(
               myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
               self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
               self.DiagSpaceName, fcDiagName, statName))

    pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels, True)
class BinValAxisProfileDiffCI(OneDimBinMethodBase):
  '''
  Similar to BinValAxisProfile, except
    shows difference between experiment(s) and control
    - control is selected using cntrlExpIndex
    - statistics are narrowed down by bootStrapStats
    - confidence intervals (CI) are shown at each lead time and binVal
    -    line+shaded region: per experiment
    - subplot: column by lead time, row by DiagSpace variable
    -    file: combination of binVar, binMethod, and statistic
    - self.MAX_FC_SUBFIGS determines number of FC lead times to include
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)

    # OPTIONAL: implement fine-grained parallelism for bootStrapping
    #self.blocking = True

    # restrict all binVars to the bootstrap-capable statistics
    for key in self.binVarDict:
      if 'onlyStatNames' in self.binVarDict[key]:
        self.binVarDict[key]['onlyStatNames'] += bootStrapStats
      else:
        self.binVarDict[key]['onlyStatNames'] = bootStrapStats

    # figure geometry
    self.subWidth = 1.2
    self.subAspect = 1.3

  def innerloops(self,
    dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
    '''
    Generate one figure of profile differences w.r.t. the control
    experiment, with bootstrapped confidence intervals; one line per
    (experiment, diagnostic) and one subplot per (variable, lead time).

    Args:
      dfwDict: dict of DFWrapper objects; 'dfw' holds per-cycle statistics
      diagnosticGroup: name of the diagnostic group being plotted
      myLoc: database location (binVar, binMethod, diagName)
      statName: statistic to plot; must be bootstrap-capable
      nVarsLoc, varMapLoc: non-empty variables and their labels
      myBinConfigs: bin metadata ('str', 'values', 'indepLabel', 'invert_ind_axis')
      options: per-binVar plotting options; 'profilefunc' selects the plot type
    '''
    # need at least one non-control (experiment, diagnostic) line
    if self.nExp * len(myLoc['diagName']) < 2: return
    if self.cntrlExpName not in dfwDict['dfw'].levels('expName'): return

    bgstatDiagLabel, fcstatDiagLabel, fcstatDiagDiffLabel, scilogScale, signDefinite = \
      self.statPlotAttributes(diagnosticGroup, statName, myLoc['diagName'])

    fcDiagName = self.fcName(diagnosticGroup)
    myPath = self.myFigPath/fcDiagName
    myPath.mkdir(parents=True, exist_ok=True)

    if self.nFC > 1:
      nxplots = min([self.nFC, self.MAX_FC_SUBFIGS])
      nyplots = nVarsLoc
      nsubplots = nxplots * nyplots
    else:
      # single lead time: arrange subplots in a near-square grid
      # NOTE: np.int alias was removed in NumPy 1.24; use builtin int
      nsubplots = nVarsLoc
      nxplots = int(np.ceil(np.sqrt(nsubplots)))
      nyplots = int(np.ceil(np.true_divide(nsubplots, nxplots)))

    # Only bootstrap over the union of cyDTimes available
    # from both experiments at each fcTDelta
    myExpsCYDTimes = self.UNIONcntrlANDexpCYDTimes(dfwDict['dfw'])

    # establish a new figure
    fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)

    iplot = 0

    fcLoc = {}
    #subplot loop 1
    for (varName, varLabel) in varMapLoc:
      fcLoc['varName'] = varName

      #subplot loop 2
      for fcTDelta in self.fcTDeltas:
        fcLoc['fcTDelta'] = fcTDelta

        # intermediate tempdfw reduces extraction time in inner loops
        tempdfw = sdb.DFWrapper.fromLoc(dfwDict['dfw'], fcLoc)

        cntrlLoc = deepcopy(fcLoc)
        cntrlLoc['expName'] = self.cntrlExpName

        #Setting to avoid over-crowding
        if self.fcTDeltas.index(fcTDelta) > (self.MAX_FC_SUBFIGS-1): continue

        linesVals = defaultdict(list)
        linesLabel = []
        linesGroup = []
        cntrlLoc['diagName'] = myLoc['diagName'][0]
        for expName in self.expNames:
          for diagnosticName in myLoc['diagName']:
            # skip the control line (it would be identically zero)
            if (expName == cntrlLoc['expName'] and
                diagnosticName == cntrlLoc['diagName']): continue
            cntrlLoc['cyDTime'] = myExpsCYDTimes[(expName, fcTDelta)]
            linesGroup.append(expName)
            linesLabel.append(expDiagnosticLabel(
              expName, diagnosticName, myLoc['diagName']))

            lineVals = defaultdict(list)
            for binVal in myBinConfigs['str']:
              cntrlLoc['binVal'] = binVal
              expLoc = deepcopy(cntrlLoc)
              expLoc['diagName'] = diagnosticName
              expLoc['expName'] = expName

              X = tempdfw.loc(expLoc)
              Y = tempdfw.loc(cntrlLoc)

              # bootstrapped CI of the difference between exp and control
              ciVals = su.bootStrapClusterFunc(
                         X, Y,
                         n_samples = 10000,
                         statNames = [statName])

              for trait in su.ciTraits:
                lineVals[trait] += [ciVals[statName][trait][0]]

            for trait in su.ciTraits:
              linesVals[trait].append(lineVals[trait])

        # define subplot title
        title = varLabel+' @ '+str(float(fcTDelta.total_seconds()) / 3600.0 / 24.0)+'days'

        # use specific y-axis limits for each varName
        dmin = np.nanmin(linesVals[su.cimin])
        dmax = np.nanmax(linesVals[su.cimax])

        # perform subplot agnostic plotting (all expNames)
        options['profilefunc'](
          fig,
          linesVals[su.cimean], myBinConfigs['values'],
          linesLabel,
          title,
          fcstatDiagDiffLabel,
          scilogScale, scilogScale, False,
          myBinConfigs['indepLabel'], myBinConfigs['invert_ind_axis'],
          nyplots, nxplots, nsubplots, iplot,
          linesValsMinCI = linesVals[su.cimin],
          linesValsMaxCI = linesVals[su.cimax],
          dmin = dmin, dmax = dmax,
          lineAttribOffset = 1,
          interiorLabels = interiorLabels)

        iplot = iplot + 1

    # save each figure
    filename = myPath/('%s%s_BinValAxis_%s-%smin_%s_%s_%s'%(
               myLoc['binVar'], self.binMethodFile(myLoc['binMethod']),
               self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
               self.DiagSpaceName, fcDiagName, statName))

    pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels, True)
class BinValAxisPDF(AnalysisBase):
  '''
  Similar to BinValAxisProfile, except
    uses Count statistic to analyze a PDF across binVals
    - x-axis: binVal
    -    line: per binMethod
    - subplot: combination of FC lead time and DiagSpace variable
    -    file: per experiment (if applicable)
  '''
  def __init__(self, db, analysisType, diagnosticGroupings):
    super().__init__(db, analysisType, diagnosticGroupings)
    # TODO(JJG): Make a generic version of bpf.plotPDF, which
    # currently overlays a standard Gaussian model. That should
    # be a special case only for vu.obsVarNormErr.
    self.binVarDict = {
      vu.obsVarNormErr: {'pdffunc': bpf.plotPDF},
    }

    self.requestAggDFW = True

    # figure geometry
    self.subWidth = 1.2
    self.subAspect = 1.3

    # the PDF is constructed from the Count statistic
    self.requiredStatistics = ['Count']

  def analyze_(self, workers = None):
    '''
    Generate one figure per experiment for each (diagnostic, binVar)
    combination in self.binVarDict; each subplot (variable x lead time)
    contains one PDF line per binMethod.

    Args:
      workers: unused; processing is serial for this analysis type
    '''
    for diagnosticName, diagnosticConfig in self.diagnosticConfigs.items():
      if diagnosticName not in self.db.dfw.levels('diagName'): continue
      analysisStatistics = diagnosticConfig['analysisStatistics']
      if not set(self.requiredStatistics).issubset(analysisStatistics): continue

      diagBinVars = self.db.dfw.levels('binVar', {'diagName': diagnosticName})

      # BUGFIX: iterate key/value pairs; iterating the dict directly
      # attempted to unpack each key string and raised at runtime
      for fullBinVar, options in self.binVarDict.items():
        binVar = vu.varDictAll[fullBinVar][1]
        if binVar not in diagBinVars: continue

        myLoc = {}
        myLoc['diagName'] = diagnosticName
        myLoc['binVar'] = binVar
        myLoc['binVal'] = self.binNumVals2DasStr

        # reducing to mydfwDict speeds extractions in innerloops
        mydfwDict = {'dfw': self.db.loc(myLoc)}

        # include aggregated statistics when requested
        if self.requestAggDFW:
          mydfwDict['agg'] = sdb.DFWrapper.fromAggStats(mydfwDict['dfw'], ['cyDTime'])

        ## Get all float/int binVals associated with binVar
        binMethods = mydfwDict['dfw'].levels('binMethod')
        binVals = mydfwDict['dfw'].levels('binVal')
        binUnits = mydfwDict['dfw'].uniquevals('binUnits')[0]

        # assume all bins represent same variable/units
        indepLabel = binVar
        if binUnits != vu.miss_s:
          indepLabel = indepLabel+' ('+binUnits+')'

        # bin info
        binNumVals = []
        for binVal in binVals:
          ibin = self.allBinVals.index(binVal)
          binNumVals.append(self.binNumVals[ibin])

        # sort bins by numeric value
        indices = list(range(len(binNumVals)))
        indices.sort(key=binNumVals.__getitem__)
        binNumVals = list(map(binNumVals.__getitem__, indices))
        binVals = list(map(binVals.__getitem__, indices))

        # a PDF needs at least two bins to be meaningful
        if len(binVals) < 2: continue

        self.logger.info('binVar=>'+binVar)

        fcDiagName = self.fcName(diagnosticName)
        myPath = self.myFigPath/fcDiagName
        myPath.mkdir(parents=True, exist_ok=True)

        if self.nFC > 1:
          nxplots = min([self.nFC, self.MAX_FC_SUBFIGS])
          nyplots = self.nVars
          nsubplots = nxplots * nyplots
        else:
          # single lead time: arrange subplots in a near-square grid
          # NOTE: np.int alias was removed in NumPy 1.24; use builtin int
          nsubplots = self.nVars
          nxplots = int(np.ceil(np.sqrt(nsubplots)))
          nyplots = int(np.ceil(np.true_divide(nsubplots, nxplots)))

        ptLoc = deepcopy(myLoc)

        #file loop 1
        for expName in self.expNames:
          ptLoc['expName'] = expName

          # establish a new figure
          fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)

          iplot = 0

          #subplot loop 1
          for (varName, varLabel) in self.varMap:
            ptLoc['varName'] = varName

            #subplot loop 2
            for fcTDelta in self.fcTDeltas:
              ptLoc['fcTDelta'] = fcTDelta

              #Setting to avoid over-crowding
              if self.fcTDeltas.index(fcTDelta) > (self.MAX_FC_SUBFIGS-1): continue

              #collect aggregated statNames, varying across fcTDelta
              linesVals = []
              binMethodLabels = []
              for binMethod in binMethods:
                ptLoc['binMethod'] = binMethod

                # if binMethod != bu.identityBinMethod: do something with bu.identityBinMethod
                if binMethod == bu.identityBinMethod:
                  binMethodLabels.append('ObsSpace')
                else:
                  binMethodLabels.append(binMethod)

                lineVals = []
                for binVal in binVals:
                  ptLoc['binVal'] = binVal
                  # BUGFIX: the aggregated wrapper is mydfwDict['agg'];
                  # 'dfwDict' is undefined in this scope (NameError)
                  lineVals.append(mydfwDict['agg'].loc(ptLoc,'Count'))

                linesVals.append(lineVals)

              # define subplot title
              title = varLabel+' @ '+str(float(fcTDelta.total_seconds()) / 3600.0 / 24.0)+'days'

              # perform subplot agnostic plotting (all expNames)
              options['pdffunc'](
                fig,
                linesVals, binNumVals,
                binMethodLabels,
                title,
                indepLabel,
                nyplots, nxplots, nsubplots, iplot,
                interiorLabels = interiorLabels)

              iplot = iplot + 1

            # end fcTDelta loop

          # end varMap loop

          # save each figure
          filename = myPath/('%s_BinValAxis_%s-%smin_%s_%s_%s'%(
                     binVar, self.fcTDeltas_totmin[0], self.fcTDeltas_totmin[-1],
                     self.DiagSpaceName, fcDiagName, expName))

          pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels, True)

        # end expName loop
# TODO: generalize as a sub-class of OneDimBinMethodBase
class BinValAxisStatsComposite(AnalysisBase):
    '''
    Similar to BinValAxisProfile, except
    all statistics (Count, Mean, RMS, STD) are placed on the same axis
    - x-axis: binVal
    - line: per statistic
    - subplot: per DiagSpace variable
    - file: combination of FC lead time, experiment, and binMethod (if applicable)
    '''
    def __init__(self, db, analysisType, diagnosticGroupings):
        super().__init__(db, analysisType, diagnosticGroupings)
        self.binVarDict = {
            # TODO(JJG): Make a generic version of bpf.plotComposite, because
            # bpf.plotfitRampComposite also provides parameters for a ramp fitting
            # function that may not be useful for binVars besides vu.obsVarSCI.
            vu.obsVarSCI: {'statsfunc': bpf.plotfitRampComposite},
        }
        self.requestAggDFW = True
        # Force serial processing so that console output is contiguous
        # TODO(JJG): output to an ascii file and remove this line
        self.blocking = True
        self.subWidth = 1.9
        self.subAspect = 0.9
        # all four statistics are drawn on the same axis, so all are required
        self.requiredStatistics = ['Count', 'Mean', 'RMS', 'STD']
    def analyze_(self, workers = None):
        for diagnosticName, diagnosticConfig in self.diagnosticConfigs.items():
            if diagnosticName not in self.db.dfw.levels('diagName'): continue
            analysisStatistics = diagnosticConfig['analysisStatistics']
            if not set(self.requiredStatistics).issubset(set(analysisStatistics)): continue
            diagBinVars = self.db.dfw.levels('binVar', {'diagName': diagnosticName})
            for fullBinVar, options in self.binVarDict.items():
                binVar = vu.varDictAll[fullBinVar][1]
                if (binVar not in diagBinVars): continue
                myLoc = {}
                myLoc['diagName'] = diagnosticName
                myLoc['binVar'] = binVar
                myLoc['binVal'] = self.binNumVals2DasStr
                # reducing to mydfwDict speeds extractions in innerloops
                mydfwDict = {'dfw': self.db.loc(myLoc)}
                # include aggregated statistics when requested
                if self.requestAggDFW:
                    mydfwDict['agg'] = sdb.DFWrapper.fromAggStats(mydfwDict['dfw'], ['cyDTime'])
                ## Get all float/int binVals associated with binVar
                binMethods = mydfwDict['dfw'].levels('binMethod')
                binVals = mydfwDict['dfw'].levels('binVal')
                binUnits = mydfwDict['dfw'].uniquevals('binUnits')[0]
                # assume all bins represent same variable/units
                indepLabel = binVar
                if binUnits != vu.miss_s:
                    indepLabel = indepLabel+' ('+binUnits+')'
                # bin info
                binNumVals = []
                for binVal in binVals:
                    ibin = self.allBinVals.index(binVal)
                    binNumVals.append(self.binNumVals[ibin])
                # sort bins by numeric value
                indices = list(range(len(binNumVals)))
                indices.sort(key=binNumVals.__getitem__)
                binNumVals = list(map(binNumVals.__getitem__, indices))
                binVals = list(map(binVals.__getitem__, indices))
                nBins = len(binVals)
                if nBins < 2: continue
                fcDiagName = self.fcName(diagnosticName)
                myPath = self.myFigPath/fcDiagName
                myPath.mkdir(parents=True, exist_ok=True)
                nsubplots = self.nVars
                # builtin int() instead of np.int: the np.int alias was
                # deprecated in NumPy 1.20 and removed in NumPy 1.24
                nxplots = int(np.ceil(np.sqrt(nsubplots)))
                nyplots = int(np.ceil(np.true_divide(nsubplots, nxplots)))
                ptLoc = {}
                #file loop 1
                for binMethod in binMethods:
                    ptLoc['binMethod'] = binMethod
                    self.logger.info('binVar=>'+binVar+', binMethod=>'+binMethod)
                    #file loop 2
                    for expName in self.expNames:
                        ptLoc['expName'] = expName
                        #file loop 3
                        for (fcTDelta, fcTDelta_totmin) in self.fcMap:
                            ptLoc['fcTDelta'] = fcTDelta
                            # establish a new figure
                            fig = pu.setup_fig(nxplots, nyplots, self.subWidth, self.subAspect, interiorLabels)
                            iplot = 0
                            ERRParams = {}
                            ERRParams[self.DiagSpaceName] = {}
                            #subplot loop 1
                            for (varName, varLabel) in self.varMap:
                                ptLoc['varName'] = varName
                                #collect aggregated statNames, varying across fcTDelta
                                # np.nan instead of np.NaN: the np.NaN alias was
                                # removed in NumPy 2.0
                                countsVals = np.full(nBins, 0)
                                meansVals = np.full(nBins, np.nan)
                                rmssVals = np.full(nBins, np.nan)
                                stdsVals = np.full(nBins, np.nan)
                                for ibin, binVal in enumerate(binVals):
                                    ptLoc['binVal'] = binVal
                                    countsVals[ibin] = mydfwDict['agg'].loc(ptLoc,'Count').to_numpy()
                                    meansVals[ibin] = mydfwDict['agg'].loc(ptLoc,'Mean').to_numpy()
                                    rmssVals[ibin] = mydfwDict['agg'].loc(ptLoc,'RMS').to_numpy()
                                    stdsVals[ibin] = mydfwDict['agg'].loc(ptLoc,'STD').to_numpy()
                                # define subplot title
                                title = varLabel
                                # perform subplot agnostic plotting (all expNames)
                                FitParams = options['statsfunc'](
                                    fig,
                                    binNumVals,
                                    countsVals,
                                    meansVals,
                                    rmssVals,
                                    stdsVals,
                                    title,
                                    'STATS('+fcDiagName+')',
                                    indepLabel,
                                    nyplots, nxplots, nsubplots, iplot,
                                    interiorLabels = interiorLabels)
                                paramKey = self.chlist[iplot]
                                if paramKey == '': paramKey = varName
                                ERRParams[self.DiagSpaceName][(paramKey, binMethod)] = FitParams
                                iplot = iplot + 1
                            # echo fitted parameters for manual transfer into
                            # binning_params and UFO YAML configuration
                            YAMLParams = {}
                            print('\n#For binning_params:')
                            for key in sorted(ERRParams[self.DiagSpaceName]):
                                print(binVar+"ErrParams['"+self.DiagSpaceName+"'][", key, "] = ",
                                      ERRParams[self.DiagSpaceName][key]['bu'])
                                for param, val in ERRParams[self.DiagSpaceName][key]['YAML'].items():
                                    if param not in YAMLParams: YAMLParams[param] = []
                                    YAMLParams[param] += val
                            print('\n#For UFO YAML config:')
                            for param, val in YAMLParams.items():
                                print('# '+param+':', val)
                            # save each figure
                            filename = myPath/('%s%s_BinValAxis_%smin_%s_%s_%s'%(
                                       binVar, self.binMethodFile(binMethod), fcTDelta_totmin,
                                       self.DiagSpaceName, fcDiagName, expName))
                            pu.finalize_fig(fig, str(filename), figureFileType, interiorLabels, True)
                    # end expName loop
                # end binMethod loop
            # end fullBinVar loop
#===========================
# Calculate gross statistics
#===========================
class GrossValues(AnalysisBase):
    '''
    Calculate gross statistics for specified category binMethods at first forecast length
      NOTE: currently only calculates statistics at self.fcTDeltas[0]
            adjust minimum forecast length in order to calculate
            for non-zero forecast lengths, assuming those lengths
            are present in db
    '''
    def __init__(self, db, analysisType, diagnosticGroupings):
        super().__init__(db, analysisType, diagnosticGroupings)
        # aggregate across cycle times before extracting statistics
        self.requestAggDFW = True
        # Force serial processing so that console output is contiguous
        # TODO(JJG): output to an ascii file and remove this line
        self.blocking = True
        # (binVar, binMethod) pairs to report; the empty dict is a
        # placeholder for per-pair options passed to categoryBinValsAttributes
        self.binVarDict = {
            (vu.obsVarQC, bu.goodQCMethod): {},
            (vu.obsVarCldFrac, bu.cloudbandsMethod): {},
        }
    def analyze_(self, workers = None):
        # NOTE(review): self.requiredStatistics is read below but not set in
        # this __init__; presumably inherited from AnalysisBase -- confirm.
        for diagnosticName, diagnosticConfig in self.diagnosticConfigs.items():
            # skip diagnostics absent from the database
            if diagnosticName not in self.db.dfw.levels('diagName'): continue
            analysisStatistics = diagnosticConfig['analysisStatistics']
            if not set(self.requiredStatistics).issubset(set(analysisStatistics)): continue
            diagLoc = {'diagName': diagnosticName}
            diagBinVars = self.db.dfw.levels('binVar', diagLoc)
            diagBinMethods = self.db.dfw.levels('binMethod', diagLoc)
            for (fullBinVar, binMethod), options in self.binVarDict.items():
                binVar = vu.varDictAll[fullBinVar][1]
                if (binVar not in diagBinVars or
                    binMethod not in diagBinMethods): continue
                # narrow mydfwDict by binVar and binMethod to reduce run-time and memory
                myLoc = {}
                myLoc['binVar'] = binVar
                myLoc['binMethod'] = binMethod
                # reducing to mydfwDict speeds extractions in innerloops
                mydfwDict = {'dfw': self.db.loc(myLoc)}
                if self.requestAggDFW:
                    mydfwDict['agg'] = sdb.DFWrapper.fromAggStats(mydfwDict['dfw'], ['cyDTime'])
                    sdb.createORreplaceDerivedDiagnostics(mydfwDict['agg'], {diagnosticName: diagnosticConfig})
                # further narrow mydfwDict by diagName
                # NOTE: derived diagnostics may require multiple diagName values;
                # can only narrow by diagName after aggregation
                myLoc['diagName'] = diagnosticName
                for key in mydfwDict.keys():
                    mydfwDict[key] = sdb.DFWrapper.fromLoc(mydfwDict[key], myLoc)
                print(' Calculate gross statistics: binVar=>'+binVar+', binMethod=>'+binMethod)
                binValsMap = categoryBinValsAttributes(
                    mydfwDict['dfw'], fullBinVar, binMethod, options)
                print(' at FC length ', self.fcTDeltas[0])
                # Calculate gross statistics for this binVal
                statsLoc = {}
                statsLoc['fcTDelta'] = self.fcTDeltas[0]
                for binVal, binTitle in binValsMap:
                    statsLoc['binVal'] = binVal
                    # NOTE: this local dict shadows the class name GrossValues
                    # inside analyze_; keyed by (statName, expName, varName)
                    GrossValues = {}
                    for varName in self.varNames:
                        statsLoc['varName'] = varName
                        for expName in self.expNames:
                            statsLoc['expName'] = expName
                            statsDFW = sdb.DFWrapper.fromLoc(mydfwDict['agg'], statsLoc)
                            for statName in analysisStatistics:
                                GrossValues[(statName, expName, varName)] = statsDFW.var(statName).to_numpy()
                    # print one table per experiment, one row of values per statistic
                    for expName in self.expNames:
                        print('Gross statistics for')
                        print('experiment=>'+expName)
                        if len(binValsMap) > 1:
                            print('binVal=>'+binVal)
                        print(' variables: ', self.varNames)
                        for statName in analysisStatistics:
                            print(statName)
                            tmp = np.asarray([])
                            for varName in self.varNames:
                                tmp = np.append(tmp, GrossValues[(statName, expName, varName)])
                            print(tmp)
# Registry mapping an analysisType name (as requested by the driver) to the
# AnalysisBase subclass that implements it; consumed by AnalysisFactory below.
AnalysisTypeDict = {
    #Derived from CategoryBinMethodBase(AnalysisBase)
    'CYAxisExpLines': CYAxisExpLines,
    'FCAxisExpLines': FCAxisExpLines,
    'FCAxisExpLinesDiffCI': FCAxisExpLinesDiffCI,
    'CYAxisFCLines': CYAxisFCLines,
    'CYAxisBinValLines': CYAxisBinValLines,
    #Derived from OneDimBinMethodBase(AnalysisBase)
    'CYandBinValAxes2D': CYandBinValAxes2D,
    'FCandBinValAxes2D': FCandBinValAxes2D,
    'BinValAxisProfile': BinValAxisProfile,
    'BinValAxisProfileDiffCI': BinValAxisProfileDiffCI,
    # TODO(JJG): TwoDimBinMethodBase(AnalysisBase)
    #'BinValAxes2D': BinValAxes2D,
    #Derived from AnalysisBase
    'BinValAxisPDF': BinValAxisPDF,
    'BinValAxisStatsComposite': BinValAxisStatsComposite,
    'GrossValues': GrossValues,
}
# NOTES:
# (1) FCAxis* types require non-zero forecast length
# (2) CYAxis* types require > 1 analysis cycle
# (3) CYAxisFCLines requires (1) and (2)
# (4) *DiffCI types require more than one experiment
def AnalysisFactory(db, analysisType, diagnosticGroupings):
    '''Construct and return the AnalysisBase subclass registered for
    analysisType in AnalysisTypeDict.

    Raises AssertionError when analysisType has no registered class.
    '''
    myClass = AnalysisTypeDict.get(analysisType, None)
    # NOTE: the assert message is a single formatted string; the previous
    # version passed a tuple, which rendered as a tuple repr on failure
    assert (myClass is not None and inspect.isclass(myClass)), \
        ('\n\nERROR: AnalysisFactory cannot construct '+analysisType+
         ' without instructions in AnalysisTypeDict')
    return myClass(db, analysisType, diagnosticGroupings)
class Analyses():
    '''Container that constructs and runs a set of analyses for one DiagSpace.

    db                   -- StatsDB-like database object shared by all analyses
    analysisTypes        -- iterable of analysisType names (keys of AnalysisTypeDict)
    diagnosticGroupings  -- optional groupings forwarded to each analysis
    nproc                -- number of worker processes; 1 means serial
    '''
    def __init__(self, db, analysisTypes, diagnosticGroupings = {}, nproc = 1):
        # NOTE: diagnosticGroupings default is a shared mutable dict; it is
        # only forwarded (never mutated here), so the signature is kept
        self.nproc = nproc
        self.analyses = []
        for anType in analysisTypes:
            self.analyses.append(AnalysisFactory(db, anType, diagnosticGroupings))
        self.logger = logging.getLogger(__name__+'.'+db.DiagSpaceName)
        self.logger.info('Analyses Constructed')
    def analyze(self):
        '''Run every constructed analysis, optionally with a process pool.'''
        self.logger.info("Entering Analyses.analyze()")
        if self.nproc > 1:
            workers = mp.Pool(self.nproc)
        else:
            workers = None
        # try/finally guarantees the pool is closed/joined even when an
        # analysis raises (previously a failure leaked the worker processes)
        try:
            for an in self.analyses:
                an.analyze(workers)
        finally:
            if workers is not None:
                workers.close()
                workers.join()
        self.logger.info("Exiting Analyses.analyze()")
| 72,180 | 4,456 | 1,067 |
cfe483046d09d5afb78b6f06db74805d1038903c | 4,967 | py | Python | pyslip/examples/test_displayable_levels.py | DoppleGangster/pySlip | cb351a55ac989e2f681f903db91328ee3ada2535 | [
"MIT"
] | null | null | null | pyslip/examples/test_displayable_levels.py | DoppleGangster/pySlip | cb351a55ac989e2f681f903db91328ee3ada2535 | [
"MIT"
] | null | null | null | pyslip/examples/test_displayable_levels.py | DoppleGangster/pySlip | cb351a55ac989e2f681f903db91328ee3ada2535 | [
"MIT"
] | null | null | null | """
Test if we can have a list of "allowable levels" and if a user requests
the display of a level not in that list we CANCEL the zoom operation.
Usage: test_displayable_levels.py [-d] [-h] [-t (OSM|GMT)]
"""
import sys
import wx
import pyslip
# initialize the logging system
import pyslip.log as log
try:
log = log.Log("pyslip.log")
except AttributeError:
# already set up, ignore exception
pass
######
# Various constants
######
DemoName = 'pySlip %s - Zoom undo test' % pyslip.__version__
DemoWidth = 1000
DemoHeight = 800
DemoAppSize = (DemoWidth, DemoHeight)
InitViewLevel = 2
InitViewPosition = (100.494167, 13.7525) # Bangkok
################################################################################
# The main application frame
################################################################################
################################################################################
if __name__ == '__main__':
    import sys
    import getopt
    import traceback
    # print some usage information
    # our own handler for uncaught exceptions
    # NOTE(review): in this copy of the file, usage(), excepthook() and
    # TestFrame are referenced below but not defined above -- presumably
    # removed by filtering; confirm against the original script.
    sys.excepthook = excepthook
    # decide which tiles to use, default is GMT
    argv = sys.argv[1:]
    try:
        (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])
    except getopt.error:
        # bad command-line option: show usage and exit with error status
        usage()
        sys.exit(1)
    tile_source = 'GMT'
    for (opt, param) in opts:
        if opt in ['-h', '--help']:
            usage()
            sys.exit(0)
        elif opt in ('-t', '--tiles'):
            tile_source = param
    tile_source = tile_source.lower()
    # set up the appropriate tile source
    if tile_source == 'gmt':
        import pyslip.gmt_local as Tiles
    elif tile_source == 'osm':
        import pyslip.open_street_map as Tiles
    else:
        usage('Bad tile source: %s' % tile_source)
        sys.exit(3)
    # start wxPython app
    app = wx.App()
    TestFrame().Show()
    app.MainLoop()
| 29.921687 | 80 | 0.581639 | """
Test if we can have a list of "allowable levels" and if a user requests
the display of a level not in that list we CANCEL the zoom operation.
Usage: test_displayable_levels.py [-d] [-h] [-t (OSM|GMT)]
"""
import sys
import wx
import pyslip
# initialize the logging system
import pyslip.log as log
try:
log = log.Log("pyslip.log")
except AttributeError:
# already set up, ignore exception
pass
######
# Various constants
######
DemoName = 'pySlip %s - Zoom undo test' % pyslip.__version__
DemoWidth = 1000
DemoHeight = 800
DemoAppSize = (DemoWidth, DemoHeight)
InitViewLevel = 2
InitViewPosition = (100.494167, 13.7525) # Bangkok
################################################################################
# The main application frame
################################################################################
class TestFrame(wx.Frame):
    """Demo frame holding a single pySlip widget that vetoes disallowed zooms."""
    def __init__(self):
        wx.Frame.__init__(self, None, size=DemoAppSize,
                          title=('PySlip %s - zoom undo test'
                                 % pyslip.__version__))
        self.SetMinSize(DemoAppSize)
        self.panel = wx.Panel(self, wx.ID_ANY)
        self.panel.SetBackgroundColour(wx.WHITE)
        self.panel.ClearBackground()
        # create the tile source object
        self.tile_src = Tiles.Tiles()
        # build the GUI
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.panel.SetSizer(box)
        self.pyslip = pyslip.pySlip(self.panel, tile_src=self.tile_src,
                                    style=wx.SIMPLE_BORDER)
        box.Add(self.pyslip, proportion=1, border=1, flag=wx.EXPAND)
        self.panel.SetSizerAndFit(box)
        self.panel.Layout()
        self.Centre()
        self.Show(True)
        # set initial view position
        wx.CallLater(25, self.final_setup, InitViewLevel, InitViewPosition)
        # bind the pySlip widget to the "zoom undo" method
        self.pyslip.Bind(pyslip.EVT_PYSLIP_LEVEL, self.onZoom)
    def final_setup(self, level, position):
        """Perform final setup.
        level     zoom level required
        position  position to be in centre of view
        We do this in a CallAfter() function for those operations that
        must not be done while the GUI is "fluid".
        """
        self.pyslip.GotoLevelAndPosition(level, position)
    def onZoom(self, event):
        """Catch and undo a zoom.
        The pySlip widget automatically zooms if there are tiles available.
        Simulate the amount of work a user handler might do before deciding to
        undo a zoom.
        We must check the level we are zooming to. If we don't, the GotoLevel()
        method below will trigger another exception, which we catch, etc, etc.
        """
        print('Trying to zoom to level %d' % event.level)
        # do some busy waiting - simulates user code
        for _ in range(1000000):
            pass
        # the only permitted level is InitViewLevel; the original expressed
        # this as membership in a 20-element list of identical values
        if event.level != InitViewLevel:
            # zoomed level isn't allowed, go back to the original level
            print('Undoing zoom to %d' % event.level)
            self.pyslip.GotoLevel(InitViewLevel)
################################################################################
if __name__ == '__main__':
    import sys
    import getopt
    import traceback
    # print some usage information
    def usage(msg=None):
        """Print an optional error message followed by the module docstring."""
        if msg:
            print(msg+'\n')
        print(__doc__)        # module docstring used
    # our own handler for uncaught exceptions
    def excepthook(type, value, tb):
        """Format an uncaught exception with visual delimiters and exit."""
        msg = '\n' + '=' * 80
        msg += '\nUncaught exception:\n'
        msg += ''.join(traceback.format_exception(type, value, tb))
        msg += '=' * 80 + '\n'
        print(msg)
        sys.exit(1)
    sys.excepthook = excepthook
    # decide which tiles to use, default is GMT
    argv = sys.argv[1:]
    try:
        (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])
    except getopt.error:
        # bad command-line option: show usage, exit with error status
        usage()
        sys.exit(1)
    tile_source = 'GMT'
    for (opt, param) in opts:
        if opt in ['-h', '--help']:
            usage()
            sys.exit(0)
        elif opt in ('-t', '--tiles'):
            tile_source = param
    tile_source = tile_source.lower()
    # set up the appropriate tile source
    if tile_source == 'gmt':
        import pyslip.gmt_local as Tiles
    elif tile_source == 'osm':
        import pyslip.open_street_map as Tiles
    else:
        usage('Bad tile source: %s' % tile_source)
        sys.exit(3)
    # start wxPython app
    app = wx.App()
    TestFrame().Show()
    app.MainLoop()
| 1,388 | 1,559 | 75 |
47182aada5a3c347274b77898aff1d9b98c33084 | 546 | py | Python | Ex20.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | [
"MIT"
] | null | null | null | Ex20.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | [
"MIT"
] | null | null | null | Ex20.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | [
"MIT"
] | null | null | null | """
Ex 20 - the same teacher from the previous challenge wants to raffle off the order of students' school assignments. Make a program that reads the names of the four students and shows the order of the names drawn
"""
from random import shuffle

# Read the four students' names.  The original prompts all said
# "first student" (copy-paste bug); label each prompt correctly.
Est_1 = str(input('Type the first student: '))
Est_2 = str(input('Type the second student: '))
Est_3 = str(input('Type the third student: '))
Est_4 = str(input('Type the fourth student: '))
order = [Est_1, Est_2, Est_3, Est_4]
print('-' * 30)
# shuffle() randomizes the assignment order in place
shuffle(order)
print(order)
input('Enter to exit')
| 26 | 211 | 0.716117 | """
Ex 20 - the same teacher from the previous challenge wants to raffle off the order of students' school assignments. Make a program that reads the names of the four students and shows the order of the names drawn
"""
from random import shuffle

# Read the four students' names.  The original prompts all said
# "first student" (copy-paste bug); label each prompt correctly.
Est_1 = str(input('Type the first student: '))
Est_2 = str(input('Type the second student: '))
Est_3 = str(input('Type the third student: '))
Est_4 = str(input('Type the fourth student: '))
order = [Est_1, Est_2, Est_3, Est_4]
print('-' * 30)
# shuffle() randomizes the assignment order in place
shuffle(order)
print(order)
input('Enter to exit')
| 0 | 0 | 0 |
832b5387393a67978aaea3cbade2eb20f772477c | 12,995 | py | Python | echolab2/plotting/qt/QImageViewer/QIVPolygon.py | nlauffenburger/pyEcholab | d8454984c17c8454cbaaf296232777afc48ca0cc | [
"MIT"
] | 20 | 2018-11-06T23:16:28.000Z | 2022-03-17T01:11:20.000Z | echolab2/plotting/qt/QImageViewer/QIVPolygon.py | nlauffenburger/pyEcholab | d8454984c17c8454cbaaf296232777afc48ca0cc | [
"MIT"
] | 14 | 2019-08-30T15:27:54.000Z | 2021-11-04T15:16:36.000Z | echolab2/plotting/qt/QImageViewer/QIVPolygon.py | nlauffenburger/pyEcholab | d8454984c17c8454cbaaf296232777afc48ca0cc | [
"MIT"
] | 22 | 2019-01-31T21:07:27.000Z | 2022-02-02T19:20:50.000Z | """
Rick Towler
Midwater Assessment and Conservation Engineering
NOAA Alaska Fisheries Science Center
rick.towler@noaa.gov
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from QIVPolygonItem import QIVPolygonItem
from QIVMarkerText import QIVMarkerText
class QIVPolygon(QGraphicsItemGroup):
"""
QIVPolygon implememts open and closed polygon items with simplified vertex
labeling. The labels are implemented by QIVMarkerText, are non-scaling,
and provide the ability to justify and offset labels from the vertex anchor.
If you only need a simple polygon object without labeling, you can use
QIVPolygonItem directly.
If a polygon is specified as "open" the last vertex is not connected
the first and the polygon cannot be filled. You can also think of open
polygons as polylines. "Closed" polygons do have their last vertext connected
to the first. Closed polygons can be filled by setting the fill keyword.
QIVPolygon Arguments:
vertices - The polygon vertices as:
A list of QPoint or QpointF objects defining the vertices
A list of [x,y] pairs (i.e. [[x,y],[x,y],[x,y],...]
A QRect or QRectF object
color - a 3 element list or tuple containing the RGB triplet
specifying the outline color of the polygon
thickness - A float specifying the outline thickness of the polygon.
alpha - A integer specifying the opacity of the polygon. 0 is transparent
and 255 is solid.
linestyle - '=' for solid, '-' for dashed, and '.' for dotted.
fill - a 3 element list or tuple containing the RGB triplet
specifying the fill color of the polygon. Set to None for
no fill.
"""
def getLabelsFromName(self, labelName):
'''
returns a list of QIVMarkerText references that share the name provided in the
labelName argument.
'''
labelReferences = []
# find label(s) given the label name
for label in self.labels:
if (label.name == labelName):
labelReferences.append(label)
return labelReferences
def removeLabel(self, labels):
'''
removeLabel removes a marker label given the label reference or labelName.
You can also pass a list of references or names. If the label name is provided,
all labels with that name will be removed.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given a single item - check if it is a name or ref
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
def removeAllLabels(self):
'''
removeAllLabels is a convenience method to clear all labels associated with this mark.
'''
self.removeLabel(self.labels)
    def getLabels(self):
        '''
        getLabels returns the list of labels associated with this mark
        '''
        # NOTE: returns the internal list itself (not a copy); callers should
        # use addLabel/removeLabel rather than mutating it directly
        return self.labels
def addLabel(self, vertex, text, size=10, font='helvetica', italics=False, weight=-1,
color=[0,0,0], alpha=255, halign='left', valign='top', name='QIVPolygonLabel',
offset=None):
"""
Add a label to the polygon at a specified vertex. Labels are children of the polygon.
vertex (int) - The 0 based vertex number to attach the label to.
text (string) - The text to add to the dimension line.
offset (QPointF) - An offset from your position. The units are pixels at the
image's native resolution. This gets muddled when used with
classes that transform coordinates, especially QMapViewer.
size (int) - The text size, in point size
font (string) - A string containing the font family to use. Either stick
to the basics with this (i.e. "times", "helvetica") or
consult the QFont docs.
italics (bool) - Set to true to italicise the font.
weight (int) - Set to an integer in the range 0-99. 50 is normal, 75 is bold.
color (list) - A 3 element list or tuple containing the RGB triplet
specifying the color of the text.
alpha (int) - An integer specifying the opacity of the text. 0 is transparent
and 255 is solid.
halign (string) - Set this value to set the horizontal anchor point. Values are:
'left' - Sets the anchor to the left side of the text
'center' - Sets the anchor to the middle of the text
'right' - Sets the anchor to the right side of the text
valign (string) - Set this value to set the vertical anchor point. Values are:
'top' - Sets the anchor to the top of the text
'center' - Sets the anchor to the middle of the text
'bottom' - Sets the anchor to the bottom of the text
name (string) - Set this to the name associated with the text object. The name
can be used to differentiate between your text objects.
"""
if (offset == None) or (offset == []):
offset = QPointF(0,0)
# get the position given the vertex index
position = self.polygon[vertex]
# create a QIVMarkerText associated with the provided mark/line
textItem = QIVMarkerText(position, text, offset=offset, size=size, font=font, italics=italics,
weight=weight, color=color, alpha=alpha, halign=halign,
valign=valign, name=name, view=self.view)
# add the label to our list of labels
self.labels.append(textItem)
self.addToGroup(textItem)
    def setLabelText(self, labels, text):
        '''
        Sets the label text given the label reference or name and text.
        labels may be a single reference, a single name (str), or a list
        mixing either; text is applied to every matching label.
        '''
        if (labels.__class__.__name__.lower() == 'list'):
            # we've been given a list of label references or names
            for label in labels:
                if (label.__class__.__name__.lower() == 'str'):
                    # assume this is a label name
                    labelRefs = self.getLabelsFromName(label)
                    for ref in labelRefs:
                        ref.setText(text)
                else:
                    # assume this is a label reference
                    try:
                        label.setText(text)
                    except:
                        # bad reference - not in our list of labels
                        pass
        else:
            # we've been given a single label name or reference
            if (labels.__class__.__name__.lower() == 'str'):
                # assume this is a label name
                labelRefs = self.getLabelsFromName(labels)
                for ref in labelRefs:
                    ref.setText(text)
            else:
                # assume this is a label reference
                try:
                    labels.setText(text)
                except:
                    # bad reference - not in our list of labels
                    pass
    def setLabelVisible(self, labels, show):
        '''
        Sets the label visibility given the label reference or name and the
        visibility state.
        labels may be a single reference, a single name (str), or a list
        mixing either; show (bool) is applied to every matching label.
        '''
        if (labels.__class__.__name__.lower() == 'list'):
            # we've been given a list of label references or names
            for label in labels:
                if (label.__class__.__name__.lower() == 'str'):
                    # assume this is a label name
                    labelRefs = self.getLabelsFromName(label)
                    for ref in labelRefs:
                        ref.setVisible(show)
                else:
                    # assume this is a label reference
                    try:
                        label.setVisible(show)
                    except:
                        # bad reference - not in our list of labels
                        pass
        else:
            # we've been given a single label name or reference
            if (labels.__class__.__name__.lower() == 'str'):
                # assume this is a label name
                labelRefs = self.getLabelsFromName(labels)
                for ref in labelRefs:
                    ref.setVisible(show)
            else:
                # assume this is a label reference
                try:
                    labels.setVisible(show)
                except:
                    # bad reference - not in our list of labels
                    pass
def showLabels(self, labels=None):
"""
showLabels makes the provided label or labels visible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are visible.
"""
if (labels == None):
labels = self.labels
self.setLabelVisible(labels, True)
def hideLabels(self, labels=None):
"""
hideLabels makes the provided label or labels invisible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are hidden.
"""
if (labels == None):
labels = self.labels
self.setLabelVisible(labels, False)
'''
The following methods operate on the QIVPolygonItem object. See that
class for calling details.
'''
| 40.86478 | 102 | 0.5596 | """
Rick Towler
Midwater Assessment and Conservation Engineering
NOAA Alaska Fisheries Science Center
rick.towler@noaa.gov
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from QIVPolygonItem import QIVPolygonItem
from QIVMarkerText import QIVMarkerText
class QIVPolygon(QGraphicsItemGroup):
"""
QIVPolygon implememts open and closed polygon items with simplified vertex
labeling. The labels are implemented by QIVMarkerText, are non-scaling,
and provide the ability to justify and offset labels from the vertex anchor.
If you only need a simple polygon object without labeling, you can use
QIVPolygonItem directly.
If a polygon is specified as "open" the last vertex is not connected
the first and the polygon cannot be filled. You can also think of open
polygons as polylines. "Closed" polygons do have their last vertext connected
to the first. Closed polygons can be filled by setting the fill keyword.
QIVPolygon Arguments:
vertices - The polygon vertices as:
A list of QPoint or QpointF objects defining the vertices
A list of [x,y] pairs (i.e. [[x,y],[x,y],[x,y],...]
A QRect or QRectF object
color - a 3 element list or tuple containing the RGB triplet
specifying the outline color of the polygon
thickness - A float specifying the outline thickness of the polygon.
alpha - A integer specifying the opacity of the polygon. 0 is transparent
and 255 is solid.
linestyle - '=' for solid, '-' for dashed, and '.' for dotted.
fill - a 3 element list or tuple containing the RGB triplet
specifying the fill color of the polygon. Set to None for
no fill.
"""
    def __init__(self, vertices, color=[220,10,10], thickness=1.0,
                 alpha=255, linestyle='=', fill=None, selectable=True,
                 movable=False, selectThickness=4.0, selectColor=None,
                 closed=True, view=None, parent=None, name='QIVPolygon'):
        """
        Construct the polygon group: one QIVPolygonItem child plus an
        (initially empty) list of QIVMarkerText labels. See the class
        docstring for argument details.
        NOTE(review): color=[220,10,10] is a mutable default argument; it
        appears to be passed through to QIVPolygonItem unmodified, but
        confirm nothing mutates it downstream.
        """
        super(QIVPolygon, self).__init__(parent)
        self.name = name
        self.view = view
        self.polygon = None
        self.labels = []
        # create the polygon item - note that we make the item non-selectable and non-movable
        # since we want to select/move the "this" object (the QGraphicsItemGroup) and not the
        # items contained in it.
        self.polygon = QIVPolygonItem(vertices, color=color, thickness=thickness,
                alpha=alpha, linestyle=linestyle, fill=fill, selectable=False,
                selectThickness=selectThickness, selectColor=selectColor,
                movable=False, closed=closed, parent=self)
        # and add it to our item group
        self.addToGroup(self.polygon)
        # now set selectable/movable flags for the itemgroup
        self.setFlag(QGraphicsItem.ItemIsSelectable, selectable)
        self.setFlag(QGraphicsItem.ItemIsMovable, movable)
def getLabelsFromName(self, labelName):
'''
returns a list of QIVMarkerText references that share the name provided in the
labelName argument.
'''
labelReferences = []
# find label(s) given the label name
for label in self.labels:
if (label.name == labelName):
labelReferences.append(label)
return labelReferences
def removeLabel(self, labels):
'''
removeLabel removes a marker label given the label reference or labelName.
You can also pass a list of references or names. If the label name is provided,
all labels with that name will be removed.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given a single item - check if it is a name or ref
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
def removeAllLabels(self):
'''
removeAllLabels is a convenience method to clear all labels associated with this mark.
'''
self.removeLabel(self.labels)
def getLabels(self):
'''
getLabels returns the list of labels associated with this mark
'''
return self.labels
def addLabel(self, vertex, text, size=10, font='helvetica', italics=False, weight=-1,
color=[0,0,0], alpha=255, halign='left', valign='top', name='QIVPolygonLabel',
offset=None):
"""
Add a label to the polygon at a specified vertex. Labels are children of the polygon.
vertex (int) - The 0 based vertex number to attach the label to.
text (string) - The text to add to the dimension line.
offset (QPointF) - An offset from your position. The units are pixels at the
image's native resolution. This gets muddled when used with
classes that transform coordinates, especially QMapViewer.
size (int) - The text size, in point size
font (string) - A string containing the font family to use. Either stick
to the basics with this (i.e. "times", "helvetica") or
consult the QFont docs.
italics (bool) - Set to true to italicise the font.
weight (int) - Set to an integer in the range 0-99. 50 is normal, 75 is bold.
color (list) - A 3 element list or tuple containing the RGB triplet
specifying the color of the text.
alpha (int) - An integer specifying the opacity of the text. 0 is transparent
and 255 is solid.
halign (string) - Set this value to set the horizontal anchor point. Values are:
'left' - Sets the anchor to the left side of the text
'center' - Sets the anchor to the middle of the text
'right' - Sets the anchor to the right side of the text
valign (string) - Set this value to set the vertical anchor point. Values are:
'top' - Sets the anchor to the top of the text
'center' - Sets the anchor to the middle of the text
'bottom' - Sets the anchor to the bottom of the text
name (string) - Set this to the name associated with the text object. The name
can be used to differentiate between your text objects.
"""
if (offset == None) or (offset == []):
offset = QPointF(0,0)
# get the position given the vertex index
position = self.polygon[vertex]
# create a QIVMarkerText associated with the provided mark/line
textItem = QIVMarkerText(position, text, offset=offset, size=size, font=font, italics=italics,
weight=weight, color=color, alpha=alpha, halign=halign,
valign=valign, name=name, view=self.view)
# add the label to our list of labels
self.labels.append(textItem)
self.addToGroup(textItem)
def setLabelText(self, labels, text):
'''
Sets the label text given the label reference or name and text.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
ref.setText(text)
else:
# assume this is a label reference
try:
label.setText(text)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(labels)
for ref in labelRefs:
ref.setText(text)
else:
# assume this is a label reference
try:
labels.setText(text)
except:
# bad reference - not in our list of labels
pass
def setLabelVisible(self, labels, show):
'''
Sets the label visibility given the label reference or name and the
visibility state.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
ref.setVisible(show)
else:
# assume this is a label reference
try:
label.setVisible(show)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(labels)
for ref in labelRefs:
ref.setVisible(show)
else:
# assume this is a label reference
try:
labels.setVisible(show)
except:
# bad reference - not in our list of labels
pass
def showLabels(self, labels=None):
"""
showLabels makes the provided label or labels visible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are visible.
"""
if (labels == None):
labels = self.labels
self.setLabelVisible(labels, True)
def hideLabels(self, labels=None):
"""
hideLabels makes the provided label or labels invisible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are hidden.
"""
if (labels == None):
labels = self.labels
self.setLabelVisible(labels, False)
    '''
    The following methods operate on the QIVPolygonItem object. See that
    class for calling details.
    '''
    def setColor(self, *args, **kwargs):
        '''Set the polygon outline color (delegates to QIVPolygonItem).'''
        self.polygon.setColor(*args, **kwargs)
    def setSelectColor(self, *args, **kwargs):
        '''Set the color used when the polygon is selected (delegated).'''
        self.polygon.setSelectColor(*args, **kwargs)
    def setFill(self, *args, **kwargs):
        '''Set the polygon fill (delegates to QIVPolygonItem).'''
        self.polygon.setFill(*args, **kwargs)
    def setSelected(self, *args):
        '''Set the polygon selection state (delegates to QIVPolygonItem).'''
        self.polygon.setSelected(*args)
    def isSelected(self):
        '''Return the polygon selection state (delegates to QIVPolygonItem).'''
        return self.polygon.isSelected()
    def setThickness(self, *args):
        '''Set the polygon outline thickness (delegated).'''
        self.polygon.setThickness(*args)
    def setSelectThickness(self, *args):
        '''Set the outline thickness used when selected (delegated).'''
        self.polygon.setSelectThickness(*args)
    def setAlpha(self, *args, **kwargs):
        '''Set the polygon opacity (delegates to QIVPolygonItem).'''
        self.polygon.setAlpha(*args, **kwargs)
| 1,661 | 0 | 243 |
46e73a0e0ce396c151253878d23b9e5441c56130 | 685 | py | Python | neobabix/strategies/strategy.py | tistaharahap/neo-babix | 96a34dde744b46a61ff5795622ab1336f289f99e | [
"MIT"
] | 5 | 2020-05-23T14:47:48.000Z | 2021-12-19T03:00:17.000Z | neobabix/strategies/strategy.py | tistaharahap/neo-babix | 96a34dde744b46a61ff5795622ab1336f289f99e | [
"MIT"
] | null | null | null | neobabix/strategies/strategy.py | tistaharahap/neo-babix | 96a34dde744b46a61ff5795622ab1336f289f99e | [
"MIT"
] | 2 | 2020-05-02T08:44:58.000Z | 2021-05-23T14:19:44.000Z | from abc import ABC, abstractmethod
import enum
import numpy as np
from logging import Logger
| 22.833333 | 136 | 0.639416 | from abc import ABC, abstractmethod
import enum
import numpy as np
from logging import Logger
class Actions(enum.Enum):
    """Trading action emitted by a strategy: go long, go short, or do nothing."""
    LONG = 1
    SHORT = -1
    NOTHING = 0
class Strategy(ABC):
    """Abstract base class for trading strategies.

    Concrete strategies receive OHLCV candle arrays and must implement
    ``filter`` to decide which action to take.
    """
    __name__ = 'Neobabix Strategy'
    def __init__(self, opens: np.ndarray, highs: np.ndarray, lows: np.ndarray, closes: np.ndarray, volumes: np.ndarray, logger: Logger):
        """Store the OHLCV candle arrays and the logger used for debug output."""
        self.opens = opens
        self.highs = highs
        self.lows = lows
        self.closes = closes
        self.volumes = volumes
        self.logger = logger
    def debug(self, message):
        """Log *message* at debug level, prefixed with the strategy name."""
        self.logger.debug(f'{self.__name__}: {message}')
    @abstractmethod
    def filter(self) -> Actions:
        """Inspect the candle data and return the Action to take."""
        pass
| 360 | 183 | 46 |
bc65ea5c60b4a4db85af399e0fa2da6593e200ef | 3,119 | py | Python | meracanapi/apitelemac/apitelemac.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | meracanapi/apitelemac/apitelemac.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | meracanapi/apitelemac/apitelemac.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | import os
from telapy.api.t2d import Telemac2d
from telapy.api.t3d import Telemac3d
from telapy.api.wac import Tomawac
from telapy.api.sis import Sisyphe
from mpi4py import MPI
import numpy as np
from .apitelemacaws import ApiTelemacAWS
modules = {
"telemac2d":Telemac2d,
"telemac3d":Telemac3d,
"tomawac":Tomawac,
"sisyphe":Sisyphe
}
VARNAMES={
"U":"VELOCITYU",
"V":"VELOCITYV",
"H":"WATERDEPTH",
"S":"FREESURFACE",
"B":"BOTTOMELEVATION",
}
ApiTelemac.__doc__=ApiTelemacAWS.__doc__ | 27.848214 | 121 | 0.6252 | import os
from telapy.api.t2d import Telemac2d
from telapy.api.t3d import Telemac3d
from telapy.api.wac import Tomawac
from telapy.api.sis import Sisyphe
from mpi4py import MPI
import numpy as np
from .apitelemacaws import ApiTelemacAWS
# Solver name (item['module'] from the job record) -> telapy driver class.
modules = {
    "telemac2d":Telemac2d,
    "telemac3d":Telemac3d,
    "tomawac":Tomawac,
    "sisyphe":Sisyphe
}
# TELEMAC single-letter variable code -> long variable name.
VARNAMES={
    "U":"VELOCITYU",
    "V":"VELOCITYV",
    "H":"WATERDEPTH",
    "S":"FREESURFACE",
    "B":"BOTTOMELEVATION",
}
class ApiTelemac(ApiTelemacAWS):
    """Runs a TELEMAC solver using a case downloaded through ApiTelemacAWS."""

    def __init__(self, **kwargs):
        # NOTE(review): this was misspelled ``__ini__`` and therefore never
        # ran; renamed so the constructor actually forwards kwargs.
        super().__init__(**kwargs)

    def run(self, id, uploadNCA=False):
        """
        Run telemac using cas and files from DynamoDB and S3.

        Parameters
        ----------
        id:str
          DynamoDB Id
        uploadNCA:bool,False
          Upload results on the fly as NCA to S3
        """
        casFile, item = self.download(id=id)
        os.chdir(os.path.dirname(casFile))
        basename = os.path.basename(casFile)

        comm = MPI.COMM_WORLD
        # Instantiate the requested solver (telemac2d/3d, tomawac, sisyphe).
        study = modules[item['module']](basename, user_fortran='user_fortran', comm=comm, stdout=0)
        study.set_case()
        study.init_state_default()

        ntimesteps = study.get("MODEL.NTIMESTEPS")
        # Fall back to the dictionary defaults when the cas file omits a key.
        ilprintout = study.cas.values.get("LISTING PRINTOUT PERIOD", study.cas.dico.data["LISTING PRINTOUT PERIOD"])
        igprintout = study.cas.values.get("GRAPHIC PRINTOUT PERIOD", study.cas.dico.data["GRAPHIC PRINTOUT PERIOD"])
        vars = study.cas.values.get("VARIABLES FOR GRAPHIC PRINTOUTS", study.cas.dico.data["VARIABLES FOR GRAPHIC PRINTOUTS"])
        print(vars)

        def getFrame(istep):
            # Map a timestep index to its graphic-output frame index.
            return int(np.floor(float(istep) / igprintout))

        nframestep = getFrame(ntimesteps)

        if study.ncsize > 1:
            # Parallel run: recover the global number of points and the
            # local->global node index from KNOLG.
            # (np.int was removed in NumPy >= 1.24; use the builtin int.)
            data = np.zeros(study.ncsize, dtype=int)
            data[comm.rank] = np.max(study.get_array("MODEL.KNOLG"))
            npoin = np.max(comm.allreduce(data, MPI.SUM))
            index = study.get_array("MODEL.KNOLG").astype(int) - 1
        else:
            npoin = study.get("MODEL.NPOIN")
            index = np.arange(npoin, dtype=int)

        for _ in range(ntimesteps):
            study.run_one_time_step()
            if _ % ilprintout == 0 and comm.rank == 0:
                # Listing printout.
                print(_)
            if _ % igprintout == 0 and comm.rank == 0:
                # Report progress to AWS.
                self.updateProgress(id=id, iframe=getFrame(_), nframe=nframestep)
            if _ % igprintout == 0 and uploadNCA:
                # Save the current frame to AWS.
                for var in vars:
                    values = np.zeros(npoin)
                    name = VARNAMES[var]
                    if name == "FREESURFACE":
                        None  # placeholder kept from the original code
                    values[index] = study.get_array("MODEL.{}".format(var))
                    values = comm.allreduce(values, MPI.SUM)
                    if comm.rank == 0:
                        None  # placeholder kept from the original code
                        # nca["h","s",getFrame(_)]=values

        if comm.rank == 0:
            self.updateProgress(id=id, iframe=nframestep, nframe=nframestep)
            # TODO check nframe, save last frame?????

        study.finalize()
        del study
        return None
ApiTelemac.__doc__=ApiTelemacAWS.__doc__ | 88 | 2,506 | 23 |
e0a44b62365354efd0cf715c0841ca7594cf2ba4 | 33,007 | py | Python | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/ite/cost/x_analytical_values.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | 5 | 2021-01-06T16:49:22.000Z | 2021-02-19T05:34:27.000Z | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/ite/cost/x_analytical_values.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/ite/cost/x_analytical_values.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | """ Analytical expressions of information theoretical quantities. """
from scipy.linalg import det, inv
from numpy import log, prod, absolute, exp, pi, trace, dot, cumsum, \
hstack, ix_, sqrt, eye, diag, array, sum
from ite.shared import compute_h2
def analytical_value_h_shannon(distr, par):
    """ Analytical value of the Shannon entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution ('uniform' or 'normal').
    par : dictionary
        Parameters of the distribution. For 'uniform': par["a"],
        par["b"], par["l"] describe l x U[a,b]. For 'normal':
        par["cov"] is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Shannon entropy.

    """

    if distr == 'normal':
        cov = par["cov"]
        dim = cov.shape[0]
        # H = 1/2 log((2 pi e)^d |C|):
        return (dim * log(2 * pi) + dim + log(det(cov))) / 2
    if distr == 'uniform':
        # entropy of U[a,b] plus the log|det(l)| correction for y = l x:
        return log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    raise Exception('Distribution=?')
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    par1, par2 : dictionaries
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, c1 = par1['mean'], par1['cov']
    m2, c2 = par2['mean'], par2['cov']
    dim = len(m1)
    prec2 = inv(c2)  # precision matrix of the second Gaussian
    dm = m1 - m2
    quad = dot(dm, dot(prec2, dm))
    return (dim * log(2 * pi) + log(det(c2)) + trace(dot(prec2, c1)) +
            quad) / 2
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
    """ Analytical value of the KL divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    d : float
        Analytical value of the Kullback-Leibler divergence.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, c1 = par1['mean'], par1['cov']
    m2, c2 = par2['mean'], par2['cov']
    dim = len(m1)
    prec2 = inv(c2)
    dm = m1 - m2
    # closed form of KL(N(m1,c1) || N(m2,c2)):
    return (log(det(c2) / det(c1)) + trace(dot(prec2, c1)) +
            dot(dm, dot(prec2, dm)) - dim) / 2
def analytical_value_i_shannon(distr, par):
    """ Analytical value of mutual information for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["ds"]
        and par["cov"] are the vector of component dimensions and the
        joint covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Shannon mutual information.

    """

    if distr != 'normal':
        raise Exception('Distribution=?')
    c, ds = par["cov"], par["ds"]
    # starting index of each subspace: 0, d_1, d_1+d_2, ...
    starts = cumsum(hstack((0, ds[:-1])))
    # sum of the log-determinants of the marginal covariances:
    log_marginals = 0
    for start, d in zip(starts, ds):
        idx = range(start, start + d)
        log_marginals += log(det(c[ix_(idx, idx)]))
    return (log_marginals - log(det(c))) / 2
def analytical_value_h_renyi(distr, alpha, par):
    """ Analytical value of the Renyi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution ('uniform' or 'normal').
    alpha : float, alpha \ne 1
        Parameter of the Renyi entropy.
    par : dictionary
        Parameters of the distribution. For 'uniform': par["a"],
        par["b"], par["l"] describe l x U[a,b]. For 'normal':
        par["cov"] is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Renyi entropy.

    References
    ----------
    Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic
    distribution measure. Journal of Statistical Planning and Inference
    93: 51-69, 2001.

    """

    if distr == 'normal':
        cov = par["cov"]
        dim = cov.shape[0]
        # log of the Gaussian normalization constant:
        log_z = dim / 2 * log(2 * pi) + log(absolute(det(cov))) / 2
        return log_z - dim * log(alpha) / (2 * (1 - alpha))
    if distr == 'uniform':
        # for the uniform distribution the Renyi entropy equals the Shannon
        # entropy; the transformation rule of y = l x adds log|det(l)|:
        return log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    raise Exception('Distribution=?')
def analytical_value_h_tsallis(distr, alpha, par):
    """ Analytical value of the Tsallis entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution ('uniform' or 'normal').
    alpha : float, alpha \ne 1
        Parameter of the Tsallis entropy.
    par : dictionary
        Parameters of the distribution; see
        ``analytical_value_h_renyi`` for the accepted keys.

    Returns
    -------
    h : float
        Analytical value of the Tsallis entropy.

    """

    # compute via the Renyi entropy of the same distribution:
    renyi = analytical_value_h_renyi(distr, alpha, par)
    # transform the Renyi entropy into the Tsallis entropy:
    return (exp((1 - alpha) * renyi) - 1) / (1 - alpha)
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
    """ Analytical value of the probability product kernel.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    rho: float, >0
        Parameter of the probability product kernel.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    k : float
        Analytical value of the probability product kernel.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    dim = len(m1)
    # precision matrices and their combination:
    p1, p2 = inv(c1), inv(c2)
    c12 = inv(p1 + p2)
    eta = dot(p1, m1) + dot(p2, m2)
    quad = (dot(m1, dot(p1, m1)) + dot(m2, dot(p2, m2)) -
            dot(eta, dot(c12, eta)))
    return ((2 * pi) ** ((1 - 2 * rho) * dim / 2) * rho ** (-dim / 2) *
            sqrt(absolute(det(c12))) *
            (absolute(det(c1)) * absolute(det(c2))) ** (-rho / 2) *
            exp(-rho / 2 * quad))
def analytical_value_k_expected(distr1, distr2, kernel, par1, par2):
    """ Analytical value of the expected kernel for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    kernel: Kernel class.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    k : float
        Analytical value of the expected kernel.

    References
    ----------
    Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard
    Scholkopf. Learning from distributions via support measure machines.
    In Advances in Neural Information Processing Systems (NIPS), pages
    10-18, 2011.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    if kernel.name == 'RBF':
        dim = len(m1)
        gam = 1 / kernel.sigma ** 2
        dm = m1 - m2
        quad = dot(dot(dm, inv(c1 + c2 + eye(dim) / gam)), dm)
        return exp(-quad / 2) / \
            sqrt(absolute(det(gam * c1 + gam * c2 + eye(dim))))
    if kernel.name == 'polynomial':
        # closed forms are only available for exponents 2 and 3 with c = 1:
        base = dot(m1, m2) + 1
        cross = sum(c1 * c2) + dot(m1, dot(c2, m1)) + dot(m2, dot(c1, m2))
        if kernel.exponent == 2:
            if kernel.c != 1:
                raise Exception('The offset of the polynomial kernel' +
                                ' (c) should be one!')
            return base ** 2 + cross
        if kernel.exponent == 3:
            if kernel.c != 1:
                raise Exception('The offset of the polynomial kernel' +
                                ' (c) should be one!')
            return base ** 3 + 6 * dot(dot(c1, m1), dot(c2, m2)) + \
                3 * base * cross
        raise Exception('The exponent of the polynomial kernel ' +
                        'should be either 2 or 3!')
    raise Exception('Kernel=?')
def analytical_value_d_mmd(distr1, distr2, kernel, par1, par2):
    """ Analytical value of MMD for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    kernel: Kernel class.
    par1, par2 : dictionary-s
        Parameters of the distributions; see
        ``analytical_value_k_expected`` for the accepted keys.

    Returns
    -------
    d : float
        Analytical value of MMD.

    """

    # MMD^2 = E[k(x,x')] + E[k(y,y')] - 2 E[k(x,y)]:
    k11 = analytical_value_k_expected(distr1, distr1, kernel, par1, par1)
    k22 = analytical_value_k_expected(distr2, distr2, kernel, par2, par2)
    k12 = analytical_value_k_expected(distr1, distr2, kernel, par1, par2)
    return sqrt(k11 + k22 - 2 * k12)
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
    """ Analytical value of the Sharma-Mittal entropy.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float, 0 < alpha \ne 1
        Parameter of the Sharma-Mittal entropy.
    beta : float, beta \ne 1
        Parameter of the Sharma-Mittal entropy.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["cov"]
        is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Sharma-Mittal entropy.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics A:
    Mathematical and Theoretical, 45:032003, 2012.

    """

    if distr != 'normal':
        raise Exception('Distribution=?')
    cov = par['cov']
    dim = cov.shape[0]
    # Gaussian normalization constant (2 pi)^{d/2} sqrt(|C|):
    z = (2 * pi) ** (dim / 2) * sqrt(absolute(det(cov)))
    return (z ** (1 - beta) /
            alpha ** (dim * (1 - beta) / (2 * (1 - alpha))) - 1) / (1 - beta)
def analytical_value_h_phi(distr, par, c):
    """ Analytical value of the Phi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    par : dictionary
        Parameters of the distribution. If distr = 'uniform': par["a"],
        par["b"] in U[a,b].
    c : float, >=1
        Parameter of the Phi-entropy: phi = lambda x: x**c

    Returns
    -------
    h : float
        Analytical value of the Phi entropy.

    """

    if distr != 'uniform':
        raise Exception('Distribution=?')
    # E[phi(p(x))] with p = 1/(b-a) on [a,b]:
    width = par['b'] - par['a']
    return width ** (-c)
def analytical_value_d_chi_square(distr1, distr2, par1, par2):
    """ Analytical value of the chi^2 divergence.

    Parameters
    ----------
    distr1, distr2 : str-s.
        Names of the distributions. Supported pairs:
        ('uniform', 'uniform') with distr1 = U[0,a] (a = par1['a']),
        distr2 = U[0,b] (b = par2['a']); and ('normalI', 'normalI')
        with distr1 = N(m1,I) (m1 = par1['mean']), distr2 = N(m2,I)
        (m2 = par2['mean']).

    Returns
    -------
    d : float
        Analytical value of the (Pearson) chi^2 divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. On the chi square and higher-order chi
    distances for approximating f-divergence. IEEE Signal Processing
    Letters, 2:10-13, 2014.

    """

    if distr1 == 'uniform' and distr2 == 'uniform':
        # ratio of the support volumes:
        return prod(par2['a']) / prod(par1['a']) - 1
    if distr1 == 'normalI' and distr2 == 'normalI':
        dm = par2['mean'] - par1['mean']
        return exp(dot(dm, dm)) - 1
    raise Exception('Distribution=?')
def analytical_value_d_l2(distr1, distr2, par1, par2):
    """ Analytical value of the L2 divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions. Supported: ('uniform', 'uniform'),
        with distr1 = U[0,a] (a = par1['a']) and distr2 = U[0,b]
        (b = par2['a']).

    Returns
    -------
    d : float
        Analytical value of the L2 divergence.

    """

    if distr1 != 'uniform' or distr2 != 'uniform':
        raise Exception('Distribution=?')
    vol1 = prod(par1['a'])
    vol2 = prod(par2['a'])
    # NOTE(review): this closed form assumes supp(distr2) = U[0,b] is
    # contained in supp(distr1) = U[0,a] (b <= a) -- confirm with callers.
    return sqrt(1 / vol2 - 1 / vol1)
def analytical_value_d_renyi(distr1, distr2, alpha, par1, par2):
    """ Analytical value of the Renyi divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    alpha : float, \ne 1
        Parameter of the Renyi divergence.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    d : float
        Analytical value of the Renyi divergence.

    References
    ----------
    Manuel Gil. On Renyi Divergence Measures for Continuous Alphabet
    Sources. Phd Thesis, Queen's University, 2011.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    cmix = alpha * c2 + (1 - alpha) * c1
    dm = m1 - m2
    quad = dot(dot(dm, inv(cmix)), dm)
    log_ratio = log(absolute(det(cmix)) /
                    (det(c1) ** (1 - alpha) * det(c2) ** alpha))
    return alpha * (quad / 2 - log_ratio / (2 * alpha * (alpha - 1)))
def analytical_value_d_tsallis(distr1, distr2, alpha, par1, par2):
    """ Analytical value of the Tsallis divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    alpha : float, \ne 1
        Parameter of the Tsallis divergence.
    par1, par2 : dictionary-s
        Parameters of the distributions; see
        ``analytical_value_d_renyi`` for the accepted keys.

    Returns
    -------
    d : float
        Analytical value of the Tsallis divergence.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    # compute via the Renyi divergence, then transform Renyi -> Tsallis:
    renyi = analytical_value_d_renyi(distr1, distr2, alpha, par1, par2)
    return (exp((alpha - 1) * renyi) - 1) / (alpha - 1)
def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
                                     par2):
    """ Analytical value of the Sharma-Mittal divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    alpha : float, 0 < alpha \ne 1
        Parameter of the Sharma-Mittal divergence.
    beta : float, beta \ne 1
        Parameter of the Sharma-Mittal divergence.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    d : float
        Analytical value of the Sharma-Mittal divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics A:
    Mathematical and Theoretical, 45:032003, 2012.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    cmix = inv(alpha * inv(c1) + (1 - alpha) * inv(c2))
    dm = m1 - m2
    # Jensen difference divergence:
    jd = (log(absolute(det(c1)) ** alpha * absolute(det(c2)) ** (1 - alpha) /
              absolute(det(cmix))) +
          alpha * (1 - alpha) * dot(dot(dm, inv(cmix)), dm)) / 2
    return (exp(-jd) ** ((1 - beta) / (1 - alpha)) - 1) / (beta - 1)
def analytical_value_d_bregman(distr1, distr2, alpha, par1, par2):
    """ Analytical value of the Bregman divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions. Supported: ('uniform', 'uniform'),
        with distr1 = U[0,a] (a = par1['a']) and distr2 = U[0,b]
        (b = par2['a']).
    alpha : float, \ne 1
        Parameter of the Bregman divergence.

    Returns
    -------
    d : float
        Analytical value of the Bregman divergence.

    """

    if distr1 != 'uniform' or distr2 != 'uniform':
        raise Exception('Distribution=?')
    vol_p = prod(par1['a'])
    vol_q = prod(par2['a'])
    # (vol_p^{1-alpha} - vol_q^{1-alpha}) / (alpha - 1):
    return (vol_p ** (1 - alpha) - vol_q ** (1 - alpha)) / (alpha - 1)
def analytical_value_d_jensen_renyi(distr1, distr2, w, par1, par2):
    """ Analytical value of the Jensen-Renyi divergence (alpha = 2).

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    w : vector, w[i] > 0 (for all i), sum(w) = 1
        Weight used in the Jensen-Renyi divergence.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        distr-i = N(m_i, s_i^2 I) with m_i = par-i['mean'],
        s_i = par-i['std'].

    Returns
    -------
    d : float
        Analytical value of the Jensen-Renyi divergence.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    # quadratic Renyi entropy of the w-mixture:
    h_mix = compute_h2(w, (m1, m2), (s1, s2))
    # w-weighted sum of the individual quadratic Renyi entropies:
    h_avg = (w[0] * compute_h2((1,), (m1,), (s1,)) +
             w[1] * compute_h2((1,), (m2,), (s2,)))
    return h_mix - h_avg
def analytical_value_i_renyi(distr, alpha, par):
    """ Analytical value of the Renyi mutual information.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float
        Parameter of the Renyi mutual information.
    par : dictionary
        Parameters of the distribution. If distr = 'normal': par["cov"]
        is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.

    """

    if distr != 'normal':
        raise Exception('Distribution=?')
    c = par["cov"]
    var_diag = diag(c)
    t1 = -alpha / 2 * log(det(c))
    t2 = -(1 - alpha) / 2 * log(prod(var_diag))
    t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / var_diag))) / 2
    return (t1 + t2 - t3) / (alpha - 1)
def analytical_value_k_ejr1(distr1, distr2, u, par1, par2):
    """ Analytical value of the Jensen-Renyi kernel-1 (alpha = 2 fixed).

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    u : float, >0
        Parameter of the Jensen-Renyi kernel-1.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        distr-i = N(m_i, s_i^2 I) with m_i = par-i['mean'],
        s_i = par-i['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    mix_w = array([1/2, 1/2])
    # quadratic Renyi entropy of the equal-weight mixture:
    h2_mix = compute_h2(mix_w, (m1, m2), (s1, s2))
    return exp(-u * h2_mix)
def analytical_value_k_ejr2(distr1, distr2, u, par1, par2):
    """ Analytical value of the Jensen-Renyi kernel-2 (alpha = 2 fixed).

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    u : float, >0
        Parameter of the Jensen-Renyi kernel-2.
    par1, par2 : dictionary-s
        Parameters of the distributions; see
        ``analytical_value_d_jensen_renyi`` for the accepted keys.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    mix_w = array([1/2, 1/2])
    # exponentiated negative Jensen-Renyi divergence:
    div = analytical_value_d_jensen_renyi(distr1, distr2, mix_w, par1, par2)
    return exp(-u * div)
def analytical_value_k_ejt1(distr1, distr2, u, par1, par2):
    """ Analytical value of the Jensen-Tsallis kernel-1 (alpha = 2 fixed).

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    u : float, >0
        Parameter of the Jensen-Tsallis kernel-1.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        distr-i = N(m_i, s_i^2 I) with m_i = par-i['mean'],
        s_i = par-i['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    (Renyi entropy)

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    mix_w = array([1/2, 1/2])
    # quadratic Renyi entropy of the mixture, mapped to Tsallis
    # (T2 = 1 - exp(-R2)):
    t2_mix = 1 - exp(-compute_h2(mix_w, (m1, m2), (s1, s2)))
    return exp(-u * t2_mix)
def analytical_value_k_ejt2(distr1, distr2, u, par1, par2):
    """ Analytical value of the Jensen-Tsallis kernel-2 (alpha = 2 fixed).

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    u : float, >0
        Parameter of the Jensen-Tsallis kernel-2.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        distr-i = N(m_i, s_i^2 I) with m_i = par-i['mean'],
        s_i = par-i['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    (analytical value of the Jensen-Renyi divergence)

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    mix_w = array([1/2, 1/2])
    # quadratic Tsallis entropy (T2 = 1 - exp(-R2)) of the mixture...
    t_mix = 1 - exp(-compute_h2(mix_w, (m1, m2), (s1, s2)))
    # ...and the weighted sum of the individual Tsallis entropies:
    t_avg = (mix_w[0] * (1 - exp(-compute_h2((1,), (m1,), (s1,)))) +
             mix_w[1] * (1 - exp(-compute_h2((1,), (m2,), (s2,)))))
    return exp(-u * (t_mix - t_avg))
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Analytical value of the Hellinger distance.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 = 'normal':
        par["mean"] and par["cov"] are the mean and covariance matrix.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.

    """

    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    dm = m1 - m2
    cavg = (c1 + c2) / 2
    # Bhattacharyya coefficient of two Gaussians
    # ("https://en.wikipedia.org/wiki/Hellinger_distance": Examples):
    bc = det(c1) ** (1 / 4) * det(c2) ** (1 / 4) / sqrt(det(cavg)) * \
        exp(-dot(dm, dot(inv(cavg), dm)) / 8)
    return sqrt(1 - bc)
def analytical_value_cond_h_shannon(distr, par):
    """ Analytical value of the conditional Shannon entropy.

    Uses the chain rule H(y1|y2) = H(y1,y2) - H(y2), where y2 is the
    trailing block of the joint variable.

    Parameters
    ----------
    distr : str-s
            Names of the distributions; 'normal'.
    par : dictionary
          Parameters of the distribution. If distr is 'normal': par["cov"]
          and par["dim1"] are the covariance matrix and the dimension of
          y1.

    Returns
    -------
    cond_h : float
             Analytical value of the conditional Shannon entropy.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    cov, dim1 = par['cov'], par['dim1']
    # joint entropy H(y1, y2), from the full covariance matrix:
    joint_entropy = analytical_value_h_shannon(distr, par)
    # marginal entropy H(y2) of the conditioning block
    # (lower-right (dim-dim1) x (dim-dim1) corner of the covariance):
    marginal_entropy = analytical_value_h_shannon(distr, {"cov": cov[dim1:, dim1:]})
    return joint_entropy - marginal_entropy
def analytical_value_cond_i_shannon(distr, par):
    """ Analytical value of the conditional Shannon mutual information.

    The last subspace (y^M) is the conditioning variable; the result is
    assembled from Shannon entropies as
    -H(joint) + sum_{m<M} H([y^m, y^M]) - (M-2) * H(y^M).

    Parameters
    ----------
    distr : str-s
            Names of the distributions; 'normal'.
    par : dictionary
          Parameters of the distribution. If distr is 'normal':
          par["cov"] and par["ds"] are the (joint) covariance matrix and
          the vector of subspace dimensions.
    Returns
    -------
    cond_i : float
             Analytical value of the conditional Shannon mutual
             information.
    """
    # initialization:
    ds = par['ds']      # subspace dimensions; the LAST subspace conditions
    len_ds = len(ds)    # M = number of subspaces
    # 0,d_1,d_1+d_2,...,d_1+...+d_M; starting indices of the subspaces:
    cum_ds = cumsum(hstack((0, ds[:-1])))
    # coordinate indices covering the conditioning (last) subspace:
    idx_condition = range(cum_ds[len_ds - 1],
                          cum_ds[len_ds - 1] + ds[len_ds - 1])
    if distr == 'normal':
        c = par['cov']
        # h_joint: entropy of the full joint distribution
        h_joint = analytical_value_h_shannon(distr, par)
        # h_cross: sum over m of H([y^m, y^M]), each computed from the
        # corresponding marginal covariance block
        h_cross = 0
        for m in range(len_ds-1):  # non-conditioning subspaces
            idx_m = range(cum_ds[m], cum_ds[m] + ds[m])
            idx_m_and_condition = hstack((idx_m, idx_condition))
            # NOTE: `par` is deliberately rebound to the marginal here;
            # the original joint `par` is no longer needed below.
            par = {"cov": c[ix_(idx_m_and_condition, idx_m_and_condition)]}
            h_cross += analytical_value_h_shannon(distr, par)
        # h_condition: entropy of the conditioning subspace alone
        par = {"cov": c[ix_(idx_condition, idx_condition)]}
        h_condition = analytical_value_h_shannon(distr, par)
        cond_i = -h_joint + h_cross - (len_ds - 2) * h_condition
    else:
        raise Exception('Distribution=?')
    return cond_i
| 31.6159 | 76 | 0.523525 | """ Analytical expressions of information theoretical quantities. """
from scipy.linalg import det, inv
from numpy import log, prod, absolute, exp, pi, trace, dot, cumsum, \
hstack, ix_, sqrt, eye, diag, array, sum
from ite.shared import compute_h2
def analytical_value_h_shannon(distr, par):
""" Analytical value of the Shannon entropy for the given distribution.
Parameters
----------
distr : str
Name of the distribution.
par : dictionary
Parameters of the distribution. If distr = 'uniform': par["a"],
par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
is the covariance matrix.
Returns
-------
h : float
Analytical value of the Shannon entropy.
"""
if distr == 'uniform':
# par = {"a": a, "b": b, "l": l}
h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
elif distr == 'normal':
# par = {"cov": c}
dim = par["cov"].shape[0] # =c.shape[1]
h = 1/2 * log((2 * pi * exp(1))**dim * det(par["cov"]))
# = 1/2 * log(det(c)) + d / 2 * log(2*pi) + d / 2
else:
raise Exception('Distribution=?')
return h
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
""" Analytical value of the cross-entropy for the given distributions.
Parameters
----------
distr1, distr2 : str
Name of the distributions.
par1, par2 : dictionaries
Parameters of the distribution. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
c : float
Analytical value of the cross-entropy.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
dim = len(m1)
invc2 = inv(c2)
diffm = m1 - m2
c = 1/2 * (dim * log(2*pi) + log(det(c2)) + trace(dot(invc2, c1)) +
dot(diffm, dot(invc2, diffm)))
else:
raise Exception('Distribution=?')
return c
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
""" Analytical value of the KL divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of the distributions.
par1, par2 : dictionary-s
Parameters of the distributions. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
d : float
Analytical value of the Kullback-Leibler divergence.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
dim = len(m1)
invc2 = inv(c2)
diffm = m1 - m2
d = 1/2 * (log(det(c2)/det(c1)) + trace(dot(invc2, c1)) +
dot(diffm, dot(invc2, diffm)) - dim)
else:
raise Exception('Distribution=?')
return d
def analytical_value_i_shannon(distr, par):
""" Analytical value of mutual information for the given distribution.
Parameters
----------
distr : str
Name of the distribution.
par : dictionary
Parameters of the distribution. If distr = 'normal': par["ds"],
par["cov"] are the vector of component dimensions and the (joint)
covariance matrix.
Returns
-------
i : float
Analytical value of the Shannon mutual information.
"""
if distr == 'normal':
c, ds = par["cov"], par["ds"]
# 0,d_1,d_1+d_2,...,d_1+...+d_{M-1}; starting indices of the
# subspaces:
cum_ds = cumsum(hstack((0, ds[:-1])))
i = 1
for m in range(len(ds)):
idx = range(cum_ds[m], cum_ds[m] + ds[m])
i *= det(c[ix_(idx, idx)])
i = log(i / det(c)) / 2
else:
raise Exception('Distribution=?')
return i
def analytical_value_h_renyi(distr, alpha, par):
""" Analytical value of the Renyi entropy for the given distribution.
Parameters
----------
distr : str
Name of the distribution.
alpha : float, alpha \ne 1
Parameter of the Renyi entropy.
par : dictionary
Parameters of the distribution. If distr = 'uniform': par["a"],
par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
is the covariance matrix.
Returns
-------
h : float
Analytical value of the Renyi entropy.
References
----------
Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic
distribution measure. Journal of Statistical Planning and Inference
93: 51-69, 2001.
"""
if distr == 'uniform':
# par = {"a": a, "b": b, "l": l}
# We also apply the transformation rule of the Renyi entropy in
# case of linear transformations:
h = log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
elif distr == 'normal':
# par = {"cov": c}
dim = par["cov"].shape[0] # =c.shape[1]
h = log((2*pi)**(dim / 2) * sqrt(absolute(det(par["cov"])))) -\
dim * log(alpha) / 2 / (1 - alpha)
else:
raise Exception('Distribution=?')
return h
def analytical_value_h_tsallis(distr, alpha, par):
""" Analytical value of the Tsallis entropy for the given distribution.
Parameters
----------
distr : str
Name of the distribution.
alpha : float, alpha \ne 1
Parameter of the Tsallis entropy.
par : dictionary
Parameters of the distribution. If distr = 'uniform': par["a"],
par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
is the covariance matrix.
Returns
-------
h : float
Analytical value of the Tsallis entropy.
"""
# Renyi entropy:
h = analytical_value_h_renyi(distr, alpha, par)
# Renyi entropy -> Tsallis entropy:
h = (exp((1 - alpha) * h) - 1) / (1 - alpha)
return h
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
""" Analytical value of the probability product kernel.
Parameters
----------
distr1, distr2 : str
Name of the distributions.
rho: float, >0
Parameter of the probability product kernel.
par1, par2 : dictionary-s
Parameters of the distributions. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
k : float
Analytical value of the probability product kernel.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
dim = len(m1)
# inv1, inv2, inv12:
inv1, inv2 = inv(c1), inv(c2)
inv12 = inv(inv1+inv2)
m12 = dot(inv1, m1) + dot(inv2, m2)
exp_arg = \
dot(m1, dot(inv1, m1)) + dot(m2, dot(inv2, m2)) -\
dot(m12, dot(inv12, m12))
k = (2 * pi)**((1 - 2 * rho) * dim / 2) * rho**(-dim / 2) *\
absolute(det(inv12))**(1 / 2) * \
absolute(det(c1))**(-rho / 2) * \
absolute(det(c2))**(-rho / 2) * exp(-rho / 2 * exp_arg)
else:
raise Exception('Distribution=?')
return k
def analytical_value_k_expected(distr1, distr2, kernel, par1, par2):
""" Analytical value of expected kernel for the given distributions.
Parameters
----------
distr1, distr2 : str
Names of the distributions.
kernel: Kernel class.
par1, par2 : dictionary-s
Parameters of the distributions. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
k : float
Analytical value of the expected kernel.
References
----------
Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard
Scholkopf. Learning from distributions via support measure machines.
In Advances in Neural Information Processing Systems (NIPS), pages
10-18, 2011.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
if kernel.name == 'RBF':
dim = len(m1)
gam = 1 / kernel.sigma ** 2
diffm = m1 - m2
exp_arg = dot(dot(diffm, inv(c1 + c2 + eye(dim) / gam)), diffm)
k = exp(-exp_arg / 2) / \
sqrt(absolute(det(gam * c1 + gam * c2 + eye(dim))))
elif kernel.name == 'polynomial':
if kernel.exponent == 2:
if kernel.c == 1:
k = (dot(m1, m2) + 1)**2 + sum(c1 * c2) + \
dot(m1, dot(c2, m1)) + dot(m2, dot(c1, m2))
else:
raise Exception('The offset of the polynomial kernel' +
' (c) should be one!')
elif kernel.exponent == 3:
if kernel.c == 1:
k = (dot(m1, m2) + 1)**3 + \
6 * dot(dot(c1, m1), dot(c2, m2)) + \
3 * (dot(m1, m2) + 1) * (sum(c1 * c2) +
dot(m1, dot(c2, m1)) +
dot(m2, dot(c1, m2)))
else:
raise Exception('The offset of the polynomial kernel' +
' (c) should be one!')
else:
raise Exception('The exponent of the polynomial kernel ' +
'should be either 2 or 3!')
else:
raise Exception('Kernel=?')
else:
raise Exception('Distribution=?')
return k
def analytical_value_d_mmd(distr1, distr2, kernel, par1, par2):
""" Analytical value of MMD for the given distributions.
Parameters
----------
distr1, distr2 : str
Names of the distributions.
kernel: Kernel class.
par1, par2 : dictionary-s
Parameters of the distributions. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
d : float
Analytical value of MMD.
"""
d_pp = analytical_value_k_expected(distr1, distr1, kernel, par1, par1)
d_qq = analytical_value_k_expected(distr2, distr2, kernel, par2, par2)
d_pq = analytical_value_k_expected(distr1, distr2, kernel, par1, par2)
d = sqrt(d_pp + d_qq - 2 * d_pq)
return d
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
""" Analytical value of the Sharma-Mittal entropy.
Parameters
----------
distr : str
Name of the distribution.
alpha : float, 0 < alpha \ne 1
Parameter of the Sharma-Mittal entropy.
beta : float, beta \ne 1
Parameter of the Sharma-Mittal entropy.
par : dictionary
Parameters of the distribution. If distr = 'normal' : par["cov"]
= covariance matrix.
Returns
-------
h : float
Analytical value of the Sharma-Mittal entropy.
References
----------
Frank Nielsen and Richard Nock. A closed-form expression for the
Sharma-Mittal entropy of exponential families. Journal of Physics A:
Mathematical and Theoretical, 45:032003, 2012.
"""
if distr == 'normal':
# par = {"cov": c}
c = par['cov']
dim = c.shape[0] # =c.shape[1]
h = (((2*pi)**(dim / 2) * sqrt(absolute(det(c))))**(1 - beta) /
alpha**(dim * (1 - beta) / (2 * (1 - alpha))) - 1) / \
(1 - beta)
else:
raise Exception('Distribution=?')
return h
def analytical_value_h_phi(distr, par, c):
""" Analytical value of the Phi entropy for the given distribution.
Parameters
----------
distr : str
Name of the distribution.
par : dictionary
Parameters of the distribution. If distr = 'uniform': par.a,
par.b in U[a,b].
c : float, >=1
Parameter of the Phi-entropy: phi = lambda x: x**c
Returns
-------
h : float
Analytical value of the Phi entropy.
"""
if distr == 'uniform':
a, b = par['a'], par['b']
h = 1 / (b-a)**c
else:
raise Exception('Distribution=?')
return h
def analytical_value_d_chi_square(distr1, distr2, par1, par2):
""" Analytical value of chi^2 divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s.
Names of distributions.
par1, par2 : dictionary-s.
Parameters of distributions. If (distr1, distr2) =
('uniform', 'uniform'), then both distributions are
uniform: distr1 = U[0,a] with a = par1['a'], distr2 =
U[0,b] with b = par2['a']. If (distr1, distr2) =
('normalI', 'normalI'), then distr1 = N(m1,I) where m1 =
par1['mean'], distr2 = N(m2,I), where m2 = par2['mean'].
Returns
-------
d : float
Analytical value of the (Pearson) chi^2 divergence.
References
----------
Frank Nielsen and Richard Nock. On the chi square and higher-order chi
distances for approximating f-divergence. IEEE Signal Processing
Letters, 2:10-13, 2014.
"""
if distr1 == 'uniform' and distr2 == 'uniform':
a = par1['a']
b = par2['a']
d = prod(b) / prod(a) - 1
elif distr1 == 'normalI' and distr2 == 'normalI':
m1 = par1['mean']
m2 = par2['mean']
diffm = m2 - m1
d = exp(dot(diffm, diffm)) - 1
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_l2(distr1, distr2, par1, par2):
""" Analytical value of the L2 divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('uniform', 'uniform'), then both distributions are
uniform: distr1 = U[0,a] with a = par1['a'], distr2 =
U[0,b] with b = par2['a'].
Returns
-------
d : float
Analytical value of the L2 divergence.
"""
if distr1 == 'uniform' and distr2 == 'uniform':
a = par1['a']
b = par2['a']
d = sqrt(1 / prod(b) - 1 / prod(a))
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_renyi(distr1, distr2, alpha, par1, par2):
""" Analytical value of Renyi divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
alpha : float, \ne 1
Parameter of the Sharma-Mittal divergence.
par1, par2 : dictionary-s
Parameters of distributions.
If (distr1,distr2) = ('normal','normal'), then distr1 =
N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
par2['cov'].
Returns
-------
d : float
Analytical value of the Renyi divergence.
References
----------
Manuel Gil. On Renyi Divergence Measures for Continuous Alphabet
Sources. Phd Thesis, Queen’s University, 2011.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
mix_c = alpha * c2 + (1 - alpha) * c1
diffm = m1 - m2
d = alpha * (1/2 * dot(dot(diffm, inv(mix_c)), diffm) -
1 / (2 * alpha * (alpha - 1)) *
log(absolute(det(mix_c)) /
(det(c1)**(1 - alpha) * det(c2)**alpha)))
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_tsallis(distr1, distr2, alpha, par1, par2):
""" Analytical value of Tsallis divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
alpha : float, \ne 1
Parameter of the Sharma-Mittal divergence.
par1, par2 : dictionary-s
Parameters of distributions.
If (distr1,distr2) = ('normal','normal'), then distr1 =
N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
par2['cov'].
Returns
-------
d : float
Analytical value of the Tsallis divergence.
"""
if distr1 == 'normal' and distr2 == 'normal':
d = analytical_value_d_renyi(distr1, distr2, alpha, par1, par2)
d = (exp((alpha - 1) * d) - 1) / (alpha - 1)
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
par2):
""" Analytical value of the Sharma-Mittal divergence.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
alpha : float, 0 < alpha \ne 1
Parameter of the Sharma-Mittal divergence.
beta : float, beta \ne 1
Parameter of the Sharma-Mittal divergence.
par1, par2 : dictionary-s
Parameters of distributions.
If (distr1,distr2) = ('normal','normal'), then distr1 =
N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
par2['cov'].
Returns
-------
D : float
Analytical value of the Tsallis divergence.
References
----------
Frank Nielsen and Richard Nock. A closed-form expression for the
Sharma-Mittal entropy of exponential families. Journal of Physics A:
Mathematical and Theoretical, 45:032003, 2012.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
c = inv(alpha * inv(c1) + (1 - alpha) * inv(c2))
diffm = m1 - m2
# Jensen difference divergence, c2:
j = (log(absolute(det(c1))**alpha * absolute(det(c2))**(1 -
alpha) /
absolute(det(c))) + alpha * (1 - alpha) *
dot(dot(diffm, inv(c)), diffm)) / 2
c2 = exp(-j)
d = (c2**((1 - beta) / (1 - alpha)) - 1) / (beta - 1)
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_bregman(distr1, distr2, alpha, par1, par2):
""" Analytical value of Bregman divergence for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
alpha : float, \ne 1
Parameter of the Bregman divergence.
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('uniform', 'uniform'), then both distributions are
uniform: distr1 = U[0,a] with a = par1['a'], distr2 =
U[0,b] with b = par2['a'].
Returns
-------
d : float
Analytical value of the Bregman divergence.
"""
if distr1 == 'uniform' and distr2 == 'uniform':
a = par1['a']
b = par2['a']
d = \
-1 / (alpha - 1) * prod(b)**(1 - alpha) +\
1 / (alpha - 1) * prod(a)**(1 - alpha)
else:
raise Exception('Distribution=?')
return d
def analytical_value_d_jensen_renyi(distr1, distr2, w, par1, par2):
""" Analytical value of the Jensen-Renyi divergence.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
w : vector, w[i] > 0 (for all i), sum(w) = 1
Weight used in the Jensen-Renyi divergence.
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('normal', 'normal'), then both distributions are normal:
distr1 = N(m1,s1^2 I) with m1 = par1['mean'], s1 =
par1['std'], distr2 = N(m2,s2^2 I) with m2 =
par2['mean'], s2 = par2['std'].
Returns
-------
d : float
Analytical value of the Jensen-Renyi divergence.
References
----------
Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
Gaussians and Applications to Group-Wise Shape Registration. Medical
Image Computing and Computer-Assisted Intervention, 12: 648–655, 2009.
"""
if distr1 == 'normal' and distr2 == 'normal':
m1, s1 = par1['mean'], par1['std']
m2, s2 = par2['mean'], par2['std']
term1 = compute_h2(w, (m1, m2), (s1, s2))
term2 = \
w[0] * compute_h2((1,), (m1,), (s1,)) +\
w[1] * compute_h2((1,), (m2,), (s2,))
# H2(\sum_i wi yi) - \sum_i w_i H2(yi), where H2 is the quadratic
# Renyi entropy:
d = term1 - term2
else:
raise Exception('Distribution=?')
return d
def analytical_value_i_renyi(distr, alpha, par):
""" Analytical value of the Renyi mutual information.
Parameters
----------
distr : str
Name of the distribution.
alpha : float
Parameter of the Renyi mutual information.
par : dictionary
Parameters of the distribution. If distr = 'normal': par["cov"]
is the covariance matrix.
Returns
-------
i : float
Analytical value of the Renyi mutual information.
"""
if distr == 'normal':
c = par["cov"]
t1 = -alpha / 2 * log(det(c))
t2 = -(1 - alpha) / 2 * log(prod(diag(c)))
t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / diag(c)))) / 2
i = 1 / (alpha - 1) * (t1 + t2 - t3)
else:
raise Exception('Distribution=?')
return i
def analytical_value_k_ejr1(distr1, distr2, u, par1, par2):
""" Analytical value of the Jensen-Renyi kernel-1.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
u : float, >0
Parameter of the Jensen-Renyi kernel-1 (alpha = 2: fixed).
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('normal', 'normal'), then both distributions are normal:
distr1 = N(m1,s1^2 I) with m1 = par1['mean'], s1 =
par1['std'], distr2 = N(m2,s2^2 I) with m2 =
par2['mean'], s2 = par2['std'].
References
----------
Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
Gaussians and Applications to Group-Wise Shape Registration. Medical
Image Computing and Computer-Assisted Intervention, 12: 648–655, 2009.
"""
if distr1 == 'normal' and distr2 == 'normal':
m1, s1 = par1['mean'], par1['std']
m2, s2 = par2['mean'], par2['std']
w = array([1/2, 1/2])
h = compute_h2(w, (m1, m2), (s1, s2)) # quadratic Renyi entropy
k = exp(-u * h)
else:
raise Exception('Distribution=?')
return k
def analytical_value_k_ejr2(distr1, distr2, u, par1, par2):
""" Analytical value of the Jensen-Renyi kernel-2.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
u : float, >0
Parameter of the Jensen-Renyi kernel-2 (alpha = 2: fixed).
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('normal', 'normal'), then both distributions are normal:
distr1 = N(m1,s1^2 I) with m1 = par1['mean'], s1 =
par1['std'], distr2 = N(m2,s2^2 I) with m2 =
par2['mean'], s2 = par2['std'].
"""
if distr1 == 'normal' and distr2 == 'normal':
w = array([1/2, 1/2])
d = analytical_value_d_jensen_renyi(distr1, distr2, w, par1, par2)
k = exp(-u * d)
else:
raise Exception('Distribution=?')
return k
def analytical_value_k_ejt1(distr1, distr2, u, par1, par2):
""" Analytical value of the Jensen-Tsallis kernel-1.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
u : float, >0
Parameter of the Jensen-Tsallis kernel-1 (alpha = 2: fixed).
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('normal', 'normal'), then both distributions are normal:
distr1 = N(m1,s1^2 I) with m1 = par1['mean'], s1 =
par1['std'], distr2 = N(m2,s2^2 I) with m2 =
par2['mean'], s2 = par2['std'].
References
----------
Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
Gaussians and Applications to Group-Wise Shape Registration. Medical
Image Computing and Computer-Assisted Intervention, 12: 648–655, 2009.
(Renyi entropy)
"""
if distr1 == 'normal' and distr2 == 'normal':
m1, s1 = par1['mean'], par1['std']
m2, s2 = par2['mean'], par2['std']
w = array([1/2, 1/2])
h = compute_h2(w, (m1, m2), (s1, s2)) # quadratic Renyi entropy
# quadratic Renyi entropy -> quadratic Tsallis entropy:
h = 1 - exp(-h)
k = exp(-u * h)
else:
raise Exception('Distribution=?')
return k
def analytical_value_k_ejt2(distr1, distr2, u, par1, par2):
""" Analytical value of the Jensen-Tsallis kernel-2.
Parameters
----------
distr1, distr2 : str-s
Names of distributions.
u : float, >0
Parameter of the Jensen-Tsallis kernel-2 (alpha = 2: fixed).
par1, par2 : dictionary-s
Parameters of distributions. If (distr1, distr2) =
('normal', 'normal'), then both distributions are normal:
distr1 = N(m1,s1^2 I) with m1 = par1['mean'], s1 =
par1['std'], distr2 = N(m2,s2^2 I) with m2 =
par2['mean'], s2 = par2['std'].
References
----------
Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
Gaussians and Applications to Group-Wise Shape Registration. Medical
Image Computing and Computer-Assisted Intervention, 12: 648–655, 2009.
(analytical value of the Jensen-Renyi divergence)
"""
if distr1 == 'normal' and distr2 == 'normal':
m1, s1 = par1['mean'], par1['std']
m2, s2 = par2['mean'], par2['std']
w = array([1/2, 1/2])
# quadratic Renyi entropy -> quadratic Tsallis entropy:
term1 = 1 - exp(-compute_h2(w, (m1, m2), (s1, s2)))
term2 = \
w[0] * (1 - exp(-compute_h2((1, ), (m1, ), (s1,)))) +\
w[1] * (1 - exp(-compute_h2((1,), (m2,), (s2,))))
# H2(\sum_i wi Yi) - \sum_i w_i H2(Yi), where H2 is the quadratic
# Tsallis entropy:
d = term1 - term2
k = exp(-u * d)
else:
raise Exception('Distribution=?')
return k
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
""" Analytical value of Hellinger distance for the given distributions.
Parameters
----------
distr1, distr2 : str-s
Names of the distributions.
par1, par2 : dictionary-s
Parameters of the distributions. If distr1 = distr2 =
'normal': par1["mean"], par1["cov"] and par2["mean"],
par2["cov"] are the means and the covariance matrices.
Returns
-------
d : float
Analytical value of the Hellinger distance.
"""
if distr1 == 'normal' and distr2 == 'normal':
# covariance matrices, expectations:
c1, m1 = par1['cov'], par1['mean']
c2, m2 = par2['cov'], par2['mean']
# "https://en.wikipedia.org/wiki/Hellinger_distance": Examples:
diffm = m1 - m2
avgc = (c1 + c2) / 2
inv_avgc = inv(avgc)
d = 1 - det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(avgc)) * \
exp(-dot(diffm, dot(inv_avgc, diffm))/8) # D^2
d = sqrt(d)
else:
raise Exception('Distribution=?')
return d
def analytical_value_cond_h_shannon(distr, par):
""" Analytical value of the conditional Shannon entropy.
Parameters
----------
distr : str-s
Names of the distributions; 'normal'.
par : dictionary
Parameters of the distribution. If distr is 'normal': par["cov"]
and par["dim1"] are the covariance matrix and the dimension of
y1.
Returns
-------
cond_h : float
Analytical value of the conditional Shannon entropy.
"""
if distr == 'normal':
# h12 (=joint entropy):
h12 = analytical_value_h_shannon(distr, par)
# h2 (=entropy of the conditioning variable):
c, dim1 = par['cov'], par['dim1'] # covariance matrix, dim(y1)
par = {"cov": c[dim1:, dim1:]}
h2 = analytical_value_h_shannon(distr, par)
cond_h = h12 - h2
else:
raise Exception('Distribution=?')
return cond_h
def analytical_value_cond_i_shannon(distr, par):
""" Analytical value of the conditional Shannon mutual information.
Parameters
----------
distr : str-s
Names of the distributions; 'normal'.
par : dictionary
Parameters of the distribution. If distr is 'normal':
par["cov"] and par["ds"] are the (joint) covariance matrix and
the vector of subspace dimensions.
Returns
-------
cond_i : float
Analytical value of the conditional Shannon mutual
information.
"""
# initialization:
ds = par['ds']
len_ds = len(ds)
# 0,d_1,d_1+d_2,...,d_1+...+d_M; starting indices of the subspaces:
cum_ds = cumsum(hstack((0, ds[:-1])))
idx_condition = range(cum_ds[len_ds - 1],
cum_ds[len_ds - 1] + ds[len_ds - 1])
if distr == 'normal':
c = par['cov']
# h_joint:
h_joint = analytical_value_h_shannon(distr, par)
# h_cross:
h_cross = 0
for m in range(len_ds-1): # non-conditioning subspaces
idx_m = range(cum_ds[m], cum_ds[m] + ds[m])
idx_m_and_condition = hstack((idx_m, idx_condition))
par = {"cov": c[ix_(idx_m_and_condition, idx_m_and_condition)]}
h_cross += analytical_value_h_shannon(distr, par)
# h_condition:
par = {"cov": c[ix_(idx_condition, idx_condition)]}
h_condition = analytical_value_h_shannon(distr, par)
cond_i = -h_joint + h_cross - (len_ds - 2) * h_condition
else:
raise Exception('Distribution=?')
return cond_i
| 0 | 0 | 0 |
65cb6d2a615858142bfa37132106bc4da0224211 | 5,646 | py | Python | IEX_29id/devices/mcp.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | null | null | null | IEX_29id/devices/mcp.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | 1 | 2021-11-10T02:00:41.000Z | 2021-11-11T03:02:23.000Z | IEX_29id/devices/mcp.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | 2 | 2021-09-28T21:19:47.000Z | 2021-10-12T20:51:43.000Z | from IEX_29id.utils.strings import ClearCalcOut
from time import sleep
from epics import caget, caput
from IEX_29id.scans.setup import Scan_FillIn, Scan_Go
from IEX_29id.devices.detectors import cts
def AD_ROI_SetUp(AD,ROInum,xcenter=500,ycenter=500,xsize=50,ysize=50,binX=1,binY=1):
    """Configure ROI plugin #ROInum of the areaDetector IOC `AD`.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA".  The ROI is an
    (xsize x ysize) box centred on (xcenter, ycenter), with optional
    binning, and its callbacks are enabled.
    """
    plugin = AD + ':ROI' + str(ROInum) + ':'
    # MinX/MinY address the box corner, so shift the requested centre
    # by half a size in each direction.
    fields = (('MinX', xcenter - xsize/2.0),
              ('MinY', ycenter - ysize/2.0),
              ('SizeX', xsize),
              ('SizeY', ysize),
              ('BinX', binX),
              ('BinY', binY),
              ('EnableCallbacks', 'Enable'))
    for field, value in fields:
        caput(plugin + field, value)
    # Report the callbacks state as read back from the IOC.
    print(plugin + ' - ' + caget(plugin + 'EnableCallbacks_RBV', as_string=True))
def AD_OVER_SetUp(AD,ROInum,OVERnum,linewidth=5,shape='Rectangle'):
    """Draw overlay #OVERnum of areaDetector IOC `AD`, tracking ROI #ROInum.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA".
    shape is one of 'Cross', 'Rectangle', 'Ellipse', 'Text'.
    """
    over = AD + ":Over1:" + str(OVERnum) + ":"
    roi = AD + ":ROI" + str(ROInum) + ":"
    caput(roi + 'EnableCallbacks', 'Enable')
    # Static overlay attributes: label, shape, pure-green colour, line width.
    for field, value in (("Name", "ROI" + str(ROInum)),
                         ("Shape", shape),
                         ("Red", 0),
                         ("Green", 255),
                         ("Blue", 0),
                         ('WidthX', linewidth),
                         ('WidthY', linewidth)):
        caput(over + field, value)
    # Link overlay position/size to the ROI readbacks ("CP" keeps them
    # updating on change) so the overlay follows the ROI.
    for link, rbv in (("PositionXLink", "MinX_RBV"),
                      ("SizeXLink", "SizeX_RBV"),
                      ("PositionYLink", "MinY_RBV"),
                      ("SizeYLink", "SizeY_RBV")):
        caput(over + link + ".DOL", roi + rbv + " CP")
    caput(over + "Use", "Yes")
def AD_ROI_SetUp(AD,ROInum,xcenter=500,ycenter=500,xsize=50,ysize=50,binX=1,binY=1):
    """Configure ROI plugin #ROInum of areaDetector IOC prefix `AD`.

    NOTE(review): this is a duplicate of an identical AD_ROI_SetUp
    defined earlier in this file; in Python the definition executed
    last wins, so one copy should eventually be removed.

    AD = "29id_ps4"
    AD = "29iddMPA"
    """
    # roiNUM=1 MPA_ROI_SetUp(535,539,50,50) center of MCP
    ADplugin=AD+':ROI'+str(ROInum)+':'
    # MinX/MinY are corner coordinates: shift the centre by half a size.
    xstart=xcenter-xsize/2.0
    ystart=ycenter-ysize/2.0
    caput(ADplugin+'MinX',xstart)
    caput(ADplugin+'MinY',ystart)
    caput(ADplugin+'SizeX',xsize)
    caput(ADplugin+'SizeY',ysize)
    caput(ADplugin+'BinX',binX)
    caput(ADplugin+'BinY',binY)
    caput(ADplugin+'EnableCallbacks','Enable')
    # Report the callbacks state read back from the IOC.
    print(ADplugin+' - '+caget(ADplugin+'EnableCallbacks_RBV',as_string=True))
    #MPA_ROI_Stats(roiNUM)
def AD_OVER_SetUp(AD,ROInum,OVERnum,linewidth=5,shape='Rectangle'):
    """Draw overlay #OVERnum of areaDetector IOC `AD`, tracking ROI #ROInum.

    NOTE(review): this is a duplicate of an identical AD_OVER_SetUp
    defined earlier in this file; in Python the definition executed
    last wins, so one copy should eventually be removed.

    AD = "29id_ps4"
    AD = "29iddMPA"
    shape= 'Cross', 'Rectangle', 'Ellipse','Text'
    """
    OVER1=AD+":Over1:"+str(OVERnum)+":"
    ROI=AD+":ROI"+str(ROInum)+":"
    caput(ROI+'EnableCallbacks','Enable')
    # Static overlay attributes: label, shape, pure-green colour, width.
    caput(OVER1+"Name","ROI"+str(ROInum))
    caput(OVER1+"Shape",shape)
    caput(OVER1+"Red",0)
    caput(OVER1+"Green",255)
    caput(OVER1+"Blue",0)
    caput(OVER1+'WidthX',linewidth)
    caput(OVER1+'WidthY',linewidth)
    # Link overlay position/size to the ROI readbacks ("CP" keeps them
    # updating on change) so the overlay follows the ROI.
    caput(OVER1+"PositionXLink.DOL",ROI+"MinX_RBV CP")
    caput(OVER1+"SizeXLink.DOL",ROI+"SizeX_RBV CP")
    caput(OVER1+"PositionYLink.DOL",ROI+"MinY_RBV CP")
    caput(OVER1+"SizeYLink.DOL",ROI+"SizeY_RBV CP")
    caput(OVER1+"Use","Yes")
| 30.192513 | 97 | 0.648778 | from IEX_29id.utils.strings import ClearCalcOut
from time import sleep
from epics import caget, caput
from IEX_29id.scans.setup import Scan_FillIn, Scan_Go
from IEX_29id.devices.detectors import cts
def MPA_Interlock():
    """Program 29idKappa:userCalcOut7 as an MPA high-voltage interlock.

    The calcout record monitors 29idKappa:m9.DRBV (the same PV that
    MPA_HV_ON checks before enabling HV) and drives the HV enable bit
    29iddMPA:C0O through its OUT link whenever the motor readback is
    inside the [-16, -6] direct-beam window.
    """
    ioc = "Kappa"
    calc_num = 7
    calc_pv = "29id" + ioc + ":userCalcOut" + str(calc_num)
    ClearCalcOut(ioc, calc_num)
    # Window (in motor units) inside which the detector sees direct beam.
    low, high = -16, -6
    caput(calc_pv + ".DESC", "MPA Interlock")
    caput(calc_pv + ".INPA", "29idKappa:m9.DRBV CP NMS")
    caput(calc_pv + ".B", 1)
    # CALC is 0 exactly when (low < A < high) and B > 0, 1 otherwise.
    caput(calc_pv + ".CALC$", "ABS((("+str(low)+"<A && A<"+str(high)+") && (B>0))-1)")
    caput(calc_pv + ".OCAL$", 'A')
    caput(calc_pv + ".OOPT", 2)   # execute the output link when CALC is zero
    caput(calc_pv + ".DOPT", 0)   # output the CALC result (not OCAL)
    caput(calc_pv + ".IVOA", 0)   # continue normally on invalid alarm
    caput(calc_pv + ".OUT", "29iddMPA:C0O PP NMS")
def MPA_HV_Set(volt):
    """Set the MPA high voltage, clamped to a 2990 V maximum.

    Writes the setpoint to 29idKappa:userCalcOut9.A, waits a second,
    then prints the readback from 29idKappa:userCalcOut10.OVAL.
    """
    setpoint = 2990 if volt > 2990 else volt
    caput("29idKappa:userCalcOut9.A", setpoint, wait=True, timeout=18000)
    sleep(1)  # let the readback calc settle before sampling it
    readback = caget("29idKappa:userCalcOut10.OVAL")
    print("HV = " + str(readback) + " V")
def MPA_HV_ON():
    """Enable the MPA high voltage, unless the detector is in the beam.

    Refuses to switch on while 29idKappa:m9.DRBV is inside [-16, -6]
    (the same direct-beam window used by MPA_Interlock); otherwise
    pulses the reset bit 29iddMPA:C1O and sets the enable bit
    29iddMPA:C0O.
    """
    tth = caget('29idKappa:m9.DRBV')
    if -16 <= tth <= -6:
        # BUG FIX: the message previously quoted the wrong range
        # ("-5 < mcp < 5"); the guard (and the interlock) use -16..-6.
        print('MPA OFF: detector in direct beam (-16 <= tth <= -6); move away before turning HV ON.')
    else:
        # Pulse the reset bit (see MPA_HV_Reset), then enable HV.
        caput('29iddMPA:C1O', 1, wait=True, timeout=18000)
        caput('29iddMPA:C1O', 0, wait=True, timeout=18000)
        caput("29iddMPA:C0O", 1, wait=True, timeout=18000)
        print("MPA - HV On")
def MPA_HV_OFF():
    """Switch the MPA high voltage off (clear the HV enable bit)."""
    caput("29iddMPA:C0O", 0, wait=True, timeout=18000)
    print("MPA - HV Off")
def MPA_HV_Reset():
    """Assert the MPA HV reset line."""
    caput('29iddMPA:C1O', 1)
    print("MPA - Reset")
def MPA_HV_scan(start=2400,stop=2990,step=10):
    """Scan the MPA high voltage from start to stop [V] via the Kappa scan record."""
    cts(1)
    setpoint_pv = '29idKappa:userCalcOut9.A'
    readback_pv = '29idKappa:userCalcOut10.OVAL'
    Scan_FillIn(setpoint_pv, readback_pv, 'Kappa', 1, start, stop, step)
    caput('29idKappa:scan1.PDLY', 1)  # positioner settling time [s]
    Scan_Go('Kappa')
def MPA_ROI_SetUp(roiNUM=1,xcenter=535,ycenter=539,xsize=50,ysize=50,binX=1,binY=1):
    """Configure MPA ROI roiNUM around (xcenter, ycenter) and enable its statistics.

    The defaults — MPA_ROI_SetUp(1, 535, 539, 50, 50) — target the center
    of the MCP.
    """
    AD_ROI_SetUp('29iddMPA', roiNUM, xcenter, ycenter, xsize, ysize, binX, binY)
    # Fix: removed the unused local 'pv' that the original computed and dropped.
    MPA_ROI_Stats(roiNUM)
def MPA_ROI_SetAll(xcenter=535,ycenter=539):
    """Set up MPA ROIs 1-4 as concentric squares centered on (xcenter, ycenter)."""
    for roi, size in enumerate((50, 100, 150, 200), start=1):
        MPA_ROI_SetUp(roi, xcenter, ycenter, xsize=size, ysize=size)
def MPA_ROI_Stats(roiNUM):
    """Point Stats plugin roiNUM at ROI roiNUM and enable its computations."""
    # Fix: removed the unused local 'pvROI' that the original computed and dropped.
    pvSTATS = "29iddMPA:Stats" + str(roiNUM) + ':'
    caput(pvSTATS + 'NDArrayPort', 'ROI' + str(roiNUM))
    caput(pvSTATS + 'EnableCallbacks', 'Enable')
    caput(pvSTATS + 'ArrayCallbacks', 'Enable')
    caput(pvSTATS + 'ComputeStatistics', 'Yes')
    caput(pvSTATS + 'ComputeCentroid', 'Yes')
    caput(pvSTATS + 'ComputeProfiles', 'Yes')
def AD_ROI_SetUp(AD,ROInum,xcenter=500,ycenter=500,xsize=50,ysize=50,binX=1,binY=1):
    """Configure ROI plugin ROInum of an areaDetector IOC.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA". The ROI is given
    by its center and size; binning defaults to 1x1.
    """
    plugin = AD + ':ROI' + str(ROInum) + ':'
    # The plugin expects the lower-left corner (MinX/MinY), not the center.
    for field, value in (
            ('MinX', xcenter - xsize / 2.0),
            ('MinY', ycenter - ysize / 2.0),
            ('SizeX', xsize),
            ('SizeY', ysize),
            ('BinX', binX),
            ('BinY', binY),
            ('EnableCallbacks', 'Enable')):
        caput(plugin + field, value)
    print(plugin + ' - ' + caget(plugin + 'EnableCallbacks_RBV', as_string=True))
def AD_OVER_SetUp(AD,ROInum,OVERnum,linewidth=5,shape='Rectangle'):
    """Configure overlay OVERnum of an areaDetector IOC to track ROI ROInum.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA".
    shape is one of 'Cross', 'Rectangle', 'Ellipse', 'Text'.
    """
    over = AD + ":Over1:" + str(OVERnum) + ":"
    roi = AD + ":ROI" + str(ROInum) + ":"
    caput(roi + 'EnableCallbacks', 'Enable')
    # Static overlay appearance: green outline of the requested shape.
    for field, value in (
            ("Name", "ROI" + str(ROInum)),
            ("Shape", shape),
            ("Red", 0),
            ("Green", 255),
            ("Blue", 0),
            ('WidthX', linewidth),
            ('WidthY', linewidth)):
        caput(over + field, value)
    # Link the overlay position/size to the ROI readbacks so the overlay
    # follows the ROI automatically ("CP" makes the link re-process on change).
    for link, rbv in (
            ("PositionXLink.DOL", "MinX_RBV"),
            ("SizeXLink.DOL", "SizeX_RBV"),
            ("PositionYLink.DOL", "MinY_RBV"),
            ("SizeYLink.DOL", "SizeY_RBV")):
        caput(over + link, roi + rbv + " CP")
    caput(over + "Use", "Yes")
def AD_ROI_SetUp(AD,ROInum,xcenter=500,ycenter=500,xsize=50,ysize=50,binX=1,binY=1):
    """Configure ROI plugin ROInum of an areaDetector IOC.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA". The ROI is given
    by its center and size; binning defaults to 1x1.
    """
    plugin = AD + ':ROI' + str(ROInum) + ':'
    # The plugin expects the lower-left corner (MinX/MinY), not the center.
    for field, value in (
            ('MinX', xcenter - xsize / 2.0),
            ('MinY', ycenter - ysize / 2.0),
            ('SizeX', xsize),
            ('SizeY', ysize),
            ('BinX', binX),
            ('BinY', binY),
            ('EnableCallbacks', 'Enable')):
        caput(plugin + field, value)
    print(plugin + ' - ' + caget(plugin + 'EnableCallbacks_RBV', as_string=True))
def AD_OVER_SetUp(AD,ROInum,OVERnum,linewidth=5,shape='Rectangle'):
    """Configure overlay OVERnum of an areaDetector IOC to track ROI ROInum.

    AD is the IOC prefix, e.g. "29id_ps4" or "29iddMPA".
    shape is one of 'Cross', 'Rectangle', 'Ellipse', 'Text'.
    """
    over = AD + ":Over1:" + str(OVERnum) + ":"
    roi = AD + ":ROI" + str(ROInum) + ":"
    caput(roi + 'EnableCallbacks', 'Enable')
    # Static overlay appearance: green outline of the requested shape.
    for field, value in (
            ("Name", "ROI" + str(ROInum)),
            ("Shape", shape),
            ("Red", 0),
            ("Green", 255),
            ("Blue", 0),
            ('WidthX', linewidth),
            ('WidthY', linewidth)):
        caput(over + field, value)
    # Link the overlay position/size to the ROI readbacks so the overlay
    # follows the ROI automatically ("CP" makes the link re-process on change).
    for link, rbv in (
            ("PositionXLink.DOL", "MinX_RBV"),
            ("SizeXLink.DOL", "SizeX_RBV"),
            ("PositionYLink.DOL", "MinY_RBV"),
            ("SizeYLink.DOL", "SizeY_RBV")):
        caput(over + link, roi + rbv + " CP")
    caput(over + "Use", "Yes")
| 2,326 | 0 | 215 |
434931c524da8c79d3c739fad86ad611e2404820 | 2,186 | py | Python | interpcl.py | ntessore/interpcl | 7c9cc7ce77ac885a2ee575831ff30b3770953c34 | [
"MIT"
] | null | null | null | interpcl.py | ntessore/interpcl | 7c9cc7ce77ac885a2ee575831ff30b3770953c34 | [
"MIT"
] | null | null | null | interpcl.py | ntessore/interpcl | 7c9cc7ce77ac885a2ee575831ff30b3770953c34 | [
"MIT"
] | null | null | null | # interpcl: interpolate angular power spectra
#
# author: Nicolas Tessore <n.tessore@ucl.ac.uk>
# license: MIT
'''
Interpolate angular power spectra (:mod:`interpcl`)
===================================================
.. currentmodule:: interpcl
A very small package that does interpolation of angular power spectra for
random fields on the sphere.
Install with pip::
pip install interpcl
Then import into your code::
from interpcl import interpcl
Functionality is absolutely minimal at this point. Please open an issue on
GitHub if you want to see added functionality.
Reference/API
-------------
.. autosummary::
:toctree: api
:nosignatures:
interpcl
'''
__version__ = '2021.5.20'
__all__ = [
'interpcl',
]
import numpy as np
from scipy.interpolate import interp1d
def interpcl(l, cl, lmax=None, dipole=True, monopole=False, **kwargs):
    r'''interpolate angular power spectrum

    Interpolate an angular power spectrum :math:`C(l)` using spline
    interpolation. Given input modes `l`, `cl`, returns the power spectrum for
    all integer modes from 0 to `lmax`, or the highest input mode if `lmax` is
    not given. The dipole is computed if `dipole` is ``True``, or set to zero,
    and similarly for `monopole`.

    Parameters
    ----------
    l, cl : array_like
        Input angular power spectrum. Must be one-dimensional arrays.
    lmax : int, optional
        Highest output mode. If not set, the highest input mode is used.
    dipole : bool, optional
        Compute the dipole (``True``), or set it to zero (``False``).
    monopole : bool, optional
        Compute the monopole (``True``), or set it to zero (``False``).
    **kwargs : dict, optional
        Keyword arguments for :class:`scipy.interpolate.interp1d`.

    Returns
    -------
    clout : array_like
        Interpolated angular power spectrum.
    '''
    # Default to extrapolation so output modes outside the input range are
    # defined; any user-supplied fill_value takes precedence.
    fv = kwargs.pop('fill_value', 'extrapolate')
    if lmax is None:
        lmax = np.max(l)
    lout = np.arange(lmax+1)
    clout = interp1d(l, cl, fill_value=fv, **kwargs)(lout)
    # Robustness fix: guard the dipole write so lmax == 0 does not raise an
    # IndexError (clout then has a single element, the monopole).
    if dipole is False and lmax >= 1:
        clout[1] = 0
    if monopole is False:
        clout[0] = 0
    return clout
| 23.76087 | 79 | 0.643184 | # interpcl: interpolate angular power spectra
#
# author: Nicolas Tessore <n.tessore@ucl.ac.uk>
# license: MIT
'''
Interpolate angular power spectra (:mod:`interpcl`)
===================================================
.. currentmodule:: interpcl
A very small package that does interpolation of angular power spectra for
random fields on the sphere.
Install with pip::
pip install interpcl
Then import into your code::
from interpcl import interpcl
Functionality is absolutely minimal at this point. Please open an issue on
GitHub if you want to see added functionality.
Reference/API
-------------
.. autosummary::
:toctree: api
:nosignatures:
interpcl
'''
__version__ = '2021.5.20'
__all__ = [
'interpcl',
]
import numpy as np
from scipy.interpolate import interp1d
def interpcl(l, cl, lmax=None, dipole=True, monopole=False, **kwargs):
    r'''interpolate angular power spectrum

    Interpolate an angular power spectrum :math:`C(l)` using spline
    interpolation. Given input modes `l`, `cl`, returns the power spectrum for
    all integer modes from 0 to `lmax`, or the highest input mode if `lmax` is
    not given. The dipole is computed if `dipole` is ``True``, or set to zero,
    and similarly for `monopole`.

    Parameters
    ----------
    l, cl : array_like
        Input angular power spectrum. Must be one-dimensional arrays.
    lmax : int, optional
        Highest output mode. If not set, the highest input mode is used.
    dipole : bool, optional
        Compute the dipole (``True``), or set it to zero (``False``).
    monopole : bool, optional
        Compute the monopole (``True``), or set it to zero (``False``).
    **kwargs : dict, optional
        Keyword arguments for :class:`scipy.interpolate.interp1d`.

    Returns
    -------
    clout : array_like
        Interpolated angular power spectrum.
    '''
    # Default to extrapolation so output modes outside the input range are
    # defined; any user-supplied fill_value takes precedence.
    fv = kwargs.pop('fill_value', 'extrapolate')
    if lmax is None:
        lmax = np.max(l)
    lout = np.arange(lmax+1)
    clout = interp1d(l, cl, fill_value=fv, **kwargs)(lout)
    # Robustness fix: guard the dipole write so lmax == 0 does not raise an
    # IndexError (clout then has a single element, the monopole).
    if dipole is False and lmax >= 1:
        clout[1] = 0
    if monopole is False:
        clout[0] = 0
    return clout
| 0 | 0 | 0 |
6bce7bb94b49db97ad7c387e9e6f3e0828df3a8c | 8,863 | py | Python | test_naive_priority_queue.py | jillhubbard/cs261-priority-queue | 76e7854d474118340d0d5e1c23c7e063fb4c9f1b | [
"FSFAP"
] | null | null | null | test_naive_priority_queue.py | jillhubbard/cs261-priority-queue | 76e7854d474118340d0d5e1c23c7e063fb4c9f1b | [
"FSFAP"
] | null | null | null | test_naive_priority_queue.py | jillhubbard/cs261-priority-queue | 76e7854d474118340d0d5e1c23c7e063fb4c9f1b | [
"FSFAP"
] | null | null | null | # DO NOT MODIFY THIS FILE
# Run me via: python3 -m unittest test_naive_priority_queue
import unittest
import time
from naive_priority_queue import NaivePriorityQueue
from job import Job
class TestNaivePriorityQueue(unittest.TestCase):
    """
    Tests for NaivePriorityQueue.

    Only test_instantiation is active; the remaining tests are provided
    commented out, to be enabled one at a time (TDD-style).
    """
    # Initialization
    def test_instantiation(self):
        """
        A NaivePriorityQueue exists.
        """
        try:
            NaivePriorityQueue()
        except NameError:
            self.fail("Could not instantiate NaivePriorityQueue.")
    # def test_internal(self):
    # """
    # A NaivePriorityQueue uses a list to store its data.
    # """
    # pq = NaivePriorityQueue()
    # self.assertEqual(list, type(pq.data))
    # def test_enqueue_one_internal(self):
    # """
    # Enqueueing a value adds it to the internal list.
    # """
    # pq = NaivePriorityQueue()
    # j = Job(5, 'The')
    # pq.enqueue(j)
    # self.assertEqual(j, pq.data[0])
    # def test_enqueue_two_internal(self):
    # """
    # Enqueueing two values results in the first enqueued value being the first
    # one in the list, and the second value being the last one in the list.
    # """
    # pq = NaivePriorityQueue()
    # first = Job(5, 'new')
    # second = Job(6, 'moon')
    # pq.enqueue(first)
    # pq.enqueue(second)
    # self.assertEqual(first, pq.data[0])
    # self.assertEqual(second, pq.data[1])
    # def test_enqueue_three_internal(self):
    # """
    # Enqueueing three values results in the first enqueued value being the first
    # one in the list, and the third value being the last one in the list.
    # """
    # pq = NaivePriorityQueue()
    # first = Job(5, 'rode')
    # second = Job(6, 'high')
    # third = Job(7, 'in')
    # pq.enqueue(first)
    # pq.enqueue(second)
    # pq.enqueue(third)
    # self.assertEqual(first, pq.data[0])
    # self.assertEqual(second, pq.data[1])
    # self.assertEqual(third, pq.data[2])
    # def test_dequeue_one(self):
    # """
    # Dequeuing from a single-element queue returns the single value.
    # """
    # pq = NaivePriorityQueue()
    # j = Job(5, 'the')
    # pq.enqueue(j)
    # self.assertEqual(j, pq.dequeue())
    # def test_dequeue_one_internal(self):
    # """
    # Dequeuing from a single-element queue removes it from the internal list.
    # """
    # pq = NaivePriorityQueue()
    # job = Job(5, 'crown')
    # pq.enqueue(job)
    # self.assertEqual(1, len(pq.data))
    # _ = pq.dequeue()
    # self.assertEqual(0, len(pq.data))
    # # Hint: NaivePriorityQueues perform a linear search. Don't optimize.
    # def test_dequeue_two(self):
    # """
    # Dequeuing from a two-element queue returns the one with highest priority.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'of')
    # higher_priority = Job(3, 'the')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # self.assertEqual(higher_priority, pq.dequeue())
    # def test_dequeue_two_internal(self):
    # """
    # Dequeuing from a two-element queue removes the job with the highest
    # priority from the list.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'metropolis')
    # higher_priority = Job(3, 'shining')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # self.assertEqual(1, len(pq.data))
    # def test_dequeue_three(self):
    # """
    # Dequeuing from a three-element queue returns the jobs with the highest
    # priority.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'like')
    # middle_priority = Job(3, 'who')
    # higher_priority = Job(5, 'on')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # pq.enqueue(middle_priority)
    # self.assertEqual(higher_priority, pq.dequeue())
    # self.assertEqual(middle_priority, pq.dequeue())
    # self.assertEqual(lower_priority, pq.dequeue())
    # def test_dequeue_three_internal(self):
    # """
    # Dequeuing from a three-element queue removes each dequeued value from
    # the internal list, highest-priority first.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'top')
    # middle_priority = Job(3, 'of')
    # higher_priority = Job(5, 'this')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # pq.enqueue(middle_priority)
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # """
    # Emptiness
    # """
    # def test_empty(self):
    # """
    # A queue is initially empty.
    # """
    # pq = NaivePriorityQueue()
    # self.assertTrue(pq.is_empty())
    # def test_not_empty(self):
    # """
    # A queue with one enqueued value is not empty.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'People'))
    # self.assertFalse(pq.is_empty())
    # def test_empty_after_dequeue(self):
    # """
    # A queue with one enqueued value is empty after dequeuing.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'was'))
    # _ = pq.dequeue()
    # self.assertTrue(pq.is_empty())
    # def test_not_empty_multiple(self):
    # """
    # A queue with two enqueued values is not empty after dequeuing only one.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'hustling'))
    # pq.enqueue(Job(3, 'arguing and bustling'))
    # _ = pq.dequeue()
    # self.assertFalse(pq.is_empty())
    # def test_initial_dequeue(self):
    # """
    # Dequeuing from an empty queue returns None.
    # """
    # pq = NaivePriorityQueue()
    # self.assertIsNone(pq.dequeue())
    # """
    # Algorithmic complexity
    # """
    # def test_enqueue_efficiency(self):
    # """
    # Enqueing a value is always O(1).
    # """
    # time_samples = []
    # for _ in range(0, 1000):
    # pq = NaivePriorityQueue()
    # start_time = time.time()
    # pq.enqueue('fake')
    # end_time = time.time()
    # time_samples.append(end_time - start_time)
    # small_average_enqueue_time = sum(time_samples) / float(len(time_samples))
    # large_queue = NaivePriorityQueue()
    # for _ in range(0, 1000000):
    # large_queue.enqueue('fake')
    # large_time_samples = []
    # for _ in range(0, 1000):
    # start_time = time.time()
    # large_queue.enqueue('fake')
    # end_time = time.time()
    # large_time_samples.append(end_time - start_time)
    # large_average_enqueue_time = sum(large_time_samples) / float(len(large_time_samples))
    # self.assertAlmostEqual(small_average_enqueue_time, large_average_enqueue_time, delta=small_average_enqueue_time)
    # # While enqueing naively is efficient... what is the complexity of dequeuing?
    # def test_dequeue_efficiency(self):
    # """
    # Dequeuing a value is O(n).
    # """
    # print("This test will take a while...") # See the comment below.
    # time_samples = []
    # for _ in range(0, 1000):
    # pq = NaivePriorityQueue()
    # pq.enqueue('fake')
    # start_time = time.time()
    # pq.dequeue()
    # end_time = time.time()
    # time_samples.append(end_time - start_time)
    # small_average_dequeue_time = sum(time_samples) / float(len(time_samples))
    # large_queue = NaivePriorityQueue()
    # for _ in range(0, 1000000):
    # large_queue.enqueue('fake')
    # large_time_samples = []
    # for _ in range(0, 1000):
    # start_time = time.time()
    # large_queue.dequeue()
    # end_time = time.time()
    # large_time_samples.append(end_time - start_time)
    # large_average_dequeue_time = sum(large_time_samples) / float(len(large_time_samples))
    # self.assertNotAlmostEqual(small_average_dequeue_time, large_average_dequeue_time, delta=small_average_dequeue_time)
# Notice how the last test takes time to "prove."
# By studying *algorithm analysis*, you can prove the efficiency deductively,
# with formal proofs, rather than with long-running tests.
# Allow running this test module directly (python3 test_naive_priority_queue.py)
# in addition to `python3 -m unittest`.
if __name__ == '__main__':
    unittest.main()
| 33.828244 | 125 | 0.576329 | # DO NOT MODIFY THIS FILE
# Run me via: python3 -m unittest test_naive_priority_queue
import unittest
import time
from naive_priority_queue import NaivePriorityQueue
from job import Job
class TestNaivePriorityQueue(unittest.TestCase):
    """
    Tests for NaivePriorityQueue.

    Only test_instantiation is active; the remaining tests are provided
    commented out, to be enabled one at a time (TDD-style).
    """
    # Initialization
    def test_instantiation(self):
        """
        A NaivePriorityQueue exists.
        """
        try:
            NaivePriorityQueue()
        except NameError:
            self.fail("Could not instantiate NaivePriorityQueue.")
    # def test_internal(self):
    # """
    # A NaivePriorityQueue uses a list to store its data.
    # """
    # pq = NaivePriorityQueue()
    # self.assertEqual(list, type(pq.data))
    # def test_enqueue_one_internal(self):
    # """
    # Enqueueing a value adds it to the internal list.
    # """
    # pq = NaivePriorityQueue()
    # j = Job(5, 'The')
    # pq.enqueue(j)
    # self.assertEqual(j, pq.data[0])
    # def test_enqueue_two_internal(self):
    # """
    # Enqueueing two values results in the first enqueued value being the first
    # one in the list, and the second value being the last one in the list.
    # """
    # pq = NaivePriorityQueue()
    # first = Job(5, 'new')
    # second = Job(6, 'moon')
    # pq.enqueue(first)
    # pq.enqueue(second)
    # self.assertEqual(first, pq.data[0])
    # self.assertEqual(second, pq.data[1])
    # def test_enqueue_three_internal(self):
    # """
    # Enqueueing three values results in the first enqueued value being the first
    # one in the list, and the third value being the last one in the list.
    # """
    # pq = NaivePriorityQueue()
    # first = Job(5, 'rode')
    # second = Job(6, 'high')
    # third = Job(7, 'in')
    # pq.enqueue(first)
    # pq.enqueue(second)
    # pq.enqueue(third)
    # self.assertEqual(first, pq.data[0])
    # self.assertEqual(second, pq.data[1])
    # self.assertEqual(third, pq.data[2])
    # def test_dequeue_one(self):
    # """
    # Dequeuing from a single-element queue returns the single value.
    # """
    # pq = NaivePriorityQueue()
    # j = Job(5, 'the')
    # pq.enqueue(j)
    # self.assertEqual(j, pq.dequeue())
    # def test_dequeue_one_internal(self):
    # """
    # Dequeuing from a single-element queue removes it from the internal list.
    # """
    # pq = NaivePriorityQueue()
    # job = Job(5, 'crown')
    # pq.enqueue(job)
    # self.assertEqual(1, len(pq.data))
    # _ = pq.dequeue()
    # self.assertEqual(0, len(pq.data))
    # # Hint: NaivePriorityQueues perform a linear search. Don't optimize.
    # def test_dequeue_two(self):
    # """
    # Dequeuing from a two-element queue returns the one with highest priority.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'of')
    # higher_priority = Job(3, 'the')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # self.assertEqual(higher_priority, pq.dequeue())
    # def test_dequeue_two_internal(self):
    # """
    # Dequeuing from a two-element queue removes the job with the highest
    # priority from the list.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'metropolis')
    # higher_priority = Job(3, 'shining')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # self.assertEqual(1, len(pq.data))
    # def test_dequeue_three(self):
    # """
    # Dequeuing from a three-element queue returns the jobs with the highest
    # priority.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'like')
    # middle_priority = Job(3, 'who')
    # higher_priority = Job(5, 'on')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # pq.enqueue(middle_priority)
    # self.assertEqual(higher_priority, pq.dequeue())
    # self.assertEqual(middle_priority, pq.dequeue())
    # self.assertEqual(lower_priority, pq.dequeue())
    # def test_dequeue_three_internal(self):
    # """
    # Dequeuing from a three-element queue removes each dequeued value from
    # the internal list, highest-priority first.
    # """
    # pq = NaivePriorityQueue()
    # lower_priority = Job(1, 'top')
    # middle_priority = Job(3, 'of')
    # higher_priority = Job(5, 'this')
    # pq.enqueue(higher_priority)
    # pq.enqueue(lower_priority)
    # pq.enqueue(middle_priority)
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # _ = pq.dequeue()
    # self.assertEqual(lower_priority, pq.data[0])
    # """
    # Emptiness
    # """
    # def test_empty(self):
    # """
    # A queue is initially empty.
    # """
    # pq = NaivePriorityQueue()
    # self.assertTrue(pq.is_empty())
    # def test_not_empty(self):
    # """
    # A queue with one enqueued value is not empty.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'People'))
    # self.assertFalse(pq.is_empty())
    # def test_empty_after_dequeue(self):
    # """
    # A queue with one enqueued value is empty after dequeuing.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'was'))
    # _ = pq.dequeue()
    # self.assertTrue(pq.is_empty())
    # def test_not_empty_multiple(self):
    # """
    # A queue with two enqueued values is not empty after dequeuing only one.
    # """
    # pq = NaivePriorityQueue()
    # pq.enqueue(Job(1, 'hustling'))
    # pq.enqueue(Job(3, 'arguing and bustling'))
    # _ = pq.dequeue()
    # self.assertFalse(pq.is_empty())
    # def test_initial_dequeue(self):
    # """
    # Dequeuing from an empty queue returns None.
    # """
    # pq = NaivePriorityQueue()
    # self.assertIsNone(pq.dequeue())
    # """
    # Algorithmic complexity
    # """
    # def test_enqueue_efficiency(self):
    # """
    # Enqueing a value is always O(1).
    # """
    # time_samples = []
    # for _ in range(0, 1000):
    # pq = NaivePriorityQueue()
    # start_time = time.time()
    # pq.enqueue('fake')
    # end_time = time.time()
    # time_samples.append(end_time - start_time)
    # small_average_enqueue_time = sum(time_samples) / float(len(time_samples))
    # large_queue = NaivePriorityQueue()
    # for _ in range(0, 1000000):
    # large_queue.enqueue('fake')
    # large_time_samples = []
    # for _ in range(0, 1000):
    # start_time = time.time()
    # large_queue.enqueue('fake')
    # end_time = time.time()
    # large_time_samples.append(end_time - start_time)
    # large_average_enqueue_time = sum(large_time_samples) / float(len(large_time_samples))
    # self.assertAlmostEqual(small_average_enqueue_time, large_average_enqueue_time, delta=small_average_enqueue_time)
    # # While enqueing naively is efficient... what is the complexity of dequeuing?
    # def test_dequeue_efficiency(self):
    # """
    # Dequeuing a value is O(n).
    # """
    # print("This test will take a while...") # See the comment below.
    # time_samples = []
    # for _ in range(0, 1000):
    # pq = NaivePriorityQueue()
    # pq.enqueue('fake')
    # start_time = time.time()
    # pq.dequeue()
    # end_time = time.time()
    # time_samples.append(end_time - start_time)
    # small_average_dequeue_time = sum(time_samples) / float(len(time_samples))
    # large_queue = NaivePriorityQueue()
    # for _ in range(0, 1000000):
    # large_queue.enqueue('fake')
    # large_time_samples = []
    # for _ in range(0, 1000):
    # start_time = time.time()
    # large_queue.dequeue()
    # end_time = time.time()
    # large_time_samples.append(end_time - start_time)
    # large_average_dequeue_time = sum(large_time_samples) / float(len(large_time_samples))
    # self.assertNotAlmostEqual(small_average_dequeue_time, large_average_dequeue_time, delta=small_average_dequeue_time)
# Notice how the last test takes time to "prove."
# By studying *algorithm analysis*, you can prove the efficiency deductively,
# with formal proofs, rather than with long-running tests.
def fake_value():
    """Return a unique throwaway string tagged with the current timestamp."""
    return "FAKE " + str(time.time())
# Allow running this test module directly (python3 test_naive_priority_queue.py)
# in addition to `python3 -m unittest`.
if __name__ == '__main__':
    unittest.main()
| 29 | 0 | 23 |
a171fdc0fe9b7388dac7c335903501f4a4cb96aa | 40,693 | py | Python | riegl_vz/riegl_vz/riegl_vz.py | riegllms/ros-riegl-vz | 932d9c1e69487b57aebc2bd7ab290be28bbb5023 | [
"Apache-2.0"
] | 1 | 2022-02-15T08:05:40.000Z | 2022-02-15T08:05:40.000Z | riegl_vz/riegl_vz/riegl_vz.py | riegllms/ros-riegl-vz | 932d9c1e69487b57aebc2bd7ab290be28bbb5023 | [
"Apache-2.0"
] | null | null | null | riegl_vz/riegl_vz/riegl_vz.py | riegllms/ros-riegl-vz | 932d9c1e69487b57aebc2bd7ab290be28bbb5023 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import time
import json
import math
from datetime import datetime
import subprocess
import threading
import numpy as np
from os.path import join, dirname, basename, abspath
from std_msgs.msg import (
Header
)
from sensor_msgs.msg import (
PointCloud2,
PointField,
NavSatStatus,
NavSatFix
)
from geometry_msgs.msg import (
Point,
PointStamped,
PoseStamped,
Pose,
PoseWithCovariance,
PoseWithCovarianceStamped,
TransformStamped
)
from nav_msgs.msg import (
Path,
Odometry
)
import std_msgs.msg as std_msgs
import builtin_interfaces.msg as builtin_msgs
from rclpy.node import Node
import riegl.rdb
from vzi_services.controlservice import ControlService
from vzi_services.interfaceservice import InterfaceService
from vzi_services.projectservice import ProjectService
from vzi_services.scannerservice import ScannerService
from vzi_services.geosysservice import GeoSysService
from riegl_vz_interfaces.msg import (
Voxels
)
from .pose import (
readVop,
readPop,
readAllSopv,
readTpl,
getTransformFromPose,
calcRelativePose,
calcRelativeCovariances,
eulerFromQuaternion, quaternionFromEuler
)
from .tf2_geometry_msgs import (
do_transform_pose
)
from .project import RieglVzProject
from .status import RieglVzStatus
from .geosys import RieglVzGeoSys
from .ssh import RieglVzSSH
from .utils import (
SubProcess,
parseCSV
)
appDir = dirname(abspath(__file__))
| 41.481142 | 223 | 0.604944 | import sys
import os
import time
import json
import math
from datetime import datetime
import subprocess
import threading
import numpy as np
from os.path import join, dirname, basename, abspath
from std_msgs.msg import (
Header
)
from sensor_msgs.msg import (
PointCloud2,
PointField,
NavSatStatus,
NavSatFix
)
from geometry_msgs.msg import (
Point,
PointStamped,
PoseStamped,
Pose,
PoseWithCovariance,
PoseWithCovarianceStamped,
TransformStamped
)
from nav_msgs.msg import (
Path,
Odometry
)
import std_msgs.msg as std_msgs
import builtin_interfaces.msg as builtin_msgs
from rclpy.node import Node
import riegl.rdb
from vzi_services.controlservice import ControlService
from vzi_services.interfaceservice import InterfaceService
from vzi_services.projectservice import ProjectService
from vzi_services.scannerservice import ScannerService
from vzi_services.geosysservice import GeoSysService
from riegl_vz_interfaces.msg import (
Voxels
)
from .pose import (
readVop,
readPop,
readAllSopv,
readTpl,
getTransformFromPose,
calcRelativePose,
calcRelativeCovariances,
eulerFromQuaternion, quaternionFromEuler
)
from .tf2_geometry_msgs import (
do_transform_pose
)
from .project import RieglVzProject
from .status import RieglVzStatus
from .geosys import RieglVzGeoSys
from .ssh import RieglVzSSH
from .utils import (
SubProcess,
parseCSV
)
# Absolute path of the directory this module lives in.
appDir = dirname(abspath(__file__))
class ScanPattern(object):
    """Container for the scanner's line/frame scan-pattern parameters and
    the measurement program number (values presumably in degrees — confirm
    against the scanner service API)."""

    def __init__(self):
        self.lineStart, self.lineStop, self.lineIncrement = 30.0, 130.0, 0.04
        self.frameStart, self.frameStop, self.frameIncrement = 0.0, 360.0, 0.04
        self.measProgram = 3
class PositionWithCovariance(object):
    """Pairs a position estimate with its covariance."""

    def __init__(self, position, covariance):
        self.position, self.covariance = position, covariance
class YawAngleWithCovariance(object):
    """Pairs a yaw-angle estimate with its covariance."""

    def __init__(self, angle, covariance):
        self.angle, self.covariance = angle, covariance
class ImuPose(object):
    """A pose sample tagged with the scan position it belongs to.

    Both fields default to None; isValid() reports whether both are set.
    """

    def __init__(self, scanpos=None, pose=None):
        self.scanpos = scanpos
        self.pose = pose

    def isValid(self):
        """Return True when both the scan position and the pose are set."""
        return self.scanpos is not None and self.pose is not None
class ImuRelativePose(object):
    """Thread-safe holder for the latest IMU pose and the pose captured at
    the previous scan position, used to derive relative poses between scans."""

    def __init__(self):
        self._pose = None
        self._previous = ImuPose()
        self._threadLock = threading.Lock()

    def update(self, pose):
        """Store the most recent IMU pose."""
        with self._threadLock:
            self._pose = pose

    def previous(self):
        """Return the ImuPose captured at the previous scan position."""
        with self._threadLock:
            return self._previous

    def reset(self):
        """Forget the previous scan position's pose."""
        with self._threadLock:
            self._previous = ImuPose()

    def get(self, scanposName):
        """Tag the latest pose with scanposName and rotate it into 'previous'.

        Returns (current, previous) as ImuPose instances. The stored latest
        pose is consumed (set back to None), so a subsequent get() without an
        intervening update() yields a current pose with pose=None.
        """
        with self._threadLock:
            current = ImuPose(scanposName, self._pose)
            prev = self._previous
            self._previous = current
            self._pose = None
        return current, prev
class RieglVz():
def __init__(self, node):
self._node = node
self._logger = node.get_logger()
self._hostname = node.hostname
self._workingDir = node.workingDir
self._connectionString = self._hostname + ':20000'
self._path = Path()
self._yawAngle = None
self._position = None
self._imuRelPose = ImuRelativePose()
self._stopReq = False
self._shutdownReq = False
self._project: RieglVzProject = RieglVzProject(self._node)
self._status: RieglVzStatus = RieglVzStatus(self._node)
self.geosys: RieglVzGeoSys = RieglVzGeoSys(self._node)
self._ssh: RieglVzSSH = RieglVzSSH(self._node)
self.scanposition = None
self.scanPublishFilter = node.scanPublishFilter
self.scanPublishLOD = node.scanPublishLOD
if not os.path.exists(self._workingDir):
os.mkdir(self._workingDir)
def _broadcastTfTransforms(self, ts: datetime.time):
ok, pop = self.getPop()
if ok:
self._node.transformBroadcaster.sendTransform(getTransformFromPose(ts, 'riegl_vz_prcs', pop))
ok, vop = self.getVop()
if ok:
self._node.transformBroadcaster.sendTransform(getTransformFromPose(ts, 'riegl_vz_vocs', vop))
ok, sopv = self.getSopv()
if ok:
self._node.transformBroadcaster.sendTransform(getTransformFromPose(ts, 'riegl_vz_socs', sopv.pose))
else:
return False, None
else:
return False, None
return True, sopv
def getScannerStatus(self):
return self._status.status.getScannerStatus()
def getScannerOpstate(self):
return self.getScannerStatus().opstate
def isScannerAvailable(self):
return (self.getScannerOpstate() != 'unavailable')
def getMemoryStatus(self):
return self._status.status.getMemoryStatus()
def getGnssStatus(self):
return self._status.status.getGnssStatus()
def getErrorStatus(self):
return self._status.status.getErrorStatus()
def getCameraStatus(self):
return self._status.status.getCameraStatus()
def loadProject(self, projectName: str, storageMedia: int, scanRegisterAndPublish: bool):
ok = self._project.loadProject(projectName, storageMedia)
if ok and scanRegisterAndPublish:
ts = self._node.get_clock().now()
self._broadcastTfTransforms(ts)
if ok:
self._imuRelPose.reset()
return ok;
def createProject(self, projectName: str, storageMedia: int):
if self._project.createProject(projectName, storageMedia):
self._path = Path();
self._imuRelPose.reset()
return True
return False
def getCurrentScanpos(self, projectName: str, storageMedia: int):
return self._project.getCurrentScanpos(projectName, storageMedia);
def getNextScanpos(self, projectName: str, storageMedia: int):
return self._project.getNextScanpos(projectName, storageMedia)
#def _getTimeStampFromScanId(self, scanId: str):
# scanFileName: str = os.path.basename(scanId)
# dateTime = datetime.strptime(scanFileName, '%y%m%d_%H%M%S.rxp')
# #self._logger.debug("dateTime = {}".format(dateTime))
# return int(dateTime.strftime("%s"))
def _setPositionEstimate(self, position=None, yawAngle=None):
scanposPath = self._project.getActiveScanposPath(self.scanposition)
remoteFile = scanposPath + '/final.pose'
localFile = self._workingDir + '/final.pose'
self._ssh.downloadFile(remoteFile, localFile)
with open(localFile, 'r') as f:
finalPose = json.load(f)
if position is not None:
finalPose['positionEstimate'] = {
'coordinateSystem': position.position.header.frame_id,
'coord1': position.position.point.x,
'coord2': position.position.point.y,
'coord3': position.position.point.z,
'coord1_conf': position.position.covariance[0],
'coord2_conf': position.position.covariance[1],
'coord3_conf': position.position.covariance[2]
}
if yawAngle is not None:
finalPose['yaw'] = yawAngle.angle
finalPose['yaw_conf'] = yawAngle.covariance
finalPose['yaw_trust_level_high'] = True
with open(localFile, 'w') as f:
json.dump(finalPose, f, indent=4)
f.write('\n')
self._ssh.uploadFile([localFile], scanposPath)
def _prepareImuRelativePose(self):
# This will create a dummy 'imu_relative.pose' file in the scan position directory.
# The trajectory service will not overwrite this file, instead it will create a file
# 'imu_relative01.pose' (if scanner has been moved) which will not be used then.
scanposPath = self._project.getActiveScanposPath(self.scanposition)
localFile = self._workingDir + '/imu_relative.pose'
with open(localFile, 'w') as f:
f.write('{}\n')
self._ssh.uploadFile([localFile], scanposPath)
    def _setImuRelativePose(self, posePrevious, poseCurrent):
        """Write an 'imu_relative.pose' file (mode 'imu_external') to the active
        scan position, describing the pose of the current scan position relative
        to the previous one as measured by the external IMU.

        Args:
            posePrevious ... previous pose; .pose.pose.pose and
                             .pose.pose.covariance are read, .scanpos names the
                             origin scan position
            poseCurrent  ... current pose with the same structure
        """
        scanposPath = self._project.getActiveScanposPath(self.scanposition)
        localFile = self._workingDir + '/imu_relative.pose'
        # Relative pose and accuracies of 'current' with respect to 'previous'.
        pos_x, pos_y, pos_z, pos_roll, pos_pitch, pos_yaw = calcRelativePose(posePrevious.pose.pose.pose, poseCurrent.pose.pose.pose)
        cov_x, cov_y, cov_z, cov_roll, cov_pitch, cov_yaw = calcRelativeCovariances(posePrevious.pose.pose.covariance, poseCurrent.pose.pose.covariance)
        imuRelative = {
            'mode': 'imu_external',
            'origin': posePrevious.scanpos,
            'x': pos_x,
            'y': pos_y,
            'z': pos_z,
            'roll': pos_roll,
            'pitch': pos_pitch,
            'yaw': pos_yaw,
            'accuracy': {
                'x': cov_x,
                'y': cov_y,
                'z': cov_z,
                'roll': cov_roll,
                'pitch': cov_pitch,
                'yaw': cov_yaw
            }
        }
        with open(localFile, 'w') as f:
            json.dump(imuRelative, f, indent=4)
            f.write('\n')
        self._ssh.uploadFile([localFile], scanposPath)
def _getGnssFixMessage(self, status=None):
if status is None:
status = self.getGnssStatus()
if not status.valid or not status.publish:
return False, None
msg = NavSatFix()
msg.header = Header()
msg.header.stamp = self._node.get_clock().now().to_msg()
msg.header.frame_id = 'riegl_vz_gnss'
if status.fix:
msg.status.status = NavSatStatus.STATUS_FIX
else:
msg.status.status = NavSatStatus.STATUS_NO_FIX
msg.status.service = NavSatStatus.SERVICE_GPS
# Position in degrees.
msg.latitude = status.latitude
msg.longitude = status.longitude
# Altitude in metres.
msg.altitude = status.altitude if not math.isnan(status.altitude) else 0.0
lat_std = status.horAcc if not math.isnan(status.horAcc) else 0.0
lon_std = status.horAcc if not math.isnan(status.horAcc) else 0.0
alt_std = status.verAcc if not math.isnan(status.verAcc) else 0.0
msg.position_covariance[0] = lat_std**2
msg.position_covariance[4] = lon_std**2
msg.position_covariance[8] = alt_std**2
msg.position_covariance_type = NavSatFix.COVARIANCE_TYPE_DIAGONAL_KNOWN
return True, msg
def publishGnssFix(self, status=None):
ok, msg = self._getGnssFixMessage(status)
if ok:
self._node.gnssFixPublisher.publish(msg)
def setProjectControlPoints(coordSystem: str, csvFile: str):
projectPath = self._project.getActiveProjectPath()
remoteSrcCpsFile = csvFile
localSrcCpsFile = self._workingDir + '/' + os.path.basename(csvFile)
self._ssh.downloadFile(remoteSrcCpsFile, localSrcCpsFile)
csvData = parseCSV(localSrcCpsFile)[1:]
# parse points and write resulting csv file
controlPoints = []
if csvData:
if len(csvData[0]) < 4:
raise RuntimeError("Invalid control points definition. File must have at least four columns.")
localDstCpsFile = self._workingDir + '/controlpoints.csv'
with open(localDstCpsFile, 'w') as f:
f.write("Name,CRS,Coord1,Coord2,Coord3\n")
for item in csvData:
f.write("{},{},{},{},{}\n".format(item[0], coordSystem, item[1], item[2], item[3]))
self._ssh.uploadFile([localDstCpsFile], projectPath)
    def getPointCloud(self, scanposition: str, pointcloud: PointCloud2, ts: bool = True):
        """Download the scan of a scan position as .rdbx file and convert it to a
        PointCloud2 message with fields x, y, z, r (reflectance), all FLOAT32,
        in the 'riegl_vz_socs' frame.

        Args:
            scanposition ... scan position number as string
            pointcloud   ... message object, replaced by the generated cloud
            ts           ... stamp with current ROS time if True, else zero time
        Returns:
            (ok, pointcloud); ok is False if the scan position has no scan.
        """
        self._logger.debug("Downloading rdbx file..")
        self._status.status.setActiveTask('download rdbx file')
        scanId = self._project.getScanId(scanposition)
        self._logger.debug("scan id = {}".format(scanId))
        if scanId == 'null':
            self._logger.error("Scan id is null!")
            return False, pointcloud
        scanposPath = self._project.getActiveScanposPath(scanposition)
        self._logger.debug("scanpos path = {}".format(scanposPath))
        # Scan file base name: timestamp portion of the rxp file name.
        scan = os.path.basename(scanId).replace('.rxp', '')[0:13]
        self._logger.debug("scan = {}".format(scan))
        remoteFile = scanposPath + '/scans/' + scan + '.rdbx'
        localFile = self._workingDir + '/scan.rdbx'
        self._ssh.downloadFile(remoteFile, localFile)
        self._logger.debug("Generate point cloud..")
        self._status.status.setActiveTask('generate point cloud data')
        with riegl.rdb.rdb_open(localFile) as rdb:
            rosDtype = PointField.FLOAT32
            dtype = np.float32
            itemsize = np.dtype(dtype).itemsize
            numTotalPoints = 0
            numPoints = 0
            data = bytearray()
            # Negative LOD values are treated as 0 (publish every point).
            scanPublishLOD = self.scanPublishLOD
            if self.scanPublishLOD < 0:
                scanPublishLOD = 0
            for points in rdb.select(
                self.scanPublishFilter,
                chunk_size=100000
                ):
                # LOD n keeps every 2^n-th point of the filtered stream.
                pointStep = 2 ** scanPublishLOD
                for point in points:
                    if not (numTotalPoints % pointStep):
                        data.extend(point['riegl.xyz'].astype(dtype).tobytes())
                        data.extend(point['riegl.reflectance'].astype(dtype).tobytes())
                        numPoints += 1
                    numTotalPoints += 1
            fields = [PointField(
                name = n, offset = i*itemsize, datatype = rosDtype, count = 1)
                for i, n in enumerate('xyzr')]
            if ts:
                stamp = self._node.get_clock().now().to_msg()
            else:
                stamp = builtin_msgs.Time(sec = 0, nanosec = 0)
            header = std_msgs.Header(frame_id = 'riegl_vz_socs', stamp = stamp)
            pointcloud = PointCloud2(
                header = header,
                height = 1,
                width = numPoints,
                is_dense = False,
                is_bigendian = False,
                fields = fields,
                point_step = (itemsize * 4),
                row_step = (itemsize * 4 * numPoints),
                data = data
            )
            #for point in rdb.points():
            #    self._logger.debug("{0}".format(point.riegl_xyz))
        self._status.status.setActiveTask('')
        self._logger.debug("Point cloud generated.")
        return True, pointcloud
def getVoxels(self, voxels: Voxels, scanposition: str = '0', ts: bool = True):
self._logger.debug("Downloading vxls file..")
self._status.status.setActiveTask('download vxls file')
remoteFile = ''
localFile = ''
projectPath = self._project.getActiveProjectPath()
self._logger.debug("project path = {}".format(projectPath))
if scanposition != '1000000':
remoteFile = projectPath + '/Voxels1.VPP/' + self._project.getScanposName(scanposition) + '.vxls'
localFile = self._workingDir + '/scan.vxls'
else:
remoteFile = projectPath + '/Voxels1.VPP/project.vxls'
localFile = self._workingDir + '/project.vxls'
self._ssh.downloadFile(remoteFile, localFile)
self._logger.debug("Generate voxels..")
self._status.status.setActiveTask('generate voxel data')
with riegl.rdb.rdb_open(localFile) as rdb:
voxelSize = objs = float(json.loads(rdb.meta_data['riegl.voxel_info'])['size'])
numTotalPoints = 0
data = bytearray()
for points in rdb.select('', chunk_size=100000):
for point in points:
data.extend(point['riegl.xyz'].astype(np.float64).tobytes())
data.extend(point['riegl.reflectance'].astype(np.float32).tobytes())
data.extend(point['riegl.point_count'].astype(np.uint32).tobytes())
data.extend(point['riegl.pca_axis_min'].astype(np.float32).tobytes())
data.extend(point['riegl.pca_axis_max'].astype(np.float32).tobytes())
data.extend(point['riegl.pca_extents'].astype(np.float32).tobytes())
data.extend(point['riegl.shape_id'].astype(np.uint8).tobytes())
numTotalPoints += 1
fields = []
fieldsize = 0
fields.append(PointField(name='x', offset=fieldsize, datatype=PointField.FLOAT64, count=1))
fieldsize += np.dtype(np.float64).itemsize
fields.append(PointField(name='y', offset=fieldsize, datatype=PointField.FLOAT64, count=1))
fieldsize += np.dtype(np.float64).itemsize
fields.append(PointField(name='z', offset=fieldsize, datatype=PointField.FLOAT64, count=1))
fieldsize += np.dtype(np.float64).itemsize
fields.append(PointField(name='r', offset=fieldsize, datatype=PointField.FLOAT32, count=1))
fieldsize += np.dtype(np.float32).itemsize
fields.append(PointField(name='point_count', offset=fieldsize, datatype=PointField.UINT32, count=1))
fieldsize += np.dtype(np.uint32).itemsize
fields.append(PointField(name='pca_axis_min', offset=fieldsize, datatype=PointField.FLOAT32, count=3))
fieldsize += np.dtype(np.float32).itemsize * 3
fields.append(PointField(name='pca_axis_max', offset=fieldsize, datatype=PointField.FLOAT32, count=3))
fieldsize += np.dtype(np.float32).itemsize * 3
fields.append(PointField(name='pca_extents', offset=fieldsize, datatype=PointField.FLOAT32, count=3))
fieldsize += np.dtype(np.float32).itemsize * 3
fields.append(PointField(name='shape_id', offset=fieldsize, datatype=PointField.UINT8, count=1))
fieldsize += np.dtype(np.uint8).itemsize
if ts:
stamp = self._node.get_clock().now().to_msg()
else:
stamp = builtin_msgs.Time(sec = 0, nanosec = 0)
header = std_msgs.Header(frame_id = 'riegl_vz_vocs', stamp = stamp)
voxels = Voxels(
voxel_size = voxelSize,
pointcloud = PointCloud2(
header = header,
height = 1,
width = numTotalPoints,
is_dense = False,
is_bigendian = False,
fields = fields,
point_step = fieldsize,
row_step = (fieldsize * numTotalPoints),
data = data
)
)
self._status.status.setActiveTask('')
self._logger.debug("Voxels generated.")
return True, voxels
    def _scanThreadFunc(self):
        """Background worker for scan(): runs the full acquisition pipeline.

        Sequence: prepare project/scan position, run 'acquire-data.py', apply
        position/yaw estimates and relative IMU pose, optionally convert RXP to
        RDBX, register the scan position, and publish pose/path/odometry,
        point cloud and voxel data. Updates the status object throughout and
        honors self._stopReq between stages.
        """
        self._status.status.setOpstate('scanning', 'scan data acquisition')
        self._status.status.setProgress(0)
        self._logger.info("Starting data acquisition..")
        self._logger.info("project name = {}".format(self.projectName))
        scanposName = self._project.getScanposName(self.scanposition)
        self._logger.info("scanpos name = {0} ({1})".format(self.scanposition, scanposName))
        self._logger.info("storage media = {}".format(self.storageMedia))
        self._logger.info("scan pattern = {0}, {1}, {2}, {3}, {4}, {5}".format(
            self.scanPattern.lineStart,
            self.scanPattern.lineStop,
            self.scanPattern.lineIncrement,
            self.scanPattern.frameStart,
            self.scanPattern.frameStop,
            self.scanPattern.frameIncrement))
        self._logger.info("meas program = {}".format(self.scanPattern.measProgram))
        self._logger.info("scan publish = {}".format(self.scanPublish))
        self._logger.info("scan publish filter = '{}'".format(self.scanPublishFilter))
        self._logger.info("scan publish LOD = {}".format(self.scanPublishLOD))
        self._logger.info("voxel publish = {}".format(self.voxelPublish))
        self._logger.info("scan register = {}".format(self.scanRegister))
        self._logger.info("scan register mode = {}".format(self.scanRegistrationMode))
        self._logger.info("pose publish = {}".format(self.posePublish))
        if self.reflSearchSettings:
            self._logger.info("reflector search = {}".format(self.reflSearchSettings))
        self._logger.info("image capture = {}".format(self.captureImages))
        self._logger.info("image capture mode = {}".format(self.captureMode))
        self._logger.info("image capture overlap = {}".format(self.imageOverlap))
        # prepare project
        try:
            projSvc = ProjectService(self._connectionString)
            projSvc.setStorageMedia(self.storageMedia)
            projSvc.createProject(self.projectName)
            projSvc.loadProject(self.projectName)
            projSvc.createScanposition(scanposName)
            projSvc.selectScanposition(scanposName)
            # Placeholder pose file keeps the trajectory service from writing
            # its own; only needed if a previous external IMU pose exists.
            posePrevious = self._imuRelPose.previous()
            if posePrevious.isValid():
                self._prepareImuRelativePose()
        except:
            self._logger.error("Project and scan position prepare failed!")
        # Run the data acquisition helper script as a subprocess.
        scriptPath = join(appDir, 'acquire-data.py')
        cmd = [
            'python3', scriptPath,
            '--connectionstring', self._connectionString]
        if self.reflSearchSettings:
            rssFilePath = join(self._workingDir, 'reflsearchsettings.json')
            with open(rssFilePath, 'w') as f:
                json.dump(self.reflSearchSettings, f)
            cmd.append('--reflsearch')
            cmd.append(rssFilePath)
        if self.scanPattern:
            cmd.extend([
                '--line-start', str(self.scanPattern.lineStart),
                '--line-stop', str(self.scanPattern.lineStop),
                '--line-incr', str(self.scanPattern.lineIncrement),
                '--frame-start', str(self.scanPattern.frameStart),
                '--frame-stop', str(self.scanPattern.frameStop),
                '--frame-incr', str(self.scanPattern.frameIncrement),
                '--measprog', str(self.scanPattern.measProgram)
                ])
        # captureImages == 2 means 'auto': capture only if a camera is mounted.
        captureImages = (self.captureImages != 0)
        if self.captureImages == 2:
            if not self.getCameraStatus().avail:
                captureImages = False
        if captureImages:
            cmd.extend([
                '--capture-images',
                '--capture-mode', str(self.captureMode),
                '--image-overlap', str(self.imageOverlap)
                ])
        self._logger.debug("CMD = {}".format(' '.join(cmd)))
        subproc = SubProcess(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
        self._logger.debug("Subprocess started.")
        subproc.waitFor(errorMessage='Data acquisition failed.', block=True)
        if self._stopReq:
            self._stopReq = False
            self._status.status.setOpstate('waiting')
            self._logger.info("Scan stopped")
            return
        self._logger.info("Data acquisition finished")
        self._status.status.setOpstate('processing')
        # Apply position and/or yaw angle estimates, if provided via setPosition/setPose.
        if self._position is not None or self._yawAngle is not None:
            if self._position is not None:
                self._status.status.setActiveTask('set position estimate')
            if self._yawAngle is not None:
                self._status.status.setActiveTask('set yaw angle estimate')
            if self._position is not None and self.scanposition == '1':
                # First scan position also defines the project location.
                self._logger.info("Set project position.")
                projSvc = ProjectService(self._connectionString)
                projSvc.setProjectLocation(self._position.position.header.frame_id, self._position.position.point.x, self._position.position.point.y, self._position.position.point.z)
            self._logger.info("Set scan position and/or yaw angle estimate..")
            try:
                self._setPositionEstimate(self._position, self._yawAngle)
                self._logger.info("Set position and/or yaw angle estimate finished")
            except:
                self._logger.error("Set position and/or yaw angle estimate failed!")
            self._position = None
        # Write the relative IMU pose between the previous and current scan position.
        poseCurrent, posePrevious = self._imuRelPose.get(self._project.getScanposName(self.scanposition))
        if poseCurrent.isValid():
            self._logger.info("Set relative imu pose (current available).")
            if posePrevious.isValid():
                self._logger.info("Set relative imu pose (previous available).")
                self._status.status.setActiveTask('set relative imu pose')
                try:
                    self._setImuRelativePose(posePrevious, poseCurrent)
                    self._logger.info("Set relative imu pose finished")
                except:
                    self._logger.error("Set relative imu pose failed!")
            self._logger.info("Set relative imu pose (previous = current).")
        # Convert the raw RXP scan data to RDBX for point cloud publishing.
        if self.scanPublish:
            self._logger.info("Converting RXP to RDBX..")
            self._status.status.setActiveTask('convert rxp to rdbx')
            scriptPath = join(appDir, 'create-rdbx.py')
            cmd = [
                'python3', scriptPath,
                '--connectionstring', self._connectionString,
                '--project', self.projectName,
                '--scanposition', scanposName]
            self._logger.debug("CMD = {}".format(' '.join(cmd)))
            subproc = SubProcess(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
            self._logger.debug("Subprocess started.")
            subproc.waitFor('RXP to RDBX conversion failed.')
            if self._stopReq:
                self._stopReq = False
                self._status.status.setOpstate('waiting')
                self._logger.info("Scan stopped")
                return
            self._logger.info("RXP to RDBX conversion finished")
        # Register the new scan position against the project.
        if self.scanRegister:
            self._logger.info("Starting registration..")
            self._status.status.setActiveTask('scan position registration')
            scriptPath = os.path.join(appDir, 'register-scan.py')
            cmd = [
                'python3', scriptPath,
                '--connectionstring', self._connectionString,
                '--project', self.projectName,
                '--scanposition', scanposName,
                '--registrationmode', str(self.scanRegistrationMode)]
            if self.posePublish:
                cmd.append('--wait-until-finished')
            self._logger.debug("CMD = {}".format(' '.join(cmd)))
            subproc = SubProcess(subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
            subproc.waitFor(errorMessage='Registration failed.', block=True)
            if self._stopReq:
                self._stopReq = False
                self._status.status.setOpstate('waiting')
                self._logger.info("Scan stopped")
                return
            self._logger.info("Registration finished")
            if self.posePublish:
                self._logger.info("Downloading and publishing pose..")
                self._status.status.setActiveTask('publish registered scan position')
                ts = self._node.get_clock().now()
                ok, sopv = self._broadcastTfTransforms(ts)
                if ok:
                    # update sopv timestamp
                    sopv.pose.header.stamp = ts.to_msg()
                    # publish pose
                    self._node.posePublisher.publish(sopv.pose)
                    # publish path
                    self._path.header = sopv.pose.header
                    self._path.poses.append(sopv.pose)
                    self._node.pathPublisher.publish(self._path)
                    # publish odometry
                    odom = Odometry(
                        header = Header(stamp = ts.to_msg(), frame_id = 'riegl_vz_vocs'),
                        child_frame_id = 'riegl_vz_socs',
                        pose = PoseWithCovariance(pose = sopv.pose.pose)
                    )
                    self._node.odomPublisher.publish(odom)
                    self._logger.info("Pose published")
        if self.scanPublish:
            self._logger.info("Downloading and publishing point cloud..")
            pointcloud: PointCloud2 = PointCloud2()
            ok, pointcloud = self.getPointCloud(self.scanposition, pointcloud)
            if ok:
                self._status.status.setActiveTask('publish point cloud data')
                self._node.pointCloudPublisher.publish(pointcloud)
                self._logger.info("Point cloud published")
        if self.voxelPublish and self.scanRegister:
            self._logger.info("Downloading and publishing voxel data..")
            voxels: Voxels = Voxels()
            ok, voxels = self.getVoxels(voxels, self.scanposition)
            if ok:
                self._status.status.setActiveTask('publish voxel data')
                self._node.voxelsPublisher.publish(voxels)
                self._logger.info("Voxels published")
        self._status.status.setOpstate('waiting')
    def scan(
        self,
        projectName: str,
        scanposition: str,
        storageMedia: int,
        scanPattern: ScanPattern,
        scanPublish: bool = True,
        scanPublishFilter: str = '',
        scanPublishLOD: int = 1,
        voxelPublish: bool = False,
        scanRegister: bool = True,
        scanRegistrationMode: int = 1,
        posePublish: bool = True,
        reflSearchSettings: dict = None,
        captureImages: int = 2,
        captureMode: int = 1,
        imageOverlap: int = 25):
        """Start asynchronous data acquisition at a scan position.

        Stores the parameters on the instance and launches _scanThreadFunc in a
        daemon thread, returning once scanning has actually started.

        Args:
            projectName ... the project name
            scanposition ... the name of the new scan position
            storageMedia ... storage media for data recording
            scanPattern ... the scan pattern
            scanPublish ... publish the scan as point cloud after acquisition
            scanPublishFilter ... rdb select filter for the published point cloud
            scanPublishLOD ... level of detail (publish every 2^LOD-th point)
            voxelPublish ... publish voxel data after registration
            scanRegister ... register the scan position after acquisition
            scanRegistrationMode ... registration mode passed to register-scan.py
            posePublish ... publish the registered pose/path/odometry
            reflSearchSettings ... reflector search settings
            captureImages ... 0 = off, 1 = on, 2 = auto (only if camera mounted)
            captureMode ... camera capture mode
            imageOverlap ... image overlap in percent
        Returns:
            False if the scanner is busy, True once scanning has started."""
        if self.isBusy(block=False):
            return False
        self.projectName = projectName
        self.scanposition = scanposition
        self.storageMedia = storageMedia
        self.scanPattern = scanPattern
        self.scanPublish = scanPublish
        self.scanPublishFilter = scanPublishFilter
        self.scanPublishLOD = scanPublishLOD
        self.voxelPublish = voxelPublish
        self.scanRegister = scanRegister
        self.scanRegistrationMode = scanRegistrationMode
        self.posePublish = posePublish
        self.reflSearchSettings = reflSearchSettings
        self.captureImages = captureImages
        self.captureMode = captureMode
        self.imageOverlap = imageOverlap
        thread = threading.Thread(target=self._scanThreadFunc, args=())
        thread.daemon = True
        thread.start()
        # Block until the worker thread has switched the opstate to 'scanning'.
        while not self.isScanning(block=False):
            time.sleep(0.2)
        return True
def isScanning(self, block = True):
if block:
while self.getScannerOpstate() == 'scanning':
time.sleep(0.2)
return True if self.getScannerOpstate() == 'scanning' else False
def isBusy(self, block = True):
if block:
while self.getScannerOpstate() != 'waiting':
time.sleep(0.2)
return False if self.getScannerOpstate() == 'waiting' else True
def setPosition(self, position, covariance):
if position.header.frame_id != '' and position.header.frame_id != 'riegl_vz_prcs':
try:
# try to convert to PRCS
position = self._node.transformBuffer.transform(position, 'riegl_vz_prcs')
except:
self._logger.warning("Position coordinate transformation to PRCS failed!")
self._position = PositionWithCovariance(position, covariance)
    def _setYawAngle(self, header, yawAngle, covariance):
        """Store a yaw angle estimate (radians), converting it to the PRCS
        frame when the header names a different frame.

        Exceptions from the transform propagate to the caller (setPose wraps
        this in a try/except on the relative-pose path).
        """
        if header.frame_id != '' and header.frame_id != 'riegl_vz_prcs':
            # must be converted to PRCS
            pose = PoseStamped()
            pose.header = header
            pose.pose = Pose(
                position = Point(x=0.0, y=0.0, z=0.0),
                orientation = quaternionFromEuler(0.0, 0.0, yawAngle)
            )
            pose = self._node.transformBuffer.transform(pose, 'riegl_vz_prcs')
            roll, pitch, yawAngle = eulerFromQuaternion(pose.pose.orientation)
        self._yawAngle = YawAngleWithCovariance(yawAngle, covariance)
    def setPose(self, pose, isRelative, mountingPose):
        """Ingest an external robot pose: apply the scanner mounting offset and
        derive yaw angle (and, for absolute poses, position) estimates.

        Args:
            pose ... pose with covariance (pose.pose.pose / pose.pose.covariance)
            isRelative ... True if the pose is relative (external IMU path)
            mountingPose ... [x, y, z, roll, pitch, yaw] scanner mounting offset
        """
        self.robotRelativePose = isRelative
        self._logger.info("robot relative pose = {}".format(self.robotRelativePose))
        self._logger.info("robot scanner mounting pose = x: {0}, y: {1}, z: {2}, roll: {3}, pitch: {4}, yaw: {5}".format(mountingPose[0], mountingPose[1], mountingPose[2], mountingPose[3], mountingPose[4], mountingPose[5]))
        if self.robotRelativePose:
            try:
                # try to set yaw angle
                trans = TransformStamped()
                trans.transform.translation.x = mountingPose[0]
                trans.transform.translation.y = mountingPose[1]
                trans.transform.translation.z = mountingPose[2]
                # NOTE(review): the following logs look like debug output but
                # are emitted at error level — confirm the intended level.
                self._logger.error("euler = {0} {1} {2}".format(mountingPose[0], mountingPose[1], mountingPose[2]))
                trans.transform.rotation = quaternionFromEuler(mountingPose[3], mountingPose[4], mountingPose[5])
                self._logger.error("trans = {}".format(trans))
                self._logger.error("pose = {}".format(pose.pose.pose))
                pose2 = do_transform_pose(pose.pose.pose, trans)
                self._logger.error("pose2 = {}".format(pose2))
                roll, pitch, yaw = eulerFromQuaternion(pose2.orientation)
                self._logger.error("euler2 = {}".format(eulerFromQuaternion(pose2.orientation)))
                cov = np.array(pose.pose.covariance).reshape(6,6)
                self._setYawAngle(pose.header, yaw, cov[5][5].item())
            except:
                self._logger.warning("Yaw angle configuration with transformation to PRCS failed!")
            # Relative poses additionally feed the external IMU pose tracker.
            self._imuRelPose.update(pose)
        else:
            # NOTE(review): this branch duplicates the mounting transform above,
            # but without the try/except guard — an exception propagates here.
            trans = TransformStamped()
            trans.transform.translation.x = mountingPose[0]
            trans.transform.translation.y = mountingPose[1]
            trans.transform.translation.z = mountingPose[2]
            trans.transform.rotation = quaternionFromEuler(mountingPose[3], mountingPose[4], mountingPose[5])
            pose2 = do_transform_pose(pose.pose.pose, trans)
            roll, pitch, yaw = eulerFromQuaternion(pose2.orientation)
            cov = np.array(pose.pose.covariance).reshape(6,6)
            self._setYawAngle(pose.header, yaw, cov[5][5].item())
            position = PointStamped(
                header = pose.header,
                point = pose2.position
            )
            # Diagonal position variances from the 6x6 covariance matrix.
            self.setPosition(position, [cov[0][0].item(), cov[1][1].item(), cov[2][2].item()])
def getAllSopv(self):
try:
sopvFileName = 'all_sopv.csv'
remoteFile = self._project.getActiveProjectPath() + '/Voxels1.VPP/' + sopvFileName
localFile = self._workingDir + '/' + sopvFileName
self._ssh.downloadFile(remoteFile, localFile)
ok = True
sopvs = readAllSopv(localFile, self._logger)
except Exception as e:
ok = False
sopvs = None
return ok, sopvs
def getSopv(self):
ok, sopvs = self.getAllSopv()
if ok and len(sopvs):
sopv = sopvs[-1]
else:
sopv = None
ok = False
return ok, sopv
def getVop(self):
try:
sopvFileName = 'VPP.vop'
remoteFile = self._project.getActiveProjectPath() + '/Voxels1.VPP/' + sopvFileName
localFile = self._workingDir + '/' + sopvFileName
self._ssh.downloadFile(remoteFile, localFile)
except Exception as e:
return False, None
vop = readVop(localFile)
return True, vop
def getPop(self):
try:
popFileName = 'project.pop'
remoteFile = self._project.getActiveProjectPath() + '/' + popFileName
localFile = self._workingDir + '/' + popFileName
self._ssh.downloadFile(remoteFile, localFile)
except Exception as e:
return False, None
pop = readPop(localFile)
return True, pop
def getTpl(self, scanposition: str):
try:
scanId = self._project.getScanId(scanposition)
self._logger.debug("scan id = {}".format(scanId))
if scanId == 'null':
self._logger.error("Scan id is null!")
return False, None
scanposPath = self._project.getActiveScanposPath(scanposition)
self._logger.debug("scanpos path = {}".format(scanposPath))
scan = os.path.basename(scanId).replace('.rxp', '')[0:13]
self._logger.debug("scan = {}".format(scan))
remoteFile = scanposPath + '/' + scan + '.tpl'
localFile = self._workingDir + '/scan.tpl'
self._ssh.downloadFile(remoteFile, localFile)
ok = True
tpl = readTpl(localFile, self._logger)
except Exception as e:
ok = False
tpl = None
return ok, tpl
def stop(self):
self._stopReq = True
if self.isScannerAvailable():
ctrlSvc = ControlService(self._connectionString)
ctrlSvc.stop()
self.isBusy()
    def trigStartStop(self):
        """Simulate an 'ACQ_START_STOP' trigger input event (toggles acquisition).

        Returns False if the scanner is busy when trying to start, or if
        scanning does not begin within 5 seconds; True otherwise.
        """
        trigStartedPrev = self._status.trigStarted
        if not self._status.trigStarted:
            if self.isBusy(block = False):
                return False
            self._status.trigStarted = True
        intfSvc = InterfaceService(self._connectionString)
        intfSvc.triggerInputEvent('ACQ_START_STOP')
        # Only when transitioning from stopped to started: wait (max. 5 s)
        # until data acquisition has actually begun.
        if not trigStartedPrev and self._status.trigStarted:
            startTime = time.time()
            while not self.isScanning(block=False):
                time.sleep(0.2)
                if (time.time() - startTime) > 5:
                    self._status.trigStarted = False
                    return False
        return True
def getScanPatterns(self):
patterns: str = []
ctrlSvc = ControlService(self._connectionString)
for pattern in json.loads(ctrlSvc.scanPatternsDetailed()):
patterns.append(pattern['name'])
#instIdentLower = self._status.status.scannerStatus.instIdent.lower()
#remotePath = '/usr/share/gui/' + instIdentLower + '/patterns'
#files = self._ssh.listFiles(remotePath, '*.pat')
#for file in files:
# patterns.append(os.path.basename(file).replace('.pat', ''))
return True, patterns
def getScanPattern(self, patternName):
ctrlSvc = ControlService(self._connectionString)
for p in json.loads(ctrlSvc.scanPatternsDetailed()):
if p['name'] == patternName:
pattern: ScanPattern = ScanPattern()
pattern.lineStart = p['thetaStart']
pattern.lineStop = p['thetaStop']
pattern.lineIncrement = p['thetaIncrement']
pattern.frameStart = p['phiStart']
pattern.frameStop = p['phiStop']
pattern.frameIncrement = p['phiIncrement']
return True, pattern
self._logger.error("Scan pattern '{}' is not available!")
return False, None
def getReflectorModels(self):
models: str = []
ctrlSvc = ControlService(self._connectionString)
for model in json.loads(ctrlSvc.supportedReflectorSearchModels()):
models.append(model['name'])
return True, models
def transformGeoCoordinate(self, srcCs: str, dstCs: str, coord1=0, coord2=0, coord3=0):
return self.geosys.transformCoordinate(srcCs, dstCs, coord1, coord2, coord3)
def shutdown(self):
self._status.shutdown()
self.stop()
if self.isScannerAvailable():
scnSvc = ScannerService(self._connectionString)
scnSvc.shutdown()
| 35,580 | 3,173 | 457 |
f8c945d466e35b1b96726b45557f65fa9d08abdb | 5,932 | py | Python | analyze_data.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | analyze_data.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | analyze_data.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from KPIForecaster.forecaster import KPIForecaster
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from datetime import datetime
import pandas as pd
import sys
import os.path
import time
path = sys.argv[0].rsplit("/", 1)[0]
# Create configuration and Database connection and our KPI Forecaster Object
#conf = Conf(os.path.join(path,"config.json"))
try:
conf = Conf(os.path.join(path,"config.json"))
except:
conf = Conf("config.json")
sql = SQLDatabase(conf)
KPIForecaster = KPIForecaster(conf, crontab=True)
StoreForecast = False
#input_report = pd.read_csv("DAILY_ANOMALY_REPORT_DL_USER_THROUGHPUT_MBPS_2020_12_13.csv")
#del input_report['Unnamed: 0']
input_df = sql.getDailyKPIData()
input_report = KPIForecaster.getYesterdaysReport(input_df)
#input_report = sql.getYesterdaysReport()
# Ensure all columns are uppercase
input_report.columns = [x.upper() for x in input_report.columns]
# Get unique cell IDs
cell_names = input_report.CELL_NAME.unique()
print(f'[INFO] Analysing {len(cell_names)} Models')
T_START = time.time()
appended_data = []
full_forecast = []
KPI = 'DL_USER_THROUGHPUT_MBPS'
# Iterate through each cell, creating a model, forecast and plot for each
for i,cell_name in enumerate(cell_names):
df_last_day, last_day = KPIForecaster.getLastDay(input_report, cell = cell_name)
ret, forecast = KPIForecaster.getForecastData(cell_name, KPI = KPI)
if ret:
foreLD, long_forecast = KPIForecaster.analyzeData(forecast, df_last_day, last_day, cell = cell_name)
print(str(i+1) + " of " + str(len(cell_names)) + " cells processed.")
appended_data.append(foreLD)
full_forecast.append(long_forecast)
#if i == 2:
# break
# Concatenate all dataframes from appended_data list
appended_data = pd.concat(appended_data, axis=0)
full_forecast = pd.concat(full_forecast, axis=0)
# Rename columns as per SQL DWH naming convention
appended_data = appended_data.rename({'ds':'START_TIME',
'Date':'DATE',
'pred_upper_15':'HISTORICAL_UPPER_BOUND',
'pred_lower_15':'HISTORICAL_LOWER_BOUND',
'Expected_Value':'HISTORICAL_PREDICTION',
'Actual_Value':'ACTUAL_VALUE',
'Exceeds_Thresh':'EXCEEDS_THRESHOLD',
'Under_Thresh':'UNDER_THRESHOLD',
'Investigate_Cell':'OUT_OF_RANGE',
'Delta':'DELTA_FROM_HIST_PREDICTION',
'Delta_from_Bound':'DELTA_FROM_HIST_BOUND'
}, axis='columns')
# Change datatypes to string
appended_data['START_TIME'] = appended_data['START_TIME'].astype(str)
appended_data['EXCEEDS_THRESHOLD'] = appended_data['EXCEEDS_THRESHOLD'].astype(str)
appended_data['UNDER_THRESHOLD'] = appended_data['UNDER_THRESHOLD'].astype(str)
appended_data['OUT_OF_RANGE'] = appended_data['OUT_OF_RANGE'].astype(str)
appended_data = appended_data.fillna(0)
appended_data['KEY'] = appended_data['CELL_NAME'] + appended_data['START_TIME']
# Get AI Predictions
predictions = KPIForecaster.getPredictions(input_df)
fin = pd.merge(appended_data, predictions, on=['KEY'], how='inner')
final = fin[['CELL_NAME',
'START_TIME',
'DATE',
'HISTORICAL_UPPER_BOUND',
'HISTORICAL_LOWER_BOUND',
'EXCEEDS_THRESHOLD',
'UNDER_THRESHOLD',
'OUT_OF_RANGE',
'DELTA_FROM_HIST_PREDICTION',
'DELTA_FROM_HIST_BOUND',
0,
'ACTUAL_VALUE',
'HISTORICAL_PREDICTION']].copy()
final = final.rename({0:'AI_PREDICTION',
'DELTA_FROM_HIST_PREDICTION':'PCT_DELTA_FROM_HIST_PREDICTION',
'DELTA_FROM_HIST_BOUND':'PCT_DELTA_FROM_HIST_BOUND'}, axis='columns')
final['DELTA_FROM_AI_PREDICTION'] = final['ACTUAL_VALUE'] - final['AI_PREDICTION']
final['DELTA_FROM_HIST_PREDICTION'] = final['ACTUAL_VALUE'] - final['HISTORICAL_PREDICTION']
#final = final[['CELL_NAME', 'START_TIME', 'DATE', 'HISTORICAL_UPPER_BOUND', 'HISTORICAL_LOWER_BOUND',
# '0', '1', '2', '3', '0', '1', '2', '3']]
final['START_TIME'] = pd.to_datetime(final['START_TIME'])
final['DATE'] = final['START_TIME'].dt.strftime('%m/%d/%Y')
final['START_TIME'] = final['START_TIME'].dt.strftime('%H:%M:%S')
final['START_TIME'] = final['START_TIME'].astype(str)
final['DATE'] = final['DATE'].astype(str)
# Add Maintenance Window filter
maintenance_window = ['00:00:00','01:00:00','02:00:00' ,'03:00:00' ,'04:00:00','05:00:00']
final['MAINTENANCE_WINDOW'] = final['START_TIME'].isin(maintenance_window)
# Output Statistics
t0 = time.time()
completion_time = t0-T_START
print("******* Total Time to Produce Reports: " + str(completion_time))
print("******* Average Time Per Model " + str(completion_time/len(cell_names)))
path = os.path.join(path,"./Reports/ANOMALY/")
KPIForecaster.makeDir(path)
date = datetime.today().strftime('%Y_%m_%d')
file_name = path + "DAILY_ANOMALY_REPORT_" + KPI + "_" + str(date) + ".csv"
appended_data.to_csv(file_name)
print("[INFO] Analysis Completed.")
print("[INFO] Uploading Report to DWH...")
sql.dumpToDWH(final, "KPI_ANOMALY")
## This should be in Train Model ##
if StoreForecast == True:
full_forecast_df = full_forecast[['CELL_NAME', 'ds',
'pred_upper_15','pred_lower_15','yhat']].copy()
full_forecast_df = full_forecast_df.rename({'ds':'TIMESTAMP',
'yhat':'PREDICTED',
'pred_upper_15':'UPPER_PREDICTION',
'pred_lower_15':'LOWER_PREDICTION'
}, axis='columns')
full_forecast_df['TIMESTAMP'] = full_forecast_df['TIMESTAMP'].astype(str)
sql.dumpToDWH(full_forecast_df, "FORECAST_DATA", if_exists = 'append')
| 39.546667 | 108 | 0.668409 | #!/usr/bin/env python
# Daily KPI anomaly report generator: loads yesterday's KPI data, compares it
# against per-cell Prophet-style forecasts, and uploads the merged anomaly
# report (plus optionally the raw forecasts) to the data warehouse.
from KPIForecaster.forecaster import KPIForecaster
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from datetime import datetime
import pandas as pd
import sys
import os.path
import time
path = sys.argv[0].rsplit("/", 1)[0]
# Create configuration and Database connection and our KPI Forecaster Object
#conf = Conf(os.path.join(path,"config.json"))
try:
    conf = Conf(os.path.join(path,"config.json"))
except Exception:  # fix: was a bare `except:` which also swallowed SystemExit
    # Fall back to a config.json in the current working directory.
    conf = Conf("config.json")
sql = SQLDatabase(conf)
# NOTE(review): this rebinds the imported class name to an instance, shadowing
# the class for the rest of the script — intentional here, but fragile.
KPIForecaster = KPIForecaster(conf, crontab=True)
StoreForecast = False
#input_report = pd.read_csv("DAILY_ANOMALY_REPORT_DL_USER_THROUGHPUT_MBPS_2020_12_13.csv")
#del input_report['Unnamed: 0']
input_df = sql.getDailyKPIData()
input_report = KPIForecaster.getYesterdaysReport(input_df)
#input_report = sql.getYesterdaysReport()
# Ensure all columns are uppercase
input_report.columns = [x.upper() for x in input_report.columns]
# Get unique cell IDs
cell_names = input_report.CELL_NAME.unique()
print(f'[INFO] Analysing {len(cell_names)} Models')
T_START = time.time()
appended_data = []
full_forecast = []
KPI = 'DL_USER_THROUGHPUT_MBPS'
# Iterate through each cell, creating a model, forecast and plot for each
for i,cell_name in enumerate(cell_names):
    df_last_day, last_day = KPIForecaster.getLastDay(input_report, cell = cell_name)
    ret, forecast = KPIForecaster.getForecastData(cell_name, KPI = KPI)
    if ret:
        foreLD, long_forecast = KPIForecaster.analyzeData(forecast, df_last_day, last_day, cell = cell_name)
        print(str(i+1) + " of " + str(len(cell_names)) + " cells processed.")
        appended_data.append(foreLD)
        full_forecast.append(long_forecast)
    #if i == 2:
    # break
# Concatenate all dataframes from appended_data list
appended_data = pd.concat(appended_data, axis=0)
full_forecast = pd.concat(full_forecast, axis=0)
# Rename columns as per SQL DWH naming convention
appended_data = appended_data.rename({'ds':'START_TIME',
                          'Date':'DATE',
                          'pred_upper_15':'HISTORICAL_UPPER_BOUND',
                          'pred_lower_15':'HISTORICAL_LOWER_BOUND',
                          'Expected_Value':'HISTORICAL_PREDICTION',
                          'Actual_Value':'ACTUAL_VALUE',
                          'Exceeds_Thresh':'EXCEEDS_THRESHOLD',
                          'Under_Thresh':'UNDER_THRESHOLD',
                          'Investigate_Cell':'OUT_OF_RANGE',
                          'Delta':'DELTA_FROM_HIST_PREDICTION',
                          'Delta_from_Bound':'DELTA_FROM_HIST_BOUND'
                          }, axis='columns')
# Change datatypes to string
appended_data['START_TIME'] = appended_data['START_TIME'].astype(str)
appended_data['EXCEEDS_THRESHOLD'] = appended_data['EXCEEDS_THRESHOLD'].astype(str)
appended_data['UNDER_THRESHOLD'] = appended_data['UNDER_THRESHOLD'].astype(str)
appended_data['OUT_OF_RANGE'] = appended_data['OUT_OF_RANGE'].astype(str)
appended_data = appended_data.fillna(0)
# Join key: cell name + timestamp uniquely identifies a prediction row.
appended_data['KEY'] = appended_data['CELL_NAME'] + appended_data['START_TIME']
# Get AI Predictions
predictions = KPIForecaster.getPredictions(input_df)
fin = pd.merge(appended_data, predictions, on=['KEY'], how='inner')
# NOTE(review): column `0` presumably carries the model's raw prediction from
# getPredictions() — confirm against KPIForecaster.getPredictions output.
final = fin[['CELL_NAME',
             'START_TIME',
             'DATE',
             'HISTORICAL_UPPER_BOUND',
             'HISTORICAL_LOWER_BOUND',
             'EXCEEDS_THRESHOLD',
             'UNDER_THRESHOLD',
             'OUT_OF_RANGE',
             'DELTA_FROM_HIST_PREDICTION',
             'DELTA_FROM_HIST_BOUND',
             0,
             'ACTUAL_VALUE',
             'HISTORICAL_PREDICTION']].copy()
final = final.rename({0:'AI_PREDICTION',
                      'DELTA_FROM_HIST_PREDICTION':'PCT_DELTA_FROM_HIST_PREDICTION',
                      'DELTA_FROM_HIST_BOUND':'PCT_DELTA_FROM_HIST_BOUND'}, axis='columns')
final['DELTA_FROM_AI_PREDICTION'] = final['ACTUAL_VALUE'] - final['AI_PREDICTION']
final['DELTA_FROM_HIST_PREDICTION'] = final['ACTUAL_VALUE'] - final['HISTORICAL_PREDICTION']
#final = final[['CELL_NAME', 'START_TIME', 'DATE', 'HISTORICAL_UPPER_BOUND', 'HISTORICAL_LOWER_BOUND',
# '0', '1', '2', '3', '0', '1', '2', '3']]
# Split the timestamp into separate DATE / START_TIME string columns.
final['START_TIME'] = pd.to_datetime(final['START_TIME'])
final['DATE'] = final['START_TIME'].dt.strftime('%m/%d/%Y')
final['START_TIME'] = final['START_TIME'].dt.strftime('%H:%M:%S')
final['START_TIME'] = final['START_TIME'].astype(str)
final['DATE'] = final['DATE'].astype(str)
# Add Maintenance Window filter (00:00-05:00 hourly slots are flagged so
# anomalies during planned maintenance can be excluded downstream).
maintenance_window = ['00:00:00','01:00:00','02:00:00' ,'03:00:00' ,'04:00:00','05:00:00']
final['MAINTENANCE_WINDOW'] = final['START_TIME'].isin(maintenance_window)
# Output Statistics
t0 = time.time()
completion_time = t0-T_START
print("******* Total Time to Produce Reports: " + str(completion_time))
print("******* Average Time Per Model " + str(completion_time/len(cell_names)))
path = os.path.join(path,"./Reports/ANOMALY/")
KPIForecaster.makeDir(path)
date = datetime.today().strftime('%Y_%m_%d')
file_name = path + "DAILY_ANOMALY_REPORT_" + KPI + "_" + str(date) + ".csv"
appended_data.to_csv(file_name)
print("[INFO] Analysis Completed.")
print("[INFO] Uploading Report to DWH...")
sql.dumpToDWH(final, "KPI_ANOMALY")
## This should be in Train Model ##
if StoreForecast:  # fix: was `== True`
    full_forecast_df = full_forecast[['CELL_NAME', 'ds',
                       'pred_upper_15','pred_lower_15','yhat']].copy()
    full_forecast_df = full_forecast_df.rename({'ds':'TIMESTAMP',
                          'yhat':'PREDICTED',
                          'pred_upper_15':'UPPER_PREDICTION',
                          'pred_lower_15':'LOWER_PREDICTION'
                          }, axis='columns')
    full_forecast_df['TIMESTAMP'] = full_forecast_df['TIMESTAMP'].astype(str)
    sql.dumpToDWH(full_forecast_df, "FORECAST_DATA", if_exists = 'append')
| 0 | 0 | 0 |
9cefe2e29a9ce84c08bf406ec5093cb4fd533a11 | 3,021 | py | Python | phylotoast/graph_util.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | null | null | null | phylotoast/graph_util.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | null | null | null | phylotoast/graph_util.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | null | null | null | from __future__ import division
# python libs
import sys
# 3rd party
importerrors = []
try:
import statsmodels.nonparametric.kde as kde
except ImportError as ie:
importerrors.append(ie)
try:
import matplotlib as mpl
except ImportError as ie:
importerrors.append(ie)
if len(importerrors) != 0:
for item in importerrors:
print ('Import Error:', item)
sys.exit()
from matplotlib.ticker import FuncFormatter, MaxNLocator, MultipleLocator
import matplotlib.pyplot as plt
def plot_kde(data, ax, title=None, color='r', fill_bt=True):
    """
    Draw a histogram smoothed by a univariate kernel density estimate.

    :type data: numpy array
    :param data: Values whose density is estimated and drawn.
    :type ax: matplotlib.Axes
    :param ax: Axes the curve is rendered onto.
    :type title: str
    :param title: Optional plot title, raised slightly above the axes.
    :type color: str
    :param color: Color of the density line; the optional fill uses the
                  same color at alpha 0.35.
    :type fill_bt: bool
    :param fill_bt: When True, shade the area beneath the density curve.
    """
    estimate = kde.KDEUnivariate(data)
    estimate.fit()
    xs, ys = estimate.support, estimate.density
    ax.plot(xs, ys, color=color, alpha=0.9, linewidth=2.25)
    if fill_bt:
        ax.fill_between(xs, ys, color=color, alpha=.35,
                        zorder=1, antialiased=True)
    if title is None:
        return
    title_artist = ax.set_title(title)
    title_artist.set_y(1.05)
def ggplot2_style(ax):
    """
    Style an axes in place so it resembles ggplot2's default theme.

    Must be called after all plot and axis manipulation operations have been
    carried out (needs to know final tick spacing).

    :type ax: matplotlib.Axes
    :param ax: Axes to restyle.
    """
    # Set the style of the major and minor grid lines, filled blocks.
    ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
    ax.grid(True, 'minor', color='0.92', linestyle='-', linewidth=0.7)
    ax.patch.set_facecolor('0.85')
    ax.set_axisbelow(True)
    # Set minor tick spacing to 1/2 of the major ticks.
    # NOTE(review): plt.xticks()/plt.yticks() operate on the *current* axes,
    # assumed to be `ax` here — confirm callers keep `ax` current.
    ax.xaxis.set_minor_locator(MultipleLocator( (plt.xticks()[0][1]-plt.xticks()[0][0]) / 2.0 ))
    ax.yaxis.set_minor_locator(MultipleLocator( (plt.yticks()[0][1]-plt.yticks()[0][0]) / 2.0 ))
    # Remove axis border by making every spine fully transparent.
    for child in ax.get_children():
        if isinstance(child, mpl.spines.Spine):
            child.set_alpha(0)
    # Restyle the tick lines.
    for line in ax.get_xticklines() + ax.get_yticklines():
        line.set_markersize(5)
        line.set_color("gray")
        line.set_markeredgewidth(1.4)
    # Remove the minor tick lines.
    for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True):
        line.set_markersize(0)
    # Only show bottom/left ticks, pointing out of the axis.
    mpl.rcParams['xtick.direction'] = 'out'
    mpl.rcParams['ytick.direction'] = 'out'
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # Fix: identity comparison with None (was `ax.legend_ != None`).
    if ax.legend_ is not None:
        lg = ax.legend_
        lg.get_frame().set_linewidth(0)
        lg.get_frame().set_alpha(0.5)
| 31.8 | 96 | 0.664681 | from __future__ import division
# python libs
import sys
# 3rd party
importerrors = []
try:
import statsmodels.nonparametric.kde as kde
except ImportError as ie:
importerrors.append(ie)
try:
import matplotlib as mpl
except ImportError as ie:
importerrors.append(ie)
if len(importerrors) != 0:
for item in importerrors:
print ('Import Error:', item)
sys.exit()
from matplotlib.ticker import FuncFormatter, MaxNLocator, MultipleLocator
import matplotlib.pyplot as plt
def plot_kde(data, ax, title=None, color='r', fill_bt=True):
"""
Plot a smoothed (by kernel density estimate) histogram.
:type data: numpy array
:param data: An array containing the data to be plotted
:type ax: matplotlib.Axes
:param ax: The Axes object to draw to
:type title: str
:param title: The plot title
:type color: str
:param color: The color of the histogram line and fill. Note that the fill
will be plotted with an alpha of 0.35.
:type fill_bt: bool
:param fill_bt: Specify whether to fill the area beneath the histogram line
"""
e = kde.KDEUnivariate(data)
e.fit()
ax.plot(e.support, e.density, color=color, alpha=0.9, linewidth=2.25)
if fill_bt:
ax.fill_between(e.support, e.density, alpha=.35, zorder=1,
antialiased=True, color=color)
if title is not None:
t = ax.set_title(title)
t.set_y(1.05)
def ggplot2_style(ax):
"""
Styles an axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have been
carried out (needs to know final tick spacing)
"""
#set the style of the major and minor grid lines, filled blocks
ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
ax.grid(True, 'minor', color='0.92', linestyle='-', linewidth=0.7)
ax.patch.set_facecolor('0.85')
ax.set_axisbelow(True)
#set minor tick spacing to 1/2 of the major ticks
ax.xaxis.set_minor_locator(MultipleLocator( (plt.xticks()[0][1]-plt.xticks()[0][0]) / 2.0 ))
ax.yaxis.set_minor_locator(MultipleLocator( (plt.yticks()[0][1]-plt.yticks()[0][0]) / 2.0 ))
#remove axis border
for child in ax.get_children():
if isinstance(child, mpl.spines.Spine):
child.set_alpha(0)
#restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_color("gray")
line.set_markeredgewidth(1.4)
#remove the minor tick lines
for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True):
line.set_markersize(0)
#only show bottom left ticks, pointing out of axis
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.direction'] = 'out'
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if ax.legend_ != None:
lg = ax.legend_
lg.get_frame().set_linewidth(0)
lg.get_frame().set_alpha(0.5)
| 0 | 0 | 0 |
90aa7d257f193631e135662f2a521bf1d12d6998 | 658 | py | Python | setup.py | tiegs/python-seafile | e2907fe786b3d19a31f028650cc0ada62f5bdcb5 | [
"Apache-2.0"
] | 1 | 2019-11-07T15:14:28.000Z | 2019-11-07T15:14:28.000Z | setup.py | tiegs/python-seafile | e2907fe786b3d19a31f028650cc0ada62f5bdcb5 | [
"Apache-2.0"
] | null | null | null | setup.py | tiegs/python-seafile | e2907fe786b3d19a31f028650cc0ada62f5bdcb5 | [
"Apache-2.0"
] | 5 | 2019-11-19T11:56:30.000Z | 2022-03-10T17:09:46.000Z | from setuptools import setup, find_packages
__version__ = '0.2.4'
setup(name='python-seafile-api',
version=__version__,
license='BSD',
description='Client interface for Seafile Web API',
author='Igor Rumyantsev',
author_email='igorrum@mail.ru',
url='https://github.com/Widly/python-seafile',
platforms=['Any'],
packages=find_packages(),
install_requires=['requests'],
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
)
| 31.333333 | 60 | 0.604863 | from setuptools import setup, find_packages
__version__ = '0.2.4'
setup(name='python-seafile-api',
version=__version__,
license='BSD',
description='Client interface for Seafile Web API',
author='Igor Rumyantsev',
author_email='igorrum@mail.ru',
url='https://github.com/Widly/python-seafile',
platforms=['Any'],
packages=find_packages(),
install_requires=['requests'],
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
)
| 0 | 0 | 0 |
0a6553dd0a98c8a21003daa74f061c0504ce1b94 | 4,827 | py | Python | bz.py | nursultanramazanov/123 | 34780b36963abf6e6330170b3aa6940b5944f686 | [
"Apache-2.0"
] | 2 | 2021-04-08T03:58:38.000Z | 2021-05-15T11:10:11.000Z | bz.py | nursultanramazanov/123 | 34780b36963abf6e6330170b3aa6940b5944f686 | [
"Apache-2.0"
] | null | null | null | bz.py | nursultanramazanov/123 | 34780b36963abf6e6330170b3aa6940b5944f686 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: Kei Choi(hanul93@gmail.com)
import bz2
import kernel
# -------------------------------------------------------------------------
# KavMain 클래스
# -------------------------------------------------------------------------
# ---------------------------------------------------------------------
# init(self, plugins_path)
# 플러그인 엔진을 초기화 한다.
# 인력값 : plugins_path - 플러그인 엔진의 위치
# verbose - 디버그 모드 (True or False)
# 리턴값 : 0 - 성공, 0 이외의 값 - 실패
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# uninit(self)
# 플러그인 엔진을 종료한다.
# 리턴값 : 0 - 성공, 0 이외의 값 - 실패
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# getinfo(self)
# 플러그인 엔진의 주요 정보를 알려준다. (제작자, 버전, ...)
# 리턴값 : 플러그인 엔진 정보
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# format(self, filehandle, filename, filename_ex)
# 파일 포맷을 분석한다.
# 입력값 : filehandle - 파일 핸들
# filename - 파일 이름
# filename_ex - 압축 파일 내부 파일 이름
# 리턴값 : {파일 포맷 분석 정보} or None
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# arclist(self, filename, fileformat)
# 압축 파일 내부의 파일 목록을 얻는다.
# 입력값 : filename - 파일 이름
# fileformat - 파일 포맷 분석 정보
# 리턴값 : [[압축 엔진 ID, 압축된 파일 이름]]
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# unarc(self, arc_engine_id, arc_name, fname_in_arc)
# 입력값 : arc_engine_id - 압축 엔진 ID
# arc_name - 압축 파일
# fname_in_arc - 압축 해제할 파일 이름
# 리턴값 : 압축 해제된 내용 or None
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# arcclose(self)
# 압축 파일 핸들을 닫는다.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# mkarc(self, arc_engine_id, arc_name, file_infos)
# 입력값 : arc_engine_id - 압축 가능 엔진 ID
# arc_name - 최종적으로 압축될 압축 파일 이름
# file_infos - 압축 대상 파일 정보 구조체
# 리턴값 : 압축 성공 여부 (True or False)
# ---------------------------------------------------------------------
| 34.478571 | 75 | 0.359644 | # -*- coding:utf-8 -*-
# Author: Kei Choi(hanul93@gmail.com)
import bz2
import kernel
# -------------------------------------------------------------------------
# KavMain 클래스
# -------------------------------------------------------------------------
class KavMain:
    """KicomAV archive plugin for BZ2 files.

    Detects the bzip2 format, lists and extracts the (single) compressed
    payload, and re-creates the archive after disinfection.
    NOTE(review): string handling assumes Python 2 `str` semantics
    (`open(..., 'rb').read()` compared/concatenated with text literals).
    """

    def init(self, plugins_path, verbose=False):
        """Initialize the plugin engine.

        :param plugins_path: directory containing the plugin engines.
        :param verbose: debug mode (True or False).
        :return: 0 on success, non-zero on failure.
        """
        return 0  # nothing to set up for BZ2

    def uninit(self):
        """Terminate the plugin engine.

        :return: 0 on success, non-zero on failure.
        """
        return 0  # nothing to tear down

    def getinfo(self):
        """Return the engine's descriptive metadata (author, version, ...)."""
        info = dict()
        info['author'] = 'Kei Choi'                  # author
        info['version'] = '1.0'                      # version
        info['title'] = 'Bz2 Archive Engine'         # engine description
        info['kmd_name'] = 'bz2'                     # engine file name
        info['engine_type'] = kernel.ARCHIVE_ENGINE  # engine type
        info['make_arc_type'] = kernel.MASTER_PACK   # re-pack after disinfection
        return info

    def format(self, filehandle, filename, filename_ex):
        """Analyze the file format.

        :param filehandle: file handle (sliceable buffer over the file head).
        :param filename: file name.
        :param filename_ex: name of the file inside a compressed archive.
        :return: {format info} when the bzip2 magic matches, else None.
        """
        ret = {}
        mm = filehandle
        if mm[0:3] == 'BZh':  # bzip2 magic number check
            ret['ff_bz2'] = 'bz2'
            return ret
        return None

    def arclist(self, filename, fileformat):
        """List the files inside the archive.

        :param filename: archive file name.
        :param fileformat: format info produced by format().
        :return: [[archive engine id, name of compressed file]]
        """
        file_scan_list = []  # holds every scan target
        # Was a BZ2 format detected during the earlier format analysis?
        if 'ff_bz2' in fileformat:
            file_scan_list.append(['arc_bz2', 'BZ2'])
        return file_scan_list

    def unarc(self, arc_engine_id, arc_name, fname_in_arc):
        """Extract the compressed payload.

        :param arc_engine_id: archive engine id.
        :param arc_name: archive file name.
        :param fname_in_arc: name of the file to extract.
        :return: decompressed contents or None.
        """
        if arc_engine_id == 'arc_bz2':
            try:
                s = open(arc_name, 'rb').read()
                data = ''
                # A .bz2 file may hold several concatenated streams;
                # decompress each one and keep going with the leftover bytes.
                while len(s):
                    try:
                        b = bz2.BZ2Decompressor()
                        data += b.decompress(s)
                        s = b.unused_data
                    except IOError:
                        break  # corrupt stream: keep whatever decompressed so far
                if len(data):
                    return data
            except IOError:
                pass  # unreadable archive -> treated as extraction failure
        return None

    def arcclose(self):
        """Close the archive handle (nothing is kept open for BZ2)."""
        pass

    def mkarc(self, arc_engine_id, arc_name, file_infos):
        """Re-create the archive after disinfection.

        :param arc_engine_id: id of an engine capable of compressing.
        :param arc_name: final archive file name.
        :param file_infos: descriptors of the files to compress.
        :return: True when compression succeeded, False otherwise.
        """
        if arc_engine_id == 'arc_bz2':
            try:
                zfile = bz2.BZ2File(arc_name, 'w')
                file_info = file_infos[0]  # a bzip2 archive holds one member
                rname = file_info.get_filename()
                data = open(rname, 'rb').read()
                zfile.write(data)
                zfile.close()
                return True
            except Exception:  # fix: was a bare `except:` that also trapped
                pass           # SystemExit/KeyboardInterrupt
        return False
| 2,158 | -7 | 230 |
d5e7250479c5c55649c8784da51d8416ce69eb1c | 545 | py | Python | Lib/importlib/machinery.py | certik/python-3.3 | 7caa9aa12103810526ed7d99794f491d8e0cbd91 | [
"PSF-2.0"
] | null | null | null | Lib/importlib/machinery.py | certik/python-3.3 | 7caa9aa12103810526ed7d99794f491d8e0cbd91 | [
"PSF-2.0"
] | null | null | null | Lib/importlib/machinery.py | certik/python-3.3 | 7caa9aa12103810526ed7d99794f491d8e0cbd91 | [
"PSF-2.0"
] | 3 | 2015-05-20T14:18:45.000Z | 2021-06-28T15:52:33.000Z | """The machinery of importlib: finders, loaders, hooks, etc."""
import _imp
from ._bootstrap import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES)
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap import PathFinder
from ._bootstrap import FileFinder
from ._bootstrap import SourceFileLoader
from ._bootstrap import SourcelessFileLoader
from ._bootstrap import ExtensionFileLoader
EXTENSION_SUFFIXES = _imp.extension_suffixes()
| 34.0625 | 72 | 0.809174 | """The machinery of importlib: finders, loaders, hooks, etc."""
import _imp
from ._bootstrap import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES)
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap import PathFinder
from ._bootstrap import FileFinder
from ._bootstrap import SourceFileLoader
from ._bootstrap import SourcelessFileLoader
from ._bootstrap import ExtensionFileLoader
EXTENSION_SUFFIXES = _imp.extension_suffixes()
| 0 | 0 | 0 |
442163a6de5f31ffa480996d7d83a24d0eeb06ce | 2,667 | py | Python | dataset/wider_face/widerface2coco.py | tianxingxia-cn/PaddleFaceDectection | d4fb9f27e95ae34a4f91189cde18091149029f92 | [
"Apache-2.0"
] | null | null | null | dataset/wider_face/widerface2coco.py | tianxingxia-cn/PaddleFaceDectection | d4fb9f27e95ae34a4f91189cde18091149029f92 | [
"Apache-2.0"
] | null | null | null | dataset/wider_face/widerface2coco.py | tianxingxia-cn/PaddleFaceDectection | d4fb9f27e95ae34a4f91189cde18091149029f92 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import os
import cv2
import sys
import json
import numpy as np
import shutil
print('开始转换,并清除无效标注...')
conver('train')
print('训练集转换完毕')
conver('val')
print('验证集转换完毕')
| 31.75 | 103 | 0.421822 | # coding=utf-8
import os
import cv2
import sys
import json
import numpy as np
import shutil
def conver(dataclass='train'):
    """Convert one WIDER FACE split's ground truth to a COCO-format JSON file.

    Reads ``wider_face_split/wider_face_<dataclass>_bbx_gt.txt`` plus the
    images under ``WIDER_<dataclass>/images/`` (opened only to obtain their
    size), drops boxes with non-positive width/height, and writes
    ``WIDERFace<Dataclass>COCO.json`` into the current directory.

    :param dataclass: split name, 'train' or 'val'.
    """
    # Skeleton of the COCO annotation file; a single "face" category (id 1).
    dataset = {"info": {
        "description": "WIDER face in COCO format.",
        "url": "",
        "version": "1.1",
        "contributor": "tianxingxia",
        "date_created": "2022-02-22"},
        "images": [],
        "annotations": [],
        "categories": [{"supercategory": "none", "id": 1, "name": "face"}],
    }
    outputpath = ""
    image_root = 'WIDER_' + dataclass + '/images/'
    phase = "WIDERFace" + dataclass.capitalize() + "COCO"
    # The gt file format: an image path line (contains '--'), then a count
    # line, then that many "x y w h ..." box lines.
    with open('wider_face_split/wider_face_' + dataclass + '_bbx_gt.txt', 'r') as f:
        lines = f.readlines()
        num_lines = len(lines)
        i_l = 0        # cursor into `lines`
        img_id = 1     # running COCO image id
        anno_id = 1    # running COCO annotation id
        imagepath = None
        while i_l < num_lines:
            # print(num_lines, '\\', i_l, '-', img_id)
            if len(lines[i_l]) < 1:
                break
            if '--' in lines[i_l]:
                imagepath = lines[i_l].strip()
                im = image_root + imagepath
                if os.path.exists(im):
                    # Read the image only to obtain height/width.
                    im = cv2.imread(im)
                    height, width, channels = im.shape
                    dataset["images"].append(
                        {"file_name": imagepath, "coco_url": "local", "height": height, "width": width,
                         "flickr_url": "local", "id": img_id})
                    i_l += 1
                    num_gt = int(lines[i_l])
                    # Consume one box line per annotation.
                    while num_gt > 0:
                        i_l += 1
                        # Only the first four fields (x, y, w, h) are used.
                        x1, y1, wid, hei = list(map(int, lines[i_l].split()))[:4]
                        num_gt -= 1
                        if wid <= 0 or hei <= 0:
                            # Invalid (degenerate) box: log and skip.
                            print(f'图像id:{img_id}有无效标注:x1={x1},wid={wid},y1={y1},hei={hei}')
                        else:
                            dataset["annotations"].append({
                                "segmentation": [],
                                "iscrowd": 0,
                                "area": wid * hei,
                                "image_id": img_id,
                                "bbox": [x1, y1, wid, hei],
                                "category_id": 1,
                                "id": anno_id})
                            anno_id = anno_id + 1
                    img_id += 1
                else:
                    # NOTE(review): when the image file is missing only the
                    # cursor advances, so the following count/box lines are
                    # re-parsed as path lines — verify against the gt format.
                    i_l += 1
            i_l += 1
    json_name = os.path.join(outputpath, "{}.json".format(phase))
    with open(json_name, 'w') as f:
        json.dump(dataset, f)
# Convert both splits; the user-facing messages are Chinese
# ("start conversion and clean invalid annotations" / "... set finished").
print('开始转换,并清除无效标注...')
conver('train')
print('训练集转换完毕')
conver('val')
print('验证集转换完毕')
| 2,473 | 0 | 23 |
eb512cac8841822493bebde02dfbbd66b14613ee | 16,351 | py | Python | src/onevision/models/enhancement/finet/finet.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/models/enhancement/finet/finet.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/models/enhancement/finet/finet.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""FINet: Fraction Detection Normalization Network for Image Restoration.
"""
from __future__ import annotations
from typing import Optional
import torch
from torch import nn
from torch import Tensor
from onevision.factory import DEBLUR
from onevision.factory import DEHAZE
from onevision.factory import DENOISE
from onevision.factory import DERAIN
from onevision.factory import IMAGE_ENHANCEMENT
from onevision.factory import MODELS
from onevision.models.enhancement.image_enhancer import ImageEnhancer
from onevision.nn import Conv3x3
from onevision.nn import FractionInstanceNorm
from onevision.nn import SAM
from onevision.type import Indexes
from onevision.type import Pretrained
from onevision.type import Tensors
from onevision.utils import console
__all__ = [
"FINet",
"FINetDeBlur",
"FINetDeBlur_x0_5",
"FINetDeHaze",
"FINetDeNoise",
"FINetDeRain",
]
# MARK: - Modules
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: - FINet
cfgs = {
# De-blur
"finet_deblur": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
"finet_deblur_x0.5": {
"in_channels": 3, "out_channels": 32, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-haze
"finet_dehaze": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-noise
"finet_denoise": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 3, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-rain
"finet_derain": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.0,
"selection": "linear",
},
}
@MODELS.register(name="finet")
@IMAGE_ENHANCEMENT.register(name="finet")
@MODELS.register(name="finet_deblur")
@IMAGE_ENHANCEMENT.register(name="finet_deblur")
@DEBLUR.register(name="finet_deblur")
@MODELS.register(name="finet_deblur_x0.5")
@IMAGE_ENHANCEMENT.register(name="finet_deblur_x0.5")
@DEBLUR.register(name="finet_deblur_x0.5")
@MODELS.register(name="finet_dehaze")
@IMAGE_ENHANCEMENT.register(name="finet_dehaze")
@DEHAZE.register(name="finet_dehaze")
@MODELS.register(name="finet_denoise")
@IMAGE_ENHANCEMENT.register(name="finet_denoise")
@DENOISE.register(name="finet_denoise")
@MODELS.register(name="finet_derain")
@IMAGE_ENHANCEMENT.register(name="finet_derain")
@DERAIN.register(name="finet_derain")
| 30.967803 | 88 | 0.541496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""FINet: Fraction Detection Normalization Network for Image Restoration.
"""
from __future__ import annotations
from typing import Optional
import torch
from torch import nn
from torch import Tensor
from onevision.factory import DEBLUR
from onevision.factory import DEHAZE
from onevision.factory import DENOISE
from onevision.factory import DERAIN
from onevision.factory import IMAGE_ENHANCEMENT
from onevision.factory import MODELS
from onevision.models.enhancement.image_enhancer import ImageEnhancer
from onevision.nn import Conv3x3
from onevision.nn import FractionInstanceNorm
from onevision.nn import SAM
from onevision.type import Indexes
from onevision.type import Pretrained
from onevision.type import Tensors
from onevision.utils import console
__all__ = [
"FINet",
"FINetDeBlur",
"FINetDeBlur_x0_5",
"FINetDeHaze",
"FINetDeNoise",
"FINetDeRain",
]
# MARK: - Modules
class UNetConvBlock(nn.Module):
    """Residual double-conv encoder block with optional downsampling,
    optional fractional instance normalization (FIN) after the first conv,
    and optional cross-stage feature fusion (CSFF) inputs.
    NOTE(review): appears to follow the HINet-style half/fraction instance
    norm design — confirm against the FINet paper.
    """

    # MARK: Magic Functions
    def __init__(
        self,
        in_channels : int,
        out_channels: int,
        downsample  : bool,
        relu_slope  : float,
        use_csff    : bool  = False,
        use_fin     : bool  = False,
        alpha       : float = 0.5,
        selection   : str   = "linear",
    ):
        super().__init__()
        # `downsample` starts as a bool flag; when True it is later replaced
        # by the strided Conv2d that performs the actual downsampling.
        self.downsample = downsample
        # 1x1 projection for the residual shortcut.
        self.identity = nn.Conv2d(in_channels, out_channels, (1, 1), (1, 1), 0)
        self.use_csff = use_csff
        self.conv_1 = nn.Conv2d(
            in_channels, out_channels, (3, 3), padding=(1, 1), bias=True
        )
        self.relu_1 = nn.LeakyReLU(relu_slope, inplace=False)
        self.conv_2 = nn.Conv2d(
            out_channels, out_channels, (3, 3), padding=(1, 1), bias=True
        )
        self.relu_2 = nn.LeakyReLU(relu_slope, inplace=False)
        # CSFF convs only exist on downsampling blocks that fuse encoder and
        # decoder features from the previous stage.
        if downsample and use_csff:
            self.csff_enc = nn.Conv2d(
                out_channels, out_channels, (3, 3), (1, 1), (1, 1)
            )
            self.csff_dec = nn.Conv2d(
                out_channels, out_channels, (3, 3), (1, 1), (1, 1)
            )
        self.use_fin = use_fin
        self.alpha = alpha
        if self.use_fin:
            # Normalize only a fraction (`alpha`) of the channels.
            self.norm = FractionInstanceNorm(
                alpha        = self.alpha,
                num_features = out_channels,
                selection    = selection,
            )
            console.log(f"Fractional Instance Normalization: "
                        f"num_features: {self.norm.num_features}")
        if downsample:
            # Strided conv halves the spatial resolution.
            self.downsample = nn.Conv2d(
                out_channels, out_channels, kernel_size=(4, 4), stride=(2, 2),
                padding=1, bias=False
            )

    # MARK: Forward Pass
    def forward(
        self,
        x  : Tensor,
        enc: Optional[Tensor] = None,
        dec: Optional[Tensor] = None
    ) -> Tensors:
        """Run the block.

        Returns `(downsampled, features)` when this is a downsampling block,
        otherwise just `features`. `enc`/`dec` must be given together and
        only on blocks built with `use_csff=True`.
        """
        out = self.conv_1(x)
        if self.use_fin:
            out = self.norm(out)
        out = self.relu_1(out)
        out = self.relu_2(self.conv_2(out))
        out += self.identity(x)  # residual shortcut
        if enc is not None and dec is not None:
            if not self.use_csff:
                raise ValueError()
            out = out + self.csff_enc(enc) + self.csff_dec(dec)
        if self.downsample:
            out_down = self.downsample(out)
            return out_down, out
        else:
            return out
class UNetUpBlock(nn.Module):
    """Decoder stage: learned 2x upsampling, concatenation with the encoder
    skip tensor, then a plain (non-normalized) UNetConvBlock."""

    def __init__(self, in_channels: int, out_channels: int, relu_slope: float):
        super().__init__()
        # Transposed conv doubles the spatial resolution.
        self.up = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size=(2, 2), stride=(2, 2),
            bias=True,
        )
        # Fuses the upsampled features with the skip connection
        # (channel count doubles after concatenation, hence `in_channels`).
        self.conv_block = UNetConvBlock(
            in_channels, out_channels, False, relu_slope
        )

    def forward(self, x: Tensor, bridge: Tensor) -> Tensor:
        upsampled = self.up(x)
        fused = torch.cat([upsampled, bridge], dim=1)
        return self.conv_block(fused)
class SkipBlocks(nn.Module):
    """Chain of `repeat_num` conv blocks through a 128-channel bottleneck,
    with a 1x1 residual shortcut from input to output."""

    def __init__(
        self, in_channels: int, out_channels: int, repeat_num: int = 1
    ):
        super().__init__()
        self.re_num = repeat_num
        mid_c = 128
        # First block enters the 128-channel width, the last one leaves it;
        # `repeat_num - 2` identical blocks sit in between.
        layers = [UNetConvBlock(in_channels, mid_c, False, 0.2)]
        layers.extend(
            UNetConvBlock(mid_c, mid_c, False, 0.2)
            for _ in range(self.re_num - 2)
        )
        layers.append(UNetConvBlock(mid_c, out_channels, False, 0.2))
        self.blocks = nn.ModuleList(layers)
        # 1x1 projection so the residual matches the output channel count.
        self.shortcut = nn.Conv2d(
            in_channels, out_channels, kernel_size=(1, 1), bias=True
        )

    def forward(self, x: Tensor) -> Tensor:
        residual = self.shortcut(x)
        features = x
        for block in self.blocks:
            features = block(features)
        return features + residual
# MARK: - FINet
cfgs = {
# De-blur
"finet_deblur": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
"finet_deblur_x0.5": {
"in_channels": 3, "out_channels": 32, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-haze
"finet_dehaze": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-noise
"finet_denoise": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 3, "fin_position_right": 4, "alpha": 0.5,
"selection": "linear",
},
# De-rain
"finet_derain": {
"in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
"fin_position_left": 0, "fin_position_right": 4, "alpha": 0.0,
"selection": "linear",
},
}
@MODELS.register(name="finet")
@IMAGE_ENHANCEMENT.register(name="finet")
class FINet(ImageEnhancer):
# MARK: Magic Functions
def __init__(
self,
# Hyperparameters
in_channels : int = 3,
out_channels : int = 64,
depth : int = 5,
relu_slope : float = 0.2,
fin_position_left : int = 0,
fin_position_right: int = 4,
alpha : float = 0.5,
selection : str = "linear",
# BaseModel's args
basename : Optional[str] = "finet",
name : Optional[str] = "finet",
num_classes: Optional[int] = None,
out_indexes: Indexes = -1,
pretrained : Pretrained = False,
*args , **kwargs
):
super().__init__(
basename = basename,
name = name,
num_classes = num_classes,
out_indexes = out_indexes,
pretrained = pretrained,
*args, **kwargs
)
# NOTE: Get Hyperparameters
self.in_channels = in_channels
self.out_channels = out_channels
self.depth = depth
self.relu_slope = relu_slope
self.fin_position_left = fin_position_left
self.fin_position_right = fin_position_right
self.alpha = alpha
self.selection = selection
# UNet Down-paths
self.down_path_1 = nn.ModuleList() # 1st UNet
self.down_path_2 = nn.ModuleList() # 2nd Unet
self.conv_01 = nn.Conv2d(
self.in_channels, self.out_channels, (3, 3), (1, 1), (1, 1)
)
self.conv_02 = nn.Conv2d(
self.in_channels, self.out_channels, (3, 3), (1, 1), (1, 1)
)
prev_channels = self.get_input_channels(self.out_channels)
for i in range(self.depth): # 0, 1, 2, 3, 4
use_fin = (True if self.fin_position_left <= i <= self.fin_position_right
else False)
downsample = True if (i + 1) < self.depth else False
self.down_path_1.append(UNetConvBlock(
prev_channels, (2**i) * self.out_channels, downsample,
self.relu_slope, use_fin=use_fin, alpha=self.alpha,
selection=self.selection,
))
self.down_path_2.append(UNetConvBlock(
prev_channels, (2**i) * self.out_channels, downsample,
self.relu_slope, use_csff=downsample, use_fin=use_fin,
alpha=self.alpha, selection=self.selection,
))
prev_channels = (2**i) * self.out_channels
# UNet Up-paths
self.up_path_1 = nn.ModuleList()
self.up_path_2 = nn.ModuleList()
self.skip_conv_1 = nn.ModuleList()
self.skip_conv_2 = nn.ModuleList()
for i in reversed(range(self.depth - 1)):
self.up_path_1.append(UNetUpBlock(
prev_channels, (2**i) * self.out_channels, self.relu_slope
))
self.up_path_2.append(UNetUpBlock(
prev_channels, (2**i) * self.out_channels, self.relu_slope
))
self.skip_conv_1.append(nn.Conv2d(
(2**i) * self.out_channels, (2**i) * self.out_channels, (3, 3),
(1, 1), (1, 1)
))
self.skip_conv_2.append(nn.Conv2d(
(2**i) * self.out_channels, (2**i) * self.out_channels, (3, 3),
(1, 1), (1, 1)
))
prev_channels = (2**i) * self.out_channels
# SAM and CSFF
self.sam12 = SAM(prev_channels, kernel_size=3, bias=True)
self.cat12 = nn.Conv2d(prev_channels * 2, prev_channels, (1, 1), (1, 1), 0)
self.last = Conv3x3(prev_channels, self.in_channels, padding=1, bias=True)
# MARK: Configure
# noinspection PyMethodMayBeStatic
    def get_input_channels(self, input_channels: int) -> int:
        """Return the channel count fed to the first down-path block.

        Identity by default; subclasses may override to widen or narrow
        the network stem without touching ``__init__``.
        """
        return input_channels
def _initialize(self):
gain = nn.init.calculate_gain("leaky_relu", 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.orthogonal_(m.weight, gain=gain)
if not m.bias is None:
nn.init.constant_(m.bias, 0)
# MARK: Forward Pass
    def forward_once(self, x: Tensor, *args, **kwargs) -> Tensors:
        """Forward pass once. Implement the logic for a single forward pass.

        Two-stage architecture: stage 1 is a UNet that produces a first
        prediction plus a SAM feature map; stage 2 is a second UNet fused
        with stage-1 encoder/decoder features and finished with a residual
        connection over the input image.

        Args:
            x (Tensor):
                Input of shape [B, C, H, W].

        Returns:
            yat (Tensors):
                Predictions ``[out_1, out_2]``: the SAM output of stage 1
                and the stage-2 output (input image added as a residual).
        """
        image = x
        # NOTE: Stage 1
        x1 = self.conv_01(image)
        encs = []
        decs = []
        # UNet1 down-path: each level except the deepest also returns the
        # pre-downsampling feature map, kept for the decoder skips.
        for i, down in enumerate(self.down_path_1):
            if (i+1) < self.depth:
                x1, x1_up = down(x1)
                encs.append(x1_up)
            else:
                x1 = down(x1)
        # Unet1 up-path: encoder features are consumed in reverse order
        # (encs[-i-1]), each filtered through its own 3x3 skip conv.
        for i, up in enumerate(self.up_path_1):
            x1 = up(x1, self.skip_conv_1[i](encs[-i-1]))
            decs.append(x1)
        # SAM: yields the stage-1 prediction and the feature map that
        # conditions stage 2.
        sam_feature, out_1 = self.sam12(x1, image)
        # NOTE: Stage 2
        x2 = self.conv_02(image)
        # Fuse the fresh stage-2 stem with SAM features via 1x1 conv.
        x2 = self.cat12(torch.cat([x2, sam_feature], dim=1))
        blocks = []
        # Unet2 down-path: levels above the bottleneck additionally receive
        # the matching stage-1 encoder and decoder maps (cross-stage fusion).
        for i, down in enumerate(self.down_path_2):
            if (i+1) < self.depth:
                x2, x2_up = down(x2, encs[i], decs[-i-1])
                blocks.append(x2_up)
            else:
                x2 = down(x2)
        # Unet2 up-path
        for i, up in enumerate(self.up_path_2):
            x2 = up(x2, self.skip_conv_2[i](blocks[-i-1]))
        # NOTE: Last layer — stage-2 prediction is residual over the input.
        out_2 = self.last(x2)
        out_2 += image
        return [out_1, out_2]
# MARK: Training
def on_fit_start(self):
"""Called at the very beginning of fit."""
super().on_fit_start()
if self.shape:
h, w, c = self.shape
if h != w:
raise ValueError("Image height and width must be equal.")
# assert h == 256 and w == 256, \
# (f"FINet model requires image's shape to be [256, 256, :]. "
# f"Got {self.shape}.")
@MODELS.register(name="finet_deblur")
@IMAGE_ENHANCEMENT.register(name="finet_deblur")
@DEBLUR.register(name="finet_deblur")
class FINetDeBlur(FINet):
    """FINet variant registered for image deblurring (config key
    ``finet_deblur`` in ``cfgs``)."""

    # Pretrained-checkpoint registry; empty — no published weights here.
    models_zoo = {}

    # MARK: Magic Functions

    def __init__(
        self,
        # BaseModel's args
        name       : Optional[str] = "finet_deblur",
        num_classes: Optional[int] = None,
        out_indexes: Indexes       = -1,
        pretrained : Pretrained    = False,
        *args, **kwargs
    ):
        """Build the variant from ``cfgs["finet_deblur"]`` defaults.

        Explicit caller kwargs win the merge (dict-union puts them last).
        """
        kwargs = cfgs["finet_deblur"] | kwargs
        super().__init__(
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
@MODELS.register(name="finet_deblur_x0.5")
@IMAGE_ENHANCEMENT.register(name="finet_deblur_x0.5")
@DEBLUR.register(name="finet_deblur_x0.5")
class FINetDeBlur_x0_5(FINet):
    """FINet deblurring variant using config key ``finet_deblur_x0.5``
    (presumably a 0.5x-scaled model — see ``cfgs`` for the actual sizes)."""

    # Pretrained-checkpoint registry; empty — no published weights here.
    models_zoo = {}

    # MARK: Magic Functions

    def __init__(
        self,
        # BaseModel's args
        name       : Optional[str] = "finet_deblur_x0.5",
        num_classes: Optional[int] = None,
        out_indexes: Indexes       = -1,
        pretrained : Pretrained    = False,
        *args, **kwargs
    ):
        """Build the variant from ``cfgs["finet_deblur_x0.5"]`` defaults.

        Explicit caller kwargs win the merge (dict-union puts them last).
        """
        kwargs = cfgs["finet_deblur_x0.5"] | kwargs
        super().__init__(
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
@MODELS.register(name="finet_dehaze")
@IMAGE_ENHANCEMENT.register(name="finet_dehaze")
@DEHAZE.register(name="finet_dehaze")
class FINetDeHaze(FINet):
    """FINet variant registered for image dehazing (config key
    ``finet_dehaze`` in ``cfgs``)."""

    # Pretrained-checkpoint registry; empty — no published weights here.
    models_zoo = {}

    # MARK: Magic Functions

    def __init__(
        self,
        # BaseModel's args
        name       : Optional[str] = "finet_dehaze",
        num_classes: Optional[int] = None,
        out_indexes: Indexes       = -1,
        pretrained : Pretrained    = False,
        *args, **kwargs
    ):
        """Build the variant from ``cfgs["finet_dehaze"]`` defaults.

        Explicit caller kwargs win the merge (dict-union puts them last).
        """
        kwargs = cfgs["finet_dehaze"] | kwargs
        super().__init__(
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
@MODELS.register(name="finet_denoise")
@IMAGE_ENHANCEMENT.register(name="finet_denoise")
@DENOISE.register(name="finet_denoise")
class FINetDeNoise(FINet):
    """FINet variant registered for image denoising (config key
    ``finet_denoise`` in ``cfgs``)."""

    # Pretrained-checkpoint registry; empty — no published weights here.
    models_zoo = {}

    # MARK: Magic Functions

    def __init__(
        self,
        # BaseModel's args
        name       : Optional[str] = "finet_denoise",
        num_classes: Optional[int] = None,
        out_indexes: Indexes       = -1,
        pretrained : Pretrained    = False,
        *args, **kwargs
    ):
        """Build the variant from ``cfgs["finet_denoise"]`` defaults.

        Explicit caller kwargs win the merge (dict-union puts them last).
        """
        kwargs = cfgs["finet_denoise"] | kwargs
        super().__init__(
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
@MODELS.register(name="finet_derain")
@IMAGE_ENHANCEMENT.register(name="finet_derain")
@DERAIN.register(name="finet_derain")
class FINetDeRain(FINet):
    """FINet variant registered for image deraining (config key
    ``finet_derain`` in ``cfgs``)."""

    # Pretrained-checkpoint registry; empty — no published weights here.
    models_zoo = {}

    # MARK: Magic Functions

    def __init__(
        self,
        # BaseModel's args
        name       : Optional[str] = "finet_derain",
        num_classes: Optional[int] = None,
        out_indexes: Indexes       = -1,
        pretrained : Pretrained    = False,
        *args, **kwargs
    ):
        """Build the variant from ``cfgs["finet_derain"]`` defaults.

        Explicit caller kwargs win the merge (dict-union puts them last).
        """
        kwargs = cfgs["finet_derain"] | kwargs
        super().__init__(
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
| 10,221 | 2,734 | 387 |
284a9385f7d56efba69289984d21767d59cdaf60 | 382 | py | Python | core/models/abstract/user_document_interaction.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/models/abstract/user_document_interaction.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/models/abstract/user_document_interaction.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
class UserDocumentInteraction(models.Model):
"""Tracks an instance of a document being downloaded"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
design_document = models.ForeignKey('core.DesignDocument', on_delete=models.SET_NULL, null=True)
| 34.727273 | 100 | 0.756545 | from django.db import models
from django.contrib.auth.models import User
class UserDocumentInteraction(models.Model):
    """Tracks an instance of a document being downloaded"""
    # Interactions are deleted together with their user (CASCADE).
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Keep the interaction row even if the document is deleted: the FK is
    # nulled instead of cascading.
    design_document = models.ForeignKey('core.DesignDocument', on_delete=models.SET_NULL, null=True)

    class Meta:
        # Abstract base model — Django creates no table for this class;
        # concrete subclasses inherit the fields.
        abstract = True
| 0 | 14 | 27 |
12a4e1194aaa3a9502740381e40f1a087fea9158 | 9,359 | py | Python | old/sage_interface.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | 16 | 2015-01-01T18:21:35.000Z | 2021-11-20T00:39:25.000Z | old/sage_interface.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | null | null | null | old/sage_interface.py | avigad/boole | 2a436c2967dbc968f6a5877c220b9757c3bc17c3 | [
"Apache-2.0"
] | 1 | 2021-05-14T11:12:31.000Z | 2021-05-14T11:12:31.000Z | ################################################################################
#
# sage_interface.py
#
# description: interface between Boole and Sage
#
# Converts Boole expressions to Sage symbolic expressions and back.
#
# In the forward direction, the user specifies the symbolic ring, by
# default the_SymbolicRing().
#
# Note: this is meant to be called from Sage.
#
# TODO: associate domain information with sage constants?
# TODO: define function with arity?
# TODO: need to better understand symbolic functions
#
################################################################################
from boole.core.expr import *
import sage
from sage.symbolic.expression_conversions import Converter
from sage.symbolic.ring import the_SymbolicRing
from sage.symbolic.function_factory import function_factory
import operator as _operator
################################################################################
#
# These dictionaries gives the Sage translations of the built-in symbols,
# built-in sorts, and Sage functions for building constants of the built-in
# sorts.
#
################################################################################
_built_in_sage_funs = {
equals.name: (lambda args: args[0] == args[1]),
not_equals.name: (lambda args: args[0] != args[1]),
plus.name: (lambda args: args[0] + args[1]),
Sum.name: (lambda args: reduce((lambda a, b: a + b), args, 0)),
times.name: (lambda args: args[0] * args[1]),
Product.name: (lambda args: reduce((lambda a, b: a * b), args, 1)),
sub.name: (lambda args: args[0] - args[1]),
div.name: (lambda args: args[0] / args[1]),
power.name: (lambda args: pow(args[0], args[1])),
neg.name: (lambda args: -args[0]),
absf.name: (lambda args: abs(args[0])),
less_than.name: (lambda args: args[0] < args[1]),
less_eq.name: (lambda args: args[0] <= args[1]),
greater_than.name: (lambda args: args[0] > args[1]),
greater_eq.name: (lambda args: args[0] >= args[1])
}
# TODO: use these to set the domain
#
#_built_in_sage_sorts = {
# Int.name: z3.IntSort,
# Real.name: z3.RealSort,
# Bool.name: z3.BoolSort
#}
_built_in_sage_sort_values = {
Int.name: (lambda val: sage.rings.integer.Integer(val)),
Real.name: (lambda val: val),
Bool.name: (lambda val: val)
}
################################################################################
#
# Exceptions associated with the Sage interface
#
################################################################################
class Sage_Interface_Error(Exception):
"""Class of all possible type errors
"""
def __init__(self, mess = ''):
"""
Arguments:
-`mess`: a string that represents the error message
"""
Exception.__init__(self, mess)
class Sage_Unexpected_Type(Sage_Interface_Error):
"""Raised when trying to translate an unexpected type
"""
pass
class Sage_Unexpected_Expression(Sage_Interface_Error):
"""Raised when there is a problem translating an expression
"""
pass
################################################################################
#
# Convert Sage expressions to Boole expressions
#
# for now, put symbolic expressions in the global namespace; later, allow
# user to specify any ring
# also, check global namespace before creating these?
#
################################################################################
class _Expr_Trans(ExprVisitor):
"""Visitor class for translating an expression from Boole
to Sage.
"""
def __init__(self, translator):
"""
Initialize with calling instance of Boole_to_Z3.
"""
self.trans = translator
class Boole_to_Sage():
"""
Translates Boole expressions to a Sage symbolic expression ring,
creating symbols as necessary.
For example:
C = Boole_to_Sage()
print C(x + y)
print C(f(x))
The call of C(x + y) creates Sage variables for x and y.
The call of C(f(x)) creates a Sage function variable for f,
but uses the previous x.
Note: do not use the same name for symbols of different type!
"""
def handle_function(self, fun, args):
"""
fun: Boole function symbol to apply
args: Sage expressions, already translated
"""
if fun.name in self.symbol_dict.keys():
# defined function symbol
sage_fun = self.symbol_dict[fun.name]
return sage_fun(*args)
elif fun.name in _built_in_sage_funs.keys():
# built-in function symbol
sage_fun = _built_in_sage_funs[fun.name]
return sage_fun(args)
else:
# new function symbol
sage_fun = function_factory(fun.name)
self.symbol_dict[fun.name] = sage_fun
return sage_fun(*args)
################################################################################
#
# Convert Sage expressions to Boole expressions
#
################################################################################
| 33.665468 | 99 | 0.562774 | ################################################################################
#
# sage_interface.py
#
# description: interface between Boole and Sage
#
# Converts Boole expressions to Sage symbolic expressions and back.
#
# In the forward direction, the user specifies the symbolic ring, by
# default the_SymbolicRing().
#
# Note: this is meant to be called from Sage.
#
# TODO: associate domain information with sage constants?
# TODO: define function with arity?
# TODO: need to better understand symbolic functions
#
################################################################################
from boole.core.expr import *
import sage
from sage.symbolic.expression_conversions import Converter
from sage.symbolic.ring import the_SymbolicRing
from sage.symbolic.function_factory import function_factory
import operator as _operator
################################################################################
#
# These dictionaries gives the Sage translations of the built-in symbols,
# built-in sorts, and Sage functions for building constants of the built-in
# sorts.
#
################################################################################
_built_in_sage_funs = {
equals.name: (lambda args: args[0] == args[1]),
not_equals.name: (lambda args: args[0] != args[1]),
plus.name: (lambda args: args[0] + args[1]),
Sum.name: (lambda args: reduce((lambda a, b: a + b), args, 0)),
times.name: (lambda args: args[0] * args[1]),
Product.name: (lambda args: reduce((lambda a, b: a * b), args, 1)),
sub.name: (lambda args: args[0] - args[1]),
div.name: (lambda args: args[0] / args[1]),
power.name: (lambda args: pow(args[0], args[1])),
neg.name: (lambda args: -args[0]),
absf.name: (lambda args: abs(args[0])),
less_than.name: (lambda args: args[0] < args[1]),
less_eq.name: (lambda args: args[0] <= args[1]),
greater_than.name: (lambda args: args[0] > args[1]),
greater_eq.name: (lambda args: args[0] >= args[1])
}
# TODO: use these to set the domain
#
#_built_in_sage_sorts = {
# Int.name: z3.IntSort,
# Real.name: z3.RealSort,
# Bool.name: z3.BoolSort
#}
_built_in_sage_sort_values = {
Int.name: (lambda val: sage.rings.integer.Integer(val)),
Real.name: (lambda val: val),
Bool.name: (lambda val: val)
}
################################################################################
#
# Exceptions associated with the Sage interface
#
################################################################################
class Sage_Interface_Error(Exception):
    """Root of the exception hierarchy for the Boole/Sage interface."""

    def __init__(self, mess=''):
        """
        Arguments:
        -`mess`: a string that represents the error message
        """
        # Delegate message storage to the standard Exception machinery.
        super().__init__(mess)
class Sage_Unexpected_Type(Sage_Interface_Error):
    """Raised when trying to translate an unexpected type."""
class Sage_Unexpected_Expression(Sage_Interface_Error):
    """Raised when there is a problem translating an expression."""
################################################################################
#
# Convert Sage expressions to Boole expressions
#
# for now, put symbolic expressions in the global namespace; later, allow
# user to specify any ring
# also, check global namespace before creating these?
#
################################################################################
class _Expr_Trans(ExprVisitor):
    """Visitor class for translating an expression from Boole
    to Sage.
    """

    def __init__(self, translator):
        """
        Initialize with the calling instance of Boole_to_Sage.
        (Original docstring said Boole_to_Z3 — a copy-paste slip; the
        caller here is Boole_to_Sage.)
        """
        self.trans = translator

    def visit_const(self, expr):
        # Constants become Sage values/variables, created on demand by the
        # translator and cached in its symbol table.
        return self.trans.get_sage_var(expr)

    def visit_app(self, expr):
        # Translate arguments bottom-up, then apply the function symbol.
        args = [self.visit(arg) for arg in expr.args]
        return self.trans.handle_function(expr.fun, args)

    def visit_abs(self, expr):
        # Lambda abstractions have no counterpart in the symbolic ring.
        raise Sage_Unexpected_Expression(str(expr))

    def visit_forall(self, expr):
        # Quantified formulas cannot be represented in Sage's symbolic ring.
        raise Sage_Unexpected_Expression(str(expr))

    def visit_exists(self, expr):
        raise Sage_Unexpected_Expression(str(expr))
class Boole_to_Sage():
    """
    Translates Boole expressions to a Sage symbolic expression ring,
    creating symbols as necessary.
    For example:
    C = Boole_to_Sage()
    print C(x + y)
    print C(f(x))
    The call of C(x + y) creates Sage variables for x and y.
    The call of C(f(x)) creates a Sage function variable for f,
    but uses the previous x.
    Note: do not use the same name for symbols of different type!
    """

    def __init__(self, target=None):
        self.reset(target)
        # Bound visitor entry point; __call__ delegates to it.
        self.expr_trans = _Expr_Trans(self).visit

    def reset(self, target=None):
        """Reset the target ring and forget every created symbol."""
        # PEP 8: compare to None with `is`, not `==`. The original used
        # `target == None`, which can misbehave for objects overriding __eq__.
        if target is None:
            target = the_SymbolicRing()
        self.target = target
        self.symbol_dict = {}  # constant and function symbols

    def make_sage_var(self, etype, name):
        """Create a Sage variable named `name` and cache it."""
        # TODO: what to do with constants of type EnumType?
        # NOTE(review): `etype` is currently unused — domain information is
        # not attached to the Sage variable (see the module-level TODO).
        sage_var = self.target.var(name)
        self.symbol_dict[name] = sage_var
        return sage_var

    def get_sage_var(self, c):
        """Translate a Boole constant `c` into a Sage value or variable."""
        if c.name in self.symbol_dict.keys():
            # defined constant
            return self.symbol_dict[c.name]
        elif c.value is not None:
            # interpreted constant. `is not None` (was `!= None`) avoids
            # invoking a custom __ne__ on the stored value — symbolic or
            # array-like values may not return a plain bool from `!=`.
            etype = c.etype()
            if etype.name in _built_in_sage_sort_values.keys():
                val_trans = _built_in_sage_sort_values[etype.name]
                return val_trans(c.value)
            else:
                raise Sage_Unexpected_Expression('Unrecognized value:' + str(c))
        else:
            # new constant
            return self.make_sage_var(c.etype(), c.name)

    def handle_function(self, fun, args):
        """
        fun: Boole function symbol to apply
        args: Sage expressions, already translated
        """
        if fun.name in self.symbol_dict.keys():
            # defined function symbol: apply with unpacked args
            sage_fun = self.symbol_dict[fun.name]
            return sage_fun(*args)
        elif fun.name in _built_in_sage_funs.keys():
            # built-in function symbol: the lambdas in _built_in_sage_funs
            # take the argument list as a single parameter — do not unpack.
            sage_fun = _built_in_sage_funs[fun.name]
            return sage_fun(args)
        else:
            # new function symbol: create, cache, then apply
            sage_fun = function_factory(fun.name)
            self.symbol_dict[fun.name] = sage_fun
            return sage_fun(*args)

    def __call__(self, expr):
        return self.expr_trans(expr)
################################################################################
#
# Convert Sage expressions to Boole expressions
#
################################################################################
class Sage_to_Boole(Converter):
    """Convert Sage symbolic expressions back into Boole expressions.

    Implements the sage `Converter` hooks: `pyobject` for literals,
    `symbol` for variables, and `relation`/`arithmetic`/`composition`
    for compound expressions.
    """

    def __init__(self, language = None, use_fake_div=False):
        language = get_language(language)
        self.language = language
        self.use_fake_div = use_fake_div

    def pyobject(self, ex, obj):
        """Translate a Python literal into a Boole constant."""
        # TODO: is there any reasonable way to assign a type
        # to this constant?
        if ex.is_integer():
            return ii(obj)
        elif ex.is_real():
            return rr(obj)
        return Const(repr(ex), language = null_language, value = obj)

    def symbol(self, ex):
        """Look up a Sage variable in the Boole language's constant table."""
        if repr(ex) in self.language.const_dict.keys():
            return self.language.const_dict[repr(ex)]
        else:
            raise Sage_Unexpected_Expression('symbol: ' + str(ex))

    def relation(self, ex, operator):
        """Translate a binary relation (==, !=, <, <=, >, >=)."""
        if operator == _operator.eq:
            return equals(self(ex.lhs()), self(ex.rhs()))
        elif operator == _operator.lt:
            return less_than(self(ex.lhs()), self(ex.rhs()))
        elif operator == _operator.gt:
            return greater_than(self(ex.lhs()), self(ex.rhs()))
        elif operator == _operator.ne:
            return not_equals(self(ex.lhs()), self(ex.rhs()))
        elif operator == _operator.le:
            return less_eq(self(ex.lhs()), self(ex.rhs()))
        elif operator == _operator.ge:
            return greater_eq(self(ex.lhs()), self(ex.rhs()))
        else:
            raise Sage_Unexpected_Expression('relation: ' + str(ex))

    def arithmetic(self, ex, operator):
        """Translate an arithmetic node by recursing into its operands.

        NOTE(review): `_operator.div` exists only on Python 2; on Python 3
        division arrives through a different operator — confirm the target
        runtime before relying on that branch.
        """
        if operator == _operator.add:
            return self(ex.operands()[0]) + self(ex.operands()[1])
        elif operator == _operator.sub:
            return self(ex.operands()[0]) - self(ex.operands()[1])
        elif operator == _operator.mul:
            return self(ex.operands()[0]) * self(ex.operands()[1])
        elif operator == _operator.div:
            return self(ex.operands()[0]) / self(ex.operands()[1])
        elif operator == _operator.pow:
            return power(self(ex.operands()[0]), self(ex.operands()[1]))
        else:
            raise Sage_Unexpected_Expression('arithmetic: ' + str(ex))

    def composition(self, ex, operator):
        """Translate a function application whose head is a known constant."""
        op = str(operator)
        if str(op) in self.language.const_dict.keys():
            f = self.language.const_dict[op]
            args = map(self, ex.operands())
            return f(*args)
        else:
            raise Sage_Unexpected_Expression('composition: ' + str(ex))
| 3,703 | 10 | 527 |
63cbb0dac383fa5f9c71e22ae91675cd18bd56c6 | 545 | py | Python | practice/79.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 1 | 2020-01-15T11:04:16.000Z | 2020-01-15T11:04:16.000Z | practice/79.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 2 | 2021-03-31T19:36:19.000Z | 2021-06-10T22:29:26.000Z | practice/79.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | null | null | null | #Create a script that lets the user submit a password until they have satisfied three conditions:
#1. Password contains at least one number
#2. Contains one uppercase letter
#3. It is at least 5 chars long
#Print out message "Passowrd is not fine" if the user didn't create a correct password
while True:
psw = input("Enter new password: ")
if any(i.isdigit() for i in psw) and any(i.isupper() for i in psw) and len(psw) >= 5:
print("Password is fine")
break
else:
print("Passowrd is not fine")
| 38.928571 | 98 | 0.675229 | #Create a script that lets the user submit a password until they have satisfied three conditions:
#1. Password contains at least one number
#2. Contains one uppercase letter
#3. It is at least 5 chars long
#Print out message "Passowrd is not fine" if the user didn't create a correct password
while True:
psw = input("Enter new password: ")
if any(i.isdigit() for i in psw) and any(i.isupper() for i in psw) and len(psw) >= 5:
print("Password is fine")
break
else:
print("Passowrd is not fine")
| 0 | 0 | 0 |
3a1d327c0377248229fd10aaad88c686d742298d | 2,091 | py | Python | examples/person_segmentation.py | HumanParsingSDK/human_parsing_sdk | 52c530e85c84245545af0330f93185218eb28276 | [
"MIT"
] | 3 | 2020-10-28T08:01:22.000Z | 2021-08-25T13:25:02.000Z | examples/person_segmentation.py | HumanParsingSDK/human_parsing_sdk | 52c530e85c84245545af0330f93185218eb28276 | [
"MIT"
] | 1 | 2021-08-25T13:24:59.000Z | 2021-08-25T13:24:59.000Z | examples/person_segmentation.py | HumanParsingSDK/human_parsing_sdk | 52c530e85c84245545af0330f93185218eb28276 | [
"MIT"
] | null | null | null | import argparse
import sys
import cv2
from albumentations import Compose, SmallestMaxSize, CenterCrop
from pietoolbelt.viz import ColormapVisualizer
from segmentation import Segmentation
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Segmentation example')
    parser.add_argument('-i', '--image', type=str, help='Path to image to predict', required=False)
    parser.add_argument('-w', '--web_cam', help='Use web camera id to predict', action='store_true')
    parser.add_argument('-d', '--device', type=str, help='Device', required=False, default='cuda')

    if len(sys.argv) < 2:
        print('Bad arguments passed', file=sys.stderr)
        parser.print_help(file=sys.stderr)
        exit(2)

    args = parser.parse_args()

    # `--web_cam` is a store_true flag, so it is False (never None) when
    # absent. The original compared it to None, which made `-i` alone always
    # trip the "define one of" error. Require exactly one mode.
    if (args.image is None and not args.web_cam) or (args.image is not None and args.web_cam):
        print("Please define one of option: -i or -w")
        parser.print_help(file=sys.stderr)
        sys.exit(1)

    vis = ColormapVisualizer([0.5, 0.5])

    seg = Segmentation(accuracy_lvl=Segmentation.Level.LEVEL_2)
    seg.set_device(args.device)

    data_transform = Compose([SmallestMaxSize(max_size=512, always_apply=True),
                              CenterCrop(height=512, width=512, always_apply=True)], p=1)

    if args.image is not None:
        image = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)
        # Albumentations pipelines are called with named arrays and return a
        # dict (same pattern as the webcam branch below); the original passed
        # the array positionally and kept the raw return value.
        image = data_transform(image=image)['image']
        # seg.process works in RGB; convert back to BGR for OpenCV's imwrite.
        result = seg.process(image)[0]
        cv2.imwrite('result.jpg', cv2.cvtColor(result, cv2.COLOR_RGB2BGR))
    elif args.web_cam:
        title = "Person segmentation example"
        cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL)
        cap = cv2.VideoCapture(0)

        while cv2.waitKey(1) & 0xFF != ord('q'):
            ret, frame = cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = data_transform(image=frame)['image']
            img, mask = seg.process(frame)
            image = vis.process_img(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), mask)
            cv2.imshow(title, image)

        cap.release()
        cv2.destroyAllWindows()
| 36.051724 | 110 | 0.658537 | import argparse
import sys
import cv2
from albumentations import Compose, SmallestMaxSize, CenterCrop
from pietoolbelt.viz import ColormapVisualizer
from segmentation import Segmentation
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Segmentation example')
parser.add_argument('-i', '--image', type=str, help='Path to image to predict', required=False)
parser.add_argument('-w', '--web_cam', help='Use web camera id to predict', action='store_true')
parser.add_argument('-d', '--device', type=str, help='Device', required=False, default='cuda')
if len(sys.argv) < 2:
print('Bad arguments passed', file=sys.stderr)
parser.print_help(file=sys.stderr)
exit(2)
args = parser.parse_args()
if (args.image is None and args.web_cam is None) or (args.image is not None and args.web_cam is not None):
print("Please define one of option: -i or -w")
parser.print_help(file=sys.stderr)
sys.exit(1)
vis = ColormapVisualizer([0.5, 0.5])
seg = Segmentation(accuracy_lvl=Segmentation.Level.LEVEL_2)
seg.set_device(args.device)
data_transform = Compose([SmallestMaxSize(max_size=512, always_apply=True),
CenterCrop(height=512, width=512, always_apply=True)], p=1)
if args.image is not None:
image = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)
image = data_transform(image)
cv2.imwrite('result.jpg', seg.process(image)[0])
elif args.web_cam is not None:
title = "Person segmentation example"
cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL)
cap = cv2.VideoCapture(0)
while cv2.waitKey(1) & 0xFF != ord('q'):
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = data_transform(image=frame)['image']
img, mask = seg.process(frame)
image = vis.process_img(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), mask)
cv2.imshow(title, image)
cap.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
d86575109e6d184dcaa9bf4d056ca2b477c5ba37 | 28,369 | py | Python | coastSHARK/tests/complexity_java_test.py | SteffenTunkel/coastSHARK | d97ef55230b149a779af048153a61221f5a3eb06 | [
"Apache-2.0"
] | null | null | null | coastSHARK/tests/complexity_java_test.py | SteffenTunkel/coastSHARK | d97ef55230b149a779af048153a61221f5a3eb06 | [
"Apache-2.0"
] | 2 | 2020-11-09T17:54:41.000Z | 2020-11-13T15:54:11.000Z | coastSHARK/tests/complexity_java_test.py | SteffenTunkel/coastSHARK | d97ef55230b149a779af048153a61221f5a3eb06 | [
"Apache-2.0"
] | 2 | 2019-08-09T09:54:36.000Z | 2021-07-23T10:29:34.000Z | import unittest
import javalang
from coastSHARK.util.complexity_java import ComplexityJava, SourcemeterConversion
BINOP_TEST = """package de.ugoe.cs.coast;
public class BinopTest {
public void test1() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
Boolean f = true;
if (a && b && c || d || e && f) {
// if cc = 1
// sequence cc = 3
}
}
public void test2() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
if (a && !(b && c)) {
// if cc = 1
// sequence cc = 2
}
}
public void test3() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
if (a && b || c && d || e) {
// if cc = 1
// sequence cc = 4
}
}
public void test4() {
Boolean a = true;
Boolean b = true;
if(a == b) {
// if = 1
// cc = 1
} else if (a != b) {
// cc = 1
}
}
public void test5() {
Boolean a = true;
Boolean b = true;
Boolean c = a && b;
}
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
String resultText = "";
if (resultCode != RESULT_OK) {
resultText = "An error occured while contacting OI Safe. Does it allow remote access? (Please check in the settings of OI Safe).";
} else {
if (requestCode == ENCRYPT_REQUEST || requestCode == DECRYPT_REQUEST) {
resultText = data.getStringExtra(CryptoIntents.EXTRA_TEXT);
} else if (requestCode == SET_PASSWORD_REQUEST) {
resultText = "Request to set password sent.";
} else if (requestCode == GET_PASSWORD_REQUEST) {
String uname = data.getStringExtra(CryptoIntents.EXTRA_USERNAME);
String pwd = data.getStringExtra(CryptoIntents.EXTRA_PASSWORD);
resultText = uname + ":" + pwd;
} else if (requestCode == SPOOF_REQUEST) {
resultText = data.getStringExtra("masterKey");
}
}
EditText outputText = (EditText) findViewById(R.id.output_entry);
outputText.setText(resultText, android.widget.TextView.BufferType.EDITABLE);
}
}
"""
NESTING_TEST = """package de.ugoe.cs.coast;
public class NestingTest {
public void myMethod() {
Boolean condition1 = true;
Boolean condition2 = true;
try {
// try does not count towards nesting
if (condition1) {
// +1
for (int i = 0; i < 10; i++) {
// +2 (nesting=1)
while (condition2) {
// +3 (nesting=2)
}
}
}
} catch (ExcepType2 e) {
// +1
if (condition2) {
// +2 (nesting=1)
}
}
}
// sum cc = 9
}
"""
# Sonar does not count default: but we include this in our count
SWITCH_TEST = """package de.ugoe.cs.coast;
public class SwitchTest {
public String getWords(int number) { // mccc = +1
switch (number) {
case 1: // mccc = +1
return "one";
case 2: // mccc = +1
return "a couple";
case 3: // mccc = +1
return "a few";
default:
return "lots";
}
}
// mccc = 4
// cc = 1
}
"""
OVERLOADING_TEST = """package de.ugoe.cs.coast;
public class OverloadingTest {
public void test(long number) {
}
public String test(int number1, int number2) {
}
public boolean test(int number1, int number2, boolean test) {
}
}
"""
PARAM_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public java.lang.String writeAll(java.sql.ResultSet rs, boolean includeColumnNames) throws SQLException, IOException {
ResultSetMetaData metadata = rs.getMetaData();
if (includeColumnNames) {
writeColumnNames(metadata);
}
int columnCount = metadata.getColumnCount();
while (rs.next()) {
String[] nextLine = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
nextLine[i] = getColumnValue(rs, metadata.getColumnType(i + 1), i + 1);
}
writeNext(nextLine);
}
}
}
"""
CONSTRUCTOR_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public ParamTest(int i) {
}
}
"""
STATIC_NESTED_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public void test1() {
}
static class ParamTest2 {
public void test2() {
}
}
}
"""
ANO_TEST = """
package de.ugoe.cs.coast;
public class AnoTest {
// nested anonymous class
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
// anonymous class in method
public void test() {
// we need to ignore this
List<String> passDescriptions4Adapter=new ArrayList<String>();
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive2(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
}
// we want to only count NewDialogInterface and not new AlertDialog, this would otherwise mess with the counting of anonymous classes
public void test2() {
dbHelper = new DBHelper(this);
if (dbHelper.isDatabaseOpen()==false) {
Dialog dbError = new AlertDialog.Builder(this)
.setIcon(android.R.drawable.ic_dialog_alert)
.setTitle(R.string.database_error_title)
.setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int whichButton) {
finish();
}
})
.setMessage(R.string.database_error_msg)
.create();
dbError.show();
return;
}
}
// anonymous class in method with multiple methods
public void test3() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void naTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void naTest2() {
}
};
}
// multi layer inline is not counted (only outermost layer)
public void test4() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void inTest1(Context context, Intent intent) {
//hallo
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void sTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void sTest2() {
}
};
}
};
}
}
"""
INTERFACE_TEST = """
package de.ugoe.cs.coast;
public interface ITest {
public int getWordSize();
}
"""
INLINE_INTERFACE_TEST = """
package de.ugoe.cs.coast;
public class InterfaceClass {
public interface ITest {
public int getWordSize();
}
}
"""
CC_TEST = """
package de.ugoe.cs.coast;
public class CCTestClass {
public long updatePassword(long Id, PassEntry entry) {
ContentValues args = new ContentValues();
args.put("description", entry.description);
args.put("username", entry.username);
args.put("password", entry.password);
args.put("website", entry.website);
args.put("note", entry.note);
args.put("unique_name", entry.uniqueName);
DateFormat dateFormatter = DateFormat.getDateTimeInstance(DateFormat.DEFAULT, DateFormat.FULL);
Date today = new Date();
String dateOut = dateFormatter.format(today);
args.put("lastdatetimeedit", dateOut);
try {
db.update(TABLE_PASSWORDS, args, "id=" + Id, null);
} catch (SQLException e)
{
Log.d(TAG,"updatePassword: SQLite exception: " + e.getLocalizedMessage());
return -1;
}
return Id;
}
}
"""
LONG_NAME1 = """org.openintents.safe.CategoryList.onCreateContextMenu(Landroid/view/ContextMenu;Landroid/view/View;Landroid/view/ContextMenu$ContextMenuInfo;)Landroid/app/Dialog;"""
LONG_NAME2 = """org.openintents.safe.SearchFragment.getRowsIds(Ljava/util/List;)[J"""
LONG_NAME3 = """org.openintents.safe.RestoreHandler.characters([CII)V"""
LONG_NAME4 = """org.openintents.safe.Import.doInBackground([Ljava/lang/String;)Ljava/lang/String;"""
LONG_NAME5 = """org.openintents.safe.CryptoContentProvider.insert(Landroid/net/Uri;Landroid/content/ContentValues;)Landroid/net/Uri;"""
LONG_NAME6 = """org.openintents.safe.CryptoContentProvider.query(Landroid/net/Uri;[Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)Landroid/database/Cursor;"""
LONG_NAME7 = """org.openintents.distribution.EulaActivity$1.onClick(Landroid/view/View;)V"""
LONG_NAME8 = """org.openintents.distribution.AboutDialog.<init>(Landroid/content/Context;)V"""
LONG_NAME9 = """estreamj.ciphers.trivium.Trivium$Maker.getName()Ljava/lang/String;"""
LONG_NAME10 = """de.guoe.cs.test(D)Ljava/lang/String;"""
LONG_NAME11 = """org.apache.zookeeper.ZKParameterized$RunnerFactory.createRunnerForTestWithParameters(LTestWithParameters;)Lorg.junit.runner.Runner;"""
# LONG_NAME12 = """"""
# LONG_NAME13 = """de.guoe.cs.test(LLString;L)V"""
NESTED_ANO_TEST = """package de.ugoe.cs.coast;
public class NestedAnoTest {
private class importTask {
private class importTask2 {
protected void onPostExecute(String result) {
Dialog about = new AlertDialog.Builder(CategoryList.this)
.setIcon(R.drawable.passicon)
.setTitle(R.string.import_complete)
.setPositiveButton(R.string.yes,
new DialogInterface.OnClickListener() {
public void onClick(int whichButton) {
File csvFile = new File(
importedFilename);
// csvFile.delete();
SecureDelete.delete(csvFile);
importedFilename = "";
}
})
.setNegativeButton(R.string.no,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog,
int whichButton) {
}
}).setMessage(deleteMsg).create();
about.show();
}
}
}
}
"""
NESTED_NAMED_TEST = """package de.ugoe.cs.coast;
public class NestedNamedTest {
private void puit() {
}
private class importTask {
private void zoot() {
}
private class importTask2 {
private importTask2() {
}
private Void narf() {
}
}
}
}
"""
NESTED_INTERFACE_TEST = """package de.ugoe.cs.coast;
public interface NestedInterfaceTest {
public class TestClass {
public void test1() {
}
}
}
"""
LONG_NAME_CONVERSION_TEST = """package de.ugoe.cs.coast;
public class LongNameConversionTest {
public void test1(String a, long b, int i) {
}
public String[] test2(int[] a, byte[][] b) {
}
public String test3(long a, String[] b, long c) {
}
public void test4(K key, V value) {
}
}
"""
OBJECT_NAME_TEST = """package de.ugoe.cs.coast;
public class ObjectNameTest {
public java.lang.Object test1(Object K, java.lang.Object V) {
}
}
"""
ENUM_TEST = """package de.ugoe.cs.coast;
public enum EnumTest {
PERSISTENT_SEQUENTIAL_WITH_TTL(6, false, true, false, true);
EnumTest() {
}
public void test1(int a) {
}
}
"""
ARRAY_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private bytes[] narf(java.lang.String[][] args, int[] a, float b) {
}
}
"""
VARARGS_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private void narf(int a, String... args) {
}
}
"""
# todo:
# - anonymous class in named inner class
# org.openintents.safe.CategoryList$importTask$2.onClick(Landroid/content/DialogInterface;I)V
# import logging, sys
# log = logging.getLogger()
# log.setLevel(logging.DEBUG)
# i = logging.StreamHandler(sys.stdout)
# e = logging.StreamHandler(sys.stderr)
# i.setLevel(logging.DEBUG)
# e.setLevel(logging.ERROR)
# log.addHandler(i)
# log.addHandler(e)
| 34.554202 | 185 | 0.565476 | import unittest
import javalang
from coastSHARK.util.complexity_java import ComplexityJava, SourcemeterConversion
BINOP_TEST = """package de.ugoe.cs.coast;
public class BinopTest {
public void test1() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
Boolean f = true;
if (a && b && c || d || e && f) {
// if cc = 1
// sequence cc = 3
}
}
public void test2() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
if (a && !(b && c)) {
// if cc = 1
// sequence cc = 2
}
}
public void test3() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
if (a && b || c && d || e) {
// if cc = 1
// sequence cc = 4
}
}
public void test4() {
Boolean a = true;
Boolean b = true;
if(a == b) {
// if = 1
// cc = 1
} else if (a != b) {
// cc = 1
}
}
public void test5() {
Boolean a = true;
Boolean b = true;
Boolean c = a && b;
}
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
String resultText = "";
if (resultCode != RESULT_OK) {
resultText = "An error occured while contacting OI Safe. Does it allow remote access? (Please check in the settings of OI Safe).";
} else {
if (requestCode == ENCRYPT_REQUEST || requestCode == DECRYPT_REQUEST) {
resultText = data.getStringExtra(CryptoIntents.EXTRA_TEXT);
} else if (requestCode == SET_PASSWORD_REQUEST) {
resultText = "Request to set password sent.";
} else if (requestCode == GET_PASSWORD_REQUEST) {
String uname = data.getStringExtra(CryptoIntents.EXTRA_USERNAME);
String pwd = data.getStringExtra(CryptoIntents.EXTRA_PASSWORD);
resultText = uname + ":" + pwd;
} else if (requestCode == SPOOF_REQUEST) {
resultText = data.getStringExtra("masterKey");
}
}
EditText outputText = (EditText) findViewById(R.id.output_entry);
outputText.setText(resultText, android.widget.TextView.BufferType.EDITABLE);
}
}
"""
NESTING_TEST = """package de.ugoe.cs.coast;
public class NestingTest {
public void myMethod() {
Boolean condition1 = true;
Boolean condition2 = true;
try {
// try does not count towards nesting
if (condition1) {
// +1
for (int i = 0; i < 10; i++) {
// +2 (nesting=1)
while (condition2) {
// +3 (nesting=2)
}
}
}
} catch (ExcepType2 e) {
// +1
if (condition2) {
// +2 (nesting=1)
}
}
}
// sum cc = 9
}
"""
# Sonar does not count default: but we include this in our count
SWITCH_TEST = """package de.ugoe.cs.coast;
public class SwitchTest {
public String getWords(int number) { // mccc = +1
switch (number) {
case 1: // mccc = +1
return "one";
case 2: // mccc = +1
return "a couple";
case 3: // mccc = +1
return "a few";
default:
return "lots";
}
}
// mccc = 4
// cc = 1
}
"""
OVERLOADING_TEST = """package de.ugoe.cs.coast;
public class OverloadingTest {
public void test(long number) {
}
public String test(int number1, int number2) {
}
public boolean test(int number1, int number2, boolean test) {
}
}
"""
PARAM_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public java.lang.String writeAll(java.sql.ResultSet rs, boolean includeColumnNames) throws SQLException, IOException {
ResultSetMetaData metadata = rs.getMetaData();
if (includeColumnNames) {
writeColumnNames(metadata);
}
int columnCount = metadata.getColumnCount();
while (rs.next()) {
String[] nextLine = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
nextLine[i] = getColumnValue(rs, metadata.getColumnType(i + 1), i + 1);
}
writeNext(nextLine);
}
}
}
"""
CONSTRUCTOR_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public ParamTest(int i) {
}
}
"""
STATIC_NESTED_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public void test1() {
}
static class ParamTest2 {
public void test2() {
}
}
}
"""
ANO_TEST = """
package de.ugoe.cs.coast;
public class AnoTest {
// nested anonymous class
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
// anonymous class in method
public void test() {
// we need to ignore this
List<String> passDescriptions4Adapter=new ArrayList<String>();
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive2(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
}
// we want to only count NewDialogInterface and not new AlertDialog, this would otherwise mess with the counting of anonymous classes
public void test2() {
dbHelper = new DBHelper(this);
if (dbHelper.isDatabaseOpen()==false) {
Dialog dbError = new AlertDialog.Builder(this)
.setIcon(android.R.drawable.ic_dialog_alert)
.setTitle(R.string.database_error_title)
.setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int whichButton) {
finish();
}
})
.setMessage(R.string.database_error_msg)
.create();
dbError.show();
return;
}
}
// anonymous class in method with multiple methods
public void test3() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void naTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void naTest2() {
}
};
}
// multi layer inline is not counted (only outermost layer)
public void test4() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void inTest1(Context context, Intent intent) {
//hallo
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void sTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void sTest2() {
}
};
}
};
}
}
"""
INTERFACE_TEST = """
package de.ugoe.cs.coast;
public interface ITest {
public int getWordSize();
}
"""
INLINE_INTERFACE_TEST = """
package de.ugoe.cs.coast;
public class InterfaceClass {
public interface ITest {
public int getWordSize();
}
}
"""
CC_TEST = """
package de.ugoe.cs.coast;
public class CCTestClass {
public long updatePassword(long Id, PassEntry entry) {
ContentValues args = new ContentValues();
args.put("description", entry.description);
args.put("username", entry.username);
args.put("password", entry.password);
args.put("website", entry.website);
args.put("note", entry.note);
args.put("unique_name", entry.uniqueName);
DateFormat dateFormatter = DateFormat.getDateTimeInstance(DateFormat.DEFAULT, DateFormat.FULL);
Date today = new Date();
String dateOut = dateFormatter.format(today);
args.put("lastdatetimeedit", dateOut);
try {
db.update(TABLE_PASSWORDS, args, "id=" + Id, null);
} catch (SQLException e)
{
Log.d(TAG,"updatePassword: SQLite exception: " + e.getLocalizedMessage());
return -1;
}
return Id;
}
}
"""
LONG_NAME1 = """org.openintents.safe.CategoryList.onCreateContextMenu(Landroid/view/ContextMenu;Landroid/view/View;Landroid/view/ContextMenu$ContextMenuInfo;)Landroid/app/Dialog;"""
LONG_NAME2 = """org.openintents.safe.SearchFragment.getRowsIds(Ljava/util/List;)[J"""
LONG_NAME3 = """org.openintents.safe.RestoreHandler.characters([CII)V"""
LONG_NAME4 = """org.openintents.safe.Import.doInBackground([Ljava/lang/String;)Ljava/lang/String;"""
LONG_NAME5 = """org.openintents.safe.CryptoContentProvider.insert(Landroid/net/Uri;Landroid/content/ContentValues;)Landroid/net/Uri;"""
LONG_NAME6 = """org.openintents.safe.CryptoContentProvider.query(Landroid/net/Uri;[Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)Landroid/database/Cursor;"""
LONG_NAME7 = """org.openintents.distribution.EulaActivity$1.onClick(Landroid/view/View;)V"""
LONG_NAME8 = """org.openintents.distribution.AboutDialog.<init>(Landroid/content/Context;)V"""
LONG_NAME9 = """estreamj.ciphers.trivium.Trivium$Maker.getName()Ljava/lang/String;"""
LONG_NAME10 = """de.guoe.cs.test(D)Ljava/lang/String;"""
LONG_NAME11 = """org.apache.zookeeper.ZKParameterized$RunnerFactory.createRunnerForTestWithParameters(LTestWithParameters;)Lorg.junit.runner.Runner;"""
# LONG_NAME12 = """"""
# LONG_NAME13 = """de.guoe.cs.test(LLString;L)V"""
NESTED_ANO_TEST = """package de.ugoe.cs.coast;
public class NestedAnoTest {
private class importTask {
private class importTask2 {
protected void onPostExecute(String result) {
Dialog about = new AlertDialog.Builder(CategoryList.this)
.setIcon(R.drawable.passicon)
.setTitle(R.string.import_complete)
.setPositiveButton(R.string.yes,
new DialogInterface.OnClickListener() {
public void onClick(int whichButton) {
File csvFile = new File(
importedFilename);
// csvFile.delete();
SecureDelete.delete(csvFile);
importedFilename = "";
}
})
.setNegativeButton(R.string.no,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog,
int whichButton) {
}
}).setMessage(deleteMsg).create();
about.show();
}
}
}
}
"""
NESTED_NAMED_TEST = """package de.ugoe.cs.coast;
public class NestedNamedTest {
private void puit() {
}
private class importTask {
private void zoot() {
}
private class importTask2 {
private importTask2() {
}
private Void narf() {
}
}
}
}
"""
NESTED_INTERFACE_TEST = """package de.ugoe.cs.coast;
public interface NestedInterfaceTest {
public class TestClass {
public void test1() {
}
}
}
"""
LONG_NAME_CONVERSION_TEST = """package de.ugoe.cs.coast;
public class LongNameConversionTest {
public void test1(String a, long b, int i) {
}
public String[] test2(int[] a, byte[][] b) {
}
public String test3(long a, String[] b, long c) {
}
public void test4(K key, V value) {
}
}
"""
OBJECT_NAME_TEST = """package de.ugoe.cs.coast;
public class ObjectNameTest {
public java.lang.Object test1(Object K, java.lang.Object V) {
}
}
"""
ENUM_TEST = """package de.ugoe.cs.coast;
public enum EnumTest {
PERSISTENT_SEQUENTIAL_WITH_TTL(6, false, true, false, true);
EnumTest() {
}
public void test1(int a) {
}
}
"""
ARRAY_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private bytes[] narf(java.lang.String[][] args, int[] a, float b) {
}
}
"""
VARARGS_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private void narf(int a, String... args) {
}
}
"""
# todo:
# - anonymous class in named inner class
# org.openintents.safe.CategoryList$importTask$2.onClick(Landroid/content/DialogInterface;I)V
# import logging, sys
# log = logging.getLogger()
# log.setLevel(logging.DEBUG)
# i = logging.StreamHandler(sys.stdout)
# e = logging.StreamHandler(sys.stderr)
# i.setLevel(logging.DEBUG)
# e.setLevel(logging.ERROR)
# log.addHandler(i)
# log.addHandler(e)
class ComplexityJavaTest(unittest.TestCase):
    """Unit tests for ComplexityJava (AST-based complexity/metric extraction
    from Java source) and SourcemeterConversion (SourceMeter-style long-name
    parsing and normalization).

    The *_TEST module-level constants hold the Java snippets that get parsed
    with javalang; each metric dict yielded by cognitive_complexity() is
    checked for class/method names, parameter and return types, and the
    cyclomatic / Sonar cognitive complexity values.
    """
    def test_array(self):
        ast = javalang.parse.parse(ARRAY_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'narf')
        self.assertEqual(m[0]['parameter_types'], ['[[String', '[int', 'float'])
        self.assertEqual(m[0]['return_type'], '[bytes')
    def test_varargs(self):
        ast = javalang.parse.parse(VARARGS_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'narf')
        self.assertEqual(m[0]['parameter_types'], ['int', '[String'])
        self.assertEqual(m[0]['return_type'], 'Void')
    def test_enum(self):
        ast = javalang.parse.parse(ENUM_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['class_name'], 'EnumTest')
        self.assertEqual(m[0]['method_name'], '<init>')
        self.assertEqual(m[0]['parameter_types'], [])
        self.assertEqual(m[1]['class_name'], 'EnumTest')
        self.assertEqual(m[1]['method_name'], 'test1')
        self.assertEqual(m[1]['parameter_types'], ['int'])
    def test_object_name(self):
        ast = javalang.parse.parse(OBJECT_NAME_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['class_name'], 'ObjectNameTest')
        self.assertEqual(m[0]['method_name'], 'test1')
        self.assertEqual(m[0]['parameter_types'], ['Object', 'Object'])
    # --- nested / inner / anonymous class naming (Outer$Inner, Outer$1, ...) ---
    def test_nested_interface(self):
        ast = javalang.parse.parse(NESTED_INTERFACE_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['class_name'], 'NestedInterfaceTest$TestClass')
        self.assertEqual(m[0]['method_name'], 'test1')
    def test_nested_named(self):
        ast = javalang.parse.parse(NESTED_NAMED_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'puit')
        self.assertEqual(m[0]['class_name'], 'NestedNamedTest')
        self.assertEqual(m[1]['method_name'], 'zoot')
        self.assertEqual(m[1]['class_name'], 'NestedNamedTest$importTask')
        self.assertEqual(m[2]['method_name'], '<init>')
        self.assertEqual(m[2]['class_name'], 'NestedNamedTest$importTask$importTask2')
        self.assertEqual(m[3]['method_name'], 'narf')
        self.assertEqual(m[3]['class_name'], 'NestedNamedTest$importTask$importTask2')
    def test_nested_ano(self):
        ast = javalang.parse.parse(NESTED_ANO_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[1]['method_name'], 'onClick')
        self.assertEqual(m[1]['class_name'], 'NestedAnoTest$importTask$importTask2$1')
        self.assertEqual(m[2]['method_name'], 'onClick')
        self.assertEqual(m[2]['class_name'], 'NestedAnoTest$importTask$importTask2$2')
        self.assertEqual(m[0]['method_name'], 'onPostExecute')
        self.assertEqual(m[0]['class_name'], 'NestedAnoTest$importTask$importTask2')
    def test_cc_class(self):
        ast = javalang.parse.parse(CC_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'updatePassword')
        self.assertEqual(m[0]['cyclomatic_complexity'], 1)
    def test_inline_interface(self):
        ast = javalang.parse.parse(INLINE_INTERFACE_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'getWordSize')
        self.assertEqual(m[0]['class_name'], 'InterfaceClass$ITest')
        self.assertEqual(m[0]['is_interface_method'], True)
    def test_interface(self):
        ast = javalang.parse.parse(INTERFACE_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'getWordSize')
        self.assertEqual(m[0]['class_name'], 'ITest')
        self.assertEqual(m[0]['is_interface_method'], True)
    def test_anonymous_nested(self):
        ast = javalang.parse.parse(ANO_TEST)
        cj = ComplexityJava(ast)
        # order is not important here, only that we want to have everything with the correct class name
        methods_want = {'onReceive': 'AnoTest$1',
                        'onReceive2': 'AnoTest$2',
                        'onClick': 'AnoTest$3',
                        'naTest1': 'AnoTest$4',
                        'naTest2': 'AnoTest$4',
                        'inTest1': 'AnoTest$5',
                        'sTest1': 'AnoTest$5$1',
                        'sTest2': 'AnoTest$5$1',
                        'test': 'AnoTest',
                        'test2': 'AnoTest',
                        'test3': 'AnoTest',
                        'test4': 'AnoTest'}
        methods_have = set()
        for m in cj.cognitive_complexity():
            self.assertEqual(m['class_name'], methods_want[m['method_name']])
            methods_have.add(m['method_name'])
        self.assertEqual(methods_have, set(methods_want.keys()))
    def test_static_nested(self):
        ast = javalang.parse.parse(STATIC_NESTED_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['method_name'], 'test1')
        self.assertEqual(m[0]['class_name'], 'ParamTest')
        self.assertEqual(m[1]['method_name'], 'test2')
        self.assertEqual(m[1]['class_name'], 'ParamTest$ParamTest2')
    def test_constructor(self):
        ast = javalang.parse.parse(CONSTRUCTOR_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['parameter_types'], ['int'])
        self.assertEqual(m[0]['return_type'], 'Void')
        self.assertEqual(m[0]['method_name'], '<init>')
    def test_method_params(self):
        ast = javalang.parse.parse(PARAM_TEST)
        cj = ComplexityJava(ast)
        m = list(cj.cognitive_complexity())
        self.assertEqual(m[0]['parameter_types'], ['ResultSet', 'boolean'])
        self.assertEqual(m[0]['return_type'], 'String')
    # --- SourcemeterConversion.get_sm_params: (params, return type) from a
    #     SourceMeter/JVM-descriptor style long name (LONG_NAME* fixtures) ---
    def test_sourcemeter_conversion(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME1)
        self.assertEqual(tmp[0], ['ContextMenu', 'View', 'ContextMenuInfo'])
        self.assertEqual(tmp[1], 'Dialog')
    def test_sourcemeter_conversion2(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME2)
        self.assertEqual(tmp[0], ['List'])
        self.assertEqual(tmp[1], '[long')
    def test_sourcemeter_conversion3(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME3)
        self.assertEqual(tmp[0], ['char', 'int', 'int'])
        self.assertEqual(tmp[1], 'Void')
    def test_sourcemeter_conversion4(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME4)
        self.assertEqual(tmp[0], ['String'])
        self.assertEqual(tmp[1], 'String')
    def test_sourcemeter_conversion5(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME5)
        self.assertEqual(tmp[0], ['Uri', 'ContentValues'])
        self.assertEqual(tmp[1], 'Uri')
    def test_sourcemeter_conversion6(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME6)
        self.assertEqual(tmp[0], ['Uri', 'String', 'String', 'String', 'String'])
        self.assertEqual(tmp[1], 'Cursor')
    def test_sourcemeter_conversion7(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME7)
        self.assertEqual(tmp[0], ['View'])
        self.assertEqual(tmp[1], 'Void')
    def test_sourcemeter_conversion8(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME8)
        self.assertEqual(tmp[0], ['Context'])
        self.assertEqual(tmp[1], 'Void')
    def test_sourcemeter_conversion9(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME9)
        self.assertEqual(tmp[0], [])
        self.assertEqual(tmp[1], 'String')
    def test_sourcemeter_conversion10(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME10)
        self.assertEqual(tmp[0], ['double'])
        self.assertEqual(tmp[1], 'String')
    def test_sourcemeter_conversion11(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_params(LONG_NAME11)
        self.assertEqual(tmp[0], ['TestWithParameters'])
        self.assertEqual(tmp[1], 'Runner')
    # --- long-name generation/normalization round trips ---
    def test_long_name_conversion(self):
        ast = javalang.parse.parse(LONG_NAME_CONVERSION_TEST)
        cj = ComplexityJava(ast)
        l = list(cj.cognitive_complexity())
        sc = SourcemeterConversion()
        # each call yields (exact long name, lossy variant where long->L)
        ln1, lnl1 = sc.get_sm_long_name(l[0])
        ln2, lnl2 = sc.get_sm_long_name(l[1])
        ln3, lnl3 = sc.get_sm_long_name(l[2])
        ln4, lnl4 = sc.get_sm_long_name(l[3])
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test1(LString;JI)V', ln1)
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test1(LString;LI)V', lnl1)
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test2([I[[B)[LString;', ln2)
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test3(J[LString;J)LString;', ln3)
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test3(L[LString;L)LString;', lnl3)
        self.assertEqual('de.ugoe.cs.coast.LongNameConversionTest.test4(LK;LV;)V', ln4)
    def test_sourcemeter_long_name_conversion(self):
        sc = SourcemeterConversion()
        tmp = sc.get_sm_long_name2('de.ugoe.cs.test.LongNameConversionTest.test1(LLjava/lang/String;IL)V')
        self.assertEqual('de.ugoe.cs.test.LongNameConversionTest.test1(LLString;IL)V', tmp)
        # this should also not change anymore (normalization must be idempotent)
        tmp2 = sc.get_sm_long_name2(tmp)
        self.assertEqual('de.ugoe.cs.test.LongNameConversionTest.test1(LLString;IL)V', tmp2)
        tmp3 = sc.get_sm_long_name2('de.ugoe.cs.test(D)LStatement;')
        self.assertEqual('de.ugoe.cs.test(D)LStatement;', tmp3)
        tmp3 = sc.get_sm_long_name2('de.ugoe.cs.test(Ljava/lang/Long;)Ljava/lang/Long;')
        self.assertEqual('de.ugoe.cs.test(J)J', tmp3)
        tmp4 = sc.get_sm_long_name2('de.ugoe.cs.test([[J)V')
        self.assertEqual('de.ugoe.cs.test([[J)V', tmp4)
        tmp5 = sc.get_sm_long_name2('org.apache.zookeeper.JaasConfiguration.addSection(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;)V')
        self.assertEqual('org.apache.zookeeper.JaasConfiguration.addSection(LString;LString;[LString;)V', tmp5)
    def test_overloading(self):
        ast = javalang.parse.parse(OVERLOADING_TEST)
        cj = ComplexityJava(ast)
        l = list(cj.cognitive_complexity())
        self.assertEqual(l[0]['parameter_types'], ['long'])
        self.assertEqual(l[0]['return_type'], 'Void')
        self.assertEqual(l[1]['parameter_types'], ['int', 'int'])
        self.assertEqual(l[1]['return_type'], 'String')
        self.assertEqual(l[2]['parameter_types'], ['int', 'int', 'boolean'])
        self.assertEqual(l[2]['return_type'], 'boolean')
    # --- cognitive complexity (Sonar) counting rules ---
    def test_binary_operation_sequences(self):
        ast = javalang.parse.parse(BINOP_TEST)
        cj = ComplexityJava(ast)
        for m in cj.cognitive_complexity():
            if m['method_name'] == 'test1':
                self.assertEqual(m['cognitive_complexity_sonar'], 4)
                self.assertEqual(m['package_name'], 'de.ugoe.cs.coast')
                self.assertEqual(m['class_name'], 'BinopTest')
            elif m['method_name'] == 'test2':
                self.assertEqual(m['cognitive_complexity_sonar'], 3)
            elif m['method_name'] == 'test3':
                self.assertEqual(m['cognitive_complexity_sonar'], 5)
            elif m['method_name'] == 'test4':
                self.assertEqual(m['cognitive_complexity_sonar'], 3)
            elif m['method_name'] == 'test5':
                self.assertEqual(m['cognitive_complexity_sonar'], 1)
    def test_nesting(self):
        ast = javalang.parse.parse(NESTING_TEST)
        cj = ComplexityJava(ast)
        for m in cj.cognitive_complexity():
            if m['method_name'] == 'myMethod':
                self.assertEqual(m['cognitive_complexity_sonar'], 9)
                self.assertEqual(m['package_name'], 'de.ugoe.cs.coast')
                self.assertEqual(m['class_name'], 'NestingTest')
    def test_switch(self):
        try:
            ast = javalang.parse.parse(SWITCH_TEST)
        except javalang.parser.JavaSyntaxError as e:
            print(e.description)
            print(e.at)
        # NOTE(review): if parse() raises above, `ast` is undefined here and the
        # next line fails with NameError instead of a clear failure — consider
        # self.fail(...) in the except branch.
        cj = ComplexityJava(ast)
        for m in cj.cognitive_complexity():
            if m['method_name'] == 'getWords':
                self.assertEqual(m['cognitive_complexity_sonar'], 1)
                self.assertEqual(m['cyclomatic_complexity'], 5) # we include default: as a branch in switch case
| 12,203 | 23 | 860 |
3ed1e9a0fce5b099778643c5168711803d7281a7 | 681 | py | Python | atividade07Encapsulamento/Juridica.py | JulianoRQueiroz/Algoritmo-e-programa--o-II | 9fc6ed73c116e34fb91bd9d92adcf8160d52d5e4 | [
"MIT"
] | null | null | null | atividade07Encapsulamento/Juridica.py | JulianoRQueiroz/Algoritmo-e-programa--o-II | 9fc6ed73c116e34fb91bd9d92adcf8160d52d5e4 | [
"MIT"
] | null | null | null | atividade07Encapsulamento/Juridica.py | JulianoRQueiroz/Algoritmo-e-programa--o-II | 9fc6ed73c116e34fb91bd9d92adcf8160d52d5e4 | [
"MIT"
] | null | null | null | from Pessoa import Pessoa | 37.833333 | 98 | 0.666667 | from Pessoa import Pessoa
class Juridica(Pessoa):
    """Pessoa jurídica: a Pessoa extended with company data (CNPJ,
    state registration number and employee count)."""

    def __init__(self, codigo, nome, endereco, telefone, cnpj, inscricaoEstadual, qtdFuncionario):
        """Initialize the base Pessoa fields plus the company-specific ones."""
        super().__init__(codigo, nome, endereco, telefone)
        self.cnpj = cnpj
        self.inscricaoEstadual = inscricaoEstadual
        self.qtdFuncionario = qtdFuncionario

    def imprimiCnpj(self):
        """Print only the CNPJ."""
        print('CNPJ: ' , self.cnpj)

    def emitirNotaFiscal(self):
        """Print the invoice header: code, name, CNPJ, state registration
        and employee count, one labelled line each."""
        campos = (
            ('Codigo: ', self.codigo),
            ('Nome: ', self.nome),
            ('CNPJ: ' , self.cnpj),
            ('Número da inscrição estadual: ' , self.inscricaoEstadual),
            ('Quantidade de funcionários: ' , self.qtdFuncionario),
        )
        for rotulo, valor in campos:
            print(rotulo, valor)
f6ec726241464ad5cb865c209875e36e1e78ffbe | 64 | py | Python | tradeboost/__init__.py | dotX12/TradeBoost | 349d1e540fcf45bb23f91fef793d8322c4c127b7 | [
"MIT"
] | 5 | 2021-01-29T08:45:56.000Z | 2021-06-07T21:35:12.000Z | tradeboost/__init__.py | dotX12/TradeBoost | 349d1e540fcf45bb23f91fef793d8322c4c127b7 | [
"MIT"
] | null | null | null | tradeboost/__init__.py | dotX12/TradeBoost | 349d1e540fcf45bb23f91fef793d8322c4c127b7 | [
"MIT"
] | null | null | null | from .boost import *
from .logger import *
from .utils import *
| 16 | 21 | 0.71875 | from .boost import *
from .logger import *
from .utils import *
| 0 | 0 | 0 |
07896ec747fc49c5036b29a4c1c7a85164e1f378 | 973 | py | Python | cataloger/library/models.py | elephantatech/dcatalog | c0f7e15769ce9e793af0754ff3323a7f247688bb | [
"Apache-2.0"
] | null | null | null | cataloger/library/models.py | elephantatech/dcatalog | c0f7e15769ce9e793af0754ff3323a7f247688bb | [
"Apache-2.0"
] | 8 | 2020-07-15T15:03:21.000Z | 2021-09-22T19:33:03.000Z | cataloger/library/models.py | elephantatech/dcatalog | c0f7e15769ce9e793af0754ff3323a7f247688bb | [
"Apache-2.0"
] | null | null | null | from django.db import models
from datetime import datetime, timedelta
# Create your models here. | 40.541667 | 102 | 0.743063 | from django.db import models
from datetime import datetime, timedelta
# Create your models here.
class Book(models.Model):
    """A book in the library catalogue; loans of it are tracked by Borrow."""
    title = models.TextField(max_length=255)
    author = models.TextField(max_length=255)
    # NOTE(review): max_length=13 suggests an ISBN-13 without hyphens — confirm
    isbn = models.CharField(max_length=13)
    # auto_now=False: the publish date is set explicitly, never auto-updated on save
    publish_date = models.DateTimeField(auto_now=False)
    def __str__(self):
        """Return the book's title as its human-readable representation."""
        return self.title
class Borrow(models.Model):
    """A loan of a Book to a borrower, with borrow/return dates and a loan period."""
    borrowedbook = models.ForeignKey(Book, on_delete=models.CASCADE)
    borrower = models.TextField(max_length=255, blank=False)
    # Bug fix: pass the callable `datetime.now`, do NOT call it. With
    # `default=datetime.now()` the default was evaluated once at import time,
    # so every Borrow row created afterwards silently shared that stale
    # timestamp; a callable is re-evaluated each time a row is created.
    borrowdate = models.DateTimeField(verbose_name="date borrowed", default=datetime.now, blank=False)
    returndate = models.DateTimeField(verbose_name="date returned", null=True, blank=True)
    # Loan period in days, stored as text. A CharField default should be a
    # string (it was the int 14); get_borrow_expiry() converts it back with int().
    expiryduration = models.CharField(max_length=2, default="14")
    def __str__(self):
        """Return '<book> - <borrower>' as the loan's human-readable form."""
        return f"{self.borrowedbook} - {self.borrower}"
    def get_borrow_expiry(self):
        """Return the date on which the loan expires (borrow date + loan period)."""
        return self.borrowdate.date() + timedelta(days=int(self.expiryduration))
6005d49e10d8da4a4a1598ffa598870456ac8df8 | 1,741 | py | Python | 205_香农的魔鬼.py | globien/life_python | 9d9e6f68f0178f05eec1296d56016bf7d8a7fc54 | [
"MIT"
] | 18 | 2020-04-04T15:06:01.000Z | 2022-03-11T00:44:38.000Z | 205_香农的魔鬼.py | globien/life_python | 9d9e6f68f0178f05eec1296d56016bf7d8a7fc54 | [
"MIT"
] | null | null | null | 205_香农的魔鬼.py | globien/life_python | 9d9e6f68f0178f05eec1296d56016bf7d8a7fc54 | [
"MIT"
] | 6 | 2020-03-10T22:43:01.000Z | 2022-01-22T03:49:01.000Z | import random
# Shannon's demon simulation: keep a portfolio half in cash and half in a
# stock whose price randomly doubles or halves each day, rebalancing back to
# 50/50 daily; the rebalanced portfolio grows even though the stock itself
# goes nowhere on average.
import matplotlib.pyplot as plt
trial = 20 # number of simulation runs
amp = 2.0 # up/down amplitude (price is multiplied or divided by this factor)
cash = 1.0 # initial cash
days = 200 # number of days observed per run
print("\n多次实验,每次实验的最终股价与总资产的对比:\n")
for i in range(trial):
    money = value = cash / 2 # half buys stock, half is kept as cash
    price = 1.0 # initial stock price
    shares = value / price # initial share count; fractional shares are assumed allowed
    moneys = [money] # cash held on each day
    values = [value] # stock market value on each day
    prices = [price] # stock price on each day
    assets = [money + value] # total assets on each day
    for day in range(1, days):
        price = price * amp**random.choice([-1,1]) # randomly rise or fall
        prices.append(price)
        val_tmp = shares * price
        delta = (val_tmp - money) / price / 2 # buy/sell half the stock-cash gap so stock value == cash again
        shares = shares - delta
        value = shares * price
        values.append(value)
        money = money + delta * price
        moneys.append(money)
        assets.append(money + value)
    print("第{:2d}次实验结果: Price = {:.2e} Assets = {:.2e} A/P = {:.2e}".format(i+1, prices[days-1],assets[days-1],assets[days-1]/prices[days-1]))
# Plot the trajectories of the last run
plt.plot(range(days), prices, label='Stock Price') # daily price curve
plt.plot(range(days), assets, label='Total Assets') # daily total-asset curve
plt.xlabel('Days') # x-axis label
plt.ylabel('Total Assets / Stock Price') # y-axis label
plt.yscale('log') # logarithmic y-axis
plt.legend(loc='best') # auto-pick the best legend position
plt.title("Earn Money with Shannon's Strategy") # chart title
plt.show() # display the figure
| 39.568182 | 147 | 0.520391 | import random
import matplotlib.pyplot as plt
trial = 20 # 模拟实验次数
amp = 2.0 # 上下振幅(对称乘除)
cash = 1.0 # 初始现金
days = 200 # 每次模拟实验观察天数
print("\n多次实验,每次实验的最终股价与总资产的对比:\n")
for i in range(trial):
money = value = cash / 2 # 一半买为股票,一半保留现金
price = 1.0 # 初始股票价格
shares = value / price # 初始买的股票数,假定允许买卖分数股数
moneys = [money] # 数组,用来存放每天的现金额
values = [value] # 数组,用来存放每天的股票市值
prices = [price] # 数组,用来存放每天的股票价格
assets = [money + value] # 数组,用来存放每天的总资产
for day in range(1, days):
price = price * amp**random.choice([-1,1]) # 随机决定上涨还是下跌
prices.append(price)
val_tmp = shares * price
delta = (val_tmp - money) / price / 2 # 卖出/买入股值与现金的差值一半对应的股票,保持股值与现金相等
shares = shares - delta
value = shares * price
values.append(value)
money = money + delta * price
moneys.append(money)
assets.append(money + value)
print("第{:2d}次实验结果: Price = {:.2e} Assets = {:.2e} A/P = {:.2e}".format(i+1, prices[days-1],assets[days-1],assets[days-1]/prices[days-1]))
# 把最后一次实验数据用走势图展示出来
plt.plot(range(days), prices, label='Stock Price') # 对价格按日期作图(折线图)
plt.plot(range(days), assets, label='Total Assets') # 对资产按日期作图(折线图)
plt.xlabel('Days') # 横坐标名称
plt.ylabel('Total Assets / Stock Price') # 纵坐标名称
plt.yscale('log') # 纵坐标为对数坐标
plt.legend(loc='best') # 自动选择最佳图例位置
plt.title("Earn Money with Shannon's Strategy") # 图表名称
plt.show() # 显示图形
| 0 | 0 | 0 |
d4b0e363685b489de99bdbe92a253724ad6314f0 | 7,666 | py | Python | Chapter07/transformer/transformer/Translator.py | bpbpublications/Getting-started-with-Deep-Learning-for-Natural-Language-Processing | 89f35a8e327bd9143fdb44e84b8f7b4fdc8ae58d | [
"MIT"
] | null | null | null | Chapter07/transformer/transformer/Translator.py | bpbpublications/Getting-started-with-Deep-Learning-for-Natural-Language-Processing | 89f35a8e327bd9143fdb44e84b8f7b4fdc8ae58d | [
"MIT"
] | 1 | 2021-10-14T10:15:10.000Z | 2021-10-14T10:15:10.000Z | Chapter07/transformer/transformer/Translator.py | bpbpublications/Getting-started-with-Deep-Learning-for-Natural-Language-Processing | 89f35a8e327bd9143fdb44e84b8f7b4fdc8ae58d | [
"MIT"
] | 1 | 2022-01-02T20:57:01.000Z | 2022-01-02T20:57:01.000Z | ''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformer.Beam import Beam
from transformer.Models import Transformer
class Translator(object):
''' Load with trained model and handle the beam search '''
def translate_batch(self, src_seq, src_pos):
''' Translation work in one batch '''
def get_inst_idx_to_tensor_position_map(inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
''' Decode and update beam status, and then return active beam idx '''
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
with torch.no_grad():
# -- Encode
src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)
src_enc, *_ = self.model.encoder(src_seq, src_pos)
# -- Repeat data for beam search
n_bm = self.opt.beam_size
n_inst, len_s, d_h = src_enc.size()
src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
# -- Prepare beams
inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]
# -- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
# -- Decode
for len_dec_seq in range(1, self.model_opt.max_token_seq_len + 1):
active_inst_idx_list = beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)
batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)
return batch_hyp, batch_scores
| 45.904192 | 105 | 0.638795 | ''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformer.Beam import Beam
from transformer.Models import Transformer
class Translator(object):
''' Load with trained model and handle the beam search '''
def __init__(self, opt):
self.opt = opt
self.device = torch.device('cuda' if opt.cuda else 'cpu')
checkpoint = torch.load(opt.model)
model_opt = checkpoint['settings']
self.model_opt = model_opt
model = Transformer(
model_opt.src_vocab_size,
model_opt.tgt_vocab_size,
model_opt.max_token_seq_len,
tgt_emb_prj_weight_sharing=model_opt.proj_share_weight,
emb_src_tgt_weight_sharing=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout)
model.load_state_dict(checkpoint['model'])
print('[Info] Trained model state loaded.')
model.word_prob_prj = nn.LogSoftmax(dim=1)
model = model.to(self.device)
self.model = model
self.model.eval()
def translate_batch(self, src_seq, src_pos):
''' Translation work in one batch '''
def get_inst_idx_to_tensor_position_map(inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):
# Sentences which are still active are collected,
# so the decoder will not run on completed sentences.
n_prev_active_inst = len(inst_idx_to_position_map)
active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)
active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)
active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)
active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
return active_src_seq, active_src_enc, active_inst_idx_to_position_map
def beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
return dec_partial_seq
def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):
dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)
dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
return dec_partial_pos
def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm):
dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)
dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h
word_prob = F.log_softmax(self.model.tgt_word_prj(dec_output), dim=1)
word_prob = word_prob.view(n_active_inst, n_bm, -1)
return word_prob
def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
with torch.no_grad():
# -- Encode
src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)
src_enc, *_ = self.model.encoder(src_seq, src_pos)
# -- Repeat data for beam search
n_bm = self.opt.beam_size
n_inst, len_s, d_h = src_enc.size()
src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
# -- Prepare beams
inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]
# -- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
# -- Decode
for len_dec_seq in range(1, self.model_opt.max_token_seq_len + 1):
active_inst_idx_list = beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)
batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)
return batch_hyp, batch_scores
| 3,835 | 0 | 243 |
31bb233163e98bceb26172a1307ead3dc73da8cf | 3,563 | py | Python | classes/data_splitters/FusionDataSplitter.py | anuj-harisinghani/canary-nlp | 5225fa028f0f744cd6582f927f3990c1a50b1f9b | [
"MIT"
] | null | null | null | classes/data_splitters/FusionDataSplitter.py | anuj-harisinghani/canary-nlp | 5225fa028f0f744cd6582f927f3990c1a50b1f9b | [
"MIT"
] | null | null | null | classes/data_splitters/FusionDataSplitter.py | anuj-harisinghani/canary-nlp | 5225fa028f0f744cd6582f927f3990c1a50b1f9b | [
"MIT"
] | null | null | null | from classes.data_splitters.DataSplitter import DataSplitter
from classes.handlers.ParamsHandler import ParamsHandler
import numpy as np
import pandas as pd
import os
import random
import copy
| 38.728261 | 139 | 0.611844 | from classes.data_splitters.DataSplitter import DataSplitter
from classes.handlers.ParamsHandler import ParamsHandler
import numpy as np
import pandas as pd
import os
import random
import copy
class FusionDataSplitter(DataSplitter):
def __init__(self):
super().__init__()
def make_splits(self, data: dict, seed: int) -> list:
self.random_seed = seed
x = data['x']
y = data['y']
labels = np.array(data['labels'])
fold_data = []
params = ParamsHandler.load_parameters("settings")
output_folder = params["output_folder"]
extraction_method = params["PID_extraction_method"]
tasks = params["tasks"]
method = 1
splits = []
# option 1: Superset PIDs
if method == 1:
# get list of superset_ids from the saved file
super_pids_file_path = os.path.join('results', output_folder, extraction_method + '_super_pids.csv')
superset_ids = list(pd.read_csv(super_pids_file_path)['interview'])
# random shuffle based on random seed
random.Random(self.random_seed).shuffle(superset_ids)
splits = np.array_split(superset_ids, self.nfolds)
# option 2: Split an intersection of pids across tasks, then split the out-of-intersection pids, then merge them equally
if method == 2:
pid_file_paths = {task: os.path.join('results', output_folder, extraction_method + '_' + task + '_pids.csv') for task in tasks}
pids = [list(pd.read_csv(pid_file_paths[task])['interview']) for task in tasks]
uni_pids = inter_pids = copy.deepcopy(pids)
# creating intersection of pids across tasks
while len(inter_pids) > 1:
inter_pids = [np.intersect1d(inter_pids[i], inter_pids[i + 1]) for i in range(len(inter_pids) - 1)]
inter_pids = list(inter_pids[0])
# creating union of pids across tasks
while len(uni_pids) > 1:
uni_pids = [np.union1d(uni_pids[i], uni_pids[i+1]) for i in range(len(uni_pids) - 1)]
uni_pids = uni_pids[0]
# difference in uni_pids and inter_pids
diff_pids = list(np.setxor1d(uni_pids, inter_pids))
# shuffling before splitting
random.Random(self.random_seed).shuffle(inter_pids)
random.Random(self.random_seed).shuffle(diff_pids)
inter_splits = np.array_split(inter_pids, self.nfolds)
diff_splits = np.array_split(diff_pids, self.nfolds)
splits = []
for i in range(self.nfolds):
splits.append(np.append(inter_splits[i], diff_splits[i]))
# after creating the splits:
# manually creating folds and filling data
folds = [np.intersect1d(group, labels) for group in splits]
for i in range(len(folds)):
fold = {}
test = folds[i]
train = np.concatenate(folds[:i] + folds[i + 1:])
train_index = [np.where(x.index == train[j])[0][0] for j in range(len(train))]
test_index = [np.where(x.index == test[j])[0][0] for j in range(len(test))]
fold['x_train'] = x.values[train_index]
fold['y_train'] = y.values[train_index]
fold['x_test'] = x.values[test_index]
fold['y_test'] = y.values[test_index]
fold['train_labels'] = labels[train_index]
fold['test_labels'] = labels[test_index]
fold_data.append(fold)
return fold_data
| 3,274 | 18 | 76 |
76cf6ae3e0ade99e5866b487482a562d84bb6443 | 436 | py | Python | reverse/leet2.py | adrianogil/pynsecply | 424859f691bc42e9434ab66e88611eba5845cc3b | [
"MIT"
] | null | null | null | reverse/leet2.py | adrianogil/pynsecply | 424859f691bc42e9434ab66e88611eba5845cc3b | [
"MIT"
] | null | null | null | reverse/leet2.py | adrianogil/pynsecply | 424859f691bc42e9434ab66e88611eba5845cc3b | [
"MIT"
] | null | null | null | import os
# https://stackoverflow.com/questions/10492869/how-to-perform-leet-with-python
from utilitybelt import change_charset
origspace = "abcdefghijklmnopqrstuvwxyz"
keyspace = "abcd3fgh1jklmnopqr57uvwxyz"
# print(change_charset("leetspeak",origspace, keyspace))
reverse_file='invertido.txt'
with open(reverse_file, 'r') as f:
words = f.readlines()
for w in words:
print(change_charset(w.lower(), keyspace, origspace)) | 25.647059 | 78 | 0.772936 | import os
# https://stackoverflow.com/questions/10492869/how-to-perform-leet-with-python
from utilitybelt import change_charset
origspace = "abcdefghijklmnopqrstuvwxyz"
keyspace = "abcd3fgh1jklmnopqr57uvwxyz"
# print(change_charset("leetspeak",origspace, keyspace))
reverse_file='invertido.txt'
with open(reverse_file, 'r') as f:
words = f.readlines()
for w in words:
print(change_charset(w.lower(), keyspace, origspace)) | 0 | 0 | 0 |
6d625954e1d792337f6364572fda303937c4f708 | 1,970 | py | Python | spark_auto_mapper_fhir/value_sets/x_path_usage_type.py | imranq2/SparkAutoMapper.FHIR | dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2 | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:07.000Z | 2020-10-31T23:25:07.000Z | spark_auto_mapper_fhir/value_sets/x_path_usage_type.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper_fhir/value_sets/x_path_usage_type.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class XPathUsageTypeCode(GenericTypeCode):
"""
XPathUsageType
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
How a search parameter relates to the set of elements returned by evaluating
its xpath query.
"""
"""
http://hl7.org/fhir/search-xpath-usage
"""
codeset: FhirUri = "http://hl7.org/fhir/search-xpath-usage"
class XPathUsageTypeCodeValues:
"""
The search parameter is derived directly from the selected nodes based on the
type definitions.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Normal = XPathUsageTypeCode("normal")
"""
The search parameter is derived by a phonetic transform from the selected
nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Phonetic = XPathUsageTypeCode("phonetic")
"""
The search parameter is based on a spatial transform of the selected nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Nearby = XPathUsageTypeCode("nearby")
"""
The search parameter is based on a spatial transform of the selected nodes,
using physical distance from the middle.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Distance = XPathUsageTypeCode("distance")
"""
The interpretation of the xpath statement is unknown (and can't be automated).
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Other = XPathUsageTypeCode("other")
| 33.965517 | 84 | 0.72335 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class XPathUsageTypeCode(GenericTypeCode):
"""
XPathUsageType
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
How a search parameter relates to the set of elements returned by evaluating
its xpath query.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://hl7.org/fhir/search-xpath-usage
"""
codeset: FhirUri = "http://hl7.org/fhir/search-xpath-usage"
class XPathUsageTypeCodeValues:
"""
The search parameter is derived directly from the selected nodes based on the
type definitions.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Normal = XPathUsageTypeCode("normal")
"""
The search parameter is derived by a phonetic transform from the selected
nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Phonetic = XPathUsageTypeCode("phonetic")
"""
The search parameter is based on a spatial transform of the selected nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Nearby = XPathUsageTypeCode("nearby")
"""
The search parameter is based on a spatial transform of the selected nodes,
using physical distance from the middle.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Distance = XPathUsageTypeCode("distance")
"""
The interpretation of the xpath statement is unknown (and can't be automated).
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Other = XPathUsageTypeCode("other")
| 68 | 0 | 27 |
72e0ea0f5a7c55d10fc959c7a4591fc43cc95bc7 | 1,349 | py | Python | notifier.py | yancz1989/tunas | ea3a4a56295a3e8d0d3fbb4d7ec3c0a78500897d | [
"MIT"
] | null | null | null | notifier.py | yancz1989/tunas | ea3a4a56295a3e8d0d3fbb4d7ec3c0a78500897d | [
"MIT"
] | null | null | null | notifier.py | yancz1989/tunas | ea3a4a56295a3e8d0d3fbb4d7ec3c0a78500897d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2016-06-20 11:00:29
# @Last Modified by: yancz1989
# @Last Modified time: 2016-06-20 11:00:31
import numpy as np
import tunas.core.arch.theano_mod.ops as ops
reload(ops)
from tunas.core.arch.theano_mod.ops import *
np.random.seed(2012310818)
x = theano.shared(np.random.rand(10, 10).astype('float32'))
y = theano.shared(np.random.rand(10, 10).astype('float32'))
ops_scalar = [round, abs, neg, sign, inv, sqrt, square, exp, log,
ceil, floor, sin, cos, diag, diagv, trace, determinant, matinv,
cholesky, fft, ifft, sum, prod, max, min, argmax, argmin, mean, std, unique, where]
ops_binary = [add, sub, mul, div, pow, elmw_max, elmw_min, matmul, batch_matmul, pad, ]
ops_bool = [eq, lt, le, gt, ge, logic_and, logic_or, logic_not, logic_xor]
rand_func = [randn, rand, binomial, shuffle]
activations = [relu, softplus, softmax, tanh, sigmoid, thresholding, clip, linear]
conv = [conv2d, conv3d, max_pool2d, max_pool3d, avg_pool2d, avg_pool3d, window_slides]
loss = [mse, mae, msle, sqr_hinge, hinge, categorical_crossentropy, binary_crossentropy, cosine_proximity]
optimizer = [gd, momentum, rmsprop, adagrad, adadelta, adam, adamax]
funcs = [ops_scalar, ops_binary, ops_bool, rand_func, activations, conv, loss]
for f in ops_scalar:
print(type(f(x))) | 36.459459 | 106 | 0.706449 | # -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2016-06-20 11:00:29
# @Last Modified by: yancz1989
# @Last Modified time: 2016-06-20 11:00:31
import numpy as np
import tunas.core.arch.theano_mod.ops as ops
reload(ops)
from tunas.core.arch.theano_mod.ops import *
np.random.seed(2012310818)
x = theano.shared(np.random.rand(10, 10).astype('float32'))
y = theano.shared(np.random.rand(10, 10).astype('float32'))
ops_scalar = [round, abs, neg, sign, inv, sqrt, square, exp, log,
ceil, floor, sin, cos, diag, diagv, trace, determinant, matinv,
cholesky, fft, ifft, sum, prod, max, min, argmax, argmin, mean, std, unique, where]
ops_binary = [add, sub, mul, div, pow, elmw_max, elmw_min, matmul, batch_matmul, pad, ]
ops_bool = [eq, lt, le, gt, ge, logic_and, logic_or, logic_not, logic_xor]
rand_func = [randn, rand, binomial, shuffle]
activations = [relu, softplus, softmax, tanh, sigmoid, thresholding, clip, linear]
conv = [conv2d, conv3d, max_pool2d, max_pool3d, avg_pool2d, avg_pool3d, window_slides]
loss = [mse, mae, msle, sqr_hinge, hinge, categorical_crossentropy, binary_crossentropy, cosine_proximity]
optimizer = [gd, momentum, rmsprop, adagrad, adadelta, adam, adamax]
funcs = [ops_scalar, ops_binary, ops_bool, rand_func, activations, conv, loss]
for f in ops_scalar:
print(type(f(x))) | 0 | 0 | 0 |
16639b90cf841f99ea26cbcc3a5c2280f67854e1 | 550 | py | Python | day-02/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 12 | 2020-11-30T19:22:18.000Z | 2021-06-21T05:55:58.000Z | day-02/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 13 | 2020-11-30T17:27:22.000Z | 2020-12-22T17:43:13.000Z | day-02/part-1/badouralix.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 3 | 2020-12-01T08:49:40.000Z | 2022-03-26T21:47:38.000Z | from tool.runners.python import SubmissionPy
| 26.190476 | 65 | 0.512727 | from tool.runners.python import SubmissionPy
class BadouralixSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
result = 0
for line in s.split("\n"):
mincount, maxcount, char, password = (
line.replace("-", " ").replace(":", " ").split()
)
count = password.count(char)
if count >= int(mincount) and count <= int(maxcount):
result += 1
return result
| 0 | 481 | 23 |
e6c0db53f9b4431c882370890aa7e808d3ceb12a | 1,824 | py | Python | teachers/migrations/0001_initial.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | [
"MIT"
] | null | null | null | teachers/migrations/0001_initial.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | [
"MIT"
] | null | null | null | teachers/migrations/0001_initial.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-21 01:26
from django.db import migrations, models
import django.db.models.deletion
| 42.418605 | 118 | 0.589912 | # Generated by Django 2.1.7 on 2019-03-21 01:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('departments', '__first__'),
]
operations = [
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('techer_id', models.CharField(blank=True, max_length=6, null=True, unique=True)),
('designation', models.CharField(max_length=30)),
('joined', models.DateField(verbose_name='Year-Month')),
('phone', models.CharField(max_length=12, null=True)),
],
),
migrations.CreateModel(
name='TeacherDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_bio', models.TextField(max_length=100)),
('gender', models.CharField(max_length=6)),
('birthdate', models.DateField()),
('qualification', models.CharField(max_length=100)),
('englis_skill', models.CharField(max_length=10)),
('math_skill', models.CharField(blank=True, max_length=10, null=True)),
('programming_skill', models.CharField(blank=True, max_length=10, null=True)),
('dept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='departments.Department')),
('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teachers.Teacher')),
],
),
]
| 0 | 1,677 | 23 |
ea914284f3826f73ce612c90d1a156759feb1fba | 2,117 | py | Python | src/ptm_ppi_shortcut.py | elangovana/bert-shortcuts | 5febbec1b6f25ed9e4d293071416c1026a10936a | [
"MIT"
] | null | null | null | src/ptm_ppi_shortcut.py | elangovana/bert-shortcuts | 5febbec1b6f25ed9e4d293071416c1026a10936a | [
"MIT"
] | null | null | null | src/ptm_ppi_shortcut.py | elangovana/bert-shortcuts | 5febbec1b6f25ed9e4d293071416c1026a10936a | [
"MIT"
] | null | null | null | import argparse
import logging
import sys
import pandas as pd
import sklearn
from model_nb_tree_classifier import ModelNBTreeClassifier
if __name__ == "__main__":
run_main()
| 35.881356 | 122 | 0.570146 | import argparse
import logging
import sys
import pandas as pd
import sklearn
from model_nb_tree_classifier import ModelNBTreeClassifier
def train(trainfile, testfile=None):
df_train = pd.read_json(trainfile, orient="records")
m = ModelNBTreeClassifier("PROTPART1", "PROTPART0")
m.train(df_train["x"], df_train["y"])
if testfile is not None:
df_test = pd.read_json(testfile, orient="records")
actual = m.predict(df_test["x"])
pos_f1 = sklearn.metrics.f1_score(df_test["y"], actual, labels=[1, 2, 3, 4, 5, 6], average='micro',
sample_weight=None, zero_division='warn')
all_f1 = sklearn.metrics.f1_score(df_test["y"], actual, average='micro', sample_weight=None, zero_division='warn')
print(sklearn.metrics.classification_report(df_test["y"],
actual,
output_dict=False,
labels=[1, 2, 3, 4, 5, 6]))
# print(sklearn.metrics.classification_report(df_test["y"],
# actual,
# output_dict=False))
print("Pos labels", pos_f1, "All labels", all_f1)
return m
def run_main():
parser = argparse.ArgumentParser()
parser.add_argument("--trainfile",
help="The input ppi multiclass train file", required=True)
parser.add_argument("--testfile",
help="The input ppi multiclass test file", required=True)
parser.add_argument("--log-level", help="Log level", default="INFO", choices={"INFO", "WARN", "DEBUG", "ERROR"})
args = parser.parse_args()
# Set up logging
logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
print(args.__dict__)
train(args.trainfile, args.testfile)
if __name__ == "__main__":
run_main()
| 1,885 | 0 | 46 |
16488afbc467578cb27eca6a855d6d9add84243c | 4,143 | py | Python | tests/chainer_tests/functions_tests/array_tests/test_cast.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 2 | 2019-08-12T21:48:04.000Z | 2020-08-27T18:04:20.000Z | tests/chainer_tests/functions_tests/array_tests/test_cast.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/array_tests/test_cast.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | null | null | null | import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4)},
{'shape': ()},
],
[
{'in_type': numpy.bool_},
{'in_type': numpy.uint8},
{'in_type': numpy.uint64},
{'in_type': numpy.int8},
{'in_type': numpy.int64},
{'in_type': numpy.float16},
{'in_type': numpy.float32},
{'in_type': numpy.float64},
],
[
{'out_type': numpy.bool_},
{'out_type': numpy.uint8},
{'out_type': numpy.uint64},
{'out_type': numpy.int8},
{'out_type': numpy.int64},
{'out_type': numpy.float16},
{'out_type': numpy.float32},
{'out_type': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@attr.chainerx
testing.run_module(__name__, __file__)
| 29.805755 | 79 | 0.599083 | import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4)},
{'shape': ()},
],
[
{'in_type': numpy.bool_},
{'in_type': numpy.uint8},
{'in_type': numpy.uint64},
{'in_type': numpy.int8},
{'in_type': numpy.int64},
{'in_type': numpy.float16},
{'in_type': numpy.float32},
{'in_type': numpy.float64},
],
[
{'out_type': numpy.bool_},
{'out_type': numpy.uint8},
{'out_type': numpy.uint64},
{'out_type': numpy.int8},
{'out_type': numpy.int64},
{'out_type': numpy.float16},
{'out_type': numpy.float32},
{'out_type': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@attr.chainerx
class TestCast(testing.FunctionTestCase):
def _skip_chainerx_unsupported_dtype(self):
supported_dtypes = chainerx.testing.dtypes.all_dtypes
if (self.in_type.__name__ not in supported_dtypes
or self.out_type.__name__ not in supported_dtypes):
raise unittest.SkipTest(
'ChainerX does not support either of {} or {} dtypes'.format(
self.in_type.__name__, self.out_type.__name__))
def setUp(self):
# Skip e.g. uint64 for ChainerX.
self._skip_chainerx_unsupported_dtype()
if (numpy.dtype(self.in_type).kind != 'f'
or numpy.dtype(self.out_type).kind != 'f'):
self.skip_backward_test = True
self.skip_double_backward_test = True
self.check_backward_options = {
'eps': 2.0 ** -2, 'atol': 1e-2, 'rtol': 1e-3}
self.check_double_backward_options = {
'eps': 2.0 ** -2, 'atol': 1e-2, 'rtol': 1e-3}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.in_type)
return x,
def forward_expected(self, inputs):
x, = inputs
return x.astype(self.out_type),
def forward(self, inputs, devices):
x, = inputs
y = functions.cast(x, self.out_type)
return y,
class TestNoCast(unittest.TestCase):

    """Checks that casting to an identical dtype does not copy the data."""

    def setUp(self):
        self.dtype = numpy.float32
        self.x = numpy.empty(1, self.dtype)

    def check_forward_no_cast(self, x_data):
        # A same-dtype cast must wrap the input array without copying it.
        out = functions.cast(x_data, self.dtype)
        assert isinstance(out, chainer.Variable)
        assert out.data is x_data

    def test_forward_no_cast_array(self):
        out = functions.cast(self.x, self.dtype)
        assert isinstance(out, chainer.Variable)
        assert out.data is self.x

    def test_forward_no_cast_variable(self):
        # If backprop is disabled, it's safe to simply return the input
        # variable for no-op casts.
        v = chainer.Variable(self.x)
        with chainer.using_config('enable_backprop', False):
            assert functions.cast(v, self.dtype) is v

    def test_forward_no_cast_grad(self):
        # This test would fail if F.cast does not create new function nodes
        # for no-op casts.
        v = chainer.Variable(self.x)
        y1 = functions.cast(v, self.dtype)
        y2 = functions.cast(v, self.dtype)
        total = y1 + y2
        grads = chainer.grad([total], [y1, y2], [numpy.ones_like(total.data)])
        for gy, y in zip(grads, (y1, y2)):
            assert gy.dtype == self.dtype
            numpy.testing.assert_array_equal(
                gy.data, numpy.ones_like(y.data))
testing.run_module(__name__, __file__)
| 2,372 | 35 | 315 |
eec55862e09568765da4b657bde3b402fb72344a | 451,812 | py | Python | MortrackLibrary/machineLearning/MortrackML_Library.py | Mortrack/Mortrack_ML_Library | d041c8cd73a058d7223af8e07636a0ade7014f4c | [
"Apache-2.0"
] | null | null | null | MortrackLibrary/machineLearning/MortrackML_Library.py | Mortrack/Mortrack_ML_Library | d041c8cd73a058d7223af8e07636a0ade7014f4c | [
"Apache-2.0"
] | null | null | null | MortrackLibrary/machineLearning/MortrackML_Library.py | Mortrack/Mortrack_ML_Library | d041c8cd73a058d7223af8e07636a0ade7014f4c | [
"Apache-2.0"
] | null | null | null |
"""
Copyright 2021 Cesar Miranda Meza (alias: Mortrack)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 17:15:51 2020
Last updated on Mon May 24 8:40:00 2021
@author: engineer Cesar Miranda Meza (alias: Mortrack)
"""
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
# IMPORTANT NOTE: Remember to be careful with input argument variables when
# you call any method or class because it gets altered in
# python ignoring parenting or children logic
"""
DiscreteDistribution()
The Combinations class allows you to get some parameters, through some of its
methods, that describe the dataset characteristics (eg. mean, variance and
standard deviation).
"""
class DiscreteDistribution:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the method implementations they describe (getMean, getVariance,
    # getStandardDeviation) do not appear here.
    """
    getMean(samplesList="will contain a matrix of rows and columns, where we want to get the Mean of each row's data point samples")

    Returns a matrix (containing only 1 column for all rows within this class
    local variable "samplesList"), where each row will have its corresponding
    mean value.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        dD = mSL.DiscreteDistribution()
        result = dD.getMean(matrix_x)

    EXPECTED CODE RESULT:
        result =
        [[2.0], [5.0], [5.0]]
    """
    """
    getVariance(samplesList="will contain a matrix of rows and columns, where we want to get the Variance of each row's data point samples")

    Returns a matrix (containing only 1 column for all rows within this class
    local variable "samplesList"), where each row will have its corresponding
    (sample) variance value.
    Remember that Variance is also denoted as the square of sigma.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9],
            [1,4,7]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        dD = mSL.DiscreteDistribution()
        result = dD.getVariance(matrix_x)

    EXPECTED CODE RESULT:
        result =
        [[1.0], [1.0], [16.0], [9.0]]
    """
    """
    getStandardDeviation(samplesList="will contain a matrix of rows and columns, where we want to get the Standard Deviation of each row's data point samples")

    Returns a matrix (containing only 1 column for all rows within this class
    local variable "samplesList"), where each row will have its corresponding
    Standard Deviation value.
    Remember that Standard Deviation is also denoted as sigma.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9],
            [1,4,7]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        dD = mSL.DiscreteDistribution()
        result = dD.getStandardDeviation(matrix_x)

    EXPECTED CODE RESULT:
        result =
        [[1.0], [1.0], [4.0], [3.0]]
        NOTE(review): corrected — the previous text repeated the variance
        values ([[1.0], [1.0], [16.0], [9.0]]); the standard deviation is
        their square root.
    """
"""
Tdistribution(desiredTrustInterval="Its a float numeric type value that will represent the desired percentage(%) that you desire for your trust interval")
The Combinations class allows you to get some parameters, through some of its
methods, that describe the dataset characteristics (eg. mean, variance and
standard deviation).
"""
class Tdistribution:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the described method (getCriticalValue) does not appear here.
    """
    getCriticalValue(numberOfSamples="Must have a whole number that represents the number of samples you want to get the critical value from")

    Returns a float numeric value which will represent the Critical Value for
    the parameters that you specified (the desired trust interval and the
    number of samples).
    Remember that the T-distribution considers that your data has a normal
    function form tendency.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9],
            [1,4,7]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        tD = mSL.Tdistribution(desiredTrustInterval=95)
        result = tD.getCriticalValue(len(matrix_x[0]))

    EXPECTED CODE RESULT:
        result =
        4.303
    """
class TrustIntervals:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the described methods (getMeanIntervals, getPredictionIntervals) do not
    # appear here.
    """
    getMeanIntervals(samplesList="Must contain the matrix of the dataset from which you want to get the Mean Intervals",
                     meanList="Must contain the matrix (containing only 1 column for all rows), where each row will have its corresponding mean value.",
                     standardDeviationList="Must contain the matrix (containing only 1 column for all rows), where each row will have its corresponding standard deviation value.",
                     tValue="Must contain a float numeric value that represents the T-Value (Critical Value) required to calculate the mean intervals")

    This method returns a matrix with 2 columns:
        * Column 1 = negative mean interval values in the corresponding "n" number of rows
        * Column 2 = positive mean interval values in the corresponding "n" number of rows
    Remember that the T-distribution considers that your data has a normal
    function form tendency.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9],
            [1,4,7]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        tI = mSL.TrustIntervals()
        dD = mSL.DiscreteDistribution()
        meanList = dD.getMean(matrix_x)
        standardDeviationList = dD.getStandardDeviation(matrix_x)
        tD = mSL.Tdistribution(desiredTrustInterval=95)
        tValue = tD.getCriticalValue(len(matrix_x[0]))
        meanIntervalsList = tI.getMeanIntervals(matrix_x, meanList, standardDeviationList, tValue)
        negativeMeanIntervalList = []
        positiveMeanIntervalList = []
        for row in range(0, len(meanIntervalsList)):
            temporalRow = []
            temporalRow.append(meanIntervalsList[row][0])
            negativeMeanIntervalList.append(temporalRow)
            temporalRow = []
            temporalRow.append(meanIntervalsList[row][1])
            positiveMeanIntervalList.append(temporalRow)

    EXPECTED CODE RESULT:
        negativeMeanIntervalList =
        [[-0.48433820832295993],
         [2.51566179167704],
         [-4.93735283329184],
         [-3.453014624968879]]
        positiveMeanIntervalList =
        [[4.48433820832296],
         [7.48433820832296],
         [14.93735283329184],
         [11.45301462496888]]
    """
    """
    getPredictionIntervals(samplesList="Must contain the matrix of the dataset from which you want to get the Prediction Intervals",
                           meanList="Must contain the matrix (containing only 1 column for all rows), where each row will have its corresponding mean value.",
                           standardDeviationList="Must contain the matrix (containing only 1 column for all rows), where each row will have its corresponding standard deviation value.",
                           tValue="Must contain a float numeric value that represents the T-Value (Critical Value) required to calculate the Prediction intervals")

    NOTE(review): the example below passes "numberOfSamples" (an int) as the
    first argument, while the description above says a samples matrix —
    verify the actual expected first argument against the implementation.

    This method returns a matrix with 2 columns:
        * Column 1 = negative Prediction interval values in the corresponding "n" number of rows
        * Column 2 = positive Prediction interval values in the corresponding "n" number of rows
    Remember that the T-distribution considers that your data has a normal
    function form tendency.

    EXAMPLE CODE:
        matrix_x = [
            [1,2,3],
            [4,5,6],
            [1,5,9],
            [1,4,7]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        tI = mSL.TrustIntervals()
        dD = mSL.DiscreteDistribution()
        meanList = dD.getMean(matrix_x)
        standardDeviationList = dD.getStandardDeviation(matrix_x)
        tD = mSL.Tdistribution(desiredTrustInterval=95)
        numberOfSamples = len(matrix_x[0])
        tValue = tD.getCriticalValue(numberOfSamples)
        predictionIntervalsList = tI.getPredictionIntervals(numberOfSamples, meanList, standardDeviationList, tValue)
        negativePredictionIntervalList = []
        positivePredictionIntervalList = []
        for row in range(0, len(predictionIntervalsList)):
            temporalRow = []
            temporalRow.append(predictionIntervalsList[row][0])
            negativePredictionIntervalList.append(temporalRow)
            temporalRow = []
            temporalRow.append(predictionIntervalsList[row][1])
            positivePredictionIntervalList.append(temporalRow)

    EXPECTED CODE RESULT:
        negativePredictionIntervalList =
        [[-2.968676416645919],
         [0.03132358335408103],
         [-14.874705666583676],
         [-10.906029249937756]]
        positivePredictionIntervalList =
        [[6.968676416645919],
         [9.96867641664592],
         [24.874705666583676],
         [18.906029249937756]]
    """
"""
Combinations("The sample list you want to work with")
The Combinations class allows you to get the possible combinations within
the values contained in the "samplesList" variable contained within this class.
"""
class Combinations:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the described methods (setSamplesList, getPositionCombinationsList,
    # getCustomizedPermutationList) do not appear here.
    """
    setSamplesList("The new sample list you want to work with")

    This method changes the value of the object's variable "samplesList" to a
    new set of list values that you want to work with through this class's
    methods.
    """
    """
    getPositionCombinationsList()

    Returns all the possible positions of the elements contained within a list.

    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getPositionCombinationsList()

    EXPECTED CODE RESULT:
        result =
        [[0, 1, 2], [1, 0, 2], [1, 2, 0], [0, 2, 1], [2, 0, 1]]
    """
    """
    getCustomizedPermutationList()

    Returns a customized form of permutation of the elements contained within a
    list. See the code example and expected code result to get a better idea of
    how this method works.

    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getCustomizedPermutationList()

    EXPECTED CODE RESULT:
        result =
        [[], [0], [1], [0, 1], [2], [0, 2], [1, 2], [0, 1, 2]]
    """
"""
DatasetSplitting("x independent variable datapoints to model", "y dependent variable datapoints to model")
The DatasetSplitting library allows you to split your dataset into training and
test set.
"""
class DatasetSplitting:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the described method (getDatasetSplitted) does not appear here.
    """
    getDatasetSplitted(testSize = "the desired size of the test samples. This value must be greater than zero and lower than one",
                       isSplittingRandom = "True if you want samples to be splitted randomly. False if otherwise is desired")

    This method returns a dataset split into training and test sets.

    CODE EXAMPLE1:
        matrix_y = [
            [14.05],
            [10.55],
            [7.55],
            [14.93],
            [9.48],
            [6.59],
            [16.56],
            [13.63],
            [9.23],
            [15.85],
            [11.75],
            [8.78],
            [22.41],
            [18.55],
            [15.93],
            [21.66],
            [17.98],
            [16.44]
        ]
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        dS = mSL.DatasetSplitting(matrix_x, matrix_y)
        datasetSplitResults = dS.getDatasetSplitted(testSize = 0.10, isSplittingRandom = False)
        x_train = datasetSplitResults[0]
        x_test = datasetSplitResults[1]
        y_train = datasetSplitResults[2]
        y_test = datasetSplitResults[3]

    EXPECTED CODE1 RESULT:
        x_train =
        [[125, 15],
         [75, 17.5],
         [100, 17.5],
         [125, 17.5],
         [75, 20],
         [100, 20],
         [125, 20],
         [75, 22.5],
         [100, 22.5],
         [125, 22.5],
         [75, 25],
         [100, 25],
         [125, 25],
         [75, 27.5],
         [100, 27.5],
         [125, 27.5]]
        x_test =
        [[75, 15], [100, 15]]
        y_train =
        [[7.55],
         [14.93],
         [9.48],
         [6.59],
         [16.56],
         [13.63],
         [9.23],
         [15.85],
         [11.75],
         [8.78],
         [22.41],
         [18.55],
         [15.93],
         [21.66],
         [17.98],
         [16.44]]
        y_test =
        [[14.05], [10.55]]
    """
"""
FeatureScaling("datapoints you want to apply Feature Scaling to")
The Feature Scaling library gives several methods to apply feature scaling
techniques to your datasets.
"""
class FeatureScaling:
    # NOTE(review): only documentation strings are visible in this excerpt;
    # the described methods (getStandarization, getReverseStandarization,
    # setSamplesList) do not appear here.
    """
    getStandarization("preferedMean=prefered Mean",
                      preferedStandardDeviation="prefered Standard Deviation value",
                      isPreferedDataUsed="True to define you will used prefered values. False to define otherwise.")

    This method returns a dataset but with the standarization method, of Feature
    Scaling, applied to such dataset. This method will also return the
    calculated mean and the calculated standard deviation value.

    CODE EXAMPLE1:
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        normalizedResults = featureScaling.getStandarization()
        preferedMean = normalizedResults[0]
        preferedStandardDeviation = normalizedResults[1]
        normalizedDataPoints = normalizedResults[2]

    EXPECTED CODE1 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        normalizedDataPoints =
        [[-1.1902380714238083, -1.422606594884729],
         [0.0, -1.422606594884729],
         [1.1902380714238083, -1.422606594884729],
         [-1.1902380714238083, -0.8535639569308374],
         [0.0, -0.8535639569308374],
         [1.1902380714238083, -0.8535639569308374],
         [-1.1902380714238083, -0.2845213189769458],
         [0.0, -0.2845213189769458],
         [1.1902380714238083, -0.2845213189769458],
         [-1.1902380714238083, 0.2845213189769458],
         [0.0, 0.2845213189769458],
         [1.1902380714238083, 0.2845213189769458],
         [-1.1902380714238083, 0.8535639569308374],
         [0.0, 0.8535639569308374],
         [1.1902380714238083, 0.8535639569308374],
         [-1.1902380714238083, 1.422606594884729],
         [0.0, 1.422606594884729],
         [1.1902380714238083, 1.422606594884729]]

    # ------------------------------------------------------------------------- #

    CODE EXAMPLE2:
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        mean = [[100, 21.25]]
        standardDeviation = [[21.004201260420146, 4.393343895967546]]
        normalizedResults = featureScaling.getStandarization(preferedMean=mean, preferedStandardDeviation=standardDeviation, isPreferedDataUsed = True)
        preferedMean = normalizedResults[0]
        preferedStandardDeviation = normalizedResults[1]
        normalizedDataPoints = normalizedResults[2]

    EXPECTED CODE2 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        normalizedDataPoints =
        [[-1.1902380714238083, -1.422606594884729],
         [0.0, -1.422606594884729],
         [1.1902380714238083, -1.422606594884729],
         [-1.1902380714238083, -0.8535639569308374],
         [0.0, -0.8535639569308374],
         [1.1902380714238083, -0.8535639569308374],
         [-1.1902380714238083, -0.2845213189769458],
         [0.0, -0.2845213189769458],
         [1.1902380714238083, -0.2845213189769458],
         [-1.1902380714238083, 0.2845213189769458],
         [0.0, 0.2845213189769458],
         [1.1902380714238083, 0.2845213189769458],
         [-1.1902380714238083, 0.8535639569308374],
         [0.0, 0.8535639569308374],
         [1.1902380714238083, 0.8535639569308374],
         [-1.1902380714238083, 1.422606594884729],
         [0.0, 1.422606594884729],
         [1.1902380714238083, 1.422606594884729]]
    """
    """
    getReverseStandarization("preferedMean=prefered Mean",
                             preferedStandardDeviation="prefered Standard Deviation value")

    This method returns a dataset but with its original datapoint values before
    having applied the Standarization Feature Scaling method.

    CODE EXAMPLE1:
        matrix_x = [
            [-1.1902380714238083, -1.422606594884729],
            [0.0, -1.422606594884729],
            [1.1902380714238083, -1.422606594884729],
            [-1.1902380714238083, -0.8535639569308374],
            [0.0, -0.8535639569308374],
            [1.1902380714238083, -0.8535639569308374],
            [-1.1902380714238083, -0.2845213189769458],
            [0.0, -0.2845213189769458],
            [1.1902380714238083, -0.2845213189769458],
            [-1.1902380714238083, 0.2845213189769458],
            [0.0, 0.2845213189769458],
            [1.1902380714238083, 0.2845213189769458],
            [-1.1902380714238083, 0.8535639569308374],
            [0.0, 0.8535639569308374],
            [1.1902380714238083, 0.8535639569308374],
            [-1.1902380714238083, 1.422606594884729],
            [0.0, 1.422606594884729],
            [1.1902380714238083, 1.422606594884729]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        mean = [[100, 21.25]]
        standardDeviation = [[21.004201260420146, 4.393343895967546]]
        deNormalizedResults = featureScaling.getReverseStandarization(preferedMean=mean, preferedStandardDeviation=standardDeviation)
        preferedMean = deNormalizedResults[0]
        preferedStandardDeviation = deNormalizedResults[1]
        deNormalizedDataPoints = deNormalizedResults[2]

    EXPECTED CODE1 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        deNormalizedDataPoints =
        [[75.0, 15.0],
         [100.0, 15.0],
         [125.0, 15.0],
         [75.0, 17.5],
         [100.0, 17.5],
         [125.0, 17.5],
         [75.0, 20.0],
         [100.0, 20.0],
         [125.0, 20.0],
         [75.0, 22.5],
         [100.0, 22.5],
         [125.0, 22.5],
         [75.0, 25.0],
         [100.0, 25.0],
         [125.0, 25.0],
         [75.0, 27.5],
         [100.0, 27.5],
         [125.0, 27.5]]
    """
    """
    setSamplesList(newSamplesList="the new samples list that you wish to work with")

    This method sets a new value in the object's local variable "samplesList".
    """
"""
The Regression library gives several different types of coeficients to model
a required data. But notice that the arguments of this class are expected to be
the mean values of both the "x" and the "y" values.
Regression("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class Regression:
"""
# ----------------------------------- #
# ----------------------------------- #
# ----- STILL UNDER DEVELOPMENT ----- #
# ----------------------------------- #
# ----------------------------------- #
getGaussianRegression()
Returns the best fitting model to predict the behavior of a dataset through
a Gaussian Regression model that may have any number of independent
variables (x).
Note that if no fitting model is found, then this method will swap the
dependent variables values in such a way that "0"s will be interpretated as
"1"s and vice-versa to then try again to find at least 1 fitting model to
your dataset. If this still doenst work, then this method will return
modeling results will all coefficients with values equal to zero, predicted
accuracy equal to zero and all predicted values will also equal zero.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[39.139277579342206],
[-13.813509557297337],
[2.302251592882884],
[-13.813509557296968],
[2.302251592882836]]
accuracyFromTraining =
99.94999999999685
predictedData =
[[0.9989999999998915],
[0.9990000000000229],
[0.9989999999999554],
[0.9989999999999234],
[0.0009999999999997621],
[0.0010000000000001175],
[0.00099999999999989],
[0.000999999999999915]]
# NOTE:"predictedData" will try to give "1" for positive values and "0"
# for negative values always, regardless if your negative values
# were originally given to the trained model as "-1"s.
coefficientDistribution =
'Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getLinearLogisticRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Logistic Regression model to be able
to predict a classification problem that can have any number of
independent variables (x).
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[4.395207586412653], [5.985854141495452], [-4.395207586412653]]
accuracyFromTraining =
80.02122762886552
predictedData =
[[0.012185988957723588],
[0.05707820342364075],
[0.22900916243958236],
[0.5930846789223594],
[0.8773292738274195],
[0.9722944298625625],
[0.9942264149220237],
[0.9988179452639562],
[0.9997588776328182],
[0.9999508513195541]]
coefficientDistribution =
'Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getLinearRegression(isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model to predict the behavior of a dataset through
a regular Linear Regression model. Note that this method can only solve
regression problems that have 1 independent variable (x).
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[8.470909090909096], [1.0242424242424237]]
accuracyFromTraining =
97.05959379759686
predictedData =
[[8.470909090909096],
[9.49515151515152],
[10.519393939393943],
[11.543636363636367],
[12.56787878787879],
[13.592121212121214],
[14.616363636363639],
[15.640606060606062],
[16.664848484848484],
[17.689090909090908]]
coefficientDistribution =
'Coefficients distribution is as follows: y = b + m*x'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getMultipleLinearRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has any number
of independent variables (x) through the Multiple Linear Regression method.
EXAMPLE CODE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[36.094678333151364], [1.030512601856226], [-1.8696429022156238], [0]]
accuracyFromTraining =
94.91286851439088
predictedData =
[[27.97866287863839],
[32.47405344687403],
[26.780769909063693],
[38.27922426742052],
[15.633130663659042],
[26.414492729454558],
[27.942743988094456],
[26.30423186956247],
[32.03534812093171],
[26.162019574015964],
[37.5240060242906],
[32.03387415343133],
[27.937442374564142]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getPolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has only 1
independent variable (x) in it, through a polynomial regression solution.
EXAMPLE CODE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[3.468869185343018e-08],
[1.5123521825664843e-07],
[2.2104758041867345e-07],
[1.0817080022072073e-07]]
accuracyFromTraining =
99.99999615014885
predictedData =
[[3.4769003219065136e-11],
[7.199670288280337e-11],
[1.597970024878988e-10],
[3.792980021998557e-10]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getMultiplePolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method returns the best fitting model of a dataset to predict its
behavior through a Multiple Polynomial Regression that may have any number
of independent variables (x). This method gets a model by through the
following equation format:
y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[-1.745717777706403e-08],
[0],
[0.07581354676648289],
[-0.00104662847289827],
[3.942075523087618e-06],
[-14.202436859894078],
[0.670002091817878],
[-0.009761974914994198],
[-5.8006065221068606e-15]]
accuracyFromTraining =
91.33822971744071
predictedData =
[[14.401799310251064],
[10.481799480368835],
[7.578466505722503],
[13.96195814877683],
[10.041958318894615],
[7.1386253442482825],
[15.490847097061135],
[11.57084726717892],
[8.667514292532587],
[18.073281006823265],
[14.15328117694105],
[11.249948202294718],
[20.794074729782523],
[16.874074899900307],
[13.970741925253975],
[22.73804311765818],
[18.818043287775964],
[15.914710313129632]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 2nd order model when
using specifically 2 independent variables and where the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
IMPORTANT NOTE: While the book "Probabilidad y estadistica para ingenieria
& ciencias (Walpole, Myers, Myers, Ye)" describes a model whose accuracy is
89.936% through finding a solution using the same model equation as used in
this method, I was able to devise an algorithm that finds an even
better solution, where I was able to get an accuracy of 90.57% (see code
example).
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[40.36892063492269],
[-0.29913333333337394],
[0.0008133333333341963],
[-1.2861238095233603],
[0.047676190476181546],
[0]]
accuracyFromTraining =
90.56977726188016
predictedData =
[[13.944206349214937],
[10.0242063492177],
[7.120873015888202],
[14.602587301596287],
[10.68258730159905],
[7.779253968269552],
[15.856920634929907],
[11.936920634932669],
[9.033587301603172],
[17.707206349215795],
[13.787206349218557],
[10.88387301588906],
[20.153444444453953],
[16.233444444456715],
[13.330111111127216],
[23.19563492064438],
[19.275634920647143],
[16.372301587317644]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 3rd order model when
using specifically 2 independent variables and where the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2
IMPORTANT NOTE: The same base algorithm used in the method
"getCustomizedMultipleSecondOrderPolynomialRegression()" was applied in
this one. This is important to mention because the algorithm I created in
that method proved to be superior to the one used in the book
"Probabilidad y estadistica para ingenieria & ciencias (Walpole, Myers,
Myers, Ye)". See that method's description for more information about
this.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[118.62284443469252],
[2.6850685669390923e-10],
[0],
[2.711111111130216e-06],
[-14.043715503707062],
[0.7156842175145357],
[-0.011482404265578339],
[-0.024609341568850862],
[0],
[0.0006459332618172914]]
accuracyFromTraining =
92.07595419629946
predictedData =
[[14.601310971885873],
[10.5735435991239],
[7.56244289303574],
[14.177873191206809],
[9.924073908458023],
[6.686941292383061],
[15.770722763127356],
[11.492745714709685],
[8.23143533296583],
[18.303384287749555],
[14.203083617980887],
[11.11944961488603],
[20.699382365175477],
[16.978612218373712],
[14.274508738245757],
[21.882241595507075],
[18.742856115990087],
[16.62013730314699]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictLinearLogisticRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearLogisticRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[0.5],
[0.999978721536189],
[0.9999991162466249],
[0.9999999984756125],
[1.7295081461872963e-11]]
"""
"""
predictGaussianRegression(coefficients="We give the Gaussian Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictGaussianRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[1.003006010014743e-12],
[0.09993332221727314],
[1.0046799183277663e-17],
[1.0318455659367212e-97],
[1.0083723565531913e-28]]
"""
"""
predictLinearRegression(coefficients="We give the Linear Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[8.470909090909096],
[12.56787878787879],
[14.616363636363639],
[18.71333333333333],
[9.49515151515152]]
"""
"""
predictMultipleLinearRegression(coefficients="We give the Multiple Linear Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1,1],
[4,4,4],
[6,6,6],
[10,10,10],
[1,8,9]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultipleLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[34.22503543093558],
[32.73815713171364],
[31.059896530994866],
[27.703375329557314],
[22.168047717282477]]
"""
"""
predictPolynomialRegression(coefficients="We give the Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[3.468869185343018e-08],
[1.1099322065704926e-05],
[3.226470574414124e-05],
[0.000131822599137008],
[5.151422907494728e-07]]
"""
"""
predictMultiplePolynomialRegression(coefficients="We give the Multiple Polynomial Regression mathematical coefficients that we want to predict with",
                                    orderOfThePolynomial="Assign a whole number that represents the order of degree of the Multiple Polynomial equation you want to make predictions with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultiplePolynomialRegression(coefficients=modelCoefficients, orderOfThePolynomial=4)
EXPECTED CODE RESULT:
predictedValues =
[[-13.54219748494156],
[-37.053240090011386],
[-48.742713747779355],
[-60.84907570434054],
[-73.31818590442116]]
"""
"""
predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients="We give the Customized Multiple Second Order Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[39.13047301587551],
[34.803724444448],
[32.60300063492485],
[29.365301587306917],
[32.832886349211385]]
"""
"""
predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients="We give the Customized Multiple Third Order Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[105.28333074423442],
[72.81181980293967],
[56.899154811293464],
[36.45941710222553],
[46.042387049575304]]
"""
"""
Classification("x independent variable datapoints to model", "y dependent variable datapoints to model")
The Classification library gives several methods to be able to get the best
fitting classification model to predict a determined classification problem.
"""
class Classification:
"""
getSupportVectorMachine(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Linear Support Vector Machine model to
be able to predict a classification problem of any number of independent
variables (x).
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[1.5736095873424212], [-0.26050769870994606], [-0.25468164794007475]]
accuracyFromTraining =
88.88888888888889
predictedData = [
[1],
[1],
[-1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + bn*xn >= -bo (As a note, remember that true equation representation is: w.x>=c)'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getKernelSupportVectorMachine(kernel="you specify here the type of kernel that you want to model with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Kernel Support Vector Machine
model to be able to predict a classification problem of any number of
independent variables (x).
* If "gaussian" kernel is applied. This method will find the best
fitting model of such gaussian kernel through a gaussian regression.
* If "polynomial" kernel is applied. This method will find the best
fitting model of such polynomial kernel through a Multiple Polynomial
Regression. You can specify the order of degree that you desire for your
Multiple Polynomial Kernel through the argument of this method named as
"orderOfPolynomialKernel".
* If "linear" kernel is applied. This method will find the best fitting
model of such polynomial kernel through a Multiple Linear Regression.
* You can also get a modified SVC by getting a non-linear intersection
plane to split your dataset into 2 specified categories. If you apply
this modified SVC, through the "isPolynomialSVC" argument of this method,
you will be able to get a polynomial intersecting plane for your dataset
whose degree order can be modified through the argument of this method
named as "orderOfPolynomialSVC".
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[
[
[-0.4067247938936074],
[-2.638275880744686],
[0.6025816805607462],
[1.5978782207152165],
[0.0018850313260649898]
],
[
[17.733125277353782],
[-0.41918858713133034],
[-0.07845753695120994],
[-7.126885817943787],
[0.7414460867570138],
[13.371724079069963],
[-16.435714646771032]
]
]
accuracyFromTraining =
100.0
predictedData = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
[
'Coefficients distribution for the Gaussian Kernel is as follows: kernel = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2))',
[
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + b_(n-1)*xn + bn*Kernel >= -b_0 --> for linear SVC (As a note, remember that true equation representation is: w.x>=c and that x here represents each one of the coordinates of your independent samples (x))',
'Coefficients distribution is as follows: b1*x1 + ... + b_(n-5)*x_m^m + b_(n-4)*x_(m-1) + ... + b_(n-3)*x_m^m + ... + b_(n-2)*x_m + ... + b_(n-1)*x_m^m + bn*Kernel >= -b_0 --> for polynomial SVC (m stands for the order degree selected for the polynomial SVC and n stands for the number of coefficients used in the polynomial SVC)'
]
]
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictSupportVectorMachine(coefficients="We give the SVC mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictSupportVectorMachine(coefficients=modelCoefficients)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
predictKernelSupportVectorMachine(coefficients="We give the kernel and the SVC mathematical coefficients that we want to predict with",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
kernel="you specify here the type of kernel that you want to predict with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5],
[3, 3]
]
y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0],
[0]
]
matrix_y = []
for row in range(0, len(y)):
temporalRow = []
if (y[row][0] == 0):
temporalRow.append(-1)
if (y[row][0] == 1):
temporalRow.append(1)
if ((y[row][0]!=0) and (y[row][0]!=1)):
raise Exception('ERROR: The dependent variable y has values different from 0 and 1.')
matrix_y.append(temporalRow)
matrix_x = []
for row in range(0, len(y)):
temporalRow = []
for column in range(0, len(x[0])):
temporalRow.append(x[row][column])
matrix_x.append(temporalRow)
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictKernelSupportVectorMachine(coefficients=modelCoefficients, isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, kernel='gaussian')
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
predictLinearLogisticClassifier(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification = mSL.Classification(predictThisValues, [])
predictedValuesForBg = classification.predictLinearLogisticClassifier(coefficients=modelCoefficients, threshold=0.5)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
The ReinforcementLearning Class gives several methods to make a model that is
able to learn in real time to predict the best option among the ones you tell
it that it has available. This is very useful when you actually don't have a
dataset to tell your model the expected output values to compare them and
train itself with them.
ReinforcementLearning("independent values (x) or options that your model will have available to pick from")
"""
class ReinforcementLearning:
    """
    getUpperConfidenceBound()
    This method helps you to identify what is the best option (these are called
    arms in this algorithm) among many, to get the best number of successful
    results when there is actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getRealTimeUpperConfidenceBound()", this method
    cannot solve a problem in real time, since it needs that you already have
    measured several rounds so that then this algorithm studies them to then
    tell you which arm is the best option among all the others.
    This method's advantages:
        * When this algorithm tries to identify the best arm, it only needs
          to know if its current selection was successful or not (0 or 1)
          and it doesn't need to know, in that round, anything about the
          other arms.
    This method's disadvantages:
        * This is the method that takes the most time to be able to
          identify the best arm. Just so that you have it in mind, for a
          problem to solve, this algorithm needed around the following
          round samples to start identifying the best arm / option for a
          random problem that I wanted to solve:
            + For 2 arms --> around 950 samples
            + For 3 arms --> around 1400 samples
            + For 4 arms --> around 1200 samples
            + For 5 arms --> around 320 samples
            + For 6 arms --> around 350 samples
            + For 7 arms --> around 400 samples
            + For 8 arms --> around 270 samples
            + For 9 arms --> around 600 samples
            + For 10 arms --> around 600 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurrence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since, according to the
          situations where we are supposed to need this algorithm, we are
          supposed to not know such probability of occurrence.
    NOTE: The logic of this algorithm follows the one described and taught by
    the Machine Learning Course "Machine Learning A-Z™: Hands-On Python & R In
    Data Science" taught by "Kirill Eremenko, Hadelin de Ponteves,
    SuperDataScience Team, SuperDataScience Support". I mention this because I
    don't quite agree with how this algorithm works but, even though I haven't
    checked, there is a great chance that this is how other data scientists do
    Upper Confidence Bound.
    CODE EXAMPLE:
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        rL = mSL.ReinforcementLearning(matrix_y)
        modelingResults = rL.getUpperConfidenceBound()
        accuracyFromTraining = modelingResults[1]
        historyOfPredictedData = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        for row in range(0, len(historyOfPredictedData)):
            histogram_x_data.append(historyOfPredictedData[row][0])
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        accuracyFromTraining =
        21.78
        historyOfPredictedData =
        NOTE: We won't show this result because it has 10'000 rows and it's just
        way too long to show here as a demonstration.
    """
    """
    getRealTimeUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occurred for a particular UCB problem to solve",
                                    sumsOfRewardsForEachArm="You have to indicate here the sums of rewards for each of the available arms for a particular UCB problem to solve",
                                    numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve")
    IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
                    PARTICULAR UCB PROBLEM, DON'T DEFINE ANY VALUES IN THE
                    ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
                    ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
                    METHOD (SEE CODE EXAMPLE).
    This method helps you to identify what is the best option (these are called
    arms in this algorithm) among many, to get the best number of successful
    results when there is actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getUpperConfidenceBound()", this method learns in
    real time, while "getUpperConfidenceBound()" expects you to already have
    measured several rounds.
    This method's advantages:
        * When this algorithm tries to identify the best arm, it only needs
          to know if its current selection was successful or not (0 or 1)
          and it doesn't need to know, in that round, anything about the
          other arms.
    This method's disadvantages:
        * This is the method that takes the most time to be able to
          identify the best arm. Just so that you have it in mind, for a
          problem to solve, this algorithm needed around the following
          round samples to start identifying the best arm / option for a
          random problem that I wanted to solve:
            + For 2 arms --> around 950 samples
            + For 3 arms --> around 1400 samples
            + For 4 arms --> around 1200 samples
            + For 5 arms --> around 320 samples
            + For 6 arms --> around 350 samples
            + For 7 arms --> around 400 samples
            + For 8 arms --> around 270 samples
            + For 9 arms --> around 600 samples
            + For 10 arms --> around 600 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurrence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since, according to the
          situations where we are supposed to need this algorithm, we are
          supposed to not know such probability of occurrence.
    NOTE: The logic of this algorithm follows the one described and taught by
    the Machine Learning Course "Machine Learning A-Z™: Hands-On Python & R In
    Data Science" taught by "Kirill Eremenko, Hadelin de Ponteves,
    SuperDataScience Team, SuperDataScience Support". I mention this because I
    don't quite agree with how this algorithm works but, even though I haven't
    checked, there is a great chance that this is how other data scientists do
    Upper Confidence Bound.
    CODE EXAMPLE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        # With this for-loop, we will simulate that we are getting the data in
        # real-time and that we are, at the same time, giving it to the algorithm
        numberOfArmsAvailable = len(matrix_y[0])
        for currentSample in range(0, len(matrix_y)):
            rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
            if (currentSample == 0):
                modelingResults = rL.getRealTimeUpperConfidenceBound()
            else:
                modelingResults = rL.getRealTimeUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachArm, numberOfSelectionsOfArms)
            currentNumberOfSamples = modelingResults[0]
            currentAccuracyFromTraining = modelingResults[1]
            sumsOfRewardsForEachArm = modelingResults[2]
            numberOfSelectionsOfArms = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We now add the real selected options by the algorithm
        for currentArm in range(0, numberOfArmsAvailable):
            for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
                histogram_x_data.append(currentArm)
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        currentNumberOfSamples=
        10000
        currentAccuracyFromTraining =
        21.78
        sumsOfRewardsForEachArm =
        [[120, 47, 7, 38, 1675, 1, 27, 236, 20, 7]]
        numberOfSelectionsOfArms =
        [[705, 387, 186, 345, 6323, 150, 292, 1170, 256, 186]]
    """
    """
    getModifiedUpperConfidenceBound()
    This method helps you to identify what is the best option (these are called
    arms in this algorithm) among many, to get the best number of successful
    results when there is actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the method "getRealTimeModifiedUpperConfidenceBound()" which learns
    in real-time, this method does not and it requires that you have already
    measured several rounds to then input them to this method.
    This method's advantages:
        * This method is the fastest of all, so far, to detect the best
          possible arm (option) among all the available ones:
            + For 2 arms --> around 1 sample
            + For 3 arms --> around 1 sample
            + For 4 arms --> around 1 sample
            + For 5 arms --> around 60 samples
            + For 6 arms --> around 60 samples
            + For 7 arms --> around 60 samples
            + For 8 arms --> around 60 samples
            + For 9 arms --> around 60 samples
            + For 10 arms --> around 60 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurrence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since, according to the
          situations where we are supposed to need this algorithm, we are
          supposed to not know such probability of occurrence.
    This method's disadvantages:
        * When this algorithm tries to identify the best arm, it needs to
          know, for each arm (regardless of the one picked by the
          algorithm), if they were a successful pick or not (0 or 1),
          unlike "getUpperConfidenceBound()" which only needs
          to know if its actual pick was successful or not.
    CODE EXAMPLE:
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        rL = mSL.ReinforcementLearning(matrix_y)
        modelingResults = rL.getModifiedUpperConfidenceBound()
        accuracyFromTraining = modelingResults[1]
        historyOfPredictedData = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We first add a fake selection for each available option (arms) so that we
        # ensure that they appear in the histogram. Otherwise, if we don't do this and
        # if the algorithm never considers one or some of the available options, it
        # will plot considering those options never existed.
        numberOfAvailableOptions = len(matrix_y[0])
        for row in range(0, numberOfAvailableOptions):
            histogram_x_data.append(row)
        # We now add the real selected options by the algorithm
        for row in range(0, len(historyOfPredictedData)):
            histogram_x_data.append(historyOfPredictedData[row][0])
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        accuracyFromTraining =
        26.93
        historyOfPredictedData =
        NOTE: We won't show this result because it has 10'000 rows and it's just
        way too long to show here as a demonstration.
    """
    """
    getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occurred for a particular UCB problem to solve",
                                            sumsOfRewardsForEachSelectedArm="You have to indicate the sums of the rewards for each arm but only for those situations where the algorithm picked each arm",
                                            numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve",
                                            trueSumsOfRewardsForEachArm="You have to indicate the real number of times that each arm has been a successful result, regardless of what the algorithm identified",
                                            meanList="You have to indicate the mean list of the rewards obtained for each arm",
                                            standardDeviationList="You have to indicate the standard deviation list of the rewards obtained for each arm")
    IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
                    PARTICULAR UCB PROBLEM, DON'T DEFINE ANY VALUES IN THE
                    ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
                    ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
                    METHOD (SEE CODE EXAMPLE).
    This method helps you to identify what is the best option (these are called
    arms in this algorithm) among many, to get the best number of successful
    results when there is actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getModifiedUpperConfidenceBound()", this method
    learns in real time, while "getModifiedUpperConfidenceBound()" expects you
    to already have measured several rounds.
    This method's advantages:
        * This method is the fastest of all, so far, to detect the best
          possible arm (option) among all the available ones:
            + For 2 arms --> around 1 sample
            + For 3 arms --> around 1 sample
            + For 4 arms --> around 1 sample
            + For 5 arms --> around 60 samples
            + For 6 arms --> around 60 samples
            + For 7 arms --> around 60 samples
            + For 8 arms --> around 60 samples
            + For 9 arms --> around 60 samples
            + For 10 arms --> around 60 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurrence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since, according to the
          situations where we are supposed to need this algorithm, we are
          supposed to not know such probability of occurrence.
    This method's disadvantages:
        * When this algorithm tries to identify the best arm, it needs to
          know, for each arm (regardless of the one picked by the
          algorithm), if they were a successful pick or not (0 or 1),
          unlike "getRealTimeUpperConfidenceBound()" which only needs
          to know if its actual pick was successful or not.
    CODE EXAMPLE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        # With this for-loop, we will simulate that we are getting the data in
        # real-time and that we are, at the same time, giving it to the algorithm
        numberOfArmsAvailable = len(matrix_y[0])
        for currentSample in range(0, len(matrix_y)):
            rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
            if (currentSample == 0):
                modelingResults = rL.getRealTimeModifiedUpperConfidenceBound()
            else:
                modelingResults = rL.getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachSelectedArm, numberOfSelectionsOfArms, trueSumsOfRewardsForEachArm, meanList, standardDeviationList)
            currentNumberOfSamples = modelingResults[0]
            currentAccuracyFromTraining = modelingResults[1]
            sumsOfRewardsForEachSelectedArm = modelingResults[2]
            numberOfSelectionsOfArms = modelingResults[3]
            trueSumsOfRewardsForEachArm = modelingResults[4]
            meanList = modelingResults[5]
            standardDeviationList = modelingResults[6]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We first add a fake selection for each available option (arms) so that we
        # ensure that they appear in the histogram. Otherwise, if we don't do this and
        # if the algorithm never considers one or some of the available options, it
        # will plot considering those options never existed.
        for row in range(0, numberOfArmsAvailable):
            histogram_x_data.append(row)
        # We now add the real selected options by the algorithm
        for currentArm in range(0, numberOfArmsAvailable):
            for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
                histogram_x_data.append(currentArm)
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        currentNumberOfSamples=
        10000
        currentAccuracyFromTraining =
        26.93
        sumsOfRewardsForEachSelectedArm =
        [[3, 0, 0, 0, 2690, 0, 0, 0, 0, 0]]
        numberOfSelectionsOfArms =
        [[25, 0, 0, 0, 9975, 0, 0, 0, 0, 0]]
        trueSumsOfRewardsForEachArm =
        [[1703, 1295, 728, 1196, 2695, 126, 1112, 2091, 952, 489]]
        meanList =
        [[0.1703,
          0.1295,
          0.0728,
          0.1196,
          0.2695,
          0.0126,
          0.1112,
          0.2091,
          0.0952,
          0.0489]]
        standardDeviationList =
        [[1.2506502260503618,
          1.0724240984136193,
          0.7004403369435815,
          0.9286872458865242,
          1.412843221683186,
          0.3047987328938745,
          0.7525852536272276,
          1.2007787911241279,
          1.030718190027389,
          0.5406998109413704]]
    """
"""
The DeepLearning Class gives several methods to make a model through the
concept of how a real neuron works.
DeepLearning("independent values (x) of the datapoints to model", "dependent values (y) of the datapoints to model")
"""
class DeepLearning:
"""
getReluActivation(x="the instant independent value from which you want to know the dependent ReLU value/result")
This method calculates and returns the ReLU function value of the instant
independent value that you give in the "x" local variable of this method.
"""
"""
getReluActivationDerivative(x="the instant independent value from which you want to know the derivate of the dependent ReLU value/result")
This method calculates and returns the derivate ReLU function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
"""
getTanhActivation(x="the instant independent value from which you want to know the dependent Hyperbolic Tangent (Tanh) value/result")
This method calculates and returns the Hyperbolic Tangent (Tanh) function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getSigmoidActivation(x="the instant independent value from which you want to know the dependent Sigmoid value/result")
This method calculates and returns the Sigmoid function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
"""
getRaiseToTheSecondPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheSecondPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheThirdPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheThirdPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheFourthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheFourthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheFifthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheFifthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheSixthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheSixthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getExponentialActivation(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the Exponential-Euler function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getExponentialDerivative(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the derivate Exponential-Euler function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getSingleArtificialNeuron(activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your neuron to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your neuron to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates a single Artificial Neuron and, within this method,
such neuron trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[[28.235246103419946],
[1.12749544645359],
[-1.7353168202914326],
[0.7285727543658252]]
accuracyFromTraining =
95.06995458954695
predictedData =
[[28.868494779855514],
[32.80418405006583],
[25.89997715314427],
[38.25484973427189],
[16.295874460357858],
[26.67205741761012],
[27.198762118476985],
[26.859066716794352],
[31.50391014224514],
[26.42881371215305],
[38.14632853395502],
[30.297502725191123],
[26.929105800646223]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the weights of a particular neuron from a variable that contains all the
weights of all neurons (matrix_w).
"""
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the partial derivative of the Total Error (dEtotal) due respect with the
partial derivative of the corresponding Activation Function (dFz) for a
particular neuron within an Artificial Neural Network.
"""
"""
getArtificialNeuralNetwork(artificialNeuralNetworkDistribution="must contain an array that indicates the distribution of the desired neurons for each layer in columns. If a row-column value equals 1, this will mean that you want a neuron in that position. A 0 means otherwise",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable artificialNeuralNetworkDistribution. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your Artificial Neural Network to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your Artificial Neural Network to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized initial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized initial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates an Artificial Neural Network with a customized desired
number of neurons within it and, within this method, such Artificial Neural
Network trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResultForOutputNeuron1, expectedResultForOutputNeuron2]
matrix_y = [
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 0]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[1,1,1]
]
aF = [
['relu', 'relu', 'sigmoid'],
['relu', 'relu', 'sigmoid']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.1, numberOfEpochs=10000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[
[2.133298325032156, -0.45548307884431677, -2.1332978269534664, -2.1332978292080043],
[2.287998188065245, 1.3477978318721369, -1.143999014059006, -1.1439990110690932],
[-0.6930287605411998, 0.41058709282271444, 0.6057943758418374],
[4.6826225603458056e-08, -1.8387485390712266, 2.2017181913306803],
[-4.1791269585765285, -2.5797524896448563, 3.3885776200605955],
[4.181437529101815, 2.5824655964639742, -3.3907451300458136]
]
accuracyFromTraining =
98.94028954483407
predictedData =
[[0.011560111421083964, 0.9884872182827878],
[0.9873319964204451, 0.01262867979045398],
[0.9873319961998808, 0.012628680010459043],
[0.015081447917016324, 0.9849528347708301],
[0.9989106156594524, 0.0010867877109744279]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM],
[Neuron2_bias, Neuron2_weight1, Neuron2_weight2, ... , Neuron2_weightZ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[NeuronN_bias, NeuronN_weight1, NeuronN_weight2, ... , NeuronN_weightK],
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictSingleArtificialNeuron(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise."
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictSingleArtificialNeuron(coefficients=modelCoefficients, activationFunction='none', isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.140432977147068], [28.799532314784063], [25.69562041179361]]
"""
"""
predictArtificialNeuralNetwork(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable coefficients. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise."
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.00001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictArtificialNeuralNetwork(coefficients=modelCoefficients, artificialNeuralNetworkDistribution=aNND, activationFunction=aF, isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.22084819611869], [28.895166544625255], [25.788001189515317]]
"""
| 54.448301 | 630 | 0.588468 |
"""
Copyright 2021 Cesar Miranda Meza (alias: Mortrack)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 17:15:51 2020
Last updated on Mon May 24 8:40:00 2021
@author: engineer Cesar Miranda Meza (alias: Mortrack)
"""
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
# IMPORTANT NOTE: Remember to be careful with input argument variables when
# you call any method or class because it gets altered in
# python ignoring parenting or children logic
"""
DiscreteDistribution()
The DiscreteDistribution class allows you to get some parameters, through
some of its methods, that describe the dataset characteristics (eg. mean,
variance and standard deviation).
"""
class DiscreteDistribution:
    """
    DiscreteDistribution()
    Provides row-wise descriptive statistics (mean, sample variance and
    sample standard deviation) for a matrix of data point samples.
    """
    def getMean(self, samplesList):
        """
        getMean(samplesList="will contain a matrix of rows and columns, were we want to get the Mean of each rows data point samples")

        Returns a matrix (containing only 1 column for all rows), were each
        row will have the arithmetic mean of the corresponding row of
        "samplesList".

        EXAMPLE CODE:
            matrix_x = [
                [1,2,3],
                [4,5,6],
                [1,5,9]
                ]
            from MortrackLibrary.machineLearning import MortrackML_Library as mSL
            dD = mSL.DiscreteDistribution()
            result = dD.getMean(matrix_x)

        EXPECTED CODE RESULT:
            result =
            [[2.0], [5.0], [5.0]]
        """
        # All rows are assumed to have at least as many samples as row 0;
        # only the first len(samplesList[0]) values of each row are used.
        numberOfSamplesPerRow = len(samplesList[0])
        return [[sum(row[0:numberOfSamplesPerRow]) / numberOfSamplesPerRow]
                for row in samplesList]

    def getVariance(self, samplesList):
        """
        getVariance(samplesList="will contain a matrix of rows and columns, were we want to get the Variance of each rows data point samples")

        Returns a matrix (containing only 1 column for all rows), were each
        row will have the sample (unbiased, n-1 denominator) variance of the
        corresponding row of "samplesList".
        Remember that Variance is also denoted as the square of sigma.

        EXAMPLE CODE:
            matrix_x = [
                [1,2,3],
                [4,5,6],
                [1,5,9],
                [1,4,7]
                ]
            from MortrackLibrary.machineLearning import MortrackML_Library as mSL
            dD = mSL.DiscreteDistribution()
            result = dD.getVariance(matrix_x)

        EXPECTED CODE RESULT:
            result =
            [[1.0], [1.0], [16.0], [9.0]]

        Raises:
            Exception: if there are fewer than 2 samples per row.
        """
        numberOfSamplesPerRow = len(samplesList[0])
        if (numberOfSamplesPerRow<2):
            raise Exception('ERROR: The given number of samples must be at least 2.')
        meanList = self.getMean(samplesList)
        # Sample variance: sum of squared deviations over (n-1).
        return [[sum((samplesList[row][column] - meanList[row][0])**2
                     for column in range(numberOfSamplesPerRow)) / (numberOfSamplesPerRow - 1)]
                for row in range(len(samplesList))]

    def getStandardDeviation(self, samplesList):
        """
        getStandardDeviation(samplesList="will contain a matrix of rows and columns, were we want to get the Standard Deviation of each rows data point samples")

        Returns a matrix (containing only 1 column for all rows), were each
        row will have the sample standard deviation of the corresponding row
        of "samplesList".
        Remember that Standard Deviation is also denoted as sigma.

        EXAMPLE CODE:
            matrix_x = [
                [1,2,3],
                [4,5,6],
                [1,5,9],
                [1,4,7]
                ]
            from MortrackLibrary.machineLearning import MortrackML_Library as mSL
            dD = mSL.DiscreteDistribution()
            result = dD.getStandardDeviation(matrix_x)

        EXPECTED CODE RESULT:
            result =
            [[1.0], [1.0], [4.0], [3.0]]
            (NOTE: the previous version of this docstring erroneously showed
            the variances here instead of the standard deviations.)

        Raises:
            Exception: if there are fewer than 2 samples per row.
        """
        # The standard deviation is the square root of the sample variance,
        # so we delegate to getVariance() instead of duplicating its
        # summation (getVariance() also performs the minimum-sample check).
        return [[rowVariance[0]**0.5] for rowVariance in self.getVariance(samplesList)]
"""
Tdistribution(desiredTrustInterval="It's a float numeric type value that will represent the desired percentage(%) that you desire for your trust interval")
The Tdistribution class allows you to obtain, through its methods, the
two-tailed critical values (T-Values) of the Student t-distribution for the
trust interval that you specify (95%, 99% or 99.9%).
"""
class Tdistribution:
    """
    Tdistribution(desiredTrustInterval="float value that represents the desired percentage(%) for your trust interval")
    Computes two-tailed critical values (T-Values) of the Student
    t-distribution for trust intervals of 95%, 99% or 99.9%.
    """
    # For each supported trust interval we store:
    #   1. the tabulated two-tailed critical values for v = 1..30 degrees of
    #      freedom,
    #   2. the cubic coefficients (a0, a1, a2, a3) used to approximate the
    #      critical value for 30 < v <= 120,
    #   3. the linear coefficients (b0, b1) used for 120 < v <= 2400,
    #   4. the asymptotic critical value used for v > 2400.
    # NOTE: in the 99% table, the v=10 entry was corrected from 3.196 to
    # 3.169 (the standard two-tailed t-table value), which also restores the
    # monotonic decrease of the tabulated values.
    _PARAMETERS = {
        95: ([12.706, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179, 2.160, 2.145, 2.131, 2.120, 2.110, 2.101, 2.093, 2.086, 2.080, 2.074, 2.069, 2.064, 2.060, 2.056, 2.052, 2.048, 2.045, 2.042],
             (2.16783333, -6.11250000e-03, 7.26157407e-05, -2.89351852e-07),
             (1.98105, -8.77193e-6),
             1.96),
        99: ([63.656, 9.925, 5.841, 4.604, 4.032, 3.707, 3.499, 3.355, 3.250, 3.169, 3.106, 3.055, 3.012, 2.977, 2.947, 2.921, 2.898, 2.878, 2.861, 2.845, 2.831, 2.819, 2.807, 2.797, 2.787, 2.779, 2.771, 2.763, 2.756, 2.750],
             (3.03316667, -1.38875000e-02, 1.68773148e-04, -6.82870370e-07),
             (2.619, -17.982e-6),
             2.576),
        99.9: ([636.578, 31.600, 12.924, 8.610, 6.869, 5.959, 5.408, 5.041, 4.781, 4.587, 4.437, 4.318, 4.221, 4.140, 4.073, 4.015, 3.965, 3.922, 3.883, 3.850, 3.819, 3.792, 3.768, 3.745, 3.725, 3.707, 3.689, 3.674, 3.660, 3.646],
             (4.23, -2.86250000e-02, 3.47361111e-04, -1.40277778e-06),
             (3.37737, -36.4035e-6),
             3.290),
    }

    def __init__(self, desiredTrustInterval):
        # Only the trust intervals with tabulated/approximated critical
        # values are supported.
        self.desiredTrustInterval = desiredTrustInterval
        if ((self.desiredTrustInterval != 95) and (self.desiredTrustInterval != 99) and (self.desiredTrustInterval != 99.9)):
            raise Exception('ERROR: The desired trust interval hasnt been programmed on this class yet.')

    def getCriticalValue(self, numberOfSamples):
        """
        getCriticalValue(numberOfSamples="Must have a whole number that represents the number of samples you want to get the critical value from")

        Returns a float numeric value which represents the two-tailed
        Critical Value of the t-distribution for the configured trust
        interval and for v = numberOfSamples - 1 degrees of freedom.
        Remember that the T-distribution considers that your data has a
        normal function form tendency.

        EXAMPLE CODE:
            from MortrackLibrary.machineLearning import MortrackML_Library as mSL
            tD = mSL.Tdistribution(desiredTrustInterval=95)
            result = tD.getCriticalValue(3)

        EXPECTED CODE RESULT:
            result =
            4.303

        Raises:
            Exception: if numberOfSamples is not a whole number greater than
                       zero, or if fewer than 2 samples are given.
        """
        # BUGFIX: the original validation used "and", which only rejected
        # non-int positive values and silently accepted 0 and negative
        # integers (leading to broken negative indexing below).
        if ((type(numberOfSamples) != int) or (numberOfSamples <= 0)):
            raise Exception('ERROR: The number of samples must have a whole value and not with decimals. Such whole value must be greater than zero.')
        v = numberOfSamples - 1  # degrees of freedom
        if (v == 0):
            raise Exception('ERROR: The t distribution mathematical method requires at least 2 samples.')
        tabulatedValues, cubic, linear, asymptote = self._PARAMETERS[self.desiredTrustInterval]
        if (v <= 30):
            return tabulatedValues[int(v) - 1]
        if (v <= 120):
            a0, a1, a2, a3 = cubic
            return a0 + a1*v + a2*v**2 + a3*v**3
        if (v <= 2400):
            b0, b1 = linear
            return b0 + b1*v
        return asymptote
class TrustIntervals:
    """
    TrustIntervals()
    Offers methods to compute mean (confidence) intervals and prediction
    intervals for datasets whose rows are assumed to follow a normal
    distribution, based on a previously obtained t-distribution Critical
    Value (T-Value).
    """
    def getMeanIntervals(self, samplesList, meanList, standardDeviationList, tValue):
        """
        getMeanIntervals(samplesList="matrix of the dataset from which you want to get the Mean Intervals",
                         meanList="single-column matrix were each row has the mean of the corresponding dataset row",
                         standardDeviationList="single-column matrix were each row has the standard deviation of the corresponding dataset row",
                         tValue="float with the T-Value (Critical Value) required to calculate the mean intervals")

        Returns a matrix with 2 columns:
            * Column 1 = lower (negative) mean interval value of each row
            * Column 2 = upper (positive) mean interval value of each row
        computed as: mean -/+ tValue * s / sqrt(n)

        Remember that the T-distribution considers that your data has a
        normal function form tendency.
        """
        sampleCountPerRow = len(samplesList[0])
        # sqrt(n) is loop-invariant, so compute it once.
        rootOfSampleCount = sampleCountPerRow**(1/2)
        meanIntervals = []
        for rowIndex in range(len(samplesList)):
            rowCenter = meanList[rowIndex][0]
            rowHalfWidth = tValue*standardDeviationList[rowIndex][0]/rootOfSampleCount
            meanIntervals.append([rowCenter - rowHalfWidth, rowCenter + rowHalfWidth])
        return meanIntervals

    def getPredictionIntervals(self, numberOfSamples, meanList, standardDeviation, tValue):
        """
        getPredictionIntervals(numberOfSamples="number of samples per row of the original dataset",
                               meanList="single-column matrix were each row has the mean of the corresponding dataset row",
                               standardDeviation="single-column matrix were each row has the standard deviation of the corresponding dataset row",
                               tValue="float with the T-Value (Critical Value) required to calculate the prediction intervals")

        Returns a matrix with 2 columns:
            * Column 1 = lower (negative) prediction interval value of each row
            * Column 2 = upper (positive) prediction interval value of each row
        computed as: mean -/+ tValue * s * sqrt(1 + 1/n)

        Remember that the T-distribution considers that your data has a
        normal function form tendency.
        """
        # The scaling factor sqrt(1 + 1/n) is loop-invariant.
        predictionScale = (1+1/numberOfSamples)**(1/2)
        predictionIntervals = []
        for rowIndex in range(len(meanList)):
            rowCenter = meanList[rowIndex][0]
            rowHalfWidth = tValue*standardDeviation[rowIndex][0]*predictionScale
            predictionIntervals.append([rowCenter - rowHalfWidth, rowCenter + rowHalfWidth])
        return predictionIntervals
"""
Combinations("The sample list you want to work with")
The Combinations class allows you to get the possible combinations within
the values contained in the "samplesList" variable contained within this class.
"""
class Combinations:
    def __init__(self, samplesList):
        # samplesList: the list of elements whose combinations/permutations
        # the methods of this class will compute.
        self.samplesList = samplesList
    """
    setSamplesList("The new sample list you want to work with")

    This method changes the value of the object's variable "samplesList" to a
    new set of list values that you want to work with through this class
    methods.
    """
    def setSamplesList(self, samplesList):
        self.samplesList = samplesList
    """
    getPositionCombinationsList()

    Returns all the possible positions of the elements contained within a list.

    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getPositionCombinationsList()

    EXPECTED CODE RESULT:
        result =
        [[0, 1, 2], [1, 0, 2], [1, 2, 0], [0, 2, 1], [2, 0, 1]]
    """
    def getPositionCombinationsList(self):
        # Strategy: start from samplesListLength**2 copies of the original
        # list; for every element, generate the lists obtained by moving that
        # element to every possible position; finally remove the duplicated
        # rows that this process produces (keeping first occurrences).
        samplesListLength = len(self.samplesList)
        originalSamplesList = self.samplesList
        # Pre-allocate samplesListLength**2 rows, then fill each one with an
        # independent copy of the original list.
        possibleCombinations = [ [ 0 for i in range(samplesListLength) ] for j in range(samplesListLength**2) ]
        for row in range(0, samplesListLength**2):
            for column in range(0, samplesListLength):
                possibleCombinations[row][column] = originalSamplesList[column]
        # For each element ("specificDataPoint"), remove it from its original
        # position and re-insert it at every possible position, consuming one
        # pre-filled row per move.
        possibleCombinationsRow = 0
        for specificDataPoint in range(0, samplesListLength):
            for newDataPointPosition in range(0, samplesListLength):
                possibleCombinations[possibleCombinationsRow].pop(specificDataPoint)
                possibleCombinations[possibleCombinationsRow].insert(newDataPointPosition ,originalSamplesList[specificDataPoint])
                possibleCombinationsRow = possibleCombinationsRow + 1
        # Deduplicate: "possibleCombinationsRow" points at the row currently
        # being checked; every time another row matches it, pop the duplicate
        # and restart the scan for the same pointed row (the -1 below cancels
        # the +1 after the for loop). When a full scan finds no duplicate,
        # advance the pointer; stop once the pointer passes the last row.
        possibleCombinationsRow = 0
        while(True):
            for row in range(0, len(possibleCombinations)):
                isRowMatch = True
                for column in range(0, samplesListLength):
                    if (possibleCombinations[possibleCombinationsRow][column] != possibleCombinations[row][column]):
                        isRowMatch = False
                if ((isRowMatch==True) and (possibleCombinationsRow!=row)):
                    possibleCombinations.pop(row)
                    possibleCombinationsRow = possibleCombinationsRow - 1
                    break
                possibleCombinationsRow = possibleCombinationsRow + 1
            if (possibleCombinationsRow==len(possibleCombinations)):
                break
        return possibleCombinations
    """
    getCustomizedPermutationList()

    Returns a customized form of permutation of the elements contained within a
    list (i.e. the power set, preserving element order). See code example and
    expected code result to get a better idea of how this method works.

    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getCustomizedPermutationList()

    EXPECTED CODE RESULT:
        result =
        [[], [0], [1], [0, 1], [2], [0, 2], [1, 2], [0, 1, 2]]
    """
    def getCustomizedPermutationList(self):
        samplesListLength = len(self.samplesList)
        originalSamplesList = self.samplesList
        customizedPermutations = []
        # Enumerate all 2**n bitmasks; bit "column" of "row" decides whether
        # the element at that position is included in the current subset.
        for row in range(0, 2**samplesListLength):
            temporalRow = []
            for column in range(0, samplesListLength):
                if (((row)&(2**column)) == 2**column):
                    temporalRow.append(originalSamplesList[column])
            customizedPermutations.append(temporalRow)
        return customizedPermutations
"""
DatasetSplitting("x independent variable datapoints to model", "y dependent variable datapoints to model")
The DatasetSplitting library allows you to split your dataset into training and
test set.
"""
class DatasetSplitting:
    """
    DatasetSplitting("x independent variable datapoints to model", "y dependent variable datapoints to model")
    The DatasetSplitting class allows you to split your dataset into training
    and test sets.
    """
    def __init__(self, x_samplesList, y_samplesList):
        # Both arguments are matrices (lists of row-lists) and must contain
        # the same number of rows.
        self.y_samplesList = y_samplesList
        self.x_samplesList = x_samplesList

    def getDatasetSplitted(self, testSize = 0.10, isSplittingRandom = True):
        """
        getDatasetSplitted(testSize = "the desired size of the test samples. This value must be greater than zero and lower than one",
                           isSplittingRandom = "True if you want samples to be splitted randomly. False if otherwise is desired")

        Returns the dataset split as a list: [x_train, x_test, y_train, y_test].
        When isSplittingRandom is False, the test samples are taken from the
        beginning of the dataset; otherwise they are drawn at random without
        replacement.

        NOTE: unlike the previous implementation, this method no longer
        destructively consumes the matrices given to the constructor; the
        caller's data is left untouched.

        Raises:
            Exception: if testSize does not comply with 0 < testSize < 1.
        """
        import random  # hoisted: was re-imported on every loop iteration
        if ((testSize<=0) or (testSize>=1)):
            raise Exception('ERROR: The testSize argument variable must comply the following criteria: 0>testSize<1')
        totalNumberOfSamples = len(self.y_samplesList)
        totalNumberOfColumns = len(self.x_samplesList[0])
        # Work on shallow copies of the outer lists so that popping rows does
        # not mutate the caller's matrices (the original row objects are
        # still reused for the training set, as before).
        matrix_x = list(self.x_samplesList)
        matrix_y = list(self.y_samplesList)
        totalNumberOfTestSamples = round(totalNumberOfSamples*testSize)
        x_test = []
        y_test = []
        for row in range(0, totalNumberOfTestSamples):
            if (isSplittingRandom == True):
                # random.randrange has an exclusive upper bound; using the
                # number of rows still available makes every remaining row
                # selectable (the previous bound of "remaining-1" could never
                # pick the last remaining row and raised ValueError when only
                # one row was left).
                sampleIndex = random.randrange(0, totalNumberOfSamples-row)
            else:
                # Deterministic split: always consume the first row.
                sampleIndex = 0
            # Copy the selected row (only its first totalNumberOfColumns
            # values, matching the previous behavior) into the test set.
            x_test.append(matrix_x[sampleIndex][0:totalNumberOfColumns])
            y_test.append([matrix_y[sampleIndex][0]])
            matrix_x.pop(sampleIndex)
            matrix_y.pop(sampleIndex)
        # Whatever was not moved to the test set becomes the training set.
        x_train = matrix_x
        y_train = matrix_y
        return [x_train, x_test, y_train, y_test]
"""
FeatureScaling("datapoints you want to apply Feature Scaling to")
The Feature Scaling library gives several methods to apply feature scaling
techniques to your datasets.
"""
class FeatureScaling:
    """Feature-scaling helpers for 2-D sample matrices (lists of row-lists)."""

    def __init__(self, samplesList):
        # samplesList: matrix of data points, one row per observation.
        self.samplesList = samplesList

    def getStandarization(self, preferedMean=[], preferedStandardDeviation=[], isPreferedDataUsed = False):
        """Standardize the stored samples column by column.

        Every value is mapped to (value - mean) / standardDeviation.  The
        per-column mean and sample standard deviation (n-1 denominator) are
        computed from the stored samples, unless isPreferedDataUsed is True,
        in which case the caller-supplied statistics are applied instead.

        Args:
            preferedMean: single-row matrix [[m1, m2, ...]] holding one mean
                per column; only read when isPreferedDataUsed is True.
            preferedStandardDeviation: single-row matrix holding one standard
                deviation per column; only read when isPreferedDataUsed is True.
            isPreferedDataUsed: True to scale with the supplied statistics.

        Returns:
            [mean, standardDeviation, normalizedDataPoints]
        """
        totalColumns = len(self.samplesList[0])
        if (isPreferedDataUsed == True):
            mean = preferedMean
            standardDeviation = preferedStandardDeviation
        else:
            totalRows = len(self.samplesList)
            # Per-column mean, kept as a single-row matrix.
            mean = [[sum(currentRow[column] for currentRow in self.samplesList) / totalRows
                     for column in range(totalColumns)]]
            # Per-column sample standard deviation (Bessel's n-1 denominator).
            standardDeviation = [[(sum((currentRow[column] - mean[0][column]) ** 2
                                       for currentRow in self.samplesList) / (totalRows - 1)) ** 0.5
                                  for column in range(totalColumns)]]
        normalizedDataPoints = [
            [(currentRow[column] - mean[0][column]) / standardDeviation[0][column]
             for column in range(totalColumns)]
            for currentRow in self.samplesList]
        return [mean, standardDeviation, normalizedDataPoints]

    def getReverseStandarization(self, preferedMean, preferedStandardDeviation):
        """Undo a standardization of the stored samples.

        Each stored (standardized) value is mapped back to
        value * standardDeviation + mean, column by column.

        Args:
            preferedMean: single-row matrix with the mean used to standardize.
            preferedStandardDeviation: single-row matrix with the standard
                deviation used to standardize.

        Returns:
            [preferedMean, preferedStandardDeviation, deNormalizedDataPoints]
        """
        totalColumns = len(self.samplesList[0])
        deNormalizedDataPoints = [
            [currentRow[column] * preferedStandardDeviation[0][column] + preferedMean[0][column]
             for column in range(totalColumns)]
            for currentRow in self.samplesList]
        return [preferedMean, preferedStandardDeviation, deNormalizedDataPoints]

    def setSamplesList(self, newSamplesList):
        """Replace the samples matrix this object operates on."""
        self.samplesList = newSamplesList
"""
The Regression library gives several different types of coeficients to model
a required data. But notice that the arguments of this class are expected to be
the mean values of both the "x" and the "y" values.
Regression("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class Regression:
    def __init__(self, x_samplesList, y_samplesList):
        """Store the dataset to be modeled.

        Args:
            x_samplesList: matrix of independent-variable samples, one row per
                observation (e.g. [[x11, x12], [x21, x22], ...]).
            y_samplesList: column matrix of dependent-variable samples
                (e.g. [[y1], [y2], ...]), row-aligned with x_samplesList.
        """
        self.y_samplesList = y_samplesList
        self.x_samplesList = x_samplesList
    def set_xSamplesList(self, x_samplesList):
        """Replace the stored independent-variable (x) samples matrix."""
        self.x_samplesList = x_samplesList
    def set_ySamplesList(self, y_samplesList):
        """Replace the stored dependent-variable (y) samples matrix."""
        self.y_samplesList = y_samplesList
"""
# ----------------------------------- #
# ----------------------------------- #
# ----- STILL UNDER DEVELOPMENT ----- #
# ----------------------------------- #
# ----------------------------------- #
getGaussianRegression()
Returns the best fitting model to predict the behavior of a dataset through
a Gaussian Regression model that may have any number of independent
variables (x).
    Note that if no fitting model is found, then this method will swap the
    dependent variables' values in such a way that "0"s will be interpreted as
    "1"s and vice-versa, to then try again to find at least 1 fitting model for
    your dataset. If this still doesn't work, then this method will return
    modeling results with all coefficients equal to zero, a predicted
    accuracy equal to zero, and all predicted values equal to zero as well.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[39.139277579342206],
[-13.813509557297337],
[2.302251592882884],
[-13.813509557296968],
[2.302251592882836]]
accuracyFromTraining =
99.94999999999685
predictedData =
[[0.9989999999998915],
[0.9990000000000229],
[0.9989999999999554],
[0.9989999999999234],
[0.0009999999999997621],
[0.0010000000000001175],
[0.00099999999999989],
[0.000999999999999915]]
# NOTE:"predictedData" will try to give "1" for positive values and "0"
# for negative values always, regardless if your negative values
# were originally given to the trained model as "-1"s.
coefficientDistribution =
'Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
    def getGaussianRegression(self):
        """Fit a Gaussian-kernel classification model to the stored dataset.

        Labels in self.y_samplesList must each be 1/0.999 (positive class) or
        -1/0/0.001 (negative class).  Labels are clipped to 0.999/0.001,
        transformed with -log(p), fitted with a 2nd-order multiple polynomial
        regression, and every candidate model from that fit is then re-scored
        with the actual Gaussian equation
        exp(-(bo + b1*x1 + b2*x1^2 + ... + b_(n-1)*xn + bn*xn^2)).

        Returns:
            [modelCoefficients, accuracyFromTraining, predictedData,
             coefficientDistribution, allModeledAccuracies]

        Raises:
            Exception: if any dependent value is not one of the accepted
                label encodings listed above.
        """
        from . import MortrackML_Library as mSL
        import math
        numberOfRows = len(self.y_samplesList)
        # We re-adapt the current dependent samples (y) so that we can later
        # use them to make the Gaussian function model without obtaining
        # indeterminate values.
        modifiedSamplesList_y = []
        for row in range(0, numberOfRows):
            temporalRow = []
            if ((self.y_samplesList[row][0]!=1) and (self.y_samplesList[row][0]!=-1) and (self.y_samplesList[row][0]!=0) and (self.y_samplesList[row][0]!=0.001) and (self.y_samplesList[row][0]!=0.999)):
                raise Exception('ERROR: One of the dependent (y) data points doesnt have the right format values (eg. 1 or a -1; 1 or a 0; 0.999 or a 0.001).')
            if ((self.y_samplesList[row][0]==1) or (self.y_samplesList[row][0]==0.999)):
                temporalRow.append(0.999)
            if ((self.y_samplesList[row][0]==-1) or (self.y_samplesList[row][0]==0) or self.y_samplesList[row][0]==0.001):
                temporalRow.append(0.001)
            modifiedSamplesList_y.append(temporalRow)
        # We modify our current dependent samples (y) to get the dependent
        # samples (y) that we will input to make the Gaussian function model.
        # -log(p) linearizes the Gaussian exponent so it can be fitted with a
        # polynomial regression below.
        modifiedGaussianSamplesList_y = []
        for row in range(0, numberOfRows):
            temporalRow = []
            #temporalRow.append( -math.log(modifiedSamplesList_y[row][0])*2 )
            temporalRow.append( -math.log(modifiedSamplesList_y[row][0]) )
            modifiedGaussianSamplesList_y.append(temporalRow)
        # We obtain the independent coefficients of the best fitting model
        # obtained through the Gaussian function (kernel) that we will use to distort
        # the current dimensional spaces that we were originally given by the
        # user.
        regression = mSL.Regression(self.x_samplesList, modifiedGaussianSamplesList_y)
        modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=2, evtfbmip=False)
        allModeledAccuracies = modelingResults[4]
        # Re-evaluate every obtained model trained through the Multiple
        # Polynomial Regression but this time determining the best fitting
        # model by recalculating each of their accuracies but this time with
        # the right math equation, which would be the gaussian function.
        bestModelingResults = []
        for currentModelingResults in range(0, len(allModeledAccuracies)):
            currentCoefficients = allModeledAccuracies[currentModelingResults][1]
            isComplyingWithGaussCoefficientsSigns = True
            for currentCoefficient in range(0, len(currentCoefficients)):
                # NOTE(review): "currentCoefficients==0" compares the whole
                # coefficient matrix (a list) against 0 and is therefore always
                # False, so this branch never runs; it was presumably meant to
                # be "currentCoefficient==0" (the intercept).  As written this
                # is harmless, because the else-branch applies the same sign
                # check to the even index 0 — confirm intent before changing.
                if ((currentCoefficients==0) and (currentCoefficients[currentCoefficient][0]<0)):
                    isComplyingWithGaussCoefficientsSigns = False
                else:
                    #if (((currentCoefficient%2)!=0) and (currentCoefficients[currentCoefficient][0]>0)):
                    #    isComplyingWithGaussCoefficientsSigns = False
                    if (((currentCoefficient%2)==0) and (currentCoefficients[currentCoefficient][0]<0)):
                        isComplyingWithGaussCoefficientsSigns = False
            if (isComplyingWithGaussCoefficientsSigns == True):
                # We determine the accuracy of the obtained coefficients.
                # The exponent is rebuilt term by term: for each independent
                # variable the polynomial order cycles 1..orderOfThePolynomial.
                predictedData = []
                orderOfThePolynomial = 2
                numberOfIndependentVariables = (len(currentCoefficients)-1)
                for row in range(0, numberOfRows):
                    temporalRow = []
                    actualIc = currentCoefficients[0][0]
                    currentOrderOfThePolynomial = 1
                    currentVariable = 0
                    for currentIndependentVariable in range(0, numberOfIndependentVariables):
                        if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
                            currentOrderOfThePolynomial = 1
                            currentVariable = currentVariable + 1
                        actualIc = actualIc + currentCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
                        currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
                    temporalRow.append(math.exp(-(actualIc)))
                    predictedData.append(temporalRow)
                predictionAcurracy = 0
                numberOfDataPoints = numberOfRows
                for row in range(0, numberOfDataPoints):
                    n2 = modifiedSamplesList_y[row][0]
                    n1 = predictedData[row][0]
                    # Negative-class targets (0.001) get credit for any small
                    # prediction; otherwise use relative error, floored at 0.
                    if ((n1<0.2) and (n2<0.051)):
                        newAcurracyValueToAdd = 1-n1
                    else:
                        newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                    if (newAcurracyValueToAdd < 0):
                        newAcurracyValueToAdd = 0
                    predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
                predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
                if (len(bestModelingResults) == 0):
                    # We save the first best fitting modeling result
                    bestModelingResults = []
                    bestModelingResults.append(currentCoefficients)
                    bestModelingResults.append(predictionAcurracy)
                    bestModelingResults.append(predictedData)
                    bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
                    allAccuracies = []
                    temporalRow = []
                    temporalRow.append(bestModelingResults[1])
                    temporalRow.append(bestModelingResults[0])
                    temporalRow.append(self.x_samplesList)
                    allAccuracies.append(temporalRow)
                else:
                    if (predictionAcurracy > bestModelingResults[1]):
                        bestModelingResults = []
                        bestModelingResults.append(currentCoefficients)
                        bestModelingResults.append(predictionAcurracy)
                        bestModelingResults.append(predictedData)
                        bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
                    temporalRow = []
                    temporalRow.append(predictionAcurracy)
                    temporalRow.append(currentCoefficients)
                    temporalRow.append(self.x_samplesList)
                    allAccuracies.append(temporalRow)
        # ------------------------------------------------------------------ #
        # ------------------------------------------------------------------ #
        # If we couldn't obtain a fitting model at all, try again but this time
        # without trying to find a perfect gauss form in the resulting model
        # equation.
        if (len(bestModelingResults)==0):
            # Re-evaluate every obtained model trained through the Multiple
            # Polynomial Regression but this time determining the best fitting
            # model by recalculating each of their accuracies but this time with
            # the right math equation, which would be the gaussian function.
            bestModelingResults = []
            for currentModelingResults in range(0, len(allModeledAccuracies)):
                currentCoefficients = allModeledAccuracies[currentModelingResults][1]
                # We determine the accuracy of the obtained coefficients
                # (same exponent reconstruction as in the strict pass above).
                predictedData = []
                orderOfThePolynomial = 2
                numberOfIndependentVariables = (len(currentCoefficients)-1)
                for row in range(0, numberOfRows):
                    temporalRow = []
                    actualIc = currentCoefficients[0][0]
                    currentOrderOfThePolynomial = 1
                    currentVariable = 0
                    for currentIndependentVariable in range(0, numberOfIndependentVariables):
                        if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
                            currentOrderOfThePolynomial = 1
                            currentVariable = currentVariable + 1
                        actualIc = actualIc + currentCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
                        currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
                    temporalRow.append(math.exp(-(actualIc)))
                    predictedData.append(temporalRow)
                predictionAcurracy = 0
                numberOfDataPoints = numberOfRows
                for row in range(0, numberOfDataPoints):
                    n2 = modifiedSamplesList_y[row][0]
                    n1 = predictedData[row][0]
                    if ((n1<0.2) and (n2<0.051)):
                        newAcurracyValueToAdd = 1-n1
                    else:
                        newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                    if (newAcurracyValueToAdd < 0):
                        newAcurracyValueToAdd = 0
                    predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
                predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
                if (len(bestModelingResults) == 0):
                    # We save the first best fitting modeling result
                    bestModelingResults = []
                    bestModelingResults.append(currentCoefficients)
                    bestModelingResults.append(predictionAcurracy)
                    bestModelingResults.append(predictedData)
                    bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
                    allAccuracies = []
                    temporalRow = []
                    temporalRow.append(bestModelingResults[1])
                    temporalRow.append(bestModelingResults[0])
                    temporalRow.append(self.x_samplesList)
                    allAccuracies.append(temporalRow)
                else:
                    if (predictionAcurracy > bestModelingResults[1]):
                        bestModelingResults = []
                        bestModelingResults.append(currentCoefficients)
                        bestModelingResults.append(predictionAcurracy)
                        bestModelingResults.append(predictedData)
                        bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
                    temporalRow = []
                    temporalRow.append(predictionAcurracy)
                    temporalRow.append(currentCoefficients)
                    temporalRow.append(self.x_samplesList)
                    allAccuracies.append(temporalRow)
            # Last resort: return an all-zero model so the caller always gets
            # the documented result structure back.
            if (len(bestModelingResults)==0):
                # We save the first best fitting modeling result
                # NOTE(review): both fallback loops below append the SAME
                # temporalRow list object repeatedly, so every row of
                # currentCoefficients / predictedData aliases one growing list
                # of zeros — presumably intended to be fresh rows per append;
                # confirm before relying on the shape of this fallback output.
                bestModelingResults = []
                temporalRow = []
                currentCoefficients = []
                for row in range(0, len(allModeledAccuracies[0][1])):
                    temporalRow.append(0)
                    currentCoefficients.append(temporalRow)
                temporalRow = []
                predictedData = []
                for row in range(0, numberOfRows):
                    temporalRow.append(0)
                    predictedData.append(temporalRow)
                bestModelingResults.append(currentCoefficients)
                bestModelingResults.append(0)
                bestModelingResults.append(predictedData)
                bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
                allAccuracies = []
                temporalRow = []
                temporalRow.append(bestModelingResults[1])
                temporalRow.append(bestModelingResults[0])
                temporalRow.append(self.x_samplesList)
                allAccuracies.append(temporalRow)
        # We include all the reports of all the models studied to the reporting
        # variable that contains the report of the best fitting model and we
        # then return it
        bestModelingResults.append(allAccuracies)
        return bestModelingResults
"""
getLinearLogisticRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Logistic Regression model to be able
to predict a classification problem that can have any number of
independent variables (x).
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[4.395207586412653], [5.985854141495452], [-4.395207586412653]]
accuracyFromTraining =
80.02122762886552
predictedData =
[[0.012185988957723588],
[0.05707820342364075],
[0.22900916243958236],
[0.5930846789223594],
[0.8773292738274195],
[0.9722944298625625],
[0.9942264149220237],
[0.9988179452639562],
[0.9997588776328182],
[0.9999508513195541]]
coefficientDistribution =
'Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getLinearLogisticRegression(self, evtfbmip=True):
from . import MortrackML_Library as mSL
import math
getOptimizedRegression = evtfbmip
numberOfRows = len(self.y_samplesList)
matrix_x = self.x_samplesList
modifiedSamplesList_y = []
for row in range(0, numberOfRows):
temporalRow = []
if ((self.y_samplesList[row][0]!=1) and (self.y_samplesList[row][0]!=0)):
raise Exception('ERROR: One of the dependent (y) data points doesnt have a 1 or a 0 as value.')
if (self.y_samplesList[row][0] == 1):
temporalRow.append(0.999)
if (self.y_samplesList[row][0] == 0):
temporalRow.append(0.001)
modifiedSamplesList_y.append(temporalRow)
matrix_y = []
for row in range(0, numberOfRows):
temporalRow = []
temporalRow.append(math.log(modifiedSamplesList_y[row][0]/(1-modifiedSamplesList_y[row][0])))
matrix_y.append(temporalRow)
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getMultipleLinearRegression(evtfbmip = getOptimizedRegression)
coefficients = modelingResults[0]
# We determine the accuracy of the obtained coefficientsfor the
# Probability Equation of the Logistic Regression Equation
predictedData = []
numberOfIndependentVariables = len(matrix_x[0])
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = coefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*matrix_x[row][currentIndependentVariable]
actualIc = math.exp(actualIc)
actualIc = actualIc/(1+actualIc)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = numberOfRows
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n2 == 0):
n2 = 0.001
if (n1 < 0.2):
newAcurracyValueToAdd = 1-n1
else:
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
else:
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(coefficients)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getLinearRegression(isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model to predict the behavior of a dataset through
a regular Linear Regression model. Note that this method can only solve
regression problems that have 1 independent variable (x).
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[8.470909090909096], [1.0242424242424237]]
accuracyFromTraining =
97.05959379759686
predictedData =
[[8.470909090909096],
[9.49515151515152],
[10.519393939393943],
[11.543636363636367],
[12.56787878787879],
[13.592121212121214],
[14.616363636363639],
[15.640606060606062],
[16.664848484848484],
[17.689090909090908]]
coefficientDistribution =
'Coefficients distribution is as follows: y = b + m*x'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getLinearRegression(self, isClassification=True):
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
x_samples = matrixMath.getTransposedMatrix(self.x_samplesList)
y_samples = matrixMath.getTransposedMatrix(self.y_samplesList)
x_length = len(x_samples[0])
y_length = len(y_samples[0])
if x_length != y_length:
raise Exception('Dependent Variable has a different vector size than Independent Variable')
x_mean = 0
x_squared_mean = 0
y_mean = 0
xy_mean = 0
for n in range (0, x_length):
x_mean += x_samples[0][n]
x_squared_mean += x_samples[0][n]*x_samples[0][n]
y_mean += y_samples[0][n]
xy_mean += x_samples[0][n]*y_samples[0][n]
x_mean = x_mean/x_length
x_squared_mean = x_squared_mean/x_length
y_mean = y_mean/y_length
xy_mean = xy_mean/x_length
m = ( (x_mean*y_mean - xy_mean) / (x_mean**2 - x_squared_mean) )
# m = ( (mean(xs)*mean(ys) - mean(xs*ys)) / (mean(xs)*mean(xs) - mean(xs*xs)) )
b = y_mean - m*x_mean
matrix_b = [[b], [m]]
# We determine the accuracy of the obtained coefficients
predictedData = []
bothMatrixRowLength = len(self.y_samplesList)
for row in range(0, bothMatrixRowLength):
temporalRow = []
actualIc = matrix_b[0][0] + matrix_b[1][0]*self.x_samplesList[row][0]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = bothMatrixRowLength
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = b + m*x")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getMultipleLinearRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has any number
of independent variables (x) through the Multiple Linear Regression method.
EXAMPLE CODE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[36.094678333151364], [1.030512601856226], [-1.8696429022156238], [0]]
accuracyFromTraining =
94.91286851439088
predictedData =
[[27.97866287863839],
[32.47405344687403],
[26.780769909063693],
[38.27922426742052],
[15.633130663659042],
[26.414492729454558],
[27.942743988094456],
[26.30423186956247],
[32.03534812093171],
[26.162019574015964],
[37.5240060242906],
[32.03387415343133],
[27.937442374564142]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getMultipleLinearRegression(self, evtfbmip = False, isClassification=True):
# We import the libraries we want to use and we create the class we
# use from it
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
# We define the variables to use within our code
matrix_x = self.x_samplesList
matrix_y = self.y_samplesList
rowLengthOfBothMatrixes = len(matrix_y)
numberOfIndependentVariables = len(matrix_x[0])
# ----- WE GET THE FIRST MODEL EVALUATION RESULTS ----- #
# MATRIX X MATHEMATICAL PROCEDURE to create a matrix that contains
# the x values of the following equation we want to solve and in that
# same variable formation:
# y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn
currentMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
temporalRow.append(1)
for currentIndependentVariable in range(0, numberOfIndependentVariables):
temporalRow.append(matrix_x[row][currentIndependentVariable])
currentMatrix_x.append(temporalRow)
originalMatrix_x = currentMatrix_x
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# We determine the accuracy of the obtained coefficients
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = matrix_b[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + matrix_b[currentIndependentVariable+1][0]*matrix_x[row][currentIndependentVariable]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(originalMatrix_x)
allAccuracies.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS ----- #
# We define a variable to save the search patterns in original matrix x
from .MortrackML_Library import Combinations
possibleCombinations = []
for n in range (0, len(originalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
searchPatterns.pop(0) # We remove the first one because we already did it
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(originalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(originalMatrix_x[0])):
for column in range(0, len(originalMatrix_x[0])):
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == column):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(originalMatrix_x[0])):
trueRowOfCoefficient = searchPatterns[currentSearchPattern][row]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = currentMatrix_b[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn")
if (evtfbmip == True):
# ----------------------------------------------------------------------------------------------- #
# ----- We now get all possible combinations/permutations with the elements of our equation ----- #
# ----------------------------------------------------------------------------------------------- #
customizedPermutations = combinations.getCustomizedPermutationList()
customizedPermutations.pop(0) # We remove the null value
customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
for actualPermutation in range(0, len(customizedPermutations)):
newOriginalMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
for column in range(0, len(customizedPermutations[actualPermutation])):
temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
newOriginalMatrix_x.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
# We define a variable to save the search patterns in original matrix x
possibleCombinations = []
for n in range (0, len(newOriginalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
for column in range(0, len(newOriginalMatrix_x[0])):
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == column):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(newOriginalMatrix_x[0])):
trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
newNumberOfIndependentVariables = len(currentMatrix_x[0])
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = currentMatrix_b[0][0]
for currentIndependentVariable in range(0, (newNumberOfIndependentVariables-1)):
actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2")
# We include all the reports of all the models studied to the reporting
# variable that contains the report of the best fitting model and we
# then return it
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getPolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has only 1
independent variable (x) in it, through a polynomial regression solution.
EXAMPLE CODE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[3.468869185343018e-08],
[1.5123521825664843e-07],
[2.2104758041867345e-07],
[1.0817080022072073e-07]]
accuracyFromTraining =
99.99999615014885
predictedData =
[[3.4769003219065136e-11],
[7.199670288280337e-11],
[1.597970024878988e-10],
[3.792980021998557e-10]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getPolynomialRegression(self, orderOfThePolynomial, evtfbmip=False, isClassification=True):
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
x_samples = matrixMath.getTransposedMatrix(self.x_samplesList)[0]
y_samples = matrixMath.getTransposedMatrix(self.y_samplesList)[0]
dataLength = len(y_samples)
matrixLength = orderOfThePolynomial+1
matrix_A = []
# MATRIX A MATHEMATICAL PROCEDURE
for n in range(0, matrixLength):
temporalRow = []
for i in range(0, matrixLength):
# Math process for Matrix_A's Row 1
if ((n==0) and (i==0)):
temporalRow.append(dataLength)
if ((n==0) and (i!=0)):
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
xMultiplicationsResult = 1
for w in range(0, i):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
# Math process for Matrix_A's Row 2 and above
if (n!=0):
if (i==0):
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
additionalMultiplications = n-1
if (additionalMultiplications < 0):
additionalMultiplications = 0
xMultiplicationsResult = 1
for w in range(0, (i+1+additionalMultiplications)):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
else:
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
additionalMultiplications = n-1
if (additionalMultiplications < 0):
additionalMultiplications = 0
xMultiplicationsResult = 1
for w in range(0, (i+1+additionalMultiplications)):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
matrix_A.append(temporalRow)
# MATRIX g MATHEMATICAL PROCEDURE
matrix_g = []
for n in range(0, matrixLength):
temporalRow = []
temporalSum = 0
for i in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
xMultiplicationsResult = 1
for w in range(0, n):
xMultiplicationsResult = xMultiplicationsResult*x_samples[i]
temporalSum = temporalSum + xMultiplicationsResult*y_samples[i]
temporalRow.append(temporalSum)
matrix_g.append(temporalRow)
# GET THE INVERSE OF MATRIX A
matrixMath = mLAL.MatrixMath()
inverseMatrix_A = matrixMath.getInverse(matrix_A)
# MULTIPLY INVERSE OF MATRIX A WITH MATRIX g
matrix_b = matrixMath.getMultiplication(inverseMatrix_A, matrix_g)
# We determine the accuracy of the obtained coefficients
predictedData = []
bothMatrixRowLength = len(y_samples)
numberOfIndependentVariables = len(matrix_b)-1
for currentDataPoint in range(0, bothMatrixRowLength):
temporalRow = []
actualIc = matrix_b[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + matrix_b[currentIndependentVariable+1][0]*x_samples[currentDataPoint]**(currentIndependentVariable+1)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = bothMatrixRowLength
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
# We recreate some things to apply the Matrix method in the permutation
# section
rowLengthOfBothMatrixes = len(self.y_samplesList)
currentMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
temporalRow.append(1)
for currentIndependentVariable in range(0, numberOfIndependentVariables):
temporalRow.append(self.x_samplesList[row][0]**(currentIndependentVariable+1))
currentMatrix_x.append(temporalRow)
originalMatrix_x = currentMatrix_x
from .MortrackML_Library import Combinations
possibleCombinations = []
for n in range (0, len(originalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
if (evtfbmip == True):
# ----------------------------------------------------------------------------------------------- #
# ----- We now get all possible combinations/permutations with the elements of our equation ----- #
# ----------------------------------------------------------------------------------------------- #
customizedPermutations = combinations.getCustomizedPermutationList()
customizedPermutations.pop(0) # We remove the null value
customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
for actualPermutation in range(0, len(customizedPermutations)):
newOriginalMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
for column in range(0, len(customizedPermutations[actualPermutation])):
temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
newOriginalMatrix_x.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
# We define a variable to save the search patterns in original matrix x
possibleCombinations = []
for n in range (0, len(newOriginalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
for column in range(0, len(newOriginalMatrix_x[0])):
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == column):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = self.y_samplesList
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(newOriginalMatrix_x[0])):
trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
newNumberOfIndependentVariables = len(currentMatrix_x[0])
predictedData = []
for row in range(0, len(self.y_samplesList)):
temporalRow = []
actualIc = currentMatrix_b[0][0]
for currentIndependentVariable in range(0, (newNumberOfIndependentVariables-1)):
actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][0]**(currentIndependentVariable+1)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(self.y_samplesList)
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n")
# We include all the reports of all the models studied to the reporting
# variable that contains the report of the best fitting model and we
# then return it
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getMultiplePolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method returns the best fitting model of a dataset to predict its
behavior through a Multiple Polynomial Regression that may have any number
of independent variables (x). This method gets a model by through the
following equation format:
y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[-1.745717777706403e-08],
[0],
[0.07581354676648289],
[-0.00104662847289827],
[3.942075523087618e-06],
[-14.202436859894078],
[0.670002091817878],
[-0.009761974914994198],
[-5.8006065221068606e-15]]
accuracyFromTraining =
91.33822971744071
predictedData =
[[14.401799310251064],
[10.481799480368835],
[7.578466505722503],
[13.96195814877683],
[10.041958318894615],
[7.1386253442482825],
[15.490847097061135],
[11.57084726717892],
[8.667514292532587],
[18.073281006823265],
[14.15328117694105],
[11.249948202294718],
[20.794074729782523],
[16.874074899900307],
[13.970741925253975],
[22.73804311765818],
[18.818043287775964],
[15.914710313129632]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getMultiplePolynomialRegression(self, orderOfThePolynomial, evtfbmip=False, isClassification=True):
    """Fit a multiple polynomial regression through the normal equations.

    Builds a design matrix whose columns are
    [1, x1, x1^2, ..., x1^n, x2, x2^2, ..., xm^n] (n = orderOfThePolynomial),
    solves b = (X^T X)^-1 (X^T y) with the project's MatrixMath helpers,
    and then re-solves the system for every permutation of the design
    matrix columns (and, when evtfbmip is True, for every column subset
    as well), keeping the coefficient arrangement that scores the highest
    training accuracy.

    Arguments:
        orderOfThePolynomial: whole number; desired order of the polynomial.
        evtfbmip: "Eliminate Variables To Find Better Model If Possible";
            when True, reduced models with some terms dropped are also
            evaluated (dropped terms keep a 0 coefficient).
        isClassification: True scores accuracy with the classification
            heuristic; False uses the relative-error regression score.

    Returns:
        A list: [coefficients matrix (matrix_b), training accuracy in
        percent, predicted data, coefficient-distribution description
        string, list of every evaluated model's
        [accuracy, coefficients, matrix x]].
    """
    # We import the libraries we want to use and we create the class we
    # use from it
    from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
    matrixMath = mLAL.MatrixMath()
    # We define the variables to use within our code
    numberOfIndependentVariables = len(self.x_samplesList[0])
    rowLengthOfBothMatrixes = len(self.y_samplesList)
    matrix_x = []
    # MATRIX X MATHEMATICAL PROCEDURE to add the 1's in the first column of
    # each row and to add the additional columns that will represent the
    # polynomials that we want to get according to the input value of
    # this method's argument "orderOfThePolynomial"
    for row in range(0, rowLengthOfBothMatrixes):
        temporalRow = []
        temporalRow.append(1)
        for actualIndependentVariable in range(0, numberOfIndependentVariables):
            xMultiplicationsResult = 1
            for actualOrder in range(0, orderOfThePolynomial):
                # Successive multiplications yield x, x^2, ..., x^n for the
                # current independent variable; each power gets its own column.
                xMultiplicationsResult = xMultiplicationsResult*self.x_samplesList[row][actualIndependentVariable]
                temporalRow.append(xMultiplicationsResult)
        matrix_x.append(temporalRow)
    originalMatrix_x = matrix_x
    # We get the Transposed matrix of matrix X. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = matrix_x
    transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
    # WE GET MATRIX A. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = transposedMatrix_X
    temporalMatrix2 = matrix_x
    matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
    # WE GET MATRIX g. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = transposedMatrix_X
    temporalMatrix2 = self.y_samplesList
    matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
    # We get inverse matrix of matrix A.
    inversedMatrix_A = matrixMath.getInverse(matrix_A)
    # We get matrix b, which will contain the coeficient values
    matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
    # We determine the accuracy of the obtained coefficients
    predictedData = []
    numberOfCoefficients = len(matrix_b)
    for row in range(0, rowLengthOfBothMatrixes):
        temporalRow = []
        actualIc = matrix_b[0][0]
        # currentOrderOfThePolynomial cycles 1..orderOfThePolynomial while
        # currentVariable advances to the next independent variable each cycle.
        currentOrderOfThePolynomial = 1
        currentVariable = 0
        for currentIndependentVariable in range(0, numberOfCoefficients-1):
            if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
                currentOrderOfThePolynomial = 1
                currentVariable = currentVariable + 1
            actualIc = actualIc + matrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
            currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
        temporalRow.append(actualIc)
        predictedData.append(temporalRow)
    # Regression accuracy: mean of (1 - relative error) per sample, floored
    # at 0; classification accuracy uses the ratio heuristic below.
    predictionAcurracy = 0
    numberOfDataPoints = len(self.y_samplesList)
    for row in range(0, numberOfDataPoints):
        n2 = self.y_samplesList[row][0]
        n1 = predictedData[row][0]
        if (isClassification == False):
            if (((n1*n2) != 0)):
                newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                if (newAcurracyValueToAdd < 0):
                    newAcurracyValueToAdd = 0
                predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
        if (isClassification == True):
            if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                n2 = predictedData[row][0]
                n1 = self.y_samplesList[row][0]
            if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
            if (n1==n2):
                predictionAcurracy = predictionAcurracy + 1
    predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
    # We save the current the modeling results
    bestModelingResults = []
    bestModelingResults.append(matrix_b)
    bestModelingResults.append(predictionAcurracy)
    bestModelingResults.append(predictedData)
    bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n")
    allAccuracies = []
    temporalRow = []
    temporalRow.append(bestModelingResults[1])
    temporalRow.append(bestModelingResults[0])
    temporalRow.append(originalMatrix_x)
    allAccuracies.append(temporalRow)
    # ----- WE START SEARCHING FOR THE BEST MODELING RESULTS ----- #
    # We define a variable to save the search patterns in original matrix x
    from .MortrackML_Library import Combinations
    possibleCombinations = []
    for n in range (0, len(originalMatrix_x[0])):
        possibleCombinations.append(n)
    combinations = Combinations(possibleCombinations)
    searchPatterns = combinations.getPositionCombinationsList()
    searchPatterns.pop(0) # We remove the first one because we already did it
    # We start to search for the coefficients that give us the best accuracy
    for currentSearchPattern in range(0, len(searchPatterns)):
        currentMatrix_x = [ [ 0 for i in range(len(originalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
        # We assign the current distribution that we want to study of the
        # variables of the matrix x, to evaluate its resulting regression
        # coefficients
        for currentColumnOfMatrix_x in range(0, len(originalMatrix_x[0])):
            for column in range(0, len(originalMatrix_x[0])):
                if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == column):
                    for row in range(0, rowLengthOfBothMatrixes):
                        currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
        # We get the Transposed matrix of matrix X. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = currentMatrix_x
        transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
        # WE GET MATRIX A. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = transposedMatrix_X
        temporalMatrix2 = currentMatrix_x
        matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
        # WE GET MATRIX g. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = transposedMatrix_X
        temporalMatrix2 = self.y_samplesList
        matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
        # We get inverse matrix of matrix A.
        inversedMatrix_A = matrixMath.getInverse(matrix_A)
        # We get matrix b, which will contain the coeficient values
        matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
        # ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
        # We re-arrange the obtained coefficients to then evaluate this
        # model
        currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
        for row in range(0, len(originalMatrix_x[0])):
            trueRowOfCoefficient = searchPatterns[currentSearchPattern][row]
            currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
        # We obtain the predicted data through the current obtained
        # coefficients
        numberOfCoefficients = len(currentMatrix_b)
        predictedData = []
        for row in range(0, len(self.y_samplesList)):
            temporalRow = []
            actualIc = currentMatrix_b[0][0]
            currentOrderOfThePolynomial = 1
            currentVariable = 0
            for currentIndependentVariable in range(0, numberOfCoefficients-1):
                if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
                    currentOrderOfThePolynomial = 1
                    currentVariable = currentVariable + 1
                actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
                currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
            temporalRow.append(actualIc)
            predictedData.append(temporalRow)
        predictionAcurracy = 0
        numberOfDataPoints = len(self.y_samplesList)
        for row in range(0, numberOfDataPoints):
            n2 = self.y_samplesList[row][0]
            n1 = predictedData[row][0]
            if (isClassification == False):
                if (((n1*n2) != 0)):
                    newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                    if (newAcurracyValueToAdd < 0):
                        newAcurracyValueToAdd = 0
                    predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
            if (isClassification == True):
                if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                    n2 = predictedData[row][0]
                    n1 = self.y_samplesList[row][0]
                if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                    predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
                if (n1==n2):
                    predictionAcurracy = predictionAcurracy + 1
        predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
        temporalRow = []
        temporalRow.append(predictionAcurracy)
        temporalRow.append(currentMatrix_b)
        temporalRow.append(currentMatrix_x)
        allAccuracies.append(temporalRow)
        # We save the current the modeling results if they are better than
        # the actual best
        currentBestAccuracy = bestModelingResults[1]
        if (predictionAcurracy > currentBestAccuracy):
            bestModelingResults = []
            bestModelingResults.append(currentMatrix_b)
            bestModelingResults.append(predictionAcurracy)
            bestModelingResults.append(predictedData)
            bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n")
    if (evtfbmip == True):
        # ----------------------------------------------------------------------------------------------- #
        # ----- We now get all possible combinations/permutations with the elements of our equation ----- #
        # ----------------------------------------------------------------------------------------------- #
        customizedPermutations = combinations.getCustomizedPermutationList()
        customizedPermutations.pop(0) # We remove the null value
        customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
        for actualPermutation in range(0, len(customizedPermutations)):
            # newOriginalMatrix_x keeps only the columns selected by the
            # current permutation (the reduced model being evaluated).
            newOriginalMatrix_x = []
            for row in range(0, rowLengthOfBothMatrixes):
                temporalRow = []
                for column in range(0, len(customizedPermutations[actualPermutation])):
                    temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
                newOriginalMatrix_x.append(temporalRow)
            # ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
            # We define a variable to save the search patterns in original matrix x
            possibleCombinations = []
            for n in range (0, len(newOriginalMatrix_x[0])):
                possibleCombinations.append(n)
            combinations = Combinations(possibleCombinations)
            searchPatterns = combinations.getPositionCombinationsList()
            # We start to search for the coefficients that give us the best accuracy
            for currentSearchPattern in range(0, len(searchPatterns)):
                currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
                # We assign the current distribution that we want to study of the
                # variables of the matrix x, to evaluate its resulting regression
                # coefficients
                for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
                    for column in range(0, len(newOriginalMatrix_x[0])):
                        if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == column):
                            for row in range(0, rowLengthOfBothMatrixes):
                                currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
                # We get the Transposed matrix of matrix X. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = currentMatrix_x
                transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
                # WE GET MATRIX A. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = transposedMatrix_X
                temporalMatrix2 = currentMatrix_x
                matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
                # WE GET MATRIX g. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = transposedMatrix_X
                temporalMatrix2 = self.y_samplesList
                matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
                # We get inverse matrix of matrix A.
                inversedMatrix_A = matrixMath.getInverse(matrix_A)
                # We get matrix b, which will contain the coeficient values
                matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
                # ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
                # We re-arrange the obtained coefficients to then evaluate this
                # model
                # NOTE: rows of currentMatrix_b that belong to eliminated
                # variables stay at 0, so they drop out of the prediction.
                currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
                for row in range(0, len(newOriginalMatrix_x[0])):
                    trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
                    currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
                # We obtain the predicted data through the current obtained
                # coefficients
                predictedData = []
                numberOfCoefficients = len(currentMatrix_b)
                for row in range(0, len(self.y_samplesList)):
                    temporalRow = []
                    actualIc = currentMatrix_b[0][0]
                    currentOrderOfThePolynomial = 1
                    currentVariable = 0
                    for currentIndependentVariable in range(0, numberOfCoefficients-1):
                        if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
                            currentOrderOfThePolynomial = 1
                            currentVariable = currentVariable + 1
                        actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
                        currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
                    temporalRow.append(actualIc)
                    predictedData.append(temporalRow)
                predictionAcurracy = 0
                numberOfDataPoints = len(self.y_samplesList)
                for row in range(0, numberOfDataPoints):
                    n2 = self.y_samplesList[row][0]
                    n1 = predictedData[row][0]
                    if (isClassification == False):
                        if (((n1*n2) != 0)):
                            newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                            if (newAcurracyValueToAdd < 0):
                                newAcurracyValueToAdd = 0
                            predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
                    if (isClassification == True):
                        if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                            n2 = predictedData[row][0]
                            n1 = self.y_samplesList[row][0]
                        if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                            predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
                        if (n1==n2):
                            predictionAcurracy = predictionAcurracy + 1
                predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
                temporalRow = []
                temporalRow.append(predictionAcurracy)
                temporalRow.append(currentMatrix_b)
                temporalRow.append(currentMatrix_x)
                allAccuracies.append(temporalRow)
                # We save the current the modeling results if they are better than
                # the actual best
                currentBestAccuracy = bestModelingResults[1]
                if (predictionAcurracy > currentBestAccuracy):
                    bestModelingResults = []
                    bestModelingResults.append(currentMatrix_b)
                    bestModelingResults.append(predictionAcurracy)
                    bestModelingResults.append(predictedData)
                    bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n")
    # Alongside the information of the best model obtained, we add the
    # modeled information of ALL the models obtained to the variable that
    # we will return in this method
    bestModelingResults.append(allAccuracies)
    return bestModelingResults
"""
getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 2nd order model when
using specifically 2 independent variables and were the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
IMPORTANT NOTE: While the book "Probabilidad y estadistica para ingenieria
& ciencias (Walpole, Myers, Myers, Ye)" describes a model whose accuracy is
89.936% through finding a solution using the same model equation as used in
this method, I was able to achieve an algorithm that finds an even
better solution, where I was able to get an accuracy of 90.57% (see code
example).
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[40.36892063492269],
[-0.29913333333337394],
[0.0008133333333341963],
[-1.2861238095233603],
[0.047676190476181546],
[0]]
accuracyFromTraining =
90.56977726188016
predictedData =
[[13.944206349214937],
[10.0242063492177],
[7.120873015888202],
[14.602587301596287],
[10.68258730159905],
[7.779253968269552],
[15.856920634929907],
[11.936920634932669],
[9.033587301603172],
[17.707206349215795],
[13.787206349218557],
[10.88387301588906],
[20.153444444453953],
[16.233444444456715],
[13.330111111127216],
[23.19563492064438],
[19.275634920647143],
[16.372301587317644]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getCustomizedMultipleSecondOrderPolynomialRegression(self, evtfbmip=False, isClassification=True):
    """Fit the customized 2-variable second-order model through the normal equations.

    Solves the fixed-form equation
        y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
    for exactly two independent variables via b = (X^T X)^-1 (X^T y),
    then re-solves it for every permutation of the six design-matrix
    columns (and, when evtfbmip is True, for every column subset as
    well), keeping the coefficient arrangement that scores the highest
    training accuracy.

    Arguments:
        evtfbmip: "Eliminate Variables To Find Better Model If Possible";
            when True, reduced models with some terms dropped are also
            evaluated (dropped terms keep a 0 coefficient).
        isClassification: True scores accuracy with the classification
            heuristic; False uses the relative-error regression score.

    Returns:
        A list: [coefficients matrix (matrix_b), training accuracy in
        percent, predicted data, coefficient-distribution description
        string, list of every evaluated model's
        [accuracy, coefficients, matrix x]].
    """
    # We import the libraries we want to use and we create the class we
    # use from it
    from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
    matrixMath = mLAL.MatrixMath()
    # We define the variables to use within our code
    matrix_y =self.y_samplesList
    rowLengthOfBothMatrixes = len(matrix_y)
    # Column indexes of the two independent variables in x_samplesList.
    x1 = 0
    x2 = 1
    # ----- WE GET THE FIRST MODEL EVALUATION RESULTS ----- #
    # MATRIX X MATHEMATICAL PROCEDURE to create a matrix that contains
    # the x values of the following equation we want to solve and in that
    # same variable formation:
    # y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
    currentMatrix_x = []
    for row in range(0, rowLengthOfBothMatrixes):
        temporalRow = []
        temporalRow.append(1)
        temporalRow.append(self.x_samplesList[row][x1])
        temporalRow.append(self.x_samplesList[row][x1]**2)
        temporalRow.append(self.x_samplesList[row][x2])
        temporalRow.append(self.x_samplesList[row][x2]**2)
        temporalRow.append(self.x_samplesList[row][x1]*self.x_samplesList[row][x2])
        currentMatrix_x.append(temporalRow)
    originalMatrix_x = currentMatrix_x
    # We get the Transposed matrix of matrix X. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = currentMatrix_x
    transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
    # WE GET MATRIX A. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = transposedMatrix_X
    temporalMatrix2 = currentMatrix_x
    matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
    # WE GET MATRIX g. NOTE: We create a temporal
    # variable to save matrix x because remember that in python, children
    # and parent inheritance is ignored when using clases
    temporalMatrix1 = transposedMatrix_X
    temporalMatrix2 = matrix_y
    matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
    # We get inverse matrix of matrix A.
    inversedMatrix_A = matrixMath.getInverse(matrix_A)
    # We get matrix b, which will contain the coeficient values
    matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
    # We determine the accuracy of the obtained coefficients
    predictedData = []
    for row in range(0, len(matrix_y)):
        temporalRow = []
        # Evaluate y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
        # for the current sample.
        actualIc = matrix_b[0][0] + matrix_b[1][0]*self.x_samplesList[row][0] + matrix_b[2][0]*self.x_samplesList[row][0]**2 + matrix_b[3][0]*self.x_samplesList[row][1] + matrix_b[4][0]*self.x_samplesList[row][1]**2 + matrix_b[5][0]*self.x_samplesList[row][0]*self.x_samplesList[row][1]
        temporalRow.append(actualIc)
        predictedData.append(temporalRow)
    # Regression accuracy: mean of (1 - relative error) per sample, floored
    # at 0; classification accuracy uses the ratio heuristic below.
    predictionAcurracy = 0
    numberOfDataPoints = len(matrix_y)
    for row in range(0, numberOfDataPoints):
        n2 = matrix_y[row][0]
        n1 = predictedData[row][0]
        if (isClassification == False):
            if (((n1*n2) != 0)):
                newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                if (newAcurracyValueToAdd < 0):
                    newAcurracyValueToAdd = 0
                predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
        if (isClassification == True):
            if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                n2 = predictedData[row][0]
                n1 = matrix_y[row][0]
            if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
            if (n1==n2):
                predictionAcurracy = predictionAcurracy + 1
    predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
    # We save the current the modeling results
    bestModelingResults = []
    bestModelingResults.append(matrix_b)
    bestModelingResults.append(predictionAcurracy)
    bestModelingResults.append(predictedData)
    bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2")
    allAccuracies = []
    temporalRow = []
    temporalRow.append(bestModelingResults[1])
    temporalRow.append(bestModelingResults[0])
    temporalRow.append(originalMatrix_x)
    allAccuracies.append(temporalRow)
    # ----- WE START SEARCHING FOR THE BEST MODELING RESULTS ----- #
    # We define a variable to save the search patterns in original matrix x
    from .MortrackML_Library import Combinations
    possibleCombinations = []
    for n in range (0, len(originalMatrix_x[0])):
        possibleCombinations.append(n)
    combinations = Combinations(possibleCombinations)
    searchPatterns = combinations.getPositionCombinationsList()
    searchPatterns.pop(0) # We remove the first one because we already did it
    # We start to search for the coefficients that give us the best accuracy
    for currentSearchPattern in range(0, len(searchPatterns)):
        currentMatrix_x = [ [ 0 for i in range(len(originalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
        # We assign the current distribution that we want to study of the
        # variables of the matrix x, to evaluate its resulting regression
        # coefficients
        for currentColumnOfMatrix_x in range(0, len(originalMatrix_x[0])):
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 0):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][0]
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 1):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][1]
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 2):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][2]
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 3):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][3]
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 4):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][4]
            if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 5):
                for row in range(0, rowLengthOfBothMatrixes):
                    currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][5]
        # We get the Transposed matrix of matrix X. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = currentMatrix_x
        transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
        # WE GET MATRIX A. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = transposedMatrix_X
        temporalMatrix2 = currentMatrix_x
        matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
        # WE GET MATRIX g. NOTE: We create a temporal
        # variable to save matrix x because remember that in python, children
        # and parent inheritance is ignored when using clases
        temporalMatrix1 = transposedMatrix_X
        temporalMatrix2 = matrix_y
        matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
        # We get inverse matrix of matrix A.
        inversedMatrix_A = matrixMath.getInverse(matrix_A)
        # We get matrix b, which will contain the coeficient values
        matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
        # ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
        # We re-arrange the obtained coefficients to then evaluate this
        # model
        currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
        for row in range(0, len(originalMatrix_x[0])):
            trueRowOfCoefficient = searchPatterns[currentSearchPattern][row]
            currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
        # We obtain the predicted data through the current obtained
        # coefficients
        predictedData = []
        for row in range(0, len(matrix_y)):
            temporalRow = []
            actualIc = currentMatrix_b[0][0] + currentMatrix_b[1][0]*self.x_samplesList[row][0] + currentMatrix_b[2][0]*self.x_samplesList[row][0]**2 + currentMatrix_b[3][0]*self.x_samplesList[row][1] + currentMatrix_b[4][0]*self.x_samplesList[row][1]**2 + currentMatrix_b[5][0]*self.x_samplesList[row][0]*self.x_samplesList[row][1]
            temporalRow.append(actualIc)
            predictedData.append(temporalRow)
        predictionAcurracy = 0
        numberOfDataPoints = len(matrix_y)
        for row in range(0, numberOfDataPoints):
            n2 = matrix_y[row][0]
            n1 = predictedData[row][0]
            if (isClassification == False):
                if (((n1*n2) != 0)):
                    newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                    if (newAcurracyValueToAdd < 0):
                        newAcurracyValueToAdd = 0
                    predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
            if (isClassification == True):
                if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                    n2 = predictedData[row][0]
                    n1 = matrix_y[row][0]
                if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                    predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
                if (n1==n2):
                    predictionAcurracy = predictionAcurracy + 1
        predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
        temporalRow = []
        temporalRow.append(predictionAcurracy)
        temporalRow.append(currentMatrix_b)
        temporalRow.append(currentMatrix_x)
        allAccuracies.append(temporalRow)
        # We save the current the modeling results if they are better than
        # the actual best
        currentBestAccuracy = bestModelingResults[1]
        if (predictionAcurracy > currentBestAccuracy):
            bestModelingResults = []
            bestModelingResults.append(currentMatrix_b)
            bestModelingResults.append(predictionAcurracy)
            bestModelingResults.append(predictedData)
            bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2")
    if (evtfbmip == True):
        # ----------------------------------------------------------------------------------------------- #
        # ----- We now get all possible combinations/permutations with the elements of our equation ----- #
        # ----------------------------------------------------------------------------------------------- #
        customizedPermutations = combinations.getCustomizedPermutationList()
        customizedPermutations.pop(0) # We remove the null value
        customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
        for actualPermutation in range(0, len(customizedPermutations)):
            # newOriginalMatrix_x keeps only the columns selected by the
            # current permutation (the reduced model being evaluated).
            newOriginalMatrix_x = []
            for row in range(0, rowLengthOfBothMatrixes):
                temporalRow = []
                for column in range(0, len(customizedPermutations[actualPermutation])):
                    temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
                newOriginalMatrix_x.append(temporalRow)
            # ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
            # We define a variable to save the search patterns in original matrix x
            possibleCombinations = []
            for n in range (0, len(newOriginalMatrix_x[0])):
                possibleCombinations.append(n)
            combinations = Combinations(possibleCombinations)
            searchPatterns = combinations.getPositionCombinationsList()
            # We start to search for the coefficients that give us the best accuracy
            for currentSearchPattern in range(0, len(searchPatterns)):
                currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
                # We assign the current distribution that we want to study of the
                # variables of the matrix x, to evaluate its resulting regression
                # coefficients
                for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 0):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][0]
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 1):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][1]
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 2):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][2]
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 3):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][3]
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 4):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][4]
                    if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 5):
                        for row in range(0, rowLengthOfBothMatrixes):
                            currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][5]
                # We get the Transposed matrix of matrix X. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = currentMatrix_x
                transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
                # WE GET MATRIX A. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = transposedMatrix_X
                temporalMatrix2 = currentMatrix_x
                matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
                # WE GET MATRIX g. NOTE: We create a temporal
                # variable to save matrix x because remember that in python, children
                # and parent inheritance is ignored when using clases
                temporalMatrix1 = transposedMatrix_X
                temporalMatrix2 = matrix_y
                matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
                # We get inverse matrix of matrix A.
                inversedMatrix_A = matrixMath.getInverse(matrix_A)
                # We get matrix b, which will contain the coeficient values
                matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
                # ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
                # We re-arrange the obtained coefficients to then evaluate this
                # model
                # NOTE: rows of currentMatrix_b that belong to eliminated
                # variables stay at 0, so they drop out of the prediction.
                currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
                for row in range(0, len(newOriginalMatrix_x[0])):
                    trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
                    currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
                # We obtain the predicted data through the current obtained
                # coefficients
                predictedData = []
                for row in range(0, len(matrix_y)):
                    temporalRow = []
                    actualIc = currentMatrix_b[0][0] + currentMatrix_b[1][0]*self.x_samplesList[row][0] + currentMatrix_b[2][0]*self.x_samplesList[row][0]**2 + currentMatrix_b[3][0]*self.x_samplesList[row][1] + currentMatrix_b[4][0]*self.x_samplesList[row][1]**2 + currentMatrix_b[5][0]*self.x_samplesList[row][0]*self.x_samplesList[row][1]
                    temporalRow.append(actualIc)
                    predictedData.append(temporalRow)
                predictionAcurracy = 0
                numberOfDataPoints = len(matrix_y)
                for row in range(0, numberOfDataPoints):
                    n2 = matrix_y[row][0]
                    n1 = predictedData[row][0]
                    if (isClassification == False):
                        if (((n1*n2) != 0)):
                            newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
                            if (newAcurracyValueToAdd < 0):
                                newAcurracyValueToAdd = 0
                            predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
                    if (isClassification == True):
                        if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
                            n2 = predictedData[row][0]
                            n1 = matrix_y[row][0]
                        if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
                            predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
                        if (n1==n2):
                            predictionAcurracy = predictionAcurracy + 1
                predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
                temporalRow = []
                temporalRow.append(predictionAcurracy)
                temporalRow.append(currentMatrix_b)
                temporalRow.append(currentMatrix_x)
                allAccuracies.append(temporalRow)
                # We save the current the modeling results if they are better than
                # the actual best
                currentBestAccuracy = bestModelingResults[1]
                if (predictionAcurracy > currentBestAccuracy):
                    bestModelingResults = []
                    bestModelingResults.append(currentMatrix_b)
                    bestModelingResults.append(predictionAcurracy)
                    bestModelingResults.append(predictedData)
                    bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2")
    # Alongside the information of the best model obtained, we add the
    # modeled information of ALL the models obtained to the variable that
    # we will return in this method
    bestModelingResults.append(allAccuracies)
    return bestModelingResults
"""
getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 3rd order model when
using specifically 2 independent variables and were the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2
IMPORTANT NOTE: The same base algorithm used in the method
"getCustomizedMultipleSecondOrderPolynomialRegression()" was applied in
this one. This is important to mention because the algorithm i created in
that method demonstrated to be superior of that one used in the book
"Probabilidad y estadistica para ingenieria & ciencias (Walpole, Myers,
Myers, Ye)". See that method's description to see more information about
this.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[118.62284443469252],
[2.6850685669390923e-10],
[0],
[2.711111111130216e-06],
[-14.043715503707062],
[0.7156842175145357],
[-0.011482404265578339],
[-0.024609341568850862],
[0],
[0.0006459332618172914]]
accuracyFromTraining =
92.07595419629946
predictedData =
[[14.601310971885873],
[10.5735435991239],
[7.56244289303574],
[14.177873191206809],
[9.924073908458023],
[6.686941292383061],
[15.770722763127356],
[11.492745714709685],
[8.23143533296583],
[18.303384287749555],
[14.203083617980887],
[11.11944961488603],
[20.699382365175477],
[16.978612218373712],
[14.274508738245757],
[21.882241595507075],
[18.742856115990087],
[16.62013730314699]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getCustomizedMultipleThirdOrderPolynomialRegression(self, evtfbmip=False, isClassification=True):
# We import the libraries we want to use and we create the class we
# use from it
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
# We define the variables to use within our code
matrix_y =self.y_samplesList
rowLengthOfBothMatrixes = len(matrix_y)
x1 = 0
x2 = 1
# ----- WE GET THE FIRST MODEL EVALUATION RESULTS ----- #
# MATRIX X MATHEMATICAL PROCEDURE to create a matrix that contains
# the x values of the following equation we want to solve and in that
# same variable formation:
# y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2
currentMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
temporalRow.append(1)
temporalRow.append(self.x_samplesList[row][x1])
temporalRow.append(self.x_samplesList[row][x1]**2)
temporalRow.append(self.x_samplesList[row][x1]**3)
temporalRow.append(self.x_samplesList[row][x2])
temporalRow.append(self.x_samplesList[row][x2]**2)
temporalRow.append(self.x_samplesList[row][x2]**3)
temporalRow.append(self.x_samplesList[row][x1]*self.x_samplesList[row][x2])
temporalRow.append((self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2])
temporalRow.append(self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2))
currentMatrix_x.append(temporalRow)
originalMatrix_x = currentMatrix_x
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# We determine the accuracy of the obtained coefficients
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = matrix_b[0][0] + matrix_b[1][0]*self.x_samplesList[row][x1] + matrix_b[2][0]*self.x_samplesList[row][x1]**2 + matrix_b[3][0]*self.x_samplesList[row][x1]**3 + matrix_b[4][0]*self.x_samplesList[row][x2] + matrix_b[5][0]*self.x_samplesList[row][x2]**2 + matrix_b[6][0]*self.x_samplesList[row][x2]**3 + matrix_b[7][0]*self.x_samplesList[row][x1]*self.x_samplesList[row][x2] + matrix_b[8][0]*(self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2] + matrix_b[9][0]*self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(originalMatrix_x)
allAccuracies.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS ----- #
# We define a variable to save the search patterns in original matrix x
from .MortrackML_Library import Combinations
possibleCombinations = []
for n in range (0, len(originalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
searchPatterns.pop(0) # We remove the first one because we already did it
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(originalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(originalMatrix_x[0])):
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 0):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][0]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 1):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][1]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 2):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][2]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 3):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][3]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 4):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][4]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 5):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][5]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 6):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][6]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 7):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][7]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 8):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][8]
if (searchPatterns[currentSearchPattern][currentColumnOfMatrix_x] == 9):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][9]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(originalMatrix_x[0])):
trueRowOfCoefficient = searchPatterns[currentSearchPattern][row]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = currentMatrix_b[0][0] + currentMatrix_b[1][0]*self.x_samplesList[row][x1] + currentMatrix_b[2][0]*self.x_samplesList[row][x1]**2 + currentMatrix_b[3][0]*self.x_samplesList[row][x1]**3 + currentMatrix_b[4][0]*self.x_samplesList[row][x2] + currentMatrix_b[5][0]*self.x_samplesList[row][x2]**2 + currentMatrix_b[6][0]*self.x_samplesList[row][x2]**3 + currentMatrix_b[7][0]*self.x_samplesList[row][x1]*self.x_samplesList[row][x2] + currentMatrix_b[8][0]*(self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2] + currentMatrix_b[9][0]*self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2")
if (evtfbmip == True):
# ----------------------------------------------------------------------------------------------- #
# ----- We now get all possible combinations/permutations with the elements of our equation ----- #
# ----------------------------------------------------------------------------------------------- #
customizedPermutations = combinations.getCustomizedPermutationList()
customizedPermutations.pop(0) # We remove the null value
customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
for actualPermutation in range(0, len(customizedPermutations)):
newOriginalMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
for column in range(0, len(customizedPermutations[actualPermutation])):
temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
newOriginalMatrix_x.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
# We define a variable to save the search patterns in original matrix x
possibleCombinations = []
for n in range (0, len(newOriginalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 0):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][0]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 1):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][1]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 2):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][2]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 3):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][3]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 4):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][4]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 5):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][5]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 6):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][6]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 7):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][7]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 8):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][8]
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == 9):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][9]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(newOriginalMatrix_x[0])):
trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = currentMatrix_b[0][0] + currentMatrix_b[1][0]*self.x_samplesList[row][x1] + currentMatrix_b[2][0]*self.x_samplesList[row][x1]**2 + currentMatrix_b[3][0]*self.x_samplesList[row][x1]**3 + currentMatrix_b[4][0]*self.x_samplesList[row][x2] + currentMatrix_b[5][0]*self.x_samplesList[row][x2]**2 + currentMatrix_b[6][0]*self.x_samplesList[row][x2]**3 + currentMatrix_b[7][0]*self.x_samplesList[row][x1]*self.x_samplesList[row][x2] + currentMatrix_b[8][0]*(self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2] + currentMatrix_b[9][0]*self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2")
# Alongside the information of the best model obtained, we add the
# modeled information of ALL the models obtained to the variable that
# we will return in this method
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
predictLinearLogisticRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearLogisticRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[0.5],
[0.999978721536189],
[0.9999991162466249],
[0.9999999984756125],
[1.7295081461872963e-11]]
"""
def predictLinearLogisticRegression(self, coefficients):
import math
numberOfRows = len(self.x_samplesList)
# We determine the accuracy of the obtained coefficientsfor the
# Probability Equation of the Logistic Regression Equation
predictedData = []
numberOfIndependentVariables = len(self.x_samplesList[0])
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
actualIc = math.exp(actualIc)
actualIc = actualIc/(1+actualIc)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictGaussianRegression(coefficients="We give the Gaussian Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictGaussianRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[1.003006010014743e-12],
[0.09993332221727314],
[1.0046799183277663e-17],
[1.0318455659367212e-97],
[1.0083723565531913e-28]]
"""
def predictGaussianRegression(self, coefficients):
import math
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
orderOfThePolynomial = 2
numberOfIndependentVariables = (len(coefficients)-1)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfIndependentVariables):
if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(math.exp(-(actualIc)))
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictLinearRegression(coefficients="We give the Linear Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[8.470909090909096],
[12.56787878787879],
[14.616363636363639],
[18.71333333333333],
[9.49515151515152]]
"""
def predictLinearRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0] + coefficients[1][0]*self.x_samplesList[row][0]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictMultipleLinearRegression(coefficients="We give the Multiple Linear Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1,1],
[4,4,4],
[6,6,6],
[10,10,10],
[1,8,9]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultipleLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[34.22503543093558],
[32.73815713171364],
[31.059896530994866],
[27.703375329557314],
[22.168047717282477]]
"""
def predictMultipleLinearRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
numberOfIndependentVariables = len(self.x_samplesList[0])
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictPolynomialRegression(coefficients="We give the Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[3.468869185343018e-08],
[1.1099322065704926e-05],
[3.226470574414124e-05],
[0.000131822599137008],
[5.151422907494728e-07]]
"""
def predictPolynomialRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
numberOfCoefficients = len(coefficients)-1
for currentDataPoint in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
for currentIndependentVariable in range(0, numberOfCoefficients):
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*(self.x_samplesList[currentDataPoint][0])**(currentIndependentVariable+1)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictMultiplePolynomialRegression(coefficients="We give the Multiple Polynomial Regression mathematical coefficients that we want to predict with",
orderOfThePolynomial="Assign a whole number that represents the order of degree of the Multiple Polynomial equation you want to make predictions with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultiplePolynomialRegression(coefficients=modelCoefficients, orderOfThePolynomial=4)
EXPECTED CODE RESULT:
predictedValues =
[[-13.54219748494156],
[-37.053240090011386],
[-48.742713747779355],
[-60.84907570434054],
[-73.31818590442116]]
"""
def predictMultiplePolynomialRegression(self, coefficients, orderOfThePolynomial):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
numberOfCoefficients = len(coefficients)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients="We give the Customized Multiple Second Order Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[39.13047301587551],
[34.803724444448],
[32.60300063492485],
[29.365301587306917],
[32.832886349211385]]
"""
def predictCustomizedMultipleSecondOrderPolynomialRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0] + coefficients[1][0]*self.x_samplesList[row][0] + coefficients[2][0]*self.x_samplesList[row][0]**2 + coefficients[3][0]*self.x_samplesList[row][1] + coefficients[4][0]*self.x_samplesList[row][1]**2 + coefficients[5][0]*self.x_samplesList[row][0]*self.x_samplesList[row][1]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
    predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients="We give the Customized Multiple Third Order Polynomial Regression mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[105.28333074423442],
[72.81181980293967],
[56.899154811293464],
[36.45941710222553],
[46.042387049575304]]
"""
def predictCustomizedMultipleThirdOrderPolynomialRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
x1 = 0
x2 = 1
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0] + coefficients[1][0]*self.x_samplesList[row][x1] + coefficients[2][0]*self.x_samplesList[row][x1]**2 + coefficients[3][0]*self.x_samplesList[row][x1]**3 + coefficients[4][0]*self.x_samplesList[row][x2] + coefficients[5][0]*self.x_samplesList[row][x2]**2 + coefficients[6][0]*self.x_samplesList[row][x2]**3 + coefficients[7][0]*self.x_samplesList[row][x1]*self.x_samplesList[row][x2] + coefficients[8][0]*(self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2] + coefficients[9][0]*self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
Classification("x independent variable datapoints to model", "y dependent variable datapoints to model")
The Classification library gives several methods to be able to get the best
fitting classification model to predict a determined classification problem.
"""
class Classification:
def __init__(self, x_samplesList, y_samplesList):
self.y_samplesList = y_samplesList
self.x_samplesList = x_samplesList
def set_xSamplesList(self, x_samplesList):
self.x_samplesList = x_samplesList
def set_ySamplesList(self, y_samplesList):
self.y_samplesList = y_samplesList
"""
getSupportVectorMachine(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Linear Support Vector Machine model to
be able to predict a classification problem of any number of independent
variables (x).
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[1.5736095873424212], [-0.26050769870994606], [-0.25468164794007475]]
accuracyFromTraining =
88.88888888888889
predictedData = [
[1],
[1],
[-1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + bn*xn >= -bo (As a note, remember that true equation representation is: w.x>=c)'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getSupportVectorMachine(self, evtfbmip=True):
getOptimizedRegression = evtfbmip
numberOfRows = len(self.y_samplesList)
matrix_x = self.x_samplesList
matrix_y = self.y_samplesList
for row in range(0, numberOfRows):
if ((self.y_samplesList[row][0]!=1) and (self.y_samplesList[row][0]!=-1)):
raise Exception('ERROR: One of the dependent (y) data points does not have exactly a 1 or a -1 as value. Note that in this library, the Support Vector Machine method needs to process your data to have either +1 or -1 as values.')
# We apply a Multiple Linear Regression to get the coefficient values
# for our Linear Support Vector Machine Model
from . import MortrackML_Library as mSL
import math
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getMultipleLinearRegression(evtfbmip = getOptimizedRegression)
svcCoefficients = modelingResults[0]
svcPredictedData = modelingResults[2]
# ---------------------------------- #
# ----- b0 Coefficient Tunning ----- #
# ---------------------------------- #
# Through the best fitting Multiple Linear Regression, we make a
# new search to try to find a better fitting b0 coefficient value
# that best fits the conditional of the equation that we actually
# want to solve (w.x>=-b0)
import numpy as np
rangeOfPredictedData = max(svcPredictedData)[0] - min(svcPredictedData)[0]
# linspace(start, stop, num=50)
bStepValues = np.linspace(svcCoefficients[0][0]-rangeOfPredictedData, svcCoefficients[0][0]+rangeOfPredictedData, num=100)
numberOfCoefficients = len(svcCoefficients)
best_b_value = 0
bestPredictedData = 0
bestPredictionAccuracy = 0
# We first get the b value that first pops and that has the highest
# accuracy
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy > bestPredictionAccuracy):
best_b_value = current_b_value
bestPredictedData = predictedData
bestPredictionAccuracy = predictionAcurracy
# Now that we now what value of b0 gives the best accuracy, we look
# forward to find the range of the b0 values that gives such best
# accuracy
best_b_value_1 = best_b_value
best_b_value_2 = 0
isBest_b_value = False
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
if (current_b_value == best_b_value_1):
isBest_b_value = True
if (isBest_b_value == True):
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy == bestPredictionAccuracy):
best_b_value_2 = current_b_value
# We find best fitting b0 coefficient value through exponential
# method
b0_sign = 1
if ((best_b_value_1+best_b_value_2)<0):
b0_sign = -1
best_b_value = (math.log(abs(best_b_value_1)) + math.log(abs(best_b_value_2)))/2
best_b_value = b0_sign*math.exp(best_b_value)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
# We verify if exponential method was the best choice to pick best
# fitting b0 coefficient. If this isnt true, we then try again but
# with the mean value of the b0 coefficient range that we obtained
#earlier
if ((best_b_value<min([best_b_value_1, best_b_value_2])) or (best_b_value>max([best_b_value_1, best_b_value_2])) or (predictionAcurracy<bestPredictionAccuracy)):
best_b_value = (best_b_value_1+best_b_value_2)/2
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
# If neither the exponential nor the mean methods work to get the
# best fitting b0 coefficient value, we then just pick the initial
# best fitting b0 value that we identified in this algorithm
if (predictionAcurracy < bestPredictionAccuracy):
best_b_value = best_b_value_1
# We save the new-found b0 coefficient value that best fits our
# current dataset
svcCoefficients[0][0] = best_b_value
# ----------------------------------------- #
# ----- We save best modeling results ----- #
# ----------------------------------------- #
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(svcCoefficients)
bestModelingResults.append(bestPredictionAccuracy)
bestModelingResults.append(bestPredictedData)
bestModelingResults.append("Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + bn*xn >= -bo (As a note, remember that true equation representation is: w.x>=c)")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getKernelSupportVectorMachine(kernel="you specify here the type of kernel that you want to model with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Kernel Support Vector Machine
model to be able to predict a classification problem of any number of
independent variables (x).
* If "gaussian" kernel is applied. This method will find the best
fitting model of such gaussian kernel through a gaussian regression.
* If "polynomimal" kernel is applied. This method will find the best
fitting model of such polynomial kernel through a Multiple Polynomial
Regression. You can specify the order of degree that you desire for your
Multiple Polynomial Kernel through the argument of this method named as
"orderOfPolynomialKernel".
* If "linear" kernel is applied. This method will find the best fitting
model of such polynomial kernel through a Multiple Linear Regression.
* You can also get a modified SVC by getting a non-linear intersection
plane to split your dataset into 2 specified categories. If you apply
this modified SVC, through "isPolynomialSVC" argument of this method,
you will be able to get a polynomial intersecting plane for your dataset
whos degree order can be modified through the argument of this method
named as "orderOfPolynomialSVC".
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[
[
[-0.4067247938936074],
[-2.638275880744686],
[0.6025816805607462],
[1.5978782207152165],
[0.0018850313260649898]
],
[
[17.733125277353782],
[-0.41918858713133034],
[-0.07845753695120994],
[-7.126885817943787],
[0.7414460867570138],
[13.371724079069963],
[-16.435714646771032]
]
]
accuracyFromTraining =
100.0
predictedData = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
[
'Coefficients distribution for the Gaussian Kernel is as follows: kernel = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2))',
[
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + b_(n-1)*xn + bn*Kernel >= -b_0 --> for linear SVC (As a note, remember that true equation representation is: w.x>=c and that x here represents each one of the coordinates of your independent samples (x))',
'Coefficients distribution is as follows: b1*x1 + ... + b_(n-5)*x_m^m + b_(n-4)*x_(m-1) + ... + b_(n-3)*x_m^m + ... + b_(n-2)*x_m + ... + b_(n-1)*x_m^m + bn*Kernel >= -b_0 --> for polynomial SVC (m stands for the order degree selected for the polynomial SVC and n stands for the number of coefficients used in the polynomial SVC)'
]
]
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getKernelSupportVectorMachine(self, kernel, isPolynomialSVC=True, orderOfPolynomialSVC=3, orderOfPolynomialKernel=3, evtfbmip=True):
if ((kernel!='linear') and (kernel!='polynomial') and (kernel!='gaussian')):
raise Exception('ERROR: The selected Kernel does not exist or has not been programmed in this method yet.')
from . import MortrackML_Library as mSL
import math
getOptimizedRegression = evtfbmip
numberOfRows = len(self.y_samplesList)
# --------------------------- #
# ----- Kernel Tranning ----- #
# --------------------------- #
if (kernel=='gaussian'):
# We obtain the independent coefficients of the best fitting model
# obtained through the Gaussian function (kernel) that we will use to distort
# the current dimentional spaces that we were originally given by the
# user
regression = mSL.Regression(self.x_samplesList, self.y_samplesList)
modelingResults = regression.getGaussianRegression()
kernelCoefficients = modelingResults[0]
# We obtain the coordinates of only the new dimentional space created
# by the obtained kernel
kernelData = []
numberOfCoefficients = len(kernelCoefficients)
gaussAproximationOrder = 2
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (gaussAproximationOrder+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(math.exp(-actualIc))
kernelData.append(temporalRow)
if (kernel=='polynomial'):
# We obtain the independent coefficients of the best fitting model
# obtained through the Multiple Polynomial Regression function
# (kernel) that we will use to distort the current dimentional
# spaces that we were originally given by the user
regression = mSL.Regression(self.x_samplesList, self.y_samplesList)
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=orderOfPolynomialKernel, evtfbmip=getOptimizedRegression)
kernelCoefficients = modelingResults[0]
# We obtain the predicted data through the current obtained
# coefficients
kernelData = []
numberOfCoefficients = len(kernelCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialKernel+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(actualIc)
kernelData.append(temporalRow)
if (kernel=='linear'):
# We obtain the independent coefficients of the best fitting model
# obtained through the Multiple Linear Regression function
# (kernel) that we will use to distort the current dimentional
# spaces that we were originally given by the user
regression = mSL.Regression(self.x_samplesList, self.y_samplesList)
modelingResults = regression.getMultipleLinearRegression(evtfbmip=getOptimizedRegression)
kernelCoefficients = modelingResults[0]
# We obtain the predicted data through the current obtained
# coefficients
kernelData = []
numberOfIndependentVariables = len(self.x_samplesList[0])
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
kernelData.append(temporalRow)
# We create the new matrix of the independent variables (x) but with
# the new dimentional space distortion made by the Kernel created
newMatrix_x = []
for row in range(0, numberOfRows):
temporalRow = []
for column in range(0, len(self.x_samplesList[0])):
temporalRow.append(self.x_samplesList[row][column])
temporalRow.append(kernelData[row][0])
newMatrix_x.append(temporalRow)
# ----------------------------------------------- #
# ----- Support Vector Classifier Trainning ----- #
# ----------------------------------------------- #
# We apply a Multiple Linear Regression to get the coefficient values
# for our Linear Support Vector Machine Model (Its Linear: remember
# that we applied a Kernel to distort the original dimentional space so
# that we could gain a linearly modeable dataset)
regression = mSL.Regression(newMatrix_x, self.y_samplesList)
if (isPolynomialSVC==True):
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=orderOfPolynomialSVC, evtfbmip=getOptimizedRegression)
else:
modelingResults = regression.getMultipleLinearRegression(evtfbmip = getOptimizedRegression)
svcCoefficients = modelingResults[0]
svcPredictedData = modelingResults[2]
# ---------------------------------- #
# ----- b0 Coefficient Tunning ----- #
# ---------------------------------- #
# Through the best fitting Multiple Linear Regression, we make a
# new search to try to find a better fitting b0 coefficient value
# that best fits the conditional of the equation that we actually
# want to solve (w.x>=-b0)
import numpy as np
rangeOfPredictedData = max(svcPredictedData)[0] - min(svcPredictedData)[0]
# linspace(start, stop, num=50)
bStepValues = np.linspace(svcCoefficients[0][0]-rangeOfPredictedData, svcCoefficients[0][0]+rangeOfPredictedData, num=100)
numberOfCoefficients = len(svcCoefficients)
best_b_value = 0
bestPredictedData = 0
bestPredictionAccuracy = 0
# We first get the b value that first pops and that has the highest
# accuracy
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy > bestPredictionAccuracy):
best_b_value = current_b_value
bestPredictedData = predictedData
bestPredictionAccuracy = predictionAcurracy
# Now that we now what value of b0 gives the best accuracy, we look
# forward to find the range of the b0 values that gives such best
# accuracy
best_b_value_1 = best_b_value
best_b_value_2 = 0
isBest_b_value = False
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
if (current_b_value == best_b_value_1):
isBest_b_value = True
if (isBest_b_value == True):
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy == bestPredictionAccuracy):
best_b_value_2 = current_b_value
# We find best fitting b0 coefficient value through exponential
# method
b0_sign = 1
if ((best_b_value_1+best_b_value_2)<0):
b0_sign = -1
best_b_value = (math.log(abs(best_b_value_1)) + math.log(abs(best_b_value_2)))/2
best_b_value = b0_sign*math.exp(best_b_value)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -best_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
# We verify if exponential method was the best choice to pick best
# fitting b0 coefficient. If this isnt true, we then try again but
# with the mean value of the b0 coefficient range that we obtained
#earlier
if ((best_b_value<min([best_b_value_1, best_b_value_2])) or (best_b_value>max([best_b_value_1, best_b_value_2])) or (predictionAcurracy<bestPredictionAccuracy)):
best_b_value = (best_b_value_1+best_b_value_2)/2
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -best_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
# If neither the exponential nor the mean methods work to get the
# best fitting b0 coefficient value, we then just pick the initial
# best fitting b0 value that we identified in this algorithm
if (predictionAcurracy < bestPredictionAccuracy):
best_b_value = best_b_value_1
# We save the new-found b0 coefficient value that best fits our
# current dataset
svcCoefficients[0][0] = best_b_value
# ----------------------------------------- #
# ----- We save best modeling results ----- #
# ----------------------------------------- #
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append([kernelCoefficients, svcCoefficients])
bestModelingResults.append(bestPredictionAccuracy)
bestModelingResults.append(bestPredictedData)
temporalRow = []
if (kernel=='gaussian'):
temporalRow.append("Coefficients distribution for the Gaussian Kernel is as follows: kernel = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2))")
if (kernel=='polynomial'):
temporalRow.append("Coefficients distribution for the Multiple Polynomial Kernel is as follows: kernel = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n")
if (kernel=='linear'):
temporalRow.append("Coefficients distribution for the Multiple Linear Kernel is as follows: kernel = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn")
temporalRow2 = []
temporalRow2.append("Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + b_(n-1)*xn + bn*Kernel >= -b_0 --> for linear SVC (As a note, remember that true equation representation is: w.x>=c and that x here represents each one of the coordinates of your independent samples (x))")
temporalRow2.append("Coefficients distribution is as follows: b1*x1 + ... + b_(n-5)*x_m^m + b_(n-4)*x_(m-1) + ... + b_(n-3)*x_m^m + ... + b_(n-2)*x_m + ... + b_(n-1)*x_m^m + bn*Kernel >= -b_0 --> for polynomial SVC (m stands for the order degree selected for the polynomial SVC and n stands for the number of coefficients used in the polynomial SVC)")
temporalRow.append(temporalRow2)
bestModelingResults.append(temporalRow)
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
predictSupportVectorMachine(coefficients="We give the SVC mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictSupportVectorMachine(coefficients=modelCoefficients)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
def predictSupportVectorMachine(self, coefficients):
from . import MortrackML_Library as mSL
numberOfRows = len(self.x_samplesList)
svcCoefficients = coefficients
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
numberOfCoefficients = len(svcCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (self.x_samplesList[row][column])*svcCoefficients[column+1][0]
c = -svcCoefficients[0][0] # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
predictKernelSupportVectorMachine(coefficients="We give the kernel and the SVC mathematical coefficients that we want to predict with",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
kernel="you specify here the type of kernel that you want to predict with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5],
[3, 3]
]
y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0],
[0]
]
matrix_y = []
for row in range(0, len(y)):
temporalRow = []
if (y[row][0] == 0):
temporalRow.append(-1)
if (y[row][0] == 1):
temporalRow.append(1)
if ((y[row][0]!=0) and (y[row][0]!=1)):
raise Exception('ERROR: The dependent variable y has values different from 0 and 1.')
matrix_y.append(temporalRow)
matrix_x = []
for row in range(0, len(y)):
temporalRow = []
for column in range(0, len(x[0])):
temporalRow.append(x[row][column])
matrix_x.append(temporalRow)
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictKernelSupportVectorMachine(coefficients=modelCoefficients, isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, kernel='gaussian')
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
def predictKernelSupportVectorMachine(self, coefficients, isPolynomialSVC=True, orderOfPolynomialSVC=3, orderOfPolynomialKernel=3, kernel='gaussian'):
if ((kernel!='linear') and (kernel!='polynomial') and (kernel!='gaussian')):
raise Exception('ERROR: The selected Kernel does not exist or has not been programmed in this method yet.')
from . import MortrackML_Library as mSL
import math
numberOfRows = len(self.x_samplesList)
# We create the local variables needed to run this algorithm
kernelCoefficients = coefficients[0]
svcCoefficients = coefficients[1]
if (kernel=='gaussian'):
# We obtain the coordinates of only the new dimentional space created
# by the obtained kernel
kernelData = []
numberOfCoefficients = len(kernelCoefficients)
gaussAproximationOrder = 2
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (gaussAproximationOrder+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(math.exp(-actualIc))
kernelData.append(temporalRow)
# We create the new matrix of the independent variables (x) but with
# the new dimentional space distortion made by the Kernel created
newMatrix_x = []
for row in range(0, numberOfRows):
temporalRow = []
for column in range(0, len(self.x_samplesList[0])):
temporalRow.append(self.x_samplesList[row][column])
temporalRow.append(kernelData[row][0])
newMatrix_x.append(temporalRow)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
numberOfCoefficients = len(svcCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -svcCoefficients[0][0] # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
if (kernel=='polynomial'):
# We obtain the coordinates of only the new dimentional space created
# by the obtained kernel
kernelData = []
numberOfCoefficients = len(kernelCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialKernel+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(actualIc)
kernelData.append(temporalRow)
# We create the new matrix of the independent variables (x) but with
# the new dimentional space distortion made by the Kernel created
newMatrix_x = []
for row in range(0, numberOfRows):
temporalRow = []
for column in range(0, len(self.x_samplesList[0])):
temporalRow.append(self.x_samplesList[row][column])
temporalRow.append(kernelData[row][0])
newMatrix_x.append(temporalRow)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
numberOfCoefficients = len(svcCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -svcCoefficients[0][0] # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
if (kernel=='linear'):
# We obtain the coordinates of only the new dimentional space created
# by the obtained kernel
kernelData = []
numberOfIndependentVariables = len(self.x_samplesList[0])
for row in range(0, numberOfRows):
temporalRow = []
actualIc = kernelCoefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + kernelCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
kernelData.append(temporalRow)
# We create the new matrix of the independent variables (x) but with
# the new dimentional space distortion made by the Kernel created
newMatrix_x = []
for row in range(0, numberOfRows):
temporalRow = []
for column in range(0, len(self.x_samplesList[0])):
temporalRow.append(self.x_samplesList[row][column])
temporalRow.append(kernelData[row][0])
newMatrix_x.append(temporalRow)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
numberOfCoefficients = len(svcCoefficients)
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
if (isPolynomialSVC==True):
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfCoefficients-1):
if (currentOrderOfThePolynomial == (orderOfPolynomialSVC+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
wx = wx + svcCoefficients[currentIndependentVariable+1][0]*newMatrix_x[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
else:
for column in range(0, numberOfCoefficients-1):
wx = wx + (newMatrix_x[row][column])*svcCoefficients[column+1][0]
c = -svcCoefficients[0][0] # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
predictLinearLogisticClassifier(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification = mSL.Classification(predictThisValues, [])
predictedValuesForBg = classification.predictLinearLogisticClassifier(coefficients=modelCoefficients, threshold=0.5)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
# Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
def predictLinearLogisticClassifier(self, coefficients, threshold):
from . import MortrackML_Library as mSL
import math
numberOfRows = len(self.x_samplesList)
# We determine the accuracy of the obtained coefficientsfor the
# Probability Equation of the Logistic Regression Equation
predictedData = []
numberOfIndependentVariables = len(self.x_samplesList[0])
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + coefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
actualIc = math.exp(actualIc)
actualIc = actualIc/(1+actualIc)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictedDataWithThreshold = []
for row in range(0, numberOfRows):
temporalRow = []
if (predictedData[row][0]>=threshold):
temporalRow.append(1)
else:
temporalRow.append(0)
predictedDataWithThreshold.append(temporalRow)
# We return the predicted data
return predictedDataWithThreshold
"""
The ReinforcementLearning Class gives several methods to make a model that is
able to learn in real time to predict the best option among the ones you tell
it it has available. This is very useful when you actually dont have a dataset
to tell your model the expected output values to compare them and train itself
with them.
Regression("independent values (x) or options that your model will have available to pick from")
"""
class ReinforcementLearning:
    def __init__(self, y_samplesList):
        """Store the rewards matrix (y): one row per round, one column per available option/arm."""
        self.y_samplesList = y_samplesList
    def set_ySamplesList(self, y_samplesList):
        """Overwrite the currently stored rewards matrix (y) with a new one."""
        self.y_samplesList = y_samplesList
"""
getUpperConfidenceBound()
This method helps you to identify what is the best option (these are called
as arms in this algorithm) among many, to get the best number of successful
results when there's actually no possible way to know anything about a
particular problem that we want to figure out how to solve.
Unlike the normal method "getRealTimeUpperConfidenceBound()", this method
cannot solve a problem in real time, since it needs that you already have
meassured several rounds so that then this algorithm studies it to then
tell you which arm is the best option among all the others.
This methods advantages:
* When this algorithm tries to identify the best arm, it only needs
to know if his current selection was successful or not (0 or 1)
and it doesnt need to know, in that round, anything about the
other arms
This methods disadvantages:
* This is the method that takes the most time to be able to
identify the best arm. Just so that you have it in mind, for a
problem to solve, this algorithm needed around the following
round samples to start identifying the best arm / option for a
random problem that i wanted to solve:
+ For 2 arms --> around 950 samples
+ For 3 arms --> around 1400 samples
+ For 4 arms --> around 1200 samples
+ For 5 arms --> around 320 samples
+ For 6 arms --> around 350 samples
+ For 7 arms --> around 400 samples
+ For 8 arms --> around 270 samples
+ For 9 arms --> around 600 samples
+ For 10 arms --> around 600 samples
As you can see, there is clearly no proportionality alone by the
number of available arms and it is most likely that the needed
number of samples, so that this algorithm starts identifying the
best arm, will most likely depend on the probability of occurence
for each option available to be selected by the algorithm. This
is a great deficit for this algorithm since according to the
situations were we are supposed to need this algorithm, we are
supposed to not know such probability of occurence.
NOTE: The logic of this algorithm follows the one described and teached by
the Machine Learning Course "Machine Learning A-Z™: Hands-On Python & R In
Data Science" teached by " Kirill Eremenko, Hadelin de Ponteves,
SuperDataScience Team, SuperDataScience Support". I mention this because i
dont quite agree with how this algorithm works but, even though i havent
checked, there is a great chance that this is how other data scientists do
Upper Confidence Bound.
CODE EXAMPLE:
import pandas as pd
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
matrix_y = []
for row in range(0, len(dataset)):
temporalRow = []
for column in range(0, len(dataset.iloc[0])):
temporalRow.append(dataset.iloc[row,column])
matrix_y.append(temporalRow)
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
rL = mSL.ReinforcementLearning(matrix_y)
modelingResults = rL.getUpperConfidenceBound()
accuracyFromTraining = modelingResults[1]
historyOfPredictedData = modelingResults[3]
# ------------------------------------ #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------ #
import matplotlib.pyplot as plt
import numpy as np
histogram_x_data = []
for row in range(0, len(historyOfPredictedData)):
histogram_x_data.append(historyOfPredictedData[row][0])
plt.figure()
plt.hist(histogram_x_data)
plt.title('Histogram of ads selections by UCB model')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
EXPECTED CODE RESULT:
"A histogram graph will pop and will show the number of times that the
algorithm picked each of the available options. The option with the
highest number of selections by the algorithm is basically going to be
the best option among them all"
accuracyFromTraining =
21.78
historyOfPredictedData =
NOTE: We wont show this result because it has 10'000 rows and its just
way too long to show here as a demonstration.
"""
def getUpperConfidenceBound(self):
import math
numberOfSamples = len(self.y_samplesList)
numberOfOptionsAvailable = len(self.y_samplesList[0])
adsSelectedByTheAlgorithm = []
numberOfSelectionsOfAds = []
temporalRow = []
for column in range(0, numberOfOptionsAvailable):
temporalRow.append(0)
numberOfSelectionsOfAds.append(temporalRow)
sumsOfRewardsForEachAd = []
temporalRow = []
for column in range(0, numberOfOptionsAvailable):
temporalRow.append(0)
sumsOfRewardsForEachAd.append(temporalRow)
totalRewards = 0
currentUpperConfidenceBound = 0
meanOfRewards = 0
for row in range(0, numberOfSamples):
highestUpperConfidenceBound = 0
currentAdSelected = 0
for column in range(0, numberOfOptionsAvailable):
if (numberOfSelectionsOfAds[0][column] > 0):
meanOfRewards = sumsOfRewardsForEachAd[0][column] / numberOfSelectionsOfAds[0][column]
delta_i = math.sqrt(3/2 * math.log(row+1) / numberOfSelectionsOfAds[0][column])
currentUpperConfidenceBound = meanOfRewards + delta_i
else:
currentUpperConfidenceBound = 1e400 # the idea is to assign a very big value to this variable
if (currentUpperConfidenceBound > highestUpperConfidenceBound):
highestUpperConfidenceBound = currentUpperConfidenceBound
currentAdSelected = column
temporalRow = []
temporalRow.append(currentAdSelected)
adsSelectedByTheAlgorithm.append(temporalRow)
numberOfSelectionsOfAds[0][currentAdSelected] = numberOfSelectionsOfAds[0][currentAdSelected] + 1
currentReward = self.y_samplesList[row][currentAdSelected]
sumsOfRewardsForEachAd[0][currentAdSelected] = sumsOfRewardsForEachAd[0][currentAdSelected] + currentReward
totalRewards = totalRewards + currentReward
accuracy = 100*totalRewards/numberOfSamples
modelingResults = []
modelingResults.append(0) # Null value since this model doesnt give coefficients at all
modelingResults.append(accuracy)
modelingResults.append(sumsOfRewardsForEachAd)
modelingResults.append(adsSelectedByTheAlgorithm)
return modelingResults
"""
getRealTimeUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occured for a particular UCB problem to solve",
sumsOfRewardsForEachArm="You have to indicate here the sums of rewards for each of the available arms for a particular UCB problem to solve",
numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve")
IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
PARTICULAR UCB PROBLEM, DONT DEFINE ANY VALUES IN THE
ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
METHOD (SEE CODE EXAMPLE).
This method helps you to identify what is the best option (these are called
as arms in this algorithm) among many, to get the best number of successful
results when theres actually no possible way to know anything about a
particular problem that we want to figure out how to solve.
Unlike the normal method "getUpperConfidenceBound()", this method learns in
real time, while "getUpperConfidenceBound()" expects you to already have
measured several rounds.
This methods advantages:
* When this algorithm tries to identify the best arm, it only needs
to know if his current selection was successful or not (0 or 1)
and it doesnt need to know, in that round, anything about the
other arms
This methods disadvantages:
* This is the method that takes the most time to be able to
identify the best arm. Just so that you have it in mind, for a
problem to solve, this algorithm needed around the following
round samples to start identifying the best arm / option for a
random problem that i wanted to solve:
+ For 2 arms --> around 950 samples
+ For 3 arms --> around 1400 samples
+ For 4 arms --> around 1200 samples
+ For 5 arms --> around 320 samples
+ For 6 arms --> around 350 samples
+ For 7 arms --> around 400 samples
+ For 8 arms --> around 270 samples
+ For 9 arms --> around 600 samples
+ For 10 arms --> around 600 samples
As you can see, there is clearly no proportionality alone by the
number of available arms and it is most likely that the needed
number of samples, so that this algorithm starts identifying the
best arm, will most likely depend on the probability of occurence
for each option available to be selected by the algorithm. This
is a great deficit for this algorithm since according to the
    situations where we are supposed to need this algorithm, we are
    supposed to not know such a probability of occurrence.
    NOTE: The logic of this algorithm follows the one described and taught in
    the Machine Learning course "Machine Learning A-Z™: Hands-On Python & R In
    Data Science" by "Kirill Eremenko, Hadelin de Ponteves,
    SuperDataScience Team, SuperDataScience Support". I mention this because I
    don't quite agree with how this algorithm works but, even though I haven't
    checked, there is a great chance that this is how other data scientists do
    Upper Confidence Bound.
CODE EXAMPLE:
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
import pandas as pd
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
matrix_y = []
for row in range(0, len(dataset)):
temporalRow = []
for column in range(0, len(dataset.iloc[0])):
temporalRow.append(dataset.iloc[row,column])
matrix_y.append(temporalRow)
# With this for-loop, we will simulate that we are getting the data in
# real-time and that we are, at the same time, giving it to the algorithm
numberOfArmsAvailable = len(matrix_y[0])
for currentSample in range(0, len(matrix_y)):
rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
if (currentSample == 0):
modelingResults = rL.getRealTimeUpperConfidenceBound()
else:
modelingResults = rL.getRealTimeUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachArm, numberOfSelectionsOfArms)
currentNumberOfSamples = modelingResults[0]
currentAccuracyFromTraining = modelingResults[1]
sumsOfRewardsForEachArm = modelingResults[2]
numberOfSelectionsOfArms = modelingResults[3]
# ------------------------------------ #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------ #
import matplotlib.pyplot as plt
import numpy as np
histogram_x_data = []
# We now add the real selected options by the algorithm
for currentArm in range(0, numberOfArmsAvailable):
for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
histogram_x_data.append(currentArm)
plt.figure()
plt.hist(histogram_x_data)
plt.title('Histogram of ads selections by UCB model')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
EXPECTED CODE RESULT:
"A histogram graph will pop and will show the number of times that the
algorithm picked each of the available options. The option with the
highest number of selections by the algorithm is basically going to be
the best option among them all"
currentNumberOfSamples=
10000
currentAccuracyFromTraining =
21.78
sumsOfRewardsForEachArm =
[[120, 47, 7, 38, 1675, 1, 27, 236, 20, 7]]
numberOfSelectionsOfArms =
[[705, 387, 186, 345, 6323, 150, 292, 1170, 256, 186]]
"""
def getRealTimeUpperConfidenceBound(self, currentNumberOfSamples=0, sumsOfRewardsForEachArm=[], numberOfSelectionsOfArms=[]):
import math
# We save on this variable the number of arms (options) available
numberOfArmsAvailable = len(self.y_samplesList[0])
# We innitialize the variables that have to be innitialized only the
# first time that this algorithm is ran for a particular problem
if (currentNumberOfSamples == 0):
# We save on this variable the number of times that each arm was picked
# by our algorithm
numberOfSelectionsOfArms = []
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
numberOfSelectionsOfArms.append(temporalRow)
# We save on this variable the number of times that we selected the
# right arm in a way that we keep a count of this for each arm
sumsOfRewardsForEachArm = []
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
sumsOfRewardsForEachArm.append(temporalRow)
# We innitialize the following variables that we will be using within
# the core process of this algorithm
highestUpperConfidenceBound = 0
currentAdSelected = 0
# We increase by one the number of current samples to follow up through
# this algorithm
currentNumberOfSamples = currentNumberOfSamples + 1
for column in range(0, numberOfArmsAvailable):
if (numberOfSelectionsOfArms[0][column] > 0):
meanOfRewards = sumsOfRewardsForEachArm[0][column] / numberOfSelectionsOfArms[0][column]
delta_i = math.sqrt(3/2 * math.log(currentNumberOfSamples) / numberOfSelectionsOfArms[0][column])
currentUpperConfidenceBound = meanOfRewards + delta_i
else:
currentUpperConfidenceBound = 1e400 # the idea is to assign a very big value to this variable
if (currentUpperConfidenceBound > highestUpperConfidenceBound):
highestUpperConfidenceBound = currentUpperConfidenceBound
currentAdSelected = column
numberOfSelectionsOfArms[0][currentAdSelected] = numberOfSelectionsOfArms[0][currentAdSelected] + 1
currentReward = self.y_samplesList[0][currentAdSelected]
sumsOfRewardsForEachArm[0][currentAdSelected] = sumsOfRewardsForEachArm[0][currentAdSelected] + currentReward
totalRewards = 0
for column in range(0, numberOfArmsAvailable):
totalRewards = totalRewards + sumsOfRewardsForEachArm[0][column]
currentAccuracy = 100*totalRewards/currentNumberOfSamples
modelingResults = []
modelingResults.append(currentNumberOfSamples)
modelingResults.append(currentAccuracy)
modelingResults.append(sumsOfRewardsForEachArm)
modelingResults.append(numberOfSelectionsOfArms)
return modelingResults
"""
getModifiedUpperConfidenceBound()
This method helps you to identify what is the best option (these are called
as arms in this algorithm) among many, to get the best number of successful
results when theres actually no possible way to know anything about a
particular problem that we want to figure out how to solve.
Unlike the method "getRealTimeModifiedUpperConfidenceBound()" which learns
in real-time, this method does not and it requires that you have already
meassured several rounds to the input them to this method.
This methods advantages:
* This method is the fastest of all, so far, to detect the best
possible arm (option) among all the available ones:
+ For 2 arms --> around 1 sample
+ For 3 arms --> around 1 sample
+ For 4 arms --> around 1 sample
+ For 5 arms --> around 60 samples
+ For 6 arms --> around 60 samples
+ For 7 arms --> around 60 samples
+ For 8 arms --> around 60 samples
+ For 9 arms --> around 60 samples
+ For 10 arms --> around 60 samples
As you can see, there is clearly no proportionality alone by the
number of available arms and it is most likely that the needed
number of samples, so that this algorithm starts identifying the
best arm, will most likely depend on the probability of occurence
for each option available to be selected by the algorithm. This
is a great deficit for this algorithm since according to the
    situations where we are supposed to need this algorithm, we are
    supposed to not know such a probability of occurrence.
This methods disadvantages:
* When this algorithm tries to identify the best arm, it needs to
know, for each arm (regardless of the one picked by the
algorithm), if they were a successful pick or not (0 or 1),
unlike the "getUpperConfidenceBound()" which only needs
to know if his actual pick was sucessful or not.
CODE EXAMPLE:
import pandas as pd
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
matrix_y = []
for row in range(0, len(dataset)):
temporalRow = []
for column in range(0, len(dataset.iloc[0])):
temporalRow.append(dataset.iloc[row,column])
matrix_y.append(temporalRow)
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
rL = mSL.ReinforcementLearning(matrix_y)
modelingResults = rL.getModifiedUpperConfidenceBound()
accuracyFromTraining = modelingResults[1]
historyOfPredictedData = modelingResults[3]
# ------------------------------------ #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------ #
import matplotlib.pyplot as plt
import numpy as np
histogram_x_data = []
# We first add a fake selection for each available option (arms) so that we
# ensure that they appear in the histogram. Otherwise, if we dont do this and
# if the algorithm never consideres one or some of the available options, it
# will plot considering those options never existed.
numberOfAvailableOptions = len(matrix_y[0])
for row in range(0, numberOfAvailableOptions):
histogram_x_data.append(row)
# We now add the real selected options by the algorithm
for row in range(0, len(historyOfPredictedData)):
histogram_x_data.append(historyOfPredictedData[row][0])
plt.figure()
plt.hist(histogram_x_data)
plt.title('Histogram of ads selections by UCB model')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
EXPECTED CODE RESULT:
"A histogram graph will pop and will show the number of times that the
algorithm picked each of the available options. The option with the
highest number of selections by the algorithm is basically going to be
the best option among them all"
accuracyFromTraining =
26.93
historyOfPredictedData =
NOTE: We wont show this result because it has 10'000 rows and its just
way too long to show here as a demonstration.
"""
    def getModifiedUpperConfidenceBound(self):
        """Batch "modified UCB" bandit solver over self.y_samplesList.

        self.y_samplesList is treated as a rounds-by-arms reward matrix
        (one row per round, one reward per arm). Each round, every arm is
        scored with the upper limit of a t-distribution prediction interval
        built from that arm's running mean and standard deviation; arms with
        two or fewer observed rounds get an infinite score so that they are
        explored first. The highest-scoring arm is recorded as selected.

        Unlike classic UCB, this variant updates EVERY arm's statistics each
        round, so it needs the full reward row for all arms, not just the
        selected one.

        Returns a 4-element list:
            [0] 0 (placeholder: this model produces no coefficients),
            [1] accuracy in percent (100 * rewards of selected arms / rounds),
            [2] sumsOfRewardsForEachAd: [[total observed reward per arm]],
            [3] adsSelectedByTheAlgorithm: one [armIndex] row per round.
        """
        from . import MortrackML_Library as mSL
        numberOfSamples = len(self.y_samplesList)
        numberOfOptionsAvailable = len(self.y_samplesList[0])
        adsSelectedByTheAlgorithm = []
        # Per-arm counters/accumulators, initialized to zero in the library's
        # usual [[...]] single-row-matrix shape.
        numberOfSelectionsOfAds = []
        temporalRow = []
        for column in range(0, numberOfOptionsAvailable):
            temporalRow.append(0)
        numberOfSelectionsOfAds.append(temporalRow)
        sumsOfRewardsForEachAd = []
        temporalRow = []
        for column in range(0, numberOfOptionsAvailable):
            temporalRow.append(0)
        sumsOfRewardsForEachAd.append(temporalRow)
        totalRewards = 0
        currentUpperConfidenceBound = 0
        # Running mean and standard deviation of the rewards of each arm.
        meanList = []
        temporalRow = []
        for column in range(0, numberOfOptionsAvailable):
            temporalRow.append(0)
        meanList.append(temporalRow)
        standardDeviationList = []
        temporalRow = []
        for column in range(0, numberOfOptionsAvailable):
            temporalRow.append(0)
        standardDeviationList.append(temporalRow)
        # We start the modified UCB algorithm
        for row in range(0, numberOfSamples):
            highestUpperConfidenceBound = 0
            currentAdSelected = 0
            for column in range(0, numberOfOptionsAvailable):
                # We compare all the prediction intervals to then pick the best one
                transcuredNumberOfRounds = row+1
                if (transcuredNumberOfRounds > 2):
                    # Prediction interval needs >= 3 samples; its upper limit
                    # ([0][1]) is this arm's confidence bound.
                    tD = mSL.Tdistribution(desiredTrustInterval=99.9)
                    tValue = tD.getCriticalValue(transcuredNumberOfRounds)
                    tI = mSL.TrustIntervals()
                    predictionIntervalsList = tI.getPredictionIntervals(transcuredNumberOfRounds, [[meanList[0][column]]], [[standardDeviationList[0][column]]], tValue)
                    currentUpperConfidenceBound = predictionIntervalsList[0][1]
                else:
                    currentUpperConfidenceBound = 1e400 # effectively infinity: forces early exploration
                if (currentUpperConfidenceBound > highestUpperConfidenceBound):
                    highestUpperConfidenceBound = currentUpperConfidenceBound
                    currentAdSelected = column
                # We update the means and the standard deviations of all the
                # options available for this model (arms) with the latest
                # observations that were made.
                currentReward = self.y_samplesList[row][column]
                sumsOfRewardsForEachAd[0][column] = sumsOfRewardsForEachAd[0][column] + currentReward
                if (transcuredNumberOfRounds == 1):
                    meanList[0][column] = currentReward
                    standardDeviationList[0][column] = 0
                if (transcuredNumberOfRounds == 2):
                    firstValue = meanList[0][column]
                    meanList[0][column] = sumsOfRewardsForEachAd[0][column]/transcuredNumberOfRounds
                    standardDeviationList[0][column] = (( (firstValue-meanList[0][column])**2 + (currentReward-meanList[0][column])**2 )/(2-1) )**(0.5)
                if (transcuredNumberOfRounds > 2):
                    # NOTE(review): this running std-dev update reuses the old
                    # variance without re-centering past deviations on the new
                    # mean — looks like an approximation of the incremental
                    # variance formula; confirm against the intended math.
                    meanList[0][column] = sumsOfRewardsForEachAd[0][column]/transcuredNumberOfRounds
                    standardDeviationList[0][column] = (( (standardDeviationList[0][column]**2)*(transcuredNumberOfRounds-1)+(currentReward-meanList[0][column])**2 )/(transcuredNumberOfRounds-1) )**(0.5)
            # We update the list of the currently selected arm and the total
            # rewards variable
            temporalRow = []
            temporalRow.append(currentAdSelected)
            adsSelectedByTheAlgorithm.append(temporalRow)
            numberOfSelectionsOfAds[0][currentAdSelected] = numberOfSelectionsOfAds[0][currentAdSelected] + 1
            currentReward = self.y_samplesList[row][currentAdSelected]
            totalRewards = totalRewards + currentReward
        # We return the model results
        accuracy = 100*totalRewards/numberOfSamples
        modelingResults = []
        modelingResults.append(0) # Null value since this model doesnt give coefficients at all
        modelingResults.append(accuracy)
        modelingResults.append(sumsOfRewardsForEachAd)
        modelingResults.append(adsSelectedByTheAlgorithm)
        return modelingResults
"""
getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occured for a particular UCB problem to solve",
sumsOfRewardsForEachSelectedArm="You have to indicate the sums of the rewards for each arm but only for those situations were the algorithm picked each arm",
numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve",
trueSumsOfRewardsForEachArm="You have to indicate the real number of times that each arm has been a successful result, regardless of what the algorithm identified",
meanList="You have to indicate the mean list of the rewards obtained for each arm",
standardDeviationList="You have to indicate the standard deviation list of the rewards obtained for each arm")
IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
PARTICULAR UCB PROBLEM, DONT DEFINE ANY VALUES IN THE
ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
METHOD (SEE CODE EXAMPLE).
This method helps you to identify what is the best option (these are called
as arms in this algorithm) among many, to get the best number of successful
results when theres actually no possible way to know anything about a
particular problem that we want to figure out how to solve.
Unlike the normal method "getModifiedUpperConfidenceBound()", this method
learns in real time, while "getModifiedUpperConfidenceBound()" expects you
to already have measured several rounds.
This methods advantages:
* This method is the fastest of all, so far, to detect the best
possible arm (option) among all the available ones:
+ For 2 arms --> around 1 sample
+ For 3 arms --> around 1 sample
+ For 4 arms --> around 1 sample
+ For 5 arms --> around 60 samples
+ For 6 arms --> around 60 samples
+ For 7 arms --> around 60 samples
+ For 8 arms --> around 60 samples
+ For 9 arms --> around 60 samples
+ For 10 arms --> around 60 samples
As you can see, there is clearly no proportionality alone by the
number of available arms and it is most likely that the needed
number of samples, so that this algorithm starts identifying the
best arm, will most likely depend on the probability of occurence
for each option available to be selected by the algorithm. This
is a great deficit for this algorithm since according to the
    situations where we are supposed to need this algorithm, we are
    supposed to not know such a probability of occurrence.
This methods disadvantages:
* When this algorithm tries to identify the best arm, it needs to
know, for each arm (regardless of the one picked by the
algorithm), if they were a successful pick or not (0 or 1),
unlike the "getRealTimeUpperConfidenceBound()" which only needs
to know if his actual pick was sucessful or not.
CODE EXAMPLE:
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
import pandas as pd
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
matrix_y = []
for row in range(0, len(dataset)):
temporalRow = []
for column in range(0, len(dataset.iloc[0])):
temporalRow.append(dataset.iloc[row,column])
matrix_y.append(temporalRow)
# With this for-loop, we will simulate that we are getting the data in
# real-time and that we are, at the same time, giving it to the algorithm
numberOfArmsAvailable = len(matrix_y[0])
for currentSample in range(0, len(matrix_y)):
rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
if (currentSample == 0):
modelingResults = rL.getRealTimeModifiedUpperConfidenceBound()
else:
modelingResults = rL.getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachSelectedArm, numberOfSelectionsOfArms, trueSumsOfRewardsForEachArm, meanList, standardDeviationList)
currentNumberOfSamples = modelingResults[0]
currentAccuracyFromTraining = modelingResults[1]
sumsOfRewardsForEachSelectedArm = modelingResults[2]
numberOfSelectionsOfArms = modelingResults[3]
trueSumsOfRewardsForEachArm = modelingResults[4]
meanList = modelingResults[5]
standardDeviationList = modelingResults[6]
# ------------------------------------ #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------ #
import matplotlib.pyplot as plt
import numpy as np
histogram_x_data = []
# We first add a fake selection for each available option (arms) so that we
# ensure that they appear in the histogram. Otherwise, if we dont do this and
# if the algorithm never consideres one or some of the available options, it
# will plot considering those options never existed.
for row in range(0, numberOfArmsAvailable):
histogram_x_data.append(row)
# We now add the real selected options by the algorithm
for currentArm in range(0, numberOfArmsAvailable):
for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
histogram_x_data.append(currentArm)
plt.figure()
plt.hist(histogram_x_data)
plt.title('Histogram of ads selections by UCB model')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
EXPECTED CODE RESULT:
"A histogram graph will pop and will show the number of times that the
algorithm picked each of the available options. The option with the
highest number of selections by the algorithm is basically going to be
the best option among them all"
currentNumberOfSamples=
10000
currentAccuracyFromTraining =
26.93
sumsOfRewardsForEachSelectedArm =
[[3, 0, 0, 0, 2690, 0, 0, 0, 0, 0]]
numberOfSelectionsOfArms =
[[25, 0, 0, 0, 9975, 0, 0, 0, 0, 0]]
trueSumsOfRewardsForEachArm =
[[1703, 1295, 728, 1196, 2695, 126, 1112, 2091, 952, 489]]
meanList =
[[0.1703,
0.1295,
0.0728,
0.1196,
0.2695,
0.0126,
0.1112,
0.2091,
0.0952,
0.0489]]
standardDeviationList =
[[1.2506502260503618,
1.0724240984136193,
0.7004403369435815,
0.9286872458865242,
1.412843221683186,
0.3047987328938745,
0.7525852536272276,
1.2007787911241279,
1.030718190027389,
0.5406998109413704]]
"""
def getRealTimeModifiedUpperConfidenceBound(self, currentNumberOfSamples=0, sumsOfRewardsForEachSelectedArm=[], numberOfSelectionsOfArms=[], trueSumsOfRewardsForEachArm=[], meanList=[], standardDeviationList=[]):
from . import MortrackML_Library as mSL
# We save on this variable the number of arms (options) available
numberOfArmsAvailable = len(self.y_samplesList[0])
# We innitialize the variables that have to be innitialized only the
# first time that this algorithm is ran for a particular problem
if (currentNumberOfSamples == 0):
# We save on this variable the number of times that each arm was picked
# by our algorithm
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
numberOfSelectionsOfArms.append(temporalRow)
# We save on this variable the number of times that the algorithm
# selected a particular arm
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
sumsOfRewardsForEachSelectedArm.append(temporalRow)
# We save on this variable the number of times that each arm had
# been the right pick or that it gave a successful result
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
trueSumsOfRewardsForEachArm.append(temporalRow)
# We save on this variable the mean of the results obtained for
# each arm
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
meanList.append(temporalRow)
# We save on this variable the standard deviation of the results
# obtained for each arm
temporalRow = []
for column in range(0, numberOfArmsAvailable):
temporalRow.append(0)
standardDeviationList.append(temporalRow)
# We innitialize the following variables that we will be using within
# the core process of this algorithm
highestUpperConfidenceBound = 0
currentAdSelected = 0
# We increase by one the number of current samples to follow up through
# this algorithm
currentNumberOfSamples = currentNumberOfSamples + 1
for column in range(0, numberOfArmsAvailable):
# We compare all the prediction intervals to then pick the best one
if (currentNumberOfSamples > 2):
tD = mSL.Tdistribution(desiredTrustInterval=99.9)
tValue = tD.getCriticalValue(currentNumberOfSamples)
tI = mSL.TrustIntervals()
predictionIntervalsList = tI.getPredictionIntervals(currentNumberOfSamples, [[meanList[0][column]]], [[standardDeviationList[0][column]]], tValue)
currentUpperConfidenceBound = predictionIntervalsList[0][1]
else:
currentUpperConfidenceBound = 1e400 # the idea is to assign a very big value to this variable
if (currentUpperConfidenceBound > highestUpperConfidenceBound):
highestUpperConfidenceBound = currentUpperConfidenceBound
currentAdSelected = column
# We update the means and the standard deviations of all the
# options available for this model (arms) with the latest
# observations that were made.
currentReward = self.y_samplesList[0][column]
trueSumsOfRewardsForEachArm[0][column] = trueSumsOfRewardsForEachArm[0][column] + currentReward
if (currentNumberOfSamples == 1):
meanList[0][column] = currentReward
standardDeviationList[0][column] = 0
if (currentNumberOfSamples == 2):
firstValue = meanList[0][column]
meanList[0][column] = trueSumsOfRewardsForEachArm[0][column]/currentNumberOfSamples
standardDeviationList[0][column] = (( (firstValue-meanList[0][column])**2 + (currentReward-meanList[0][column])**2 )/(currentNumberOfSamples-1) )**(0.5)
if (currentNumberOfSamples > 2):
meanList[0][column] = trueSumsOfRewardsForEachArm[0][column]/currentNumberOfSamples
standardDeviationList[0][column] = (( (standardDeviationList[0][column]**2)*(currentNumberOfSamples-1)+(currentReward-meanList[0][column])**2 )/(currentNumberOfSamples-1) )**(0.5)
# We update the list of the currently selected arm and the total
# rewards variable
numberOfSelectionsOfArms[0][currentAdSelected] = numberOfSelectionsOfArms[0][currentAdSelected] + 1
currentReward = self.y_samplesList[0][currentAdSelected]
sumsOfRewardsForEachSelectedArm[0][currentAdSelected] = sumsOfRewardsForEachSelectedArm[0][currentAdSelected] + currentReward
totalRewards = 0
for column in range(0, numberOfArmsAvailable):
totalRewards = totalRewards + sumsOfRewardsForEachSelectedArm[0][column]
currentAccuracy = 100*totalRewards/currentNumberOfSamples
modelingResults = []
modelingResults.append(currentNumberOfSamples)
modelingResults.append(currentAccuracy)
modelingResults.append(sumsOfRewardsForEachSelectedArm)
modelingResults.append(numberOfSelectionsOfArms)
modelingResults.append(trueSumsOfRewardsForEachArm)
modelingResults.append(meanList)
modelingResults.append(standardDeviationList)
return modelingResults
"""
The DeepLearning Class gives several methods to make a model through the
concept of how a real neuron works.
DeepLearning("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class DeepLearning:
    def __init__(self, x_samplesList, y_samplesList):
        """Store the independent (x) and dependent (y) sample matrices."""
        self.x_samplesList = x_samplesList
        self.y_samplesList = y_samplesList
    def set_xSamplesList(self, x_samplesList):
        """Replace the stored independent (x) sample matrix."""
        self.x_samplesList = x_samplesList
    def set_ySamplesList(self, y_samplesList):
        """Replace the stored dependent (y) sample matrix."""
        self.y_samplesList = y_samplesList
"""
getReluActivation(x="the instant independent value from which you want to know the dependent ReLU value/result")
This method calculates and returns the ReLU function value of the instant
independent value that you give in the "x" local variable of this method.
"""
def getReluActivation(self, x):
if (x > 0):
return x
else:
return 0
"""
getReluActivationDerivative(x="the instant independent value from which you want to know the derivate of the dependent ReLU value/result")
This method calculates and returns the derivate ReLU function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
def getReluActivationDerivative(self, x):
if (x > 0):
return 1
else:
return 0
"""
getTanhActivation(x="the instant independent value from which you want to know the dependent Hyperbolic Tangent (Tanh) value/result")
This method calculates and returns the Hyperbolic Tangent (Tanh) function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getTanhActivation(self, x):
import math
a = math.exp(x)
b = math.exp(-x)
return ((a-b)/(a+b))
"""
    getSigmoidActivation(x="the instant independent value from which you want to know the dependent Sigmoid value/result")
This method calculates and returns the Sigmoid function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
def getSigmoidActivation(self, x):
import math
return (1/(1+math.exp(-x)))
"""
getRaiseToTheSecondPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheSecondPowerActivation(self, x):
return x*x
"""
getRaiseToTheSecondPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getRaiseToTheSecondPowerDerivative(self, x):
return 2*x
"""
getRaiseToTheThirdPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheThirdPowerActivation(self, x):
return x*x*x
"""
getRaiseToTheThirdPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getRaiseToTheThirdPowerDerivative(self, x):
return 3*x*x
"""
getRaiseToTheFourthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheFourthPowerActivation(self, x):
return x*x*x*x
"""
getRaiseToTheFourthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getRaiseToTheFourthPowerDerivative(self, x):
return 4*x*x*x
"""
getRaiseToTheFifthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheFifthPowerActivation(self, x):
return x*x*x*x*x
"""
getRaiseToTheFifthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function evaluated at the independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheFifthPowerDerivative(self, x):
return 5*x*x*x*x
"""
getRaiseToTheSixthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheSixthPowerActivation(self, x):
return x*x*x*x*x*x
"""
getRaiseToTheSixthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getRaiseToTheSixthPowerDerivative(self, x):
return 6*x*x*x*x*x
"""
getExponentialActivation(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the Exponential-Euler function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getExponentialActivation(self, x):
import math
return math.exp(x)
"""
getExponentialDerivative(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
    This method calculates and returns the derivative of the
    Exponential-Euler function evaluated at the independent value that you
    give in the "x" local variable of this method.
"""
def getExponentialDerivative(self, x):
import math
return math.exp(x)
"""
getSingleArtificialNeuron(activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your neuron to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your neuron to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates a single Artificial Neuron and, within this method,
such neuron trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[[28.235246103419946],
[1.12749544645359],
[-1.7353168202914326],
[0.7285727543658252]]
accuracyFromTraining =
95.06995458954695
predictedData =
[[28.868494779855514],
[32.80418405006583],
[25.89997715314427],
[38.25484973427189],
[16.295874460357858],
[26.67205741761012],
[27.198762118476985],
[26.859066716794352],
[31.50391014224514],
[26.42881371215305],
[38.14632853395502],
[30.297502725191123],
[26.929105800646223]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getSingleArtificialNeuron(self, activationFunction='sigmoid', learningRate=1, numberOfEpochs=1000, stopTrainingIfAcurracy=95, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True):
if ((activationFunction!='none') and (activationFunction!='sigmoid') and (activationFunction!='relu') and (activationFunction!='tanh') and (activationFunction!='raiseTo2ndPower') and (activationFunction!='raiseTo3rdPower') and (activationFunction!='raiseTo4thPower') and (activationFunction!='raiseTo5thPower') and (activationFunction!='raiseTo6thPower') and (activationFunction!='exponential')):
raise Exception('ERROR: The selected Activation Function does not exist or has not been programmed in this method yet.')
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
# from . import MortrackML_Library as mSL
# import math
import random
numberOfIndependentRows= len(self.x_samplesList)
numberOfIndependentVariables = len(self.x_samplesList[0])
matrix_x = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
temporalRow.append(1)
for column in range(0, numberOfIndependentVariables):
temporalRow.append(self.x_samplesList[row][column])
matrix_x.append(temporalRow)
matrix_y = self.y_samplesList
# We innitialize the weight vector random values from -1 up to +1
if (isCustomizedInitialWeights == False):
matrix_w = []
for row in range(0, numberOfIndependentVariables+1): # bias + w vector
temporalRow = []
temporalRow.append(random.random()*2-1)
matrix_w.append(temporalRow)
firstMatrix_w = matrix_w
else:
matrix_w = firstMatrix_w
# We calculate the results obtained with the innitialized random weight
# vector
matrixMath = mLAL.MatrixMath()
Fx = matrixMath.getDotProduct(matrix_x, matrix_w)
Fz = []
dFz = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
if (activationFunction == 'none'):
current_Fz = Fx[row][0]
if (activationFunction == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[row][0])
if (activationFunction == 'relu'):
current_Fz = self.getReluActivation(Fx[row][0])
if (activationFunction == 'tanh'):
current_Fz = self.getTanhActivation(Fx[row][0])
if (activationFunction == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[row][0])
if (activationFunction == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[row][0])
temporalRow.append(current_Fz)
Fz.append(temporalRow)
temporalRow = []
if (activationFunction == 'none'):
if (current_Fz != 0):
temporalRow.append(1)
else:
temporalRow.append(0)
if (activationFunction == 'sigmoid'):
temporalRow.append(current_Fz*(1-current_Fz))
if (activationFunction == 'relu'):
temporalRow.append(self.getReluActivationDerivative(Fx[row][0]))
if (activationFunction == 'tanh'):
temporalRow.append(1-current_Fz**2)
if (activationFunction == 'raiseTo2ndPower'):
temporalRow.append(self.getRaiseToTheSecondPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo3rdPower'):
temporalRow.append(self.getRaiseToTheThirdPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo4thPower'):
temporalRow.append(self.getRaiseToTheFourthPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo5thPower'):
temporalRow.append(self.getRaiseToTheFifthPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo6thPower'):
temporalRow.append(self.getRaiseToTheSixthPowerDerivative(Fx[row][0]))
if (activationFunction == 'exponential'):
temporalRow.append(self.getExponentialDerivative(Fx[row][0]))
dFz.append(temporalRow)
# We evaluate the performance of the innitialized weight vectors
predictionAcurracy = 0
predictedData = Fz
numberOfDataPoints = numberOfIndependentRows
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_w)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(Fz)
bestModelingResults.append(firstMatrix_w)
bestModelingResults.append("Coefficients distribution is as follows:\nmodelCoefficients =\n[\n [Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]\n]\n")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
allAccuracies.append(temporalRow)
# ---------------------------------------------------------------- #
# ----- WE START THE TRAINING PROCESS FROM EPOCH 2 AND ABOVE ----- #
# ---------------------------------------------------------------- #
# 2nd Epoch
# Djtotal_Dresult = [] # = expected - predicted
Dresult_Dsum = dFz # = dFz
# Dsum_Dw = matrix_y # = expected result
Djtotal_Dw = [] # = (Djtotal_Dresult)*(Dresult_Dsum)*(Dsum_Dw)
for row in range(0, numberOfIndependentRows):
temporalRow = []
current_Djtotal_Dresult = matrix_y[row][0]-Fz[row][0]
#temporalRow.append(Djtotal_Dresult[row][0]*Dresult_Dsum[row][0]*Dsum_Dw[row][0])
temporalRow.append(current_Djtotal_Dresult*Dresult_Dsum[row][0])
Djtotal_Dw.append(temporalRow)
transposedMatrix_x = matrixMath.getTransposedMatrix(matrix_x)
learningValue = matrixMath.getDotProduct(transposedMatrix_x, Djtotal_Dw)
newMatrix_w = []
for row in range(0, numberOfIndependentVariables+1): # bias + w vector
temporalRow = []
temporalRow.append(matrix_w[row][0]+learningRate*learningValue[row][0])
newMatrix_w.append(temporalRow)
# 3rd Epoch and above
for currentEpoch in range(1, numberOfEpochs):
print('Current Epoch = ' + format(currentEpoch))
# ----- Predict the output values with latest weight vector ----- #
currentMatrix_w = newMatrix_w
Fx = matrixMath.getDotProduct(matrix_x, currentMatrix_w)
Fz = []
dFz = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
if (activationFunction == 'none'):
current_Fz = Fx[row][0]
if (activationFunction == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[row][0])
if (activationFunction == 'relu'):
current_Fz = self.getReluActivation(Fx[row][0])
if (activationFunction == 'tanh'):
current_Fz = self.getTanhActivation(Fx[row][0])
if (activationFunction == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[row][0])
if (activationFunction == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[row][0])
temporalRow.append(current_Fz)
Fz.append(temporalRow)
temporalRow = []
if (activationFunction == 'none'):
if (current_Fz != 0):
temporalRow.append(1)
else:
temporalRow.append(0)
if (activationFunction == 'sigmoid'):
temporalRow.append(current_Fz*(1-current_Fz))
if (activationFunction == 'relu'):
temporalRow.append(self.getReluActivationDerivative(Fx[row][0]))
if (activationFunction == 'tanh'):
temporalRow.append(1-current_Fz**2)
if (activationFunction == 'raiseTo2ndPower'):
temporalRow.append(self.getRaiseToTheSecondPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo3rdPower'):
temporalRow.append(self.getRaiseToTheThirdPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo4thPower'):
temporalRow.append(self.getRaiseToTheFourthPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo5thPower'):
temporalRow.append(self.getRaiseToTheFifthPowerDerivative(Fx[row][0]))
if (activationFunction == 'raiseTo6thPower'):
temporalRow.append(self.getRaiseToTheSixthPowerDerivative(Fx[row][0]))
if (activationFunction == 'exponential'):
temporalRow.append(self.getExponentialDerivative(Fx[row][0]))
dFz.append(temporalRow)
# ----- Get improved and new weigth vector ----- #
# Djtotal_Dresult = [] # = expected - predicted
Dresult_Dsum = dFz # = dFz
# Dsum_Dw = matrix_y # = expected result
Djtotal_Dw = [] # = (Djtotal_Dresult)*(Dresult_Dsum)*(Dsum_Dw)
for row in range(0, numberOfIndependentRows):
temporalRow = []
current_Djtotal_Dresult = matrix_y[row][0]-Fz[row][0]
#temporalRow.append(Djtotal_Dresult[row][0]*Dresult_Dsum[row][0]*Dsum_Dw[row][0])
temporalRow.append(current_Djtotal_Dresult*Dresult_Dsum[row][0])
Djtotal_Dw.append(temporalRow)
transposedMatrix_x = matrixMath.getTransposedMatrix(matrix_x)
learningValue = matrixMath.getDotProduct(transposedMatrix_x, Djtotal_Dw)
newMatrix_w = []
for row in range(0, numberOfIndependentVariables+1): # bias + w vector
temporalRow = []
temporalRow.append(currentMatrix_w[row][0]+learningRate*learningValue[row][0])
newMatrix_w.append(temporalRow)
# ----- We save the current weight vector performance ----- #
Fx = matrixMath.getDotProduct(matrix_x, newMatrix_w)
Fz = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
if (activationFunction == 'none'):
current_Fz = Fx[row][0]
if (activationFunction == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[row][0])
if (activationFunction == 'relu'):
current_Fz = self.getReluActivation(Fx[row][0])
if (activationFunction == 'tanh'):
current_Fz = self.getTanhActivation(Fx[row][0])
if (activationFunction == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[row][0])
if (activationFunction == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[row][0])
if (activationFunction == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[row][0])
temporalRow.append(current_Fz)
Fz.append(temporalRow)
predictionAcurracy = 0
predictedData = Fz
numberOfDataPoints = numberOfIndependentRows
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(newMatrix_w)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(newMatrix_w)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append(firstMatrix_w)
bestModelingResults.append("Coefficients distribution is as follows:\nmodelCoefficients =\n[\n [Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]\n]\n")
if (predictionAcurracy > stopTrainingIfAcurracy):
break
# Alongside the information of the best model obtained, we add the
# modeled information of ALL the models obtained to the variable that
# we will return in this method
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the weights of a particular neuron from a variable that contains all the
weights of all neurons (matrix_w).
"""
def getANNweightVectorForOneNeuron(self, matrix_w, neuronNumber):
temporalRow = []
for column in range(0, len(matrix_w[neuronNumber])):
temporalRow.append(matrix_w[neuronNumber][column])
temporalRow = [temporalRow]
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
return matrixMath.getTransposedMatrix(temporalRow)
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the partial derivative of the Total Error (dEtotal) due respect with the
partial derivative of the corresponding Activation Function (dFz) for a
particular neuron within an Artificial Neural Network.
"""
    def getCurrentDetotal_DFz(self, allMatrix_w, allMatrix_dFz, positionOfNeuronOfCurrentLayer, Detotal_DFzy):
        """
        Backpropagation helper used by getArtificialNeuralNetwork().

        Computes the partial derivative of the total error (Etotal) with
        respect to the activation output (Fz) of the neuron located at
        positionOfNeuronOfCurrentLayer, by chaining weights and activation
        derivatives along every path that connects that neuron to each
        neuron of the final layer.

        Index conventions (kept from the callers):
            allMatrix_w[Layer_n][Neuron_n][row=weight_n][column=0]
            allMatrix_dFz[Layer_n][Neuron_n][row=sample_n_dFzResult][column=0]
            Detotal_DFzy[Neuron_n_OfFinalLayer][DerivateOfsample_n][column=0]

        Returns Detotal_DFz: one single-value row per
        (sample, final-layer-neuron) combination, in sample-major order.
        """
        # Detotal_DFzy[Neuron_n_OfFinalLayer][DerivateOfsample_n][column=0]
        # allMatrix_w[Layer_n][Neuron_n][row=weight_n][column=0]
        # allMatrix_dFz[Layer_n][Neuron_n][row=sample_n_dFzResult][column=0]
        # Build a copy of allMatrix_w without the bias terms: layer 0 entries
        # become neutral 1s, later layers keep only weights 1..N.
        newAllMatrix_w = []
        for currentLayer in range(0, len(allMatrix_w)):
            temporalLayer = []
            for currentNeuronOfCurrentLayer in range(0, len(allMatrix_w[currentLayer])):
                temporalNeuron = []
                if (currentLayer == 0):
                    # Layer 0 is the starting point of every path, so its
                    # "weights" are set to the multiplicative identity 1.
                    for currentWeight in range(0, len(allMatrix_w[currentLayer][currentNeuronOfCurrentLayer])):
                        temporalNeuron.append([1])
                else:
                    # Skip index 0 (the bias) and keep the real weights only.
                    for currentWeight in range(1, len(allMatrix_w[currentLayer][currentNeuronOfCurrentLayer])):
                        temporalNeuron.append(allMatrix_w[currentLayer][currentNeuronOfCurrentLayer][currentWeight])
                temporalLayer.append(temporalNeuron)
            newAllMatrix_w.append(temporalLayer)
        numberOfSamples = len(allMatrix_dFz[0][0])
        # We created a new matrix that contains all the data of "allMatrix_w"
        # but without the bias values and only containing the weight values
        # newAllMatrix_w[Layer_n][Neuron_n][row=weight_n][column=0] # But without bias
        numberOfLayers = len(newAllMatrix_w)
        accumulatedWeightCombinations = []
        for current_dFz in range(0, numberOfSamples):
            layerCalculations = [] # [Layer_n][neuron_n][accumulatedMultiplicationOfWeights]
            temporalRow = []
            # Seed the path products: 0 for every neuron before the neuron
            # of interest, 1 for the neuron of interest itself.
            for cNOCL in range(0, positionOfNeuronOfCurrentLayer):
                temporalRow.append([0])
            temporalRow.append([1])
            layerCalculations.append(temporalRow)
            # We initialize the variable "layerCalculations" to use in the
            # calculations of the weight combinations
            for currentLayer in range(1, numberOfLayers):
                numberOfNeuronsInCurrentLayer = len(newAllMatrix_w[currentLayer])
                temporalLayer = []
                for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsInCurrentLayer):
                    numberOfNeuronsInPastLayer = len(layerCalculations[len(layerCalculations)-1])
                    temporalRow = []
                    for currentNeuronOfPastLayer in range(0, numberOfNeuronsInPastLayer):
                        for currentPreviousLayerCalculation in range(0, len(layerCalculations[len(layerCalculations)-1][currentNeuronOfPastLayer])):
                            # Extend each partial path product with the weight
                            # linking the past-layer neuron to this neuron,
                            # times this neuron's activation derivative.
                            current_aWC = layerCalculations[len(layerCalculations)-1][currentNeuronOfPastLayer][currentPreviousLayerCalculation] * newAllMatrix_w[currentLayer][currentNeuronOfCurrentLayer][currentNeuronOfPastLayer][0] * allMatrix_dFz[currentLayer][currentNeuronOfCurrentLayer][current_dFz][0]
                            if (currentLayer == (numberOfLayers-1)):
                                # Final layer: also scale by dEtotal/dFz of
                                # the corresponding output neuron.
                                current_aWC = current_aWC * Detotal_DFzy[currentNeuronOfCurrentLayer][current_dFz][0]
                            temporalRow.append(current_aWC)
                    temporalLayer.append(temporalRow)
                layerCalculations.append(temporalLayer)
            accumulatedWeightCombinations.append(layerCalculations)
        # We now get the values of the accumulated Weight Combinations but for
        # each weight value of the current neuron that we are evaluating
        Detotal_DFz = []
        # accumulatedWeightCombinations[derivateOfCurrentSample][Layer_n][Neuron_n][accumulatedWeightCombinations]
        for current_dFz_Sample in range(0, len(accumulatedWeightCombinations)):
            lastLayer = len(accumulatedWeightCombinations[current_dFz_Sample])-1
            for curentNeuronOfFinalLayer in range(0, len(accumulatedWeightCombinations[current_dFz_Sample][lastLayer])):
                temporalRow = []
                temporalValue = 0
                # Sum every accumulated path product that ends in this
                # final-layer neuron for the current sample.
                for currentAccumulatedWeightCombinations in range(0, len(accumulatedWeightCombinations[current_dFz_Sample][lastLayer][0])):
                    temporalValue = temporalValue + accumulatedWeightCombinations[current_dFz_Sample][lastLayer][curentNeuronOfFinalLayer][currentAccumulatedWeightCombinations]
                temporalRow.append(temporalValue)
                Detotal_DFz.append(temporalRow)
        return Detotal_DFz
"""
getArtificialNeuralNetwork(artificialNeuralNetworkDistribution="must contain an array that indicates the distribution of the desired neurons for each layer in columns. If a row-column value equals 1, this will mean that you want a neuron in that position. A 0 means otherwise",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable artificialNeuralNetworkDistribution. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your Artificial Neural Network to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your Artificial Neural Network to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates an Artificial Neural Network with a customized desired
number of neurons within it and, within this method, such Artificial Neural
Network trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResultForOutputNeuron1, expectedResultForOutputNeuron2]
matrix_y = [
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 0]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[1,1,1]
]
aF = [
['relu', 'relu', 'sigmoid'],
['relu', 'relu', 'sigmoid']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.1, numberOfEpochs=10000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[
[2.133298325032156, -0.45548307884431677, -2.1332978269534664, -2.1332978292080043],
[2.287998188065245, 1.3477978318721369, -1.143999014059006, -1.1439990110690932],
[-0.6930287605411998, 0.41058709282271444, 0.6057943758418374],
[4.6826225603458056e-08, -1.8387485390712266, 2.2017181913306803],
[-4.1791269585765285, -2.5797524896448563, 3.3885776200605955],
[4.181437529101815, 2.5824655964639742, -3.3907451300458136]
]
accuracyFromTraining =
98.94028954483407
predictedData =
[[0.011560111421083964, 0.9884872182827878],
[0.9873319964204451, 0.01262867979045398],
[0.9873319961998808, 0.012628680010459043],
[0.015081447917016324, 0.9849528347708301],
[0.9989106156594524, 0.0010867877109744279]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM],
[Neuron2_bias, Neuron2_weight1, Neuron2_weight2, ... , Neuron2_weightZ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[NeuronN_bias, NeuronN_weight1, NeuronN_weight2, ... , NeuronN_weightK],
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getArtificialNeuralNetwork(self, artificialNeuralNetworkDistribution, activationFunction, learningRate=1, numberOfEpochs=1000, stopTrainingIfAcurracy=95, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True):
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
from . import MortrackML_Library as mSL
import random
numberOfIndependentRows= len(self.x_samplesList)
numberOfIndependentVariables = len(self.x_samplesList[0])
numberOfNeuronLayers = len(artificialNeuralNetworkDistribution[0])
numberOfNeuronsPerLayer = []
activationFunctionsList = []
totalNumberOfNeurons = 0
matrixMath = mLAL.MatrixMath()
transposedANND = matrixMath.getTransposedMatrix(artificialNeuralNetworkDistribution)
transposedAF = matrixMath.getTransposedMatrix(activationFunction)
for row in range(0, len(transposedANND)):
currentNumberOfNeurons = 0
for column in range(0, len(transposedANND[0])):
if (transposedANND[row][column] == 1):
currentNumberOfNeurons = currentNumberOfNeurons + 1
activationFunctionsList.append(transposedAF[row][column])
temporalRow = []
temporalRow.append(currentNumberOfNeurons)
numberOfNeuronsPerLayer.append(temporalRow)
totalNumberOfNeurons = totalNumberOfNeurons + currentNumberOfNeurons
numberOfNeuronsPerLayer = matrixMath.getTransposedMatrix(numberOfNeuronsPerLayer)
activationFunctionsList = [activationFunctionsList]
numberOfNeuronsInFinalLayer = numberOfNeuronsPerLayer[0][len(numberOfNeuronsPerLayer[0])-1]
for column in range(0, numberOfNeuronLayers):
for row in range(0, numberOfNeuronsPerLayer[0][column]):
if ((activationFunction[row][column]!='none') and (activationFunction[row][column]!='sigmoid') and (activationFunction[row][column]!='relu') and (activationFunction[row][column]!='tanh') and (activationFunction[row][column]!='raiseTo2ndPower') and (activationFunction[row][column]!='raiseTo3rdPower') and (activationFunction[row][column]!='raiseTo4thPower') and (activationFunction[row][column]!='raiseTo5thPower') and (activationFunction[row][column]!='raiseTo6thPower') and (activationFunction[row][column]!='exponential')):
raise Exception('ERROR: The selected Activation Function does not exist or has not been programmed in this method yet.')
totalNumberOfLayers = len(numberOfNeuronsPerLayer[0])
matrix_x = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
temporalRow.append(1)
for column in range(0, numberOfIndependentVariables):
temporalRow.append(self.x_samplesList[row][column])
matrix_x.append(temporalRow)
matrix_y = self.y_samplesList
# We innitialize the weight vector random values from -1 up to +1
if (isCustomizedInitialWeights == False):
matrix_w = []
for currentLayer in range(0, totalNumberOfLayers):
for column in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow = []
if (currentLayer == 0):
for row in range(0, numberOfIndependentVariables+1):
temporalRow.append(random.random()*2-1)
else:
for row in range(0, numberOfNeuronsPerLayer[0][currentLayer-1]+1):
temporalRow.append(random.random()*2-1)
matrix_w.append(temporalRow)
firstMatrix_w = matrix_w
else:
matrix_w = firstMatrix_w
# We calculate the results obtained with the innitialized random weight
# vector (We calculate the matrixes for Fx, Fz and dFz)
Fx = []
Fz = []
dFz = []
actualFunctionActivation = 0
for currentLayer in range(0, totalNumberOfLayers):
temporalRow1 = []
if (currentLayer == 0):
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(matrix_x, self.getANNweightVectorForOneNeuron(matrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = []
temporalRow2= []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer][0][column])
# Derivates (dFz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
if (current_Fz != 0):
current_dFz = 1
else:
current_dFz = 0
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_dFz = current_Fz*(1-current_Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_dFz = self.getReluActivationDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_dFz = 1-current_Fz**2
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_dFz = self.getRaiseToTheSecondPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_dFz = self.getRaiseToTheThirdPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_dFz = self.getRaiseToTheFourthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_dFz = self.getRaiseToTheFifthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_dFz = self.getRaiseToTheSixthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_dFz = self.getExponentialDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
temporalRow2.append(current_dFz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
dFz.append(temporalRow2)
else:
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer-1):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
inputMatrix = []
for row in range(0, numberOfIndependentRows):
temporalRow1 = []
temporalRow1.append(1) # bias column
for currentNeuron in range(pastNeuronOfCurrentLayer, pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]):
temporalRow1.append(Fz[currentNeuron][row])
inputMatrix.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1], pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]+numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(inputMatrix, self.getANNweightVectorForOneNeuron(matrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1= []
temporalRow2= []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
# Derivates (dFz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
if (current_Fz != 0):
current_dFz = 1
else:
current_dFz = 0
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_dFz = current_Fz*(1-current_Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_dFz = self.getReluActivationDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_dFz = 1-current_Fz**2
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_dFz = self.getRaiseToTheSecondPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_dFz = self.getRaiseToTheThirdPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_dFz = self.getRaiseToTheFourthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_dFz = self.getRaiseToTheFifthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_dFz = self.getRaiseToTheSixthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_dFz = self.getExponentialDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
temporalRow2.append(current_dFz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
dFz.append(temporalRow2)
# We evaluate the performance of the innitialized weight vectors
predictionAcurracy = 0
predictedData = []
for currentNeuronOfLastLayer in range(0, numberOfNeuronsInFinalLayer):
predictedData.append(Fz[totalNumberOfNeurons-numberOfNeuronsInFinalLayer+currentNeuronOfLastLayer])
predictedData = matrixMath.getTransposedMatrix(predictedData)
numberOfDataPoints = numberOfIndependentRows*numberOfNeuronsInFinalLayer
for currentNeuronOfLastLayer in range(0, numberOfNeuronsInFinalLayer):
for row in range(0, numberOfIndependentRows):
n2 = self.y_samplesList[row][currentNeuronOfLastLayer]
n1 = predictedData[row][currentNeuronOfLastLayer]
if (isClassification == False):
if (((n1*n2) != 0)):
#if (((n1*n2) > 0) and (n1!=n2)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
#predictionAcurracy = predictionAcurracy + (n1/n2)
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][currentNeuronOfLastLayer]
n1 = self.y_samplesList[row][currentNeuronOfLastLayer]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
#if ((n1==n2) and (n1==0)):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
# We save the current the modeling results
bestModelingResults = []
bestModelingResults.append(matrix_w)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append(firstMatrix_w)
bestModelingResults.append("Coefficients distribution is as follows:\nmodelCoefficients =\n[\n [Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM],\n [Neuron2_bias, Neuron2_weight1, Neuron2_weight2, ... , Neuron2_weightZ],\n [ . , . , . , ... , . ],\n [ . , . , . , ... , . ],\n [ . , . , . , ... , . ],\n [NeuronN_bias, NeuronN_weight1, NeuronN_weight2, ... , NeuronN_weightK],\n]\n")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
allAccuracies.append(temporalRow)
# ---------------------------------------------------------------- #
# ----- WE START THE TRAINING PROCESS FROM EPOCH 2 AND ABOVE ----- #
# ---------------------------------------------------------------- #
# 2nd Epoch
temporalMatrixOfMatrix_w = []
Detotal_DFzy = [] # Detotal_DFzy[Neuron_n][sample_n][column=0]
for currentLayer in range(0, totalNumberOfLayers):
trueCurrentLayer = totalNumberOfLayers-currentLayer
pastNeuronsOfCurrentLayer = 0
for currentLayerCount in range(0, (trueCurrentLayer-1)):
pastNeuronsOfCurrentLayer = pastNeuronsOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-1]):
trueCurrentNeuronOfCurrentLayer = numberOfNeuronsPerLayer[0][trueCurrentLayer-1]-1-currentNeuronOfCurrentLayer
if (currentLayer == 0):
# ----- We first update the weights of the output neuron ----- #
Detotal_Dfz = [] # = predicted - expected
expectedOutput = matrix_y
predictedOutput = []
Dfz_Df = [] # = dFz
predictedOutput.append(Fz[pastNeuronsOfCurrentLayer+trueCurrentNeuronOfCurrentLayer])
Dfz_Df.append(dFz[pastNeuronsOfCurrentLayer+trueCurrentNeuronOfCurrentLayer])
predictedOutput = matrixMath.getTransposedMatrix(predictedOutput)
Dfz_Df = matrixMath.getTransposedMatrix(Dfz_Df)
Df_Dw = []
Detotal_Dw = [] # = (Detotal_Dfz)*(Dfz_Df)*(Df_Dw)
# We calculate "Detotal_Dfz"
for row in range(0, numberOfIndependentRows):
temporalRow = []
temporalRow.append(predictedOutput[row][0] - expectedOutput[row][trueCurrentNeuronOfCurrentLayer])
Detotal_Dfz.append(temporalRow)
# We calculate "Df_Dw"
if (totalNumberOfLayers == 1):
Df_Dw = matrixMath.getTransposedMatrix(matrix_x)
else:
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-2):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
temporalRow = []
for currentBiasDerivate in range(0, numberOfIndependentRows):
temporalRow.append(1)
Df_Dw.append(temporalRow)
for currentNeuronOfPastLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-2]):
Df_Dw.append(dFz[temporalNeuronsAnalized+currentNeuronOfPastLayer])
# We calculate "Detotal_Dw"
Detotal_Dfz_TIMES_Dfz_Df = []
for currentSample in range(0, len(Detotal_Dfz)):
Detotal_Dfz_TIMES_Dfz_Df.append([Detotal_Dfz[currentSample][0] * Dfz_Df[currentSample][0]])
Detotal_Dw = matrixMath.getDotProduct(Df_Dw, Detotal_Dfz_TIMES_Dfz_Df)
# We finnally update the weight values of the last neuron
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
currentVector_w = matrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
temporalRow = []
for currentWeight in range(0, len(matrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer])):
temporalRow.append(currentVector_w[currentWeight][0]-learningRate*Detotal_Dw[currentWeight][0])
temporalMatrixOfMatrix_w.append(temporalRow)
Detotal_DFzy.append(Detotal_Dfz_TIMES_Dfz_Df)
else:
fixed_Detotal_DFzy = []
for row in range(0, numberOfNeuronsInFinalLayer):
fixed_Detotal_DFzy.append(Detotal_DFzy[numberOfNeuronsInFinalLayer-row-1])
# ----- We Now update the weights of the other neurons ----- #
Detotal_Dfz = [] # = predicted - expected
Df_Dw = []
Detotal_Dw = [] # = (Detotal_Dfz)*(Dfz_Df)*(Df_Dw)
# We calculate "Detotal_Dfz"
# trueCurrentLayer
Detotal_Dfz = Detotal_DFzy
# We create a temporal matrix for "matrix_w" and "matrix_dFz"
# to just re-arrange the structure of how both matrixes
# have their actual data. This is needed to then use the
# method "self.getCurrentDetotal_DFz()" and to get
# Detotal_Dfz through such method.
temporalMatrix_w = []
temporalDfzMatrix = []
# Este loop se repite N veces = "penultima layer" - "1 layer adelante de la actual"
for currentFurtherLayer in range(trueCurrentLayer, totalNumberOfLayers):
# "temporalNeuronsAnalized" tiene el numero de neuronas que hay en todas las layers anteriores a la actual
if (len(temporalMatrix_w) == 0):
temporalRow = []
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# We get "Dfz_Df" of the neuron that will improve its weights
Dfz_Df = dFz[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer] # = dFz
# We get the weights of the neuron that will improve its weights
currentVector_w = matrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalMatrix_w.append(temporalRow)
# We plug in the dFz of the last neuron
temporalRow = []
currentVector_w = dFz[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalDfzMatrix.append(temporalRow)
# dFz de donde viene la actual weight o del independent variable en caso de tratarse de la 1ra layer
# We calculate "Df_Dw"
if (trueCurrentLayer == 1):
Df_Dw = matrixMath.getTransposedMatrix(matrix_x)
else:
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-2):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
temporalRow = []
for currentBiasDerivate in range(0, numberOfIndependentRows):
temporalRow.append(1) # bias derivate
Df_Dw.append(temporalRow)
for currentPastNeuronOfPastLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-2]):
Df_Dw.append(dFz[temporalNeuronsAnalized+currentPastNeuronOfPastLayer])
temporalRow = []
temporalNeuronsAnalized = 0
for n in range(0, currentFurtherLayer):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# Este loop se repite N veces = numero de neuronas en la actual layer (la cual empieza a partir de la layer futura / posterior)
for currentFutherNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentFurtherLayer]):
currentVector_w = matrix_w[temporalNeuronsAnalized+currentFutherNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalMatrix_w.append(temporalRow)
temporalRow = []
# Este loop se repite N veces = numero de neuronas en la actual layer (la cual empieza a partir de la layer futura / posterior)
for currentFutherNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentFurtherLayer]):
currentVector_w = dFz[temporalNeuronsAnalized+currentFutherNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalDfzMatrix.append(temporalRow)
# Detotal_DFzy[DerivateOfsample_n][column=0]
Detotal_Dfz = self.getCurrentDetotal_DFz(temporalMatrix_w, temporalDfzMatrix, trueCurrentNeuronOfCurrentLayer, fixed_Detotal_DFzy)
# We calculate "Detotal_Dw"
Detotal_Dfz_TIMES_Dfz_Df = []
for currentSample in range(0, len(Detotal_Dfz)):
Detotal_Dfz_TIMES_Dfz_Df.append([Detotal_Dfz[currentSample][0] * Dfz_Df[currentSample]])
Detotal_Dw = matrixMath.getDotProduct(Df_Dw, Detotal_Dfz_TIMES_Dfz_Df)
# We finnally update the weight values of the last neuron
# temporalMatrixOfMatrix_w = []
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# We get the weights of the neuron that will improve its weights
currentVector_w = matrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
temporalRow = []
for currentWeight in range(0, len(matrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer])):
temporalRow.append(currentVector_w[currentWeight][0]-learningRate*Detotal_Dw[currentWeight][0])
temporalMatrixOfMatrix_w.append(temporalRow)
# We reorder the new obtained weights but accordingly to the neurons
# order (from neuron 1 to neuron "N") in variable "newMatrix_w"
newMatrix_w = []
for row in range(0, totalNumberOfNeurons):
newMatrix_w.append(temporalMatrixOfMatrix_w[totalNumberOfNeurons-row-1])
# 3rd Epoch and above
for currentEpoch in range(1, numberOfEpochs):
print('Current Epoch = ' + format(currentEpoch))
# ----- Predict the output values with latest weight vector ----- #
currentMatrix_w = newMatrix_w
Fx = []
Fz = []
dFz = []
actualFunctionActivation = 0
for currentLayer in range(0, totalNumberOfLayers):
temporalRow1 = []
if (currentLayer == 0):
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(matrix_x, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = []
temporalRow2= []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer][0][column])
# Derivates (dFz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
if (current_Fz != 0):
current_dFz = 1
else:
current_dFz = 0
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_dFz = current_Fz*(1-current_Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_dFz = self.getReluActivationDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_dFz = 1-current_Fz**2
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_dFz = self.getRaiseToTheSecondPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_dFz = self.getRaiseToTheThirdPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_dFz = self.getRaiseToTheFourthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_dFz = self.getRaiseToTheFifthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_dFz = self.getRaiseToTheSixthPowerDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_dFz = self.getExponentialDerivative(Fx[currentNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
temporalRow2.append(current_dFz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
dFz.append(temporalRow2)
else:
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer-1):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
inputMatrix = []
for row in range(0, numberOfIndependentRows):
temporalRow1 = []
temporalRow1.append(1) # bias column
for currentNeuron in range(pastNeuronOfCurrentLayer, pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]):
temporalRow1.append(Fz[currentNeuron][row])
inputMatrix.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1], pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]+numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(inputMatrix, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1= []
temporalRow2= []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
# Derivates (dFz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
if (current_Fz != 0):
current_dFz = 1
else:
current_dFz = 0
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_dFz = current_Fz*(1-current_Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_dFz = self.getReluActivationDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_dFz = 1-current_Fz**2
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_dFz = self.getRaiseToTheSecondPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_dFz = self.getRaiseToTheThirdPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_dFz = self.getRaiseToTheFourthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_dFz = self.getRaiseToTheFifthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_dFz = self.getRaiseToTheSixthPowerDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_dFz = self.getExponentialDerivative(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
temporalRow2.append(current_dFz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
dFz.append(temporalRow2)
# ----- Get improved and new weigth vector ----- #
temporalMatrixOfMatrix_w = []
Detotal_DFzy = [] # Detotal_DFzy[Neuron_n][sample_n][column=0]
for currentLayer in range(0, totalNumberOfLayers):
trueCurrentLayer = totalNumberOfLayers-currentLayer
pastNeuronsOfCurrentLayer = 0
for currentLayerCount in range(0, (trueCurrentLayer-1)):
pastNeuronsOfCurrentLayer = pastNeuronsOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-1]):
trueCurrentNeuronOfCurrentLayer = numberOfNeuronsPerLayer[0][trueCurrentLayer-1]-1-currentNeuronOfCurrentLayer
if (currentLayer == 0):
# ----- We first update the weights of the output neuron ----- #
Detotal_Dfz = [] # = predicted - expected
expectedOutput = matrix_y
predictedOutput = []
Dfz_Df = [] # = dFz
predictedOutput.append(Fz[pastNeuronsOfCurrentLayer+trueCurrentNeuronOfCurrentLayer])
Dfz_Df.append(dFz[pastNeuronsOfCurrentLayer+trueCurrentNeuronOfCurrentLayer])
predictedOutput = matrixMath.getTransposedMatrix(predictedOutput)
Dfz_Df = matrixMath.getTransposedMatrix(Dfz_Df)
Df_Dw = []
Detotal_Dw = [] # = (Detotal_Dfz)*(Dfz_Df)*(Df_Dw)
# We calculate "Detotal_Dfz"
for row in range(0, numberOfIndependentRows):
temporalRow = []
temporalRow.append(predictedOutput[row][0] - expectedOutput[row][trueCurrentNeuronOfCurrentLayer])
Detotal_Dfz.append(temporalRow)
# We calculate "Df_Dw"
if (totalNumberOfLayers == 1):
Df_Dw = matrixMath.getTransposedMatrix(matrix_x)
else:
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-2):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
temporalRow = []
for currentBiasDerivate in range(0, numberOfIndependentRows):
temporalRow.append(1)
Df_Dw.append(temporalRow)
for currentNeuronOfPastLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-2]):
Df_Dw.append(dFz[temporalNeuronsAnalized+currentNeuronOfPastLayer])
# We calculate "Detotal_Dw"
Detotal_Dfz_TIMES_Dfz_Df = []
for currentSample in range(0, len(Detotal_Dfz)):
Detotal_Dfz_TIMES_Dfz_Df.append([Detotal_Dfz[currentSample][0] * Dfz_Df[currentSample][0]])
Detotal_Dw = matrixMath.getDotProduct(Df_Dw, Detotal_Dfz_TIMES_Dfz_Df)
# We finnally update the weight values of the last neuron
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
currentVector_w = currentMatrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
temporalRow = []
for currentWeight in range(0, len(currentMatrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer])):
temporalRow.append(currentVector_w[currentWeight][0]-learningRate*Detotal_Dw[currentWeight][0])
temporalMatrixOfMatrix_w.append(temporalRow)
Detotal_DFzy.append(Detotal_Dfz_TIMES_Dfz_Df)
else:
fixed_Detotal_DFzy = []
for row in range(0, numberOfNeuronsInFinalLayer):
fixed_Detotal_DFzy.append(Detotal_DFzy[numberOfNeuronsInFinalLayer-row-1])
# ----- We Now update the weights of the other neurons ----- #
Detotal_Dfz = [] # = predicted - expected
Df_Dw = []
Detotal_Dw = [] # = (Detotal_Dfz)*(Dfz_Df)*(Df_Dw)
# We calculate "Detotal_Dfz"
# trueCurrentLayer
Detotal_Dfz = Detotal_DFzy
# We create a temporal matrix for "currentMatrix_w" and "matrix_dFz"
# to just re-arrange the structure of how both matrixes
# have their actual data. This is needed to then use the
# method "self.getCurrentDetotal_DFz()" and to get
# Detotal_Dfz through such method.
temporalMatrix_w = []
temporalDfzMatrix = []
# Este loop se repite N veces = "penultima layer" - "1 layer adelante de la actual"
for currentFurtherLayer in range(trueCurrentLayer, totalNumberOfLayers):
# "temporalNeuronsAnalized" tiene el numero de neuronas que hay en todas las layers anteriores a la actual
if (len(temporalMatrix_w) == 0):
temporalRow = []
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# We get "Dfz_Df" of the neuron that will improve its weights
Dfz_Df = dFz[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer] # = dFz
# We get the weights of the neuron that will improve its weights
currentVector_w = currentMatrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalMatrix_w.append(temporalRow)
# We plug in the dFz of the last neuron
temporalRow = []
currentVector_w = dFz[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalDfzMatrix.append(temporalRow)
# dFz de donde viene la actual weight o del independent variable en caso de tratarse de la 1ra layer
# We calculate "Df_Dw"
if (trueCurrentLayer == 1):
Df_Dw = matrixMath.getTransposedMatrix(matrix_x)
else:
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-2):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
temporalRow = []
for currentBiasDerivate in range(0, numberOfIndependentRows):
temporalRow.append(1) # bias derivate
Df_Dw.append(temporalRow)
for currentPastNeuronOfPastLayer in range(0, numberOfNeuronsPerLayer[0][trueCurrentLayer-2]):
Df_Dw.append(dFz[temporalNeuronsAnalized+currentPastNeuronOfPastLayer])
temporalRow = []
temporalNeuronsAnalized = 0
for n in range(0, currentFurtherLayer):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# Este loop se repite N veces = numero de neuronas en la actual layer (la cual empieza a partir de la layer futura / posterior)
for currentFutherNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentFurtherLayer]):
currentVector_w = currentMatrix_w[temporalNeuronsAnalized+currentFutherNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalMatrix_w.append(temporalRow)
temporalRow = []
# Este loop se repite N veces = numero de neuronas en la actual layer (la cual empieza a partir de la layer futura / posterior)
for currentFutherNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentFurtherLayer]):
currentVector_w = dFz[temporalNeuronsAnalized+currentFutherNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
# We plug in all the weight vectors
temporalRow.append(currentVector_w)
temporalDfzMatrix.append(temporalRow)
# Detotal_DFzy[DerivateOfsample_n][column=0]
Detotal_Dfz = self.getCurrentDetotal_DFz(temporalMatrix_w, temporalDfzMatrix, trueCurrentNeuronOfCurrentLayer, fixed_Detotal_DFzy)
# We calculate "Detotal_Dw"
Detotal_Dfz_TIMES_Dfz_Df = []
for currentSample in range(0, len(Detotal_Dfz)):
Detotal_Dfz_TIMES_Dfz_Df.append([Detotal_Dfz[currentSample][0] * Dfz_Df[currentSample]])
Detotal_Dw = matrixMath.getDotProduct(Df_Dw, Detotal_Dfz_TIMES_Dfz_Df)
# We finnally update the weight values of the last neuron
# temporalMatrixOfMatrix_w = []
temporalNeuronsAnalized = 0
for n in range(0, trueCurrentLayer-1):
temporalNeuronsAnalized = temporalNeuronsAnalized + numberOfNeuronsPerLayer[0][n]
# We get the weights of the neuron that will improve its weights
currentVector_w = currentMatrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer]
currentVector_w = matrixMath.getTransposedMatrix([currentVector_w])
temporalRow = []
for currentWeight in range(0, len(currentMatrix_w[temporalNeuronsAnalized+trueCurrentNeuronOfCurrentLayer])):
temporalRow.append(currentVector_w[currentWeight][0]-learningRate*Detotal_Dw[currentWeight][0])
temporalMatrixOfMatrix_w.append(temporalRow)
# We reorder the new obtained weights but accordingly to the neurons
# order (from neuron 1 to neuron "N") in variable "newMatrix_w"
newMatrix_w = []
for row in range(0, totalNumberOfNeurons):
newMatrix_w.append(temporalMatrixOfMatrix_w[totalNumberOfNeurons-row-1])
# ----- We save the current weight vector performance ----- #
Fx = []
Fz = []
actualFunctionActivation = 0
for currentLayer in range(0, totalNumberOfLayers):
temporalRow1 = []
if (currentLayer == 0):
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(matrix_x, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
else:
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer-1):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
inputMatrix = []
for row in range(0, numberOfIndependentRows):
temporalRow1 = []
temporalRow1.append(1) # bias column
for currentNeuron in range(pastNeuronOfCurrentLayer, pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]):
temporalRow1.append(Fz[currentNeuron][row])
inputMatrix.append(temporalRow1)
for currentNeuronOfCurrentLayer in range(pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1], pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]+numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1 = matrixMath.getDotProduct(inputMatrix, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
temporalRow1 = matrixMath.getTransposedMatrix(temporalRow1)
Fx.append(temporalRow1)
pastNeuronOfCurrentLayer = 0
for currentLayerCount in range(0, currentLayer):
pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
temporalRow1= []
for column in range(0, numberOfIndependentRows):
# Activation Functions (Fz)
if (activationFunctionsList[0][actualFunctionActivation] == 'none'):
current_Fz = Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column]
if (activationFunctionsList[0][actualFunctionActivation] == 'sigmoid'):
current_Fz = self.getSigmoidActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'relu'):
current_Fz = self.getReluActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'tanh'):
current_Fz = self.getTanhActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo2ndPower'):
current_Fz = self.getRaiseToTheSecondPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo3rdPower'):
current_Fz = self.getRaiseToTheThirdPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo4thPower'):
current_Fz = self.getRaiseToTheFourthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo5thPower'):
current_Fz = self.getRaiseToTheFifthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'raiseTo6thPower'):
current_Fz = self.getRaiseToTheSixthPowerActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
if (activationFunctionsList[0][actualFunctionActivation] == 'exponential'):
current_Fz = self.getExponentialActivation(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column])
temporalRow1.append(current_Fz)
actualFunctionActivation = actualFunctionActivation + 1
Fz.append(temporalRow1)
# We evaluate the performance of the innitialized weight vectors
predictionAcurracy = 0
predictedData = []
for currentNeuronOfLastLayer in range(0, numberOfNeuronsInFinalLayer):
predictedData.append(Fz[totalNumberOfNeurons-numberOfNeuronsInFinalLayer+currentNeuronOfLastLayer])
predictedData = matrixMath.getTransposedMatrix(predictedData)
numberOfDataPoints = numberOfIndependentRows*numberOfNeuronsInFinalLayer
for currentNeuronOfLastLayer in range(0, numberOfNeuronsInFinalLayer):
for row in range(0, numberOfIndependentRows):
n2 = self.y_samplesList[row][currentNeuronOfLastLayer]
n1 = predictedData[row][currentNeuronOfLastLayer]
if (isClassification == False):
if (((n1*n2) != 0)):
#if (((n1*n2) > 0) and (n1!=n2)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
#predictionAcurracy = predictionAcurracy + (n1/n2)
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][currentNeuronOfLastLayer]
n1 = self.y_samplesList[row][currentNeuronOfLastLayer]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
#if ((n1==n2) and (n1==0)):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(newMatrix_w)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(newMatrix_w)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append(firstMatrix_w)
bestModelingResults.append("Coefficients distribution is as follows:\nmodelCoefficients =\n[\n [Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM],\n [Neuron2_bias, Neuron2_weight1, Neuron2_weight2, ... , Neuron2_weightZ],\n [ . , . , . , ... , . ],\n [ . , . , . , ... , . ],\n [ . , . , . , ... , . ],\n [NeuronN_bias, NeuronN_weight1, NeuronN_weight2, ... , NeuronN_weightK],\n]\n")
if (predictionAcurracy > stopTrainingIfAcurracy):
break
# Alongside the information of the best model obtained, we add the
# modeled information of ALL the models obtained to the variable that
# we will return in this method
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
predictSingleArtificialNeuron(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
                                  isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise.",
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictSingleArtificialNeuron(coefficients=modelCoefficients, activationFunction='none', isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.140432977147068], [28.799532314784063], [25.69562041179361]]
"""
def predictSingleArtificialNeuron(self, coefficients, activationFunction='sigmoid', isThreshold=True, threshold=0.5):
        """
        Predict output values for the samples stored in "self.x_samplesList"
        with a single artificial neuron model.

        coefficients = trained weight vector ([[bias], [w1], [w2], ...]).
        activationFunction = name of the activation to apply: 'none',
                             'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower',
                             'raiseTo3rdPower', 'raiseTo4thPower',
                             'raiseTo5thPower', 'raiseTo6thPower' or
                             'exponential'.
        isThreshold = True to binarize the activated outputs (classification).
        threshold = cut value applied when "isThreshold" is True.

        Returns the predicted values as one row per sample: [[y1], [y2], ...].
        """
        # Dispatch table: activation name -> callable applied to each Fx value.
        # Replaces two duplicated 10-branch if-chains of the previous version.
        activationDispatch = {
            'none': lambda v: v,
            'sigmoid': self.getSigmoidActivation,
            'relu': self.getReluActivation,
            'tanh': self.getTanhActivation,
            'raiseTo2ndPower': self.getRaiseToTheSecondPowerActivation,
            'raiseTo3rdPower': self.getRaiseToTheThirdPowerActivation,
            'raiseTo4thPower': self.getRaiseToTheFourthPowerActivation,
            'raiseTo5thPower': self.getRaiseToTheFifthPowerActivation,
            'raiseTo6thPower': self.getRaiseToTheSixthPowerActivation,
            'exponential': self.getExponentialActivation,
        }
        if (activationFunction not in activationDispatch):
            raise Exception('ERROR: The selected Activation Function does not exist or has not been programmed in this method yet.')
        from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
        numberOfIndependentRows = len(self.x_samplesList)
        numberOfIndependentVariables = len(self.x_samplesList[0])
        # Prepend the bias column (constant 1) to every sample row.
        matrix_x = []
        for row in range(0, numberOfIndependentRows):
            matrix_x.append([1] + [self.x_samplesList[row][column] for column in range(0, numberOfIndependentVariables)])
        matrix_w = coefficients
        # We calculate the results obtained with the given weight coefficients
        # vector.
        matrixMath = mLAL.MatrixMath()
        Fx = matrixMath.getDotProduct(matrix_x, matrix_w)
        activate = activationDispatch[activationFunction]
        Fz = []
        for row in range(0, numberOfIndependentRows):
            current_Fz = activate(Fx[row][0])
            if (isThreshold == True):
                # Binarize classification outputs against the given threshold.
                current_Fz = 0 if (current_Fz < threshold) else 1
            Fz.append([current_Fz])
        # We return the predicted data.
        return Fz
"""
predictArtificialNeuralNetwork(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable coefficients. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
                                  isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise.",
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.00001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictArtificialNeuralNetwork(coefficients=modelCoefficients, artificialNeuralNetworkDistribution=aNND, activationFunction=aF, isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.22084819611869], [28.895166544625255], [25.788001189515317]]
"""
def predictArtificialNeuralNetwork(self, coefficients, artificialNeuralNetworkDistribution, activationFunction, isThreshold=True, threshold=0.5):
        """
        Predict output values for the samples stored in "self.x_samplesList"
        with a trained Artificial Neural Network.

        coefficients = trained weight matrix (one weight row per neuron, as
                       returned by "getArtificialNeuralNetwork()").
        artificialNeuralNetworkDistribution = matrix of 1s/0s describing the
                       neurons of each layer (columns = layers).
        activationFunction = matrix (same shape as the distribution matrix)
                       holding the activation name of each neuron.
        isThreshold = True to binarize the final-layer outputs.
        threshold = cut value applied when "isThreshold" is True.

        Returns a matrix with one row per sample and one column per neuron of
        the final layer.
        """
        from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
        # NOTE: the former "from . import MortrackML_Library as mSL" import
        # was removed because it was never used in this method.
        # Dispatch table: activation name -> callable applied to each Fx value.
        activationDispatch = {
            'none': lambda v: v,
            'sigmoid': self.getSigmoidActivation,
            'relu': self.getReluActivation,
            'tanh': self.getTanhActivation,
            'raiseTo2ndPower': self.getRaiseToTheSecondPowerActivation,
            'raiseTo3rdPower': self.getRaiseToTheThirdPowerActivation,
            'raiseTo4thPower': self.getRaiseToTheFourthPowerActivation,
            'raiseTo5thPower': self.getRaiseToTheFifthPowerActivation,
            'raiseTo6thPower': self.getRaiseToTheSixthPowerActivation,
            'exponential': self.getExponentialActivation,
        }
        numberOfIndependentRows = len(self.x_samplesList)
        numberOfIndependentVariables = len(self.x_samplesList[0])
        numberOfNeuronLayers = len(artificialNeuralNetworkDistribution[0])
        matrixMath = mLAL.MatrixMath()
        transposedANND = matrixMath.getTransposedMatrix(artificialNeuralNetworkDistribution)
        transposedAF = matrixMath.getTransposedMatrix(activationFunction)
        # Flatten the network description: count the neurons of each layer and
        # list every active neuron's activation function in network order.
        numberOfNeuronsPerLayer = []
        activationFunctionsList = []
        totalNumberOfNeurons = 0
        for row in range(0, len(transposedANND)):
            currentNumberOfNeurons = 0
            for column in range(0, len(transposedANND[0])):
                if (transposedANND[row][column] == 1):
                    currentNumberOfNeurons = currentNumberOfNeurons + 1
                    activationFunctionsList.append(transposedAF[row][column])
            numberOfNeuronsPerLayer.append([currentNumberOfNeurons])
            totalNumberOfNeurons = totalNumberOfNeurons + currentNumberOfNeurons
        numberOfNeuronsPerLayer = matrixMath.getTransposedMatrix(numberOfNeuronsPerLayer)
        activationFunctionsList = [activationFunctionsList]
        numberOfNeuronsInFinalLayer = numberOfNeuronsPerLayer[0][len(numberOfNeuronsPerLayer[0])-1]
        # Validate every active neuron's activation name. The previous
        # row/column scan indexed the first N rows of each column, which could
        # check entries of inactive neurons when active neurons are not
        # top-aligned; validating the flattened per-neuron list is equivalent
        # for well-formed inputs and safe otherwise.
        for currentActivation in activationFunctionsList[0]:
            if (currentActivation not in activationDispatch):
                raise Exception('ERROR: The selected Activation Function does not exist or has not been programmed in this method yet.')
        totalNumberOfLayers = len(numberOfNeuronsPerLayer[0])
        # Prepend the bias column (constant 1) to every sample row.
        matrix_x = []
        for row in range(0, numberOfIndependentRows):
            matrix_x.append([1] + [self.x_samplesList[row][column] for column in range(0, numberOfIndependentVariables)])
        # ----- Forward pass with the given weight matrix ----- #
        currentMatrix_w = coefficients
        Fx = []  # Fx[neuron] = [[raw dot product of each sample]]
        Fz = []  # Fz[neuron] = [activated value of each sample]
        actualFunctionActivation = 0
        for currentLayer in range(0, totalNumberOfLayers):
            if (currentLayer == 0):
                # The first layer feeds directly from the input samples.
                for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
                    dotProduct = matrixMath.getDotProduct(matrix_x, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
                    Fx.append(matrixMath.getTransposedMatrix(dotProduct))
                for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
                    activate = activationDispatch[activationFunctionsList[0][actualFunctionActivation]]
                    Fz.append([activate(Fx[currentNeuronOfCurrentLayer][0][column]) for column in range(0, numberOfIndependentRows)])
                    actualFunctionActivation = actualFunctionActivation + 1
            else:
                # Later layers feed from the activated outputs (Fz) of the
                # previous layer, plus a bias column.
                pastNeuronOfCurrentLayer = 0
                for currentLayerCount in range(0, currentLayer-1):
                    pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
                inputMatrix = []
                for row in range(0, numberOfIndependentRows):
                    temporalRow = [1]  # bias column
                    for currentNeuron in range(pastNeuronOfCurrentLayer, pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]):
                        temporalRow.append(Fz[currentNeuron][row])
                    inputMatrix.append(temporalRow)
                for currentNeuronOfCurrentLayer in range(pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1], pastNeuronOfCurrentLayer+numberOfNeuronsPerLayer[0][currentLayer-1]+numberOfNeuronsPerLayer[0][currentLayer]):
                    dotProduct = matrixMath.getDotProduct(inputMatrix, self.getANNweightVectorForOneNeuron(currentMatrix_w, currentNeuronOfCurrentLayer))
                    Fx.append(matrixMath.getTransposedMatrix(dotProduct))
                pastNeuronOfCurrentLayer = 0
                for currentLayerCount in range(0, currentLayer):
                    pastNeuronOfCurrentLayer = pastNeuronOfCurrentLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
                for currentNeuronOfCurrentLayer in range(0, numberOfNeuronsPerLayer[0][currentLayer]):
                    activate = activationDispatch[activationFunctionsList[0][actualFunctionActivation]]
                    Fz.append([activate(Fx[currentNeuronOfCurrentLayer+pastNeuronOfCurrentLayer][0][column]) for column in range(0, numberOfIndependentRows)])
                    actualFunctionActivation = actualFunctionActivation + 1
        # ----- Extract (and optionally binarize) the final-layer outputs ----- #
        firstNeuronOfFinalLayer = 0
        for currentLayerCount in range(0, numberOfNeuronLayers-1):
            firstNeuronOfFinalLayer = firstNeuronOfFinalLayer + numberOfNeuronsPerLayer[0][currentLayerCount]
        predictedValues = []
        for currentSample in range(0, numberOfIndependentRows):
            temporalRow = []
            for currentNeuronOfFinalLayer in range(0, numberOfNeuronsInFinalLayer):
                current_Fz = Fz[firstNeuronOfFinalLayer+currentNeuronOfFinalLayer][currentSample]
                if (isThreshold == True):
                    current_Fz = 0 if (current_Fz < threshold) else 1
                temporalRow.append(current_Fz)
            predictedValues.append(temporalRow)
        return predictedValues
| 307,364 | 0 | 2,003 |
4cf655ae0cd5b1d81b09f564aff8861b5b29bfc2 | 1,305 | py | Python | geotrek/feedback/management/commands/erase_emails.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/feedback/management/commands/erase_emails.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/feedback/management/commands/erase_emails.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | import logging
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from geotrek.feedback.models import Report
logger = logging.getLogger(__name__)
| 37.285714 | 103 | 0.603065 | import logging
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from geotrek.feedback.models import Report
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Erase emails older than 1 year from feedbacks."
def add_arguments(self, parser):
parser.add_argument('-d', '--days',
help="Erase mails older than DAYS (default: %(default)s)",
type=int,
default=365)
parser.add_argument('--dry-run',
action='store_true',
default=False,
help="Show only how many reports will be modified")
def handle(self, *args, **options):
"""Handle method for `erase_email` command"""
one_year = timezone.now() - timedelta(days=options['days'])
older_reports = Report.objects.filter(date_insert__lt=one_year).exclude(email='')
if not options['dry_run']:
updated_reports = older_reports.update(email='')
logger.info('{0} email(s) erased'.format(updated_reports))
else:
logger.info('Dry run mode,{0} report(s) should be modified'.format(older_reports.count(),))
| 434 | 632 | 23 |
5563ca17d9aa689f0bb96796841c90e07e5665d7 | 750 | py | Python | haipproxy/client/redis_ops.py | liguobao/haipproxy | 2f529c7981c4e5f7d92940d6ab0dd31fa827b037 | [
"MIT"
] | null | null | null | haipproxy/client/redis_ops.py | liguobao/haipproxy | 2f529c7981c4e5f7d92940d6ab0dd31fa827b037 | [
"MIT"
] | null | null | null | haipproxy/client/redis_ops.py | liguobao/haipproxy | 2f529c7981c4e5f7d92940d6ab0dd31fa827b037 | [
"MIT"
] | null | null | null | import logging
import time
from haipproxy.utils import get_redis_conn
logger = logging.getLogger(__name__)
| 27.777778 | 83 | 0.582667 | import logging
import time
from haipproxy.utils import get_redis_conn
logger = logging.getLogger(__name__)
class ProxyMaintainer:
def __init__(self):
self.redis_conn = get_redis_conn()
self.rpipe = self.redis_conn.pipeline()
def del_all_fails(self):
total = 0
nfail = 0
for pkey in self.redis_conn.scan_iter(match='*://*'):
total += 1
if self.redis_conn.hget(pkey, 'used_count') != b'0' and \
self.redis_conn.hget(pkey, 'success_count') == b'0':
self.rpipe.delete(pkey)
nfail += 1
self.rpipe.execute()
logger.info(
f'{nfail} failed proxies deleted, {total} before, {total - nfail} now '
)
| 563 | 1 | 76 |
929be707cbd4b86ca10584e70cafb9eb009697e1 | 1,727 | py | Python | tests.py | b-Development-Team/b-star | e1a47e118d0f30f7caca5ecc3ac08fadaf2227c6 | [
"MIT"
] | 1 | 2021-12-28T22:07:10.000Z | 2021-12-28T22:07:10.000Z | tests.py | b-Development-Team/b-star | e1a47e118d0f30f7caca5ecc3ac08fadaf2227c6 | [
"MIT"
] | 6 | 2022-01-07T22:49:19.000Z | 2022-03-11T05:39:04.000Z | tests.py | b-Development-Team/b-star | e1a47e118d0f30f7caca5ecc3ac08fadaf2227c6 | [
"MIT"
] | 4 | 2021-11-26T01:38:32.000Z | 2022-02-27T20:54:08.000Z | from src.interpreter.function_deco import setupFunctions
from src.interpreter.run import runCode
# stolen from https://stackoverflow.com/questions/287871/how-do-i-print-colored-text-to-the-terminal
# muhahahahahahaha 😈
if __name__ == "__main__":
setupFunctions()
print(Colours.WARNING + "Starting test..." + Colours.ENDC)
testAll()
print()
if Stats.failedTests == 0:
print(Colours.OKGREEN + Colours.BOLD + f"All {Stats.num} tests passed!" + Colours.ENDC)
elif Stats.failedTests < Stats.correctTests:
print(Colours.WARNING + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
else:
print(Colours.FAIL + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
| 28.783333 | 112 | 0.625941 | from src.interpreter.function_deco import setupFunctions
from src.interpreter.run import runCode
# stolen from https://stackoverflow.com/questions/287871/how-do-i-print-colored-text-to-the-terminal
# muhahahahahahaha 😈
class Colours:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Stats:
num = 0
correctTests = 0
failedTests = 0
def test(name, code, assumption):
result = runCode(code)
correct = result == assumption
if correct:
print(Colours.OKGREEN + Colours.BOLD + "✔", name, Colours.ENDC)
Stats.correctTests += 1
else:
print(Colours.FAIL + Colours.BOLD + "✘", name, end="")
print(f" (Wanted '{assumption}', Got '{result}')")
Stats.failedTests += 1
Stats.num += 1
def testAll():
test("J", "[J 2]", "jj")
test("J Pt. 2", "[J 0]", "j")
test("J Pt. 3", "[J -9]", "j")
test("J Final Boss", "[J 100]",
"jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj")
if __name__ == "__main__":
setupFunctions()
print(Colours.WARNING + "Starting test..." + Colours.ENDC)
testAll()
print()
if Stats.failedTests == 0:
print(Colours.OKGREEN + Colours.BOLD + f"All {Stats.num} tests passed!" + Colours.ENDC)
elif Stats.failedTests < Stats.correctTests:
print(Colours.WARNING + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
else:
print(Colours.FAIL + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
| 620 | 249 | 91 |
75bf9f95fbd0ab5a4b10dbe4e89a7aa58f5cb2ae | 900 | py | Python | src/client.py | GRinvest/whales-miner | 027a0fd4a740e60057572e17223054aad1ceb1f8 | [
"Apache-2.0"
] | 1 | 2021-12-04T03:00:03.000Z | 2021-12-04T03:00:03.000Z | src/client.py | GRinvest/whales-miner | 027a0fd4a740e60057572e17223054aad1ceb1f8 | [
"Apache-2.0"
] | null | null | null | src/client.py | GRinvest/whales-miner | 027a0fd4a740e60057572e17223054aad1ceb1f8 | [
"Apache-2.0"
] | 3 | 2021-12-06T17:26:49.000Z | 2021-12-10T19:30:28.000Z | import asyncio
from typing import Callable, Any
import json
import logging
import requests
import numpy as np
log = logging.getLogger('pool') | 26.470588 | 74 | 0.604444 | import asyncio
from typing import Callable, Any
import json
import logging
import requests
import numpy as np
log = logging.getLogger('pool')
class PoolClient:
url: str
current_task: any
def __init__(self, url, wallet) -> None:
self.url = url
self.wallet = wallet
def report_solution(self, solution):
response = requests.post(f'{self.url}/submit', json={
'giver': solution['giver'],
'miner_addr': self.wallet,
'inputs': [solution['input']]
})
data = response.ok
return data
def load_next_task(self):
response = requests.get(f'{self.url}/job')
data = response.json()
data['seed'] = bytes.fromhex(data['seed'])
data['prefix'] = np.random.randint(0, 255, 16, np.uint8).tobytes()
data['complexity'] = bytes.fromhex(data['complexity'])
return data | 622 | 113 | 23 |
b19ad5305a5b3c9f8f66c710922db1655567f32b | 1,462 | py | Python | rosette-benchmarks-3/jitterbug/jitterbug/scripts/verif-perf.py | uw-unsat/leanette-popl22-artifact | 80fea2519e61b45a283fbf7903acdf6d5528dbe7 | [
"MIT"
] | 4 | 2021-10-09T14:46:50.000Z | 2022-01-31T23:39:57.000Z | rosette-benchmarks-4/jitterbug/jitterbug/scripts/verif-perf.py | uw-unsat/leanette-popl22-artifact | 80fea2519e61b45a283fbf7903acdf6d5528dbe7 | [
"MIT"
] | null | null | null | rosette-benchmarks-4/jitterbug/jitterbug/scripts/verif-perf.py | uw-unsat/leanette-popl22-artifact | 80fea2519e61b45a283fbf7903acdf6d5528dbe7 | [
"MIT"
] | 1 | 2022-02-03T19:50:18.000Z | 2022-02-03T19:50:18.000Z | #!/usr/bin/env python3
import argparse
import csv
import glob
import subprocess
import sys
import time
import re
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("output", type=argparse.FileType('w'))
args = parser.parse_args()
debug = args.debug
output = args.output
dry_run = args.dry_run
outfile = output
architectures = [
"rv64",
"rv32",
"arm64",
"arm32",
"x86_64",
"x86_32",
]
time_re = re.compile(r"\[ OK \] \"VERIFY \((.+)\)\" \((.+)ms cpu\) \((.+)ms real\) \((.+) terms\)")
outfile.write("arch, instr, cputime, realtime, terms\n")
for arch in architectures:
run(arch)
outfile.close()
| 23.580645 | 105 | 0.620383 | #!/usr/bin/env python3
import argparse
import csv
import glob
import subprocess
import sys
import time
import re
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("output", type=argparse.FileType('w'))
args = parser.parse_args()
debug = args.debug
output = args.output
dry_run = args.dry_run
outfile = output
architectures = [
"rv64",
"rv32",
"arm64",
"arm32",
"x86_64",
"x86_32",
]
time_re = re.compile(r"\[ OK \] \"VERIFY \((.+)\)\" \((.+)ms cpu\) \((.+)ms real\) \((.+) terms\)")
def get_proc_one_architecture(arch):
cmd = "echo" if dry_run else "make"
args = ["RACO_JOBS=1", f"verify-{arch}"]
return subprocess.run([cmd, *args], capture_output=True, encoding="utf8", check=True)
def run(arch):
if debug:
print(f"DEBUG: Running {arch}")
proc = get_proc_one_architecture(arch)
for line in proc.stdout.splitlines():
match = re.match(time_re, line)
if match:
instr = match.group(1)
cputime = match.group(2)
realtime = match.group(3)
terms = match.group(4)
result = f"{arch}, {instr}, {cputime}, {realtime}, {terms}\n"
print(result, end="")
outfile.write(result)
outfile.write("arch, instr, cputime, realtime, terms\n")
for arch in architectures:
run(arch)
outfile.close()
| 668 | 0 | 46 |
ae7e179c2fb0f76492a1221ee071181dd948b1d9 | 2,978 | py | Python | examples/object_detection_2d/detectron_dataset.py | rmeertens/EdgeAnnotationZChallenge | 98bc088be4766e4b24c7f584c34336be81cb0df4 | [
"MIT"
] | null | null | null | examples/object_detection_2d/detectron_dataset.py | rmeertens/EdgeAnnotationZChallenge | 98bc088be4766e4b24c7f584c34336be81cb0df4 | [
"MIT"
] | null | null | null | examples/object_detection_2d/detectron_dataset.py | rmeertens/EdgeAnnotationZChallenge | 98bc088be4766e4b24c7f584c34336be81cb0df4 | [
"MIT"
] | 1 | 2022-02-10T09:45:29.000Z | 2022-02-10T09:45:29.000Z | import json
import math
import os
from typing import List
import cv2
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog
OBJECT_CATEGORIES = {
"Vehicle": 0,
"Cyclist": 1,
"Pedestrian": 2,
}
DATASET_TRAIN = "zen_2dod_train"
DATASET_VAL = "zen_2dod_val"
if __name__ == "__main__":
# This code is only for debugging / visualization.
DATASET_ROOT = '' # insert the dataset root path here
register_detectron(DATASET_ROOT, split=0, num_splits=3)
dataset = DatasetCatalog.get(DATASET_TRAIN)
for d in dataset:
img = cv2.imread(d["file_name"])
visualizer = Visualizer(
img[:, :, ::-1], scale=1, metadata=MetadataCatalog.get(DATASET_TRAIN)
)
out = visualizer.draw_dataset_dict(d)
cv2.imshow("image", out.get_image()[:, :, ::-1])
cv2.waitKey(0)
break
| 32.725275 | 85 | 0.650437 | import json
import math
import os
from typing import List
import cv2
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog
OBJECT_CATEGORIES = {
"Vehicle": 0,
"Cyclist": 1,
"Pedestrian": 2,
}
DATASET_TRAIN = "zen_2dod_train"
DATASET_VAL = "zen_2dod_val"
def _read_objs(path):
objs = []
with open(path, "r") as anno_file:
for obj in anno_file:
obj = obj.split(' ')
category_id = OBJECT_CATEGORIES.get(obj[0], None)
if category_id is not None:
objs.append({
"bbox": [float(val) for val in obj[4:8]],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": category_id,
})
return objs
def get_dataset_dicts(root_path: str, test: bool) -> List[dict]:
# First construct a map from id to image path
datalist_path = os.path.join(root_path, ('test.json' if test else 'train.json'))
with open(datalist_path) as datalist_file:
datalist = json.load(datalist_file)
# Then read annotations and construct dataset dict
frames = []
print("Loading annotations...")
for frame_id, frame_paths in datalist.items():
frame_id = int(frame_id)
record = {
"file_name": sorted(frame_paths["blurred_imgs"])[1],
"image_id": frame_id,
"height": 2168,
"width": 3848,
"annotations": _read_objs(frame_paths["anno"][0]),
}
frames.append(record)
return frames
def register_detectron(
base_dir: str,
split: int = 0,
num_splits: int = 3,
):
# Split into train and val datasets
dataset_dict = get_dataset_dicts(base_dir, test=False)
frames_per_split = math.ceil(len(dataset_dict) / num_splits)
val_start, val_end = split * frames_per_split, (split + 1) * frames_per_split
train_dataset = dataset_dict[0:val_start] + dataset_dict[val_end:]
val_dataset = dataset_dict[val_start: val_end]
# Register datasets
DatasetCatalog.register(DATASET_TRAIN, lambda: train_dataset)
DatasetCatalog.register(DATASET_VAL, lambda: val_dataset)
MetadataCatalog.get(DATASET_TRAIN).thing_classes = list(OBJECT_CATEGORIES.keys())
MetadataCatalog.get(DATASET_VAL).thing_classes = list(OBJECT_CATEGORIES.keys())
if __name__ == "__main__":
# This code is only for debugging / visualization.
DATASET_ROOT = '' # insert the dataset root path here
register_detectron(DATASET_ROOT, split=0, num_splits=3)
dataset = DatasetCatalog.get(DATASET_TRAIN)
for d in dataset:
img = cv2.imread(d["file_name"])
visualizer = Visualizer(
img[:, :, ::-1], scale=1, metadata=MetadataCatalog.get(DATASET_TRAIN)
)
out = visualizer.draw_dataset_dict(d)
cv2.imshow("image", out.get_image()[:, :, ::-1])
cv2.waitKey(0)
break
| 1,958 | 0 | 69 |
8127a03073ef11b7996806669a0237e35550b610 | 2,914 | py | Python | swift/codegen/test/test_ql.py | Yonah125/codeql | eab8bcc82d736cda78240583edc3f69e4618f7fa | [
"MIT"
] | null | null | null | swift/codegen/test/test_ql.py | Yonah125/codeql | eab8bcc82d736cda78240583edc3f69e4618f7fa | [
"MIT"
] | null | null | null | swift/codegen/test/test_ql.py | Yonah125/codeql | eab8bcc82d736cda78240583edc3f69e4618f7fa | [
"MIT"
] | null | null | null | import sys
from copy import deepcopy
from swift.codegen.lib import ql
from swift.codegen.test.utils import *
@pytest.mark.parametrize("params,expected_local_var", [
(["a", "b", "c"], "x"),
(["a", "x", "c"], "x_"),
(["a", "x", "x_", "c"], "x__"),
(["a", "x", "x_", "x__"], "x___"),
])
@pytest.mark.parametrize("name,expected_article", [
("Argument", "An"),
("Element", "An"),
("Integer", "An"),
("Operator", "An"),
("Unit", "A"),
("Whatever", "A"),
])
if __name__ == '__main__':
sys.exit(pytest.main())
| 28.851485 | 94 | 0.633837 | import sys
from copy import deepcopy
from swift.codegen.lib import ql
from swift.codegen.test.utils import *
def test_property_has_first_param_marked():
params = [ql.Param("a", "x"), ql.Param("b", "y"), ql.Param("c", "z")]
expected = deepcopy(params)
expected[0].first = True
prop = ql.Property("Prop", "foo", "props", ["this"], params=params)
assert prop.params == expected
def test_property_has_first_table_param_marked():
tableparams = ["a", "b", "c"]
prop = ql.Property("Prop", "foo", "props", tableparams)
assert prop.tableparams[0].first
assert [p.param for p in prop.tableparams] == tableparams
assert all(p.type is None for p in prop.tableparams)
@pytest.mark.parametrize("params,expected_local_var", [
(["a", "b", "c"], "x"),
(["a", "x", "c"], "x_"),
(["a", "x", "x_", "c"], "x__"),
(["a", "x", "x_", "x__"], "x___"),
])
def test_property_local_var_avoids_params_collision(params, expected_local_var):
prop = ql.Property("Prop", "foo", "props", ["this"], params=[ql.Param(p) for p in params])
assert prop.local_var == expected_local_var
def test_property_not_a_class():
tableparams = ["x", "result", "y"]
prop = ql.Property("Prop", "foo", "props", tableparams)
assert not prop.type_is_class
assert [p.param for p in prop.tableparams] == tableparams
def test_property_is_a_class():
tableparams = ["x", "result", "y"]
prop = ql.Property("Prop", "Foo", "props", tableparams)
assert prop.type_is_class
assert [p.param for p in prop.tableparams] == ["x", prop.local_var, "y"]
@pytest.mark.parametrize("name,expected_article", [
("Argument", "An"),
("Element", "An"),
("Integer", "An"),
("Operator", "An"),
("Unit", "A"),
("Whatever", "A"),
])
def test_property_indefinite_article(name, expected_article):
prop = ql.Property(name, "Foo", "props", ["x"], plural="X")
assert prop.indefinite_article == expected_article
def test_property_no_plural_no_indefinite_article():
prop = ql.Property("Prop", "Foo", "props", ["x"])
assert prop.indefinite_article is None
def test_class_sorts_bases():
bases = ["B", "Ab", "C", "Aa"]
expected = ["Aa", "Ab", "B", "C"]
cls = ql.Class("Foo", bases=bases)
assert cls.bases == expected
def test_class_has_first_property_marked():
props = [
ql.Property(f"Prop{x}", f"Foo{x}", f"props{x}", [f"{x}"]) for x in range(4)
]
expected = deepcopy(props)
expected[0].first = True
cls = ql.Class("Class", properties=props)
assert cls.properties == expected
def test_class_db_id():
cls = ql.Class("ThisIsMyClass")
assert cls.db_id == "@this_is_my_class"
def test_root_class():
cls = ql.Class("Class")
assert cls.root
def test_non_root_class():
cls = ql.Class("Class", bases=["A"])
assert not cls.root
if __name__ == '__main__':
sys.exit(pytest.main())
| 2,077 | 0 | 274 |
cff25e20d9f4bd4d8816d854e75f19fe2c272adb | 1,799 | py | Python | tests/test_predicate_simplification.py | mccolljr/fete | 9342c814db997c8a7b5a1f3b23dd309d463c9718 | [
"MIT"
] | 1 | 2022-01-10T20:19:16.000Z | 2022-01-10T20:19:16.000Z | tests/test_predicate_simplification.py | mccolljr/fete | 9342c814db997c8a7b5a1f3b23dd309d463c9718 | [
"MIT"
] | null | null | null | tests/test_predicate_simplification.py | mccolljr/fete | 9342c814db997c8a7b5a1f3b23dd309d463c9718 | [
"MIT"
] | null | null | null | from zoneinfo import ZoneInfo
from datetime import datetime, timezone
from flurry.core import predicate as P
from flurry.core.utils import visit_predicate
from flurry.postgres.postgres import _PostgreSQLSimplifier
DATETIME_A = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("UTC"))
DATETIME_B = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("EST"))
predicates_to_simplify = {
"empty_or": P.Or(),
"empty_is": P.Is(),
"empty_and": P.And(),
"empty_where": P.Where(),
"simple_and": P.And(P.Where(a=1), P.Where(b=2)),
"simple_or": P.Or(P.Where(a=1), P.Where(b=2)),
"simple_is": P.Is(str, int, float),
"simple_where": P.Where(
a=P.Eq(1),
b=P.NotEq(2),
c=P.Less(3),
d=P.More(4),
e=P.LessEq(5),
f=P.MoreEq(6),
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
"null_where": P.Where(
a=P.Eq(None),
b=P.NotEq(None),
),
"complex": P.Or(
P.Is(int, str, float),
P.And(
P.Where(a=P.Eq(1)),
P.Where(b=P.NotEq(2)),
P.Where(c=P.Less(3)),
),
P.And(
P.Where(d=P.More(4)),
P.Where(e=P.LessEq(5)),
P.Where(f=P.MoreEq(6)),
),
P.Where(
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
P.And(P.Is(), P.Where()),
),
"date_and_time": P.Where(
a=P.Eq(DATETIME_A),
b=P.Eq(DATETIME_B),
c=P.NotEq(DATETIME_A),
d=P.NotEq(DATETIME_B),
),
}
| 27.257576 | 77 | 0.545859 | from zoneinfo import ZoneInfo
from datetime import datetime, timezone
from flurry.core import predicate as P
from flurry.core.utils import visit_predicate
from flurry.postgres.postgres import _PostgreSQLSimplifier
DATETIME_A = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("UTC"))
DATETIME_B = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("EST"))
predicates_to_simplify = {
"empty_or": P.Or(),
"empty_is": P.Is(),
"empty_and": P.And(),
"empty_where": P.Where(),
"simple_and": P.And(P.Where(a=1), P.Where(b=2)),
"simple_or": P.Or(P.Where(a=1), P.Where(b=2)),
"simple_is": P.Is(str, int, float),
"simple_where": P.Where(
a=P.Eq(1),
b=P.NotEq(2),
c=P.Less(3),
d=P.More(4),
e=P.LessEq(5),
f=P.MoreEq(6),
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
"null_where": P.Where(
a=P.Eq(None),
b=P.NotEq(None),
),
"complex": P.Or(
P.Is(int, str, float),
P.And(
P.Where(a=P.Eq(1)),
P.Where(b=P.NotEq(2)),
P.Where(c=P.Less(3)),
),
P.And(
P.Where(d=P.More(4)),
P.Where(e=P.LessEq(5)),
P.Where(f=P.MoreEq(6)),
),
P.Where(
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
P.And(P.Is(), P.Where()),
),
"date_and_time": P.Where(
a=P.Eq(DATETIME_A),
b=P.Eq(DATETIME_B),
c=P.NotEq(DATETIME_A),
d=P.NotEq(DATETIME_B),
),
}
def test_postgresql_simplify(snapshot):
visitor = _PostgreSQLSimplifier("type_field", "data_field")
for name, pred in predicates_to_simplify.items():
result = visit_predicate(visitor, pred)
assert result == snapshot(name=name)
| 229 | 0 | 23 |
54299737ff64a81a7861b9d3f8f6c0a4262b9ff8 | 637 | py | Python | rosalind/cons.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | 1 | 2020-07-17T13:15:21.000Z | 2020-07-17T13:15:21.000Z | rosalind/cons.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | null | null | null | rosalind/cons.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
from collections import Counter
from rosalind import fasta
ACIDS = "ACGT"
if __name__ == "__main__":
with open("data/rosalind_cons.txt") as f:
cons, matrix = consensus(fasta(f.read()).values())
print(''.join(cons))
for x in ACIDS:
print("{0}: {1}".format(x, ' '.join(map(str, matrix[x]))))
| 24.5 | 70 | 0.588697 | #!/usr/bin/env python
# coding: utf-8
import sys
from collections import Counter
from rosalind import fasta
ACIDS = "ACGT"
def consensus(strings):
cons, matrix = [], dict((x, []) for x in ACIDS)
for c in map(Counter, zip(*strings)):
cons.append(c.most_common(1)[0][0])
for x in ACIDS:
matrix[x].append(c.get(x, 0))
return cons, matrix
if __name__ == "__main__":
with open("data/rosalind_cons.txt") as f:
cons, matrix = consensus(fasta(f.read()).values())
print(''.join(cons))
for x in ACIDS:
print("{0}: {1}".format(x, ' '.join(map(str, matrix[x]))))
| 230 | 0 | 23 |
29bd5f60f37d86231c60d3b3941b3912b6d327bc | 3,122 | py | Python | widget/weather.py | bkosciow/doton3 | 1a386373c49fc9c582b9b502b25b1612ba1be230 | [
"MIT"
] | null | null | null | widget/weather.py | bkosciow/doton3 | 1a386373c49fc9c582b9b502b25b1612ba1be230 | [
"MIT"
] | null | null | null | widget/weather.py | bkosciow/doton3 | 1a386373c49fc9c582b9b502b25b1612ba1be230 | [
"MIT"
] | null | null | null | from kivy.uix.stacklayout import StackLayout
from kivy.lang import Builder
import pathlib
from service.widget import Widget
from datetime import datetime, timedelta
Builder.load_file(str(pathlib.Path(__file__).parent.absolute()) + pathlib.os.sep + 'weather3.kv')
| 53.827586 | 111 | 0.595131 | from kivy.uix.stacklayout import StackLayout
from kivy.lang import Builder
import pathlib
from service.widget import Widget
from datetime import datetime, timedelta
Builder.load_file(str(pathlib.Path(__file__).parent.absolute()) + pathlib.os.sep + 'weather3.kv')
class Weather(Widget, StackLayout):
forecast_days = [0, 1, 2, 3]
def update_values(self, values, name):
# print(values, name)
if 'current' in values and values['current'] is not None:
normalized_values = self._normalize_values(values['current'])
self.ids['current_wind_icon'].angle = normalized_values['wind_deg']
self.ids['current_wind_label'].text = str(normalized_values['wind_speed'])
self.ids['current_cloudiness'].value = normalized_values['clouds']
self.ids['current_humidity'].value = normalized_values['humidity']
source = 'assets/image/openweather/' + str(normalized_values['weather_id']) + '.png'
if self.ids['current_icon'].source != source:
self.ids['current_icon'].source = source
self.ids['current_temperature'].text = str(normalized_values['temperature_current'])
if 'forecast' in values and values['forecast'] is not None:
today = datetime.today()
for offset in self.forecast_days:
date = today + timedelta(days=offset)
date = date.strftime('%Y-%m-%d')
if date in values['forecast']:
base_id = "day" + str(offset)
normalized_values = self._normalize_values(values['forecast'][date])
source = 'assets/image/openweather/' + str(normalized_values['weather_id']) + '.png'
if self.ids[base_id + '_icon'].source != source:
self.ids[base_id + '_icon'].source = source
if base_id + "_wind_icon" in self.ids:
self.ids[base_id + "_wind_icon"].angle = normalized_values['wind_deg']
if base_id + "_wind_label" in self.ids:
self.ids[base_id + "_wind_label"].text = str(normalized_values['wind_speed'])
if base_id + "_cloudiness" in self.ids:
self.ids[base_id + "_cloudiness"].value = normalized_values['clouds']
if base_id + "_humidity" in self.ids:
self.ids[base_id + "_humidity"].value = normalized_values['humidity']
if base_id + "_temperature_max" in self.ids:
self.ids[base_id + "_temperature_max"].text = str(normalized_values['temperature_max'])
if base_id + "_temperature_min" in self.ids:
self.ids[base_id + "_temperature_min"].text = str(normalized_values['temperature_min'])
def _normalize_values(self, values):
for name in values:
if isinstance(values[name], float):
values[name] = round(values[name])
if name == 'wind_deg':
values[name] = abs(360-values[name])
return values
| 2,733 | 101 | 23 |
488541b7084f651518dfe071f8a3d7a309a0ba21 | 3,887 | py | Python | bot.py | oatberry/robodaniel | 4b817047f5a4563ecb78e7461d60c4eda3dde6bd | [
"MIT"
] | null | null | null | bot.py | oatberry/robodaniel | 4b817047f5a4563ecb78e7461d60c4eda3dde6bd | [
"MIT"
] | null | null | null | bot.py | oatberry/robodaniel | 4b817047f5a4563ecb78e7461d60c4eda3dde6bd | [
"MIT"
] | null | null | null | """Classes for RoboDaniel"""
import importlib
import logging
import re
from data import commands
from datetime import datetime
from groupy import Bot as GroupyBot
from groupy import Group
from groupy import config
class Bot:
"""RoboDaniel bot class"""
def gather_commands(self):
"""gather !command functions and factoids into dicts"""
self.logger.info('gathering !commands...')
# reload command module for when !reload is called
importlib.reload(commands)
r = re.compile('^__')
self.command_dict = {c: getattr(commands, c)
for c in dir(commands)
if not r.match(c)}
# gather factoids
with open('data/factoids.txt') as factoids_file:
self.factoids = {f.split()[0]: ' '.join(f.split()[1:])
for f in factoids_file}
def generate_triggers(self):
"""generate message trigger rules"""
self.logger.info('generating trigger rules...')
with open('data/triggers.txt') as triggers_file:
self.triggers = [(re.compile(t.split()[0]), ' '.join(t.split()[1:]))
for t in triggers_file]
def interpret_command(self, message):
"""decide what to do with a "!command" message"""
# extract the message text, minus the beginning '!'
command = message['text'][1:]
# put a precautionary space before each '@'
# as GroupMe does weird stuff with mentions
command = re.sub('@', ' @', command)
if command in self.factoids:
return [self.factoids[command]]
args = command.split()
if args[0] in self.command_dict:
return self.command_dict[args[0]](args=args[1:],
sender=message['name'],
sender_id=message['user_id'],
attachments=message['attachments'],
bot=self)
else:
self.logger.warning('invalid command: {}'.format(command))
return False
def match_trigger(self, message):
"""attempt to match a message against trigger rules"""
response = None
if message['text'][0] == '!':
# message contains a !command; try to interpret it
self.logger.info('interpreted command: "{}"'.format(message['text']))
response = self.interpret_command(message)
else:
# try each trigger rule
for pattern, trigger in self.triggers:
if pattern.match(message['text']):
# response is triggered
self.logger.info('trigger matched: "{}"'.format(message['text']))
response = [trigger]
break
if response:
# we have a response to send!
logging.info('sending response: "{}"'.format(response))
self.post(*response)
def logmsg(self, message):
"""log a chat message to the appropriate logfile"""
timestamp = datetime.fromtimestamp(message['created_at']).isoformat()
line = '{} {}: {}'.format(timestamp, message['name'], message['text'])
print(line, file=self.chatlog)
def post(self, *message):
"""post a message with optional attachments"""
self.bot.post(*message)
| 36.327103 | 85 | 0.555441 | """Classes for RoboDaniel"""
import importlib
import logging
import re
from data import commands
from datetime import datetime
from groupy import Bot as GroupyBot
from groupy import Group
from groupy import config
class Bot:
"""RoboDaniel bot class"""
def __init__(self, api_key, bot_id):
config.API_KEY = api_key
self.bot = GroupyBot.list().filter(bot_id=bot_id).first
self.group = Group.list().filter(id=self.bot.group_id).first
self.chatlog = open('logs/{}.log'.format(self.group.name), 'a+')
self.logger = logging.getLogger(self.bot.name)
self.generate_triggers()
self.gather_commands()
def gather_commands(self):
"""gather !command functions and factoids into dicts"""
self.logger.info('gathering !commands...')
# reload command module for when !reload is called
importlib.reload(commands)
r = re.compile('^__')
self.command_dict = {c: getattr(commands, c)
for c in dir(commands)
if not r.match(c)}
# gather factoids
with open('data/factoids.txt') as factoids_file:
self.factoids = {f.split()[0]: ' '.join(f.split()[1:])
for f in factoids_file}
def generate_triggers(self):
"""generate message trigger rules"""
self.logger.info('generating trigger rules...')
with open('data/triggers.txt') as triggers_file:
self.triggers = [(re.compile(t.split()[0]), ' '.join(t.split()[1:]))
for t in triggers_file]
def interpret_command(self, message):
"""decide what to do with a "!command" message"""
# extract the message text, minus the beginning '!'
command = message['text'][1:]
# put a precautionary space before each '@'
# as GroupMe does weird stuff with mentions
command = re.sub('@', ' @', command)
if command in self.factoids:
return [self.factoids[command]]
args = command.split()
if args[0] in self.command_dict:
return self.command_dict[args[0]](args=args[1:],
sender=message['name'],
sender_id=message['user_id'],
attachments=message['attachments'],
bot=self)
else:
self.logger.warning('invalid command: {}'.format(command))
return False
def match_trigger(self, message):
"""attempt to match a message against trigger rules"""
response = None
if message['text'][0] == '!':
# message contains a !command; try to interpret it
self.logger.info('interpreted command: "{}"'.format(message['text']))
response = self.interpret_command(message)
else:
# try each trigger rule
for pattern, trigger in self.triggers:
if pattern.match(message['text']):
# response is triggered
self.logger.info('trigger matched: "{}"'.format(message['text']))
response = [trigger]
break
if response:
# we have a response to send!
logging.info('sending response: "{}"'.format(response))
self.post(*response)
def logmsg(self, message):
"""log a chat message to the appropriate logfile"""
timestamp = datetime.fromtimestamp(message['created_at']).isoformat()
line = '{} {}: {}'.format(timestamp, message['name'], message['text'])
print(line, file=self.chatlog)
def post(self, *message):
"""post a message with optional attachments"""
self.bot.post(*message)
| 376 | 0 | 27 |
9c27b0c05faee9392b60475336d6cad774e74b5a | 16,552 | py | Python | app_dir/app_initial_cond_lhs.py | virginia4/app_run_local | 5bcb8280dda777f972b09f49b420040db6bfeb77 | [
"MIT"
] | null | null | null | app_dir/app_initial_cond_lhs.py | virginia4/app_run_local | 5bcb8280dda777f972b09f49b420040db6bfeb77 | [
"MIT"
] | null | null | null | app_dir/app_initial_cond_lhs.py | virginia4/app_run_local | 5bcb8280dda777f972b09f49b420040db6bfeb77 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from builtins import range # pylint: disable=redefined-builtin
import dash_table
import collections
import os
import fnmatch
import glob
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
import xlrd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
#import dash_table_experiments as dt
from .common import generate_table
import pandas as pd
import numpy as np
from . import opti_models
from . import app
import chart_studio.plotly as plt
# pylint: disable=redefined-builtin
# this command is necessary for the app to find the MDL_screens_database
# direcotry when you deploy
script_path = os.path.dirname(os.path.realpath(__file__))
myPath= os.path.join( script_path,'MDL_screens_database')
###############################################################################
def get_controls_var(id, desc, unit, range):
    """Build one Dash table row of controls for a variable: a description
    input, a unit input, and a low/high pair spanning its value range.
    """
    desc_input = dcc.Input(
        id=id + "_label", type='text', value=desc, className="label")
    unit_input = dcc.Input(
        id=id + "_unit", type='text', value=unit, className="label")
    low_input = dcc.Input(
        id=id + "_low", type='number', value=range[0], className="range")
    high_input = dcc.Input(
        id=id + "_high", type='number', value=range[1], className="range")
    # Assemble the three cells into a single table row keyed on the variable id.
    cells = [
        html.Td(desc_input),
        html.Td(unit_input),
        html.Td([low_input, html.Span('to'), high_input]),
    ]
    return html.Tr(cells, id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###############################################################################
def get_controls_screen(id, desc, range):
    """Build one Dash table row with a label plus two numeric inputs for the
    screen dimensions (nsamples_x by nsamples_y).
    """
    name_input = dcc.Input(id=id + "_label", type='text', value=desc,
                           className='label')
    dim_x = dcc.Input(
        id=id + "_x", type='number', value=range[0], className="range")
    dim_y = dcc.Input(
        id=id + "_y", type='number', value=range[1], className="range")
    row_cells = [
        html.Td(name_input),
        html.Td([dim_x, html.Span('\\times'), dim_y]),
    ]
    return html.Tr(row_cells, id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###
# 'code' is the variable that gets as input the value from the user.
# This corresponds to a certain code name of the xlxs files. The program
# use this to to search in the directory for matches.
# First, the characteristics of the variable are set, i.e. how to link the
# variable to the layout-input environment that the user interacts with.
###
code = collections.OrderedDict([
('code_number',
dict(label=['MDL file code'])),
])
NVARS_MAX = 10
###
# inp_nvars: an input variable that is updated with btn_submit and takes the numbers of the reagents
# that are in each hit condition.
###
inp_nvars = html.Tr([
html.Td('Number of reagents: '),
html.Td(
dcc.Input(
id='inp_nvars_lhs',
# type='text',
value=' ',
# max=NVARS_MAX,
# min=1,
className="nvars range"))
])
###
# inp_code_hitwell: two-input variable, caries the values of both the hitwell and the code of the
# screen
###
inp_code_hitwell = html.Tr([
html.Td('Enter screen code (e.g. MD1-40) and hit well (e.g. B1):'),
html.Td(dcc.Input(id='inp_code_lhs',
type='text',
value="MD1-40")),
html.Td(dcc.Input(
id='inp_hitwell_lhs',
type='text',
value="B1")),
html.Div('', id='input_info_lhs')])
btn_submit = html.Tr([html.Td(html.Button('Submit', id = 'submit-button_lhs', className='action-button', n_clicks=0)),
html.Div('', id='submit_info_lhs',style={'width': '50%'}),
])
##############################################################################
lhs_text = """
Latin hypercube sampling (LHS) is a sampling method for searching for optimal
parameters in a high dimensional space. The LHS is a near-random method, i.e.
the optimised condtions are not completely random, instead they obey certain
requirements. These requirements assure that the final sample points
will be spread more evenly across the range. LHS can be used for high-dimension
spaces, i.e. for more than two conditions.
"""
lhs_text_html = [html.P(i) for i in lhs_text.split("\n\n")]
lhs_layout = html.Div( [html.H2("About the Latin Hybercube sampling"),
dcc.Markdown(lhs_text, className="text-container", id="lhs_container",
# **{'data-iframe-height': ''},
style={ 'width': '50%','padding': '20px',
'margin': '10px','justify-content': 'center','align-items': 'center'})])
##############################################################################
# states = label_states + unit_states + low_states + high_states
states = [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
@app.callback(
[Output('submit_info_lhs', 'children'),
Output('inp_nvars_lhs', 'value')],
[Input('submit-button_lhs', 'n_clicks')],
states)
#------------------------------------------------------------------------------
###
# This feature is so the user can change the dimensions of the screen, i.e. the
# number of the wells. Initialises by the the dimensions of a common crystallisation
# screen 12x8
###
inp_nsamples = html.Tr([
html.Td('Enter screen dimensions '),
html.Td(
dcc.Input(
id='nsamples_x_lhs', type='number', value=8,
className="nsamples range")),
html.Td(html.Span('x')),
html.Td(
dcc.Input(
id='nsamples_y_lhs', type='number', value=12,
className="nsamples range"))
])
##############################################################################
btn_compute = html.Div([
html.Button('compute using LHS', id='btn_compute_lhs', className='action-button',
n_clicks = 0),
html.Div('', id='compute_info_lhs')
])
###
# Creation of dash app: setting up the layout
###
layout = html.Div(
[
lhs_layout,
html.Table([inp_code_hitwell]),
html.Br(),
html.Table([btn_submit]),
html.Br(),
html.Table([inp_nvars, inp_nsamples]),
html.Br(),
btn_compute,
#graph, hover_info,
],
style={'padding': 20},
id="container_lhs",
# tag for iframe resizer
**{'data-iframe-height': ''},
)
#------------------------------------------------------------------------------
##############################################################################
###
# Using State to share more than one input in the callback.
# ninps: no of inputs
###
# ninps = len(label_states + unit_states + low_states + high_states) + 5
ninps = 5 # no of inputs
states = [State('inp_nvars_lhs', 'value')]
states += [State('nsamples_x_lhs', 'value')]
states += [State('nsamples_y_lhs', 'value')]
states += [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
#------------------------------------------------------------------------------
###############################################################################
@app.callback(
    dash.dependencies.Output('compute_info_lhs', 'children'),
    [dash.dependencies.Input('table_lhs', 'data'),
     dash.dependencies.Input('btn_compute_lhs', 'n_clicks'),
     ], states)
def on_compute(submit_info, n_clicks, *args):
    """Callback for clicking the compute button.

    Reads the (possibly user-edited) hit-condition table, derives a search
    window around the hit values (salts x0.5..x2, pH +-1, precipitants
    x0.25..x4), draws nsamples_x * nsamples_y Latin-hypercube samples inside
    that window and renders them as a downloadable screen table.

    Fix: ``var.astype(np.float)`` was removed -- ``np.float`` was deprecated
    in NumPy 1.20 and removed in 1.24, so that line raised AttributeError on
    modern NumPy; the result was unused anyway.
    """
    if n_clicks is None:
        return ''
    df_hit_values = pd.DataFrame(submit_info)
    if len(args) != ninps:
        raise ValueError("Expected {} arguments".format(ninps))
    # Parse State arguments (appended in reverse declaration order).
    hitwell = args[-1]
    code_name = args[-2]
    nsamples_y = args[-3]
    nsamples_x = args[-4]
    # Count how many columns from each reagent category are on the selected file.
    n_pH = len(df_hit_values.filter(like='pH').columns)
    n_units = len(df_hit_values.filter(like='Units').columns)
    n_buff = len(df_hit_values.filter(like='Buffer').columns)
    n_salts = len(df_hit_values.filter(like='Salt').columns)
    n_precip = len(df_hit_values.filter(like='Precipitant').columns)
    # Only the concentration and pH values are going to be varied.
    concentrations = df_hit_values.filter(like='Conc').columns
    var = df_hit_values[concentrations].to_numpy()
    var = var.T
    pH = df_hit_values.filter(like='pH').columns
    pH = df_hit_values[pH].to_numpy()
    # Concentration rows are assumed ordered salt / buffer / precipitant;
    # the file format is crucial for this slicing to work.
    salt_conc = var[0:n_salts]
    buff_conc = var[(n_salts):(n_salts + n_buff)]
    precip_conc = var[(n_salts + n_buff):(n_salts + n_buff + n_precip)]
    # Cast explicitly to float: after the user edits the table the values
    # arrive as str.
    pH = pH.astype(float)
    pH = pH.T
    salt_conc = salt_conc.astype(float)
    precip_conc = precip_conc.astype(float)
    # Lower/upper bounds of the LHS search window around the hit condition.
    salt_range = [salt_conc[:] / 2, salt_conc[:] * 2]
    pH_range = [pH[:] - 1, pH[:] + 1]
    precip_range = [precip_conc[:] / 4, precip_conc[:] * 4]
    low_vals = np.concatenate([salt_range[0], pH_range[0], precip_range[0]])
    high_vals = np.concatenate([salt_range[1], pH_range[1], precip_range[1]])
    nsamples = nsamples_x * nsamples_y
    # Build human-readable column labels: "<reagent name> [<unit>]".
    salts_labels = df_hit_values.filter(like='Salt').columns.values
    buff_labels = df_hit_values.filter(like='Buffer').columns.values
    perci_labels = df_hit_values.filter(like='Precipitant').columns.values
    units_labels = df_hit_values.filter(like='Unit').columns.values
    reagent_name = np.concatenate([df_hit_values.iloc[0][salts_labels[:]],
                                   df_hit_values.iloc[0][buff_labels[:]],
                                   df_hit_values.iloc[0][perci_labels[:]]])
    labels_array = np.asarray(reagent_name.tolist())
    dim = len(labels_array)
    unit_name = np.concatenate([df_hit_values.iloc[0][units_labels[:]]])
    labels_array_new = ["" for x in range(dim)]
    for i in range(dim):
        try:
            labels_array_new[i] = labels_array[i] + ' [' + unit_name[i] + ']'
        except Exception:
            # A missing or malformed unit entry makes the labels unbuildable;
            # surface the problem to the user instead of crashing the app.
            return dcc.Textarea(
                placeholder='Enter a value...',
                value='An error occurred. Please report at: enquiries@moleculardimensions.com ',
                style={'width': '40%'}
            )
    samples = opti_models.compute_LHS(num_samples=nsamples,
                                      var_LB=low_vals,
                                      var_UB=high_vals)
    df = pd.DataFrame(data=samples, columns=labels_array_new)
    table = generate_table(df, nsamples_x, nsamples_y, download_link=True)
    np.set_printoptions(precision=3)
    if n_clicks > 0:
        return table
# #------------------------------------------------------------------------------
| 35.904555 | 158 | 0.584401 | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from builtins import range # pylint: disable=redefined-builtin
import dash_table
import collections
import os
import fnmatch
import glob
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
import xlrd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
#import dash_table_experiments as dt
from .common import generate_table
import pandas as pd
import numpy as np
from . import opti_models
from . import app
import chart_studio.plotly as plt
# pylint: disable=redefined-builtin
# this command is necessary for the app to find the MDL_screens_database
# direcotry when you deploy
script_path = os.path.dirname(os.path.realpath(__file__))
myPath= os.path.join( script_path,'MDL_screens_database')
###############################################################################
def get_controls_var(id, desc, unit, range):
    """
    Get controls for each variable.

    Builds one Dash table row (html.Tr) containing:
    * a text input for the description (label)
    * a text input for the unit
    * two numeric inputs for the low/high ends of the value range
    """
    # NOTE(review): `id` and `range` shadow builtins -- kept for interface
    # compatibility with existing callers.
    label_reagent = dcc.Input(
        id=id + "_label", type='text', value=desc, className="label")
    unit_reagent = dcc.Input(
        id=id + "_unit", type='text', value=unit, className="label")
    range_low = dcc.Input(
        id=id + "_low", type='number', value=range[0], className="range")
    range_high = dcc.Input(
        id=id + "_high", type='number', value=range[1], className="range")
    # The row id is derived from the variable id so callbacks can target it.
    return html.Tr([
        html.Td(label_reagent),
        html.Td(unit_reagent),
        html.Td([range_low, html.Span('to'), range_high])], id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###############################################################################
def get_controls_screen(id, desc, range):
    """
    Get screen dimensions nsamples_x and nsamples_y.

    Builds one Dash table row (html.Tr) with a label input and two numeric
    inputs holding the screen's x and y dimensions taken from ``range``.
    """
    label = dcc.Input(id = id + "_label", type = 'text', value=desc,
                      className = 'label')
    dimensions_x = dcc.Input(
        id=id + "_x", type='number', value=range[0], className="range")
    dimensions_y = dcc.Input(
        id=id + "_y", type='number', value=range[1], className="range")
    return html.Tr([
        html.Td(label),
        html.Td([dimensions_x, html.Span('\\times'), dimensions_y])
        ,
    ], id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###
# 'code' is the variable that gets as input the value from the user.
# This corresponds to a certain code name of the xlxs files. The program
# use this to to search in the directory for matches.
# First, the characteristics of the variable are set, i.e. how to link the
# variable to the layout-input environment that the user interacts with.
###
code = collections.OrderedDict([
('code_number',
dict(label=['MDL file code'])),
])
NVARS_MAX = 10
###
# inp_nvars: an input variable that is updated with btn_submit and takes the numbers of the reagents
# that are in each hit condition.
###
inp_nvars = html.Tr([
html.Td('Number of reagents: '),
html.Td(
dcc.Input(
id='inp_nvars_lhs',
# type='text',
value=' ',
# max=NVARS_MAX,
# min=1,
className="nvars range"))
])
###
# inp_code_hitwell: two-input variable, caries the values of both the hitwell and the code of the
# screen
###
inp_code_hitwell = html.Tr([
html.Td('Enter screen code (e.g. MD1-40) and hit well (e.g. B1):'),
html.Td(dcc.Input(id='inp_code_lhs',
type='text',
value="MD1-40")),
html.Td(dcc.Input(
id='inp_hitwell_lhs',
type='text',
value="B1")),
html.Div('', id='input_info_lhs')])
btn_submit = html.Tr([html.Td(html.Button('Submit', id = 'submit-button_lhs', className='action-button', n_clicks=0)),
html.Div('', id='submit_info_lhs',style={'width': '50%'}),
])
##############################################################################
lhs_text = """
Latin hypercube sampling (LHS) is a sampling method for searching for optimal
parameters in a high dimensional space. The LHS is a near-random method, i.e.
the optimised condtions are not completely random, instead they obey certain
requirements. These requirements assure that the final sample points
will be spread more evenly across the range. LHS can be used for high-dimension
spaces, i.e. for more than two conditions.
"""
lhs_text_html = [html.P(i) for i in lhs_text.split("\n\n")]
lhs_layout = html.Div( [html.H2("About the Latin Hybercube sampling"),
dcc.Markdown(lhs_text, className="text-container", id="lhs_container",
# **{'data-iframe-height': ''},
style={ 'width': '50%','padding': '20px',
'margin': '10px','justify-content': 'center','align-items': 'center'})])
##############################################################################
# states = label_states + unit_states + low_states + high_states
states = [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
@app.callback(
    [Output('submit_info_lhs', 'children'),
     Output('inp_nvars_lhs', 'value')],
    [Input('submit-button_lhs', 'n_clicks')],
    states)
def update_output_code_hitwell(n_clicks, *args):
    """Callback for the Submit button.

    Finds the MDL screen spreadsheet whose filename matches the entered
    screen code, extracts the row for the entered hit well/tube, and returns
    it as an editable DataTable plus the number of concentration columns.
    """
    # args carries the State values: screen code and hit well, in
    # declaration order (hit well last).
    hitwell = args[-1]
    code_name = args[-2]
    # "*" turns the code into a glob-style pattern for fnmatch.
    code_name = code_name + "*"
    counter = 0
    file_list = []
    for file in os.listdir(myPath):
        if fnmatch.fnmatch(file, code_name):
            file_list.append(file)
    # Several files can share a prefix (e.g. MD1-10 vs MD1-10-ECO); sorting
    # and taking the first entry picks the plain variant.
    file_list.sort()
    print(file_list)
    if len(file_list) > 1:
        file_found = file_list[0]
    elif len(file_list) == 1:
        file_found = file_list[0]
    # NOTE(review): if no file matches, `file_found` is never bound and the
    # next line raises NameError -- TODO confirm intended handling.
    newpath = os.path.join(myPath, file_found)
    xls = pd.ExcelFile(newpath)
    df1 = pd.read_excel(xls)
    # The spreadsheet identifies conditions by either a "Tube #" or a
    # "Well #" column; detect which one this file uses.
    searchedValue = hitwell
    tube = df1.filter(like='Tube').columns
    well = df1.filter(like='Well').columns
    if well.empty == True:
        print('tube and tube number:', searchedValue)
        # Tube numbers are usually integers; fall back to a raw string
        # comparison if the int() conversion fails.
        try:
            df_searchedValue = df1[df1["Tube #"] == int(searchedValue)]
        except:
            print("Something went wrong, try something new")
            df_searchedValue = df1[df1["Tube #"] == searchedValue]
        df_new = df1.set_index("Tube #", drop = False)
        df_new.astype('str')
        df_hit_well = df_searchedValue
    else:
        try:
            df_searchedValue = df1[df1["Well #"] == searchedValue]
            df_new = df1.set_index("Well #", drop = False)
            df_hit_well = df_new.loc[[searchedValue]]
        except:
            # Lookup failed (e.g. well id not present): show an error message
            # and report 0 reagents.
            return ([ html.Tr([ html.Td(dcc.Textarea(
                value='An error occurred. Check if the inputs are correct. If there the error persists, please report at: enquiries@moleculardimensions.com',
                style={'width': '50%'}))]), 0])
    # Clean empty or nan columns so only populated reagent fields remain.
    df_hit_well = df_hit_well.replace(r'None', np.nan)
    df_hit_well = df_hit_well.replace(r'-', np.nan)
    df_hit_values = df_hit_well.dropna(axis='columns')
    rows = np.shape(df_hit_values)[0]
    columns = np.shape(df_hit_values)[1]
    # Columns whose name contains "Conc": their count becomes the reagent count.
    concentrations = df_hit_values.filter(like='Conc').columns
    # Editable table shown to the user; its data is consumed later by the
    # on_compute callback via the 'table_lhs' id.
    kk = dash_table.DataTable(
        id='table_lhs',
        data=df_hit_values.to_dict('records'), editable=True,
        columns=[{"name": i, "id": i} for i in df_hit_values.columns],
        fixed_columns={ 'headers': True, 'data': 1},
        style_cell = {
            # all three widths are needed
            'minWidth': '180hpx', 'width': '100px', 'maxWidth': '180px',
            'overflow': 'hidden',
            'textOverflow': 'ellipsis',
        }, style_as_list_view=True,)
    nvars_new = len(concentrations)
    if n_clicks > 0:
        return ([ html.Tr([html.Td(kk)]), nvars_new])
#------------------------------------------------------------------------------
###
# This feature is so the user can change the dimensions of the screen, i.e. the
# number of the wells. Initialises by the the dimensions of a common crystallisation
# screen 12x8
###
inp_nsamples = html.Tr([
html.Td('Enter screen dimensions '),
html.Td(
dcc.Input(
id='nsamples_x_lhs', type='number', value=8,
className="nsamples range")),
html.Td(html.Span('x')),
html.Td(
dcc.Input(
id='nsamples_y_lhs', type='number', value=12,
className="nsamples range"))
])
##############################################################################
btn_compute = html.Div([
html.Button('compute using LHS', id='btn_compute_lhs', className='action-button',
n_clicks = 0),
html.Div('', id='compute_info_lhs')
])
###
# Creation of dash app: setting up the layout
###
layout = html.Div(
[
lhs_layout,
html.Table([inp_code_hitwell]),
html.Br(),
html.Table([btn_submit]),
html.Br(),
html.Table([inp_nvars, inp_nsamples]),
html.Br(),
btn_compute,
#graph, hover_info,
],
style={'padding': 20},
id="container_lhs",
# tag for iframe resizer
**{'data-iframe-height': ''},
)
#------------------------------------------------------------------------------
##############################################################################
###
# Using State to share more than one input in the callback.
# ninps: no of inputs
###
# ninps = len(label_states + unit_states + low_states + high_states) + 5
ninps = 5 # no of inputs
states = [State('inp_nvars_lhs', 'value')]
states += [State('nsamples_x_lhs', 'value')]
states += [State('nsamples_y_lhs', 'value')]
states += [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
#------------------------------------------------------------------------------
###############################################################################
@app.callback(
    dash.dependencies.Output('compute_info_lhs', 'children'),
    [dash.dependencies.Input('table_lhs', 'data'),
     dash.dependencies.Input('btn_compute_lhs', 'n_clicks'),
     ], states)
def on_compute(submit_info, n_clicks, *args):
    """Callback for clicking compute button.

    Derives a search window around the hit-condition values and fills a
    nsamples_x * nsamples_y screen with Latin-hypercube samples.
    """
    if n_clicks is None :
        return ''
    df_hit_values = pd.DataFrame(submit_info)
    if len(args) != ninps:
        raise ValueError("Expected {} arguments".format(ninps))
    # parse arguments (State values arrive in reverse declaration order)
    hitwell = args[-1]
    code_name = args[-2]
    nsamples_y = args[-3]
    nsamples_x = args[-4]
    # Count how many columns from each category are on the selected file
    n_pH = len(df_hit_values.filter(like='pH').columns)
    n_units = len(df_hit_values.filter(like='Units').columns)
    n_salts = len(df_hit_values.filter(like='Salt').columns)
    n_buff = len(df_hit_values.filter(like='Buffer').columns)
    n_precip = len(df_hit_values.filter(like='Precipitant').columns)
    # Only the values of concentration and pH are going to change
    concentrations = df_hit_values.filter(like='Conc').columns
    var = df_hit_values[concentrations].to_numpy()
    var = var.T
    # NOTE(review): np.float was removed in NumPy 1.24 -- this line fails on
    # modern NumPy (the result is unused anyway); use builtin float instead.
    var_float = var.astype(np.float)
    pH = df_hit_values.filter(like='pH').columns
    pH = df_hit_values[pH].to_numpy()
    # Concentration rows are assumed to be ordered salt/buffer/precipitant;
    # the file format is crucial for the slicing below to work.
    salt_conc = var[0:n_salts]
    buff_conc = var[(n_salts):(n_salts+n_buff)]
    precip_conc = var[(n_salts+n_buff):(n_salts+n_buff+n_precip)]
    # Cast to float explicitly: after the user edits the table the values
    # are parsed as str.
    pH = pH.astype(float)
    pH = pH.T
    salt_conc = salt_conc.astype(float)
    precip_conc = precip_conc.astype(float)
    # Search window around the hit: salts x0.5..x2, pH +-1, precipitants x0.25..x4
    salt_range = [salt_conc[:]/2, salt_conc[:]*2]
    pH_range = [pH[:]-1, pH[:]+1]
    precip_range = [precip_conc[:]/4, precip_conc[:]*4]
    low_vals = np.concatenate([salt_range[0], pH_range[0], precip_range[0]])
    high_vals = np.concatenate([salt_range[1], pH_range[1], precip_range[1]])
    nvars = n_salts + n_pH + n_precip
    nsamples = nsamples_x*nsamples_y
    # Build human-readable column labels: "<reagent name> [<unit>]"
    salts_labels = df_hit_values.filter(like='Salt').columns.values
    print('salts_labels',salts_labels)
    buff_labels = df_hit_values.filter(like='Buffer').columns.values
    print('buff_labels',buff_labels)
    perci_labels = df_hit_values.filter(like='Precipitant').columns.values
    print('perci_labels',perci_labels)
    units_labels = df_hit_values.filter(like='Unit').columns.values
    print('unit_labels',units_labels)
    reagent_name = np.concatenate([df_hit_values.iloc[0][salts_labels[:]], df_hit_values.iloc[0][buff_labels[:]], df_hit_values.iloc[0][perci_labels[:]] ])
    print('reagent_name', reagent_name)
    reagent_name = reagent_name.tolist()
    reagent_name_1 = reagent_name[0]
    reagent_name_2 = reagent_name[1]
    labels = reagent_name
    labels_array = np.asarray(labels)
    dim = len(labels_array)
    styling_label_1 = [' ['] * len(labels)
    styling_label_2 = [']'] * len(labels)
    styling_label_1_array = np.asarray(styling_label_1)
    styling_label_2_array = np.asarray(styling_label_2)
    unit_name = np.concatenate([df_hit_values.iloc[0][units_labels[:]]])
    labels_array_new = ["" for x in range(dim)]
    ll = 0
    for i in range(dim):
        try:
            ll = ll+1
            counter = labels_array[i] + styling_label_1[i] + unit_name[i] + styling_label_2[i]
            labels_array_new[ll-1] = counter
        except:
            # A missing/odd unit entry leaves the labels unbuildable;
            # report the error to the user instead of crashing.
            return dcc.Textarea(
                placeholder='Enter a value...',
                value='An error occurred. Please report at: enquiries@moleculardimensions.com ',
                style={'width': '40%'}
            )
    samples = opti_models.compute_LHS(num_samples=nsamples,
                                      var_LB=low_vals,
                                      var_UB=high_vals)
    df = pd.DataFrame(data=samples, columns=labels_array_new)
    table = generate_table(df, nsamples_x, nsamples_y, download_link=True)
    np.set_printoptions(precision=3)
    if n_clicks > 0:
        return table
# #------------------------------------------------------------------------------
| 4,261 | 0 | 22 |
571a468125cef33b9d8272247a74fae8ec3eb72d | 1,596 | py | Python | fhir/views/search.py | ekivemark/BlueButtonFHIR_API | 9b5d9ca92b1e5ff0e9de046c87596ff3d5e66eef | [
"Apache-2.0"
] | 5 | 2016-03-02T23:25:39.000Z | 2020-10-29T07:28:42.000Z | fhir/views/search.py | HowardEdidin/BlueButtonFHIR_API | b8433055507bcc334f70bc864eacd379a04f69db | [
"Apache-2.0"
] | 13 | 2020-02-11T22:50:32.000Z | 2022-03-11T23:12:48.000Z | fhir/views/search.py | HowardEdidin/BlueButtonFHIR_API | b8433055507bcc334f70bc864eacd379a04f69db | [
"Apache-2.0"
] | 4 | 2016-02-02T19:17:24.000Z | 2020-10-10T16:10:31.000Z | import json
from collections import OrderedDict
from importlib import import_module
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from fhir.models import SupportedResourceType
from fhir.utils import kickout_400
from fhir.views.utils import check_access_interaction_and_resource_type
from fhir.settings import FHIR_BACKEND_FIND, DF_EXTRA_INFO
| 33.25 | 86 | 0.714912 | import json
from collections import OrderedDict
from importlib import import_module
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from fhir.models import SupportedResourceType
from fhir.utils import kickout_400
from fhir.views.utils import check_access_interaction_and_resource_type
from fhir.settings import FHIR_BACKEND_FIND, DF_EXTRA_INFO
def search(request, resource_type):
    """FHIR search interaction: validate access, then delegate the search
    for ``resource_type`` to the pluggable FHIR back-end.
    """
    interaction_type = 'search'
    # Check if this interaction type and resource type combo is allowed.
    deny = check_access_interaction_and_resource_type(resource_type, interaction_type)
    if deny:
        # If not allowed, return a 4xx error.
        return deny
    """Search Interaction"""
    # Example client use in curl:
    # curl -X GET http://127.0.0.1:8000/fhir/Practitioner?foo=bar
    if request.method != 'GET':
        msg = "HTTP method %s not supported at this URL." % (request.method)
        return kickout_400(msg)
    if settings.DEBUG:
        print("FHIR_BACKEND in search:",FHIR_BACKEND_FIND )
    return FHIR_BACKEND_FIND.find(request, resource_type)
    # NOTE(review): everything below is unreachable (dead code after the
    # return) -- apparently a stub kept while the logic moved to the
    # pluggable back-end. Consider deleting it.
    # Move to fhir_io_mongo (Plugable back-end)
    od = OrderedDict()
    if DF_EXTRA_INFO:
        od['request_method']= request.method
    od['interaction_type'] = "search"
    od['resource_type'] = resource_type
    if DF_EXTRA_INFO:
        od['search_params'] = request.GET
        od['note'] = "This is only a stub for future implementation"
    return HttpResponse(json.dumps(od, indent=4),
                        content_type="application/json")
| 1,167 | 0 | 23 |
bc1a636255cb6f1f9ae68b02ecc3615b14cfd340 | 38,206 | py | Python | src/preparation/normalization.py | JustinRuan/Pathological-images | 478f0b568068e591e282e9566786e683ec39a108 | [
"MIT"
] | 2 | 2022-01-17T12:04:02.000Z | 2022-03-08T21:59:39.000Z | src/preparation/normalization.py | JustinRuan/Pathological-images | 478f0b568068e591e282e9566786e683ec39a108 | [
"MIT"
] | null | null | null | src/preparation/normalization.py | JustinRuan/Pathological-images | 478f0b568068e591e282e9566786e683ec39a108 | [
"MIT"
] | 1 | 2020-03-08T09:00:43.000Z | 2020-03-08T09:00:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Justin'
__mtime__ = '2018-11-05'
"""
import time
import os
from skimage import color
import numpy as np
from skimage import io
import cv2
import torch
from core.util import read_csv_file, get_project_root, get_seeds
from preparation.hsd_transform import hsd2rgb, rgb2hsd
from visdom import Visdom
from core import Random_Gen
from preparation.acd_model import ACD_Model
from torch.autograd import Variable
from core import Block
# Reinhard algorithm
# @staticmethod
# def get_normalization_function(imgCone, params, extract_scale, patch_size, ):
# low_scale = params.GLOBAL_SCALE
# # 在有效检测区域内,均匀抽样
# eff_region = imgCone.get_effective_zone(low_scale)
# sampling_interval = 1000
# seeds = get_seeds(eff_region, low_scale, extract_scale, patch_size, spacingHigh=sampling_interval, margin=-4)
#
# # #不受限制地随机抽样
# # rx2 = int(imgCone.ImageWidth * extract_scale / params.GLOBAL_SCALE)
# # ry2 = int(imgCone.ImageHeight * extract_scale / params.GLOBAL_SCALE)
# # random_gen = Random_Gen("halton")
# #
# # N = 2000
# # # rx1, ry1, rx2, ry2 = self.valid_rect
# # x, y = self.random_gen.generate_random(N, 0, rx2, 0, ry2)
#
# images = []
# for x, y in seeds:
# block = imgCone.get_image_block(extract_scale, x, y, patch_size, patch_size)
# img = block.get_img()
# images.append(img)
#
# normal = HistNormalization("match_hist", hist_target ="hist_templates.npy",
# hist_source = None)
# normal.prepare(images)
#
# return normal
# import tensorflow as tf
# class ACDNormalization_tf(AbstractNormalization):
# def __init__(self, method, **kwarg):
# super(ACDNormalization_tf, self).__init__(method, **kwarg)
# self._pn = 100000
# self._bs = 1500
# self._step_per_epoch = int(self._pn / self._bs)
# self._epoch = int(300 / self._step_per_epoch)
# # self._pn = 100000
# # self._bs = 1500
# # self._step_per_epoch = 20
# # self._epoch = 15
#
# # self.dc_txt = kwarg["dc_txt"]
# # self.w_txt = kwarg["w_txt"]
# # self.template_path = kwarg["template_path"]
# self.dc_txt = "{}/data/{}".format(get_project_root(), kwarg["dc_txt"])
# self.w_txt = "{}/data/{}".format(get_project_root(), kwarg["w_txt"])
# self.template_path = "{}/data/{}".format(get_project_root(), kwarg["template_path"])
# self._template_dc_mat = None
# self._template_w_mat = None
#
# self.input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# self.target, self.cd, self.w = self.acd_model(self.input_od)
# self.init = tf.global_variables_initializer()
#
# # if(not os.path.exists(self.dc_txt) or not os.path.exists(self.w_txt)):
# # self.generate()
# self.generate()
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# self.inv = np.linalg.inv(self._template_dc_mat * self._template_w_mat)
#
# def normalize_on_batch(self, src_img):
# img = self.transform(src_img)
# return img
#
# def generate(self):
# template_list = os.listdir(self.template_path)
# temp_images = []
# for i, name in enumerate(template_list):
# # temp_images.append(cv2.imread(os.path.join(self.template_path, name))) # BGR
# # 读入RGB
# temp_images.append(io.imread(os.path.join(self.template_path, name)))
#
# # fit
# st = time.time()
# self.fit(temp_images)
# print('fit time', time.time() - st)
#
# def fit(self, images):
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# np.savetxt(self.dc_txt, opt_cd_mat)
# np.savetxt(self.w_txt, opt_w_mat)
#
# def transform(self, images):
#
# od = -np.log((np.asarray(images, np.float) + 1) / 256.0)
# normed_od = np.matmul(od, self.transform_mat)
# normed_images = np.exp(-normed_od) * 256 - 1
#
# return np.maximum(np.minimum(normed_images, 255), 0)/255
#
# def sampling_data(self, images):
# pixels = np.reshape(images, (-1, 3))
# pixels = pixels[:, (2, 1, 0)] # 从RGB变BGR
# pixels = pixels[np.random.choice(pixels.shape[0], min(self._pn * 20, pixels.shape[0]))]
# od = -np.log((np.asarray(pixels, np.float) + 1) / 256.0)
# tmp = np.mean(od, axis=1)
#
# # filter the background pixels (white or black)
# od = od[(tmp > 0.3) & (tmp < -np.log(30 / 256))]
# od = od[np.random.choice(od.shape[0], min(self._pn, od.shape[0]))]
#
# return od
#
# def extract_adaptive_cd_params(self, images):
# """
# :param images: RGB uint8 format in shape of [k, m, n, 3], where
# k is the number of ROIs sampled from a WSI, [m, n] is
# the size of ROI.
# """
# od_data = self.sampling_data(images)
# if self.input_od is None:
# input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# if self.target is None:
# self.target, self.cd, self.w = self.acd_model(input_od)
# if self.init is None:
# self.init = tf.global_variables_initializer()
#
# with tf.Session() as sess:
# with tf.device('/cpu:0'):
# sess.run(self.init)
# for ep in range(self._epoch):
# for step in range(self._step_per_epoch):
# sess.run(self.target, {self.input_od: od_data[step * self._bs:(step + 1) * self._bs]})
# opt_cd = sess.run(self.cd)
# opt_w = sess.run(self.w)
# return opt_cd, opt_w
#
# @staticmethod
# def acd_model(input_od, lambda_p=0.002, lambda_b=10, lambda_e=1, eta=0.6, gamma=0.5):
# """
# Stain matrix estimation via method of
# "Yushan Zheng, et al., Adaptive Color Deconvolution for Histological WSI Normalization."
# """
# init_varphi = np.asarray([[0.6060, 1.2680, 0.7989],
# [1.2383, 1.2540, 0.3927]])
# alpha = tf.Variable(init_varphi[0], dtype='float32')
# beta = tf.Variable(init_varphi[1], dtype='float32')
# w = [tf.Variable(1.0, dtype='float32'), tf.Variable(1.0, dtype='float32'), tf.constant(1.0)]
#
# sca_mat = tf.stack((tf.cos(alpha) * tf.sin(beta), tf.cos(alpha) * tf.cos(beta), tf.sin(alpha)), axis=1)
# cd_mat = tf.matrix_inverse(sca_mat)
#
# s = tf.matmul(input_od, cd_mat) * w
# h, e, b = tf.split(s, (1, 1, 1), axis=1)
#
# l_p1 = tf.reduce_mean(tf.square(b))
# l_p2 = tf.reduce_mean(2 * h * e / (tf.square(h) + tf.square(e)))
# l_b = tf.square((1 - eta) * tf.reduce_mean(h) - eta * tf.reduce_mean(e))
# l_e = tf.square(gamma - tf.reduce_mean(s))
#
# objective = l_p1 + lambda_p * l_p2 + lambda_b * l_b + lambda_e * l_e
#
# tag_dubeg = False
# if tag_dubeg:
# print_op = tf.print(['cd_mat: ', cd_mat])
# print_op2 = tf.print("objective", objective, ['l_p1: ', l_p1], ['l_p2: ', l_p2], ['l_b: ', l_b], ['l_p1: ', l_e])
# with tf.control_dependencies([print_op, print_op2]):
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
# else:
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
#
# return target, cd_mat, w
#
# def prepare(self, images):
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# if self._template_dc_mat is None:
# raise AssertionError('Run fit function first')
#
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# transform_mat = np.matmul(opt_cd_mat * opt_w_mat, self.inv)
#
# # 当输入图像为RGB时
# transform_mat = transform_mat[(2,1,0), :]
# self.transform_mat = transform_mat[:, (2,1,0)]
class ImageNormalizationTool(object):
'''
Lab颜色空间中的L分量用于表示像素的亮度,取值范围是[0,100],表示从纯黑到纯白;
a表示从红色到绿色的范围,取值范围是[127,-128];
b表示从黄色到蓝色的范围,取值范围是[127,-128]。
'''
| 35.975518 | 127 | 0.546406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Justin'
__mtime__ = '2018-11-05'
"""
import time
import os
from skimage import color
import numpy as np
from skimage import io
import cv2
import torch
from core.util import read_csv_file, get_project_root, get_seeds
from preparation.hsd_transform import hsd2rgb, rgb2hsd
from visdom import Visdom
from core import Random_Gen
from preparation.acd_model import ACD_Model
from torch.autograd import Variable
from core import Block
class AbstractNormalization(object):
    """Base class for stain/color normalization strategies.

    Subclasses implement `normalize`; `process` and `normalize_on_batch`
    are thin conveniences layered on top of it.
    """
    def __init__(self, method, **kwarg):
        # Remember which normalization algorithm this instance represents.
        self.method_name = method
    def process(self, src_img):
        """Normalize a single image (alias for `normalize`)."""
        return self.normalize(src_img)
    def normalize(self, src_img):
        """Normalize one image; must be overridden by subclasses."""
        raise NotImplementedError
    def normalize_on_batch(self, src_img_list):
        """Normalize every image in `src_img_list`, preserving order."""
        return [self.normalize(image) for image in src_img_list]
class RGBNormalization(AbstractNormalization):
    """Channel-wise mean/std matching performed directly in RGB space.

    Each channel is standardized with the source-domain statistics and
    then rescaled to the target-domain statistics.
    """
    def __init__(self, method, **kwarg):
        super(RGBNormalization, self).__init__(method, **kwarg)
        # Per-channel (R, G, B) statistics of the source and target domains.
        self.source_mean = kwarg["source_mean"]
        self.source_std = kwarg["source_std"]
        self.target_mean = kwarg["target_mean"]
        self.target_std = kwarg["target_std"]
    def normalize(self, src_img):
        """Map `src_img` (H x W x 3, RGB) onto the target statistics.

        :return: integer RGB image clipped to [0, 255].
        """
        # Split the R, G, B channels.
        rgb_r = src_img[:, :, 0]
        rgb_g = src_img[:, :, 1]
        rgb_b = src_img[:, :, 2]
        # Standardize with source stats, then rescale to target stats.
        rgb1_r = (rgb_r - self.source_mean[0]) / self.source_std[0] * self.target_std[0] + self.target_mean[0]
        rgb1_g = (rgb_g - self.source_mean[1]) / self.source_std[1] * self.target_std[1] + self.target_mean[1]
        rgb1_b = (rgb_b - self.source_mean[2]) / self.source_std[2] * self.target_std[2] + self.target_mean[2]
        # Keep values inside the valid 8-bit range.
        rgb1_r = np.clip(rgb1_r, 0, 255)
        rgb1_g = np.clip(rgb1_g, 0, 255)
        rgb1_b = np.clip(rgb1_b, 0, 255)
        # FIX: `np.int` was removed in NumPy >= 1.24; the builtin `int`
        # is the equivalent (platform default integer) dtype.
        rgb_result = np.dstack([rgb1_r.astype(int), rgb1_g.astype(int), rgb1_b.astype(int)])
        return rgb_result
# Reinhard algorithm
class ReinhardNormalization(AbstractNormalization):
    """Reinhard color transfer: mean/std matching in CIE-Lab space."""
    def __init__(self, method, **kwarg):
        super(ReinhardNormalization, self).__init__(method, **kwarg)
        # Per-channel (L, a, b) statistics of the source and target domains.
        self.source_mean = kwarg["source_mean"]
        self.source_std = kwarg["source_std"]
        self.target_mean = kwarg["target_mean"]
        self.target_std = kwarg["target_std"]
    def normalize(self, src_img):
        """Return an RGB float image whose Lab statistics match the target."""
        lab = color.rgb2lab(src_img)
        # Valid Lab ranges: L in [0, 100]; a and b in [-128, 127].
        bounds = ((0, 100), (-128, 127), (-128, 127))
        channels = []
        for idx, (low, high) in enumerate(bounds):
            plane = np.array(lab[:, :, idx])
            # Standardize with source stats, rescale to target stats.
            plane = (plane - self.source_mean[idx]) / self.source_std[idx] * self.target_std[idx] + self.target_mean[idx]
            channels.append(np.clip(plane, low, high))
        # Lab -> RGB conversion.
        return color.lab2rgb(np.dstack(channels))
class HSDNormalization(AbstractNormalization):
    """Mean/std matching performed in HSD color space."""
    def __init__(self, method, **kwarg):
        super(HSDNormalization, self).__init__(method, **kwarg)
        # Per-channel (H, S, D) statistics of the source and target domains.
        self.source_mean = kwarg["source_mean"]
        self.source_std = kwarg["source_std"]
        self.target_mean = kwarg["target_mean"]
        self.target_std = kwarg["target_std"]
    def normalize(self, src_img):
        """Return an RGB image whose HSD statistics match the target."""
        hsd = rgb2hsd(src_img)
        # Standardize each channel with source stats, rescale to target stats.
        adjusted = [
            (hsd[:, :, c] - self.source_mean[c]) / self.source_std[c] * self.target_std[c] + self.target_mean[c]
            for c in range(3)
        ]
        # Note: unlike the RGB/Lab variants, values are not clipped here
        # before the HSD -> RGB conversion.
        return hsd2rgb(np.dstack(adjusted))
class HistNormalization(AbstractNormalization):
    """Histogram-matching normalization in CIE-Lab space.

    A per-channel lookup table (LUT) maps the source histogram onto a
    pre-computed target histogram loaded from ``data/<hist_target>``.
    When ``hist_source`` is None the LUT is built later via `prepare()`.
    """
    def __init__(self, method, **kwarg):
        super(HistNormalization, self).__init__(method, **kwarg)
        target_path = "{}/data/{}".format(get_project_root(), kwarg["hist_target"])
        hist_target = np.load(target_path).item()
        self.hist_target = hist_target
        # Bookkeeping list; not used elsewhere within this class.
        self._history = []
        # When True, prepare() may recompute the LUT from new samples.
        self.enable_update = True
        if kwarg["hist_source"] is not None:
            print("reading histogram file ...")
            source_path = "{}/data/{}".format(get_project_root(), kwarg["hist_source"])
            print("reading histogram file: ", source_path)
            hist_source = np.load(source_path).item()
            LUT = []
            # Valid Lab ranges: L in [0, 100]; a and b in [-128, 127].
            LUT.append(self._estimate_cumulative_cdf(hist_source["L"], hist_target["L"], start=0, end=100))
            LUT.append(self._estimate_cumulative_cdf(hist_source["A"], hist_target["A"], start=-128, end=127))
            LUT.append(self._estimate_cumulative_cdf(hist_source["B"], hist_target["B"], start=-128, end=127))
            self.LUT = LUT
            self.hist_source = hist_source
            self.hist_target = hist_target
        else:
            # Will be initialized later by the prepare() step.
            self.LUT = None
            self.hist_source = None
    def _estimate_cumulative_cdf(self, source, template, start, end):
        """Build a LUT over [start, end] that maps the source distribution
        onto the template distribution (histogram matching via matched
        cumulative quantiles).

        :param source: (values, counts) pair of the source histogram.
        :param template: (values, counts) pair of the target histogram.
        :return: rounded LUT array of length ``end - start + 1``.
        """
        src_values, src_counts = source
        tmpl_values, tmpl_counts = template
        # calculate normalized quantiles for each array
        src_quantiles = np.cumsum(src_counts) / np.sum(src_counts)
        tmpl_quantiles = np.cumsum(tmpl_counts) / np.sum(tmpl_counts)
        interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
        # Pin both endpoints so the LUT covers the full [start, end] range.
        if src_values[0] > start:
            src_values = np.insert(src_values, 0, start)
            interp_a_values = np.insert(interp_a_values, 0, start)
        if src_values[-1] < end:
            src_values = np.append(src_values, end)
            interp_a_values = np.append(interp_a_values, end)
        new_source = np.arange(start, end + 1)
        interp_b_values = np.interp(new_source, src_values, interp_a_values)
        return np.rint(interp_b_values)
    def _calculate_hist(self, image_list):
        """Accumulate per-channel Lab histograms over `image_list`.

        :return: dict with keys "L"/"A"/"B", each a (values, counts) pair.
        """
        data_L = []
        data_A = []
        data_B = []
        for img in image_list:
            lab_img = color.rgb2lab(img)
            # Split the L, a, b channels.
            labO_l = np.array(lab_img[:, :, 0])
            labO_a = np.array(lab_img[:, :, 1])
            labO_b = np.array(lab_img[:, :, 2])
            # FIX: `np.int` was removed in NumPy >= 1.24; builtin `int`
            # is the equivalent dtype.
            data_L.append(labO_l.astype(int))
            data_A.append(labO_a.astype(int))
            data_B.append(labO_b.astype(int))
        data_L = np.array(data_L)
        data_A = np.array(data_A)
        data_B = np.array(data_B)
        L_values, L_counts = np.unique(data_L.ravel(), return_counts=True)
        A_values, A_counts = np.unique(data_A.ravel(), return_counts=True)
        B_values, B_counts = np.unique(data_B.ravel(), return_counts=True)
        return {"L": (L_values, L_counts), "A": (A_values, A_counts), "B": (B_values, B_counts)}
    def normalize(self, src_img):
        """Apply the per-channel LUT to one RGB image.

        :return: float RGB image (lab2rgb scales the result into [0, 1]).
        """
        lab_img = color.rgb2lab(src_img)
        # Split the L, a, b channels and round toward integer LUT indices.
        lab0_l = np.array(lab_img[:, :, 0]).astype(int)
        lab0_a = np.array(lab_img[:, :, 1]).astype(int)
        lab0_b = np.array(lab_img[:, :, 2]).astype(int)
        LUT_L = self.LUT[0]
        lab1_l = LUT_L[lab0_l]
        # The a/b LUTs start at value -128, hence the +128 index offset.
        LUT_A = self.LUT[1]
        lab1_a = LUT_A[128 + lab0_a]
        LUT_B = self.LUT[2]
        lab1_b = LUT_B[128 + lab0_b]
        labO = np.dstack([lab1_l, lab1_a, lab1_b])
        # Lab -> RGB conversion (output divided into [0, 1]).
        rgb_image = color.lab2rgb(labO)
        return rgb_image
    def prepare(self, image_list):
        """Recompute the source histogram and the LUT from `image_list`.

        No-op when `enable_update` is False.
        """
        if not self.enable_update:
            return
        hist_source = self._calculate_hist(image_list)
        hist_target = self.hist_target
        LUT = []
        LUT.append(self._estimate_cumulative_cdf(hist_source["L"], hist_target["L"], start=0, end=100))
        LUT.append(self._estimate_cumulative_cdf(hist_source["A"], hist_target["A"], start=-128, end=127))
        LUT.append(self._estimate_cumulative_cdf(hist_source["B"], hist_target["B"], start=-128, end=127))
        # update
        self.LUT = LUT
        self.hist_source = hist_source
    def draw_hist(self, fig_name):
        """Plot source vs. target Lab histograms (blue=target, red=source) with visdom."""
        hist_source = self.hist_source
        hist_target = self.hist_target
        viz = Visdom(env="main")
        pic_L = viz.line(
            Y=hist_target["L"][1],
            X=hist_target["L"][0],
            opts={
                'linecolor': np.array([
                    [0, 0, 255],
                ]),
                'dash': np.array(['solid']),  # 'solid', 'dash', 'dashdot'
                'showlegend': True,
                'xlabel': 'L channel',
                'ylabel': 'Probability',
                'title': 'Histogram of L - {}'.format(fig_name),
            },
            name='target',
        )
        viz.line(
            Y=hist_source["L"][1],
            X=hist_source["L"][0],
            opts={
                'linecolor': np.array([
                    [255, 0, 0],
                ]),
            },
            name='source',
            win=pic_L,
            update='insert',
        )
        pic_A = viz.line(
            Y=hist_target["A"][1],
            X=hist_target["A"][0],
            opts={
                'linecolor': np.array([
                    [0, 0, 255],
                ]),
                'dash': np.array(['solid']),  # 'solid', 'dash', 'dashdot'
                'showlegend': True,
                'xlabel': 'A channel',
                'ylabel': 'Probability',
                'title': 'Histogram of A - {}'.format(fig_name),
            },
            name='target',
        )
        viz.line(
            Y=hist_source["A"][1],
            X=hist_source["A"][0],
            opts={
                'linecolor': np.array([
                    [255, 0, 0],
                ]),
            },
            name='source',
            win=pic_A,
            update='insert',
        )
        pic_B = viz.line(
            Y=hist_target["B"][1],
            X=hist_target["B"][0],
            opts={
                'linecolor': np.array([
                    [0, 0, 255],
                ]),
                'dash': np.array(['solid']),  # 'solid', 'dash', 'dashdot'
                'showlegend': True,
                'xlabel': 'B channel',
                'ylabel': 'Probability',
                'title': 'Histogram of B - {}'.format(fig_name),
            },
            name='target',
        )
        viz.line(
            Y=hist_source["B"][1],
            X=hist_source["B"][0],
            opts={
                'linecolor': np.array([
                    [255, 0, 0],
                ]),
            },
            name='source',
            win=pic_B,
            update='insert',
        )
    def draw_normalization_func(self, fig_name):
        """Plot the three LUT transfer curves (L, A, B) with visdom."""
        viz = Visdom(env="main")
        pic_func = viz.line(
            Y=self.LUT[0],
            X=np.arange(0, 101),
            opts={
                'linecolor': np.array([
                    [0, 0, 255],
                ]),
                'dash': np.array(['solid']),  # 'solid', 'dash', 'dashdot'
                'showlegend': True,
                'xlabel': 'range',
                'ylabel': 'value',
                'title': 'function -{}'.format(fig_name),
            },
            name='L',
        )
        viz.line(
            Y=self.LUT[1],
            X=np.arange(-128, 128),
            opts={
                'linecolor': np.array([
                    [0, 255, 0],
                ]),
            },
            name='A',
            win=pic_func,
            update='insert',
        )
        viz.line(
            Y=self.LUT[2],
            X=np.arange(-128, 128),
            opts={
                'linecolor': np.array([
                    [255, 0, 0],
                ]),
            },
            name='B',
            win=pic_func,
            update='insert',
        )
# @staticmethod
# def get_normalization_function(imgCone, params, extract_scale, patch_size, ):
# low_scale = params.GLOBAL_SCALE
# # 在有效检测区域内,均匀抽样
# eff_region = imgCone.get_effective_zone(low_scale)
# sampling_interval = 1000
# seeds = get_seeds(eff_region, low_scale, extract_scale, patch_size, spacingHigh=sampling_interval, margin=-4)
#
# # #不受限制地随机抽样
# # rx2 = int(imgCone.ImageWidth * extract_scale / params.GLOBAL_SCALE)
# # ry2 = int(imgCone.ImageHeight * extract_scale / params.GLOBAL_SCALE)
# # random_gen = Random_Gen("halton")
# #
# # N = 2000
# # # rx1, ry1, rx2, ry2 = self.valid_rect
# # x, y = self.random_gen.generate_random(N, 0, rx2, 0, ry2)
#
# images = []
# for x, y in seeds:
# block = imgCone.get_image_block(extract_scale, x, y, patch_size, patch_size)
# img = block.get_img()
# images.append(img)
#
# normal = HistNormalization("match_hist", hist_target ="hist_templates.npy",
# hist_source = None)
# normal.prepare(images)
#
# return normal
class ACDNormalization(AbstractNormalization):
    """Adaptive Color Deconvolution (ACD) stain normalization.

    Fits a stain color-deconvolution matrix on template images and maps
    new images onto that template in optical-density (OD) space
    (Zheng et al., "Adaptive Color Deconvolution for Histological WSI
    Normalization").
    """
    def __init__(self, method, **kwarg):
        super(ACDNormalization, self).__init__(method, **kwarg)
        # Optimization hyper-parameters: sampled pixel count, batch size,
        # and an epoch count sized so roughly 300 steps run in total.
        self._pn = 100000
        self._bs = 2000
        self._step_per_epoch = int(self._pn / self._bs)
        self._epoch = int(300 / self._step_per_epoch)
        # Files caching the fitted template deconvolution matrix / weights.
        self.dc_txt = "{}/data/{}".format(get_project_root(), kwarg["dc_txt"])
        self.w_txt = "{}/data/{}".format(get_project_root(), kwarg["w_txt"])
        self.template_path = "{}/data/{}".format(get_project_root(), kwarg["template_path"])
        self._template_dc_mat = None
        self._template_w_mat = None
        # Always (re)fit the template parameters, then load them back.
        self.generate()
        self._template_dc_mat = np.loadtxt(self.dc_txt)
        self._template_w_mat = np.loadtxt(self.w_txt)
    def normalize_on_batch(self, src_img):
        return self.normalize(src_img)
    def normalize(self, src_img):
        """Apply the prepared `transform_mat` in OD space.

        :return: float image(s) scaled into [0, 1].
        """
        # FIX: `np.float` was removed in NumPy >= 1.24; builtin `float`
        # is the equivalent dtype.
        od = -np.log((np.asarray(src_img, float) + 1) / 256.0)
        normed_od = np.matmul(od, self.transform_mat)
        normed_images = np.exp(-normed_od) * 256 - 1
        result = (np.clip(normed_images, 0, 255)) / 255
        return result
    def generate(self):
        """Fit the template stain parameters from the .jpg template images."""
        template_list = os.listdir(self.template_path)
        temp_images = []
        for i, name in enumerate(template_list):
            if name.endswith(".jpg"):
                # Images are read in RGB order.
                temp_images.append(io.imread(os.path.join(self.template_path, name)))
        temp_images = np.array(temp_images)
        # fit
        self.fit(temp_images)
    def fit(self, images):
        """Estimate and persist the template deconvolution matrix and weights."""
        opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
        np.savetxt(self.dc_txt, opt_cd_mat)
        np.savetxt(self.w_txt, opt_w_mat)
    def sampling_data(self, images):
        """Sample up to `_pn` non-background pixels and return their optical densities."""
        pixels = np.reshape(images, (-1, 3))
        pixels = pixels[:, (2, 1, 0)]  # reorder RGB -> BGR
        pixels = pixels[np.random.choice(pixels.shape[0], min(self._pn * 20, pixels.shape[0]))]
        # FIX: builtin `float` replaces the removed `np.float` alias.
        od = -np.log((np.asarray(pixels, float) + 1) / 256.0)
        tmp = np.mean(od, axis=1)
        # filter the background pixels (white or black)
        od = od[(tmp > 0.3) & (tmp < -np.log(30 / 256))]
        od = od[np.random.choice(od.shape[0], min(self._pn, od.shape[0]))]
        return od
    def extract_adaptive_cd_params(self, images):
        """
        :param images: RGB uint8 format in shape of [k, m, n, 3], where
                       k is the number of ROIs sampled from a WSI, [m, n] is
                       the size of ROI.
        :return: (cd_mat, w) fitted deconvolution matrix and channel weights
                 (a constant 1.0 is appended as the third weight).
        """
        self.device = torch.device("cpu")
        od_data = self.sampling_data(images)
        model = ACD_Model()
        model.to(self.device)
        optimizer = torch.optim.Adagrad(model.parameters(), lr=0.05)
        model.train()
        for ep in range(self._epoch):
            for step in range(self._step_per_epoch):
                batch_data = od_data[step * self._bs:(step + 1) * self._bs]
                if len(batch_data) == 0:
                    break
                x = torch.from_numpy(batch_data).float()
                b_x = Variable(x.to(self.device))
                out = model(b_x)
                loss = model.loss_function(out)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        opt_cd = model.cd_mat.data.cpu().numpy()
        opt_w = np.append(model.w.data.cpu().numpy(), [1.0])
        return opt_cd, opt_w
    def transform(self, images):  # written for BGR-ordered input
        """One-shot transform: fit source stain params on `images` and map
        them onto the template in OD space; returns floats in [0, 1]."""
        if self._template_dc_mat is None:
            raise AssertionError('Run fit function first')
        opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
        transform_mat = np.matmul(opt_cd_mat * opt_w_mat, np.linalg.inv(self._template_dc_mat * self._template_w_mat))
        # FIX: builtin `float` replaces the removed `np.float` alias.
        od = -np.log((np.asarray(images, float) + 1) / 256.0)
        normed_od = np.matmul(od, transform_mat)
        normed_images = np.exp(-normed_od) * 256 - 1
        return (np.clip(normed_images, 0, 255)) / 255
    def prepare(self, images):
        """Fit source stain params on `images` and cache `transform_mat`
        (row/column order swapped for RGB input) for later `normalize` calls."""
        if self._template_dc_mat is None:
            raise AssertionError('Run fit function first')
        opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
        transform_mat = np.matmul(opt_cd_mat * opt_w_mat,
                                  np.linalg.inv(self._template_dc_mat * self._template_w_mat))
        # The matrix is estimated on BGR-ordered pixels (see sampling_data);
        # swap rows and columns so it applies to RGB input.
        transform_mat = transform_mat[(2, 1, 0), :]
        self.transform_mat = transform_mat[:, (2, 1, 0)]
        return
    def filter_all_white(self, images):
        """Return True when no image in the batch has a mean inside (25, 225),
        i.e. every image looks like near-uniform background."""
        for item in images:
            m = np.mean(item)
            if 25 < m < 225:
                return False
        return True
# import tensorflow as tf
# class ACDNormalization_tf(AbstractNormalization):
# def __init__(self, method, **kwarg):
# super(ACDNormalization_tf, self).__init__(method, **kwarg)
# self._pn = 100000
# self._bs = 1500
# self._step_per_epoch = int(self._pn / self._bs)
# self._epoch = int(300 / self._step_per_epoch)
# # self._pn = 100000
# # self._bs = 1500
# # self._step_per_epoch = 20
# # self._epoch = 15
#
# # self.dc_txt = kwarg["dc_txt"]
# # self.w_txt = kwarg["w_txt"]
# # self.template_path = kwarg["template_path"]
# self.dc_txt = "{}/data/{}".format(get_project_root(), kwarg["dc_txt"])
# self.w_txt = "{}/data/{}".format(get_project_root(), kwarg["w_txt"])
# self.template_path = "{}/data/{}".format(get_project_root(), kwarg["template_path"])
# self._template_dc_mat = None
# self._template_w_mat = None
#
# self.input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# self.target, self.cd, self.w = self.acd_model(self.input_od)
# self.init = tf.global_variables_initializer()
#
# # if(not os.path.exists(self.dc_txt) or not os.path.exists(self.w_txt)):
# # self.generate()
# self.generate()
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# self.inv = np.linalg.inv(self._template_dc_mat * self._template_w_mat)
#
# def normalize_on_batch(self, src_img):
# img = self.transform(src_img)
# return img
#
# def generate(self):
# template_list = os.listdir(self.template_path)
# temp_images = []
# for i, name in enumerate(template_list):
# # temp_images.append(cv2.imread(os.path.join(self.template_path, name))) # BGR
# # 读入RGB
# temp_images.append(io.imread(os.path.join(self.template_path, name)))
#
# # fit
# st = time.time()
# self.fit(temp_images)
# print('fit time', time.time() - st)
#
# def fit(self, images):
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# np.savetxt(self.dc_txt, opt_cd_mat)
# np.savetxt(self.w_txt, opt_w_mat)
#
# def transform(self, images):
#
# od = -np.log((np.asarray(images, np.float) + 1) / 256.0)
# normed_od = np.matmul(od, self.transform_mat)
# normed_images = np.exp(-normed_od) * 256 - 1
#
# return np.maximum(np.minimum(normed_images, 255), 0)/255
#
# def sampling_data(self, images):
# pixels = np.reshape(images, (-1, 3))
# pixels = pixels[:, (2, 1, 0)] # 从RGB变BGR
# pixels = pixels[np.random.choice(pixels.shape[0], min(self._pn * 20, pixels.shape[0]))]
# od = -np.log((np.asarray(pixels, np.float) + 1) / 256.0)
# tmp = np.mean(od, axis=1)
#
# # filter the background pixels (white or black)
# od = od[(tmp > 0.3) & (tmp < -np.log(30 / 256))]
# od = od[np.random.choice(od.shape[0], min(self._pn, od.shape[0]))]
#
# return od
#
# def extract_adaptive_cd_params(self, images):
# """
# :param images: RGB uint8 format in shape of [k, m, n, 3], where
# k is the number of ROIs sampled from a WSI, [m, n] is
# the size of ROI.
# """
# od_data = self.sampling_data(images)
# if self.input_od is None:
# input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# if self.target is None:
# self.target, self.cd, self.w = self.acd_model(input_od)
# if self.init is None:
# self.init = tf.global_variables_initializer()
#
# with tf.Session() as sess:
# with tf.device('/cpu:0'):
# sess.run(self.init)
# for ep in range(self._epoch):
# for step in range(self._step_per_epoch):
# sess.run(self.target, {self.input_od: od_data[step * self._bs:(step + 1) * self._bs]})
# opt_cd = sess.run(self.cd)
# opt_w = sess.run(self.w)
# return opt_cd, opt_w
#
# @staticmethod
# def acd_model(input_od, lambda_p=0.002, lambda_b=10, lambda_e=1, eta=0.6, gamma=0.5):
# """
# Stain matrix estimation via method of
# "Yushan Zheng, et al., Adaptive Color Deconvolution for Histological WSI Normalization."
# """
# init_varphi = np.asarray([[0.6060, 1.2680, 0.7989],
# [1.2383, 1.2540, 0.3927]])
# alpha = tf.Variable(init_varphi[0], dtype='float32')
# beta = tf.Variable(init_varphi[1], dtype='float32')
# w = [tf.Variable(1.0, dtype='float32'), tf.Variable(1.0, dtype='float32'), tf.constant(1.0)]
#
# sca_mat = tf.stack((tf.cos(alpha) * tf.sin(beta), tf.cos(alpha) * tf.cos(beta), tf.sin(alpha)), axis=1)
# cd_mat = tf.matrix_inverse(sca_mat)
#
# s = tf.matmul(input_od, cd_mat) * w
# h, e, b = tf.split(s, (1, 1, 1), axis=1)
#
# l_p1 = tf.reduce_mean(tf.square(b))
# l_p2 = tf.reduce_mean(2 * h * e / (tf.square(h) + tf.square(e)))
# l_b = tf.square((1 - eta) * tf.reduce_mean(h) - eta * tf.reduce_mean(e))
# l_e = tf.square(gamma - tf.reduce_mean(s))
#
# objective = l_p1 + lambda_p * l_p2 + lambda_b * l_b + lambda_e * l_e
#
# tag_dubeg = False
# if tag_dubeg:
# print_op = tf.print(['cd_mat: ', cd_mat])
# print_op2 = tf.print("objective", objective, ['l_p1: ', l_p1], ['l_p2: ', l_p2], ['l_b: ', l_b], ['l_p1: ', l_e])
# with tf.control_dependencies([print_op, print_op2]):
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
# else:
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
#
# return target, cd_mat, w
#
# def prepare(self, images):
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# if self._template_dc_mat is None:
# raise AssertionError('Run fit function first')
#
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# transform_mat = np.matmul(opt_cd_mat * opt_w_mat, self.inv)
#
# # 当输入图像为RGB时
# transform_mat = transform_mat[(2,1,0), :]
# self.transform_mat = transform_mat[:, (2,1,0)]
class ImageNormalizationTool(object):
    """Utilities for collecting per-channel color statistics (RGB / Lab / HSD)
    over a patch dataset, building Lab histograms, and batch-normalizing a
    dataset with the ACD method.
    """
    def __init__(self, params):
        # Global configuration object; this class reads PATCHS_ROOT_PATH and
        # PROJECT_ROOT from it.
        self._params = params
        # Parameters used during normalization.
        return
    def calculate_avg_mean_std_RGB(self, source_code, data_filenames):
        """Average the per-patch mean and std of the R, G, B channels.

        :param source_code: key into PATCHS_ROOT_PATH selecting the dataset root.
        :param data_filenames: listing files; the first whitespace-separated
            token of each line is a patch path relative to the root.
        :return: (avg_mean_r, avg_mean_g, avg_mean_b, avg_std_r, avg_std_g, avg_std_b)
        """
        root_path = self._params.PATCHS_ROOT_PATH[source_code]
        count = 0
        mean_r = []
        mean_g = []
        mean_b = []
        std_r = []
        std_g = []
        std_b = []
        for data_filename in data_filenames:
            data_file = "{}/{}".format(root_path, data_filename)
            f = open(data_file, "r")
            for line in f:
                items = line.split(" ")
                patch_file = "{}/{}".format(root_path, items[0])
                img = io.imread(patch_file, as_gray=False)
                # lab_img = color.rgb2lab(img)
                # Split the R, G, B channels.
                rgb_r = img[:, :, 0]
                rgb_g = img[:, :, 1]
                rgb_b = img[:, :, 2]
                # Collect per-patch statistics; averaging them later yields
                # the dataset-level normalization parameters.
                std_r.append(np.std(rgb_r))
                std_g.append(np.std(rgb_g))
                std_b.append(np.std(rgb_b))
                mean_r.append(np.mean(rgb_r))
                mean_g.append(np.mean(rgb_g))
                mean_b.append(np.mean(rgb_b))
                if (0 == count%1000):
                    print("{} calculate mean and std >>> {}".format(time.asctime( time.localtime()), count))
                count += 1
            f.close()
        avg_mean_r = np.mean(mean_r)
        avg_mean_g = np.mean(mean_g)
        avg_mean_b = np.mean(mean_b)
        avg_std_r = np.mean(std_r)
        avg_std_g = np.mean(std_g)
        avg_std_b = np.mean(std_b)
        return avg_mean_r, avg_mean_g, avg_mean_b, avg_std_r, avg_std_g, avg_std_b
    # (Translation of the note below) In Lab color space: L is pixel
    # lightness in [0, 100], from pure black to pure white; a runs from
    # red to green over [127, -128]; b runs from yellow to blue over
    # [127, -128].
    '''
    Lab颜色空间中的L分量用于表示像素的亮度,取值范围是[0,100],表示从纯黑到纯白;
    a表示从红色到绿色的范围,取值范围是[127,-128];
    b表示从黄色到蓝色的范围,取值范围是[127,-128]。
    '''
    def calculate_avg_mean_std(self, source_code, data_filenames):
        """Average the per-patch mean and std of the L, a, b channels
        (same listing-file format as calculate_avg_mean_std_RGB).

        :return: (avg_mean_l, avg_mean_a, avg_mean_b, avg_std_l, avg_std_a, avg_std_b)
        """
        root_path = self._params.PATCHS_ROOT_PATH[source_code]
        count = 0
        mean_l = []
        mean_a = []
        mean_b = []
        std_l = []
        std_a = []
        std_b = []
        for data_filename in data_filenames:
            data_file = "{}/{}".format(root_path, data_filename)
            f = open(data_file, "r")
            for line in f:
                items = line.split(" ")
                patch_file = "{}/{}".format(root_path, items[0])
                img = io.imread(patch_file, as_gray=False)
                lab_img = color.rgb2lab(img)
                # Split the L, a, b channels.
                labO_l = lab_img[:, :, 0]
                labO_a = lab_img[:, :, 1]
                labO_b = lab_img[:, :, 2]
                # Collect per-patch statistics for later averaging.
                std_l.append(np.std(labO_l))
                std_a.append(np.std(labO_a))
                std_b.append(np.std(labO_b))
                mean_l.append(np.mean(labO_l))
                mean_a.append(np.mean(labO_a))
                mean_b.append(np.mean(labO_b))
                if (0 == count%1000):
                    print("{} calculate mean and std >>> {}".format(time.asctime( time.localtime()), count))
                count += 1
            f.close()
        avg_mean_l = np.mean(mean_l)
        avg_mean_a = np.mean(mean_a)
        avg_mean_b = np.mean(mean_b)
        avg_std_l = np.mean(std_l)
        avg_std_a = np.mean(std_a)
        avg_std_b = np.mean(std_b)
        return avg_mean_l, avg_mean_a, avg_mean_b, avg_std_l, avg_std_a, avg_std_b
    def calculate_hist(self, source_code, source_txt, file_code):
        """Build per-channel Lab histograms over the images listed in
        `source_txt` and save them as ``<PROJECT_ROOT>/data/<file_code>.npy``.
        """
        def _generate_histogram(filennames):
            # Histogram bins: L covers 0..100, a/b cover -128..127.
            Shape_L = (101,) # 100 + 1
            Shape_A = (256,) # 127 + 128 + 1
            Shape_B = (256,)
            hist_l = np.zeros(Shape_L)
            hist_a = np.zeros(Shape_A)
            hist_b = np.zeros(Shape_B)
            for K, file in enumerate(filennames):
                img = io.imread(file, as_gray=False)
                lab_img = color.rgb2lab(img)
                # Split the L, a, b channels.
                labO_l = np.array(lab_img[:, :, 0])
                labO_a = np.array(lab_img[:, :, 1])
                labO_b = np.array(lab_img[:, :, 2])
                # Round to the nearest integer bin.
                labO_l = np.rint(labO_l)
                labO_a = np.rint(labO_a)
                labO_b = np.rint(labO_b)
                values, counts = np.unique(labO_l.ravel(), return_counts=True)
                for value, count in zip(values, counts):
                    hist_l[int(value)] += count
                # a/b bins start at value -128, hence the +128 index offset.
                values, counts = np.unique(labO_a.ravel(), return_counts=True)
                for value, count in zip(values, counts):
                    hist_a[int(value) + 128] += count
                values, counts = np.unique(labO_b.ravel(), return_counts=True)
                for value, count in zip(values, counts):
                    hist_b[int(value) + 128] += count
                if (0 == K % 1000):
                    print("{} calculate histogram >>> {}".format(time.asctime(time.localtime()), K))
            # Drop empty bins; keep only (value, count) pairs that occurred.
            tag = hist_l > 0
            values_l = np.arange(0, 101)
            hist_l = hist_l[tag]
            values_l = values_l[tag]
            tag = hist_a > 0
            values_a = np.arange(-128, 128)
            hist_a = hist_a[tag]
            values_a = values_a[tag]
            tag = hist_b > 0
            values_b = np.arange(-128, 128)
            hist_b = hist_b[tag]
            values_b = values_b[tag]
            return {"L": (values_l, hist_l), "A": (values_a, hist_a), "B": (values_b, hist_b)}
        root_path = self._params.PATCHS_ROOT_PATH
        print("prepare transform function ...", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        source_path = "{}/{}".format(root_path[source_code], source_txt)
        source_files, _ = read_csv_file(root_path[source_code], source_path)
        print("Loaded the number of images = ", len(source_files))
        hist_sources = _generate_histogram(source_files)
        project_root = self._params.PROJECT_ROOT
        np.save("{}/data/{}".format(project_root, file_code), hist_sources)
        return
    def calculate_avg_mean_std_HSD(self, source_code, data_filenames):
        """Average the per-patch mean and std of the H, S, D channels
        (same listing-file format as calculate_avg_mean_std_RGB).

        :return: (avg_mean_h, avg_mean_s, avg_mean_d, avg_std_h, avg_std_s, avg_std_d)
        """
        root_path = self._params.PATCHS_ROOT_PATH[source_code]
        count = 0
        mean_h = []
        mean_s = []
        mean_d = []
        std_h = []
        std_s = []
        std_d = []
        for data_filename in data_filenames:
            data_file = "{}/{}".format(root_path, data_filename)
            f = open(data_file, "r")
            for line in f:
                items = line.split(" ")
                patch_file = "{}/{}".format(root_path, items[0])
                img = io.imread(patch_file, as_gray=False)
                hsd_img = rgb2hsd(img)
                # Split the H, S, D channels.
                hsdO_h = hsd_img[:, :, 0]
                hsdO_s = hsd_img[:, :, 1]
                hsdO_d = hsd_img[:, :, 2]
                # Collect per-patch statistics for later averaging.
                std_h.append(np.std(hsdO_h))
                std_s.append(np.std(hsdO_s))
                std_d.append(np.std(hsdO_d))
                mean_h.append(np.mean(hsdO_h))
                mean_s.append(np.mean(hsdO_s))
                mean_d.append(np.mean(hsdO_d))
                if (0 == count%1000):
                    print("{} calculate mean and std >>> {}".format(time.asctime( time.localtime()), count))
                count += 1
            f.close()
        avg_mean_h = np.mean(mean_h)
        avg_mean_s = np.mean(mean_s)
        avg_mean_d = np.mean(mean_d)
        avg_std_h = np.mean(std_h)
        avg_std_s = np.mean(std_s)
        avg_std_d = np.mean(std_d)
        return avg_mean_h, avg_mean_s, avg_mean_d, avg_std_h, avg_std_s, avg_std_d
    def normalize_dataset(self, source_samples, tagrget_dir, range = None, batch_size = 20):
        """ACD-normalize every patch listed by `source_samples` and save the
        results into ``<root>/<tagrget_dir>_cancer`` / ``_normal`` (label 0
        goes to _normal, anything else to _cancer).

        :param source_samples: (source_code, sample_filename) pair.
        :param tagrget_dir: output directory prefix (typo kept: renaming it
            would break keyword-argument callers).
        :param range: optional (start, stop) slice of the sample list;
            NOTE(review): shadows the builtin `range` inside this method.
        :param batch_size: number of patches normalized per batch.
        """
        # opcode recorded on each saved Block; 19 presumably tags the ACD
        # normalization step -- TODO confirm against Block consumers.
        self.opcode = 19
        # normal = ACDNormalization_tf("acd", dc_txt="dc.txt", w_txt="w.txt", template_path="template_normal")
        normal = ACDNormalization("acd", dc_txt="dc.txt", w_txt="w.txt", template_path="template_normal")
        patch_root = self._params.PATCHS_ROOT_PATH[source_samples[0]]
        sample_filename = source_samples[1]
        train_list = "{}/{}".format(patch_root, sample_filename)
        Xtrain, Ytrain = read_csv_file(patch_root, train_list)
        if range is not None:
            Xtrain = Xtrain[range[0]:range[1]]
            Ytrain = Ytrain[range[0]:range[1]]
        # prepare: fit the source->template transform on all input images.
        images = []
        for patch_file in Xtrain:
            img = io.imread(patch_file, as_gray=False)
            # imgBGR = img[:, :, (2, 1, 0)]
            # images.append(imgBGR)
            images.append(img)
        normal.prepare(images)
        target_cancer_path = "{}/{}_cancer".format(patch_root, tagrget_dir)
        target_normal_path = "{}/{}_normal".format(patch_root, tagrget_dir)
        if (not os.path.exists(target_cancer_path)):
            os.makedirs(target_cancer_path)
        if (not os.path.exists(target_normal_path)):
            os.makedirs(target_normal_path)
        n = 0
        batch_images = []
        batch_y = []
        batch_blocks = []
        for K, (x, y) in enumerate(zip(Xtrain, Ytrain)):
            new_block = Block()
            new_block.load_img(x)
            img = np.array(new_block.get_img())
            # imgBGR = img[:, :, (2, 1, 0)]
            # batch_images.append(imgBGR)
            batch_images.append(img)
            batch_y.append(y)
            batch_blocks.append(new_block)
            n = n + 1
            if n >= batch_size:
                # Normalize the accumulated batch and write each block to
                # the directory matching its label.
                norm_images = normal.normalize_on_batch(batch_images)
                for block, norm_img, y in zip(batch_blocks, norm_images, batch_y):
                    # block.set_img(255 * norm_img[:, :, (2, 1, 0)])
                    # normalize() returns floats in [0, 1]; rescale to [0, 255].
                    block.set_img(255 * norm_img)
                    block.opcode = self.opcode
                    if y == 0:
                        block.save_img(target_normal_path)
                    else:
                        block.save_img(target_cancer_path)
                batch_images = []
                batch_y = []
                batch_blocks = []
                n = 0
            if (0 == K % 1000):
                print("{} normalizing >>> {}".format(time.asctime(time.localtime()), K))
        # Flush the final partial batch, if any.
        if n > 0:
            norm_images = normal.normalize_on_batch(batch_images)
            for block, norm_img, y in zip(batch_blocks, norm_images, batch_y):
                # block.set_img(255 * norm_img[:, :, (2, 1, 0)])
                block.set_img(255 * norm_img)
                block.opcode = self.opcode
                if y == 0:
                    block.save_img(target_normal_path)
                else:
                    block.save_img(target_cancer_path)
        return
| 26,701 | 2,807 | 751 |
fb48c6ffa8bf2f05c677efbf3c8c823801a1611f | 887 | py | Python | xicam/core/tests/fixtures.py | ihumphrey/Xi-cam | a033a97c4dac55221167d9c4e914c65e835f015a | [
"BSD-3-Clause-LBNL"
] | 6 | 2020-04-26T19:09:09.000Z | 2022-02-25T19:35:54.000Z | xicam/core/tests/fixtures.py | ihumphrey/Xi-cam | a033a97c4dac55221167d9c4e914c65e835f015a | [
"BSD-3-Clause-LBNL"
] | 81 | 2020-04-07T15:19:23.000Z | 2022-02-03T19:22:37.000Z | xicam/core/tests/fixtures.py | ihumphrey/Xi-cam | a033a97c4dac55221167d9c4e914c65e835f015a | [
"BSD-3-Clause-LBNL"
] | 5 | 2020-06-18T19:24:58.000Z | 2022-02-26T08:14:14.000Z | import time
import event_model
from pytest import fixture
import scipy.misc
from xicam.core.data.bluesky_utils import run_from_doc_stream
@fixture
| 31.678571 | 106 | 0.754228 | import time
import event_model
from pytest import fixture
import scipy.misc
from xicam.core.data.bluesky_utils import run_from_doc_stream
def synthetic_ingestor():
    """Yield a minimal synthetic bluesky document stream
    (start / descriptor / event / stop) containing one grayscale test
    image under the data key "some_data" in the "primary" stream.
    """
    timestamp = time.time()
    run_bundle = event_model.compose_run()
    # scipy's built-in "face" sample image; True selects grayscale.
    data = scipy.misc.face(True)
    field = "some_data"
    source = "synthetic_ingestor"
    frame_data_keys = {field: {"source": source, "dtype": "number", "shape": data.shape}}
    frame_stream_name = "primary"
    frame_stream_bundle = run_bundle.compose_descriptor(data_keys=frame_data_keys, name=frame_stream_name)
    # Emit (name, document) pairs in the order bluesky consumers expect.
    yield "start", run_bundle.start_doc
    yield "descriptor", frame_stream_bundle.descriptor_doc
    yield "event", frame_stream_bundle.compose_event(data={field: data}, timestamps={field: timestamp})
    yield "stop", run_bundle.compose_stop()
@fixture
def catalog():
    """Pytest fixture: a run built from the synthetic document stream."""
    return run_from_doc_stream(synthetic_ingestor())
| 690 | 0 | 45 |
c08890baeeb7003201fe120e0ec26af300fc4756 | 1,096 | py | Python | 080_SquareRootDigitalExpansion.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 080_SquareRootDigitalExpansion.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 080_SquareRootDigitalExpansion.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | """
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
ans: 40886
"""
# Approach 1: "cheese" it with the decimal module's high-precision sqrt.
import decimal
decimal.getcontext().prec = 102

sum = 0
for n in range(1, 101):
    digits = str(decimal.Decimal(n).sqrt())
    # Perfect squares produce an exact integer (no decimal point) and are skipped.
    if "." in digits:
        for ch in digits.replace(".", "")[:100]:
            sum += int(ch)
print(sum)

# Approach 2: Babylonian (Newton) iteration done by hand.
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots
decimal.getcontext().prec = 110
Dec = decimal.Decimal

perfect_squares = [x**2 for x in range(1, 11)]
sum = 0
for n in range(1, 101):
    if n in perfect_squares:
        continue
    # Ten Newton steps roughly double the correct digits each time,
    # comfortably exceeding the 110-digit working precision.
    root = Dec(n) / 2
    for _ in range(10):
        root = (root + n / root) / 2
    for ch in str(root).replace(".", "")[:100]:
        sum += int(ch)
print(sum) | 25.488372 | 195 | 0.694343 | """
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
ans: 40886
"""
# cheesed using the decimal class
import decimal

decimal.getcontext().prec = 102
# NOTE: `sum` shadows the builtin; name kept for parity with the original script.
sum = 0
for value in range(1, 101):
    digits = str(decimal.Decimal(value).sqrt())
    if "." in digits:  # irrational root -> count its digits
        for digit in digits.replace(".", "")[:100]:
            sum += int(digit)
print(sum)

# Babylonian Method
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots
decimal.getcontext().prec = 110
D = decimal.Decimal
squares = [root ** 2 for root in range(1, 11)]
sum = 0
for value in range(1, 101):
    if value in squares:
        continue  # perfect squares have rational roots
    approx = D(value) / 2
    for _ in range(10):
        approx = (approx + value / approx) / 2
    for digit in str(approx).replace(".", "")[:100]:
        sum += int(digit)
print(sum)
76feb18d04101e1ac27bb3c1a57bb7433bd50e92 | 5,654 | py | Python | Registration.py | lisherlock/Py_registration | 9b775957dae924230e0eccebd94ece1194d37a2e | [
"Apache-2.0"
] | 2 | 2021-06-08T09:26:59.000Z | 2021-09-03T05:46:05.000Z | Registration.py | lisherlock/Py_registration | 9b775957dae924230e0eccebd94ece1194d37a2e | [
"Apache-2.0"
] | null | null | null | Registration.py | lisherlock/Py_registration | 9b775957dae924230e0eccebd94ece1194d37a2e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 09:44:40 2021
@author: limy
"""
import cv2
import numpy as np
import pandas as pd
import os
if __name__ == '__main__':
    if not os.path.exists('data_info.csv'):
        # data_info.csv must sit in the program root directory
        print('请将data_info.csv文件放在程序根目录!')
    else:
        data_info = pd.read_csv('data_info.csv')
        # Validate the CSV: every required column must be non-empty
        if data_info['ori_image_path'].tolist() == [] or data_info['target_image_path'].tolist() == [] or data_info['output_path'].tolist() == [] or data_info['output_file_name'].tolist() == []:
            print('请检查data_info.csv文件中内容是否为空!')
        else:
            ori_image_path = data_info['ori_image_path'].tolist()
            # One registration task per CSV row
            for num in range(len(ori_image_path)):
                # NOTE: ori_image_path is rebound from the full list to one entry here
                ori_image_path = data_info['ori_image_path'].tolist()[num]
                target_image_path = data_info['target_image_path'].tolist()[num]
                output_file_path = os.path.join(data_info['output_path'].tolist()[num], data_info['output_file_name'].tolist()[num])
                # Usage banner: click points in the same order on both images; exactly 4 pairs
                print('您正在使用配准标注平台,请注意:标注时待配准图像和参考图像的标点顺序要一致,若顺序不一致则无法配准!同时每次标注时请标注4个关键点对,不要多也不要少,谢谢您的使用!')
                # state: 1 = (re)annotate, 0 = confirmed, 2 = user quit
                state = 1
                while(state):
                    original_image = cv2.imread(ori_image_path)
                    ref_win = cv2.imread(ori_image_path)  # reference display buffer, drawn on by on_mouse1
                    target_image = cv2.imread(target_image_path)
                    src_win = cv2.imread(target_image_path)  # target display buffer, drawn on by on_mouse2
                    imagePoints1 = []  # clicked points on the reference image
                    imagePoints2 = []  # clicked points on the target image
                    state = annotion_state()
                    if state == 2:
                        break
                    elif state == 0:
                        # Both images need the same, non-zero number of points
                        if (len(imagePoints1) != len(imagePoints2)) or (len(imagePoints1) == 0 or len(imagePoints2) == 0):
                            print('标注点对数量不一致请重新标注!')
                            print('参考图像标注点数量:', len(imagePoints1))
                            print('待配准图像标注点数量:', len(imagePoints2))
                            state = 1
                        elif len(imagePoints1) != 4 or len(imagePoints2) != 4:
                            # getPerspectiveTransform needs exactly 4 point pairs
                            print('两次标注点对数量不为4,请重新标注!')
                            print('参考图像标注点数量:', len(imagePoints1))
                            print('待配准图像标注点数量:', len(imagePoints2))
                            state = 1
                        if len(imagePoints1)==4 and len(imagePoints2)==4:
                            src_points = np.array(imagePoints2, dtype=np.float32)
                            den_points = np.array(imagePoints1, dtype=np.float32)
                            # Perspective matrix mapping src_points onto den_points
                            T = cv2.getPerspectiveTransform(src_points, den_points)
                            # Warp the target image into the reference frame.
                            # warpPerspective's dsize is (width, height), hence (shape[1], shape[0]).
                            warp_imgae = cv2.warpPerspective(target_image, T, (original_image.shape[1], original_image.shape[0]), borderValue=[255, 255, 255])
                            cv2.imshow("transform", warp_imgae)
                            cv2.imshow("jizhun", ref_win)
                            cv2.imshow("daipeizhun", src_win)
                            cv2.imwrite(output_file_path, warp_imgae)
                            # cv2.imwrite("result.jpg", warp_imgae)
                            # Annotated previews always go to the first row's output dir
                            cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "src_p.jpg"), src_win)
                            cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "ref_p.jpg"), ref_win)
                            print('图片已保存到输出目录,请查看!请点击标注窗口,按esc退出此次标注。')
                            print(output_file_path)
                            cv2.waitKey()
                            cv2.destroyAllWindows()
                else:
                    # while/else: runs only when the loop exits without break.
                    # NOTE(review): this also fires after a successful save
                    # (state == 0), not just on abandonment -- confirm intent.
                    print('您已放弃标注,感谢您的使用!')
| 32.125 | 195 | 0.483021 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 09:44:40 2021
@author: limy
"""
import cv2
import numpy as np
import pandas as pd
import os
def on_mouse1(event, x, y, flags, param):
    """OpenCV mouse callback for the reference ("jizhun") window.

    On left-button press, draws a filled red dot at the clicked pixel,
    refreshes the window, and records the point in the module-global
    imagePoints1.  Signature follows cv2.setMouseCallback.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        p = [x, y]
        # BGR (0, 0, 255) == red; thickness -1 draws a filled circle
        cv2.circle(ref_win, (x, y), 4, (0, 0, 255), -1)
        cv2.imshow("jizhun", ref_win)
        imagePoints1.append(p)
        print('基准坐标:', p)  # "reference coordinates"
def on_mouse2(event, x, y, flags, param):
    """OpenCV mouse callback for the target ("daipeizhun") window.

    On left-button press, draws a filled red dot at the clicked pixel,
    refreshes the window, and records the point in the module-global
    imagePoints2.  Signature follows cv2.setMouseCallback.
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    point = [x, y]
    # BGR (0, 0, 255) == red; thickness -1 draws a filled circle
    cv2.circle(src_win, (x, y), 4, (0, 0, 255), -1)
    cv2.imshow("daipeizhun", src_win)
    imagePoints2.append(point)
    print('目标坐标:', point)
def annotion_state():
    """Run one interactive annotation round and return its outcome.

    Opens both image windows with their click callbacks, waits until the
    user presses a key (esc) in an OpenCV window, then asks on stdin how
    to proceed.

    Returns
    -------
    int : 0 = annotation confirmed, 1 = redo, 2 = user quit.
          Any other input also returns 0.

    Reads the module globals src_win / ref_win / imagePoints1 /
    imagePoints2 set up by the __main__ loop.  (Function name keeps the
    original "annotion" spelling because callers use it.)
    """
    cv2.namedWindow("daipeizhun")
    cv2.namedWindow("jizhun")
    cv2.imshow("daipeizhun", src_win)
    cv2.imshow("jizhun", ref_win)
    # Register the click handlers that collect point pairs
    cv2.setMouseCallback("jizhun", on_mouse1)
    cv2.setMouseCallback("daipeizhun", on_mouse2)
    print('标注结束后请在标注窗口按esc退出!')
    cv2.waitKey()
    input_str = input("确认标注结束?请输入y确认标注,输入n重新标注,输入q放弃标注。\n")
    if input_str == 'y':
        print('标注结束')
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        print('基准图像总标注点坐标:', imagePoints1)
        print('待配准图像总标注点坐标:', imagePoints2)
        state = 0
    elif input_str == 'n':
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        state = 1
    elif input_str == 'q':
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        state = 2
    else:
        # NOTE(review): invalid input is treated as "confirmed" (state 0)
        # and leaves the windows open -- confirm intent.
        state = 0
        print('输入非法字符,已回到初始标注界面。')
    return state
if __name__ == '__main__':
    if not os.path.exists('data_info.csv'):
        # data_info.csv must sit in the program root directory
        print('请将data_info.csv文件放在程序根目录!')
    else:
        data_info = pd.read_csv('data_info.csv')
        # Validate the CSV: every required column must be non-empty
        if data_info['ori_image_path'].tolist() == [] or data_info['target_image_path'].tolist() == [] or data_info['output_path'].tolist() == [] or data_info['output_file_name'].tolist() == []:
            print('请检查data_info.csv文件中内容是否为空!')
        else:
            ori_image_path = data_info['ori_image_path'].tolist()
            # One registration task per CSV row
            for num in range(len(ori_image_path)):
                # NOTE: ori_image_path is rebound from the full list to one entry here
                ori_image_path = data_info['ori_image_path'].tolist()[num]
                target_image_path = data_info['target_image_path'].tolist()[num]
                output_file_path = os.path.join(data_info['output_path'].tolist()[num], data_info['output_file_name'].tolist()[num])
                # Usage banner: click points in the same order on both images; exactly 4 pairs
                print('您正在使用配准标注平台,请注意:标注时待配准图像和参考图像的标点顺序要一致,若顺序不一致则无法配准!同时每次标注时请标注4个关键点对,不要多也不要少,谢谢您的使用!')
                # state: 1 = (re)annotate, 0 = confirmed, 2 = user quit
                state = 1
                while(state):
                    original_image = cv2.imread(ori_image_path)
                    ref_win = cv2.imread(ori_image_path)  # reference display buffer, drawn on by on_mouse1
                    target_image = cv2.imread(target_image_path)
                    src_win = cv2.imread(target_image_path)  # target display buffer, drawn on by on_mouse2
                    imagePoints1 = []  # clicked points on the reference image
                    imagePoints2 = []  # clicked points on the target image
                    state = annotion_state()
                    if state == 2:
                        break
                    elif state == 0:
                        # Both images need the same, non-zero number of points
                        if (len(imagePoints1) != len(imagePoints2)) or (len(imagePoints1) == 0 or len(imagePoints2) == 0):
                            print('标注点对数量不一致请重新标注!')
                            print('参考图像标注点数量:', len(imagePoints1))
                            print('待配准图像标注点数量:', len(imagePoints2))
                            state = 1
                        elif len(imagePoints1) != 4 or len(imagePoints2) != 4:
                            # getPerspectiveTransform needs exactly 4 point pairs
                            print('两次标注点对数量不为4,请重新标注!')
                            print('参考图像标注点数量:', len(imagePoints1))
                            print('待配准图像标注点数量:', len(imagePoints2))
                            state = 1
                        if len(imagePoints1)==4 and len(imagePoints2)==4:
                            src_points = np.array(imagePoints2, dtype=np.float32)
                            den_points = np.array(imagePoints1, dtype=np.float32)
                            # Perspective matrix mapping src_points onto den_points
                            T = cv2.getPerspectiveTransform(src_points, den_points)
                            # Warp the target image into the reference frame.
                            # warpPerspective's dsize is (width, height), hence (shape[1], shape[0]).
                            warp_imgae = cv2.warpPerspective(target_image, T, (original_image.shape[1], original_image.shape[0]), borderValue=[255, 255, 255])
                            cv2.imshow("transform", warp_imgae)
                            cv2.imshow("jizhun", ref_win)
                            cv2.imshow("daipeizhun", src_win)
                            cv2.imwrite(output_file_path, warp_imgae)
                            # cv2.imwrite("result.jpg", warp_imgae)
                            # Annotated previews always go to the first row's output dir
                            cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "src_p.jpg"), src_win)
                            cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "ref_p.jpg"), ref_win)
                            print('图片已保存到输出目录,请查看!请点击标注窗口,按esc退出此次标注。')
                            print(output_file_path)
                            cv2.waitKey()
                            cv2.destroyAllWindows()
                else:
                    # while/else: runs only when the loop exits without break.
                    # NOTE(review): this also fires after a successful save
                    # (state == 0), not just on abandonment -- confirm intent.
                    print('您已放弃标注,感谢您的使用!')
| 1,546 | 0 | 79 |
944dc18aae4b6ad45e6313a51de2909f9033490c | 644 | py | Python | tests/test_command/test_help_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_command/test_help_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_command/test_help_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | [
"BSD-3-Clause"
] | null | null | null | import unittest
from click.testing import CliRunner
from openvariant.commands.openvar import openvar
| 37.882353 | 108 | 0.732919 | import unittest
from click.testing import CliRunner
from openvariant.commands.openvar import openvar
class TestHelpCommand(unittest.TestCase):
    """Smoke-test the top-level `openvar` CLI help output."""

    def test_help_command(self):
        """Invoking the CLI with no arguments lists every subcommand."""
        invocation = CliRunner().invoke(openvar)
        expected_lines = (
            "cat Concatenate files to standard input",
            "count Number of rows that matches a specified criterion",
            "groupby Groups rows that have the same values into summary rows",
            "plugin Actions to execute for a plugin: create",
        )
        for line in expected_lines:
            self.assertTrue(line in invocation.output)
| 470 | 20 | 50 |
391a60fcab151516dc6c51c0a4b163d9938237df | 6,431 | py | Python | thresholdOut_mydemo_paramTuning.py | bmcmenamin/thresholdOut-explorations | 3951e09a7105ad7833ba22cb7a22754f6489f6e2 | [
"MIT"
] | 9 | 2016-02-10T00:41:28.000Z | 2022-02-23T11:36:33.000Z | thresholdOut_mydemo_paramTuning.py | bmcmenamin/thresholdOut-explorations | 3951e09a7105ad7833ba22cb7a22754f6489f6e2 | [
"MIT"
] | null | null | null | thresholdOut_mydemo_paramTuning.py | bmcmenamin/thresholdOut-explorations | 3951e09a7105ad7833ba22cb7a22754f6489f6e2 | [
"MIT"
] | 3 | 2016-09-14T06:23:46.000Z | 2018-07-05T04:15:02.000Z | import numpy as np
import pandas as pd
from scipy.stats import zscore
from sklearn.linear_model import LogisticRegression, LassoLars
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sb
def createDataset(n, d=100, d_inf=5, is_classification=True, no_signal=False):
    """Build matched train/test/holdout splits of synthetic data.

    n is the number of samples in each of the three splits (3*n rows are
    generated, then interleaved); d is the data dimension (default=100);
    d_inf is the number of informative leading features (default=5);
    is_classification makes the labels +/-1 instead of continuous
    (default=True); no_signal shuffles the labels so the features carry
    no usable signal (default=False).

    Returns [[X_0, y_0], [X_1, y_1], [X_2, y_2]], one pair per split.
    """
    features = np.random.normal(0, 1, (3 * n, d))
    labels = np.random.normal(0, 1, (3 * n, 1))

    if is_classification:
        # Threshold the continuous draw into +/-1 class labels,
        # then shift the informative block in the label direction.
        labels = 2.0 * ((labels > 0) - 0.5)
        features[:, :d_inf] += labels * np.random.normal(
            1.0, 1.5, features[:, :d_inf].shape)
    else:
        snr = 0.05
        features[:, :d_inf] += snr * labels

    features = zscore(features, axis=0)  # standardize each feature column

    if no_signal:
        np.random.shuffle(labels)

    # Interleave rows 0,3,6,... / 1,4,7,... / 2,5,8,... into three splits
    splits = []
    for offset in range(3):
        splits.append([features[offset::3, :], labels[offset::3, 0]])
    return splits
def thresholdout(train_vals, holdout_vals, tho_scale=1.0):
    """Apply the thresholdout mechanism to paired train/holdout scores.

    Each holdout entry is reported as its training counterpart unless the
    two differ (after adding Gaussian noise) by more than the threshold
    `tho_scale`; only then is the holdout value revealed, again with
    Gaussian noise of std-dev tho_scale / 4 added.
    """
    threshold = tho_scale
    noise_scale = threshold / 4
    train = np.array(train_vals)
    holdout = np.array(holdout_vals)
    # Noisy comparison of train vs holdout performance
    gap = np.abs(train - holdout) - np.random.normal(0, noise_scale, holdout.shape)
    exceeded = gap > threshold
    # Default to the training value; reveal noisy holdout only where needed
    result = np.copy(train)
    result[exceeded] = np.copy(holdout)[exceeded] + np.random.normal(
        0, noise_scale, result[exceeded].shape)
    return result
def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):
    """Run the tuning experiment `reps` times on fresh datasets.

    Returns two DataFrames (standard holdout, thresholdout), each with
    columns ['perm', 'performance', 'dataset'] so the plots can draw
    error bars across repetitions.
    """
    split_names = ('Train', 'Holdout', 'Test')
    columns = ['perm', 'performance', 'dataset']
    rows_std = []
    rows_tho = []
    for rep_idx in tqdm(range(reps)):
        best_std, best_tho = fitModels_paramTuning(
            n, d, grid_size,
            is_classification=is_classification,
            tho_scale=tho_scale,
            no_signal=no_signal)
        # One row per (repetition, split)
        for split_name, std_perf, tho_perf in zip(split_names, best_std, best_tho):
            rows_std.append((rep_idx, std_perf, split_name))
            rows_tho.append((rep_idx, tho_perf, split_name))
    return (pd.DataFrame(rows_std, columns=columns),
            pd.DataFrame(rows_tho, columns=columns))
def runExpt_and_makePlots(n, d, grid_size, reps, tho_scale=0.1, is_classification=True):
    """Run the experiment with and without real signal and plot the results.

    Produces a 2x2 grid of bar charts contrasting standard-holdout and
    thresholdout model selection on data with and without usable signal.

    n = number of training samples in train/test/holdout sets
    d = dimension of data
    grid_size = number of steps in parameter grid search
    reps = number of times experiment is repeated
    is_classification = bool that indicates whether to do classification or regression

    Returns the matplotlib figure and 2x2 axes array.
    """
    shared_args = [n, d, grid_size, reps]
    df_std_signal, df_tho_signal = repeatexp(*shared_args,
                                             is_classification=is_classification,
                                             tho_scale=tho_scale,
                                             no_signal=False)
    df_std_nosignal, df_tho_nosignal = repeatexp(*shared_args,
                                                 is_classification=is_classification,
                                                 tho_scale=tho_scale,
                                                 no_signal=True)
    f, ax = plt.subplots(2, 2, figsize=(8, 10), sharex=True, sharey=False)
    sb.set_style('whitegrid')
    kw_params = {'x': 'dataset',
                 'y': 'performance',
                 'units': 'perm'}
    # Top row: real signal; bottom row: label-shuffled (no signal)
    panels = [
        (df_std_signal, ax[0, 0], 'Standard, HAS Signal'),
        (df_tho_signal, ax[0, 1], 'Thresholdout, HAS Signal'),
        (df_std_nosignal, ax[1, 0], 'Standard, NO Signal'),
        (df_tho_nosignal, ax[1, 1], 'Thresholdout, NO Signal'),
    ]
    for frame, axis, title in panels:
        sb.barplot(data=frame, ax=axis, **kw_params)
        axis.set_title(title)
    return f, ax
import pandas as pd
from scipy.stats import zscore
from sklearn.linear_model import LogisticRegression, LassoLars
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sb
def createDataset(n, d=100, d_inf=5, is_classification=True, no_signal=False):
    """
    n is number of samples in each of train/test/holdout sets
    d is the data dimension (default=100)
    d_inf is the number of informative features (default=5)
    is_classification is bool stating whether data is for classification
        as opposed to regression (default=True)
    no_signal is a bool stating whether you want the data randomized so
        there's no useful signal (default=False)

    Returns [[X_0, y_0], [X_1, y_1], [X_2, y_2]], one (X, y) pair per split.
    """
    # making random inputs, outputs
    X = np.random.normal(0, 1, (3*n, d))
    y = np.random.normal(0, 1, (3*n, 1))
    # thresholding y values for classification (labels become +/-1)
    if is_classification:
        y = 2.0*((y>0) - 0.5)
    # making the first d_inf dimensions informative
    if is_classification:
        X[:,:d_inf] += y*np.random.normal(1.0, 1.5, X[:,:d_inf].shape)
    else:
        snr = 0.05
        X[:,:d_inf] += snr*y
    X = zscore(X, axis=0)  # standardize each feature column
    # if you dont want useful signal, randomize the labels
    if no_signal:
        np.random.shuffle(y)
    # Divide into train/test/holdout pairs by interleaving rows
    outputs = [[X[i::3, :], y[i::3, 0]] for i in range(3)]
    return outputs
def thresholdout(train_vals, holdout_vals, tho_scale=1.0):
    """
    This is the actual thresholdout algorithm
    that takes values from a training-run and a holdout-run
    and returns a new set of holdout values.

    tho_scale sets the threshold; Gaussian noise with std-dev
    tho_scale / 4 is injected into both the comparison and the revealed
    values.  Entries where |train - holdout| stays within the (noisy)
    threshold report the training value; the rest report a noisy copy of
    the holdout value.
    """
    thr = tho_scale
    tol = thr / 4  # std-dev of the injected Gaussian noise
    train_vals = np.array(train_vals)
    holdout_vals = np.array(holdout_vals)
    # noisy comparison of train vs holdout performance
    diffNoise = np.abs(train_vals - holdout_vals) - np.random.normal(0, tol, holdout_vals.shape)
    flipIdx = diffNoise > thr
    # default to the training value; reveal noisy holdout only where flipped
    new_holdout_vals = np.copy(train_vals)
    new_holdout_vals[flipIdx] = np.copy(holdout_vals)[flipIdx] + np.random.normal(0, tol, new_holdout_vals[flipIdx].shape)
    return new_holdout_vals
def fitModels_paramTuning(n, d, grid_size, no_signal=False, tho_scale=0.1, is_classification=True):
    """Grid-search a regularization strength, selecting on the holdout set.

    Fits one model per strength on the train split and returns two
    [train, holdout, test] score triples: for the model chosen with the
    raw holdout score, and for the model chosen with the
    thresholdout-noised holdout score.
    """
    # NOTE(review): is_classification is hard-coded to True here, so the
    # dataset is always generated for classification -- confirm intent.
    dataset = createDataset(n, d=d, d_inf=10,
                            is_classification=True,
                            no_signal=no_signal)
    best_perf_std = [-np.inf, -np.inf, -np.inf]
    best_perf_tho = [-np.inf, -np.inf, -np.inf]
    best_tho = -np.inf
    # sweep the regularization strength over a log grid
    for a in np.logspace(-4, 0, num=grid_size):
        if is_classification:
            model = LogisticRegression(penalty='l1', C=a)
            eval_model = lambda x: model.score(*x)
        else:
            model = LassoLars(alpha=a, normalize=True, max_iter=5000)
            #eval_model = lambda x: (np.var(x[1]) - np.mean((model.predict(x[0]) - x[1])**2)) / np.var(x[1])
            eval_model = lambda x: model.score(*x)
        # Train models
        model = model.fit(*dataset[0])
        # standard holdout performance; note the comprehension's `d`
        # shadows the dimension parameter (harmless: d is not used again)
        perf_std = [eval_model(d) for d in dataset]
        if perf_std[1] > best_perf_std[1]:
            best_perf_std = perf_std
        # thresholdout performance: select on the noised holdout score,
        # but record the true scores for reporting
        _tho = thresholdout(perf_std[0], perf_std[1], tho_scale=tho_scale)
        perf_tho = [i for i in perf_std]
        if _tho > best_tho:
            best_tho = _tho
            best_perf_tho = perf_tho
    return best_perf_std, best_perf_tho
def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):
    """
    Repeat the experiment multiple times on different
    datasets to put errorbars on the graphs

    n, d, grid_size, tho_scale, is_classification and no_signal are
    forwarded to fitModels_paramTuning; reps is the repetition count.
    Returns two DataFrames (standard holdout, thresholdout) with columns
    ['perm', 'performance', 'dataset'].
    """
    datasetList = ['Train', 'Holdout', 'Test']
    colList = ['perm', 'performance', 'dataset']
    df_list_std = []
    df_list_tho = []
    for perm in tqdm(range(reps)):
        vals_std, vals_tho = fitModels_paramTuning(n, d, grid_size,
                                                   is_classification=is_classification,
                                                   tho_scale=tho_scale,
                                                   no_signal=no_signal)
        # one row per (repetition, dataset split)
        for i, ds in enumerate(datasetList):
            df_list_std.append((perm, vals_std[i], ds))
            df_list_tho.append((perm, vals_tho[i], ds))
    df_std = pd.DataFrame(df_list_std, columns=colList)
    df_tho = pd.DataFrame(df_list_tho, columns=colList)
    return df_std, df_tho
def runExpt_and_makePlots(n, d, grid_size, reps, tho_scale=0.1, is_classification=True):
    """
    Run the experiments with and without useful training signal
    then make subplots to show how overfitting differs for
    standard holdout and thresholdout

    n = number of training samples in train/test/holdout sets
    d = dimension of data
    grid_size = number of steps in parameter grid search
    reps = number of times experiment is repeated
    tho_scale = threshold scale forwarded to the thresholdout mechanism
    is_classification = bool that indicates whether to do classification or regression

    Returns the matplotlib figure and 2x2 axes array.
    """
    args = [n, d, grid_size, reps]
    df_std_signal, df_tho_signal = repeatexp(*args,
                                             is_classification=is_classification,
                                             tho_scale=tho_scale,
                                             no_signal=False)
    df_std_nosignal, df_tho_nosignal = repeatexp(*args,
                                                 is_classification=is_classification,
                                                 tho_scale=tho_scale,
                                                 no_signal=True)
    f, ax = plt.subplots(2, 2, figsize=(8,10), sharex=True, sharey=False)
    sb.set_style('whitegrid')
    kw_params = {'x':'dataset',
                 'y':'performance',
                 'units':'perm'}
    # top row: data with real signal; bottom row: label-shuffled data
    sb.barplot(data=df_std_signal,
               ax=ax[0,0],
               **kw_params)
    ax[0,0].set_title('Standard, HAS Signal')
    sb.barplot(data=df_tho_signal,
               ax=ax[0,1],
               **kw_params)
    ax[0,1].set_title('Thresholdout, HAS Signal')
    sb.barplot(data=df_std_nosignal,
               ax=ax[1,0],
               **kw_params)
    ax[1,0].set_title('Standard, NO Signal')
    sb.barplot(data=df_tho_nosignal,
               ax=ax[1,1],
               **kw_params)
    ax[1,1].set_title('Thresholdout, NO Signal')
    return f, ax
222e298f819cf53218a3cdeeaa07b60f0a7876a3 | 196 | py | Python | applications/boards/admin.py | niux3/forum | 8c21ba2d83cd58223a1a9a75aa71475953344671 | [
"MIT"
] | null | null | null | applications/boards/admin.py | niux3/forum | 8c21ba2d83cd58223a1a9a75aa71475953344671 | [
"MIT"
] | null | null | null | applications/boards/admin.py | niux3/forum | 8c21ba2d83cd58223a1a9a75aa71475953344671 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Board, Post, Topic
@admin.register(Board)
admin.site.register(Post)
admin.site.register(Topic) | 17.818182 | 38 | 0.780612 | from django.contrib import admin
from .models import Board, Post, Topic
@admin.register(Board)
class BoardAdmin(admin.ModelAdmin):
pass
admin.site.register(Post)
admin.site.register(Topic) | 0 | 23 | 22 |
49add441363c0c1ecdf5bbcbaa5d3fd1b05f3e9d | 1,980 | py | Python | Python/Zelle/Chapter7_DecisionStructures/ProgrammingExercises/7_BabysitterWages/babysitterWageCalc.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter7_DecisionStructures/ProgrammingExercises/7_BabysitterWages/babysitterWageCalc.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter7_DecisionStructures/ProgrammingExercises/7_BabysitterWages/babysitterWageCalc.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | # babysitterWageCalc.py
# A program that accepts a starting time and ending time in hours and minutes
# to calculate the total babysitting bill. The start and end times are in a
# single 24 hour period. Partial hours are prorated.
"""A babysitter charges $2.50 an hour until 9:00 PM when the rate drops to
$1.75 an hour (the children are in bed). Write a program that accepts a
starting time and ending time in hours and minutes and calculates the total
babysitting bill. You may assume that the starting and ending times are in a
single 24-hour period. Partial hours should be prorated."""
from datetime import datetime
main()
| 31.935484 | 78 | 0.672222 | # babysitterWageCalc.py
# A program that accepts a starting time and ending time in hours and minutes
# to calculate the total babysitting bill. The start and end times are in a
# single 24 hour period. Partial hours are prorated.
"""A babysitter charges $2.50 an hour until 9:00 PM when the rate drops to
$1.75 an hour (the children are in bed). Write a program that accepts a
starting time and ending time in hours and minutes and calculates the total
babysitting bill. You may assume that the starting and ending times are in a
single 24-hour period. Partial hours should be prorated."""
from datetime import datetime
def getTimes():
    """Prompt for the shift's start and end times.

    Returns the two raw "HH:MM" strings exactly as typed; no validation
    is done here (the caller handles parse errors).
    """
    start = input("What time did the babysitter start (HH:MM)? ")
    end = input("\nWhat time did you come home (HH:MM)? ")
    return start, end
def calcWage(start, end):
    """Return the babysitting pay for a shift within one 24-hour day.

    start, end -- clock times as "HH:MM" (or "H:MM") strings on a
    24-hour clock, with end no later than midnight.  $2.50/hr is paid
    before 9 PM (21:00) and $1.75/hr after; partial hours are prorated.

    Raises ValueError for strings that are not "H[H]:MM".

    Bug fix: the original compared against hour 9 instead of 21, so any
    24-hour time after 9 PM was billed at the wrong rate, and its
    fixed-position slicing (start[:2]) broke on one-digit hours.
    """
    lowWage = 1.75    # rate after 9 PM
    highWage = 2.50   # rate before 9 PM
    cutoff = 21.0     # 9:00 PM on a 24-hour clock

    # split(":") handles both "9:30" and "09:30"
    startHour, startMin = start.split(":")
    startTime = int(startHour) + int(startMin) / 60
    endHour, endMin = end.split(":")
    endTime = int(endHour) + int(endMin) / 60

    # Hours worked at each rate, clamped at the 9 PM boundary.
    highWageTime = max(0.0, min(endTime, cutoff) - startTime)
    lowWageTime = max(0.0, endTime - max(startTime, cutoff))
    pay = (highWageTime * highWage) + (lowWageTime * lowWage)
    return pay
def main():
    """Prompt for a babysitting shift and print the amount owed.

    Reads the start/end times via getTimes(), prices the shift with
    calcWage(), and reports a parse failure instead of crashing on
    malformed input.  (Fixes the "ti"/"to" typo in the error message.)
    """
    # Introduction
    print("""This program accepts the number of hours and minutes a babysitter
worked and calculates how much is owed. Prevailing wage is $2.50, after 9 p.m.
the wage goes to $1.75. Partial hours are prorated. The babysitter goes home
at 12AM.""")
    try:
        # Get the start and end times.
        # Assume single 24-hour period means before 12 AM.
        start, end = getTimes()
        pay = calcWage(start, end)
        print("You owe the babysitter ${0:0.2f}.".format(pay))
    except (ValueError, SyntaxError):
        print("You need to input time in numbers as HH:MM. Exiting.")
main()
| 1,280 | 0 | 69 |
bb4d8355799811b39115c1a071296ef19d0f7c79 | 36,580 | py | Python | tests/test_harperdb_base.py | HarperDB/harperdb-sdk-python | 34ec710c5e1ea7f54a0f3efed0c8e2aee859cb32 | [
"MIT"
] | 4 | 2020-07-13T19:10:03.000Z | 2022-01-22T18:32:22.000Z | tests/test_harperdb_base.py | HarperDB/harperdb-sdk-python | 34ec710c5e1ea7f54a0f3efed0c8e2aee859cb32 | [
"MIT"
] | 5 | 2020-07-29T17:54:23.000Z | 2020-07-29T20:44:42.000Z | tests/test_harperdb_base.py | HarperDB/harperdb-sdk-python | 34ec710c5e1ea7f54a0f3efed0c8e2aee859cb32 | [
"MIT"
] | 2 | 2021-06-02T17:41:12.000Z | 2021-07-10T15:22:18.000Z | import responses
import unittest
import harperdb
import harperdb_testcase
| 29.691558 | 79 | 0.501422 | import responses
import unittest
import harperdb
import harperdb_testcase
class TestHarperDBBase(harperdb_testcase.HarperDBTestCase):
def setUp(self):
""" This method is called before each test.
"""
self.db = harperdb.HarperDBBase(self.URL)
@unittest.mock.patch('base64.b64encode')
def test_create_harperdb_base_with_kwargs(self, mock_b64encode):
""" Create an instance of HarperDBBase with keyword args.
"""
# by mocking the base64 module we can define what it returns,
# so it's very easy to check the value stored in db.token
mock_b64encode.return_value = b'anArbitraryBase64String'
db = harperdb.HarperDBBase(
self.URL,
self.USERNAME,
self.PASSWORD,
timeout=3)
mock_b64encode.assert_called_once_with(
'{}:{}'.format(self.USERNAME, self.PASSWORD).encode('utf-8'))
self.assertEqual(db.url, self.URL)
self.assertEqual(
db.token,
'Basic {}'.format(mock_b64encode.return_value.decode('utf-8')))
self.assertEqual(db.timeout, 3)
@responses.activate
def test_create_schema(self):
# define the expected JSON body in POST request
spec = {
'operation': 'create_schema',
'schema': 'test_schema',
}
# mock the server response to create_schema
responses.add(
'POST',
self.URL,
json=self.SCHEMA_CREATED,
status=200)
self.assertEqual(
self.db._create_schema(spec['schema']),
self.SCHEMA_CREATED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_drop_schema(self):
# define the expected JSON body in POST request
spec = {
'operation': 'drop_schema',
'schema': 'test_schema',
}
# mock the server response to drop_schema
responses.add(
'POST',
self.URL,
json=self.SCHEMA_DROPPED,
status=200)
self.assertEqual(
self.db._drop_schema(spec['schema']),
self.SCHEMA_DROPPED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_describe_schema(self):
# define the expected JSON body in POST request
spec = {
'operation': 'describe_schema',
'schema': 'test_schema',
}
# mock the server response to describe_schema
responses.add(
'POST',
self.URL,
json=self.DESCRIBE_SCHEMA_1,
status=200)
self.assertEqual(
self.db._describe_schema(spec['schema']),
self.DESCRIBE_SCHEMA_1)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_create_table(self):
# define the expected JSON body in POST request
spec = {
'operation': 'create_table',
'schema': 'test_schema',
'table': 'test_table',
'hash_attribute': 'id',
}
# mock the server response to drop_schema
responses.add(
'POST',
self.URL,
json=self.TABLE_CREATED,
status=200)
self.assertEqual(
self.db._create_table(
schema=spec['schema'],
table=spec['table'],
hash_attribute=spec['hash_attribute']),
self.TABLE_CREATED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_describe_table(self):
# define the expected JSON body in POST request
spec = {
'operation': 'describe_table',
'schema': 'test_schema',
'table': 'test_table',
}
# mock the server response to describe_table
responses.add(
'POST',
self.URL,
json=self.DESCRIBE_TABLE,
status=200)
self.assertEqual(
self.db._describe_table(spec['schema'], spec['table']),
self.DESCRIBE_TABLE)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_describe_all(self):
# define the expected JSON body in POST request
spec = {
'operation': 'describe_all'
}
# mock the server response to describe_all
responses.add(
'POST',
self.URL,
json=self.DESCRIBE_ALL,
status=200)
self.assertEqual(self.db._describe_all(), self.DESCRIBE_ALL)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_drop_table(self):
# define the expected JSON body in POST request
spec = {
'operation': 'drop_table',
'schema': 'test_schema',
'table': 'test_table',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.TABLE_DROPPED,
status=200)
self.assertEqual(
self.db._drop_table(schema=spec['schema'], table=spec['table']),
self.TABLE_DROPPED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_drop_attribute(self):
""" Drop an attribute from a table.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'drop_attribute',
'schema': 'test_schema',
'table': 'test_table',
'attribute': 'test_attribute',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.DROP_ATTRIBUTE,
status=200)
self.assertEqual(
self.db._drop_attribute(
schema=spec['schema'],
table=spec['table'],
attribute=spec['attribute']),
self.DROP_ATTRIBUTE)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_insert(self):
""" Insert a list of records.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'insert',
'schema': 'test_schema',
'table': 'test_table',
'records': [
{
'id': 1,
'name': 'foo',
},
],
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORD_INSERTED,
status=200)
self.assertEqual(
self.db._insert(
schema=spec['schema'],
table=spec['table'],
records=spec['records']),
self.RECORD_INSERTED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_update(self):
""" Update a list of records by hash.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'update',
'schema': 'test_schema',
'table': 'test_table',
'records': [
{
'id': 1,
'name': 'foo',
},
],
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORD_UPSERTED,
status=200)
self.assertEqual(
self.db._update(
schema=spec['schema'],
table=spec['table'],
records=spec['records']),
self.RECORD_UPSERTED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_delete(self):
""" Delete a list of records by hash.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'delete',
'schema': 'test_schema',
'table': 'test_table',
'hash_values': ['1'],
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORDS_DELETED,
status=200)
self.assertEqual(
self.db._delete(
schema=spec['schema'],
table=spec['table'],
hash_values=spec['hash_values']),
self.RECORDS_DELETED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_search_by_hash(self):
""" Search records by hash value.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'search_by_hash',
'schema': 'test_schema',
'table': 'test_table',
'hash_values': ['1'],
'get_attributes': ['*'],
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORDS,
status=200)
self.assertEqual(
self.db._search_by_hash(
schema=spec['schema'],
table=spec['table'],
hash_values=spec['hash_values']),
self.RECORDS)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
# test optional get_attributes
spec['get_attributes'] = ['foo', 'bar']
self.db._search_by_hash(
schema=spec['schema'],
table=spec['table'],
hash_values=spec['hash_values'],
get_attributes=spec['get_attributes'])
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 2)
@responses.activate
def test_search_by_value(self):
""" Search records by value.
"""
# define the expected JSON body in POST request
spec = {
'operation': 'search_by_value',
'schema': 'test_schema',
'table': 'test_table',
'search_attribute': 'name',
'search_value': 'foo',
'get_attributes': ['*'],
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORDS,
status=200)
self.assertEqual(
self.db._search_by_value(
schema=spec['schema'],
table=spec['table'],
search_attribute=spec['search_attribute'],
search_value=spec['search_value']),
self.RECORDS)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
# test optional get_attributes
spec['get_attributes'] = ['foo', 'bar']
self.db._search_by_value(
schema=spec['schema'],
table=spec['table'],
search_attribute=spec['search_attribute'],
search_value=spec['search_value'],
get_attributes=spec['get_attributes'])
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 2)
@responses.activate
def test_sql(self):
""" Accept SQL strings.
"""
sql_string = \
'INSERT INTO test_schema.test_table (id, name) VALUES(1, \"foo\")'
# define the expected JSON body in POST request
spec = {
'operation': 'sql',
'sql': sql_string,
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORD_INSERTED,
status=200)
self.assertEqual(self.db._sql(sql_string), self.RECORD_INSERTED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_data_load(self):
""" Records are inserted from a CSV file path.
"""
spec = {
'operation': 'csv_data_load',
'action': 'insert',
'schema': 'test_schema',
'table': 'test_table',
'data': self.CSV_STRING,
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_data_load(
schema=spec['schema'],
table=spec['table'],
path='tests/test.csv'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_data_load_update(self):
""" Records are updated from a CSV file path.
"""
spec = {
'operation': 'csv_data_load',
'action': 'update',
'schema': 'test_schema',
'table': 'test_table',
'data': self.CSV_STRING,
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_data_load(
schema=spec['schema'],
table=spec['table'],
path='tests/test.csv',
action='update'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_file_load(self):
""" Records are inserted from a CSV file path on the HarperDB host.
"""
spec = {
'operation': 'csv_file_load',
'action': 'insert',
'schema': 'test_schema',
'table': 'test_table',
'file_path': 'path/to/file/on/host.csv',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_file_load(
schema=spec['schema'],
table=spec['table'],
file_path=spec['file_path']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_file_load_update(self):
""" Records are updated from a CSV file path on the HarperDB host.
"""
spec = {
'operation': 'csv_file_load',
'action': 'update',
'schema': 'test_schema',
'table': 'test_table',
'file_path': 'path/to/file/on/host.csv',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_file_load(
schema=spec['schema'],
table=spec['table'],
file_path=spec['file_path'],
action='update'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_url_load(self):
""" Records are inserted from a CSV file at a URL.
"""
spec = {
'operation': 'csv_url_load',
'action': 'insert',
'schema': 'test_schema',
'table': 'test_table',
'csv_url': 'example.com/test.csv',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_url_load(
schema=spec['schema'],
table=spec['table'],
csv_url=spec['csv_url']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_csv_url_load_update(self):
""" Records are updated from a CSV file at a URL.
"""
spec = {
'operation': 'csv_url_load',
'action': 'update',
'schema': 'test_schema',
'table': 'test_table',
'csv_url': 'example.com/test.csv',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
self.assertEqual(
self.db._csv_url_load(
schema=spec['schema'],
table=spec['table'],
csv_url=spec['csv_url'],
action='update'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_get_job(self):
""" Returns a job dictionary from an id.
"""
spec = {
'operation': 'get_job',
'id': 'aUniqueID',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.GET_JOB,
status=200)
self.assertEqual(
self.db._get_job(spec['id']),
self.GET_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_search_jobs_by_start_date(self):
""" Get an array of jobs by date range.
"""
spec = {
'operation': 'search_jobs_by_start_date',
'from_date': '2020-01-01',
'to_date': '2020-02-01',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.SEARCH_JOB,
status=200)
self.assertEqual(
self.db._search_jobs_by_start_date(
from_date=spec['from_date'],
to_date=spec['to_date']
),
self.SEARCH_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_add_user(self):
""" Add a user.
"""
spec = {
'operation': 'add_user',
'role': 'aUniqueID',
'username': 'user',
'password': 'pass',
'active': True,
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.USER_ADDED,
status=200)
self.assertEqual(
self.db._add_user(
role=spec['role'],
username=spec['username'],
password=spec['password']),
self.USER_ADDED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_alter_user(self):
""" Alter a user.
"""
spec = {
'operation': 'alter_user',
'role': 'aUniqueID',
'username': 'user',
'password': 'pass',
'active': False,
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.USER_ALTERED,
status=200)
self.assertEqual(
self.db._alter_user(
role=spec['role'],
username=spec['username'],
password=spec['password'],
active=False),
self.USER_ALTERED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_drop_user(self):
""" Drop a user.
"""
spec = {
'operation': 'drop_user',
'username': 'user',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.USER_DROPPED,
status=200)
self.assertEqual(
self.db._drop_user(username=spec['username']),
self.USER_DROPPED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_user_info(self):
""" Get user info.
"""
spec = {
'operation': 'user_info',
'username': 'user',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.USER_INFO,
status=200)
self.assertEqual(
self.db._user_info(username=spec['username']),
self.USER_INFO)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_list_users(self):
""" List users.
"""
spec = {
'operation': 'list_users',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.LIST_USERS,
status=200)
self.assertEqual(
self.db._list_users(),
self.LIST_USERS)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_add_role(self):
""" Add a role.
"""
spec = {
'operation': 'add_role',
'role': 'developer',
'permission': {
'super_user': False,
'dev': {
'tables': {
'dog': {
'read': True,
'insert': True,
'update': True,
'delete': False,
'attribute_restrictions': [],
}
}
}
}
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.ADD_ROLE,
status=200)
self.assertEqual(
self.db._add_role(role='developer', permission=spec['permission']),
self.ADD_ROLE)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
    @responses.activate
    def test_alter_role(self):
        """ Alter an existing role's permissions.
        """
        # expected JSON body of the POST request made by _alter_role
        spec = {
            'operation': 'alter_role',
            'id': 'aUniqueID',
            'permission': {
                'super_user': False,
                'dev': {
                    'tables': {
                        'dog': {
                            'read': True,
                            'insert': True,
                            'update': True,
                            'delete': False,
                            'attribute_restrictions': []
                        }
                    }
                }
            }
        }
        # mock the server response
        responses.add(
            'POST',
            self.URL,
            json=self.ALTER_ROLE,
            status=200)
        self.assertEqual(
            self.db._alter_role(id='aUniqueID', permission=spec['permission']),
            self.ALTER_ROLE)
        self.assertLastRequestMatchesSpec(spec)
        self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_drop_role(self):
""" Drop a role.
"""
spec = {
'operation': 'drop_role',
'id': 'aUniqueID',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.DROP_ROLE,
status=200)
self.assertEqual(
self.db._drop_role(id='aUniqueID'),
self.DROP_ROLE)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_list_roles(self):
"""List Roles.
"""
spec = {
'operation': 'list_roles',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.LIST_ROLES,
status=200)
self.assertEqual(
self.db._list_roles(),
self.LIST_ROLES)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_add_node(self):
""" Add a node to a cluster.
"""
spec = {
'operation': 'add_node',
'name': 'anotherNode',
'host': 'hostname',
'port': 31415,
'subscriptions': [
{
'channel': 'dev:dog',
'subscribe': False,
'publish': True,
},
]
}
responses.add(
'POST',
self.URL,
json=self.NODE_ADDED,
status=200)
self.assertEqual(
self.db._add_node(
name=spec['name'],
host=spec['host'],
port=spec['port'],
subscriptions=spec['subscriptions'],),
self.NODE_ADDED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
    @responses.activate
    def test_update_node(self):
        """ Update a node in a cluster.
        """
        # expected JSON body of the POST request made by _update_node
        spec = {
            'operation': 'update_node',
            'name': 'anotherNode',
            'host': 'hostname',
            'port': 31415,
            'subscriptions': [
                {
                    'channel': 'dev:dog',
                    'subscribe': False,
                    'publish': True,
                },
            ]
        }
        # NOTE(review): this reuses the NODE_ADDED fixture as the mocked response;
        # if a NODE_UPDATED fixture exists it is probably the intended payload -- confirm.
        responses.add(
            'POST',
            self.URL,
            json=self.NODE_ADDED,
            status=200)
        self.assertEqual(
            self.db._update_node(
                name=spec['name'],
                host=spec['host'],
                port=spec['port'],
                subscriptions=spec['subscriptions'],),
            self.NODE_ADDED)
        self.assertLastRequestMatchesSpec(spec)
        self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_remove_node(self):
""" Remove a node from a cluster.
"""
spec = {
'operation': 'remove_node',
'name': 'anotherNode',
}
responses.add(
'POST',
self.URL,
json=self.NODE_REMOVED,
status=200)
self.assertEqual(
self.db._remove_node(name=spec['name']),
self.NODE_REMOVED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_cluster_status(self):
""" Retrieve cluster status.
"""
spec = {
'operation': 'cluster_status',
}
responses.add(
'POST',
self.URL,
json=self.CLUSTER_STATUS,
status=200)
self.assertEqual(self.db._cluster_status(), self.CLUSTER_STATUS)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_registration_info(self):
""" Retrieve registration info.
"""
spec = {
'operation': 'registration_info',
}
responses.add(
'POST',
self.URL,
json=self.REGISTRATION,
status=200)
self.assertEqual(self.db._registration_info(), self.REGISTRATION)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_get_fingerprint(self):
""" Retrieve fingerprint.
"""
spec = {
'operation': 'get_fingerprint',
}
responses.add(
'POST',
self.URL,
json=self.FINGERPRINT,
status=200)
self.assertEqual(self.db._get_fingerprint(), self.FINGERPRINT)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_set_license(self):
""" Set license.
"""
spec = {
'operation': 'set_license',
'key': 'myLicenseKey',
'company': 'myCompany',
}
responses.add(
'POST',
self.URL,
json=self.SET_LICENSE,
status=200)
self.assertEqual(
self.db._set_license(spec['key'], spec['company']),
self.SET_LICENSE)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_delete_files_before(self):
""" Delete records before a given date.
"""
spec = {
'operation': 'delete_files_before',
'date': '2018-07-10',
'schema': 'dev',
'table': 'dog',
}
# mock the server response
responses.add(
'POST',
self.URL,
json=self.RECORDS_DELETED,
status=200)
self.assertEqual(
self.db._delete_files_before(
date=spec['date'],
schema=spec['schema'],
table=spec['table']),
self.RECORDS_DELETED)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_export_local(self):
""" Export records to server file system.
"""
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
spec = {
'operation': 'export_local',
'format': 'json',
'path': 'path/to/data',
'search_operation': {
'operation': 'search_by_hash',
'hash_values': ['uniqueHash'],
},
}
self.assertEqual(
self.db._export_local(
path=spec['path'],
hash_values=['uniqueHash']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
# test keyword args
spec['format'] = 'csv'
spec['search_operation'] = {
'operation': 'search_by_value',
'search_attribute': 'pi',
'search_value': 3.14,
}
self.assertEqual(
self.db._export_local(
path=spec['path'],
search_attribute='pi',
search_value=3.14,
format='csv'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 2)
# more keyword args
spec['format'] = 'json'
spec['search_operation'] = {
'operation': 'sql',
'sql': 'my SQL string',
}
self.assertEqual(
self.db._export_local(
path=spec['path'],
sql=spec['search_operation']['sql']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 3)
@responses.activate
def test_export_to_s3(self):
""" Export records to an Amazon S3 bucket.
"""
# mock the server response
responses.add(
'POST',
self.URL,
json=self.START_JOB,
status=200)
spec = {
'operation': 'export_to_s3',
'format': 'json',
'search_operation': {
'operation': 'search_by_hash',
'hash_values': ['uniqueHash'],
},
's3': {
'aws_access_key_id': 'myKey',
'aws_secret_access_key': 'mySecretKey',
'bucket': 'myBucket',
'key': 'KEY',
},
}
self.assertEqual(
self.db._export_to_s3(
aws_access_key='myKey',
aws_secret_access_key='mySecretKey',
bucket='myBucket',
key='KEY',
hash_values=['uniqueHash']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
spec = {
'operation': 'export_to_s3',
'format': 'csv',
'search_operation': {
'operation': 'search_by_value',
'search_attribute': 'pi',
'search_value': 3.14,
},
's3': {
'aws_access_key_id': 'myKey',
'aws_secret_access_key': 'mySecretKey',
'bucket': 'myBucket',
'key': 'KEY',
},
}
self.assertEqual(
self.db._export_to_s3(
aws_access_key='myKey',
aws_secret_access_key='mySecretKey',
bucket='myBucket',
key='KEY',
search_attribute='pi',
search_value=3.14,
format='csv'),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 2)
spec = {
'operation': 'export_to_s3',
'format': 'json',
'search_operation': {
'operation': 'sql',
'sql': 'my SQL string',
},
's3': {
'aws_access_key_id': 'myKey',
'aws_secret_access_key': 'mySecretKey',
'bucket': 'myBucket',
'key': 'KEY',
},
}
self.assertEqual(
self.db._export_to_s3(
aws_access_key='myKey',
aws_secret_access_key='mySecretKey',
bucket='myBucket',
key='KEY',
sql=spec['search_operation']['sql']),
self.START_JOB)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 3)
@responses.activate
def test_read_log(self):
""" Read the server log.
"""
spec = {
'operation': 'read_log',
'limit': 1000,
'start': 0,
'from': None,
'until': None,
'order': 'desc'
}
responses.add(
'POST',
self.URL,
json=self.READ_LOG,
status=200)
self.assertEqual(self.db._read_log(), self.READ_LOG)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
# test keyword args
spec = {
'operation': 'read_log',
'limit': 1001,
'start': 1,
'from': "2020-01-01",
'until': "2020-02-01",
'order': 'asc'
}
self.assertEqual(
self.db._read_log(
limit=spec['limit'],
start=spec['start'],
from_date=spec['from'],
to_date=spec['until'],
order=spec['order']),
self.READ_LOG)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 2)
@responses.activate
def test_system_information(self):
""" Retrieve system information.
"""
spec = {
'operation': 'system_information',
}
responses.add(
'POST',
self.URL,
json=self.SYSTEM_INFORMATION,
status=200)
self.assertEqual(
self.db._system_information(),
self.SYSTEM_INFORMATION)
self.assertLastRequestMatchesSpec(spec)
self.assertEqual(len(responses.calls), 1)
| 4,103 | 32,378 | 23 |
be4318651bc28b4444e1bbed1935110fd6ea221a | 8,274 | py | Python | src/sounding_selection/utilities.py | NoelDyer/Hydrographic-Sounding-Selection | 2797378ca50956d03ec00d6b1984ce9313e58e31 | [
"CC0-1.0"
] | null | null | null | src/sounding_selection/utilities.py | NoelDyer/Hydrographic-Sounding-Selection | 2797378ca50956d03ec00d6b1984ce9313e58e31 | [
"CC0-1.0"
] | null | null | null | src/sounding_selection/utilities.py | NoelDyer/Hydrographic-Sounding-Selection | 2797378ca50956d03ec00d6b1984ce9313e58e31 | [
"CC0-1.0"
] | 1 | 2022-01-20T13:07:54.000Z | 2022-01-20T13:07:54.000Z | import triangle
from shapely.geometry import Polygon, MultiPolygon, Point
from shapely.ops import unary_union
def triangulate(vertex_list, boundary_vertices=None, boundary_indexes=None):
    """ Triangulate a set of points with a Python wrapper of Triangle (Shewchuk, 1996).

    If both boundary vertices and boundary segment indexes are supplied, the
    triangulation is constrained to that boundary; otherwise a plain Delaunay
    triangulation of the input points is produced.
    """
    coords = [[v.get_x(), v.get_y()] for v in vertex_list]
    if boundary_vertices is None or boundary_indexes is None:
        # Unconstrained Delaunay triangulation
        return triangle.triangulate({'vertices': coords})
    # Constrained: boundary vertices first, then interior points not already on the boundary
    points = [[v.get_x(), v.get_y()] for v in boundary_vertices]
    interior = [xy for xy in coords if xy not in points]
    points.extend(interior)
    # 'p': input is a planar straight line graph; 'C': exact arithmetic; 'S0': no Steiner points
    return triangle.triangulate({'vertices': points, 'segments': boundary_indexes}, 'pCS0')
def fill_poly_gaps(mqual_poly):
    """ Fills gaps in MultiPolygons/Polygons by rebuilding the geometry from the exterior coordinates of each polygon
        and then using a bounding rectangle of the new polygons to eliminate any remaining gaps that can occur from
        touching polygon edges. """
    parts = list()
    if mqual_poly.geom_type == 'MultiPolygon':
        # rebuild each member polygon from its exterior ring only, discarding interior holes
        for geom in mqual_poly.geoms:
            p = Polygon(geom.exterior.coords)
            parts.append(p)
        dissolve_poly = unary_union(parts)
        xmin, ymin, xmax, ymax = dissolve_poly.bounds
        # bounding rectangle grown slightly (buffer distance 1, in coordinate units) so the
        # difference below yields a well-formed outer frame around the dissolved shape
        bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
        geom_list = list()
        diff_poly = bounding_rect.difference(dissolve_poly)
        for diff_poly_geom in diff_poly.geoms:
            geom_list.append(diff_poly_geom)
        # sorting by bounds puts the outer frame piece first (it presumably reaches the
        # buffered rectangle's minimum x, while gap pieces lie inside -- TODO confirm);
        # subtracting that frame keeps the dissolved shape with its gaps filled
        sorted_geoms = sorted(geom_list, key=lambda k: k.bounds)
        fill_poly = bounding_rect.difference(sorted_geoms[0])
        poly = fill_poly.buffer(0)  # buffer(0) repairs any self-intersections
    else:
        dissolve_poly = unary_union(mqual_poly)
        xmin, ymin, xmax, ymax = dissolve_poly.bounds
        bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
        diff_poly = bounding_rect.difference(dissolve_poly)
        # subtracting the outside area back out of the rectangle leaves the filled polygon
        fill_poly = bounding_rect.difference(diff_poly)
        poly = fill_poly.buffer(0)
    # normalize the result to an explicit MultiPolygon/Polygon instance
    if poly.geom_type == 'MultiPolygon':
        final_poly = MultiPolygon(poly)
    else:
        final_poly = Polygon(poly)
    return final_poly
def create_idx(start, end):
    """ Build consecutive index pairs for boundary vertices so that segments can be
        formed for a constrained triangulation; the final pair closes the ring. """
    segments = [[i, i + 1] for i in range(start, end)]
    segments.append([end, start])
    return segments
def get_boundary_points(poly, point_set, point_tree):
    """ Extract the exterior vertex coordinates of a (Multi)Polygon and return them as the
        matching Vertex objects from the point set, along with the segment index list
        needed for a constrained triangulation.

    Args:
        poly: shapely Polygon or MultiPolygon whose exterior ring(s) are walked.
        point_set: point collection the returned vertices are drawn from.
        point_tree: spatial index over point_set supporting get_points_in_polygon().

    Returns:
        (boundary_vertices, index_list): the boundary Vertex objects in ring order, and
        index pairs (one closed loop per ring, built via create_idx) for segments.
    """
    # Treat a single Polygon as a one-geometry collection so both cases share one code
    # path (the original duplicated this loop verbatim for each branch).
    if poly.geom_type == 'MultiPolygon':
        geometries = list(poly.geoms)
    else:
        geometries = [poly]
    boundary_dict = dict()
    for geom_index, geom in enumerate(geometries):
        boundary_dict[geom_index] = list()
        x, y = geom.exterior.coords.xy
        # the last exterior coordinate repeats the first, so it is skipped
        for i in range(len(x) - 1):
            point_list = list()
            # tiny buffer turns the bare coordinate into a query polygon for the tree
            p_buffer = Point(x[i], y[i]).buffer(0.00000001)
            point_tree.get_points_in_polygon(point_tree.get_root(), 0, point_set.get_domain(), p_buffer, point_set,
                                             point_list)
            boundary_dict[geom_index].append(point_list[0])
    boundary_vertices, length_list = list(), list()
    for poly_index in boundary_dict.keys():
        # record ring length minus one; used below to offset per-ring index ranges
        length_list.append(len(boundary_dict[poly_index]) - 1)
        boundary_vertices.extend(boundary_dict[poly_index])
    index_list = list()
    for i in range(len(length_list)):
        if i == 0:
            start = 0
        else:
            # offset by the previous ring lengths (+i accounts for each ring's last vertex)
            start = sum(length_list[:i]) + i
        end = start + length_list[i]
        index_list.extend(create_idx(start, end))
    return boundary_vertices, index_list
def simplify_mqual(triangulation, mqual_poly):
    """ Simplifies MQUAL boundary by taking an input Delaunay triangulation of the source soundings, and removing
        triangles whose centroid does not intersect the original MQUAL polygon. The simplified MQUAL boundary will have
        vertices that are in the source soundings dataset, which is important for the triangle test during validation.
        This process can result in geometries with unwanted gaps, which are eliminated using fill_poly_gaps(). """
    delete_triangles, tin_triangles, = list(), list()
    # Get each triangle of the TIN
    for index, value in enumerate(triangulation['triangles']):
        tri_list = list()
        for v_id in value:
            tri_list.append(v_id)
        # resolve the triangle's vertex ids to x/y coordinates
        triangle_points = list()
        for vertex_id in tri_list:
            vertex = triangulation['vertices'][vertex_id]
            triangle_points.append([vertex[0], vertex[1]])
        triangle_poly = Polygon(triangle_points)
        tri_centroid = triangle_poly.centroid
        # Flag triangle if centroid is outside MQUAL polygon
        if mqual_poly.intersects(tri_centroid) is False:
            delete_triangles.append(triangle_poly)
        tin_triangles.append(triangle_poly)
    tin_shape = unary_union(tin_triangles)
    # Delete triangles from shape, beginning with largest area
    sorted_del_triangles = sorted(delete_triangles, key=lambda k: k.area, reverse=True)
    for delete_poly in sorted_del_triangles:
        x, y = delete_poly.exterior.coords.xy
        delete_tri_points = list()
        # last exterior coordinate repeats the first, so it is skipped
        for i in range(len(x) - 1):
            delete_tri_points.append(Point(x[i], y[i]))
        tin_shape = tin_shape.difference(delete_poly)
        # Check to ensure removed triangle does not exclude source soundings from simplified polygon
        intersect_check = [point.intersects(tin_shape) for point in delete_tri_points]
        if False in intersect_check:
            # restore the triangle: one of its vertices fell outside the remaining shape
            tin_shape = unary_union([tin_shape, delete_poly])
    if tin_shape.geom_type == 'MultiPolygon':
        # keep only parts whose centroid still lies inside the original MQUAL polygon
        final_poly = list()
        for geom in tin_shape.geoms:
            if mqual_poly.intersects(geom.centroid) is True:
                final_poly.append(geom)
        poly = MultiPolygon(final_poly).buffer(0)
    else:
        poly = Polygon(tin_shape.buffer(0))
    return poly
def modified_binary_search(sorted_vertices, vertex):
    """ Locate the index of `vertex` in a list sorted by z-value.

    Two binary scans bracket the run of vertices sharing the target depth; when the
    run is a single element its index is returned directly, otherwise a short linear
    scan over the run finds the exact vertex. Used to speed up sounding removal
    during the label-based generalization. Returns None if the vertex is absent.
    """
    depth = vertex.get_z()
    # bisect-right style scan: first index past all vertices with z <= depth
    lo, hi = 0, len(sorted_vertices)
    while lo < hi:
        mid = (lo + hi) // 2
        if depth < sorted_vertices[mid].get_z():
            hi = mid
        else:
            lo = mid + 1
    right = lo
    # bisect-left style scan, restricted to indexes below right - 1
    lo, hi = 0, right - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if depth > sorted_vertices[mid].get_z():
            lo = mid + 1
        else:
            hi = mid
    left = lo
    if left == right - 1:
        # exactly one candidate in the bracket
        return left
    # several vertices share this depth: fall back to a linear equality scan
    for idx in range(left, right):
        if sorted_vertices[idx] == vertex:
            return idx
| 38.305556 | 120 | 0.623036 | import triangle
from shapely.geometry import Polygon, MultiPolygon, Point
from shapely.ops import unary_union
def triangulate(vertex_list, boundary_vertices=None, boundary_indexes=None):
""" Uses a Python wrapper of Triangle (Shechuck, 1996) to triangulate a set of points. Triangulation is
constrained if bounding vertices are provided; otherwise the triangulation is Delaunay. """
xy_list = [[v.get_x(), v.get_y()] for v in vertex_list]
if boundary_vertices is not None and boundary_indexes is not None:
boundary_points = [[v.get_x(), v.get_y()] for v in boundary_vertices]
unique_points = [point for point in xy_list if point not in boundary_points]
boundary_points.extend(unique_points)
# Constrained
triangulation = triangle.triangulate({'vertices': boundary_points,
'segments': boundary_indexes},
'pCS0') # p: PSLG; C: Exact arithmetic; S_: Steiner point limit
else:
# Delaunay
triangulation = triangle.triangulate({'vertices': xy_list})
return triangulation
def fill_poly_gaps(mqual_poly):
""" Fills gaps in MultiPolygons/Polygons by rebuilding the geometry from the exterior coordinates of each polygon
and then using a bounding rectangle of the new polygons to eliminate any remaining gaps that can occur from
touching polygon edges. """
parts = list()
if mqual_poly.geom_type == 'MultiPolygon':
for geom in mqual_poly.geoms:
p = Polygon(geom.exterior.coords)
parts.append(p)
dissolve_poly = unary_union(parts)
xmin, ymin, xmax, ymax = dissolve_poly.bounds
bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
geom_list = list()
diff_poly = bounding_rect.difference(dissolve_poly)
for diff_poly_geom in diff_poly.geoms:
geom_list.append(diff_poly_geom)
sorted_geoms = sorted(geom_list, key=lambda k: k.bounds)
fill_poly = bounding_rect.difference(sorted_geoms[0])
poly = fill_poly.buffer(0)
else:
dissolve_poly = unary_union(mqual_poly)
xmin, ymin, xmax, ymax = dissolve_poly.bounds
bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
diff_poly = bounding_rect.difference(dissolve_poly)
fill_poly = bounding_rect.difference(diff_poly)
poly = fill_poly.buffer(0)
if poly.geom_type == 'MultiPolygon':
final_poly = MultiPolygon(poly)
else:
final_poly = Polygon(poly)
return final_poly
def create_idx(start, end):
""" Creates indexes for boundary vertices so that segments can be created for a constrained triangulation. """
return [[i, i + 1] for i in range(start, end)] + [[end, start]]
def get_boundary_points(poly, point_set, point_tree):
""" Extracts polygon vertex coordinates and returns the coordinates as Vertex objects along with the associated
index list. """
boundary_dict = dict()
if poly.geom_type == 'MultiPolygon':
for geom_index in range(len(poly.geoms)):
boundary_dict[geom_index] = list()
geom = poly.geoms[geom_index]
x, y = geom.exterior.coords.xy
for i in range(len(x) - 1):
point_list = list()
p = Point(x[i], y[i])
p_buffer = p.buffer(0.00000001)
point_tree.get_points_in_polygon(point_tree.get_root(), 0, point_set.get_domain(), p_buffer, point_set,
point_list)
boundary_dict[geom_index].append(point_list[0])
else:
x, y = poly.exterior.coords.xy
boundary_dict[0] = list()
for i in range(len(x) - 1):
point_list = list()
p = Point(x[i], y[i])
p_buffer = p.buffer(0.00000001)
point_tree.get_points_in_polygon(point_tree.get_root(), 0, point_set.get_domain(), p_buffer, point_set,
point_list)
boundary_dict[0].append(point_list[0])
boundary_vertices, length_list = list(), list()
for poly_index in boundary_dict.keys():
poly_length = len(boundary_dict[poly_index])
length_list.append(poly_length-1)
for vertex in boundary_dict[poly_index]:
boundary_vertices.append(vertex)
index_list = list()
for i in range(len(length_list)):
if i == 0:
start = 0
end = length_list[i]
else:
start = sum(length_list[:i]) + i
end = start + length_list[i]
index_list.extend(create_idx(start, end))
return boundary_vertices, index_list
def simplify_mqual(triangulation, mqual_poly):
""" Simplifies MQUAL boundary by taking an input Delaunay triangulation of the source soundings, and removing
triangles whose centroid does not intersect the original MQUAL polygon. The simplified MQUAL boundary will have
vertices that are in the source soundings dataset, which is important for the triangle test during validation.
This process can result in geometries with unwanted gaps, which are eliminated using fill_poly_gaps(). """
delete_triangles, tin_triangles, = list(), list()
# Get each triangle of the TIN
for index, value in enumerate(triangulation['triangles']):
tri_list = list()
for v_id in value:
tri_list.append(v_id)
triangle_points = list()
for vertex_id in tri_list:
vertex = triangulation['vertices'][vertex_id]
triangle_points.append([vertex[0], vertex[1]])
triangle_poly = Polygon(triangle_points)
tri_centroid = triangle_poly.centroid
# Flag triangle if centroid is outside MQUAL polygon
if mqual_poly.intersects(tri_centroid) is False:
delete_triangles.append(triangle_poly)
tin_triangles.append(triangle_poly)
tin_shape = unary_union(tin_triangles)
# Delete triangles from shape, beginning with largest area
sorted_del_triangles = sorted(delete_triangles, key=lambda k: k.area, reverse=True)
for delete_poly in sorted_del_triangles:
x, y = delete_poly.exterior.coords.xy
delete_tri_points = list()
for i in range(len(x) - 1):
delete_tri_points.append(Point(x[i], y[i]))
tin_shape = tin_shape.difference(delete_poly)
# Check to ensure removed triangle does not exclude source soundings from simplified polygon
intersect_check = [point.intersects(tin_shape) for point in delete_tri_points]
if False in intersect_check:
tin_shape = unary_union([tin_shape, delete_poly])
if tin_shape.geom_type == 'MultiPolygon':
final_poly = list()
for geom in tin_shape.geoms:
if mqual_poly.intersects(geom.centroid) is True:
final_poly.append(geom)
poly = MultiPolygon(final_poly).buffer(0)
else:
poly = Polygon(tin_shape.buffer(0))
return poly
def modified_binary_search(sorted_vertices, vertex):
""" Modified binary search algorithm to increase performance when removing soundings during the
label-based generalization. """
right, left = 0, 0
vertices_num = len(sorted_vertices)
while right < vertices_num:
i = (right + vertices_num) // 2
if vertex.get_z() < sorted_vertices[i].get_z():
vertices_num = i
else:
right = i + 1
vertices_num = right - 1
while left < vertices_num:
i = (left + vertices_num) // 2
if vertex.get_z() > sorted_vertices[i].get_z():
left = i + 1
else:
vertices_num = i
if left == right-1:
return left
else:
for idx in range(left, right):
if sorted_vertices[idx] == vertex:
return idx
| 0 | 0 | 0 |
d54477b6aa70a46580284bc5806010f5dad7516f | 14,629 | py | Python | scripts/job_wrapper.py | Weeks-UNC/shapemapper-txome | 545d8fcd814511467a498ac6b47675be7fb96210 | [
"MIT"
] | null | null | null | scripts/job_wrapper.py | Weeks-UNC/shapemapper-txome | 545d8fcd814511467a498ac6b47675be7fb96210 | [
"MIT"
] | null | null | null | scripts/job_wrapper.py | Weeks-UNC/shapemapper-txome | 545d8fcd814511467a498ac6b47675be7fb96210 | [
"MIT"
] | null | null | null | import os
import random
import string
import subprocess as sp
import sys
import time
import uuid
from scripts.util import timestamp, makedirs, indent
from scripts.globals import god
# FIXME: add support for SLURM
# FIXME: add starcluster cluster name argument (SGE)
# FIXME: proc constraints for SGE submissions?
# TODO: add timeout for hung jobs?
# fully testing job wrapper interface would require some docker finesse -
# not sure how easy it is to spin up a simple starcluster, LSF, or SLURM system, since
# they usually involve multiple nodes
| 35.855392 | 107 | 0.543509 | import os
import random
import string
import subprocess as sp
import sys
import time
import uuid
from scripts.util import timestamp, makedirs, indent
from scripts.globals import god
# FIXME: add support for SLURM
# FIXME: add starcluster cluster name argument (SGE)
# FIXME: proc constraints for SGE submissions?
# TODO: add timeout for hung jobs?
# fully testing job wrapper interface would require some docker finesse -
# not sure how easy it is to spin up a simple starcluster, LSF, or SLURM system, since
# they usually involve multiple nodes
class Job:
    """ A single submitted cluster job: the command line, a unique identifier, and the
        paths of the files that capture its stdout/stderr. """

    def __init__(self,
                 cmd,
                 name=None,
                 id=None,
                 out_folder=".",
                 run_folder=".",
                 bsub_opts=""):
        """
        Args:
            cmd: command line to execute.
            name: optional human-readable job name (prepended to the job ID).
            id: optional explicit job ID; a short random ID is generated if omitted.
            out_folder: folder receiving the captured stdout/stderr files.
            run_folder: working folder for the job.
            bsub_opts: extra options passed through to LSF bsub.
        """
        self.name = name
        self.id = id
        self.cmd = cmd
        self.out_folder = out_folder
        self.run_folder = run_folder
        self.bsub_opts = bsub_opts
        # generate unique job ID
        if self.id is None:
            self.id = str(uuid.uuid4())[:8]
        # prepend name if given (so jobs will be more easily identified on LSF cluster)
        if self.name is not None:
            self.id = self.name + "_" + self.id
        # use job ID as name if none provided
        if self.name is None:
            self.name = self.id
        self.stdout = os.path.join(self.out_folder, self.id + ".stdout")
        self.stderr = os.path.join(self.out_folder, self.id + ".stderr")

    def get_stdout(self):
        """ Return the job's captured stdout, stripped of surrounding whitespace. """
        # context manager closes the handle; "r" replaces "rU" ('U' was removed in Python 3.11)
        with open(self.stdout, "r") as f:
            return f.read().strip()

    def get_stderr(self):
        """ Return the job's captured stderr, stripped of surrounding whitespace. """
        with open(self.stderr, "r") as f:
            return f.read().strip()

    def get_output(self):
        """ Return a combined, indented report of the job's stdout and stderr. """
        s = "stdout:\n"
        s += indent(self.get_stdout())
        s += "stderr:\n"
        s += indent(self.get_stderr())
        return s
def get_lsf_jobs(filter_jobs, all=False):
    """Return the members of filter_jobs that LSF's bjobs currently lists.

    filter_jobs: Job instances to look for (matched on Job.id == JOB_NAME)
    all:         if True, pass -a so recently terminated jobs are included too
    """
    cmd = "bjobs -w"
    if all:
        # include jobs with any status (will list recently terminated jobs as well)
        cmd += " -a"
    # drop the header line of bjobs output; DEVNULL avoids leaking an open devnull handle
    lines = sp.check_output(cmd, shell=True, stderr=sp.DEVNULL).decode().splitlines()[1:]
    # BUGFIX: was `ids = set().append('hey')`, which leaves ids == None
    # (set has no .append) and crashed on the first line.
    ids = set()
    for line in lines:
        s = str(line).split(None, 7)
        try:
            # JOB_NAME is the 7th whitespace-delimited column of `bjobs -w` output
            ids.add(s[6])
        except IndexError:
            # malformed/short line - skip it
            pass
    # limit running job ids to those that we started
    jobs = set()
    # TODO: there's probably a faster way to do this, maybe using LSF groups
    for job in filter_jobs:
        if job.id in ids:
            jobs.add(job)
    return list(jobs)
def get_lsf_returncode(stdout_filename):
    """Parse an LSF job's exit code out of its captured stdout file.

    LSF appends a status block to the job's stdout: either
    "Successfully completed." (return 0) or "Exited with exit code N".
    Raises RuntimeError if neither marker can be parsed.
    """
    error_msg = "Unable to parse LSF process return code for file {}".format(stdout_filename)
    # "r" replaces the legacy "rU" mode (removed in Python 3.11); the
    # context manager closes the handle the original code leaked.
    with open(stdout_filename, "r") as f:
        r = f.read()
    s_string = "\n\nSuccessfully completed.\n\nResource usage summary:\n\n"
    if s_string in r:
        return 0
    try:
        # locate "...Exited with exit code N.\n\nResource usage summary..."
        i_right = r.index(".\n\nResource usage summary:\n\n")
        i_left = r.index("\nExited with exit code ", 0, i_right)
        return int(r[i_left:i_right].split()[-1])
    except ValueError:
        raise RuntimeError(error_msg)
def get_sge_jobs(jobs):
    """Return the subset of *jobs* that SGE's qstat still knows about
    (i.e. jobs that are queued or running)."""
    alive = []
    for job in jobs:
        assert isinstance(job, Job)
        try:
            output = sp.check_output(
                "qstat -j {}".format(job.id),
                shell=True,
                stderr=open(os.devnull, 'w'),
            ).decode()
        except sp.CalledProcessError:
            # This may happen if no jobs have been run at all on the cluster yet
            continue
        first_line = output.splitlines()[0].strip()
        if first_line != "Following jobs do not exist:":
            alive.append(job)
    return alive
def get_sge_completed_jobs(jobs):
    """Return the subset of *jobs* that appear in SGE's accounting records
    (qacct), i.e. jobs that have finished."""
    finished = []
    for job in jobs:
        assert isinstance(job, Job)
        try:
            output = sp.check_output(
                "qacct -j {}".format(job.id),
                shell=True,
                stderr=open(os.devnull, 'w'),
            ).decode()
        except sp.CalledProcessError:
            # This may happen if no jobs have been run at all on the cluster yet
            continue
        if output.strip() != "error: job name {} not found".format(job.id):
            finished.append(job)
    return finished
def get_sge_returncode(job):
    """Look up a finished SGE job's exit status via qacct.

    Raises RuntimeError when no parseable "exit_status" line is found.
    """
    failure = RuntimeError("Unable to retrieve SGE exit status for job {}".format(job.id))
    lines = sp.check_output(
        "qacct -j {}".format(job.id),
        shell=True,
        stderr=open(os.devnull, 'w'),
    ).decode().splitlines()
    try:
        # scan from the end so the most recent record wins
        for line in reversed(lines):
            if line.startswith("exit_status"):
                return int(line.strip().split()[1])
        raise failure
    except ValueError:
        # int() failed on a malformed exit_status line
        raise failure
def run_jobs_sge(jobs,
                 max_concurrent_jobs=50):
    """Submit jobs to SGE via qsub, keeping at most max_concurrent_jobs queued/running.

    Polls qstat/qacct until every job has completed, then checks each job's
    exit status. Returns True only if all jobs exited with status 0.
    """
    assert all([isinstance(job, Job) for job in jobs])
    queued_jobs = list(jobs)
    notify_counter = 0.0
    notify_seconds = 5*60
    current_time = time.time()
    while len(get_sge_completed_jobs(jobs)) < len(jobs):
        current_jobs = get_sge_jobs(jobs)
        if len(current_jobs) < max_concurrent_jobs:
            # top up the SGE queue from our local queue
            n = max_concurrent_jobs-len(current_jobs)
            to_run = queued_jobs[:n]
            del queued_jobs[:n]
            for job in to_run:
                current_dir = os.getcwd()
                os.chdir(job.run_folder)
                # make sure the output folder exists so qsub doesn't give an error
                # creating stdout and stderr files
                makedirs(job.out_folder)
                makedirs(os.path.split(job.stdout)[0])
                # write command to job script (put in output folder so we don't clutter
                # up the top-level directory)
                f = open(job.out_folder+"/"+job.id+".sh", "w")
                f.write(job.cmd)
                f.close()
                cmd = "qsub -N {job_id} -o {stdout} -e {stderr} -V -cwd '{out_folder}/{job_id}.sh'"
                cmd = cmd.format(job_id=job.id,
                                 out_folder=job.out_folder,
                                 stdout=job.stdout,
                                 stderr=job.stderr)
                print("submitting job with command:")
                print(cmd)
                print('from within folder "{}"'.format(job.run_folder))
                print("at "+timestamp())
                job.proc = sp.Popen(cmd, shell=True)
                os.chdir(current_dir)
        time.sleep(0.1)
        t = time.time()
        time_delta = t - current_time
        current_time = t
        notify_counter += time_delta
        if notify_counter > notify_seconds:
            notify_counter = 0.0
            print("{} jobs not yet submitted, {} jobs running or waiting in SGE queue at {}".format(
                len(queued_jobs), len(current_jobs), timestamp()))
    success = True
    for job in jobs:
        try:
            if get_sge_returncode(job) != 0:
                success = False
                print("Error: Job failed for command" +
                      "\n\t" + job.cmd)
                # "r" replaces the legacy "rU" mode (removed in Python 3.11)
                print("Stdout:\n"+open(job.stdout, "r").read())
                print("Stderr:\n"+open(job.stderr, "r").read())
        except AttributeError:
            success = False
            print("Error: failed to start process for command" +
                  "\n\t" + job.cmd)
        except IOError:
            success = False
            print("Error: failed to capture output for command" +
                  "\n\t" + job.cmd)
    return success
def run_jobs_lsf(jobs,
                 max_concurrent_jobs=50):
    """Submit jobs to LSF via bsub, keeping at most max_concurrent_jobs in flight.

    Polls bjobs until everything has drained, then parses each job's
    captured stdout for its exit code. Returns True only if all jobs succeeded.
    """
    assert all([isinstance(job, Job) for job in jobs])
    # FIXME: add "child" jobs to same group as parent job (is this even possible from inside this process?)
    # - ideally all child jobs should be killed if the parent job is killed
    queued_jobs = list(jobs)
    current_jobs = []
    notify_counter = 0.0
    notify_seconds = 5*60
    current_time = time.time()
    # Note: if job.bsub_opts requests e.g. -n 4 R span[hosts=1],
    # then the number of apparent jobs on the cluster will be 4x max_concurrent_jobs
    while len(queued_jobs)>0 or len(current_jobs)>0:
        if len(current_jobs) < max_concurrent_jobs:
            n = max_concurrent_jobs-len(current_jobs)
            to_run = queued_jobs[:n]
            del queued_jobs[:n]
            for job in to_run:
                current_dir = os.getcwd()
                os.chdir(job.run_folder)
                cmd = "bsub -q day -J '{}' -o '{}' -e '{}' "
                # FIXME: this is probably not needed
                if job.bsub_opts.strip() == "":
                    cmd += "-n1 "
                cmd += job.bsub_opts + " "
                cmd += job.cmd
                cmd = cmd.format(job.id, job.stdout, job.stderr)
                print("submitting job with command:")
                print(cmd)
                print('from within folder "{}"'.format(job.run_folder))
                print("at "+timestamp())
                job.proc = sp.Popen(cmd, shell=True)
                os.chdir(current_dir)
            # wait until LSF reflects the recently queued jobs
            # (otherwise the outer loop will sometimes terminate early)
            # NOTE(review): this is a busy-wait with no sleep - confirm it is
            # acceptable load on the submission host
            while len(get_lsf_jobs(filter_jobs=to_run, all=True)) != len(to_run):
                pass
        time.sleep(0.1)
        current_jobs = get_lsf_jobs(filter_jobs=jobs)
        t = time.time()
        time_delta = t - current_time
        current_time = t
        notify_counter += time_delta
        if notify_counter > notify_seconds:
            notify_counter = 0.0
            print("{} jobs not yet submitted, {} jobs running or pending in LSF queue at {}".format(
                len(queued_jobs), len(current_jobs), timestamp(),))
    success = True
    for job in jobs:
        try:
            if get_lsf_returncode(job.stdout) != 0:
                success = False
                print("Error: Job failed for command" +
                      "\n\t" + job.cmd)
                # "r" replaces the legacy "rU" mode (removed in Python 3.11)
                print("Stdout:\n"+open(job.stdout, "r").read())
                print("Stderr:\n"+open(job.stderr, "r").read())
        except AttributeError:
            success = False
            print("Error: failed to start process for command" +
                  "\n\t" + job.cmd)
        except IOError:
            success = False
            print("Error: failed to capture output for command" +
                  "\n\t" + job.cmd)
    return success
def run_jobs_local(jobs,
                   max_concurrent_jobs=1):
    """Run jobs as local subprocesses, at most max_concurrent_jobs at a time.

    Each job's stdout/stderr are captured to job.stdout/job.stderr.
    Returns True only if every job exited with return code 0.
    """
    assert all([isinstance(job, Job) for job in jobs])
    queued_jobs = list(jobs)
    running_jobs = []
    notify_counter = 0.0
    notify_seconds = 5*60
    current_time = time.time()
    while len(queued_jobs)>0 or len(running_jobs)>0:
        if len(running_jobs) < max_concurrent_jobs:
            # top up the pool of running processes from the queue
            n = max_concurrent_jobs-len(running_jobs)
            to_run = queued_jobs[:n]
            del queued_jobs[:n]
            for job in to_run:
                current_dir = os.getcwd()
                os.chdir(job.run_folder)
                # handles stay open for the lifetime of the child process
                stdout = open(job.stdout, "w")
                stderr = open(job.stderr, "w")
                print("\nRunning local job with command:")
                print(job.cmd)
                print('from within folder "{}"'.format(job.run_folder))
                print("at "+timestamp())
                job.proc = sp.Popen(job.cmd, shell=True,
                                    stdout=stdout, stderr=stderr)
                os.chdir(current_dir)
        time.sleep(0.1)
        # rebuild the list of still-running processes
        running_jobs = []
        for job in jobs:
            try:
                if job.proc.poll() is None:
                    running_jobs.append(job)
            except AttributeError:
                # job.proc not set yet (job still queued)
                pass
        t = time.time()
        time_delta = t - current_time
        current_time = t
        notify_counter += time_delta
        if notify_counter > notify_seconds:
            notify_counter = 0.0
            print("{} jobs remaining, {} jobs running".format(len(queued_jobs),
                                                              len(running_jobs),
                                                              ))
            sys.stdout.flush()
    success = True
    for job in jobs:
        try:
            if job.proc.returncode != 0:
                success = False
                print("Error: Job failed for command"+
                      "\n\t"+job.cmd)
                print("Stdout:")
                # "r" replaces the legacy "rU" mode (removed in Python 3.11)
                print(open(job.stdout, "r").read())
                print("Stderr:")
                print(open(job.stderr, "r").read())
        except AttributeError:
            success = False
            print("Error: failed to start process for command"+
                  "\n\t"+job.cmd)
    return success
def run_jobs(jobs,
             max_concurrent_jobs=50,
             platform="local"):
    """Dispatch `jobs` to the runner for `platform` ("local", "lsf" or "sge")."""
    assert all(isinstance(job, Job) for job in jobs)
    assert platform in ("local", "lsf", "sge")
    if platform == "local":
        return run_jobs_local(jobs, max_concurrent_jobs=max_concurrent_jobs)
    if platform == "lsf":
        return run_jobs_lsf(jobs, max_concurrent_jobs=max_concurrent_jobs)
    if platform == "sge":
        return run_jobs_sge(jobs, max_concurrent_jobs=max_concurrent_jobs)
def stage(dir="out",
          done="done",
          cmd=None,
          cmds=None,
          dirs=None,
          name="job"):
    """Run one pipeline stage (a command or list of commands) as a batch of jobs.

    dir:  output folder for all jobs, unless per-command `dirs` is given
    done: marker filename; if it already exists the stage is skipped, and it
          is created (os.mknod) only when all jobs succeed
    cmd:  a single shell command (appended to `cmds`)
    cmds: list of shell commands, one Job each
    dirs: optional per-command output folders (parallel to `cmds`)
    name: label used for job names and log messages

    Exits the process (sys.exit(1)) if any job fails. Platform, concurrency
    and bsub options come from the module-global `god` config object.
    """
    # NOTE(review): `god` is only read here, so the `global` statement is
    # redundant - kept as-is.
    global god
    platform = god.platform
    max_jobs = god.max_jobs
    if os.path.isfile(done):
        # marker file present: a previous run completed this stage
        print("Skipping {} stage and using previous results.".format(name))
    else:
        s = "Running {} stage . . .".format(name)
        print('_' * len(s))
        print(s)
        jobs = []
        if cmds is None:
            cmds = []
        if cmd is not None:
            cmds.append(cmd)
        for i, cmd in enumerate(cmds):
            if dirs is not None:
                dir = dirs[i]
            makedirs(dir)
            jobs.append(Job(cmd,
                            name=name,
                            out_folder=dir,
                            bsub_opts=god.bsub_opts))
        success = run_jobs(jobs,
                           platform=platform,
                           max_concurrent_jobs=max_jobs)
        if success:
            # touch the marker so reruns skip this stage
            os.mknod(done)
            print(". . . successfully completed {} at {}.".format(name, timestamp()))
        else:
            sys.exit(1)
| 13,719 | -11 | 360 |
95600a88fb80cf310a5e21e05d62d39bf743a5d5 | 4,398 | py | Python | LookDevBlendPyLinux/cameraImportV2.py | moonyuet/blendLightRigAddon | 7e9f19ba1215b1797e86ee35c89f4e339e974869 | [
"CC0-1.0"
] | 2 | 2021-04-28T13:26:07.000Z | 2021-09-04T06:29:03.000Z | LookDevBlendPyLinux/cameraImportV2.py | moonyuet/blendLightRigAddon | 7e9f19ba1215b1797e86ee35c89f4e339e974869 | [
"CC0-1.0"
] | null | null | null | LookDevBlendPyLinux/cameraImportV2.py | moonyuet/blendLightRigAddon | 7e9f19ba1215b1797e86ee35c89f4e339e974869 | [
"CC0-1.0"
] | null | null | null | import bpy
bl_info ={
"name": "Camera",
"author" : "Kayla Man",
"version" : (1,0),
"blender" : (2,91,0),
"location" : " ",
"description" : "creating cameras in Blender",
"warning": "",
"wiki_url": "",
"category": "Camera"
}
import bpy
from bpy.props import PointerProperty, BoolProperty
if __name__ == "__main__":
register()
| 29.32 | 68 | 0.624147 | import bpy
bl_info ={
"name": "Camera",
"author" : "Kayla Man",
"version" : (1,0),
"blender" : (2,91,0),
"location" : " ",
"description" : "creating cameras in Blender",
"warning": "",
"wiki_url": "",
"category": "Camera"
}
import bpy
from bpy.props import PointerProperty, BoolProperty
class CameraSetPanel(bpy.types.Panel):
    """Sidebar (N-panel) UI for creating preset cameras and picking a render cam."""
    bl_label = "Camera Creation Add-On"
    bl_idname = "CAMERA_PT_PANEL"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Camera"

    def draw(self, context):
        layout = self.layout
        scene = bpy.context.scene
        # one row per camera-creation operator
        for op_id in ("frn.cambuild_operator",
                      "left.cambuild_operator",
                      "right.cambuild_operator",
                      "back.cambuild_operator"):
            layout.row().operator(op_id)
        layout.row().prop(scene.myCamView, "lock")
        layout.row().label(text="Select ONE Camera as RenderCam")
        layout.row().operator("cam.set_operator")
class FRN_CAM(bpy.types.Operator):
    """Add a front-facing preview camera to the active collection."""
    bl_label = "Front Camera"
    bl_idname = "frn.cambuild_operator"

    def execute(self, context):
        cam_data = bpy.data.cameras.new("Front Camera")
        cam_obj = bpy.data.objects.new("Front Camera", cam_data)
        cam_obj.location = (0, -0.8, 0.96)
        cam_obj.rotation_euler = (1.5708, 0, 0)
        bpy.context.collection.objects.link(cam_obj)
        return {"FINISHED"}
class LEFT_CAM(bpy.types.Operator):
    """Add a left-side preview camera to the active collection."""
    bl_label = "Left Camera"
    bl_idname = "left.cambuild_operator"

    def execute(self, context):
        cam_data = bpy.data.cameras.new("Left Camera")
        cam_obj = bpy.data.objects.new("Left Camera", cam_data)
        cam_obj.location = (1.5, 0, 0.96)
        cam_obj.rotation_euler = (1.5708, 0, 1.5708)
        bpy.context.collection.objects.link(cam_obj)
        return {"FINISHED"}
class RIGHT_CAM(bpy.types.Operator):
    """Add a right-side preview camera to the active collection."""
    bl_label = "Right Camera"
    bl_idname = "right.cambuild_operator"

    def execute(self, context):
        cam_data = bpy.data.cameras.new("Right Camera")
        cam_obj = bpy.data.objects.new("Right Camera", cam_data)
        cam_obj.location = (-1.5, 0, 0.96)
        cam_obj.rotation_euler = (1.5708, 0, -1.5708)
        bpy.context.collection.objects.link(cam_obj)
        return {"FINISHED"}
class BACK_CAM(bpy.types.Operator):
    """Add a rear-facing preview camera to the active collection."""
    bl_label = "Back Camera"
    bl_idname = "back.cambuild_operator"

    def execute(self, context):
        cam_data = bpy.data.cameras.new("Back Camera")
        cam_obj = bpy.data.objects.new("Back Camera", cam_data)
        cam_obj.location = (0, 0.8, 0.96)
        cam_obj.rotation_euler = (-1.5708, 3.14159, 0)
        bpy.context.collection.objects.link(cam_obj)
        return {"FINISHED"}
class SET_CAM(bpy.types.Operator):
    """Make the active object the 3D view's camera."""
    bl_label = "Set Camera"
    bl_idname = "cam.set_operator"

    def execute(self, context):
        bpy.context.space_data.camera = bpy.context.active_object
        return {"FINISHED"}
def lockCameraToView(self, context):
    """Update callback for CAMDRIVENSET.lock: mirror the checkbox onto the view's camera lock."""
    bpy.context.space_data.lock_camera = self.lock
class CAMDRIVENSET(bpy.types.PropertyGroup):
    # Add-on settings stored on the Scene (attached as Scene.myCamView in register()).
    # "lock" drives the viewport's lock_camera flag through the update callback.
    lock: BoolProperty(
        name="Lock Camera To View",
        subtype="NONE",
        default = False,
        update=lockCameraToView)
def register():
    """Register the add-on's classes and attach the settings group to the Scene."""
    for cls in (CameraSetPanel, FRN_CAM, LEFT_CAM, RIGHT_CAM,
                BACK_CAM, SET_CAM, CAMDRIVENSET):
        bpy.utils.register_class(cls)
    bpy.types.Scene.myCamView = PointerProperty(type=CAMDRIVENSET)
def unregister():
    """Unregister the add-on's classes and remove the Scene-level settings."""
    for cls in (CameraSetPanel, FRN_CAM, LEFT_CAM, RIGHT_CAM,
                BACK_CAM, SET_CAM, CAMDRIVENSET):
        bpy.utils.unregister_class(cls)
    del bpy.types.Scene.myCamView
if __name__ == "__main__":
register()
| 2,766 | 971 | 254 |
3138faa71281ece7141a027a18ff865eabffe8d1 | 1,337 | py | Python | keras_extensions/callbacks.py | kassonlab/keras_bn_library | a3fe200aa650428138790b7fa5ac4a4f65b48b3b | [
"MIT"
] | 32 | 2017-01-19T08:48:11.000Z | 2020-04-06T03:43:19.000Z | keras_extensions/callbacks.py | kassonlab/keras_bn_library | a3fe200aa650428138790b7fa5ac4a4f65b48b3b | [
"MIT"
] | 1 | 2017-10-10T21:50:41.000Z | 2017-10-10T21:50:41.000Z | keras_extensions/callbacks.py | kassonlab/keras_bn_library | a3fe200aa650428138790b7fa5ac4a4f65b48b3b | [
"MIT"
] | 15 | 2017-02-22T07:15:57.000Z | 2020-12-23T09:40:28.000Z | import sys
import numpy as np
import keras.backend as K
from keras.callbacks import Callback
from keras.models import Model, Sequential
| 30.386364 | 70 | 0.68736 | import sys
import numpy as np
import keras.backend as K
from keras.callbacks import Callback
from keras.models import Model, Sequential
class UnsupervisedLoss2Logger(Callback):
    """Keras callback that evaluates a symbolic two-sample loss during training.

    On construction, builds a backend function over two placeholders shaped
    like X_train/X_test. Every `every_n_epochs` epochs it evaluates the mean
    loss on (X_train, X_test) and prints it, optionally with the change since
    the previous evaluation.
    """

    def __init__(self, X_train, X_test, loss, verbose=1, batch_size=1,
                 label='loss', every_n_epochs=1, display_delta=True):
        """
        X_train/X_test: arrays fed to the compiled loss function
        loss:           callable(train_placeholder, test_placeholder) -> tensor
        label:          name printed next to the value
        every_n_epochs: evaluation interval
        display_delta:  also print the change from the previous value
        """
        super(UnsupervisedLoss2Logger, self).__init__()
        self.X_train = X_train
        self.X_test = X_test
        self.loss = loss
        self.verbose = verbose
        self.label = label
        self.every_n_epochs = every_n_epochs
        self.display_delta = display_delta
        self.prev_loss = None
        self.batch_size = batch_size

        # compile the symbolic loss once as a backend function of the two inputs
        input_train = K.placeholder(shape=self.X_train.shape)
        input_test = K.placeholder(shape=self.X_test.shape)
        loss = self.loss(input_train, input_test)
        ins = [input_train, input_test]
        self.loss_function = K.function(ins, loss)

    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` replaces the mutable `{}` default; `logs` is unused here.
        if (epoch+1) % self.every_n_epochs == 0:
            loss = np.mean(self.loss_function([self.X_train, self.X_test]))
            # compare with `is not None` so a legitimate 0.0 loss still
            # produces a delta (the old truthiness test dropped it)
            if self.prev_loss is not None:
                delta_loss = loss - self.prev_loss
            else:
                delta_loss = None
            self.prev_loss = loss
            if self.display_delta and delta_loss is not None:
                print(' - %s: %f (%+f)' % (self.label, loss, delta_loss))
            else:
                print(' - %s: %f' % (self.label, loss))
| 1,101 | 19 | 74 |
1a16e3f990d40c674f8e1df4f1338124ef440c72 | 2,023 | py | Python | misago/threads/api/threadendpoints/editor.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | 1 | 2017-07-25T03:04:36.000Z | 2017-07-25T03:04:36.000Z | misago/threads/api/threadendpoints/editor.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | misago/threads/api/threadendpoints/editor.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | from rest_framework.response import Response
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from misago.acl import add_acl
from misago.categories import THREADS_ROOT_NAME
from misago.categories.models import Category
from misago.threads.permissions import can_start_thread
from misago.threads.threadtypes import trees_map
| 33.163934 | 90 | 0.671775 | from rest_framework.response import Response
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from misago.acl import add_acl
from misago.categories import THREADS_ROOT_NAME
from misago.categories.models import Category
from misago.threads.permissions import can_start_thread
from misago.threads.threadtypes import trees_map
def thread_start_editor(request):
    """Return the category choices for the "start a thread" editor.

    Builds a flat, ordered list of browseable thread categories; each entry
    carries either `post=False` (category itself disallows new threads) or a
    dict of the user's close/hide/pin permissions. Categories are kept only
    if they, or one of their descendants, allow starting a thread.
    Raises PermissionDenied for anonymous users or when nothing is available.
    """
    if request.user.is_anonymous:
        raise PermissionDenied(_("You need to be signed in to start threads."))

    # list of categories that allow or contain subcategories that allow new threads
    available = []
    categories = []

    queryset = Category.objects.filter(
        pk__in=request.user.acl_cache['browseable_categories'],
        tree_id=trees_map.get_tree_id_for_root(THREADS_ROOT_NAME)
    ).order_by('-lft')

    # '-lft' visits children before their parents, so appending parent_id
    # below propagates "has a postable descendant" up the tree in one pass
    for category in queryset:
        add_acl(request.user, category)

        post = False
        if can_start_thread(request.user, category):
            post = {
                'close': bool(category.acl['can_close_threads']),
                'hide': bool(category.acl['can_hide_threads']),
                'pin': category.acl['can_pin_threads'],
            }

            available.append(category.pk)
            available.append(category.parent_id)
        elif category.pk in available:
            # not postable itself, but a descendant was - keep its parent too
            available.append(category.parent_id)

        categories.append({
            'id': category.pk,
            'name': category.name,
            'level': category.level - 1,
            'post': post,
        })

    # list only categories that allow new threads, or contains subcategory that allows one
    # (reversed() restores top-down display order)
    cleaned_categories = []
    for category in reversed(categories):
        if category['id'] in available:
            cleaned_categories.append(category)

    if not cleaned_categories:
        raise PermissionDenied(
            _("No categories that allow new threads are available to you at the moment.")
        )

    return Response(cleaned_categories)
| 1,619 | 0 | 23 |
955aff6693c99ba1b17b5b6b9b8029c959f4b210 | 390 | py | Python | python/06/potentiometer.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | 6 | 2022-03-05T02:36:57.000Z | 2022-03-12T12:31:27.000Z | python/06/potentiometer.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | null | null | null | python/06/potentiometer.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | null | null | null | import pigpio
import time
INTERVAL = 0.1
pi = pigpio.pi()
h = pi.spi_open(0, 1000000, 0)
try:
while True:
print(read_adc_ch0(pi, h) * 3.3)
time.sleep(INTERVAL)
except KeyboardInterrupt:
pass
pi.spi_close(h)
pi.stop()
| 16.25 | 66 | 0.633333 | import pigpio
import time
INTERVAL = 0.1
def read_adc_ch0(pi, h):
    """Read channel 0 of the SPI ADC and return it as a fraction of full scale (0.0-1.0)."""
    _, rx = pi.spi_xfer(h, [0b01101000, 0])
    # 10-bit result: the low 2 bits of the first byte are the high bits
    raw = ((rx[0] & 0x03) << 8) | rx[1]
    return raw / 1023
pi = pigpio.pi()  # connect to the local pigpio daemon
h = pi.spi_open(0, 1000000, 0)  # SPI bus 0 @ 1 MHz, default flags
try:
    while True:
        # scale the normalized ADC reading (0..1) by the 3.3 V reference
        print(read_adc_ch0(pi, h) * 3.3)
        time.sleep(INTERVAL)
except KeyboardInterrupt:
    # Ctrl-C ends the sampling loop cleanly
    pass
pi.spi_close(h)
pi.stop()
| 120 | 0 | 23 |
2a0d11630311e52aedb1075f674099510af71845 | 7,910 | py | Python | pages/predictions.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | pages/predictions.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | pages/predictions.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
import pandas as pd
# Imports from this application
from app import app
#Pipeline
pipeline = load('assets/pipeline.joblib')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Select a few options that may apply to the shoes.
The average shoe price is the starting point.
*You will be able to make up shoes(Open toed boot.)
"""
),
dcc.Dropdown(
id='brand',
options = [
{'label': 'Brinley Co.', 'value': 'co.'},
{'label': 'Propet', 'value': 'propet'},
{'label': 'SAS', 'value': 'sas'},
{'label': 'Trotters', 'value': 'trotters'},
{'label': 'Pleaser', 'value': 'pleaser'},
{'label': 'Soda', 'value': 'soda'},
{'label': 'Spring Step', 'value': 'spring'},
{'label': 'Aerosoles', 'value': 'aerosoles'},
{'label': 'Softwalk', 'value': 'softwalk'},
{'label': "L'Artiste", 'value': "l'artiste"},
{'label': 'Ellie Shoes', 'value': 'ellie'},
{'label': 'Drew', 'value': 'drew'},
{'label': 'Steve Madden', 'value': 'madden'},
{'label': "New Balance", 'value': "new"},
{'label': "Toms", 'value': "tom"},
{'label': "Other", 'value': "other"},
],
placeholder="Select a Brand",
value = 'Brand',
className='mb-2',
),
html.Div([
dcc.Markdown("Shoe discontinued?"),
dcc.RadioItems(
id='available',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("On Sale?"),
dcc.RadioItems(
id='has_sale',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Purchased Online?"),
dcc.RadioItems(
id='online',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Was shipping free?"),
dcc.RadioItems(
id='free_shipping',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
],
)
column2 = dbc.Col(
[
html.Div([
dcc.Markdown("Does shoe have heel?"),
dcc.RadioItems(
id='has_heel',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe look like a boot?"),
dcc.RadioItems(
id='is_boot',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Is the bottom flat?"),
dcc.RadioItems(
id='is_flat',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Do the toes show?"),
dcc.RadioItems(
id='open_toe',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does the shoe cut off at the ankle?"),
dcc.RadioItems(
id='ankle_height',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe have accessories?(i.e. straps/lace)"),
dcc.RadioItems(
id='accessories',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does box/tag have a description?"),
dcc.RadioItems(
id='has_description',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
]
)
column3 = dbc.Col(
[
html.H2('Estimated Price of Shoes', className='mb-5'),
html.Div(id='prediction-content', className='lead')
]
)
@app.callback(
Output('prediction-content', 'children'),[
Input('brand', 'value'),
Input('available', 'value'),
Input('has_sale', 'value'),
Input('online', 'value'),
Input('free_shipping', 'value'),
Input('has_heel', 'value'),
Input('is_boot', 'value'),
Input('is_flat', 'value'),
Input('open_toe', 'value'),
Input('ankle_height', 'value'),
Input('accessories', 'value'),
Input('has_description', 'value'),
],
)
layout = dbc.Row([column1, column2, column3])
| 30.423077 | 112 | 0.427307 | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
import pandas as pd
# Imports from this application
from app import app
#Pipeline
pipeline = load('assets/pipeline.joblib')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Select a few options that may apply to the shoes.
The average shoe price is the starting point.
*You will be able to make up shoes(Open toed boot.)
"""
),
dcc.Dropdown(
id='brand',
options = [
{'label': 'Brinley Co.', 'value': 'co.'},
{'label': 'Propet', 'value': 'propet'},
{'label': 'SAS', 'value': 'sas'},
{'label': 'Trotters', 'value': 'trotters'},
{'label': 'Pleaser', 'value': 'pleaser'},
{'label': 'Soda', 'value': 'soda'},
{'label': 'Spring Step', 'value': 'spring'},
{'label': 'Aerosoles', 'value': 'aerosoles'},
{'label': 'Softwalk', 'value': 'softwalk'},
{'label': "L'Artiste", 'value': "l'artiste"},
{'label': 'Ellie Shoes', 'value': 'ellie'},
{'label': 'Drew', 'value': 'drew'},
{'label': 'Steve Madden', 'value': 'madden'},
{'label': "New Balance", 'value': "new"},
{'label': "Toms", 'value': "tom"},
{'label': "Other", 'value': "other"},
],
placeholder="Select a Brand",
value = 'Brand',
className='mb-2',
),
html.Div([
dcc.Markdown("Shoe discontinued?"),
dcc.RadioItems(
id='available',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("On Sale?"),
dcc.RadioItems(
id='has_sale',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Purchased Online?"),
dcc.RadioItems(
id='online',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Was shipping free?"),
dcc.RadioItems(
id='free_shipping',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
],
)
column2 = dbc.Col(
[
html.Div([
dcc.Markdown("Does shoe have heel?"),
dcc.RadioItems(
id='has_heel',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe look like a boot?"),
dcc.RadioItems(
id='is_boot',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Is the bottom flat?"),
dcc.RadioItems(
id='is_flat',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Do the toes show?"),
dcc.RadioItems(
id='open_toe',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does the shoe cut off at the ankle?"),
dcc.RadioItems(
id='ankle_height',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe have accessories?(i.e. straps/lace)"),
dcc.RadioItems(
id='accessories',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does box/tag have a description?"),
dcc.RadioItems(
id='has_description',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
]
)
column3 = dbc.Col(
[
html.H2('Estimated Price of Shoes', className='mb-5'),
html.Div(id='prediction-content', className='lead')
]
)
@app.callback(
Output('prediction-content', 'children'),[
Input('brand', 'value'),
Input('available', 'value'),
Input('has_sale', 'value'),
Input('online', 'value'),
Input('free_shipping', 'value'),
Input('has_heel', 'value'),
Input('is_boot', 'value'),
Input('is_flat', 'value'),
Input('open_toe', 'value'),
Input('ankle_height', 'value'),
Input('accessories', 'value'),
Input('has_description', 'value'),
],
)
def predict(brand, available,
            has_sale, online, free_shipping, has_heel, is_boot,
            is_flat, open_toe, ankle_height, accessories,
            has_description):
    """Dash callback: estimate a shoe price from the selected UI options.

    Inputs arrive as strings from the dropdown/radio controls; they are
    packed into a single-row DataFrame whose columns match the training
    features and fed to the pre-loaded `pipeline`. Returns a formatted price
    string, or a (gif image, message) pair when the model predicts a
    negative price.
    """
    df = pd.DataFrame(
        columns=['brand', 'available',
                 'has_sale', 'online', 'free_shipping', 'has_heel', 'is_boot',
                 'is_flat', 'open_toe', 'ankle_height', 'accessories',
                 'has_description'],
        data=[[brand, available,
               has_sale, online, free_shipping, has_heel, is_boot,
               is_flat, open_toe, ankle_height, accessories,
               has_description]]
    )
    y_pred = pipeline.predict(df)[0]
    if y_pred < 0:
        # NOTE(review): this assignment is dead - the branch returns
        # immediately without using y_pred again
        y_pred = 0
        return html.Img(src='https://media.giphy.com/media/7WqNJ99pmPIAM/giphy.gif'), ('Eww, These shoes suck!')
    else:
        return f'Estimated price ${y_pred:.02f} '
# else:
# return html.Img(src='assets/run_image.jpeg',className='img-fluid', style = {'height': '400px'})
layout = dbc.Row([column1, column2, column3])
| 960 | 0 | 22 |
a28907bbc899f839d0921ca6d26ae1f32abf8c59 | 495 | py | Python | parsec/commands/ftpfiles/get_ftp_files.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | parsec/commands/ftpfiles/get_ftp_files.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | parsec/commands/ftpfiles/get_ftp_files.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, list_output
@click.command('get_ftp_files')
@click.option(
"--deleted",
help="Whether to include deleted files",
is_flag=True
)
@pass_context
@custom_exception
@list_output
def cli(ctx, deleted=False):
"""Get a list of local files.
Output:
A list of dicts with details on individual files on FTP
"""
return ctx.gi.ftpfiles.get_ftp_files(deleted=deleted)
| 21.521739 | 59 | 0.743434 | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, list_output
# Thin Click command around the FTP-files API exposed by the connected
# instance client (ctx.gi); `list_output` formats the returned list for
# terminal display and `custom_exception` converts API errors.
@click.command('get_ftp_files')
@click.option(
    "--deleted",
    help="Whether to include deleted files",
    is_flag=True
)
@pass_context
@custom_exception
@list_output
def cli(ctx, deleted=False):
    """Get a list of local files.

Output:

    A list of dicts with details on individual files on FTP
    """
    return ctx.gi.ftpfiles.get_ftp_files(deleted=deleted)
| 0 | 0 | 0 |
2b73695ee15f57bc160ef5ef051f9a44802b59ad | 1,669 | py | Python | lib/kb_DRAM/utils/kbase_util.py | cshenry/kb_DRAM | bfd1d0dd938e7e9b7eb68cd38607e11335f53bf7 | [
"MIT"
] | null | null | null | lib/kb_DRAM/utils/kbase_util.py | cshenry/kb_DRAM | bfd1d0dd938e7e9b7eb68cd38607e11335f53bf7 | [
"MIT"
] | 2 | 2022-01-25T18:52:21.000Z | 2022-03-08T15:46:19.000Z | lib/kb_DRAM/utils/kbase_util.py | WrightonLabCSU/kb_DRAM | b1edcdd95adf5f20e13fade1443ca32710d66a63 | [
"MIT"
] | 1 | 2021-11-28T16:32:51.000Z | 2021-11-28T16:32:51.000Z | import os
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
| 42.794872 | 108 | 0.554224 | import os
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
def generate_product_report(callback_url, workspace_name, output_dir, product_html_loc, output_files,
                            output_objects=None):
    """Package a DRAM product HTML page plus output files into a KBase extended report.

    Parameters:
        callback_url: KBase callback-service URL used to construct the
            DataFileUtil and KBaseReport clients.
        workspace_name: workspace the report is created in.
        output_dir: directory whose contents are zipped and uploaded to Shock.
        product_html_loc: path of the generated product HTML; it is moved to
            ``<output_dir>/product.html`` so KBase can locate it by name.
        output_files: mapping of label -> file-link dict; entries whose
            ``'path'`` is None are skipped.
        output_objects: optional list of created objects to attach to the
            report (defaults to an empty list).

    Returns:
        The dict returned by ``KBaseReport.create_extended_report``.
    """
    # Avoid a mutable default argument.
    if output_objects is None:
        output_objects = []
    # setup utils
    datafile_util = DataFileUtil(callback_url)
    report_util = KBaseReport(callback_url)
    # Move the HTML into the top level of the directory uploaded to Shock so
    # the KBase report viewer can find it.
    html_file = os.path.join(output_dir, 'product.html')
    os.rename(product_html_loc, html_file)
    report_shock_id = datafile_util.file_to_shock({
        'file_path': output_dir,
        'pack': 'zip'
    })['shock_id']
    html_report = [{
        'shock_id': report_shock_id,
        'name': os.path.basename(html_file),
        'label': os.path.basename(html_file),
        'description': 'DRAM product.'
    }]
    # Only link files that were actually produced (path is not None);
    # iterate .values() directly since the keys are unused.
    file_links = [entry for entry in output_files.values() if entry['path'] is not None]
    report = report_util.create_extended_report({
        'message': 'Here are the results from your DRAM run.',
        'workspace_name': workspace_name,
        'html_links': html_report,
        'direct_html_link_index': 0,
        'file_links': file_links,
        'objects_created': output_objects,
    })
    return report
| 1,512 | 0 | 23 |
830ffeec5405c841ab717ae1e0f6fa9c2447b5e0 | 2,766 | py | Python | src/docker_publisher_osparc_services/gitlab_ci_setup/commands.py | ITISFoundation/ci-service-integration-library | ef79ffd61cbd2fda66866981045720e42a8dc6a0 | [
"MIT"
] | null | null | null | src/docker_publisher_osparc_services/gitlab_ci_setup/commands.py | ITISFoundation/ci-service-integration-library | ef79ffd61cbd2fda66866981045720e42a8dc6a0 | [
"MIT"
] | null | null | null | src/docker_publisher_osparc_services/gitlab_ci_setup/commands.py | ITISFoundation/ci-service-integration-library | ef79ffd61cbd2fda66866981045720e42a8dc6a0 | [
"MIT"
] | null | null | null | import re
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List
from ..models import RegistryEndpointModel, RepoModel
# Shell pipeline that logs docker into the target registry using the
# SCCI_TARGET_REGISTRY_* variables injected into the job environment.
DOCKER_LOGIN: str = (
    "echo ${SCCI_TARGET_REGISTRY_PASSWORD} | "
    "docker login ${SCCI_TARGET_REGISTRY_ADDRESS} --username ${SCCI_TARGET_REGISTRY_USER} --password-stdin"
)
# Ordered list of shell commands forming one CI job's script section.
CommandList = List[str]
# Build stage: clone the repo, generate docker-compose via ooil, build the
# image, then push it to the target registry under its test name.
COMMANDS_BUILD: CommandList = [
    "git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
    "cd ${SCCI_CLONE_DIR}",
    "ooil compose",
    "docker-compose build",
    DOCKER_LOGIN,
    "docker tag ${SCCI_IMAGE_NAME}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
    "docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
]
# Test stage prefix: clone and pull the image under test; user-defined test
# commands are appended after these.
COMMANDS_TEST_BASE: CommandList = [
    "git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
    "cd ${SCCI_CLONE_DIR}",
    DOCKER_LOGIN,
    "docker pull ${SCCI_CI_IMAGE_NAME}:${SCCI_TAG}",
    # if user defines extra commands those will be append here
]
# Push stage: retag the verified test image as the release image and push it.
COMMANDS_PUSH: CommandList = [
    DOCKER_LOGIN,
    "docker pull ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
    "docker tag ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
    "docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
]
def validate_commands_list(
    commands_list: CommandList, env_vars: Dict[str, str]
) -> None:
    """validation is run at runtime before assembling the gitlab ci spec

    Scans each command for ${NAME} placeholders and raises ValueError when a
    name starting with "SCCI" is not present in ``env_vars``; placeholders
    with other prefixes are ignored.
    """
    for command in commands_list:
        # Collect every ${NAME} placeholder in the command string.
        hits = re.findall(r"\$\{(.*?)\}", command)
        for hit in hits:
            if hit.startswith("SCCI") and hit not in env_vars:
                raise ValueError(
                    f"env var '{hit}'\ndefined in '{command}'\n "
                    f"not found default injected env vars '{env_vars}'"
                )
| 35.461538 | 146 | 0.691974 | import re
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List
from ..models import RegistryEndpointModel, RepoModel
# Shell pipeline that logs docker into the target registry using the
# SCCI_TARGET_REGISTRY_* variables injected into the job environment.
DOCKER_LOGIN: str = (
    "echo ${SCCI_TARGET_REGISTRY_PASSWORD} | "
    "docker login ${SCCI_TARGET_REGISTRY_ADDRESS} --username ${SCCI_TARGET_REGISTRY_USER} --password-stdin"
)
# Ordered list of shell commands forming one CI job's script section.
CommandList = List[str]
# Build stage: clone the repo, generate docker-compose via ooil, build the
# image, then push it to the target registry under its test name.
COMMANDS_BUILD: CommandList = [
    "git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
    "cd ${SCCI_CLONE_DIR}",
    "ooil compose",
    "docker-compose build",
    DOCKER_LOGIN,
    "docker tag ${SCCI_IMAGE_NAME}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
    "docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
]
# Test stage prefix: clone and pull the image under test; user-defined test
# commands are appended after these.
COMMANDS_TEST_BASE: CommandList = [
    "git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
    "cd ${SCCI_CLONE_DIR}",
    DOCKER_LOGIN,
    "docker pull ${SCCI_CI_IMAGE_NAME}:${SCCI_TAG}",
    # if user defines extra commands those will be append here
]
# Push stage: retag the verified test image as the release image and push it.
COMMANDS_PUSH: CommandList = [
    DOCKER_LOGIN,
    "docker pull ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
    "docker tag ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
    "docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
]
def assemble_env_vars(
    repo_model: RepoModel,
    registries: Dict[str, RegistryEndpointModel],
    image_name: str,
    tag: str,
) -> Dict[str, str]:
    """Build the SCCI_* environment map injected into a generated CI job.

    Maps the local image name through the repo's local->test->release
    renaming tables and resolves the target registry credentials.
    """
    # NOTE(review): TemporaryDirectory() creates a directory on disk, and the
    # unreferenced object is finalized (removing the directory) at GC time;
    # only the unique *path* is used here as the clone target. Confirm this
    # is intentional — tempfile.mkdtemp()/mktemp would be more explicit.
    clone_directory: Path = Path(TemporaryDirectory().name)
    registry: RegistryEndpointModel = registries[repo_model.registry.target]
    # Two-step rename: local image -> test image -> release image.
    test_image = repo_model.registry.local_to_test[image_name]
    release_image = repo_model.registry.test_to_release[test_image]
    return {
        "SCCI_REPO": repo_model.escaped_repo,
        "SCCI_CLONE_DIR": f"{clone_directory}",
        "SCCI_IMAGE_NAME": image_name,
        "SCCI_TAG": tag,
        "SCCI_TEST_IMAGE": test_image,
        "SCCI_RELEASE_IMAGE": release_image,
        "SCCI_TARGET_REGISTRY_ADDRESS": registry.address,
        "SCCI_TARGET_REGISTRY_PASSWORD": registry.password.get_secret_value(),
        "SCCI_TARGET_REGISTRY_USER": registry.user,
    }
def validate_commands_list(
    commands_list: CommandList, env_vars: Dict[str, str]
) -> None:
    """validation is run at runtime before assembling the gitlab ci spec

    Scans each command for ${NAME} placeholders and raises ValueError when a
    name starting with "SCCI" is missing from ``env_vars``; placeholders with
    other prefixes are ignored.
    """
    placeholder_pattern = re.compile(r"\$\{(.*?)\}")
    for command in commands_list:
        for hit in placeholder_pattern.findall(command):
            if not hit.startswith("SCCI") or hit in env_vars:
                continue
            raise ValueError(
                f"env var '{hit}'\ndefined in '{command}'\n "
                f"not found default injected env vars '{env_vars}'"
            )
| 854 | 0 | 23 |
774cc98c678585ea6d520b686c53a5f7a76078a0 | 1,013 | py | Python | lib/text.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | lib/text.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | lib/text.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# noc.core.text legacy wrappers
# flake8: noqa
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import warnings
# NOC modules
from noc.core.text import (
parse_table,
strip_html_tags,
xml_to_table,
list_to_ranges,
ranges_to_list,
replace_re_group,
indent,
split_alnum,
find_indented,
parse_kv,
str_dict,
quote_safe_path,
to_seconds,
format_table,
clean_number,
safe_shadow,
ch_escape,
tsv_escape,
parse_table_header,
)
from noc.core.deprecations import RemovedInNOC1905Warning
# Warn at import time: this module is a thin re-export shim kept for
# backward compatibility; new code should import from noc.core.text.
warnings.warn(
    "noc.lib.text is deprecated and will be removed in NOC 19.5. "
    "Please replace imports to noc.core.text",
    RemovedInNOC1905Warning,
    stacklevel=2,
)
| 23.55814 | 72 | 0.548865 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# noc.core.text legacy wrappers
# flake8: noqa
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import warnings
# NOC modules
from noc.core.text import (
parse_table,
strip_html_tags,
xml_to_table,
list_to_ranges,
ranges_to_list,
replace_re_group,
indent,
split_alnum,
find_indented,
parse_kv,
str_dict,
quote_safe_path,
to_seconds,
format_table,
clean_number,
safe_shadow,
ch_escape,
tsv_escape,
parse_table_header,
)
from noc.core.deprecations import RemovedInNOC1905Warning
# Warn at import time: this module is a thin re-export shim kept for
# backward compatibility; new code should import from noc.core.text.
warnings.warn(
    "noc.lib.text is deprecated and will be removed in NOC 19.5. "
    "Please replace imports to noc.core.text",
    RemovedInNOC1905Warning,
    stacklevel=2,
)
| 0 | 0 | 0 |
d0697da8d14c723f47789a41dc7d08caea4cfe72 | 27,642 | py | Python | axeda_api.py | TeamValvrave/pyCloundAPILib | 635350a2cd6236b000224d87b4922af8c76614b1 | [
"MIT"
] | 2 | 2015-03-29T03:16:28.000Z | 2019-03-15T07:16:05.000Z | axeda_api.py | TeamValvrave/pyCloundAPILib | 635350a2cd6236b000224d87b4922af8c76614b1 | [
"MIT"
] | null | null | null | axeda_api.py | TeamValvrave/pyCloundAPILib | 635350a2cd6236b000224d87b4922af8c76614b1 | [
"MIT"
] | 2 | 2015-03-29T03:16:29.000Z | 2019-03-15T07:14:31.000Z | # -*- coding: utf-8 -*-
import json
from xml.etree import ElementTree
import utils
from cloud import Cloud
def toDateTime(c):
	"""
	Convert c to a dateTime value.

	FIXME: not implemented — always returns None, so every dateTime-typed
	field handled through this helper is stored as None.
	"""
	return None
class TypeAbstractPlatformObject(TypeabstractPlatformObjectBase):
	"""
	Common identity attributes carried by a platform object reference.

	Format: attributes
	@id: string
	@systemId: string
	@label: string
	@detail: string
	@restUrl: string
	"""
class TypeDataItemCollection(TypeabstractPlatformObjectBase):
	"""
	A collection of Data Item references.

	Format:
	@dataItem: list of DataItemReference
	"""
class TypeAbstractSearchCriteria(TypeabstractPlatformObjectBase):
	"""
	Pagination/sorting options shared by all search criteria.

	Format:
	attributes
	@pageSize: int
		Specify the number of entries per page of the results, if not specified defaults to MAX_PAGE_SIZE
	@pageNumber: int
		Specify which page of the results to return, if not specified defaults to DEFAULT_PAGE.
		Using the pageNumber pagination property affects which found object
		is returned by the findOne method. For example, pageNumber=1 returns the
		first matching found object, while pageNumber=3 returns the 3rd matching
		found object, etc.
	@sortAscending: boolean
	@sortPropertyName: string
	"""
class TypeDataItemCriteria(TypeAbstractSearchCriteria):
	"""
	Search criteria for Data Items.

	Format:
	@name: string
	@alias: string
	@modelId: string
		Model system id.
	@types: list of DataItemType ("ANALOG" / "DIGITAL" / "STRING")
	@readOnly: boolean
	@visible: boolean
	@forwarded: boolean
	@historicalOnly: boolean
	e.g, "name"
	"""
class TypeHistoricalDataItemValueCriteria(TypeAbstractSearchCriteria):
	"""
	Criteria selecting historical values of an asset's data items.

	Format:
	@assetId: string
	@dataItemIds: list
	@	item: string
	@startDate: dateTime
	@endDate: dateTime
	"""
class CurrentDataItemValueCriteria(dict):
	"""
	Plain-dict criteria for the findCurrentValues call.

	Format:
	@name: string
	@alias: string
	@assetId: string
		Asset system id.
	@types: list
	@readOnly: boolean
	@visible: boolean
	@forwarded: boolean
	@historicalOnly: boolean
	@pageSize: int
	@pageNumber: int
		Using the pageNumber pagination property affects which found object
		is returned by the findOne method. For example, pageNumber=1 returns the
		first matching found object, while pageNumber=3 returns the 3rd matching
		found object, etc.
	@sortAscending: bool
	@sortPropertyName: string
	e.g, "name"
	"""
class Axeda(Cloud):
	"""
	Base wrapper for the Axeda platform REST APIs.

	Axeda platform REST APIs
	https://<host>/artisan/apidocs/v1/
	https://<host>/artisan/apidocs/v2/
	"""
class Auth(Axeda):
	"""
	Session management for the v1 Auth REST service.

	API
	https://<host>/services/v1/rest/Auth?_wadl
	"""
	def login(self, username = None, password = None, timeout = 1800):
		"""
		Creates a new session (sessionId) for the related authenticated user.

		Falsy arguments fall back to the credentials/timeout stored on the
		instance. On success the session id is cached on self.session_id and
		True is returned; any HTTP failure returns False.

		Note that the platform attaches a timeout to each session: once it
		expires, further web-service calls return "access denied" errors, so
		callers should handle session expiry.
		"""
		username = username or self.username
		password = password or self.password
		timeout = timeout or self.timeout
		# NOTE(review): relies on self.url_prefix, which is not set by the
		# visible Axeda initializer (only v1_url_prefix/v2_url_prefix are) —
		# confirm where url_prefix is assigned.
		url = self.url_prefix + 'login?principal.username=' + username + \
			'&password=' + password + '&sessionTimeout=' + str(timeout)
		headers = { 'Accept': 'application/json' } if self.json else None
		r = utils.get(url, headers = headers, ssl = self.ssl)
		if r.status_code != 200:
			return False
		if self.json:
			self.session_id = str(json.loads(r.content)['wsSessionInfo']['sessionId'])
		else:
			self.session_id = str(utils.parse_xml(r.content, 'sessionId', self.name_space))
		return bool(self.session_id)
	def logout(self, sessionid = None):
		"""
		Ends the session for the related user: invalidates the session id so
		that it can no longer be used.

		Uses the explicit sessionid argument when given, otherwise the cached
		self.session_id. Returns True (and clears the cache) when the
		platform confirms with HTTP 204; False otherwise.
		"""
		if not self.session_id and not sessionid:
			return False
		url = self.url_prefix + 'logout?sessionid='
		url += sessionid if sessionid else self.session_id
		r = utils.get(url, ssl = self.ssl)
		if r.status_code != 204:
			return False
		self.session_id = None
		return True
class Scripto(Axeda):
	"""
	Wrapper for the v1 Scripto REST service (no operations defined here).

	API
	https://<host>/services/v1/rest/Scripto/?_wadl
	"""
class Asset(Axeda):
	"""
	Asset Object APIs
	https://<host>/services/v2/rest/asset?_wadl
	"""
	def findOne(self, s):
		"""
		Finds the first Asset that meets the specified criteria.

		@s: must be a TypeAssetCriteria (enforced via assert below).
		Returns the decoded JSON response, or None on failure.
		"""
		self.checkParameter((s,))
		if not isinstance(s, TypeAssetCriteria):
			assert(False)
		url = self.setURL("findOne")
		headers = self.setHeaders(json = True)
		# FIXME: either mode doesn't working with Axeda but Mashery.
		if True:
			payload = s.toJson()
		else:
			# NOTE(review): dead branch — both arms reference an undefined
			# name `c` and would raise NameError if ever enabled.
			if True:
				payload = \
				'''<?xml version="1.0" encoding="UTF-8"?><AssetCriteria xmlns="http://www.axeda.com/services/v2"><modelNumber>''' + c["modelNumber"] + '''</modelNumber><serialNumber>''' + c["serialNumber"] + '''</serialNumber></AssetCriteria>'''
			else: # also work with Mashery
				payload = \
				'''<v2:AssetCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><v2:modelNumber>''' + c["modelNumber"] + '''</v2:modelNumber><v2:serialNumber>''' + c["serialNumber"] + '''</v2:serialNumber></v2:AssetCriteria>'''
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findByIds(self, asset_id, fast = True):
		"""
		Finds the specified SDK objects based on the ids provided, and returns an
		unordered list of found objects. This method will accept only platform ids,
		and does not support aternate ids.

		@fast: when True, GETs /id/<asset_id> directly; otherwise POSTs an
		XML id collection to findByIds.
		Returns the raw response body (not JSON-decoded), or None on failure.
		"""
		self.checkParameter((asset_id,))
		if fast:
			url = self.setURL("id/" + str(asset_id))
			headers = self.setHeaders(json = False)
			r = self.getRequest(url, headers)
		else:
			url = self.setURL('findByIds')
			headers = self.setHeaders(json = False)
			if False:
				payload = json.dumps({
					"id": asset_id
				})
			else:
				payload = \
				'''<IdCollection xmlns="http://www.axeda.com/services/v2">
				<id>''' + str(asset_id) + '''</id></IdCollection>'''
			# NOTE(review): posts to the literal 'findByIds' rather than the
			# `url` built above — confirm postRequest expects a path here.
			r = self.postRequest('findByIds', headers, payload)
		if r is not None and r.status_code == 200:
			return r.content
		else:
			return None
class DataItem(Axeda):
	"""
	API
	https://<host>/services/v2/rest/dataItem?_wadl
	"""
	def create(self, name, model_name, type, alias = ""):
		"""
		Creates a new Data Item.
		@name:
		@model:
		@type:
			DIGITAL
			ANALOG
			STRING
		Returns the decoded JSON response, or None on failure.
		"""
		self.checkParameter((name, model_name, type))
		if type not in ("DIGITAL", "ANALOG", "STRING"):
			assert(False)
		url = self.setURL("")
		headers = self.setHeaders(json = False)
		if False:
			# NOTE(review): dead branch with hard-coded test values.
			payload = json.dumps({
				"name": "valvrave_test_string",
				"type": "STRING",
			})
		else:
			payload = \
			'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
			<v2:name>''' + name + '''</v2:name>
			<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
			<v2:type>''' + type + '''</v2:type>
			</v2:DataItem>'''
		r = self.putRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def save(self, name, model_name, type, alias = ""):
		"""
		Save a new Data Item.
		Note:
			This same REST call as a create() invokes a Save operation.
		@name:
		@model:
		@type:
			DIGITAL
			ANALOG
			STRING
		Return value:
			{
			"successful":true,
			"totalCount":1,
			"succeeded":[
			{
			"ref":"es2015-Galileo-Gen2||valvrave_test_string3",
			"id":"427"
			}],
			"failures":[]
			}
		"""
		self.checkParameter((name, model_name, type,))
		if type not in ("DIGITAL", "ANALOG", "STRING"):
			assert(False)
		url = self.setURL("")
		headers = self.setHeaders(json = False)
		if False:
			# NOTE(review): dead branch with hard-coded test values.
			payload = json.dumps({
				"name": "valvrave_test_string",
				"id": "es2015-Galileo-Gen2||valvrave_test_string",
				"type": "STRING",
			})
		else:
			payload = \
			'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
			<v2:name>''' + name + '''</v2:name>
			<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
			<v2:type>''' + type + '''</v2:type>
			</v2:DataItem>'''
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def update(self, dataitem_id, name, model_name, type, alias = ""):
		"""
		Updates an existing Data Item.

		Returns the raw response body (not JSON-decoded), or None on failure.
		"""
		self.checkParameter((dataitem_id, name, model_name, type))
		if type not in ("DIGITAL", "ANALOG", "STRING"):
			assert(False)
		url = self.setURL("id/" + str(dataitem_id))
		headers = self.setHeaders(json = False)
		if False:
			# NOTE(review): dead branch — references undefined name `model`
			# (the parameter is model_name); NameError if ever enabled.
			payload = json.dumps(
				{
					"name": name,
					"model": [{
						"objType": "ModelReference", "id": model
					}],
					"type": type
				})
		else:
			payload = \
			'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2" systemId="''' + str(dataitem_id) + '''">
			<v2:name>''' + name + '''</v2:name>
			<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
			<v2:type>''' + type + '''</v2:type>
			<v2:alias>''' + alias + '''</v2:alias>
			</v2:DataItem>'''
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return r.content
		else:
			return None
	def delete(self, dataitem_id):
		"""
		Deletes a data item.

		Returns the decoded JSON response, or None on failure.
		"""
		self.checkParameter((dataitem_id,))
		url = self.setURL("id/" + str(dataitem_id))
		headers = self.setHeaders(json = False)
		r = self.deleteRequest(url, headers)
		if r is not None and r.status_code == 200:
			#return TypeExecutionResult(json.loads(r.content))
			return json.loads(r.content)
		else:
			return None
	def find(self, **s):
		"""
		Finds Data Items based on search criteria.
		@criteria: a complete criteria is defined as:
			alias: (null)
			modelId: (null)
			types: ([])
			readOnly": (null)
			visible": (null)
			forwarded: (null)
			historicalOnly: (null)
			pageSize": (null)
			pageNumber: (null)
			sortAscending: (null)
			sortPropertyName: (null)
		"""
		url = self.setURL("find")
		headers = self.setHeaders(json = True)
		if True:
			payload = json.dumps(s)
		else:
			payload = None
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findOne(self, **s):
		"""
		Returns the first Data Item found that meets specified search criteria.
		@criteria: See the class Criteria.
		Note:
			Essentially this API equals to find() with pageNumber=1.
		"""
		url = self.setURL("findOne")
		headers = self.setHeaders(json = True)
		if True:
			payload = json.dumps(s)
		else:
			payload = None
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findByIds(self, dataitem_ids):
		"""
		Finds the specified SDK objects based on the ids provided, and returns an
		unordered list of found objects. This method will accept only platform ids,
		and does not support aternate ids.
		"""
		self.checkParameter((dataitem_ids,))
		url = self.setURL("findByIds")
		headers = self.setHeaders(json = False)
		if False:
			# NOTE(review): dead branch — references undefined `dataitem_id`
			# (the parameter is dataitem_ids); NameError if ever enabled.
			payload = json.dumps({
				"id": dataitem_id
			})
			# FIXME: which one
			payload = json.dumps({
				"dataItem": [{
					"systemId": str(dataitem_id)
				}]
			})
		else:
			payload = '''<IdCollection xmlns="http://www.axeda.com/services/v2">'''
			for i in dataitem_ids:
				payload += '<id>' + str(i) + '</id>'
			payload += '</IdCollection>'
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findById(self, dataitem_id):
		"""
		Finds an Data Item based on its platform identifier.
		@dataitem_id
		"""
		self.checkParameter((dataitem_id,))
		url = self.setURL("id/" + str(dataitem_id))
		headers = self.setHeaders(json = False)
		r = self.getRequest(url, headers)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findByAlternateId(self, name, model_name):
		"""
		Finds a Data Item based on the alternate identifier.
		@alternate_id
			The alternate ID of a Data Item takes the following format:
				ModelNumber||dataItemName
		"""
		self.checkParameter((name, model_name))
		alternate_id = model_name + "||" + name
		url = self.setURL(alternate_id)
		headers = self.setHeaders(json = False)
		r = self.getRequest(url, headers)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findCurrentValues(self, criteria):
		"""
		Returns the current values of the specified Data Items.
		Note:
			For the findCurrentValues method, the assetId input field is required.
		"""
		self.checkParameter((criteria,))
		c = CurrentDataItemValueCriteria(criteria)
		url = self.setURL("findCurrentValues")
		headers = self.setHeaders(json = True)
		if True:
			payload = json.dumps(c)
		else:
			# NOTE(review): dead branch — reads criteria['asset_id'], which
			# does not match the documented 'assetId' key; verify the key.
			payload = \
			'''<v2:CurrentDataItemValueCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
			<v2:assetId>''' + str(criteria['asset_id']) + '''</v2:assetId></v2:CurrentDataItemValueCriteria>'''
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def getSourceDataItems(self, id):
		"""
		Returns the source Data Items associated with the specified source Data Item.
		"""
		self.checkParameter((id,))
		url = self.setURL("id/" + str(id) + "/sourceDataItems")
		headers = self.setHeaders(json = False)
		r = self.getRequest(url, headers)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def getTargetDataItems(self, id):
		"""
		Returns the source Data Items associated with the specified target Data Item.
		"""
		self.checkParameter((id,))
		url = self.setURL("id/" + str(id) + "/targetDataItems")
		headers = self.setHeaders(json = False)
		r = self.getRequest(url, headers)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def findHistoricalValues(self, **p):
		"""
		Returns the historical values of the specified Data Items.
		"""
		url = self.setURL("findHistoricalValues")
		headers = self.setHeaders(json = True)
		if True:
			payload = json.dumps(p)
		else:
			# NOTE(review): dead branch — references undefined `criteria`
			# (this method only takes **p); NameError if ever enabled.
			payload = \
			'''<v2:CurrentDataItemValueCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
			<v2:assetId>''' + str(criteria['asset_id']) + '''</v2:assetId></v2:CurrentDataItemValueCriteria>'''
		r = self.postRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
	def bulkCreate(self, c):
		"""
		Creates a new Data Item.
		@name:
		@model:
		@type:
			DIGITAL
			ANALOG
			STRING
		"""
		self.checkParameter((c,))
		# NOTE(review): the validation object is constructed and discarded.
		TypeDataItemCollection(c)
		url = self.setURL("bulk/create")
		headers = self.setHeaders(json = False)
		if False:
			payload = json.dumps({
				"name": name,
				"type": type,
				"model": {
					"id": id,
				},
			})
		else:
			# NOTE(review): BUG — `name`, `model` and `type` are not defined
			# in this scope (the only parameter is `c`), so this branch
			# raises NameError at runtime.
			payload = \
			'''<DataItemCollection xmlns="http://www.axeda.com/services/v2">
			<dataItem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="DataItem">
			<name>''' + name + '''</name><model id="''' + model + '''"/><type>''' + type + '''</type>
			</dataItem></DataItemCollection>'''
		r = self.putRequest(url, headers, payload)
		if r is not None and r.status_code == 200:
			return json.loads(r.content)
		else:
			return None
| 26.707246 | 315 | 0.685081 | # -*- coding: utf-8 -*-
import json
from xml.etree import ElementTree
import utils
from cloud import Cloud
def toString(c):
	"""Coerce c to str; None passes through unchanged (identity check per PEP 8)."""
	return str(c) if c is not None else None
def toBool(c):
	"""Coerce c to bool; None passes through unchanged (identity check per PEP 8)."""
	return bool(c) if c is not None else None
def toInt(c):
	"""Coerce c to int; None passes through unchanged (identity check per PEP 8)."""
	return int(c) if c is not None else None
def toList(c):
	"""Coerce c to a list; None passes through unchanged (identity check per PEP 8)."""
	return list(c) if c is not None else None
def toDateTime(c):
	"""
	Convert c to a dateTime value.

	FIXME: not implemented — always returns None, so every dateTime-typed
	field handled through this helper is stored as None.
	"""
	return None
class TypeabstractPlatformObjectBase(object):
	"""Base holder for a platform object's field dict.

	Keyword overrides in **p are merged into val (mutating the caller's
	dict) and the merged dict is retained as the stored value.
	"""
	def __init__(self, val, **p):
		val.update(p)
		self.stored_value = val
	def getValue(self):
		"""Return the stored dict, or None when it is empty/falsy."""
		return self.stored_value or None
	def toJson(self):
		"""Serialize the stored value as a JSON string."""
		return json.dumps(self.stored_value)
	def toXml(self):
		"""XML serialization is not implemented; returns None."""
		pass
class TypeAbstractPlatformObject(TypeabstractPlatformObjectBase):
	"""
	Common identity attributes carried by a platform object reference.

	Format: attributes
	@id: string
	@systemId: string
	@label: string
	@detail: string
	@restUrl: string
	"""
	def __init__(self, id, systemId, label, detail, restUrl):
		fields = {
			"id": id,
			"systemId": systemId,
			"label": label,
			"detail": detail,
			"restUrl": restUrl,
		}
		TypeabstractPlatformObjectBase.__init__(self, fields)
class TypeDataItemReference(TypeAbstractPlatformObject):
	"""Reference to a Data Item; carries the same attribute set as
	TypeAbstractPlatformObject (id, systemId, label, detail, restUrl)."""
	def __init__(self, id, systemId, label, detail, restUrl):
		TypeAbstractPlatformObject.__init__(self, id, systemId, label, detail, restUrl)
class TypeDataItemCollection(TypeabstractPlatformObjectBase):
	"""
	Format:
	@dataItem: list of DataItemReference
	"""
	def __init__(self, dataItemRefernences):
		# NOTE(review): TypeAbstractPlatformObject.__init__ takes five
		# arguments (id, systemId, label, detail, restUrl); calling it with a
		# single element raises TypeError, so this validation loop cannot
		# succeed for non-empty input — confirm the intended element type.
		for i in dataItemRefernences:
			TypeAbstractPlatformObject(i)
		# NOTE(review): dict.__init__ is invoked on an instance whose class
		# does not inherit from dict (TypeabstractPlatformObjectBase derives
		# from object), which raises TypeError at runtime.
		dict.__init__(self, list(dataItemRefernences))
class TypeAbstractSearchCriteria(TypeabstractPlatformObjectBase):
	"""
	Pagination/sorting options shared by all search criteria.

	Format:
	attributes
	@pageSize: int
		Number of entries per page of the results; defaults to
		MAX_PAGE_SIZE when not specified.
	@pageNumber: int
		Which page of the results to return; defaults to DEFAULT_PAGE.
		pageNumber also selects which match findOne returns (1 -> first
		match, 3 -> third match, ...).
	@sortAscending: boolean
	@sortPropertyName: string
	"""
	def __init__(self, pageSize, pageNumber, sortAscending, sortPropertyName, **p):
		# Normalize each field, keep it as an attribute, and mirror it into
		# the stored-value dict handed to the base class.
		self.pageSize = toInt(pageSize)
		self.pageNumber = toInt(pageNumber)
		self.sortAscending = toBool(sortAscending)
		self.sortPropertyName = toString(sortPropertyName)
		fields = {
			"pageSize": self.pageSize,
			"pageNumber": self.pageNumber,
			"sortAscending": self.sortAscending,
			"sortPropertyName": self.sortPropertyName,
		}
		TypeabstractPlatformObjectBase.__init__(self, fields, **p)
class TypeDataItemCriteria(TypeAbstractSearchCriteria):
	"""
	Search criteria for Data Items.

	Format:
	@name: string
	@alias: string
	@modelId: string
		Model system id.
	@types: list of DataItemType ("ANALOG" / "DIGITAL" / "STRING")
	@readOnly: boolean
	@visible: boolean
	@forwarded: boolean
	@historicalOnly: boolean
	e.g, "name"
	"""
	# (field, converter) pairs, in the order they appear in the stored value.
	_FIELDS = (
		("name", toString),
		("alias", toString),
		("modelId", toString),
		("types", toList),
		("readOnly", toBool),
		("visible", toBool),
		("forwarded", toBool),
		("historicalOnly", toBool),
	)
	def __init__(self, **p):
		extra = {}
		for field, convert in self._FIELDS:
			value = convert(p.get(field))
			setattr(self, field, value)
			extra[field] = value
		TypeAbstractSearchCriteria.__init__(
			self,
			pageSize = p.get("pageSize"),
			pageNumber = p.get("pageNumber"),
			sortAscending = p.get("sortAscending"),
			sortPropertyName = p.get("sortPropertyName"),
			**extra
		)
class TypeHistoricalDataItemValueCriteria(TypeAbstractSearchCriteria):
	"""
	Criteria selecting historical values of an asset's data items.

	Format:
	@assetId: string
	@dataItemIds: list
	@	item: string
	@startDate: dateTime
	@endDate: dateTime
	"""
	def __init__(self, assetId, dataItemIds, startDate, endDate):
		self.assetId = toString(assetId)
		self.startDate = toDateTime(startDate)
		self.endDate = toDateTime(endDate)
		dataItemIds = toList(dataItemIds)
		d = []
		self.dataItemIds = []
		if dataItemIds:
			for i in dataItemIds:
				#d.append({"item": str(i)})
				d.append(str(i))
				self.dataItemIds.append(str(i))
		# NOTE(review): `d` and self.dataItemIds hold identical data; only
		# `d` ends up in the stored value.
		# NOTE(review): skips TypeAbstractSearchCriteria.__init__ and calls
		# the root base directly, so no pagination/sorting fields are set.
		TypeabstractPlatformObjectBase.__init__(self, {
			"assetId": self.assetId,
			"dataItemIds": d,
			"startDate": self.startDate,
			"endDate": self.endDate
		})
class TypeSuccessfulOperation(TypeabstractPlatformObjectBase):
	"""Per-item success entries of a bulk-operation result."""
	def __init__(self, p):
		# FIXME: <xs:sequence>
		self.succeeded = []
		for s in toList(p):
			self.succeeded.append({
				"ref": toString(s.get("ref")),
				"id": toString(s.get("id"))
			})
		# NOTE(review): passes a *list* as the base-class stored value;
		# TypeabstractPlatformObjectBase.__init__ calls val.update(p), which
		# raises AttributeError for a list. Also toList(None) returns None,
		# which would make the for-loop above raise TypeError.
		TypeabstractPlatformObjectBase.__init__(self, self.succeeded)
class TypeFailedOperationDetails(TypeabstractPlatformObjectBase):
	"""Detail payload of a failed bulk-operation entry."""
	def __init__(self, p):
		# <xs:sequence/>
		# NOTE(review): toList(p) yields None or a list; the base __init__
		# calls .update() on it, which fails for both — confirm intent.
		TypeabstractPlatformObjectBase.__init__(self, toList(p))
class TypeFailedOperation(TypeabstractPlatformObjectBase):
	"""Per-item failure entries of a bulk-operation result."""
	def __init__(self, p):
		# FIXME: <xs:sequence>
		self.failures = []
		for s in toList(p):
			self.failures.append({
				"ref": toString(s.get("ref")),
				"message": toString(s.get("message")),
				"details": TypeFailedOperationDetails(s.get("details")).getValue(),
				"sourceOfFailure": toString(s.get("sourceOfFailure")),
				# Attribute
				"code": toString(s.get("code"))
			})
		# NOTE(review): passes a *list* as the base-class stored value; the
		# base __init__ calls val.update(p), which raises AttributeError for
		# a list. toList(None) also returns None, breaking the loop above.
		TypeabstractPlatformObjectBase.__init__(self, self.failures)
class TypeAbstractExecutionResult(TypeabstractPlatformObjectBase):
	"""Aggregate result of a bulk operation (successes, failures, counts).

	NOTE(review): relies on TypeSuccessfulOperation/TypeFailedOperation,
	whose constructors currently raise (see notes on those classes).
	"""
	def __init__(self, p):
		self.succeeded = TypeSuccessfulOperation(p.get("succeeded")).getValue()
		self.failures = TypeFailedOperation(p.get("failures")).getValue()
		# Attributes
		self.successful = toBool(p.get("successful"))
		self.totalCount = toInt(p.get("totalCount"))
		TypeabstractPlatformObjectBase.__init__(self, {
			"succeeded": self.succeeded,
			"failures": self.failures,
			"successful": self.successful,
			"totalCount": self.totalCount
		})
class TypeExecutionResult(TypeAbstractExecutionResult):
	"""Concrete execution result; behaviorally identical to its base."""
	def __init__(self, p):
		TypeAbstractExecutionResult.__init__(self, p)
class TypeAssetCriteria(TypeAbstractSearchCriteria):
	"""Search criteria for Assets; see the stored-value keys below."""
	def __init__(self, **p):
		self.gatewayId = toString(p.get("gatewayId"))
		self.name = toString(p.get("name"))
		self.modelNumber = toString(p.get("modelNumber"))
		self.serialNumber = toString(p.get("serialNumber"))
		self.organizationName = toString(p.get("organizationName"))
		self.locationName = toString(p.get("locationName"))
		self.regionName = toString(p.get("regionName"))
		self.assetGroupName = toString(p.get("assetGroupName"))
		self.systemName = toString(p.get("systemName"))
		self.gatewayName = toString(p.get("gatewayName"))
		self.gatewayOnly = toBool(p.get("gatewayOnly"))
		self.backupAgentsOnly = toString(p.get("backupAgentsOnly"))
		self.packageName = toString(p.get("packageName"))
		self.packageVersion = toString(p.get("packageVersion"))
		self.withoutPackage = toBool(p.get("withoutPackage"))
		self.muted = toBool(p.get("muted"))
		self.conditionId = toString(p.get("conditionId"))
		self.toLastContactDate = toDateTime(p.get("toLastContactDate"))
		self.fromLastContactDate = toDateTime(p.get("fromLastContactDate"))
		#@dataItem: DataItemValueCriteria
		self.hasEventsSince = toDateTime(p.get("hasEventsSince"))
		#@geofence: GeofenceCriteria
		self.showAssetsWithAlarms = toBool(p.get("showAssetsWithAlarms"))
		self.propertyName = toString(p.get("propertyName"))
		#@item: string list
		#@propertyNamesMatchType:PropertyNamesMatchType
		self.propertyValue = toString(p.get("propertyValue"))
		self.includeDetails = toBool(p.get("includeDetails"))
		self.missing = toBool(p.get("missing"))
		self.neverRegistered = toBool(p.get("neverRegistered"))
		self.inMachineStream = toBool(p.get("inMachineStream"))
		# NOTE(review): BUG — these positional arguments bind to
		# TypeAbstractSearchCriteria(pageSize, pageNumber, sortAscending,
		# sortPropertyName), so e.g. assetId is run through toInt() as the
		# page size. The argument list matches
		# TypeHistoricalDataItemValueCriteria's signature instead.
		TypeAbstractSearchCriteria.__init__(self,
			p.get("assetId"), p.get("dataItemIds"), p.get("startDate"), p.get("endDate")
		)
		# NOTE(review): this second base call replaces the stored value set
		# just above, discarding the pagination/sorting fields.
		TypeabstractPlatformObjectBase.__init__(self, {
			"gatewayId": self.gatewayId,
			"name": self.name,
			"modelNumber": self.modelNumber,
			"serialNumber": self.serialNumber,
			"organizationName": self.organizationName,
			"locationName": self.locationName,
			"regionName": self.regionName,
			"assetGroupName": self.assetGroupName,
			"systemName": self.systemName,
			"gatewayName": self.gatewayName,
			"gatewayOnly": self.gatewayOnly,
			"backupAgentsOnly": self.backupAgentsOnly,
			"packageName": self.packageName,
			"packageVersion": self.packageVersion,
			"withoutPackage": self.withoutPackage,
			"muted": self.muted,
			"conditionId": self.conditionId,
			"toLastContactDate": self.toLastContactDate,
			"fromLastContactDate": self.fromLastContactDate,
			#@dataItem: DataItemValueCriteria
			"hasEventsSince": self.hasEventsSince,
			#@geofence: GeofenceCriteria
			"showAssetsWithAlarms": self.showAssetsWithAlarms,
			"propertyName": self.propertyName,
			#@propertyNamesMatchType:PropertyNamesMatchType
			"propertyValue": self.propertyValue,
			"includeDetails": self.includeDetails,
			"missing": self.missing,
			"neverRegistered": self.neverRegistered,
			"inMachineStream": self.inMachineStream
		})
class CurrentDataItemValueCriteria(dict):
    """Search criteria for current data-item value queries.

    A thin dict wrapper; the recognized keys are:

    @name: string
    @alias: string
    @assetId: string
        Asset system id.
    @types: list
    @readOnly: boolean
    @visible: boolean
    @forwarded: boolean
    @historicalOnly: boolean
    @pageSize: int
    @pageNumber: int
        Using the pageNumber pagination property affects which found object
        is returned by the findOne method. For example, pageNumber=1 returns
        the first matching found object, while pageNumber=3 returns the 3rd
        matching found object, etc.
    @sortAscending: bool
    @sortPropertyName: string
        e.g. "name"
    """
    def __init__(self, criteria):
        # Start empty, then copy the caller-supplied criteria mapping in.
        dict.__init__(self)
        self.update(criteria)
class Axeda(Cloud):
    """
    Axeda platform REST APIs
    https://<host>/artisan/apidocs/v1/
    https://<host>/artisan/apidocs/v2/
    """
    def __init__(self, config):
        Cloud.__init__(self, "Axeda", config)
        # Mandatory settings: fail fast when any of them is absent.
        for key in ('name', 'username', 'password'):
            if self.get(key) is None:
                assert(False)
        for key in ('asset', 'model'):
            if not self.get(key):
                assert(False)
        self.config = config
        self.username = self.get('username')
        self.password = self.get('password')
        self.timeout = self.get('timeout')
        self.session_id = None
        # None when SSL was not configured at all.
        self.ssl = self.get('ssl')
        self.debug = config["debug"] if config.get('debug') else False
        # Talk JSON to the platform unless explicitly configured otherwise.
        json_cfg = self.get('json')
        self.json = True if json_cfg is None else json_cfg
        # `== True` intentionally: only the literal True selects https.
        scheme = 'https' if self.ssl == True else 'http'
        base = scheme + '://' + config['name']
        self.v1_url_prefix = base + '/services/v1/rest/'
        self.v2_url_prefix = base + '/services/v2/rest/'
        if self.json:
            self.name_space = None
        else:
            # XML mode needs the v1 type namespace registered globally.
            self.name_space = 'http://type.v1.webservices.sl.axeda.com'
            ElementTree.register_namespace('', self.name_space)
    def isDebug(self):
        # Expose the debug flag captured at construction time.
        return self.debug
    def checkParameter(self, opts):
        # Every required argument must be truthy; abort otherwise.
        if not all(opts):
            assert(False)
    def setURL(self, api):
        # Credentials travel in headers (see setHeaders), not the query string.
        return self.url_prefix + api
    def setHeaders(self, json = None):
        """Build request headers; responses are always requested as JSON,
        the request body type follows `json` (or the instance default)."""
        use_json = json if json is not None else self.json
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json" if use_json == True else "application/xml",
        }
        # Prefer an established session; otherwise send raw credentials.
        if self.session_id:
            headers["x_axeda_wss_sessionid"] = self.session_id
        else:
            headers["x_axeda_wss_username"] = self.username
            headers["x_axeda_wss_password"] = self.password
        return headers
    def auth(self):
        # Factory: session management API bound to this configuration.
        return Auth(self.config)
    def scripto(self):
        # Factory: server-side script execution API.
        return Scripto(self.config)
    def asset(self):
        # Factory: asset object API.
        return Asset(self.config)
    def dataItem(self):
        # Factory: data item object API.
        return DataItem(self.config)
class Auth(Axeda):
    """
    Session (login/logout) management.

    API
    https://<host>/services/v1/rest/Auth?_wadl
    """
    def __init__(self, config):
        # BUG FIX: Axeda.__init__ accepts only (self, config); the previous
        # extra positional argument (False) raised TypeError on every
        # instantiation of Auth.
        Axeda.__init__(self, config)
        self.url_prefix = self.v1_url_prefix + 'Auth/'
    def login(self, username = None, password = None, timeout = 1800):
        """
        Creates a new session (sessionId) for the related authenticated user.
        Note that when Axeda Platform creates a session for a user, a timeout is
        defined for that session. The session will be valid only while the session
        is effective; if the session times out, additional calls to the Web services
        will return "access denied" errors. Your code should implement error
        handling to ensure the session is still valid.

        Falsy arguments fall back to the values configured on the instance.
        Returns True when a session id was obtained, False otherwise.
        """
        if not username:
            username = self.username
        if not password:
            password = self.password
        if not timeout:
            timeout = self.timeout
        url = (self.url_prefix + 'login?principal.username=' + username +
               '&password=' + password + '&sessionTimeout=' + str(timeout))
        # JSON mode needs an explicit Accept header; XML is the server default.
        headers = { 'Accept': 'application/json' } if self.json else None
        r = utils.get(url, headers = headers, ssl = self.ssl)
        if r.status_code != 200:
            return False
        if self.json:
            self.session_id = str(json.loads(r.content)['wsSessionInfo']['sessionId'])
        else:
            self.session_id = str(utils.parse_xml(r.content, 'sessionId', self.name_space))
        return bool(self.session_id)
    def logout(self, sessionid = None):
        """
        Ends the session for the related user. Invalidates the specified
        sessionId (or the one cached on this instance) such that it can no
        longer be used. Returns True on success (HTTP 204), False otherwise.
        """
        if not self.session_id and not sessionid:
            return False
        url = self.url_prefix + 'logout?sessionid=' + \
            (sessionid if sessionid else self.session_id)
        r = utils.get(url, ssl = self.ssl)
        if r.status_code != 204:
            return False
        # Drop the cached session so later calls re-authenticate.
        self.session_id = None
        return True
class Scripto(Axeda):
    """
    Server-side custom script ("Scripto") execution.

    API
    https://<host>/services/v1/rest/Scripto/?_wadl
    """
    def __init__(self, config, sessionid = None):
        Axeda.__init__(self, config)
        self.url_prefix = self.v1_url_prefix + 'Scripto/'
        self.session_id = sessionid
    def execute(self, app, data = None):
        """Run the named script and return its raw response body, or None
        on failure."""
        self.checkParameter((app,))
        # Scripto responses may be plain text, so request the XML content type.
        r = self.getRequest(self.setURL('execute/' + app),
                            self.setHeaders(json = False), data)
        return r.content if r is not None and r.status_code == 200 else None
class Asset(Axeda):
    """
    Asset Object APIs
    https://<host>/services/v2/rest/asset?_wadl
    """
    def __init__(self, config, sessionid = None):
        Axeda.__init__(self, config)
        self.url_prefix = self.v2_url_prefix + 'asset/'
        self.session_id = sessionid
    def find(self, serial_number):
        """
        Finds Assets whose serial number matches.
        Returns the decoded JSON response, or None on failure.
        """
        self.checkParameter((serial_number,))
        url = self.setURL("find")
        headers = self.setHeaders(json = False)
        # Only the XML criteria form worked against the platform here.
        payload = \
'''<AssetCriteria xmlns="http://www.axeda.com/services/v2">
<serialNumber>''' + serial_number + '''</serialNumber></AssetCriteria>'''
        r = self.postRequest(url, headers, payload)
        if r is not None and r.status_code == 200:
            return json.loads(r.content)
        else:
            return None
    def findOne(self, s):
        """
        Finds the first Asset that meets the specified criteria.
        @s: a TypeAssetCriteria instance.
        Returns the decoded JSON response, or None on failure.
        """
        self.checkParameter((s,))
        if not isinstance(s, TypeAssetCriteria):
            assert(False)
        url = self.setURL("findOne")
        headers = self.setHeaders(json = True)
        # BUG FIX: the dead XML branches referenced an undefined name `c`;
        # only the JSON serialization path was ever live, so keep just it.
        payload = s.toJson()
        r = self.postRequest(url, headers, payload)
        if r is not None and r.status_code == 200:
            return json.loads(r.content)
        else:
            return None
    def findByIds(self, asset_id, fast = True):
        """
        Finds the specified SDK objects based on the ids provided, and returns an
        unordered list of found objects. This method will accept only platform
        ids, and does not support alternate ids.
        @fast: when True use the direct id/<id> GET; otherwise POST a criteria
               document to findByIds.
        Returns the raw response body, or None on failure.
        """
        self.checkParameter((asset_id,))
        if fast:
            url = self.setURL("id/" + str(asset_id))
            headers = self.setHeaders(json = False)
            r = self.getRequest(url, headers)
        else:
            url = self.setURL('findByIds')
            headers = self.setHeaders(json = False)
            payload = \
'''<IdCollection xmlns="http://www.axeda.com/services/v2">
<id>''' + str(asset_id) + '''</id></IdCollection>'''
            # BUG FIX: the request must target the fully built URL; the bare
            # API name 'findByIds' was previously passed instead of `url`.
            r = self.postRequest(url, headers, payload)
        if r is not None and r.status_code == 200:
            return r.content
        else:
            return None
class DataItem(Axeda):
    """
    Data Item object APIs.

    API
    https://<host>/services/v2/rest/dataItem?_wadl

    Unless noted otherwise, methods return the decoded JSON response body on
    HTTP 200 and None on any other outcome.
    """
    def __init__(self, config, sessionid = None):
        Axeda.__init__(self, config)
        self.url_prefix = self.v2_url_prefix + 'dataItem/'
        self.session_id = sessionid
    def _decode(self, r):
        # Shared response handling: JSON body on success, None otherwise.
        if r is not None and r.status_code == 200:
            return json.loads(r.content)
        else:
            return None
    def create(self, name, model_name, type, alias = ""):
        """
        Creates a new Data Item (HTTP PUT).
        @name: data item name
        @model_name: model the data item belongs to
        @type: one of DIGITAL, ANALOG, STRING
               (parameter name shadows the builtin but is kept for
               backward compatibility with existing callers)
        @alias: accepted for signature compatibility; not sent by this call
        """
        self.checkParameter((name, model_name, type))
        if type not in ("DIGITAL", "ANALOG", "STRING"):
            assert(False)
        url = self.setURL("")
        headers = self.setHeaders(json = False)
        payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
</v2:DataItem>'''
        return self._decode(self.putRequest(url, headers, payload))
    def save(self, name, model_name, type, alias = ""):
        """
        Saves a new Data Item (HTTP POST). Same payload as create(); the POST
        form invokes the platform's Save operation.
        @name / @model_name / @type / @alias: see create().
        Return value example:
            {
                "successful":true,
                "totalCount":1,
                "succeeded":[
                    {
                        "ref":"es2015-Galileo-Gen2||valvrave_test_string3",
                        "id":"427"
                    }],
                "failures":[]
            }
        """
        self.checkParameter((name, model_name, type,))
        if type not in ("DIGITAL", "ANALOG", "STRING"):
            assert(False)
        url = self.setURL("")
        headers = self.setHeaders(json = False)
        payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
</v2:DataItem>'''
        return self._decode(self.postRequest(url, headers, payload))
    def update(self, dataitem_id, name, model_name, type, alias = ""):
        """
        Updates an existing Data Item.
        @dataitem_id: platform id of the item to update.
        Returns the raw response body (historical behavior: not JSON-decoded),
        or None on failure.
        """
        self.checkParameter((dataitem_id, name, model_name, type))
        if type not in ("DIGITAL", "ANALOG", "STRING"):
            assert(False)
        url = self.setURL("id/" + str(dataitem_id))
        headers = self.setHeaders(json = False)
        payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2" systemId="''' + str(dataitem_id) + '''">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
<v2:alias>''' + alias + '''</v2:alias>
</v2:DataItem>'''
        r = self.postRequest(url, headers, payload)
        if r is not None and r.status_code == 200:
            return r.content
        else:
            return None
    def delete(self, dataitem_id):
        """
        Deletes a data item.
        """
        self.checkParameter((dataitem_id,))
        url = self.setURL("id/" + str(dataitem_id))
        headers = self.setHeaders(json = False)
        return self._decode(self.deleteRequest(url, headers))
    def find(self, **s):
        """
        Finds Data Items based on search criteria (passed as keyword
        arguments and serialized to JSON).
        @s: any of:
            alias, modelId, types, readOnly, visible, forwarded,
            historicalOnly, pageSize, pageNumber, sortAscending,
            sortPropertyName
        """
        url = self.setURL("find")
        headers = self.setHeaders(json = True)
        return self._decode(self.postRequest(url, headers, json.dumps(s)))
    def findOne(self, **s):
        """
        Returns the first Data Item found that meets specified search criteria.
        @s: see find().
        Note:
            Essentially this API equals find() with pageNumber=1.
        """
        url = self.setURL("findOne")
        headers = self.setHeaders(json = True)
        return self._decode(self.postRequest(url, headers, json.dumps(s)))
    def findByIds(self, dataitem_ids):
        """
        Finds the specified SDK objects based on the ids provided, and returns
        an unordered list of found objects. This method will accept only
        platform ids, and does not support alternate ids.
        @dataitem_ids: iterable of platform ids.
        """
        self.checkParameter((dataitem_ids,))
        url = self.setURL("findByIds")
        headers = self.setHeaders(json = False)
        payload = '''<IdCollection xmlns="http://www.axeda.com/services/v2">'''
        for i in dataitem_ids:
            payload += '<id>' + str(i) + '</id>'
        payload += '</IdCollection>'
        return self._decode(self.postRequest(url, headers, payload))
    def findById(self, dataitem_id):
        """
        Finds a Data Item based on its platform identifier.
        @dataitem_id: platform id.
        """
        self.checkParameter((dataitem_id,))
        url = self.setURL("id/" + str(dataitem_id))
        headers = self.setHeaders(json = False)
        return self._decode(self.getRequest(url, headers))
    def findByAlternateId(self, name, model_name):
        """
        Finds a Data Item based on the alternate identifier, which takes the
        format:
            ModelNumber||dataItemName
        """
        self.checkParameter((name, model_name))
        alternate_id = model_name + "||" + name
        url = self.setURL(alternate_id)
        headers = self.setHeaders(json = False)
        return self._decode(self.getRequest(url, headers))
    def findCurrentValues(self, criteria):
        """
        Returns the current values of the specified Data Items.
        @criteria: mapping accepted by CurrentDataItemValueCriteria.
        Note:
            For the findCurrentValues method, the assetId input field is
            required.
        """
        self.checkParameter((criteria,))
        c = CurrentDataItemValueCriteria(criteria)
        url = self.setURL("findCurrentValues")
        headers = self.setHeaders(json = True)
        return self._decode(self.postRequest(url, headers, json.dumps(c)))
    def getSourceDataItems(self, id):
        """
        Returns the source Data Items associated with the specified source
        Data Item.
        """
        self.checkParameter((id,))
        url = self.setURL("id/" + str(id) + "/sourceDataItems")
        headers = self.setHeaders(json = False)
        return self._decode(self.getRequest(url, headers))
    def getTargetDataItems(self, id):
        """
        Returns the source Data Items associated with the specified target
        Data Item.
        """
        self.checkParameter((id,))
        url = self.setURL("id/" + str(id) + "/targetDataItems")
        headers = self.setHeaders(json = False)
        return self._decode(self.getRequest(url, headers))
    def findHistoricalValues(self, **p):
        """
        Returns the historical values of the specified Data Items.
        @p: criteria fields, serialized to JSON.
        """
        url = self.setURL("findHistoricalValues")
        headers = self.setHeaders(json = True)
        return self._decode(self.postRequest(url, headers, json.dumps(p)))
    def bulkCreate(self, c):
        """
        Creates several Data Items in a single request.
        @c: a collection accepted by TypeDataItemCollection.
        BUG FIX: the previous payload referenced undefined names
        (name/model/type) and raised NameError; it is now built from the
        entries of `c`.
        NOTE(review): assumes each entry of `c` is a mapping providing
        'name', 'model' and 'type' — confirm against TypeDataItemCollection.
        """
        self.checkParameter((c,))
        TypeDataItemCollection(c)   # validates the collection shape
        url = self.setURL("bulk/create")
        headers = self.setHeaders(json = False)
        payload = '''<DataItemCollection xmlns="http://www.axeda.com/services/v2">'''
        for item in c:
            payload += \
'''<dataItem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="DataItem">
<name>''' + item['name'] + '''</name><model id="''' + item['model'] + '''"/><type>''' + item['type'] + '''</type>
</dataItem>'''
        payload += '</DataItemCollection>'
        return self._decode(self.putRequest(url, headers, payload))
    def bulkDelete(self):
        # Not implemented yet.
        pass
    def bulkSave(self):
        # Not implemented yet.
        pass
    def bulkUpdate(self):
        # Not implemented yet.
        pass
| 10,211 | 291 | 1,097 |
13c57a663746561f31ecd110df3a77d5f7f583a7 | 9,050 | py | Python | python/Channel/RenderLayer.py | smile-jing/NukeToolSet | 45a2523f3bfa60fbfb03c9702bbdd20cf42cb331 | [
"MIT"
] | 81 | 2016-05-05T15:04:43.000Z | 2022-03-21T06:54:22.000Z | python/Channel/RenderLayer.py | ensii75/NukeToolSet | 0c47efc3bc7ca513f902e00a3e2b71404636aae9 | [
"MIT"
] | 8 | 2018-04-04T16:35:26.000Z | 2022-02-10T09:56:30.000Z | python/Channel/RenderLayer.py | ensii75/NukeToolSet | 0c47efc3bc7ca513f902e00a3e2b71404636aae9 | [
"MIT"
] | 51 | 2016-05-07T14:27:42.000Z | 2022-02-10T05:55:11.000Z | import nuke
| 63.286713 | 437 | 0.520773 | import nuke
def RenderLayer():
    """Build a 'RenderLayer' NoOp control node (Nuke, Python 2).

    The node carries a custom tab with: an output path knob, a file-type
    pulldown with per-format option knobs, a CreateWrite button that creates
    one Shuffle+Write chain per layer of the upstream node, and a help
    button. NOTE(review): indentation inside the embedded command strings was
    reconstructed from their control flow — confirm against the upstream
    repository.
    """
    # Container node that carries all of the custom knobs.
    RenderLayernode = nuke.nodes.NoOp(name='RenderLayer')
    knob_tk = nuke.Tab_Knob('User', 'RenderLayer')
    RenderLayernode.addKnob(knob_tk)
    # Output path prefix for the per-layer render files.
    knob_fk = nuke.File_Knob('filename', 'FileName')
    RenderLayernode.addKnob(knob_fk)
    # The pulldown stores the chosen format into 'filetype' and toggles the
    # visibility of the per-format option knobs below.
    knob_pk = nuke.Pulldown_Knob('choosefiletype', 'chooseFileType')
    knob_ek1 = nuke.Enumeration_Knob('filetype', 'FileType', ['tiff', 'targa', 'jpeg', 'png'])
    RenderLayernode.addKnob(knob_pk)
    RenderLayernode.addKnob(knob_ek1)
    # tiff
    knob_ek_tiff1 = nuke.Enumeration_Knob('datatypetiff', 'DataType', ['8 bit', '16 bit', '32 bit float'])
    knob_ek_tiff2 = nuke.Enumeration_Knob('compressiontiff', 'Compression', ['none', 'PackBits', 'LZW', 'Deflate'])
    RenderLayernode.addKnob(knob_ek_tiff1)
    RenderLayernode.addKnob(knob_ek_tiff2)
    knob_ek_tiff1.setVisible(False)
    knob_ek_tiff2.setVisible(False)
    # tga
    knob_ek_tga = nuke.Enumeration_Knob('compressiontga', 'Compression', ['none', 'RLE'])
    RenderLayernode.addKnob(knob_ek_tga)
    knob_ek_tga.setVisible(False)
    # jpeg
    knob_dk_jpeg1 = nuke.Double_Knob('jpeg_quality', 'quality')
    knob_dk_jpeg1.setValue(1)
    knob_ek_jpeg2 = nuke.Enumeration_Knob('jpeg_sub_sampling', 'sub-sampling', ['4:1:1', '4:2:2', '4:4:4'])
    RenderLayernode.addKnob(knob_dk_jpeg1)
    RenderLayernode.addKnob(knob_ek_jpeg2)
    knob_dk_jpeg1.setVisible(False)
    knob_ek_jpeg2.setVisible(False)
    # png
    knob_ek_png = nuke.Enumeration_Knob('datatypepng', 'DataType', ['8 bit', '16 bit'])
    RenderLayernode.addKnob(knob_ek_png)
    knob_ek_png.setVisible(False)
    # Menu entries: each value is a Python snippet run on selection that
    # records the file type and shows only the matching option knobs.
    # NOTE(review): the 'chosefiletype/tiff' key (missing 'o') is the menu
    # label as shipped — cosmetic only, left unchanged.
    knob_pk.setValues({
        'chosefiletype/tiff': 'nuke.thisNode().knob("filetype").setValue("tiff");nuke.thisNode().knob("datatypetiff").setVisible(True);nuke.thisNode().knob("compressiontiff").setVisible(True);nuke.thisNode().knob("compressiontga").setVisible(False);nuke.thisNode().knob("jpeg_quality").setVisible(False);nuke.thisNode().knob("jpeg_sub_sampling").setVisible(False);nuke.thisNode().knob("datatypepng").setVisible(False)',
        'targa': 'nuke.thisNode().knob("filetype").setValue("targa");nuke.thisNode().knob("datatypetiff").setVisible(False);nuke.thisNode().knob("compressiontiff").setVisible(False);nuke.thisNode().knob("compressiontga").setVisible(True);nuke.thisNode().knob("jpeg_quality").setVisible(False);nuke.thisNode().knob("jpeg_sub_sampling").setVisible(False);nuke.thisNode().knob("datatypepng").setVisible(False)',
        'jpeg': 'nuke.thisNode().knob("filetype").setValue("jpeg");nuke.thisNode().knob("datatypetiff").setVisible(False);nuke.thisNode().knob("compressiontiff").setVisible(False);nuke.thisNode().knob("compressiontga").setVisible(False);nuke.thisNode().knob("jpeg_quality").setVisible(True);nuke.thisNode().knob("jpeg_sub_sampling").setVisible(True);nuke.thisNode().knob("datatypepng").setVisible(False) ',
        'png': 'nuke.thisNode().knob("filetype").setValue("png");nuke.thisNode().knob("datatypetiff").setVisible(False);nuke.thisNode().knob("compressiontiff").setVisible(False);nuke.thisNode().knob("compressiontga").setVisible(False);nuke.thisNode().knob("jpeg_quality").setVisible(False);nuke.thisNode().knob("jpeg_sub_sampling").setVisible(False);nuke.thisNode().knob("datatypepng").setVisible(True)'})
    # Visual separator above the buttons.
    knob_line = nuke.Text_Knob('')
    RenderLayernode.addKnob(knob_line)
    knob_py = nuke.PyScript_Knob('createwrite', 'CreateWrite')
    RenderLayernode.addKnob(knob_py)
    # Button script: for every layer except 'rgb', create a Shuffle that
    # isolates the layer and a Write that renders it; beforeRender creates
    # the output directory, afterRender spawns Read nodes for the result.
    knob_py.setCommand("""
layerlist = nuke.layers(nuke.thisNode())
print layerlist
for i in layerlist:
    if i == 'rgb':
        print i
        pass
    else:
        node = nuke.nodes.Shuffle()
        node.setInput(0,nuke.thisNode())
        node.knob('in').setValue(i)
        node.knob('name').setValue(i)
        node.knob('postage_stamp').setValue(1)
        node.knob('note_font_size').setValue(16)
        filename = nuke.thisNode().knob('filename').value() + i + '.####.' + nuke.thisNode().knob('filetype').value()
        write = nuke.nodes.Write()
        write.setInput(0,node)
        write.knob('file').setValue(filename)
        write.knob('file_type').setValue(nuke.thisNode().knob('filetype').value())
        if nuke.thisNode().knob('filetype').value() == 'tiff':
            write.knob('datatype').setValue(nuke.thisNode().knob('datatypetiff').value())
            write.knob('compression').setValue(nuke.thisNode().knob('compressiontiff').value())
        elif nuke.thisNode().knob('filetype').value() == 'targa':
            write.knob('compression').setValue(nuke.thisNode().knob('compressiontga').value())
        elif nuke.thisNode().knob('filetype').value() == 'jpeg':
            write.knob('_jpeg_quality').setValue(nuke.thisNode().knob('jpeg_quality').value())
            write.knob('_jpeg_sub_sampling').setValue(nuke.thisNode().knob('jpeg_sub_sampling').value())
        else:
            write.knob('datatype').setValue(nuke.thisNode().knob('datatypepng').value())
        write.knob('beforeRender').setValue('''
if os.path.exists(os.path.dirname(nuke.thisNode().knob('file').value()))==True:
    print nuke.thisNode().knob('file').value()
else:
    os.makedirs(os.path.dirname(nuke.thisNode().knob('file').value()))''')
        write.knob('afterRender').setValue('''
inputx = nuke.thisNode()['xpos'].value()
inputy = nuke.thisNode()['ypos'].value()
filelist = nuke.getFileNameList(os.path.dirname(nuke.thisNode().knob('file').value()))
writename = os.path.basename(nuke.thisNode().knob('file').value())
for file in filelist:
    #print file
    if file.find('.db') < 0:
        if file.find(' ') > 0 and file.find('-') > 0:
            filename = file.split(' ')[0]
            if filename.find('#') > 0:
                number = filename.count('#')
                place = filename.find('#')
                oldstr = filename[place:place + number]
                filename = filename.replace(oldstr,'%0'+ str(number) + 'd')
                print filename
            if filename == writename:
                firstframe=file.split(' ')[-1].split("-")[0]
                lastframe=file.split(' ')[-1].split("-")[1]
                newnode = nuke.nodes.Read(file=os.path.dirname(nuke.thisNode().knob('file').value()) + '/' + writename,first=firstframe,last=lastframe,)
                newnode.setXYpos(int(inputx),int(inputy)+50)
            else:
                pass
        else:
            filename = file
            if filename == writename:
                firstframe=filename.split('.')[-2]
                lastframe=filename.split('.')[-2]
                newnode = nuke.nodes.Read(file=os.path.dirname(nuke.thisNode().knob('file').value()) + '/' + writename,first=firstframe,last=lastframe,)
                newnode.setXYpos(int(inputx),int(inputy)+50)
            else:
                pass
''')
"""
    )
    # Help popup describing the intended workflow.
    knob_doc = nuke.PyScript_Knob('document', 'HelpDocument')
    RenderLayernode.addKnob(knob_doc)
    knob_doc.setCommand("""nuke.message('''USE STEP
1.connect this node to Read node
2.set up your render path at the FileName
3.choose render file Type
4.choose the sub setting
5.click createwrite button
''')
"""
    )
| 9,015 | 0 | 23 |
0902b3bbf9323c5937d4d3a55e4df4431a9f15c8 | 2,487 | py | Python | tests/test_js_branches.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | 11 | 2018-02-23T16:48:18.000Z | 2022-01-19T23:37:50.000Z | tests/test_js_branches.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | 10 | 2018-10-22T21:10:38.000Z | 2021-02-19T20:00:21.000Z | tests/test_js_branches.py | arunskurian/delphixpy-examples | c4716edbd22fb238ceed23e989b6e6abd82ac8fc | [
"Apache-2.0"
] | 16 | 2018-06-02T07:02:57.000Z | 2021-07-18T05:27:42.000Z | #!/usr/bin/env python
"""
Unit tests for Jet Stream delphixpy
"""
import sys
import unittest
import js_branch
import js_container
import js_template
from lib.GetSession import GetSession
class JetStreamBranchTests(unittest.TestCase):
"""
Creates, activates, lists destroys JS Branches
Requirements: Parent VDB named jst3, and child VDB named jst3_cld.
Change template_db and database_name to reflect values in your environment.
"""
@classmethod
@classmethod
# Run the test case
if __name__ == "__main__":
unittest.main(buffer=True)
| 32.723684 | 87 | 0.686771 | #!/usr/bin/env python
"""
Unit tests for Jet Stream delphixpy
"""
import sys
import unittest
import js_branch
import js_container
import js_template
from lib.GetSession import GetSession
class JetStreamBranchTests(unittest.TestCase):
    """
    Creates, activates, lists and destroys JS Branches.
    Requirements: Parent VDB named jst3, and child VDB named jst3_cld.
    Change template_db and database_name to reflect values in your
    environment.
    """
    @classmethod
    def setUpClass(cls):
        super(JetStreamBranchTests, cls).setUpClass()
        # Open a session against the test engine.
        cls.server_obj = GetSession()
        cls.server_obj.serversess(
            "172.16.169.146", "delphix_admin", "delphix", "DOMAIN"
        )
        cls.server_obj.dlpx_engines["engine_name"] = "test_engine"
        # Names of the Jet Stream objects used by this run.
        cls.template_name = "js_test_template0001"
        cls.container_name = "js_test_container0001"
        cls.branch_name = "js_test_branch0001"
        cls.template_db = "jst3"
        cls.database_name = "jst3_cld"
        # Build template -> container -> branch, in dependency order.
        js_template.create_template(cls.server_obj, cls.template_name, cls.template_db)
        js_container.create_container(
            cls.server_obj, cls.template_name, cls.container_name, cls.database_name
        )
        js_branch.create_branch(
            cls.server_obj, cls.branch_name, cls.template_name, cls.container_name
        )
    def test_activate_js_branch(self):
        # Activating the default branch should mention it on stdout.
        original_branch = "default"
        js_branch.activate_branch(self.server_obj, original_branch)
        self.assertIn(original_branch, sys.stdout.getvalue().strip())
    def test_lists_js_branches(self):
        # Listing should print the table header.
        js_branch.list_branches(self.server_obj)
        self.assertIn("Branch Name, Data Layout", sys.stdout.getvalue().strip())
    @classmethod
    def tearDownClass(cls):
        super(JetStreamBranchTests, cls).tearDownClass()
        # Fresh session for teardown.
        cls.server_obj = GetSession()
        cls.server_obj.serversess(
            "172.16.169.146", "delphix_admin", "delphix", "DOMAIN"
        )
        cls.template_name = "js_test_template0001"
        cls.container_name = "js_test_container0001"
        cls.branch_name = "js_test_branch0001"
        # Tear down in reverse creation order.
        js_branch.delete_branch(cls.server_obj, cls.branch_name)
        js_container.delete_container(cls.server_obj, cls.container_name, True)
        js_template.delete_template(cls.server_obj, cls.template_name)
| 1,806 | 0 | 106 |
d174b28db0bd144c662ccafceda6fcd3b97132d5 | 1,360 | py | Python | test/unit/docknet/function/test_activation_function.py | Accenture/Docknet | e81eb0c5aefd080ebeebf369d41f8d3fa85ab917 | [
"Apache-2.0"
] | 2 | 2020-06-29T08:58:26.000Z | 2022-03-08T11:38:18.000Z | test/unit/docknet/function/test_activation_function.py | jeekim/Docknet | eb3cad13701471a7aaeea1d573bc5608855bab52 | [
"Apache-2.0"
] | 1 | 2022-03-07T17:58:59.000Z | 2022-03-07T17:58:59.000Z | test/unit/docknet/function/test_activation_function.py | jeekim/Docknet | eb3cad13701471a7aaeea1d573bc5608855bab52 | [
"Apache-2.0"
] | 3 | 2020-06-29T08:58:31.000Z | 2020-11-22T11:23:11.000Z | from typing import Union
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from docknet.function.activation_function import relu, sigmoid, tanh
sigmoid_test_cases = [
(np.array([-100., 0., 100]), np.array([0., 0.5, 1.])),
(-100., 0.),
(0., 0.5),
(100., 1.),
(np.array([0.]), np.array([0.5])),
]
@pytest.mark.parametrize("x, expected", sigmoid_test_cases)
relu_test_cases = [
(-1., 0.),
(0., 0.),
(1., 1.),
(5., 5.),
(np.array(0.), np.array(0.)),
(np.array([-1., 0., 1., 5.]), np.array([0., 0., 1., 5.]))
]
@pytest.mark.parametrize("x, expected", relu_test_cases)
tanh_test_cases = [
(-100., -1.),
(0., 0.),
(100., 1.),
(np.array(0.), np.array(0.)),
(np.array([-100., 0., 100.]), np.array([-1., 0., 1.]))
]
@pytest.mark.parametrize("x, expected", tanh_test_cases)
| 25.185185 | 78 | 0.619853 | from typing import Union
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from docknet.function.activation_function import relu, sigmoid, tanh
# (input, expected output) pairs covering scalars and arrays.
sigmoid_test_cases = [
    (np.array([-100.0, 0.0, 100]), np.array([0.0, 0.5, 1.0])),
    (-100.0, 0.0),
    (0.0, 0.5),
    (100.0, 1.0),
    (np.array([0.0]), np.array([0.5])),
]
@pytest.mark.parametrize("x, expected", sigmoid_test_cases)
def test_sigmoid(x: Union[float, np.array], expected: Union[float, np.array]):
    # Sigmoid saturates to 0/1 at large magnitudes and is 0.5 at 0.
    assert_array_almost_equal(sigmoid(x), expected, verbose=True)
# (input, expected output) pairs: negatives clamp to 0, positives pass.
relu_test_cases = [
    (-1.0, 0.0),
    (0.0, 0.0),
    (1.0, 1.0),
    (5.0, 5.0),
    (np.array(0.0), np.array(0.0)),
    (np.array([-1.0, 0.0, 1.0, 5.0]), np.array([0.0, 0.0, 1.0, 5.0]))
]
@pytest.mark.parametrize("x, expected", relu_test_cases)
def test_relu(x: Union[float, np.array], expected: Union[float, np.array]):
    # ReLU is the identity on non-negatives and 0 elsewhere.
    assert_array_almost_equal(relu(x), expected, verbose=True)
# (input, expected output) pairs: tanh saturates to -1/1, odd around 0.
tanh_test_cases = [
    (-100.0, -1.0),
    (0.0, 0.0),
    (100.0, 1.0),
    (np.array(0.0), np.array(0.0)),
    (np.array([-100.0, 0.0, 100.0]), np.array([-1.0, 0.0, 1.0]))
]
@pytest.mark.parametrize("x, expected", tanh_test_cases)
def test_tanh(x: Union[float, np.array], expected: Union[float, np.array]):
    # tanh saturates to +/-1 at large magnitudes and is 0 at 0.
    assert_array_almost_equal(tanh(x), expected, verbose=True)
| 418 | 0 | 66 |
1dfb503013d2ff4804933686210e68b0d620e354 | 3,710 | py | Python | examples/keras_mnist.py | jmsalamy/KungFu | 063ccaae3c0a2c6411f26d3641da262abd4eeab3 | [
"Apache-2.0"
] | null | null | null | examples/keras_mnist.py | jmsalamy/KungFu | 063ccaae3c0a2c6411f26d3641da262abd4eeab3 | [
"Apache-2.0"
] | null | null | null | examples/keras_mnist.py | jmsalamy/KungFu | 063ccaae3c0a2c6411f26d3641da262abd4eeab3 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import argparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import math
import tensorflow as tf
from kungfu import current_cluster_size, current_rank
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesCallback
from kungfu.tensorflow.optimizers import (SynchronousAveragingOptimizer,
SynchronousSGDOptimizer,
PairAveragingOptimizer)
parser = argparse.ArgumentParser(description='Keras MNIST example.')
parser.add_argument('--kf-optimizer',
type=str,
default='sync-sgd',
help='kungfu optimizer')
args = parser.parse_args()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
batch_size = 128
num_classes = 10
# KungFu: adjust number of epochs based on number of GPUs.
epochs = int(math.ceil(4.0 / current_cluster_size()))
# Input image dimensions
img_rows, img_cols = 28, 28
# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# KungFu: adjust learning rate based on number of GPUs.
opt = keras.optimizers.Adadelta(1.0 * current_cluster_size())
# KungFu: wrap distributed optimizers.
if args.kf_optimizer == 'sync-sgd':
    opt = SynchronousSGDOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'async-sgd':
    opt = PairAveragingOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'sma':
    opt = SynchronousAveragingOptimizer(opt, with_keras=True)
else:
    # Bug fix: `name` was undefined here, so an unknown flag raised a
    # NameError instead of the intended error message. Report the flag value.
    raise RuntimeError('unknown optimizer: %s' % args.kf_optimizer)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
callbacks = [BroadcastGlobalVariablesCallback(with_keras=True)]
# KungFu: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if current_rank() == 0:
callbacks.append(
keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))
model.fit(x_train,
y_train,
batch_size=batch_size,
callbacks=callbacks,
epochs=epochs,
verbose=1 if current_rank() == 0 else 0,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| 35 | 90 | 0.716442 | from __future__ import print_function
import argparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import math
import tensorflow as tf
from kungfu import current_cluster_size, current_rank
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesCallback
from kungfu.tensorflow.optimizers import (SynchronousAveragingOptimizer,
SynchronousSGDOptimizer,
PairAveragingOptimizer)
parser = argparse.ArgumentParser(description='Keras MNIST example.')
parser.add_argument('--kf-optimizer',
type=str,
default='sync-sgd',
help='kungfu optimizer')
args = parser.parse_args()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
batch_size = 128
num_classes = 10
# KungFu: adjust number of epochs based on number of GPUs.
epochs = int(math.ceil(4.0 / current_cluster_size()))
# Input image dimensions
img_rows, img_cols = 28, 28
# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# KungFu: adjust learning rate based on number of GPUs.
opt = keras.optimizers.Adadelta(1.0 * current_cluster_size())
# KungFu: wrap distributed optimizers.
if args.kf_optimizer == 'sync-sgd':
    opt = SynchronousSGDOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'async-sgd':
    opt = PairAveragingOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'sma':
    opt = SynchronousAveragingOptimizer(opt, with_keras=True)
else:
    # Bug fix: `name` was undefined here, so an unknown flag raised a
    # NameError instead of the intended error message. Report the flag value.
    raise RuntimeError('unknown optimizer: %s' % args.kf_optimizer)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
callbacks = [BroadcastGlobalVariablesCallback(with_keras=True)]
# KungFu: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if current_rank() == 0:
callbacks.append(
keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))
model.fit(x_train,
y_train,
batch_size=batch_size,
callbacks=callbacks,
epochs=epochs,
verbose=1 if current_rank() == 0 else 0,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| 0 | 0 | 0 |
256b135af9f32fec236798312f6e612bb29aef9a | 1,548 | py | Python | report/parser/parser.py | banche/benchmark | 4d2a09ab7e47b51d2425b7fa29b92afa4bf18edd | [
"MIT"
] | null | null | null | report/parser/parser.py | banche/benchmark | 4d2a09ab7e47b51d2425b7fa29b92afa4bf18edd | [
"MIT"
] | null | null | null | report/parser/parser.py | banche/benchmark | 4d2a09ab7e47b51d2425b7fa29b92afa4bf18edd | [
"MIT"
] | null | null | null | try:
from parser.benchmark import Benchmark
except:
from benchmark import Benchmark
import json
import collections
def merge_dict(to_update: collections.defaultdict,
               other: collections.defaultdict):
    """Merge ``other`` into ``to_update`` in place and return ``to_update``.

    Values are lists keyed by benchmark name; when a key exists in both
    mappings the lists are concatenated (``other``'s entries appended after
    ``to_update``'s), otherwise the key is copied over.

    Note: the annotations used to be ``collections.defaultdict(list)``, which
    instantiated a throwaway defaultdict at definition time; annotating with
    the class itself is equivalent for readers and avoids that.

    >>> from collections import defaultdict
    >>> a = defaultdict(list)
    >>> b = defaultdict(list)
    >>> a[1] = [1,2]
    >>> a[2] = [3,4]
    >>> b[1] = [3,4]
    >>> b[2] = [1,2]
    >>> b[3] = [5,6]
    >>> merge_dict(a,b)
    defaultdict(<class 'list'>, {1: [1, 2, 3, 4], 2: [3, 4, 1, 2], 3: [5, 6]})
    """
    for key, values in other.items():
        if key in to_update:  # direct membership test; .keys() wrapper was redundant
            to_update[key] += values
        else:
            to_update[key] = values
    return to_update
| 26.689655 | 78 | 0.604651 | try:
from parser.benchmark import Benchmark
except:
from benchmark import Benchmark
import json
import collections
def parse_benchmark_json(input: dict):
    """Group the benchmarks of one parsed JSON report by their full name.

    Raises:
        RuntimeError: if the report has no 'benchmarks' key.
    """
    if 'benchmarks' not in input:
        raise RuntimeError('Missing benchmarks key!')
    grouped = collections.defaultdict(list)
    for entry in input['benchmarks']:
        parsed = Benchmark.from_json(entry)
        if parsed is not None:
            grouped[parsed.full_name].append(parsed)
    # TODO parse some of the context information to generate the final report
    return grouped
def merge_dict(to_update: collections.defaultdict(list),
other: collections.defaultdict(list)):
"""
Merge benchmarks dictionnaries together
>>> from collections import defaultdict
>>> a = defaultdict(list)
>>> b = defaultdict(list)
>>> a[1] = [1,2]
>>> a[2] = [3,4]
>>> b[1] = [3,4]
>>> b[2] = [1,2]
>>> b[3] = [5,6]
>>> merge_dict(a,b)
defaultdict(<class 'list'>, {1: [1, 2, 3, 4], 2: [3, 4, 1, 2], 3: [5, 6]})
"""
for k,v in other.items():
if k in to_update.keys():
to_update[k] += v
else:
to_update[k] = v
return to_update
def load_files(files: list):
    """Parse every JSON report in *files* and merge them into one mapping."""
    merged = collections.defaultdict(list)
    for path in files:
        with open(path, 'r+') as handle:
            report = json.load(handle)
        merge_dict(merged, parse_benchmark_json(report))
    return merged
| 733 | 0 | 46 |
383ba4f99467276218dddbb904fd3ea3dfa214f7 | 2,097 | py | Python | venv/lib/python3.8/site-packages/pyqtgraph/tests/ui_testing.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 23 | 2017-09-04T13:20:38.000Z | 2022-03-08T08:15:17.000Z | venv/lib/python3.8/site-packages/pyqtgraph/tests/ui_testing.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 4 | 2018-01-05T13:44:29.000Z | 2021-09-30T17:08:15.000Z | venv/lib/python3.8/site-packages/pyqtgraph/tests/ui_testing.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 5 | 2017-11-26T19:40:46.000Z | 2021-03-11T17:25:23.000Z |
# Functions for generating user input events.
# We would like to use QTest for this purpose, but it seems to be broken.
# See: http://stackoverflow.com/questions/16299779/qt-qgraphicsview-unit-testing-how-to-keep-the-mouse-in-a-pressed-state
from ..Qt import QtCore, QtGui, QT_LIB
| 37.446429 | 121 | 0.709108 |
# Functions for generating user input events.
# We would like to use QTest for this purpose, but it seems to be broken.
# See: http://stackoverflow.com/questions/16299779/qt-qgraphicsview-unit-testing-how-to-keep-the-mouse-in-a-pressed-state
from ..Qt import QtCore, QtGui, QT_LIB
def mousePress(widget, pos, button, modifier=None):
    """Synthesize a mouse-button-press event and deliver it to *widget*."""
    target = widget.viewport() if isinstance(widget, QtGui.QGraphicsView) else widget
    mod = QtCore.Qt.NoModifier if modifier is None else modifier
    if isinstance(pos, QtCore.QPointF) and QT_LIB != 'PyQt5':
        pos = pos.toPoint()
    press = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress, pos, button,
                              QtCore.Qt.NoButton, mod)
    QtGui.QApplication.sendEvent(target, press)
def mouseRelease(widget, pos, button, modifier=None):
    """Synthesize a mouse-button-release event and deliver it to *widget*."""
    target = widget.viewport() if isinstance(widget, QtGui.QGraphicsView) else widget
    mod = QtCore.Qt.NoModifier if modifier is None else modifier
    if isinstance(pos, QtCore.QPointF) and QT_LIB != 'PyQt5':
        pos = pos.toPoint()
    release = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonRelease, pos, button,
                                QtCore.Qt.NoButton, mod)
    QtGui.QApplication.sendEvent(target, release)
def mouseMove(widget, pos, buttons=None, modifier=None):
    """Synthesize a mouse-move event (with *buttons* held) and deliver it to *widget*."""
    target = widget.viewport() if isinstance(widget, QtGui.QGraphicsView) else widget
    mod = QtCore.Qt.NoModifier if modifier is None else modifier
    held = QtCore.Qt.NoButton if buttons is None else buttons
    if isinstance(pos, QtCore.QPointF) and QT_LIB != 'PyQt5':
        pos = pos.toPoint()
    move = QtGui.QMouseEvent(QtCore.QEvent.MouseMove, pos, QtCore.Qt.NoButton,
                             held, mod)
    QtGui.QApplication.sendEvent(target, move)
def mouseDrag(widget, pos1, pos2, button, modifier=None):
    """Simulate a drag on *widget*: move to pos1, press, move to pos2, release."""
    mouseMove(widget, pos1)
    # NOTE(review): `button` is passed as mouseMove's `buttons` argument here;
    # a single button value appears intended (held buttons during the drag).
    mousePress(widget, pos1, button, modifier)
    mouseMove(widget, pos2, button, modifier)
    mouseRelease(widget, pos2, button, modifier)
def mouseClick(widget, pos, button, modifier=None):
    """Simulate a click on *widget*: move to pos, press, release."""
    mouseMove(widget, pos)
    mousePress(widget, pos, button, modifier)
    mouseRelease(widget, pos, button, modifier)
bace046721e1e09e7c1cb6deccea93df38cd4439 | 379 | py | Python | home/views.py | Exide/django-luna | 48947318bbe557ff115dd8eea36bfb2d1a053d5a | [
"MIT"
] | null | null | null | home/views.py | Exide/django-luna | 48947318bbe557ff115dd8eea36bfb2d1a053d5a | [
"MIT"
] | null | null | null | home/views.py | Exide/django-luna | 48947318bbe557ff115dd8eea36bfb2d1a053d5a | [
"MIT"
] | null | null | null | from django.shortcuts import render_to_response
from django.template import RequestContext
| 29.153846 | 71 | 0.688654 | from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
    """Render the home page."""
    context = RequestContext(request)
    return render_to_response('home/index.html', context_instance=context)
def dingus(request):
    """Render the dingus page."""
    context = RequestContext(request)
    return render_to_response('home/dingus.html', context_instance=context)
| 240 | 0 | 46 |
5dec5fd9f591b12b83b92b3921aa8484ddc7327e | 1,956 | py | Python | release/toolchain_flags.py | bansalvinayak/bazel-toolchains | 5e4c884b0507a399141d3cc018f11edbd29034e8 | [
"Apache-2.0"
] | null | null | null | release/toolchain_flags.py | bansalvinayak/bazel-toolchains | 5e4c884b0507a399141d3cc018f11edbd29034e8 | [
"Apache-2.0"
] | null | null | null | release/toolchain_flags.py | bansalvinayak/bazel-toolchains | 5e4c884b0507a399141d3cc018f11edbd29034e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to create or update sample toolchain.bazelrc file."""
import os
from string import Template
from util import get_git_root
TPL = os.path.join(get_git_root(), "release", "toolchain.bazelrc.tpl")
def update_toolchain_bazelrc_file(container_configs_list, bazel_version):
  """Write (or overwrite) one toolchain.bazelrc file per container config.

  Renders release/toolchain.bazelrc.tpl for each entry in
  container_configs_list and writes the result to that config's
  toolchain.bazelrc path, replacing any existing file. Example output:
  configs/ubuntu16_04_clang/1.0/toolchain.bazelrc.

  Args:
    container_configs_list: list of ContainerConfigs, the list of
      ContainerConfigs to generate configs for.
    bazel_version: string, the version of Bazel used to generate the configs.
  """
  for cfg in container_configs_list:
    out_path = cfg.get_toolchain_bazelrc_path()
    with open(out_path, "w") as out_file:
      with open(TPL, "r") as tpl_file:
        rendered = Template(tpl_file.read()).substitute(
            CONFIG_VERSION=cfg.version,
            BAZEL_VERSION=bazel_version,
            PACKAGE=cfg.package,
            PLATFORM=cfg.platform_target,
        )
      out_file.write(rendered)
| 35.563636 | 77 | 0.743865 | # Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to create or update sample toolchain.bazelrc file."""
import os
from string import Template
from util import get_git_root
TPL = os.path.join(get_git_root(), "release", "toolchain.bazelrc.tpl")
def update_toolchain_bazelrc_file(container_configs_list, bazel_version):
  """Write (or overwrite) one toolchain.bazelrc file per container config.

  Renders release/toolchain.bazelrc.tpl for each entry in
  container_configs_list and writes the result to that config's
  toolchain.bazelrc path, replacing any existing file. Example output:
  configs/ubuntu16_04_clang/1.0/toolchain.bazelrc.

  Args:
    container_configs_list: list of ContainerConfigs, the list of
      ContainerConfigs to generate configs for.
    bazel_version: string, the version of Bazel used to generate the configs.
  """
  for cfg in container_configs_list:
    out_path = cfg.get_toolchain_bazelrc_path()
    with open(out_path, "w") as out_file:
      with open(TPL, "r") as tpl_file:
        rendered = Template(tpl_file.read()).substitute(
            CONFIG_VERSION=cfg.version,
            BAZEL_VERSION=bazel_version,
            PACKAGE=cfg.package,
            PLATFORM=cfg.platform_target,
        )
      out_file.write(rendered)
| 0 | 0 | 0 |
d24e8fbbabbf2f4697c6751c8a360bccbbe24829 | 2,540 | py | Python | rainbow/api.py | weblogng/rainbow | e27e6fd7a934b62d091e909473125d666ffe52ab | [
"Apache-2.0"
] | null | null | null | rainbow/api.py | weblogng/rainbow | e27e6fd7a934b62d091e909473125d666ffe52ab | [
"Apache-2.0"
] | null | null | null | rainbow/api.py | weblogng/rainbow | e27e6fd7a934b62d091e909473125d666ffe52ab | [
"Apache-2.0"
] | null | null | null | from fabric.state import env
from fabtools.require.files import (directory, put)
from fabtools.utils import (run_as_root)
from fabric.api import (cd, run)
#alias fabric's env for simple unit-testing of the rainbow api
fabric_env = env
| 43.050847 | 119 | 0.732283 | from fabric.state import env
from fabtools.require.files import (directory, put)
from fabtools.utils import (run_as_root)
from fabric.api import (cd, run)
#alias fabric's env for simple unit-testing of the rainbow api
fabric_env = env
def deploy(artifact_name, remote_path):
    """Upload a .tar.gz artifact to remote_path, unpack it into its own
    directory, and point the 'next' symlink at the unpacked release.

    Raises ValueError if the bare artifact name collides with the reserved
    symlink names 'prev', 'current', or 'next'.
    """
    artifact_name = str(artifact_name)
    remote_path = str(remote_path)
    print "deploying artifact: {artifact_name} to {remote_path}"\
        .format(artifact_name=artifact_name, remote_path=remote_path)
    dest_dir = remote_path + "/"
    file_extension = '.tar.gz'
    # Strip the archive extension so the release directory is named after the
    # bare artifact (e.g. foo.tar.gz -> <remote_path>/foo).
    bare_artifact_name = artifact_name
    if artifact_name.endswith(file_extension):
        bare_artifact_name = artifact_name[:(-1 * len(file_extension))]
        dest_dir = dest_dir + bare_artifact_name
    else:
        dest_dir = dest_dir + artifact_name
    if bare_artifact_name in ['prev', 'current', 'next']:
        raise ValueError("sorry, {artifact_name} is not a legal artifact name because it collides "
                         "with a word reserved for symbolic links used by rainbow".format(artifact_name=artifact_name))
    # note: the request to create the remote_path should be superfluous since dest_dir contains it
    directory(path=remote_path, use_sudo=True)
    directory(path=dest_dir, use_sudo=True, owner=fabric_env.user)
    put(local_path=artifact_name, remote_path=remote_path, use_sudo=True)
    with cd(path=remote_path):
        # Unpack next to the archive, then stage it as 'next' for cut-over.
        run("tar -xvf {artifact_name} -C {dest_dir}".format(dest_dir=dest_dir, artifact_name=artifact_name))
        run_as_root("ln -nsf {dest_dir} next".format(dest_dir=dest_dir))
def _roll_to_release(release, remote_path):
    """Cut over to *release*: repoint 'prev' at the old current release and
    'current' at the release named by *release* (e.g. 'next' or 'prev')."""
    print "cutting-over to release: {release}".format(release=release)
    with cd(path=remote_path):
        print "changed to {remote_path}".format(remote_path=remote_path)
        # Resolve both symlinks to absolute paths before changing anything.
        current_rel = run("readlink -f {remote_path}/current".format(remote_path=remote_path))
        target_rel = run("readlink -f {remote_path}/{release}".format(remote_path=remote_path, release=release))
        run_as_root("ln -nsf {current_rel} prev".format(current_rel=current_rel))
        run_as_root("ln -nsf {target_rel} current".format(target_rel=target_rel))
        # Re-resolve 'current' to confirm and report the cut-over.
        current_rel = run("readlink -f {remote_path}/current".format(remote_path=remote_path))
        print "updated current release to {current_rel}".format(current_rel=current_rel)
def roll_to_next_release(remote_path):
    """Point 'current' at the release staged as 'next'."""
    _roll_to_release("next", remote_path)
def roll_to_prev_release(remote_path):
    """Roll back: point 'current' at the release saved as 'prev'."""
    _roll_to_release("prev", remote_path)
| 2,211 | 0 | 92 |
e509c6e785d37348c425782cb1474db703cfe589 | 189 | py | Python | convenient/decorators.py | ixc/glamkit-convenient | e88bcbe3f7f9405ff25ad4bebe405b858e550cff | [
"BSD-3-Clause"
] | null | null | null | convenient/decorators.py | ixc/glamkit-convenient | e88bcbe3f7f9405ff25ad4bebe405b858e550cff | [
"BSD-3-Clause"
] | null | null | null | convenient/decorators.py | ixc/glamkit-convenient | e88bcbe3f7f9405ff25ad4bebe405b858e550cff | [
"BSD-3-Clause"
] | null | null | null | from django.db.models.signals import post_save
| 21 | 46 | 0.714286 | from django.db.models.signals import post_save
def post_save_handler(model):
def renderer(func):
post_save.connect(func, sender=model)
return func
return renderer
| 118 | 0 | 23 |
0b3f9e62011eaa05f11621c7f1809cb9466f4b05 | 24,051 | py | Python | models/questioner.py | soumye/dialog_without_dialog | 9f95d6fb457659f9007445d9036b94e639bddd8b | [
"MIT"
] | 3 | 2020-09-08T23:19:51.000Z | 2021-07-21T11:12:48.000Z | models/questioner.py | soumye/dialog_without_dialog | 9f95d6fb457659f9007445d9036b94e639bddd8b | [
"MIT"
] | 2 | 2021-01-08T02:08:09.000Z | 2021-11-15T23:51:02.000Z | models/questioner.py | soumye/dialog_without_dialog | 9f95d6fb457659f9007445d9036b94e639bddd8b | [
"MIT"
] | 2 | 2020-11-10T15:53:03.000Z | 2020-12-11T03:24:00.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.one_hot_categorical import OneHotCategorical
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from torch.distributions.normal import Normal
from torch.distributions.kl import kl_divergence
from models.agent import Agent
import models.encoder as enc
import models.decoder as dec
from models.answerer import FCNet, SimpleClassifier
import models.context_coder as ctx
from misc.vector_quantizer import VectorQuantizerEMA, VectorQuantizer
from misc import utilities as utils
from misc.utilities import gumbel_softmax
import pdb
| 42.047203 | 129 | 0.584924 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.one_hot_categorical import OneHotCategorical
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from torch.distributions.normal import Normal
from torch.distributions.kl import kl_divergence
from models.agent import Agent
import models.encoder as enc
import models.decoder as dec
from models.answerer import FCNet, SimpleClassifier
import models.context_coder as ctx
from misc.vector_quantizer import VectorQuantizerEMA, VectorQuantizer
from misc import utilities as utils
from misc.utilities import gumbel_softmax
import pdb
class Questioner(Agent):
    def __init__(self, params):
        '''
        Build the question-asking agent from a params dict.

        Notation used throughout (see comments below):
            c: context (pool of images), x: question, z: latent variable,
            a: answer, y: index of the target image in the pool.

        Constructs the encoder q(z|x,c), the context coder / policy p(z|c),
        the dialog RNN, answer/question embedders, the decoder p(x|z), and
        the pool-image prediction heads. The latent variable type is chosen
        by params['varType'] in {'cont', 'gumbel', 'none'}.
        '''
        super(Questioner, self).__init__()
        self.params = params.copy()
        self.varType = params['varType']
        self.rnnHiddenSize = params['rnnHiddenSize']
        self.latentSize = params['latentSize']
        self.wordDropoutRate = params['wordDropoutRate']
        self.unkToken = params['unkToken']
        self.endToken = params['endToken']
        self.startToken = params['startToken']
        self.embedSize = params['embedSize']
        self.num_embeddings = params['num_embeddings']
        self.num_vars = params.get('num_vars', 1)
        self.imgEmbedSize = params['imgEmbedSize']
        self.imgFeatureSize = params['imgFeatureSize']
        self.ansEmbedSize = params['ansEmbedSize']
        self.num_ans_candidates = params['num_ans_candidates']
        self.numLayers = params['numLayers']
        self.query_type = params.get('queryType', 'dialog_only')
        self.speaker_type = params.get('speakerType', 'two_part')
        # Total gumbel logit width: one K-way categorical per latent variable.
        self._num_embeddings = self.num_embeddings * self.num_vars
        # by default, use the final annealed temperature
        temp, hard = utils.gumbel_temp_anneal_function(params, training=False)
        self.temp = temp
        self.hard = hard
        # c context (images)
        # x question
        # z latent variable
        # a answer
        # y index in image pool
        # Encoder q(z | x, c)
        # x, c -> h_enc
        self.encoder = enc.RNNEncoder(params)
        # h_enc -> z
        if self.varType == 'cont':
            self.henc2z = nn.ModuleList([
                nn.Linear(self.rnnHiddenSize, self.latentSize), # mean
                nn.Linear(self.rnnHiddenSize, self.latentSize), # logv
            ])
        elif self.varType == 'gumbel':
            self.henc2z = nn.Linear(self.rnnHiddenSize, self._num_embeddings)
        elif self.varType == 'none':
            self.henc2z = lambda x: x
        # Policy / Context Coder p(z | c)
        # initialize this even for cond_vae because it will be fine-tuned late
        # c -> h_c
        self.ctx_coder = ctx.ImageSetContextCoder(self.imgEmbedSize,
                                                  self.imgFeatureSize,
                                                  self.rnnHiddenSize)
        # h_c -> z
        if self.varType == 'cont':
            self.hc2z = nn.ModuleList([
                nn.Linear(self.imgEmbedSize, self.latentSize), # mean
                nn.Linear(self.imgEmbedSize, self.latentSize), # logv
            ])
            self.z_dim = self.latentSize
        elif self.varType == 'gumbel':
            self.hc2z = nn.Linear(self.imgEmbedSize, self._num_embeddings)
            self.z_dim = self._num_embeddings
        elif self.varType == 'none':
            self.hc2z = lambda x: x
            self.z_dim = self.imgEmbedSize
        # we may need to tune this.
        self.dialogRNN = dialogRNN(params)
        # Optional projections feeding z (or its logits / codebook embedding)
        # back into the dialog state, depending on the speaker type.
        if self.speaker_type == 'two_part_zgrad':
            self.z2dialog = nn.Sequential(
                nn.Linear(self.z_dim, self.rnnHiddenSize),
                nn.ReLU(),
            )
        elif self.speaker_type == 'two_part_zgrad_logit':
            self.zlogit2dialog = nn.Sequential(
                nn.Linear(self.z_dim, self.rnnHiddenSize),
                nn.ReLU(),
            )
        elif self.speaker_type == 'two_part_zgrad_codebook':
            self.codebook2dialog = nn.Sequential(
                nn.Linear(self.embedSize, self.rnnHiddenSize),
                nn.ReLU(),
            )
        # answer embedding from last round for the latent space. The begining is a start token.
        self.ans_uncertain_token = self.num_ans_candidates + 0
        self.ans_start_token = self.num_ans_candidates + 1
        self.ans_not_relevant_token = self.num_ans_candidates + 2
        # TODO: fix typo, but don't break compatibility with existing trained models
        self.ansEmebed = nn.Embedding(self.num_ans_candidates+3, self.ansEmbedSize)
        self.quesStartEmbed = nn.Embedding(1, self.rnnHiddenSize)
        self.quesEncoder = enc.RNNEncoder(params, useImage=False)
        # Decoder p(x | z)
        # z -> h_dec
        if self.varType == 'gumbel':
            # One linear codebook (K -> latentSize) per latent variable.
            new_codebook = lambda: nn.Linear(self.num_embeddings, self.latentSize, bias=False)
            codebooks = [new_codebook() for _ in range(self.num_vars)]
            self.gumbel_codebook = nn.ModuleList(codebooks)
        self.z2hdec = nn.Linear(self.latentSize, self.embedSize)
        # h_dec -> x
        self.decoder = dec.RNNDecoder(params)
        # Predict image p(y)
        self.predict_q = FCNet([self.rnnHiddenSize*2+self.ansEmbedSize, 1024])
        self.predict_ctx = ctx.ImageContextCoder(params)
        self.predict_fact = ctx.FactContextCoder(params)
        self.predict_logit = SimpleClassifier(1024, 1024*2, 1, 0.5)
        # when true, save various internal state (e.g., z)
        self.tracking = False
        # The speaker is another qbot (usually a pre-trained model kept fixed)
        # which can be used to generate questions to get a model that can't
        # change its dialog state.
        if self.params.get('speakerParams', None) is not None:
            self.speaker = utils.loadModel(params['speakerParams'], 'qbot')
            self.speaker.tracking = True
            # when speaker mode is on, also have the speaker track the dialog
            self.speakerMode = True
        else:
            self.speakerMode = False
        if self.query_type == 'dialog_qa':
            # dialogRNN embed + ans embed + ques embed
            query_embed_in_dim = self.rnnHiddenSize + self.ansEmbedSize + self.rnnHiddenSize
            self.query_embed = nn.Linear(query_embed_in_dim, self.rnnHiddenSize)
    @property
    def tracking(self):
        # When True, submodules record internal state (e.g. z, attention) for inspection.
        return self._tracking
    @tracking.setter
    def tracking(self, value):
        """Set the tracking flag here and on every sub-coder that supports it."""
        self._tracking = value
        self.ctx_coder.tracking = value
        self.predict_ctx.tracking = value
        # dis_ctx / encoder's set coder are optional; guard with hasattr.
        if hasattr(self, 'dis_ctx'):
            self.dis_ctx.tracking = value
        if (hasattr(self, 'encoder') and
            hasattr(self.encoder, 'ImageSetContextCoder')):
            self.encoder.ImageSetContextCoder.tracking = value
    def _h2z(self, h2z, h, inference='sample'):
        """Map a hidden state h to a latent sample z through the given head.

        Args:
            h2z: head module(s) — a ModuleList of [mean, logv] linears for
                'cont', a single Linear producing logits for 'gumbel', or an
                identity callable for 'none'.
            h: hidden state of shape (batch, hidden_dim).
            inference: 'sample' draws a stochastic z; 'greedy' uses the
                deterministic choice (mean for 'cont', low-temperature /
                one-hot argmax for 'gumbel').

        Returns:
            (z, params): params is (mean, logv) for 'cont',
            (log-probs, soft sample or None) for 'gumbel', None for 'none'.
        """
        assert inference in ['sample', 'greedy']
        batch_size = h.size(0)
        if self.varType == 'cont':
            # REPARAMETERIZATION
            mean = h2z[0](h)
            logv = h2z[1](h)
            std = torch.exp(0.5 * logv)
            if inference == 'sample':
                sample = torch.randn([batch_size, self.latentSize]).type_as(h)
                z = sample * std + mean
            elif inference == 'greedy':
                z = mean
            return z, (mean, logv)
        elif self.varType == 'gumbel':
            # TODO: use this as history representation
            z_logit_param = h2z(h)
            temp = self.temp
            hard = self.hard
            K = self.num_embeddings
            V = self.num_vars
            # Split the flat logit vector into V independent K-way categoricals.
            z_v_logits = [z_logit_param[:, v*K:(v+1)*K] for v in range(V)]
            z_v_logprobs = [F.log_softmax(zv_logit, dim=1) for zv_logit in z_v_logits]
            z = None
            z_soft = None
            # compute z
            if inference == 'sample':
                z_vs = [gumbel_softmax(z_vl, tau=temp, hard=hard, ret_soft=True) for z_vl in z_v_logprobs]
                z = torch.cat([z[0] for z in z_vs], dim=1)
                z_soft = torch.cat([z[1] for z in z_vs], dim=1)
            # TODO: is this really the argmax of the gumbel softmax?
            elif inference == 'greedy' and not hard:
                z_vs = [F.softmax(z_vl / temp, dim=1) for z_vl in z_v_logprobs]
                z = torch.cat([z for z in z_vs], dim=1)
            elif inference == 'greedy' and hard:
                # One-hot of the per-variable argmax.
                idxs = [z_vl.max(dim=1, keepdim=True)[1] for z_vl in z_v_logprobs]
                z_vs = [torch.zeros_like(z_vl).scatter_(1, idx, 1.0) for z_vl, idx in zip(z_v_logprobs, idxs)]
                z = torch.cat([z for z in z_vs], dim=1)
            z_logprob = torch.cat(z_v_logprobs, dim=1)
            return z, (z_logprob, z_soft)
        elif self.varType == 'none':
            z = h
            return z, None
    def reset(self):
        """Clear all per-dialog state before starting a new episode."""
        self.dialogHiddens = []
        self.questions = []
        self.quesOneHot = []
        self.quesLens = []
        self.gt_questions = []
        self.gt_quesLens = []
        self.rand_questions = []
        self.rand_quesLens = []
        self.answers = []
        self.dialogQuerys = []
        self.images = None
        self.batch_size = 0
        self.latent_states = []
        self.dialogQuesEmbedding = []
        self.dialogAnsEmbedding = []
        # Keep the frozen speaker's dialog state in sync.
        if self.speakerMode:
            self.speaker.reset()
    def observe(self, *args, **kwargs):
        """Record an observation; mirror it to the frozen speaker if present."""
        self._observe(*args, **kwargs)
        if self.speakerMode:
            self.speaker.observe(*args, **kwargs)
    def _observe(self, images=None,
                 ques=None, ques_len=None, ques_one_hot=None, gt_ques=False,
                 ans=None, ans_rel=None,
                 start_answer=False, start_question=False):
        """Append the given items to the dialog history buffers.

        gt_ques routes questions/lengths to the ground-truth buffers;
        ans_rel==0 entries are overwritten with the not-relevant token;
        start_answer / start_question push start sentinels for round 0.
        """
        if images is not None:
            self.images = images
            self.batch_size = images.size(0)
        if ques is not None:
            if gt_ques:
                self.gt_questions.append(ques)
            else:
                self.questions.append(ques)
        if ques_len is not None:
            if gt_ques:
                self.gt_quesLens.append(ques_len)
            else:
                self.quesLens.append(ques_len)
        if ques_one_hot is not None:
            assert not gt_ques
            self.quesOneHot.append(ques_one_hot)
        if ans is not None:
            if ans_rel is not None:
                # NOTE: mutates the caller's tensor in place.
                ans[ans_rel==0] = self.ans_not_relevant_token
            self.answers.append(ans)
        if start_answer:
            self.answers.append(torch.full([self.batch_size], self.ans_start_token, dtype=torch.long, device=self.images.device))
        if start_question:
            self.questions.append(torch.full([self.batch_size], 0, dtype=torch.long, device=self.images.device))
            self.quesLens.append(-1)
    def embedDialog(self, inpt, ques, ques_len, answer, ques_one_hot=None):
        """Advance the dialog RNN one round and return its (residual) output.

        On the first round the hidden state is zero-initialized and the
        question slot is the learned start embedding; afterwards the previous
        round's question is RNN-encoded. Also appends the new hidden state,
        query, and embeddings to the per-dialog buffers.
        """
        if len(self.dialogHiddens) == 0:
            batchSize = ques.shape[0]
            hPrev = self._initHidden(batchSize)
            quesEmbedding = self.quesStartEmbed(ques)
        else:
            hPrev = self.dialogHiddens[-1]
            # rnn question encoder here.
            quesEmbedding = self.quesEncoder(ques, ques_len, ques_one_hot=ques_one_hot)
        # embed the answer here. We didn't connect the output embedding yet.
        ansEmbedding = self.ansEmebed(answer)
        oupt, query, hNew = self.dialogRNN(inpt, quesEmbedding, ansEmbedding, hPrev)
        # add residual connection
        oupt = oupt.squeeze(1) + inpt
        self.dialogHiddens.append(hNew)
        if self.query_type == 'dialog_only':
            self.dialogQuerys.append(query)
            self.dialogQuesEmbedding.append(quesEmbedding)
            self.dialogAnsEmbedding.append(ansEmbedding)
        elif self.query_type == 'dialog_qa':
            self.dialogQuesEmbedding.append(quesEmbedding)
            self.dialogAnsEmbedding.append(ansEmbedding)
            # Augment the query with this round's answer and question embeddings.
            query = torch.cat([query, ansEmbedding, quesEmbedding], dim=1)
            query = self.query_embed(query)
            self.dialogQuerys.append(query)
        return oupt
def _initHidden(self, batchSize):
'''Initial dialog rnn state - initialize with zeros'''
# Dynamic batch size inference
assert batchSize != 0, 'Observe something to infer batch size.'
someTensor = next(self.parameters()).data
h = someTensor.new(batchSize, self.rnnHiddenSize).zero_()
c = someTensor.new(batchSize, self.rnnHiddenSize).zero_()
return (h, c)
def _z2hdec(self, z):
    """Map a latent code z = (sample, params) to a decoder initial state."""
    z, _ = z  # keep only the sample; the distribution params are unused here
    # z -> h_dec
    if self.varType == 'cont':
        h_dec = self.z2hdec(z)
    elif self.varType == 'gumbel':
        # Sum the per-variable codebook embeddings of each one-hot segment
        # of width K, then project the pooled embedding.
        K = self.num_embeddings
        gumbel_embed = 0
        for v in range(self.num_vars):
            z_v = z[:, v*K:(v+1)*K]
            gumbel_embed += self.gumbel_codebook[v](z_v)
        # TODO: use gumbel_embed as the history representation
        h_dec = self.z2hdec(gumbel_embed)
    elif self.varType == 'none':
        # No latent bottleneck: pass the representation straight through.
        h_dec = z
    return h_dec
def _klloss(self, z1, z2):
    """KL divergence of posterior z1 against the prior (z2 == 'prior').

    Gradients flow only through z1; the prior is treated as fixed.
    """
    if self.varType == 'cont' and z2 == 'prior':
        # Closed-form KL(N(mu, sigma) || N(0, I)), averaged over all dims.
        mu, log_var = z1[1]
        kl = -0.5 * torch.mean(1 + log_var - mu.pow(2) - log_var.exp())
    elif self.varType == 'gumbel' and z2 == 'prior':
        # Monte-Carlo KL against a uniform categorical prior over K codes,
        # estimated from the sampled log-probabilities.
        _, (sample_logprob, _) = z1
        K = self.num_embeddings
        log_q = sample_logprob.reshape([-1, K])
        q = log_q.exp()
        log_uniform = torch.tensor(1. / self.num_embeddings).to(q).log()
        # Sum over codes, then mean over variables and batch.
        kl = (q * (log_q - log_uniform)).sum(dim=1).mean()
    elif self.varType == 'none' and z2 == 'prior':
        # No latent variable: nothing to regularize.
        kl = 0
    return kl
def _infer_z(self, method, inference='sample'):
    """Produce a latent code z from the requested source.

    method: 'encoder' (posterior from the ground-truth question),
    'policy' (from dialog context; also advances the dialog state),
    'prior' (context-free baseline), or 'speaker' (frozen speaker model).
    NOTE(review): an unrecognized method leaves z unbound and raises
    UnboundLocalError at the return -- confirm callers always pass a
    valid method.
    """
    # we will try different auto-encoder method for question generation.
    # first with the continous vae, with various technique to make the latent code informative.
    # batch_size = imgs.shape[0]
    # if the length of the answer is 0
    # x, c -> z_enc
    if method == 'encoder':
        h_enc = self.encoder(self.gt_questions[-1], ques_len=self.gt_quesLens[-1], imgs=self.images)
        z = self._h2z(self.henc2z, h_enc, inference=inference)
    # c -> z_c; also advances dialog state; (prior method is for a baseline)
    if method in ['policy', 'prior', 'speaker']:
        # Context coder attends over the image pool with the latest query
        # (or nothing on the first round).
        if len(self.dialogHiddens) == 0:
            h_c = self.ctx_coder(self.images, None)
        else:
            h_c = self.ctx_coder(self.images, self.dialogQuerys[-1])
        # NOTE: h_c is not the recurrent
        # add an RNN model here and embed previous answers.
        h_c = self.embedDialog(h_c,
                               self.questions[-1],
                               self.quesLens[-1],
                               self.answers[-1],
                               self.quesOneHot[-1] if self.quesOneHot else None)
        if self.speakerMode:
            # allow evaluation of the policy in speakerMode
            assert method == 'speaker' or not self.training
        if method == 'policy':
            z = self._h2z(self.hc2z, h_c, inference=inference)
        elif method == 'prior':
            z = self._prior(inference=inference)
        elif method == 'speaker':
            # Sample z from the speaker's policy head without tracking grads.
            with torch.no_grad():
                self.speaker.forwardDecode(z_inference=inference,
                                           z_source='policy')
                z = self.speaker.z
            self.latent_states.append(z)
            # allow gradients to flow to z from the dialog rnn
            if self.speaker_type == 'two_part_zgrad':
                _h, _c = self.dialogHiddens[-1]
                self.dialogHiddens[-1] = (_h + self.z2dialog(z[0]), _c)
            elif self.speaker_type == 'two_part_zgrad_logit':
                assert self.varType == 'gumbel', 'just use two_part_zgrad'
                _h, _c = self.dialogHiddens[-1]
                self.dialogHiddens[-1] = (_h + self.zlogit2dialog(z[1][0]), _c)
            elif self.speaker_type == 'two_part_zgrad_codebook':
                assert self.varType == 'gumbel', 'just use two_part_zgrad'
                _h, _c = self.dialogHiddens[-1]
                h_codebook = self._z2hdec(z)
                self.dialogHiddens[-1] = (_h + self.codebook2dialog(h_codebook), _c)
    return z
def forward(self):
    """Training pass: decode the question under both latent sources.

    Returns (encoder logprobs, encoder KL, policy logprobs, policy KL).
    The policy inference call also advances the dialog state, so the call
    order below is significant and must not be rearranged.
    """
    encoder_z = self._infer_z('encoder')
    policy_z = self._infer_z('policy')
    # Reconstruction log-probabilities and prior regularizers, computed in
    # the same order for both latent sources.
    encoder_logprobs = self._decodez(encoder_z)
    encoder_kl = self._klloss(encoder_z, 'prior')
    policy_logprobs = self._decodez(policy_z)
    policy_kl = self._klloss(policy_z, 'prior')
    return encoder_logprobs, encoder_kl, policy_logprobs, policy_kl
def _decodez(self, z):
    """Decode the latest ground-truth question conditioned on latent z.

    Returns per-token log-probabilities from the decoder, using teacher
    forcing with optional word dropout on the decoder input.
    """
    # z -> h_dec
    h_dec = self._z2hdec(z)
    # h_dec -> x
    # decoder input word dropout
    gt_ques = self.gt_questions[-1]
    if self.wordDropoutRate > 0:
        # randomly replace decoder input with <unk>
        prob = torch.rand(gt_ques.size()).type_as(h_dec)
        # never drop padding (0) or the special unk/start/end markers
        prob[(gt_ques == self.unkToken) | (gt_ques == 0) | (gt_ques == self.startToken) | (gt_ques == self.endToken)] = 1
        gt_ques = gt_ques.clone()
        gt_ques[prob < self.wordDropoutRate] = self.unkToken
    logProbs = self.decoder(h_dec, self.images, gt_ques)
    return logProbs
def _prior(self, inference='sample'):
    """Draw a latent code z from the context-free prior.

    inference='sample' draws randomly; any other value returns the
    deterministic mode/mean of the prior distribution.
    """
    device = self.z2hdec.weight.device
    batch = self.images.shape[0]
    if self.varType == 'cont':
        # Standard-normal prior over a continuous latent.
        if inference == 'sample':
            draw = torch.randn(batch, self.latentSize, device=device)
        else:
            draw = torch.zeros(batch, self.latentSize, device=device)
        # NOTE(review): logv of ones (variance e) looks inconsistent with the
        # unit-variance sample above -- confirm downstream usage.
        z = draw, (torch.zeros_like(draw), torch.ones_like(draw))
    elif self.varType == 'gumbel':
        # Uniform categorical prior over K codes for each of V variables.
        K = self.num_embeddings
        V = self.num_vars
        prior_probs = torch.tensor([1 / K] * K,
                                   dtype=torch.float, device=device)
        logprior = torch.log(prior_probs)
        if inference == 'sample':
            one_hots = OneHotCategorical(prior_probs).sample(
                sample_shape=(batch * V,))
        else:
            one_hots = prior_probs.expand(batch * V, -1)
        z = (one_hots.reshape([batch, -1]), logprior)
    elif self.varType == 'none':
        raise Exception('Z has no prior for varType==none')
    return z
def forwardDecode(self, dec_inference='sample', beamSize=1, maxSeqLen=20,
                  z_inference='sample', z_source='encoder'):
    '''
    Decode a sequence (question) using either sampling or greedy inference.
    This can be called after observing necessary context using observe().
    Arguments:
        dec_inference : Inference method for decoding
            'sample' - Sample each word from its softmax distribution
            'greedy' - Always choose the word with highest probability
                       if beam size is 1, otherwise use beam search.
        z_source : Where to get z from?
            'encoder' - Encode the question and image pool into a z (default)
            'policy' - Encode the image pool via context coder
            'prior' - Sample a z from the prior on z (without any context)
            'speaker' - Sample z from the speaker model (must exist)
            A tensor may also be passed and is used directly as the z sample.
        beamSize : Beam search width
        maxSeqLen : Maximum length of token sequence to generate
    Returns (samples, sampleLens, sampleOneHot) from the decoder.
    '''
    # infer decoder initial state
    if torch.is_tensor(z_source):
        # Caller supplied the latent sample directly; no distribution params.
        z = (z_source, None)
    elif z_source in ['policy', 'encoder', 'prior', 'speaker']:
        z = self._infer_z(z_source, inference=z_inference)
    else:
        raise Exception('Unknown z_source {}'.format(z_source))
    h_dec = self._z2hdec(z)
    # decode z
    dec_out = self.decoder.forwardDecode(
        h_dec,
        self.images,
        maxSeqLen=maxSeqLen,
        inference=dec_inference,
        beamSize=beamSize)
    if self.tracking:
        # stash the latent used for this decode so callers can inspect it
        self.z = z
    return dec_out['samples'], dec_out['sampleLens'], dec_out['sampleOneHot']
def predictImage(self):
    '''
    Given the question answer pair, and image feature. predict
    whether the image fit for the QA pair.
    Returns relevance logits -- presumably one per pooled image
    (batch x num_images after the final squeeze); confirm against
    predict_ctx/predict_logit output shapes.
    '''
    assert len(self.dialogHiddens) != 0
    # encode history and dialog recurrent state into query
    hidden = self.dialogHiddens[-1][0].squeeze(0)
    # stack per-round question/answer embeddings along a new rounds axis
    quesFact = torch.cat([embed.unsqueeze(1) for embed in self.dialogQuesEmbedding], dim=1)
    ansFact = torch.cat([embed.unsqueeze(1) for embed in self.dialogAnsEmbedding], dim=1)
    qaFact = self.predict_fact(torch.cat((quesFact, ansFact), dim=2), hidden)
    query = torch.cat([hidden, qaFact], dim=1)
    # use query to attend to pool images and further embed history
    iEmbed = self.predict_ctx(self.images, query)
    qEmbed = self.predict_q(query)
    # final logits
    logit = self.predict_logit(iEmbed * qEmbed.unsqueeze(1).expand_as(iEmbed))
    return logit.squeeze(2)
class dialogRNN(nn.Module):
    """One-step recurrent dialog memory for the questioner.

    Each call consumes the current round's (image, question, answer)
    embeddings plus the previous LSTM state and produces a dropped-out
    hidden output, a gated "query" vector derived from the cell state,
    and the new (h, c) state.
    """

    def __init__(self, params):
        super(dialogRNN, self).__init__()
        self.quesEmbedSize = self.rnnHiddenSize = params['rnnHiddenSize']
        self.numLayers = params['numLayers']
        self.imgEmbedSize = params['imgEmbedSize']
        self.ansEmbedSize = params['ansEmbedSize']
        self.dropout = params['dropout']
        self.speaker_type = params.get('speakerType', 'two_part')
        # 'two_part*' variants also feed the image feature into the cell;
        # 'one_part' conditions on the QA embeddings alone.
        if self.speaker_type.startswith('two_part'):
            self.in_dim = self.ansEmbedSize + self.imgEmbedSize + self.quesEmbedSize
        elif self.speaker_type == 'one_part':
            self.in_dim = self.ansEmbedSize + self.quesEmbedSize
        self.rnn = nn.LSTMCell(self.in_dim, self.rnnHiddenSize)
        # Adaptive-gate projections; the *_2 pair is currently unused but is
        # kept so existing checkpoints keep loading.
        self.i2h_1 = nn.Linear(self.in_dim, self.rnnHiddenSize)
        self.i2h_2 = nn.Linear(self.in_dim, self.rnnHiddenSize)
        self.h2h_1 = nn.Linear(self.rnnHiddenSize, self.rnnHiddenSize)
        self.h2h_2 = nn.Linear(self.rnnHiddenSize, self.rnnHiddenSize)

    def forward(self, img_feat, quesEmbedding, ans_embedding, state):
        """Advance the memory one round; returns (output, query, new_state)."""
        if self.speaker_type.startswith('two_part'):
            cell_input = torch.cat([img_feat, quesEmbedding, ans_embedding], dim=1)
        elif self.speaker_type == 'one_part':
            cell_input = torch.cat([quesEmbedding, ans_embedding], dim=1)
        prev_h, prev_c = state[0], state[1]
        # Adaptive gate conditioned on the input and previous hidden state.
        gate = torch.sigmoid(self.i2h_1(cell_input) + self.h2h_1(prev_h))
        new_h, new_c = self.rnn(cell_input, (prev_h, prev_c))
        output = F.dropout(new_h, self.dropout, self.training)
        query = F.dropout(gate * torch.tanh(new_c), self.dropout,
                          training=self.training)
        return output, query, (new_h, new_c)
| 13,857 | 9,391 | 99 |
7df06d73b995c433c32758eba37f1d2180f4a9f6 | 1,347 | py | Python | rmf_fleet_adapter_python/tests/unit/test_geometry.py | Capstone-S13/rmf_ros2 | 66721dd2ab5a458c050bad154c6a17d8e4b5c8f4 | [
"Apache-2.0"
] | 18 | 2021-03-30T03:03:16.000Z | 2022-03-21T13:48:41.000Z | rmf_fleet_adapter_python/tests/unit/test_geometry.py | Capstone-S13/rmf_ros2 | 66721dd2ab5a458c050bad154c6a17d8e4b5c8f4 | [
"Apache-2.0"
] | 147 | 2021-03-09T09:16:27.000Z | 2022-03-25T11:26:58.000Z | rmf_fleet_adapter_python/tests/unit/test_geometry.py | Capstone-S13/rmf_ros2 | 66721dd2ab5a458c050bad154c6a17d8e4b5c8f4 | [
"Apache-2.0"
] | 20 | 2021-05-21T06:54:58.000Z | 2022-03-18T10:43:01.000Z | import rmf_adapter.geometry as geometry
# CIRCLE ======================================================================
# Check construction
| 35.447368 | 79 | 0.714922 | import rmf_adapter.geometry as geometry
# CIRCLE ======================================================================
# Check construction
def test_circle():
    """Exercise Circle construction, mutation, and finalization."""
    # Construction, including copy construction from another Circle.
    base = geometry.Circle(5)
    duplicate = geometry.Circle(base)
    assert base.radius == 5
    assert duplicate.radius == 5
    # Radius is a writable member on both originals and copies.
    base.radius = 10
    duplicate.radius = 10
    assert base.radius == 10
    assert duplicate.radius == 10
    # Direct helper construction of a finalized convex circle.
    direct = geometry.make_final_convex_circle(5)
    direct_source = direct.source
    assert direct.characteristic_length == 5
    assert direct_source.radius == 5
    # Finalizing via methods on an existing circle.
    finalized = base.finalize()
    finalized_convex = base.finalize_convex()
    src = finalized.source
    # The final shape's .source is a detached copy: mutating it must not
    # feed back into the finalized geometry.
    assert finalized.characteristic_length == 10.0
    src.radius = 1.0
    assert src.radius == 1.0
    assert finalized.characteristic_length == 10.0
    assert finalized_convex.characteristic_length == 10
    assert finalized_convex.source.radius == 10
73d87fb62a1c173a26873ae61c51c04ba3607f3b | 6,884 | py | Python | generate_example_data.py | edawson/bam-matcher | a3bda35b5215a39ce4209e6f1f86dd36e7a98d1a | [
"CC-BY-4.0"
] | null | null | null | generate_example_data.py | edawson/bam-matcher | a3bda35b5215a39ce4209e6f1f86dd36e7a98d1a | [
"CC-BY-4.0"
] | null | null | null | generate_example_data.py | edawson/bam-matcher | a3bda35b5215a39ce4209e6f1f86dd36e7a98d1a | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
'''
Created on 23/02/2016
@author: Paul Wang (ppswang@gmail.com)
Generate example data set from CML samples
- need to combine from multiple samples as a way to annonymise patient ID
'''
import os
import HTSeq
import vcf
import sys
# ==============================
# ==============================
BAMDIR = "/data/sacgf/molpath/data/aligned/CML_WES/bam_files"
BAM_FILES = {}
bam_idx = 0
for f in os.listdir(BAMDIR):
if f.endswith(".bam"):
BAM_FILES[bam_idx] = f
bam_idx += 1
# number of variants to get:
VAR_N = 300
VCF_FILE = "1KG_1500_exon_variants_noX.vcf"
invcf = vcf.Reader(filename=VCF_FILE)
SAM1 = "sample1.sam"
SAM2 = "sample2.sam"
HEADER = """@HD VN:1.4 GO:none SO:coordinate
@SQ SN:1 LN:249250621
@SQ SN:2 LN:243199373
@SQ SN:3 LN:198022430
@SQ SN:4 LN:191154276
@SQ SN:5 LN:180915260
@SQ SN:6 LN:171115067
@SQ SN:7 LN:159138663
@SQ SN:8 LN:146364022
@SQ SN:9 LN:141213431
@SQ SN:10 LN:135534747
@SQ SN:11 LN:135006516
@SQ SN:12 LN:133851895
@SQ SN:13 LN:115169878
@SQ SN:14 LN:107349540
@SQ SN:15 LN:102531392
@SQ SN:16 LN:90354753
@SQ SN:17 LN:81195210
@SQ SN:18 LN:78077248
@SQ SN:19 LN:59128983
@SQ SN:20 LN:63025520
@SQ SN:21 LN:48129895
@SQ SN:22 LN:51304566
@SQ SN:X LN:155270560
@SQ SN:Y LN:59373566
@SQ SN:MT LN:16569
@SQ SN:GL000207.1 LN:4262
@SQ SN:GL000226.1 LN:15008
@SQ SN:GL000229.1 LN:19913
@SQ SN:GL000231.1 LN:27386
@SQ SN:GL000210.1 LN:27682
@SQ SN:GL000239.1 LN:33824
@SQ SN:GL000235.1 LN:34474
@SQ SN:GL000201.1 LN:36148
@SQ SN:GL000247.1 LN:36422
@SQ SN:GL000245.1 LN:36651
@SQ SN:GL000197.1 LN:37175
@SQ SN:GL000203.1 LN:37498
@SQ SN:GL000246.1 LN:38154
@SQ SN:GL000249.1 LN:38502
@SQ SN:GL000196.1 LN:38914
@SQ SN:GL000248.1 LN:39786
@SQ SN:GL000244.1 LN:39929
@SQ SN:GL000238.1 LN:39939
@SQ SN:GL000202.1 LN:40103
@SQ SN:GL000234.1 LN:40531
@SQ SN:GL000232.1 LN:40652
@SQ SN:GL000206.1 LN:41001
@SQ SN:GL000240.1 LN:41933
@SQ SN:GL000236.1 LN:41934
@SQ SN:GL000241.1 LN:42152
@SQ SN:GL000243.1 LN:43341
@SQ SN:GL000242.1 LN:43523
@SQ SN:GL000230.1 LN:43691
@SQ SN:GL000237.1 LN:45867
@SQ SN:GL000233.1 LN:45941
@SQ SN:GL000204.1 LN:81310
@SQ SN:GL000198.1 LN:90085
@SQ SN:GL000208.1 LN:92689
@SQ SN:GL000191.1 LN:106433
@SQ SN:GL000227.1 LN:128374
@SQ SN:GL000228.1 LN:129120
@SQ SN:GL000214.1 LN:137718
@SQ SN:GL000221.1 LN:155397
@SQ SN:GL000209.1 LN:159169
@SQ SN:GL000218.1 LN:161147
@SQ SN:GL000220.1 LN:161802
@SQ SN:GL000213.1 LN:164239
@SQ SN:GL000211.1 LN:166566
@SQ SN:GL000199.1 LN:169874
@SQ SN:GL000217.1 LN:172149
@SQ SN:GL000216.1 LN:172294
@SQ SN:GL000215.1 LN:172545
@SQ SN:GL000205.1 LN:174588
@SQ SN:GL000219.1 LN:179198
@SQ SN:GL000224.1 LN:179693
@SQ SN:GL000223.1 LN:180455
@SQ SN:GL000195.1 LN:182896
@SQ SN:GL000212.1 LN:186858
@SQ SN:GL000222.1 LN:186861
@SQ SN:GL000200.1 LN:187035
@SQ SN:GL000193.1 LN:189789
@SQ SN:GL000194.1 LN:191469
@SQ SN:GL000225.1 LN:211173
@SQ SN:GL000192.1 LN:547496
@SQ SN:NC_007605 LN:171823
"""
# write header
sam1_out = open(SAM1, "w")
sam2_out = open(SAM2, "w")
sam1_out.write(HEADER)
sam1_out.write("@RG\tID:sample1\tSM:sample1\tPL:Illumina\n")
sam2_out.write(HEADER)
sam2_out.write("@RG\tID:sample2\tSM:sample2\tPL:Illumina\n")
sample_idx = 0
sample_N = len(BAM_FILES)
var_ct = 0
for var in invcf:
# write SAM1
print var.CHROM, var.POS, var.REF, var.ALT
SAM1_done = False
SAM2_done = False
inbam1 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
inbam2 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
SAM1_reads = []
SAM2_reads = []
for read in inbam1.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM1_reads.append(read)
for read in inbam2.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM2_reads.append(read)
# don't write anything if neither samples are sufficiently covered
if len(SAM1_reads) < 10 or len(SAM2_reads) < 10:
continue
if len(SAM1_reads) > 100 or len(SAM2_reads) > 100:
continue
print "sample 1 writing %d reads" % len(SAM1_reads)
for ct, read in enumerate(SAM1_reads):
# print "%d/%d" % (ct+1, len(SAM1_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM1_reads)))
sys.stdout.flush()
# need to replace read group
sam1_out.write(change_RG(read, "sample1") + "\n")
# bits = read.get_sam_line().split("\t")
# for i in range(len(bits)):
# if bits[i].startswith("RG:"):
# bits[i] = "RG:Z:sample1"
# sam1_out.write("\t".join(bits) + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam1.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam1_out.write(change_RG(read2, "sample1") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
print "\b\b\b\b\b\b\b\b\b\b\b\bsample 2 writing %d reads" % len(SAM2_reads)
for ct, read in enumerate(SAM2_reads):
# print "%d/%d" % (ct+1, len(SAM2_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM2_reads)))
sys.stdout.flush()
# need to replace read group
sam2_out.write(change_RG(read, "sample2") + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam2.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam2_out.write(change_RG(read2, "sample2") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
var_ct += 1
print "\b\b\b\b\b\b\b\b\b\b\b\bwrote %d sites" % var_ct
if var_ct >= VAR_N:
break
sam1_out.close()
sam2_out.close()
os.system("samtools view -bS sample1.sam | samtools sort - > sample1.bam")
os.system("samtools index sample1.bam")
os.system("samtools view -bS sample2.sam | samtools sort - > sample2.bam")
os.system("samtools index sample2.bam")
| 27.426295 | 104 | 0.643812 | #!/usr/bin/env python
'''
Created on 23/02/2016
@author: Paul Wang (ppswang@gmail.com)
Generate example data set from CML samples
- need to combine from multiple samples as a way to annonymise patient ID
'''
import os
import HTSeq
import vcf
import sys
# ==============================
def change_RG(aligned_read, new_RG):
    """Return the read's SAM line with its first RG tag rewritten to new_RG."""
    fields = aligned_read.get_sam_line().split("\t")
    for idx, field in enumerate(fields):
        if field.startswith("RG:"):
            fields[idx] = "RG:Z:%s" % new_RG
            break
    return "\t".join(fields)
# ==============================
BAMDIR = "/data/sacgf/molpath/data/aligned/CML_WES/bam_files"
BAM_FILES = {}
bam_idx = 0
for f in os.listdir(BAMDIR):
if f.endswith(".bam"):
BAM_FILES[bam_idx] = f
bam_idx += 1
# number of variants to get:
VAR_N = 300
VCF_FILE = "1KG_1500_exon_variants_noX.vcf"
invcf = vcf.Reader(filename=VCF_FILE)
SAM1 = "sample1.sam"
SAM2 = "sample2.sam"
HEADER = """@HD VN:1.4 GO:none SO:coordinate
@SQ SN:1 LN:249250621
@SQ SN:2 LN:243199373
@SQ SN:3 LN:198022430
@SQ SN:4 LN:191154276
@SQ SN:5 LN:180915260
@SQ SN:6 LN:171115067
@SQ SN:7 LN:159138663
@SQ SN:8 LN:146364022
@SQ SN:9 LN:141213431
@SQ SN:10 LN:135534747
@SQ SN:11 LN:135006516
@SQ SN:12 LN:133851895
@SQ SN:13 LN:115169878
@SQ SN:14 LN:107349540
@SQ SN:15 LN:102531392
@SQ SN:16 LN:90354753
@SQ SN:17 LN:81195210
@SQ SN:18 LN:78077248
@SQ SN:19 LN:59128983
@SQ SN:20 LN:63025520
@SQ SN:21 LN:48129895
@SQ SN:22 LN:51304566
@SQ SN:X LN:155270560
@SQ SN:Y LN:59373566
@SQ SN:MT LN:16569
@SQ SN:GL000207.1 LN:4262
@SQ SN:GL000226.1 LN:15008
@SQ SN:GL000229.1 LN:19913
@SQ SN:GL000231.1 LN:27386
@SQ SN:GL000210.1 LN:27682
@SQ SN:GL000239.1 LN:33824
@SQ SN:GL000235.1 LN:34474
@SQ SN:GL000201.1 LN:36148
@SQ SN:GL000247.1 LN:36422
@SQ SN:GL000245.1 LN:36651
@SQ SN:GL000197.1 LN:37175
@SQ SN:GL000203.1 LN:37498
@SQ SN:GL000246.1 LN:38154
@SQ SN:GL000249.1 LN:38502
@SQ SN:GL000196.1 LN:38914
@SQ SN:GL000248.1 LN:39786
@SQ SN:GL000244.1 LN:39929
@SQ SN:GL000238.1 LN:39939
@SQ SN:GL000202.1 LN:40103
@SQ SN:GL000234.1 LN:40531
@SQ SN:GL000232.1 LN:40652
@SQ SN:GL000206.1 LN:41001
@SQ SN:GL000240.1 LN:41933
@SQ SN:GL000236.1 LN:41934
@SQ SN:GL000241.1 LN:42152
@SQ SN:GL000243.1 LN:43341
@SQ SN:GL000242.1 LN:43523
@SQ SN:GL000230.1 LN:43691
@SQ SN:GL000237.1 LN:45867
@SQ SN:GL000233.1 LN:45941
@SQ SN:GL000204.1 LN:81310
@SQ SN:GL000198.1 LN:90085
@SQ SN:GL000208.1 LN:92689
@SQ SN:GL000191.1 LN:106433
@SQ SN:GL000227.1 LN:128374
@SQ SN:GL000228.1 LN:129120
@SQ SN:GL000214.1 LN:137718
@SQ SN:GL000221.1 LN:155397
@SQ SN:GL000209.1 LN:159169
@SQ SN:GL000218.1 LN:161147
@SQ SN:GL000220.1 LN:161802
@SQ SN:GL000213.1 LN:164239
@SQ SN:GL000211.1 LN:166566
@SQ SN:GL000199.1 LN:169874
@SQ SN:GL000217.1 LN:172149
@SQ SN:GL000216.1 LN:172294
@SQ SN:GL000215.1 LN:172545
@SQ SN:GL000205.1 LN:174588
@SQ SN:GL000219.1 LN:179198
@SQ SN:GL000224.1 LN:179693
@SQ SN:GL000223.1 LN:180455
@SQ SN:GL000195.1 LN:182896
@SQ SN:GL000212.1 LN:186858
@SQ SN:GL000222.1 LN:186861
@SQ SN:GL000200.1 LN:187035
@SQ SN:GL000193.1 LN:189789
@SQ SN:GL000194.1 LN:191469
@SQ SN:GL000225.1 LN:211173
@SQ SN:GL000192.1 LN:547496
@SQ SN:NC_007605 LN:171823
"""
# write header
sam1_out = open(SAM1, "w")
sam2_out = open(SAM2, "w")
sam1_out.write(HEADER)
sam1_out.write("@RG\tID:sample1\tSM:sample1\tPL:Illumina\n")
sam2_out.write(HEADER)
sam2_out.write("@RG\tID:sample2\tSM:sample2\tPL:Illumina\n")
sample_idx = 0
sample_N = len(BAM_FILES)
var_ct = 0
for var in invcf:
# write SAM1
print var.CHROM, var.POS, var.REF, var.ALT
SAM1_done = False
SAM2_done = False
inbam1 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
inbam2 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
SAM1_reads = []
SAM2_reads = []
for read in inbam1.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM1_reads.append(read)
for read in inbam2.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM2_reads.append(read)
# don't write anything if neither samples are sufficiently covered
if len(SAM1_reads) < 10 or len(SAM2_reads) < 10:
continue
if len(SAM1_reads) > 100 or len(SAM2_reads) > 100:
continue
print "sample 1 writing %d reads" % len(SAM1_reads)
for ct, read in enumerate(SAM1_reads):
# print "%d/%d" % (ct+1, len(SAM1_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM1_reads)))
sys.stdout.flush()
# need to replace read group
sam1_out.write(change_RG(read, "sample1") + "\n")
# bits = read.get_sam_line().split("\t")
# for i in range(len(bits)):
# if bits[i].startswith("RG:"):
# bits[i] = "RG:Z:sample1"
# sam1_out.write("\t".join(bits) + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam1.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam1_out.write(change_RG(read2, "sample1") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
print "\b\b\b\b\b\b\b\b\b\b\b\bsample 2 writing %d reads" % len(SAM2_reads)
for ct, read in enumerate(SAM2_reads):
# print "%d/%d" % (ct+1, len(SAM2_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM2_reads)))
sys.stdout.flush()
# need to replace read group
sam2_out.write(change_RG(read, "sample2") + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam2.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam2_out.write(change_RG(read2, "sample2") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
var_ct += 1
print "\b\b\b\b\b\b\b\b\b\b\b\bwrote %d sites" % var_ct
if var_ct >= VAR_N:
break
sam1_out.close()
sam2_out.close()
os.system("samtools view -bS sample1.sam | samtools sort - > sample1.bam")
os.system("samtools index sample1.bam")
os.system("samtools view -bS sample2.sam | samtools sort - > sample2.bam")
os.system("samtools index sample2.bam")
| 221 | 0 | 22 |
0479170dcd3ba00d379d3aa46efda05f56271113 | 35,024 | py | Python | api/src/events/test_events.py | quiteqiang/corpus-christi | 3fb89030d738d7887c984435dffe3951c5e8772f | [
"MIT"
] | null | null | null | api/src/events/test_events.py | quiteqiang/corpus-christi | 3fb89030d738d7887c984435dffe3951c5e8772f | [
"MIT"
] | null | null | null | api/src/events/test_events.py | quiteqiang/corpus-christi | 3fb89030d738d7887c984435dffe3951c5e8772f | [
"MIT"
] | null | null | null | import datetime
import random
import pytest
from flask import url_for
from .create_event_data import flip, fake, create_multiple_events, event_object_factory, \
create_multiple_assets, create_multiple_teams, create_events_assets, create_events_teams, \
create_events_persons, create_events_participants, create_event_images
from .models import Event, EventPerson, EventAsset, EventParticipant, \
EventTeam
from ..assets.models import Asset
from ..images.create_image_data import create_test_images, create_images_events
from ..images.models import Image, ImageEvent
from ..people.models import Person
from ..people.test_people import create_multiple_people
from ..places.models import Country
from ..places.test_places import create_multiple_locations, create_multiple_addresses, create_multiple_areas
from ..teams.models import Team
# ---- Event
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (asset <-> event)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> team)
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> person)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> participant)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
| 40.443418 | 117 | 0.695066 | import datetime
import random
import pytest
from flask import url_for
from .create_event_data import flip, fake, create_multiple_events, event_object_factory, \
create_multiple_assets, create_multiple_teams, create_events_assets, create_events_teams, \
create_events_persons, create_events_participants, create_event_images
from .models import Event, EventPerson, EventAsset, EventParticipant, \
EventTeam
from ..assets.models import Asset
from ..images.create_image_data import create_test_images, create_images_events
from ..images.models import Image, ImageEvent
from ..people.models import Person
from ..people.test_people import create_multiple_people
from ..places.models import Country
from ..places.test_places import create_multiple_locations, create_multiple_addresses, create_multiple_areas
from ..teams.models import Team
# ---- Event
def generate_locations(auth_client):
    """Seed the places tables (countries, areas, addresses, locations)
    that event fixtures depend on; call order matters because each table
    references the previous one."""
    Country.load_from_file()
    create_multiple_areas(auth_client.sqla, 1)
    create_multiple_addresses(auth_client.sqla, 1)
    create_multiple_locations(auth_client.sqla, 2)
@pytest.mark.smoke
def test_create_event(auth_client):
    """POSTing N well-formed events should persist exactly N rows."""
    # GIVEN an empty database
    # WHEN a random number of events is created through the API
    total = random.randint(5, 15)
    for _ in range(total):
        payload = event_object_factory(auth_client.sqla)
        resp = auth_client.post(url_for('events.create_event'), json=payload)
        assert resp.status_code == 201
    # THEN the database holds exactly that many events
    assert auth_client.sqla.query(Event).count() == total
@pytest.mark.smoke
def test_create_invalid_event(auth_client):
    """Events missing a required field must be rejected with a 422."""
    # GIVEN an empty database
    # WHEN several events are submitted, each with one required field nulled
    attempts = random.randint(5, 15)
    for _ in range(attempts):
        bad_event = event_object_factory(auth_client.sqla)
        # Null out exactly one mandatory field, chosen at random.
        if flip():
            bad_event['title'] = None
        elif flip():
            bad_event['start'] = None
        else:
            bad_event['end'] = None
        resp = auth_client.post(url_for('events.create_event'), json=bad_event)
        # THEN each request is rejected as unprocessable
        assert resp.status_code == 422
    # AND nothing was persisted
    assert auth_client.sqla.query(Event).count() == 0
@pytest.mark.smoke
def test_read_all_events(auth_client):
    """Reading all events should return exactly the active events."""
    # GIVEN a database with some events (the factory may mark some inactive)
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)
    # WHEN we read all active events
    resp = auth_client.get(url_for('events.read_all_events'))
    # THEN we expect the right status code
    assert resp.status_code == 200
    events = auth_client.sqla.query(Event).all()
    inactive = sum(1 for event in events if not event.active)
    # THEN the database has the same number of events as we created
    assert len(events) == count
    # THEN the response contains only the active events
    assert len(resp.json) == count - inactive
    # THEN each active event's title matches the corresponding response entry.
    # (BUG FIX: the loop previously ranged over only count - inactive indices,
    # so active events positioned after an inactive one were never checked.)
    j = 0  # index into the response array
    for event in events:
        if event.active:
            assert resp.json[j]['title'] == event.title
            j += 1
    # every response entry was matched against an active database row
    assert j == len(resp.json)
@pytest.mark.smoke
def test_read_all_events_with_query(auth_client):
    """Fuzz the read-all endpoint with random query-string combinations and
    check each returned event satisfies every filter that was sent."""
    # GIVEN some existing events
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)
    for _ in range(15):
        # WHEN queried for all events matching a flag
        query_string = dict()
        if flip():
            query_string['return_group'] = 'inactive'
        else:
            query_string['return_group'] = 'both'
        if flip():
            query_string['start'] = datetime.datetime.now().strftime(
                '%Y-%m-%d')
        if flip():
            query_string['end'] = datetime.datetime.now().strftime('%Y-%m-%d')
        if flip():
            query_string['title'] = 'c'
        if flip():
            query_string['location_id'] = 1
        if flip():
            query_string['include_assets'] = 1
        # exactly one sort key is always chosen, optionally descending
        if flip():
            query_string['sort'] = 'start'
        elif flip():
            query_string['sort'] = 'end'
        else:
            query_string['sort'] = 'title'
        if flip():
            query_string['sort'] += '_desc'
        # THEN the response should match those flags
        resp = auth_client.get(
            url_for('events.read_all_events'), query_string=query_string)
        assert resp.status_code == 200
        for event in resp.json:
            if 'return_group' in query_string:
                if query_string['return_group'] == 'inactive':
                    assert not event['active']
                else:
                    # NOTE(review): return_group 'both' should allow inactive
                    # events too, yet this asserts active -- confirm the API's
                    # 'both' semantics or relax this check.
                    assert event['active']
            if 'start' in query_string:
                # response timestamps are ISO strings; compare the date part
                assert datetime.datetime.strptime(event['start'][:event['start'].index(
                    'T')], '%Y-%m-%d') >= datetime.datetime.strptime(query_string['start'], '%Y-%m-%d')
            if 'end' in query_string:
                assert datetime.datetime.strptime(event['end'][:event['end'].index(
                    'T')], '%Y-%m-%d') <= datetime.datetime.strptime(query_string['end'], '%Y-%m-%d')
            if 'title' in query_string:
                assert query_string['title'].lower() in event['title'].lower()
            if 'location_id' in query_string:
                assert event['location_id'] == query_string['location_id']
@pytest.mark.smoke
def test_read_one_event(auth_client):
    """Each stored event should be retrievable individually by id."""
    # GIVEN a database with a number of events
    how_many = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, how_many)
    # WHEN each event is requested by id
    for stored in auth_client.sqla.query(Event).all():
        resp = auth_client.get(
            url_for('events.read_one_event', event_id=stored.id))
        # THEN the response succeeds and matches the stored row
        assert resp.status_code == 200
        assert resp.json['title'] == stored.title
        # Date-times come back in a slightly different format, so the start
        # timestamp is deliberately not compared literally here.
@pytest.mark.smoke
def test_read_one_missing_event(auth_client):
    """Requesting a nonexistent event should yield a 404."""
    # GIVEN an empty database
    # WHEN any specific event is requested
    resp = auth_client.get(url_for('events.read_one_event', event_id=1))
    # THEN the response reports not-found
    assert resp.status_code == 404
@pytest.mark.smoke
def test_replace_event(auth_client):
    """PUTting a full event payload should overwrite the stored event."""
    # GIVEN a database with a number of events
    seeded = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, seeded)
    # WHEN the first event is replaced with a fresh payload
    target = auth_client.sqla.query(Event).first()
    replacement = {
        'title': fake.word(),
        'start': str(fake.future_datetime(end_date="+6h")),
        'end': str(fake.date_time_between(start_date="+6h", end_date="+1d", tzinfo=None)),
        'active': flip()
    }
    resp = auth_client.put(
        url_for('events.replace_event', event_id=target.id), json=replacement)
    # THEN the request succeeds
    assert resp.status_code == 200
    # AND the returned event carries the replacement content under the same id
    assert resp.json['id'] == target.id
    assert resp.json['title'] == replacement['title']
    assert resp.json['active'] == replacement['active']
@pytest.mark.smoke
def test_replace_invalid_event(auth_client):
    """PUT a replacement missing a required field; expect 422 and the stored event unchanged."""
    # GIVEN a database with events
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)

    # WHEN we attempt to edit an invalid event
    original_event = auth_client.sqla.query(Event).first()
    modified_event = event_object_factory(auth_client.sqla)
    # Null out exactly one of the required fields at random
    if flip():
        modified_event['title'] = None
    elif flip():
        modified_event['start'] = None
    else:
        modified_event['end'] = None
    resp = auth_client.put(url_for(
        'events.replace_event', event_id=original_event.id), json=modified_event)

    # THEN the response should have the correct code
    assert resp.status_code == 422

    # AND the event should be unchanged
    new_event = auth_client.sqla.query(Event).filter(
        Event.id == original_event.id).first()
    assert new_event.title == original_event.title
    assert new_event.start == original_event.start
    assert new_event.end == original_event.end
@pytest.mark.smoke
def test_update_event(auth_client):
    """PATCH a random subset of fields on one event; expect 200 and each patched field echoed."""
    # GIVEN a database with a number of events
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)

    # WHEN we update one event
    event = auth_client.sqla.query(Event).first()

    payload = {}
    new_event = event_object_factory(auth_client.sqla)

    # One coin flip per patchable field; only flipped fields go into the payload
    flips = (flip(), flip(), flip(), flip(), flip(), flip())

    if flips[0]:
        payload['title'] = new_event['title']
    if flips[1]:
        payload['start'] = new_event['start']
    if flips[2]:
        payload['end'] = new_event['end']
    if flips[3]:
        payload['active'] = new_event['active']
    if flips[4] and 'description' in new_event.keys():
        payload['description'] = new_event['description']
    if flips[5] and 'location_id' in new_event.keys():
        payload['location_id'] = new_event['location_id']

    resp = auth_client.patch(
        url_for('events.update_event', event_id=event.id), json=payload)

    # THEN we assume the correct status code
    assert resp.status_code == 200

    # THEN we assume the correct content in the returned object
    if flips[0]:
        assert resp.json['title'] == payload['title']
    if flips[1]:
        # Server returns ISO-8601 with an explicit UTC offset
        assert resp.json['start'] == payload['start'].replace(
            ' ', 'T') + "+00:00"
    if flips[2]:
        assert resp.json['end'] == payload['end'].replace(' ', 'T') + "+00:00"
    if flips[3]:
        assert resp.json['active'] == payload['active']
    if flips[4] and 'description' in new_event.keys():
        assert resp.json['description'] == payload['description']
    if flips[5] and 'location_id' in new_event.keys():
        # NOTE(review): response key is 'location' while the payload key is
        # 'location_id' — confirm this matches the serializer schema
        assert resp.json['location'] == payload['location_id']
@pytest.mark.smoke
def test_update_invalid_event(auth_client):
    """PATCH an event with a required field nulled out; expect 422 and the stored event unchanged."""
    # GIVEN a database with events
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)

    # WHEN we attempt to edit an invalid event
    original_event = auth_client.sqla.query(Event).first()
    modified_event = event_object_factory(auth_client.sqla)
    # Null out exactly one of the required fields at random
    if flip():
        modified_event['title'] = None
    elif flip():
        modified_event['start'] = None
    else:
        modified_event['end'] = None
    resp = auth_client.patch(
        url_for('events.update_event', event_id=original_event.id), json=modified_event)

    # THEN the response should have the correct code
    assert resp.status_code == 422

    # AND the event should be unchanged
    new_event = auth_client.sqla.query(Event).filter(
        Event.id == original_event.id).first()
    assert new_event.title == original_event.title
    assert new_event.start == original_event.start
    assert new_event.end == original_event.end
@pytest.mark.smoke
def test_update_missing_event(auth_client):
    """PATCH an event in an empty database; expect 404."""
    # GIVEN an empty database
    # WHEN we attempt to edit an event
    event = event_object_factory(auth_client.sqla)
    resp = auth_client.patch(
        url_for('events.update_event', event_id=1), json=event)
    # THEN the response code should be correct
    assert resp.status_code == 404
@pytest.mark.smoke
def test_delete_event(auth_client):
    """DELETE (deactivate) a random subset of events; expect 204 each time and the active count to drop accordingly."""
    # GIVEN a database with a number of events
    count = random.randint(3, 11)
    create_multiple_events(auth_client.sqla, count)

    # WHEN we deactivate a number of them
    events = auth_client.sqla.query(Event).all()

    deleted = 0
    for event in events:
        if flip() and event.active:
            resp = auth_client.delete(
                url_for('events.delete_event', event_id=event.id))

            # THEN we assume the correct status code
            assert resp.status_code == 204
            deleted += 1
        elif not event.active:
            # Seeded inactive events also count against the active total
            deleted += 1

    # THEN we assume the number of active events in the database to be the correct number
    new_events = auth_client.sqla.query(Event).filter(Event.active).all()
    assert len(new_events) == count - deleted
@pytest.mark.smoke
def test_delete_invalid_event(auth_client):
    """DELETE an event in an empty database; expect 404 and the database to stay empty."""
    # GIVEN an empty database
    # WHEN a delete request is sent
    resp = auth_client.delete(url_for('events.delete_event', event_id=1))
    # THEN the response code should be correct
    assert resp.status_code == 404
    # AND the database should still be empty
    new_events = auth_client.sqla.query(Event).filter(Event.active).all()
    assert len(new_events) == 0
# ---- Linking tables (asset <-> event)
@pytest.mark.smoke
def test_add_asset_to_event(auth_client):
    """PUT asset<->event links for random pairs; expect 404 for missing rows, 422 on schedule overlap, else 200."""
    # GIVEN a database with some events and assets
    generate_locations(auth_client)
    count_assets = random.randint(15, 20)
    count_events = random.randint(3, 5)
    create_multiple_assets(auth_client.sqla, count_assets)
    create_multiple_events(auth_client.sqla, count_events)

    # WHEN we create an asset to an event
    for _ in range(count_assets):
        test_asset_id = random.randint(1, count_assets)
        # The event id may intentionally fall out of range to exercise the 404 branch
        test_event_id = random.randint(1, count_events + 1)
        test_asset = auth_client.sqla.query(Asset).filter(
            Asset.id == test_asset_id).first()
        test_event = auth_client.sqla.query(Event).filter(
            Event.id == test_event_id).first()
        test_asset_events = auth_client.sqla.query(EventAsset).filter_by(
            asset_id=test_asset_id).join(Event).all()

        resp = auth_client.put(url_for(
            'events.add_asset_to_event', asset_id=test_asset_id, event_id=test_event_id))

        if not test_event or not test_asset:
            assert resp.status_code == 404
            continue
        if event_overlap(test_event, test_asset_events):
            assert resp.status_code == 422
            continue

        # THEN we expect the right status code
        assert resp.status_code == 200
def event_overlap(test_event, test_asset_events):
    """Return True if test_event's time span overlaps any event already booked in test_asset_events.

    Overlap is half-open: sharing only an endpoint (one event ends exactly
    when the other starts) does not count as a conflict.
    """
    for asset_event in test_asset_events:
        # test for overlap with existing events
        if test_event.start <= asset_event.event.start < test_event.end \
                or asset_event.event.start <= test_event.start < asset_event.event.end \
                or test_event.start < asset_event.event.end <= test_event.end \
                or asset_event.event.start < test_event.end <= asset_event.event.end:
            return True
    return False
@pytest.mark.smoke
def test_add_asset_to_invalid_event(auth_client):
    """Link an asset to a nonexistent event; expect 404 and no linking row created."""
    # GIVEN a database with some events and assets
    generate_locations(auth_client)
    create_multiple_assets(auth_client.sqla, 1)
    create_multiple_events(auth_client.sqla, 1)

    # WHEN we create an asset to an event that doesn't exist
    invalid_event_id = auth_client.sqla.query(Event.id).first()[0] + 1
    asset_id = auth_client.sqla.query(Asset.id).first()[0]

    url = url_for('events.add_asset_to_event',
                  event_id=invalid_event_id, asset_id=asset_id)
    resp = auth_client.post(url)

    # THEN we expect the right status code
    assert resp.status_code == 404

    # THEN we don't expect the entry in the database's linking table
    queried_event_asset_count = auth_client.sqla.query(EventAsset).filter(
        EventAsset.event_id == invalid_event_id, EventAsset.asset_id == asset_id).count()
    assert queried_event_asset_count == 0
@pytest.mark.smoke
def test_add_booked_asset_to_event(auth_client):
    """Link an asset already booked for an event; expect 422 and no duplicate link row."""
    # GIVEN a database with some events and assets linked
    generate_locations(auth_client)
    create_multiple_assets(auth_client.sqla, 1)
    create_multiple_events(auth_client.sqla, 2)
    create_events_assets(auth_client.sqla, 1)

    event_id = auth_client.sqla.query(Event.id).first()[0]
    asset_id = auth_client.sqla.query(Asset.id).first()[0]
    queried_event_asset_count = auth_client.sqla.query(EventAsset).filter(
        EventAsset.event_id == event_id, EventAsset.asset_id == asset_id).count()

    # WHEN we create an asset to an event
    url = url_for('events.add_asset_to_event',
                  event_id=event_id, asset_id=asset_id)
    resp = auth_client.post(url)

    # THEN we expect the right status code
    assert resp.status_code == 422

    # THEN we expect the entry not to be duplicated in the database's linking table
    new_queried_event_asset_count = auth_client.sqla.query(EventAsset).filter(
        EventAsset.event_id == event_id, EventAsset.asset_id == asset_id).count()
    assert queried_event_asset_count == new_queried_event_asset_count
@pytest.mark.smoke
def test_remove_asset_from_event(auth_client):
    """Unlink a booked asset; expect 204 and one fewer link row, then 404 on a repeat delete."""
    # GIVEN a database with some linked events and assets
    generate_locations(auth_client)
    create_multiple_assets(auth_client.sqla, 5)
    create_multiple_events(auth_client.sqla, 5)
    create_events_assets(auth_client.sqla, 1)
    link_count = auth_client.sqla.query(EventAsset).count()

    # WHEN we unlink an asset from an event
    event_id = auth_client.sqla.query(Event.id).first()[0]
    asset_id = auth_client.sqla.query(Asset.id).first()[0]
    url = url_for('events.remove_asset_from_event',
                  event_id=event_id, asset_id=asset_id)
    resp = auth_client.delete(url)

    # THEN we expect the right status code
    assert resp.status_code == 204

    # THEN we expect the number of entries in the database's linking table to be one less
    new_link_count = auth_client.sqla.query(EventAsset).count()
    assert new_link_count == link_count - 1

    # WHEN we unlink the same asset
    resp = auth_client.delete(url)

    # THEN We expect an error
    assert resp.status_code == 404
@pytest.mark.smoke
def test_remove_unbooked_asset_from_event(auth_client):
    """Unlink an asset from an event id with no booking; expect 404 and the link count unchanged."""
    # GIVEN a database with some linked events and assets
    generate_locations(auth_client)
    create_multiple_assets(auth_client.sqla, 5)
    create_multiple_events(auth_client.sqla, 5)
    create_events_assets(auth_client.sqla, 1)
    link_count = auth_client.sqla.query(EventAsset).count()

    # WHEN we unlink an asset from an event that does not exist
    invalid_event_id = 30
    asset_id = auth_client.sqla.query(Asset.id).first()[0]
    url = url_for('events.remove_asset_from_event',
                  event_id=invalid_event_id, asset_id=asset_id)
    resp = auth_client.delete(url)

    # THEN we expect the right status code
    assert resp.status_code == 404

    # THEN we expect the number of entries in the database's linking table to be unchanged
    new_link_count = auth_client.sqla.query(EventAsset).count()
    assert new_link_count == link_count
# ---- Linking tables (event <-> team)
@pytest.mark.smoke
def test_add_event_team(auth_client):
    """Link a team to an event; expect 404 for a missing event, 200 on success, 422 on a duplicate link."""
    # GIVEN a database with only some teams
    create_multiple_teams(auth_client.sqla, 5)
    team_id = auth_client.sqla.query(Team.id).first()[0]

    # WHEN we try to link a non-existent event to a team
    resp = auth_client.post(
        url_for('events.add_event_team', event_id=1, team_id=team_id))
    # THEN we expect an error code
    assert resp.status_code == 404

    # GIVEN a database with some unlinked events and teams
    create_multiple_events(auth_client.sqla, 5)
    event_id = auth_client.sqla.query(Event.id).first()[0]

    # WHEN we link a team with an event
    resp = auth_client.post(
        url_for('events.add_event_team', event_id=event_id, team_id=team_id))
    # THEN we expect the right status code
    assert resp.status_code == 200
    # THEN we expect the correct count of linked event and team in the database
    count = auth_client.sqla.query(EventTeam).filter(
        EventTeam.event_id == event_id, EventTeam.team_id == team_id).count()
    assert count == 1

    # WHEN we link the same team again
    resp = auth_client.post(
        url_for('events.add_event_team', event_id=event_id, team_id=team_id))
    # THEN we expect an error status code
    assert resp.status_code == 422
@pytest.mark.smoke
def test_delete_event_team(auth_client):
    """Unlink a team from an event; expect 204, the link row gone, then 404 on a repeat delete."""
    # GIVEN a database with some linked events and teams
    create_multiple_events(auth_client.sqla, 5)
    create_multiple_teams(auth_client.sqla, 5)
    create_events_teams(auth_client.sqla, 1)
    event_team = auth_client.sqla.query(EventTeam).first()
    count = auth_client.sqla.query(EventTeam).count()

    # WHEN we unlink a team from an event
    resp = auth_client.delete(url_for(
        'events.delete_event_team', event_id=event_team.event_id, team_id=event_team.team_id))
    # THEN we expect the right status code
    assert resp.status_code == 204
    # THEN we expect the linkage to be absent in the database
    assert 0 == auth_client.sqla.query(EventTeam).filter(
        EventTeam.event_id == event_team.event_id, EventTeam.team_id == event_team.team_id).count()
    # THEN We expect the correct count of link in the database
    new_count = auth_client.sqla.query(EventTeam).count()
    assert count - 1 == new_count

    # WHEN we unlink the same team again
    resp = auth_client.delete(url_for(
        'events.delete_event_team', event_id=event_team.event_id, team_id=event_team.team_id))
    # THEN we expect an error
    assert resp.status_code == 404
# ---- Linking tables (event <-> person)
@pytest.mark.smoke
def test_add_event_persons(auth_client):
    """Link a person to an event; expect 404 for a missing event, 422 without a description or on a duplicate, else 200."""
    description = fake.sentences(nb=1)[0]
    payload = {
        'description': description
    }

    # GIVEN a database with only some persons
    create_multiple_people(auth_client.sqla, 5)
    person_id = auth_client.sqla.query(Person.id).first()[0]

    # WHEN we try to link a non-existent event to a person
    resp = auth_client.post(url_for(
        'events.add_event_persons', event_id=1, person_id=person_id), json=payload)
    # THEN we expect an error code
    assert resp.status_code == 404

    # GIVEN a database with some unlinked events and persons
    create_multiple_events(auth_client.sqla, 5)
    event_id = auth_client.sqla.query(Event.id).first()[0]

    # WHEN we try to make a link without description
    resp = auth_client.post(
        url_for('events.add_event_persons', event_id=1, person_id=person_id))
    # THEN we expect an error code
    assert resp.status_code == 422

    # WHEN we link a person with an event
    resp = auth_client.post(url_for(
        'events.add_event_persons', event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect the right status code
    assert resp.status_code == 200
    # THEN we expect the correct count of linked event and person in the database
    count = auth_client.sqla.query(EventPerson).filter(
        EventPerson.event_id == event_id, EventPerson.person_id == person_id).count()
    assert count == 1

    # WHEN we link the same person again
    resp = auth_client.post(url_for(
        'events.add_event_persons', event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect an error status code
    assert resp.status_code == 422
@pytest.mark.smoke
def test_modify_event_person(auth_client):
    """PATCH an event-person link; expect 404 when unlinked, 422 without a payload, else 200 with the description updated."""
    description = fake.sentences(nb=1)[0]
    payload = {
        'description': description
    }

    # GIVEN a database with unlinked events and persons
    create_multiple_events(auth_client.sqla, 5)
    create_multiple_people(auth_client.sqla, 5)

    # WHEN we try to modify a person not associated with an event
    event_id = auth_client.sqla.query(Event.id).first()[0]
    person_id = auth_client.sqla.query(Person.id).first()[0]
    resp = auth_client.patch(url_for(
        'events.modify_event_person', event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect an error
    assert resp.status_code == 404

    # GIVEN a database with some linked events and persons
    create_events_persons(auth_client.sqla, 1)
    event_person = auth_client.sqla.query(EventPerson).first()

    # WHEN we try to modify an event_person without a payload
    resp = auth_client.patch(url_for('events.modify_event_person',
                                     event_id=event_person.event_id, person_id=event_person.person_id))
    # THEN we expect the error code
    assert resp.status_code == 422

    # WHEN we modify an event_person
    resp = auth_client.patch(url_for('events.modify_event_person',
                                     event_id=event_person.event_id, person_id=event_person.person_id), json=payload)
    # THEN we expect the correct code
    assert resp.status_code == 200
    # THEN we expect the event_person to be modified
    queried_description = auth_client.sqla.query(EventPerson.description).filter(
        EventPerson.event_id == event_person.event_id, EventPerson.person_id == event_person.person_id).first()[0]
    assert queried_description == description
@pytest.mark.smoke
def test_delete_event_persons(auth_client):
    """Unlink a person from an event; expect 204, the link row gone, then 404 on a repeat delete."""
    # GIVEN a database with some linked events and persons
    create_multiple_events(auth_client.sqla, 5)
    create_multiple_people(auth_client.sqla, 5)
    create_events_persons(auth_client.sqla, 1)
    event_person = auth_client.sqla.query(EventPerson).first()
    count = auth_client.sqla.query(EventPerson).count()

    # WHEN we unlink a person from an event
    resp = auth_client.delete(url_for('events.delete_event_persons',
                                      event_id=event_person.event_id, person_id=event_person.person_id))
    # THEN we expect the right status code
    assert resp.status_code == 204
    # THEN we expect the linkage to be absent in the database
    assert 0 == auth_client.sqla.query(EventPerson).filter(
        EventPerson.event_id == event_person.event_id, EventPerson.person_id == event_person.person_id).count()
    # THEN We expect the correct count of link in the database
    new_count = auth_client.sqla.query(EventPerson).count()
    assert count - 1 == new_count

    # WHEN we unlink the same person again
    resp = auth_client.delete(url_for('events.delete_event_persons',
                                      event_id=event_person.event_id, person_id=event_person.person_id))
    # THEN we expect an error
    assert resp.status_code == 404
# ---- Linking tables (event <-> participant)
@pytest.mark.smoke
def test_add_event_participants(auth_client):
    """Link a participant to an event; expect 404 for a missing event, 422 without a payload or on a duplicate, else 200."""
    payload = {
        'confirmed': flip()
    }

    # GIVEN a database with only some participants
    create_multiple_people(auth_client.sqla, 5)
    person_id = auth_client.sqla.query(Person.id).first()[0]

    # WHEN we try to link a non-existent event to a participant
    resp = auth_client.post(url_for(
        'events.add_event_participants', event_id=1, person_id=person_id), json=payload)
    # THEN we expect an error code
    assert resp.status_code == 404

    # GIVEN a database with some unlinked events and participants
    create_multiple_events(auth_client.sqla, 5)
    event_id = auth_client.sqla.query(Event.id).first()[0]

    # WHEN we try to make a link without a payload
    resp = auth_client.post(
        url_for('events.add_event_participants', event_id=1, person_id=person_id))
    # THEN we expect an error code
    assert resp.status_code == 422

    # WHEN we link a participant with an event
    resp = auth_client.post(url_for('events.add_event_participants',
                                    event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect the right status code
    assert resp.status_code == 200
    # THEN we expect the correct count of linked event and participant in the database
    count = auth_client.sqla.query(EventParticipant).filter(
        EventParticipant.event_id == event_id, EventParticipant.person_id == person_id).count()
    assert count == 1

    # WHEN we link the same participant again
    resp = auth_client.post(url_for('events.add_event_participants',
                                    event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect an error status code
    assert resp.status_code == 422
@pytest.mark.smoke
def test_modify_event_participant(auth_client):
    """PATCH an event-participant link; expect 404 when unlinked, 422 without a payload, else 200 with 'confirmed' updated."""
    payload = {
        'confirmed': flip()
    }

    # GIVEN a database with unlinked events and participants
    create_multiple_events(auth_client.sqla, 5)
    create_multiple_people(auth_client.sqla, 5)

    # WHEN we try to modify a participant not associated with an event
    event_id = auth_client.sqla.query(Event.id).first()[0]
    person_id = auth_client.sqla.query(Person.id).first()[0]
    resp = auth_client.patch(url_for(
        'events.modify_event_participant', event_id=event_id, person_id=person_id), json=payload)
    # THEN we expect an error
    assert resp.status_code == 404

    # GIVEN a database with some linked events and participants
    create_events_participants(auth_client.sqla, 1)
    event_participant = auth_client.sqla.query(EventParticipant).first()

    # WHEN we try to modify an event_participant without a payload
    resp = auth_client.patch(url_for('events.modify_event_participant',
                                     event_id=event_participant.event_id, person_id=event_participant.person_id))
    # THEN we expect the error code
    assert resp.status_code == 422

    # WHEN we modify an event_participant
    resp = auth_client.patch(url_for('events.modify_event_participant',
                                     event_id=event_participant.event_id, person_id=event_participant.person_id),
                             json=payload)
    # THEN we expect the correct code
    assert resp.status_code == 200
    # THEN we expect the event_participant to be modified
    queried_confirmed = auth_client.sqla.query(EventParticipant.confirmed).filter(
        EventParticipant.event_id == event_participant.event_id,
        EventParticipant.person_id == event_participant.person_id).first()[0]
    assert queried_confirmed == payload["confirmed"]
@pytest.mark.smoke
def test_delete_event_participant(auth_client):
    """Unlink a participant from an event; expect 204, the link row gone, then 404 on a repeat delete."""
    # GIVEN a database with some linked events and participants
    create_multiple_events(auth_client.sqla, 5)
    create_multiple_people(auth_client.sqla, 5)
    create_events_participants(auth_client.sqla, 1)
    event_participant = auth_client.sqla.query(EventParticipant).first()
    count = auth_client.sqla.query(EventParticipant).count()

    # WHEN we unlink a participant from an event
    resp = auth_client.delete(url_for('events.delete_event_participant',
                                      event_id=event_participant.event_id, person_id=event_participant.person_id))
    # THEN we expect the right status code
    assert resp.status_code == 204
    # THEN we expect the linkage to be absent in the database
    assert 0 == auth_client.sqla.query(EventParticipant).filter(
        EventParticipant.event_id == event_participant.event_id,
        EventParticipant.person_id == event_participant.person_id).count()
    # THEN We expect the correct count of link in the database
    new_count = auth_client.sqla.query(EventParticipant).count()
    assert count - 1 == new_count

    # WHEN we unlink the same participant again
    resp = auth_client.delete(url_for('events.delete_event_participant',
                                      event_id=event_participant.event_id, person_id=event_participant.person_id))
    # THEN we expect an error
    assert resp.status_code == 404
@pytest.mark.smoke
def test_add_event_images(auth_client):
    """Attach one image to each event; expect 201 and exactly one image per event."""
    # GIVEN a set of events and images
    count = random.randint(3, 6)
    create_multiple_events(auth_client.sqla, count)
    create_test_images(auth_client.sqla)

    events = auth_client.sqla.query(Event).all()
    images = auth_client.sqla.query(Image).all()

    # WHEN an image is requested to be tied to each event
    for i in range(count):
        resp = auth_client.post(url_for(
            'events.add_event_images', event_id=events[i].id, image_id=images[i].id))

        # THEN expect the request to run OK
        assert resp.status_code == 201

        # THEN expect the event to have a single image
        assert len(auth_client.sqla.query(Event).filter_by(
            id=events[i].id).first().images) == 1
@pytest.mark.smoke
def test_add_event_images_no_exist(auth_client):
    """Attach a nonexistent image, then attach to a nonexistent event; expect 404 both times."""
    # GIVEN a set of events and images
    count = random.randint(3, 6)
    create_multiple_events(auth_client.sqla, count)
    create_test_images(auth_client.sqla)

    images = auth_client.sqla.query(Image).all()

    # WHEN a non-existent image is requested to be tied to an event
    resp = auth_client.post(
        url_for('events.add_event_images', event_id=1, image_id=len(images) + 1))

    # THEN expect the image not to be found
    assert resp.status_code == 404

    # WHEN an image is requested to be tied to a non-existent event
    resp = auth_client.post(
        url_for('events.add_event_images', event_id=count + 1, image_id=1))

    # THEN expect the event not to be found
    assert resp.status_code == 404
@pytest.mark.smoke
def test_add_event_images_already_exist(auth_client):
    """Re-create every existing event-image link; expect 422 for each duplicate."""
    # GIVEN a set of events, images, and event_image relationships
    count = random.randint(3, 6)
    create_multiple_events(auth_client.sqla, count)
    create_test_images(auth_client.sqla)
    create_event_images(auth_client.sqla)

    event_images = auth_client.sqla.query(ImageEvent).all()

    # WHEN existing event_image relationships are requested to be created
    for event_image in event_images:
        resp = auth_client.post(url_for(
            'events.add_event_images', event_id=event_image.event_id, image_id=event_image.image_id))

        # THEN expect the request to be unprocessable
        assert resp.status_code == 422
@pytest.mark.smoke
def test_delete_event_image(auth_client):
    """Delete an existing event-image link; expect 204."""
    # GIVEN a set of events, images, and event_image relationships
    count = random.randint(3, 6)
    create_multiple_events(auth_client.sqla, count)
    create_test_images(auth_client.sqla)
    create_images_events(auth_client.sqla)

    valid_event_image = auth_client.sqla.query(ImageEvent).first()

    # WHEN the event_image relationship is requested to be deleted
    resp = auth_client.delete(url_for('events.delete_event_image',
                                      event_id=valid_event_image.event_id, image_id=valid_event_image.image_id))

    # THEN expect the delete to run OK
    assert resp.status_code == 204
@pytest.mark.smoke
def test_delete_event_image_no_exist(auth_client):
    """Delete an event-image link in an empty database; expect 404."""
    # GIVEN an empty database
    # WHEN an event_image relationship is requested to be deleted
    resp = auth_client.delete(url_for('events.delete_event_image', event_id=random.randint(
        1, 8), image_id=random.randint(1, 8)))

    # THEN expect the requested row to not be found
    assert resp.status_code == 404
| 32,608 | 0 | 728 |
4711200c0e7ea5d1c68d433a699de2fe03a2ceec | 1,135 | py | Python | AnimatorStudio/WaveEditor.py | Hapy-End/Fighting | fe09e6056fde30405163ecd32a971375277905b7 | [
"MIT"
] | null | null | null | AnimatorStudio/WaveEditor.py | Hapy-End/Fighting | fe09e6056fde30405163ecd32a971375277905b7 | [
"MIT"
] | null | null | null | AnimatorStudio/WaveEditor.py | Hapy-End/Fighting | fe09e6056fde30405163ecd32a971375277905b7 | [
"MIT"
] | null | null | null | import wave
import numpy as np
import pygame as pg
pg.init()
screen_size = [1000,800]
screen = pg.display.set_mode(screen_size)
wav = wave.open("/home/anjaro/Android/Projects/AnimaWar/android/assets/Sounds/FireBig.wav", mode="r")
nchannels,sampwidth,framerate,nframes,comptype,compname = wav.getparams()
content = wav.readframes(nframes)
types = {
1: np.int8,
2: np.int16,
3: np.int32
}
samples = np.fromstring(content, dtype=types[sampwidth])
#samples = samples[np.where(samples!=0)]
print(len(samples))
isRun = True
while isRun:
ss = []
for e in pg.event.get():
if e.type == pg.QUIT:
isRun = False
elif e.type == pg.KEYDOWN:
if e.key == pg.K_ESCAPE:
isRun = False
screen.fill((250,250,250))
nn = samples.max()
mm = samples.min()
if abs(mm)>abs(nn):
nn = abs(mm)
nn=(screen_size[1])/nn
j = 0
for i in range(len(samples)):
j+=screen_size[0]/len(samples)
ss.append([int(j),int(screen_size[1]//2 + i//nn)])
pg.draw.lines(screen,(30,30,30),0,ss)
pg.display.flip()
isRun = False
while True:
pass | 27.682927 | 101 | 0.620264 | import wave
import numpy as np
import pygame as pg
pg.init()
screen_size = [1000,800]
screen = pg.display.set_mode(screen_size)
wav = wave.open("/home/anjaro/Android/Projects/AnimaWar/android/assets/Sounds/FireBig.wav", mode="r")
nchannels,sampwidth,framerate,nframes,comptype,compname = wav.getparams()
content = wav.readframes(nframes)
types = {
1: np.int8,
2: np.int16,
3: np.int32
}
samples = np.fromstring(content, dtype=types[sampwidth])
#samples = samples[np.where(samples!=0)]
print(len(samples))
isRun = True
while isRun:
ss = []
for e in pg.event.get():
if e.type == pg.QUIT:
isRun = False
elif e.type == pg.KEYDOWN:
if e.key == pg.K_ESCAPE:
isRun = False
screen.fill((250,250,250))
nn = samples.max()
mm = samples.min()
if abs(mm)>abs(nn):
nn = abs(mm)
nn=(screen_size[1])/nn
j = 0
for i in range(len(samples)):
j+=screen_size[0]/len(samples)
ss.append([int(j),int(screen_size[1]//2 + i//nn)])
pg.draw.lines(screen,(30,30,30),0,ss)
pg.display.flip()
isRun = False
while True:
pass | 0 | 0 | 0 |
337c3da1a332bbc2ac0f773bb905fe0d61053fda | 2,104 | py | Python | notebooks/download_ref_data.py | peterleeishere/aws-batch-architecture-for-alphafold | 2ccb581aa91a3a7da1a8f524eed8466a6cee10da | [
"Apache-2.0"
] | 16 | 2022-02-24T18:59:53.000Z | 2022-03-31T02:05:27.000Z | notebooks/download_ref_data.py | peterleeishere/aws-batch-architecture-for-alphafold | 2ccb581aa91a3a7da1a8f524eed8466a6cee10da | [
"Apache-2.0"
] | 1 | 2022-02-25T20:51:15.000Z | 2022-02-28T13:57:16.000Z | notebooks/download_ref_data.py | peterleeishere/aws-batch-architecture-for-alphafold | 2ccb581aa91a3a7da1a8f524eed8466a6cee10da | [
"Apache-2.0"
] | 1 | 2022-03-25T02:09:57.000Z | 2022-03-25T02:09:57.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from nbhelpers import nbhelpers
import boto3
import argparse
batch = boto3.client("batch")
if __name__ == "__main__":
### Command line parser
args, _ = _parse_args()
response = submit_download_data_job(
stack_name = args.stack_name,
job_name = args.job_name,
script = args.script,
cpu = args.cpu,
memory = args.memory,
download_dir = args.download_dir,
download_mode = args.download_mode
)
print(response) | 28.432432 | 77 | 0.644962 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from nbhelpers import nbhelpers
import boto3
import argparse
batch = boto3.client("batch")
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--stack_name', type=str, default=None)
parser.add_argument('--job_name', type=str, default="download_job")
parser.add_argument('--script', type=str, default="download_all_data.sh")
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--memory', type=int, default=16)
parser.add_argument('--download_dir', type=str, default="/fsx")
parser.add_argument('--download_mode', type=str, default="reduced_dbs")
return parser.parse_known_args()
def submit_download_data_job(
stack_name,
job_name,
script,
cpu,
memory,
download_dir,
download_mode,
):
if stack_name is None:
stack_name = nbhelpers.list_alphafold_stacks()[0]["StackName"]
batch_resources = nbhelpers.get_batch_resources(stack_name)
job_definition = batch_resources["download_job_definition"]
job_queue = batch_resources["download_job_queue"]
container_overrides = {
"command": [
script,
download_dir,
download_mode
],
"resourceRequirements": [
{"value": str(cpu), "type": "VCPU"},
{"value": str(memory * 1000), "type": "MEMORY"},
],
}
response = batch.submit_job(
jobDefinition=job_definition,
jobName=job_name,
jobQueue=job_queue,
containerOverrides=container_overrides,
)
return response
if __name__ == "__main__":
### Command line parser
args, _ = _parse_args()
response = submit_download_data_job(
stack_name = args.stack_name,
job_name = args.job_name,
script = args.script,
cpu = args.cpu,
memory = args.memory,
download_dir = args.download_dir,
download_mode = args.download_mode
)
print(response) | 1,457 | 0 | 46 |
9bee9e1d1ec05fd58e437cf274047cb5c8a96ec6 | 1,539 | py | Python | gemelli/base.py | lisa55asil/gemelli | cc15f9d575b7d26c9eecf4ac0b38ea1f35bf76e6 | [
"BSD-3-Clause"
] | 32 | 2020-08-31T20:59:20.000Z | 2022-03-31T15:07:05.000Z | gemelli/base.py | lisa55asil/gemelli | cc15f9d575b7d26c9eecf4ac0b38ea1f35bf76e6 | [
"BSD-3-Clause"
] | 24 | 2020-09-01T12:07:11.000Z | 2022-03-29T18:08:51.000Z | gemelli/base.py | lisa55asil/gemelli | cc15f9d575b7d26c9eecf4ac0b38ea1f35bf76e6 | [
"BSD-3-Clause"
] | 10 | 2020-09-06T04:24:14.000Z | 2021-12-29T13:03:18.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2019--, gemelli development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from abc import abstractmethod
class _BaseImpute(object):
"""Base class for imputation methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def fit(self):
""" Placeholder for fit this
should be implemetned by sub-method"""
@abstractmethod
def label(self):
""" Placeholder for fit this
should be implemetned by sub-method"""
class _BaseConstruct(object):
"""Base class for transformation/norm methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def construct(self):
"""
conditional_loading : array-like or list of array-like
The conditional loading vectors
of shape (conditions, r) if there is 1 type
of condition, and a list of such matrices if
there are more than 1 type of condition
feature_loading : array-like
The feature loading vectors
of shape (features, r)
sample_loading : array-like
The sample loading vectors
of shape (samples, r) """
| 32.0625 | 78 | 0.578298 | # ----------------------------------------------------------------------------
# Copyright (c) 2019--, gemelli development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from abc import abstractmethod
class _BaseImpute(object):
    """Base class for imputation methods.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def fit(self):
        """Placeholder for fit; this
        must be implemented by the subclass."""
    @abstractmethod
    def label(self):
        """Placeholder for label; this
        must be implemented by the subclass."""
class _BaseConstruct(object):
    """Base class for transformation/norm methods.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def construct(self):
        """Placeholder for construct; must be implemented by the subclass.
        Implementations are expected to produce the following loadings:
        conditional_loading : array-like or list of array-like
            The conditional loading vectors
            of shape (conditions, r) if there is 1 type
            of condition, and a list of such matrices if
            there are more than 1 type of condition
        feature_loading : array-like
            The feature loading vectors
            of shape (features, r)
        sample_loading : array-like
            The sample loading vectors
            of shape (samples, r) """
| 0 | 0 | 0 |
9125e7572c0e25005695dab9770702b927c50f50 | 821 | py | Python | accounts/migrations/0004_auto_20181212_0927.py | kekeho/nitnc-cancel-notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | 1 | 2018-12-05T13:35:17.000Z | 2018-12-05T13:35:17.000Z | accounts/migrations/0004_auto_20181212_0927.py | kekeho/nitnc-cancel-notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | 3 | 2020-02-11T23:33:44.000Z | 2021-06-10T21:04:08.000Z | accounts/migrations/0004_auto_20181212_0927.py | kekeho/NITNC-Cancel-Notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2018-12-12 09:27
from django.conf import settings
from django.db import migrations, models
| 27.366667 | 82 | 0.607795 | # Generated by Django 2.1.4 on 2018-12-12 09:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the 'user' many-to-many field optional (blank=True) on grade,
    lowgradeclass and major."""

    dependencies = [
        ('accounts', '0003_auto_20181212_0924'),
    ]

    # The same AlterField applies to each of the three models.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='user',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
        )
        for model in ('grade', 'lowgradeclass', 'major')
    ]
| 0 | 674 | 23 |
cb857d5acff66852827d160f026843108f8cc463 | 1,705 | py | Python | src/mayaToCorona/mtco_devmodule/scripts/Corona/AETemplate/AECoronaWireTemplate.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
] | 42 | 2015-01-03T15:07:25.000Z | 2021-12-09T03:56:59.000Z | src/mayaToCorona/mtco_devmodule/scripts/Corona/AETemplate/AECoronaWireTemplate.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
] | 66 | 2015-01-02T13:28:44.000Z | 2022-03-16T14:00:57.000Z | src/mayaToCorona/mtco_devmodule/scripts/Corona/AETemplate/AECoronaWireTemplate.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
] | 12 | 2015-02-07T05:02:17.000Z | 2020-07-10T17:21:44.000Z | import pymel.core as pm
import logging
log = logging.getLogger("ui")
| 37.888889 | 114 | 0.642815 | import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
    """Convenience base class over pymel's AETemplate with keyword-friendly
    wrappers for adding controls and opening layouts."""
    def addControl(self, control, label=None, **kwargs):
        # Delegate to the pymel implementation, forwarding the label through.
        super(BaseTemplate, self).addControl(control, label=label, **kwargs)
    def beginLayout(self, name, collapse=True):
        # Delegate to the pymel implementation with the chosen collapse state.
        super(BaseTemplate, self).beginLayout(name, collapse=collapse)
class AECoronaWireTemplate(BaseTemplate):
    """Attribute Editor template for the Corona wire node.

    Shows only the wireframe display attributes and suppresses everything else.
    """
    def __init__(self, nodeName):
        BaseTemplate.__init__(self,nodeName)
        # Fixed: previously logged the wrong class name ("AECoronaLightTemplate").
        log.debug("AECoronaWireTemplate")
        self.thisNode = None
        self.node = pm.PyNode(self.nodeName)
        pm.mel.AEswatchDisplay(nodeName)
        self.beginScrollLayout()
        self.buildBody(nodeName)
        # Suppress every attribute that is not one of the wire display settings.
        allAttributes = self.node.listAttr()
        allowedAttributes = ["useWorldSpace", "showVertices", "allEdges", "edgeWidth", "vertexWidth", "showEdges"]
        for att in allAttributes:
            att = att.split(".")[-1]
            if att not in allowedAttributes:
                self.suppress(att)
        self.addExtraControls("ExtraControls")
        self.endScrollLayout()
    def buildBody(self, nodeName):
        """Lay out the wire settings with labels that match the attributes."""
        self.thisNode = pm.PyNode(nodeName)
        self.beginLayout("Wire Settings", collapse=0)
        self.beginNoOptimize()
        # Fixed: labels were copy-pasted from a light template ("Color",
        # "Multiplier", "Opacity", ...) and did not describe these attributes.
        self.addControl("useWorldSpace", label="Use World Space")
        self.addControl("showVertices", label="Show Vertices")
        self.addControl("showEdges", label="Show Edges")
        self.addControl("allEdges", label="All Edges")
        self.addControl("edgeWidth", label="Edge Width")
        self.addControl("vertexWidth", label="Vertex Width")
        self.endNoOptimize()
        self.endLayout()
| 1,401 | 36 | 173 |
b5e2135dafe18aed7e0bf9dbf7b383cdeb5a1f75 | 33,737 | py | Python | booknlp/english/tagger.py | ishine/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 539 | 2021-11-22T16:29:40.000Z | 2022-03-30T17:50:58.000Z | booknlp/english/tagger.py | gxxu-ml/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 6 | 2021-12-12T18:21:49.000Z | 2022-03-30T20:51:40.000Z | booknlp/english/tagger.py | gxxu-ml/booknlp | 2b42ccd40dc2c62097308398d4e08f91ecab4177 | [
"MIT"
] | 44 | 2021-11-22T07:22:50.000Z | 2022-03-25T20:02:26.000Z | import sys
import re
import math
from transformers import BertTokenizer, BertModel
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import booknlp.common.crf as crf
import booknlp.common.sequence_eval as sequence_eval
from torch.nn import CrossEntropyLoss
| 29.750441 | 282 | 0.6928 | import sys
import re
import math
from transformers import BertTokenizer, BertModel
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import booknlp.common.crf as crf
import booknlp.common.sequence_eval as sequence_eval
from torch.nn import CrossEntropyLoss
class Tagger(nn.Module):
	def __init__(self, freeze_bert=False, base_model=None, tagset=None, supersense_tagset=None, tagset_flat=None, hidden_dim=100, flat_hidden_dim=200, device=None):
		"""Joint tagger: layered entities (CRF), supersenses (CRF) and a flat (event) classifier.

		base_model: BERT checkpoint name; an optional "entities_" prefix and a
			"-vN" suffix are stripped, and the layer count / hidden size are
			parsed from the remaining name (e.g. "...-12_H-768_A-...").
		tagset / supersense_tagset: dicts mapping BIO tag strings to int ids.
		tagset_flat: label dict for the flat classifier.
		"""
		super(Tagger, self).__init__()
		modelName=base_model
		# Strip the project-specific prefix/version suffix to recover the HF model name.
		modelName=re.sub("^entities_", "", modelName)
		modelName=re.sub("-v\d.*$", "", modelName)
		# group(1) = number of transformer layers, group(2) = hidden size.
		matcher=re.search(".*-(\d+)_H-(\d+)_A-.*", modelName)
		bert_dim=0
		modelSize=0
		self.num_layers=0
		if matcher is not None:
			bert_dim=int(matcher.group(2))
			# At most the top 4 hidden layers are concatenated as the token representation.
			self.num_layers=min(4, int(matcher.group(1)))
			modelSize=self.num_layers*bert_dim
		# If the model name did not match, the dimensions are unknown -- fail early.
		assert bert_dim != 0
		self.tagset=tagset
		self.tagset_flat=tagset_flat
		self.device=device
		self.crf=crf.CRF(len(self.tagset), device)
		# Embedding for WordNet feature ids (50 ids -> 20 dims), used by the supersense path.
		self.wn_embedding = nn.Embedding(50, 20)
		# Reverse map id -> tag; the two extra ids beyond the tagset (presumably the
		# CRF's start/stop states -- confirm against crf.CRF) decode to "O".
		self.rev_tagset={tagset[v]:v for v in tagset}
		self.rev_tagset[len(tagset)]="O"
		self.rev_tagset[len(tagset)+1]="O"
		self.num_labels=len(tagset) + 2
		self.supersense_tagset=supersense_tagset
		self.num_supersense_labels=len(supersense_tagset) + 2
		self.supersense_crf=crf.CRF(len(supersense_tagset), device)
		self.rev_supersense_tagset={supersense_tagset[v]:v for v in supersense_tagset}
		self.rev_supersense_tagset[len(supersense_tagset)]="O"
		self.rev_supersense_tagset[len(supersense_tagset)+1]="O"
		self.num_labels_flat=len(tagset_flat)
		self.tokenizer = BertTokenizer.from_pretrained(modelName, do_lower_case=False, do_basic_tokenize=False)
		self.bert = BertModel.from_pretrained(modelName)
		# "[CAP]" is an extra marker token; grow the embedding table to include it.
		self.tokenizer.add_tokens(["[CAP]"], special_tokens=True)
		self.bert.resize_token_embeddings(len(self.tokenizer))
		self.bert.eval()
		if freeze_bert:
			for param in self.bert.parameters():
				param.requires_grad = False
		self.hidden_dim = hidden_dim
		self.layered_dropout = nn.Dropout(0.20)
		# Supersense branch: BiLSTM over [BERT features ; 20-dim WordNet embedding].
		self.supersense_lstm1 = nn.LSTM(modelSize + 20, hidden_dim, bidirectional=True, batch_first=True)
		self.supersense_hidden2tag1 = nn.Linear(hidden_dim * 2, self.num_supersense_labels)
		# Entity branch: three stacked BiLSTM+projection stages, one per nesting layer.
		self.lstm1 = nn.LSTM(modelSize, hidden_dim, bidirectional=True, batch_first=True)
		self.hidden2tag1 = nn.Linear(hidden_dim * 2, self.num_labels)
		self.lstm2 = nn.LSTM(2*hidden_dim, hidden_dim, bidirectional=True, batch_first=True)
		self.hidden2tag2 = nn.Linear(hidden_dim * 2, self.num_labels)
		self.lstm3 = nn.LSTM(2*hidden_dim, hidden_dim, bidirectional=True, batch_first=True)
		self.hidden2tag3 = nn.Linear(hidden_dim * 2, self.num_labels)
		# Flat (event) branch.
		self.flat_dropout = nn.Dropout(0.5)
		self.flat_hidden_dim=flat_hidden_dim
		self.flat_lstm = nn.LSTM(modelSize, self.flat_hidden_dim, bidirectional=True, batch_first=True, num_layers=1)
		self.flat_classifier = nn.Linear(2*self.flat_hidden_dim, self.num_labels_flat)
		# NOTE(review): these look like placeholders for optimizer parameter
		# grouping; they are not populated here -- confirm against the trainer.
		param_group = []
		self.bert_params={}
		self.everything_else_params={}
def forwardFlatSequence(self, input_ids, token_type_ids=None, attention_mask=None, transforms=None, labels=None):
batch_s, max_len=input_ids.shape
input_ids = input_ids.to(self.device)
attention_mask = attention_mask.to(self.device)
transforms = transforms.to(self.device)
if labels is not None:
labels = labels.to(self.device)
output = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True)
hidden_states=output["hidden_states"]
if self.num_layers == 4:
all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
elif self.num_layers == 2:
all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
out=torch.matmul(transforms,all_layers)
out, _ = self.flat_lstm(out)
out=self.flat_dropout(out)
out = out.contiguous().view(-1,out.shape[2])
logits = self.flat_classifier(out)
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(logits.view(-1, self.num_labels_flat), labels.view(-1))
return loss
else:
return logits
	def forward_supersense(self, wn, input_ids, matrix1, matrix2, attention_mask=None, transforms=None, labels=None, lens=None):
		"""Training loss for the supersense CRF over a batch.

		wn: WordNet feature ids, embedded and concatenated to the BERT features.
		labels/lens: lists indexed by layer; only layer 0 is used here, but all
		three are moved to the device for parity with forward().
		Note: matrix1/matrix2 are moved to the device but otherwise unused in
		this method.
		"""
		matrix1=matrix1.to(self.device)
		matrix2=matrix2.to(self.device)
		wn=wn.to(self.device)
		wn_embeds=self.wn_embedding(wn)
		input_ids = input_ids.to(self.device)
		attention_mask = attention_mask.to(self.device)
		transforms = transforms.to(self.device)
		if lens is not None:
			lens[0] = lens[0].to(self.device)
			lens[1] = lens[1].to(self.device)
			lens[2] = lens[2].to(self.device)
		if labels is not None:
			labels[0] = labels[0].to(self.device)
			labels[1] = labels[1].to(self.device)
			labels[2] = labels[2].to(self.device)
		output = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True)
		hidden_states=output["hidden_states"]
		# Concatenate the top num_layers hidden states as the token representation.
		if self.num_layers == 4:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
		elif self.num_layers == 2:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
		# remove the opening [CLS]
		reduced=torch.matmul(transforms,all_layers)[:,1:,:]
		wn_embeds=wn_embeds[:,1:,:]
		reduced=torch.cat([reduced, wn_embeds], axis=2)
		reduced=self.layered_dropout(reduced)
		lstm_out1, _ = self.supersense_lstm1(reduced)
		tag_space1 = self.supersense_hidden2tag1(lstm_out1)
		# -100 (ignore) positions are mapped to tag id 0 before CRF scoring.
		to_value=0
		# CRF negative log-likelihood: partition score minus gold-sequence score.
		forward_score1 = self.supersense_crf.forward(tag_space1, lens[0]-2)
		sequence_score1 = self.supersense_crf.score(torch.where(labels[0][:,1:] == -100, torch.ones_like(labels[0][:,1:]) * to_value, labels[0][:,1:]), lens[0]-2, logits=tag_space1)
		loss1 = (forward_score1 - sequence_score1).sum()
		return loss1
	def forward(self, input_ids, matrix1, matrix2, attention_mask=None, transforms=None, labels=None, lens=None):
		"""Training loss for the three stacked entity CRF layers.

		matrix1/matrix2: gold merge matrices that collapse multi-token entities
		of layer n into single positions for layer n+1 (teacher forcing).
		labels/lens: per-layer gold tag tensors and sequence lengths.
		Returns the summed CRF negative log-likelihood of all three layers.
		"""
		matrix1=matrix1.to(self.device)
		matrix2=matrix2.to(self.device)
		input_ids = input_ids.to(self.device)
		attention_mask = attention_mask.to(self.device)
		transforms = transforms.to(self.device)
		if lens is not None:
			lens[0] = lens[0].to(self.device)
			lens[1] = lens[1].to(self.device)
			lens[2] = lens[2].to(self.device)
		if labels is not None:
			labels[0] = labels[0].to(self.device)
			labels[1] = labels[1].to(self.device)
			labels[2] = labels[2].to(self.device)
		output = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True)
		hidden_states=output["hidden_states"]
		# Concatenate the top num_layers hidden states as the token representation.
		if self.num_layers == 4:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
		elif self.num_layers == 2:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
		# remove the opening [CLS]
		reduced=torch.matmul(transforms,all_layers)[:,1:,:]
		reduced=self.layered_dropout(reduced)
		# Layer 1 over tokens.
		lstm_out1, _ = self.lstm1(reduced)
		tag_space1 = self.hidden2tag1(lstm_out1)
		# Layer 2 over layer-1 states merged by the gold matrix (the [CLS] row/col is dropped).
		input2=torch.matmul(matrix1[:,1:,1:],lstm_out1)
		input2=self.layered_dropout(input2)
		lstm_out2, _ = self.lstm2(input2)
		tag_space2 = self.hidden2tag2(lstm_out2)
		# Layer 3 over merged layer-2 states.
		input3=torch.matmul(matrix2[:,1:,1:],lstm_out2)
		input3=self.layered_dropout(input3)
		lstm_out3, _ = self.lstm3(input3)
		tag_space3 = self.hidden2tag3(lstm_out3)
		# -100 (ignore) positions are mapped to tag id 0 before CRF scoring.
		to_value=0
		forward_score1 = self.crf.forward(tag_space1, lens[0]-2)
		sequence_score1 = self.crf.score(torch.where(labels[0][:,1:] == -100, torch.ones_like(labels[0][:,1:]) * to_value, labels[0][:,1:]), lens[0]-2, logits=tag_space1)
		loss1 = (forward_score1 - sequence_score1).sum()
		forward_score2 = self.crf.forward(tag_space2, lens[1]-2)
		sequence_score2 = self.crf.score(torch.where(labels[1][:,1:] == -100, torch.ones_like(labels[1][:,1:]) * to_value, labels[1][:,1:]), lens[1]-2, logits=tag_space2)
		loss2 = (forward_score2 - sequence_score2).sum()
		forward_score3 = self.crf.forward(tag_space3, lens[2]-2)
		sequence_score3 = self.crf.score(torch.where(labels[2][:,1:] == -100, torch.ones_like(labels[2][:,1:]) * to_value, labels[2][:,1:]), lens[2]-2, logits=tag_space3)
		loss3 = (forward_score3 - sequence_score3).sum()
		return loss1 + loss2 + loss3
	def predict_all(self, wn, input_ids, attention_mask=None, transforms=None, lens=None, doEvent=True, doEntities=True, doSS=True):
		"""Run one BERT pass and decode any combination of the three tasks.

		Returns (all_tags1, all_tags2, all_tags3, event_logits,
		all_supersense_tags1); entries for disabled tasks are None.
		Entity layers 2 and 3 are decoded over sequences in which multi-token
		entities from the previous layer are collapsed, then re-expanded so all
		returned tag lists are token-aligned with the input.
		"""
		def fix(sequence):
			"""
			Ensure tag sequence is BIO-compliant
			"""
			for idx, tag in enumerate(sequence):
				tag=self.rev_tagset[tag]
				if tag.startswith("I-"):
					parts=tag.split("-")
					label=parts[1]
					flag=False
					# Scan backwards for a matching B- tag before any break in the span.
					for i in range(idx-1, -1, -1):
						prev=self.rev_tagset[sequence[i]].split("-")
						if prev[0] == "B" and prev[1] == label:
							flag=True
							break
						if prev[0] == "O":
							break
						if prev[0] != "O" and prev[1] != label:
							break
					# Orphan I- tag: promote it to B-.
					if flag==False:
						sequence[idx]=self.tagset["B-%s" % label]
		def get_layer_transformation(tag_space, t):
			"""
			After predicting a tag sequence, get the information we need to transform the current layer
			to the next layer (e.g., merging tokens in the same entity and remembering which ones we merged)
			"""
			nl=tag_space.shape[1]
			all_tags=[]
			for tags in t:
				all_tags.append(list(tags.data.cpu().numpy()))
			# matrix for merging tokens in layer n+1 that are part of the same entity in layer n
			all_index=[]
			# indices of tokens that were merged (so we can restore them later)
			all_missing=[]
			# length of the resulting layer (after merging)
			all_lens=[]
			for tags1 in all_tags:
				fix(tags1)
				index1=self.get_index([tags1], self.rev_tagset)[0]
				# Zero-pad the merge matrix out to the full sequence length.
				for z in range(len(index1)):
					for y in range(len(index1[z]), nl):
						index1[z].append(0)
				for z in range(len(index1), nl):
					index1.append(np.zeros(nl))
				all_index.append(index1)
				missing1=[]
				nll=0
				# Every I- position gets absorbed into the preceding span head.
				for idx, tag in enumerate(tags1):
					if idx > 0 and self.rev_tagset[tag].startswith("I-"):
						missing1.append(idx)
					else:
						nll+=1
				all_lens.append(nll)
				all_missing.append(missing1)
			all_index=torch.FloatTensor(np.array(all_index)).to(self.device)
			return all_tags, all_index, all_missing, all_lens
		def supersense_fix(sequence):
			"""
			Ensure tag sequence is BIO-compliant
			"""
			for idx, tag in enumerate(sequence):
				tag=self.rev_supersense_tagset[tag]
				if tag.startswith("I-"):
					parts=tag.split("-")
					label=parts[1]
					flag=False
					for i in range(idx-1, -1, -1):
						prev=self.rev_supersense_tagset[sequence[i]].split("-")
						if prev[0] == "B" and prev[1] == label:
							flag=True
							break
						if prev[0] == "O":
							break
						if prev[0] != "O" and prev[1] != label:
							break
					if flag==False:
						sequence[idx]=self.supersense_tagset["B-%s" % label]
		def get_supersense_layer_transformation(tag_space, t):
			"""
			After predicting a tag sequence, get the information we need to transform the current layer
			to the next layer (e.g., merging tokens in the same entity and remembering which ones we merged)
			"""
			nl=tag_space.shape[1]
			all_tags=[]
			for tags in t:
				all_tags.append(list(tags.data.cpu().numpy()))
			# matrix for merging tokens in layer n+1 that are part of the same entity in layer n
			all_index=[]
			# indices of tokens that were merged (so we can restore them later)
			all_missing=[]
			# length of the resulting layer (after merging)
			all_lens=[]
			for tags1 in all_tags:
				supersense_fix(tags1)
				index1=self.get_index([tags1], self.rev_supersense_tagset)[0]
				for z in range(len(index1)):
					for y in range(len(index1[z]), nl):
						index1[z].append(0)
				for z in range(len(index1), nl):
					index1.append(np.zeros(nl))
				all_index.append(index1)
				missing1=[]
				nll=0
				for idx, tag in enumerate(tags1):
					if idx > 0 and self.rev_supersense_tagset[tag].startswith("I-"):
						missing1.append(idx)
					else:
						nll+=1
				all_lens.append(nll)
				all_missing.append(missing1)
			all_index=torch.FloatTensor(np.array(all_index)).to(self.device)
			return all_tags, all_index, all_missing, all_lens
		# Defaults for whichever tasks are disabled.
		all_tags1=all_tags2=all_tags3=event_logits=all_supersense_tags1=None
		input_ids = input_ids.to(self.device)
		attention_mask = attention_mask.to(self.device)
		transforms = transforms.to(self.device)
		ll=lens.to(self.device)
		# Single shared BERT pass for all three tasks.
		sequence_outputs, pooled_outputs, hidden_states = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True, return_dict=False)
		if self.num_layers == 4:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
		elif self.num_layers == 2:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
		# remove the opening [CLS]
		reduced=torch.matmul(transforms,all_layers)[:,1:,:]
		##
		# ENTITIES
		##
		if doEntities:
			## LAYER 1
			lstm_out1, _ = self.lstm1(reduced)
			tag_space1 = self.hidden2tag1(lstm_out1)
			_, t1 = self.crf.viterbi_decode(tag_space1, ll-2)
			all_tags1, all_index1, all_missing1, n_lens1=get_layer_transformation(tag_space1, t1)
			input2=torch.matmul(all_index1,lstm_out1)
			## LAYER 2
			lstm_out2, _ = self.lstm2(input2)
			tag_space2 = self.hidden2tag2(lstm_out2)
			_, t2 = self.crf.viterbi_decode(tag_space2, torch.LongTensor(n_lens1) )
			all_tags2, all_index2, all_missing2, n_lens2=get_layer_transformation(tag_space2, t2)
			input3=torch.matmul(all_index2,lstm_out2)
			## LAYER 3
			lstm_out3, _ = self.lstm3(input3)
			tag_space3 = self.hidden2tag3(lstm_out3)
			_, t3 = self.crf.viterbi_decode(tag_space3, torch.LongTensor(n_lens2))
			all_tags3=[]
			for tags in t3:
				all_tags3.append(list(tags.data.cpu().numpy()))
			for tags3 in all_tags3:
				fix(tags3)
			## Insert tokens into later layers that were compressed in earlier layers
			for idx, missing2 in enumerate(all_missing2):
				for m in missing2:
					# Re-inserted positions continue the span of the preceding token (or O).
					parts=self.rev_tagset[all_tags3[idx][m-1]].split("-")
					if len(parts) > 1:
						all_tags3[idx].insert(m, self.tagset["I-%s" % parts[1]])
					else:
						all_tags3[idx].insert(m, self.tagset["O"])
			for idx, missing1 in enumerate(all_missing1):
				for m in missing1:
					parts=self.rev_tagset[all_tags3[idx][m-1]].split("-")
					if len(parts) > 1:
						all_tags3[idx].insert(m, self.tagset["I-%s" % parts[1]])
					else:
						all_tags3[idx].insert(m, self.tagset["O"])
			for idx, missing1 in enumerate(all_missing1):
				for m in missing1:
					parts=self.rev_tagset[all_tags2[idx][m-1]].split("-")
					if len(parts) > 1:
						all_tags2[idx].insert(m, self.tagset["I-%s" % parts[1]])
					else:
						all_tags2[idx].insert(m, self.tagset["O"])
			# Truncate all layers to the layer-1 length so they stay token-aligned.
			for idx in range(len(all_tags1)):
				all_tags2[idx]=all_tags2[idx][:len(all_tags1[idx])]
				all_tags3[idx]=all_tags3[idx][:len(all_tags1[idx])]
		###
		# EVENTS
		###
		if doEvent:
			out, _ = self.flat_lstm(reduced)
			out = out.contiguous().view(-1,out.shape[2])
			event_logits = self.flat_classifier(out)
		##
		# SUPERSENSE
		##
		if doSS:
			wn=wn.to(self.device)
			wn_embeds=self.wn_embedding(wn)
			wn_embeds=wn_embeds[:,1:,:]
			reduced_wn=torch.cat([reduced, wn_embeds], axis=2)
			lstm_out1, _ = self.supersense_lstm1(reduced_wn)
			tag_space1 = self.supersense_hidden2tag1(lstm_out1)
			_, t1 = self.supersense_crf.viterbi_decode(tag_space1, ll-2)
			all_supersense_tags1, all_index1, all_missing1, n_lens1=get_supersense_layer_transformation(tag_space1, t1)
		return all_tags1, all_tags2, all_tags3, event_logits, all_supersense_tags1
	def predict(self, input_ids, attention_mask=None, transforms=None, lens=None):
		"""Viterbi-decode three stacked layers of BIO entity tags for a batch.

		Layer n+1 is predicted over a sequence in which each multi-token entity
		from layer n has been collapsed to one position; the collapsed positions
		are re-expanded afterwards, so all three returned tag lists are
		token-aligned.  Returns (all_tags1, all_tags2, all_tags3).
		"""
		def fix(sequence):
			"""
			Ensure tag sequence is BIO-compliant
			"""
			for idx, tag in enumerate(sequence):
				tag=self.rev_tagset[tag]
				if tag.startswith("I-"):
					parts=tag.split("-")
					label=parts[1]
					flag=False
					# Scan backwards for a matching B- tag before any break in the span.
					for i in range(idx-1, -1, -1):
						prev=self.rev_tagset[sequence[i]].split("-")
						if prev[0] == "B" and prev[1] == label:
							flag=True
							break
						if prev[0] == "O":
							break
						if prev[0] != "O" and prev[1] != label:
							break
					# Orphan I- tag: promote it to B-.
					if flag==False:
						sequence[idx]=self.tagset["B-%s" % label]
		def get_layer_transformation(tag_space, t):
			"""
			After predicting a tag sequence, get the information we need to transform the current layer
			to the next layer (e.g., merging tokens in the same entity and remembering which ones we merged)
			"""
			nl=tag_space.shape[1]
			all_tags=[]
			for tags in t:
				all_tags.append(list(tags.data.cpu().numpy()))
			# matrix for merging tokens in layer n+1 that are part of the same entity in layer n
			all_index=[]
			# indices of tokens that were merged (so we can restore them later)
			all_missing=[]
			# length of the resulting layer (after merging)
			all_lens=[]
			for tags1 in all_tags:
				fix(tags1)
				index1=self.get_index([tags1], self.rev_tagset)[0]
				# Zero-pad the merge matrix out to the full sequence length.
				for z in range(len(index1)):
					for y in range(len(index1[z]), nl):
						index1[z].append(0)
				for z in range(len(index1), nl):
					index1.append(np.zeros(nl))
				all_index.append(index1)
				missing1=[]
				nll=0
				for idx, tag in enumerate(tags1):
					if idx > 0 and self.rev_tagset[tag].startswith("I-"):
						missing1.append(idx)
					else:
						nll+=1
				all_lens.append(nll)
				all_missing.append(missing1)
			all_index=torch.FloatTensor(all_index).to(self.device)
			return all_tags, all_index, all_missing, all_lens
		## PREDICT
		input_ids = input_ids.to(self.device)
		attention_mask = attention_mask.to(self.device)
		transforms = transforms.to(self.device)
		ll=lens.to(self.device)
		sequence_outputs, pooled_outputs, hidden_states = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True, return_dict=False)
		if self.num_layers == 4:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
		elif self.num_layers == 2:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
		# remove the opening [CLS]
		reduced=torch.matmul(transforms,all_layers)[:,1:,:]
		## LAYER 1
		lstm_out1, _ = self.lstm1(reduced)
		tag_space1 = self.hidden2tag1(lstm_out1)
		_, t1 = self.crf.viterbi_decode(tag_space1, ll-2)
		all_tags1, all_index1, all_missing1, n_lens1=get_layer_transformation(tag_space1, t1)
		input2=torch.matmul(all_index1,lstm_out1)
		## LAYER 2
		lstm_out2, _ = self.lstm2(input2)
		tag_space2 = self.hidden2tag2(lstm_out2)
		_, t2 = self.crf.viterbi_decode(tag_space2, torch.LongTensor(n_lens1) )
		all_tags2, all_index2, all_missing2, n_lens2=get_layer_transformation(tag_space2, t2)
		input3=torch.matmul(all_index2,lstm_out2)
		## LAYER 3
		lstm_out3, _ = self.lstm3(input3)
		tag_space3 = self.hidden2tag3(lstm_out3)
		_, t3 = self.crf.viterbi_decode(tag_space3, torch.LongTensor(n_lens2))
		all_tags3=[]
		for tags in t3:
			all_tags3.append(list(tags.data.cpu().numpy()))
		for tags3 in all_tags3:
			fix(tags3)
		## Insert tokens into later layers that were compressed in earlier layers
		for idx, missing2 in enumerate(all_missing2):
			for m in missing2:
				# Re-inserted positions continue the span of the preceding token (or O).
				parts=self.rev_tagset[all_tags3[idx][m-1]].split("-")
				if len(parts) > 1:
					all_tags3[idx].insert(m, self.tagset["I-%s" % parts[1]])
				else:
					all_tags3[idx].insert(m, self.tagset["O"])
		for idx, missing1 in enumerate(all_missing1):
			for m in missing1:
				parts=self.rev_tagset[all_tags3[idx][m-1]].split("-")
				if len(parts) > 1:
					all_tags3[idx].insert(m, self.tagset["I-%s" % parts[1]])
				else:
					all_tags3[idx].insert(m, self.tagset["O"])
		for idx, missing1 in enumerate(all_missing1):
			for m in missing1:
				parts=self.rev_tagset[all_tags2[idx][m-1]].split("-")
				if len(parts) > 1:
					all_tags2[idx].insert(m, self.tagset["I-%s" % parts[1]])
				else:
					all_tags2[idx].insert(m, self.tagset["O"])
		# Truncate all layers to the layer-1 length so they stay token-aligned.
		for idx in range(len(all_tags1)):
			all_tags2[idx]=all_tags2[idx][:len(all_tags1[idx])]
			all_tags3[idx]=all_tags3[idx][:len(all_tags1[idx])]
		return all_tags1, all_tags2, all_tags3
	def supersense_predict(self, wn, input_ids, attention_mask=None, transforms=None, lens=None):
		"""Viterbi-decode supersense tags (a single layer) for a batch.

		wn: WordNet feature ids, embedded and concatenated to the BERT features.
		Returns one list of BIO-compliant tag ids per sentence.
		"""
		def supersense_fix(sequence):
			"""
			Ensure tag sequence is BIO-compliant
			"""
			for idx, tag in enumerate(sequence):
				tag=self.rev_supersense_tagset[tag]
				if tag.startswith("I-"):
					parts=tag.split("-")
					label=parts[1]
					flag=False
					# Scan backwards for a matching B- tag before any break in the span.
					for i in range(idx-1, -1, -1):
						prev=self.rev_supersense_tagset[sequence[i]].split("-")
						if prev[0] == "B" and prev[1] == label:
							flag=True
							break
						if prev[0] == "O":
							break
						if prev[0] != "O" and prev[1] != label:
							break
					# Orphan I- tag: promote it to B-.
					if flag==False:
						sequence[idx]=self.supersense_tagset["B-%s" % label]
		def get_layer_transformation(tag_space, t):
			"""
			After predicting a tag sequence, get the information we need to transform the current layer
			to the next layer (e.g., merging tokens in the same entity and remembering which ones we merged)
			"""
			nl=tag_space.shape[1]
			all_tags=[]
			for tags in t:
				all_tags.append(list(tags.data.cpu().numpy()))
			# matrix for merging tokens in layer n+1 that are part of the same entity in layer n
			all_index=[]
			# indices of tokens that were merged (so we can restore them later)
			all_missing=[]
			# length of the resulting layer (after merging)
			all_lens=[]
			for tags1 in all_tags:
				supersense_fix(tags1)
				index1=self.get_index([tags1], self.rev_supersense_tagset)[0]
				for z in range(len(index1)):
					for y in range(len(index1[z]), nl):
						index1[z].append(0)
				for z in range(len(index1), nl):
					index1.append(np.zeros(nl))
				all_index.append(index1)
				missing1=[]
				nll=0
				for idx, tag in enumerate(tags1):
					if idx > 0 and self.rev_supersense_tagset[tag].startswith("I-"):
						missing1.append(idx)
					else:
						nll+=1
				all_lens.append(nll)
				all_missing.append(missing1)
			all_index=torch.FloatTensor(all_index).to(self.device)
			return all_tags, all_index, all_missing, all_lens
		## PREDICT
		wn=wn.to(self.device)
		input_ids = input_ids.to(self.device)
		attention_mask = attention_mask.to(self.device)
		transforms = transforms.to(self.device)
		ll=lens.to(self.device)
		output = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask, output_hidden_states=True)
		hidden_states=output["hidden_states"]
		if self.num_layers == 4:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]), 2)
		elif self.num_layers == 2:
			all_layers = torch.cat((hidden_states[-1], hidden_states[-2]), 2)
		# remove the opening [CLS]
		reduced=torch.matmul(transforms,all_layers)[:,1:,:]
		wn_embeds=self.wn_embedding(wn)
		wn_embeds=wn_embeds[:,1:,:]
		reduced=torch.cat([reduced, wn_embeds], axis=2)
		lstm_out1, _ = self.supersense_lstm1(reduced)
		tag_space1 = self.supersense_hidden2tag1(lstm_out1)
		_, t1 = self.supersense_crf.viterbi_decode(tag_space1, ll-2)
		# Only the fixed tag sequences are needed here; the merge info is unused.
		all_tags1, all_index1, all_missing1, n_lens1=get_layer_transformation(tag_space1, t1)
		return all_tags1
	def tag_all(self, batched_wn, batched_sents, batched_data, batched_mask, batched_transforms, batched_orig_token_lens, ordering, doEvent=True, doEntities=True, doSS=True):
		""" Tag input data for layered sequence labeling.

		Runs predict_all per batch, converts tags to span dicts via get_spans,
		and restores the original (pre-sorting) sentence order using `ordering`.
		Returns (entity_preds, event_preds, supersense_preds); disabled tasks
		yield None.
		"""
		# c / e: running sentence indices for entity and supersense spans.
		c=0
		e=0
		ordered_preds=[]
		ordered_supersense_preds=[]
		ordered_events=[]
		preds_in_order=events_in_order=supersense_preds_in_order=None
		with torch.no_grad():
			for b in range(len(batched_data)):
				all_tags1, all_tags2, all_tags3, event_logits, all_supersense_tags1=self.predict_all(batched_wn[b], batched_data[b], attention_mask=batched_mask[b], transforms=batched_transforms[b], lens=batched_orig_token_lens[b], doEvent=doEvent, doEntities=doEntities, doSS=doSS)
				# for each sentence in the batch
				if doEntities:
					for d in range(len(all_tags1)):
						preds={}
						# Collect spans from all three nesting layers into one dict.
						for entity in self.get_spans(self.rev_tagset, c, all_tags1[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
							preds[entity]=1
						for entity in self.get_spans(self.rev_tagset, c, all_tags2[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
							preds[entity]=1
						for entity in self.get_spans(self.rev_tagset, c, all_tags3[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
							preds[entity]=1
						ordered_preds.append(preds)
						c+=1
				if doSS:
					for d in range(len(all_supersense_tags1)):
						supersense_preds={}
						for entity in self.get_spans(self.rev_supersense_tagset, e, all_supersense_tags1[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
							supersense_preds[entity]=1
						ordered_supersense_preds.append(supersense_preds)
						e+=1
				if doEvent:
					logits=event_logits.cpu()
					ordered_event_preds=[]
					ordered_event_preds += [np.array(r) for r in logits]
					size=batched_wn[b].shape
					# Reshape flat logits back to (sentence, token, 2 classes).
					logits=logits.view(-1, size[1]-1, 2)
					for row in range(size[0]):
						events={}
						# A token is an event when class 1 wins the argmax.
						for col in range(batched_orig_token_lens[b][row]-1):
							pred=np.argmax(logits[row][col])
							if pred == 1:
								events[col]=1
						ordered_events.append(events)
		# Undo the length-based batching: put predictions back in input order.
		if doSS:
			supersense_preds_in_order = [None for i in range(len(ordering))]
			for i, ind in enumerate(ordering):
				supersense_preds_in_order[ind] = ordered_supersense_preds[i]
		if doEntities:
			preds_in_order = [None for i in range(len(ordering))]
			for i, ind in enumerate(ordering):
				preds_in_order[ind] = ordered_preds[i]
		if doEvent:
			events_in_order = [None for i in range(len(ordering))]
			for i, ind in enumerate(ordering):
				events_in_order[ind] = ordered_events[i]
		return preds_in_order, events_in_order, supersense_preds_in_order
def tag(self, batched_sents, batched_data, batched_mask, batched_transforms, batched_orig_token_lens, ordering):
c=0
ordered_preds=[]
with torch.no_grad():
for b in range(len(batched_data)):
all_tags1, all_tags2, all_tags3=self.predict(batched_data[b], attention_mask=batched_mask[b], transforms=batched_transforms[b], lens=batched_orig_token_lens[b])
for d in range(len(all_tags1)):
preds={}
for entity in self.get_spans(self.rev_tagset, c, all_tags1[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
preds[entity]=1
for entity in self.get_spans(self.rev_tagset, c, all_tags2[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
preds[entity]=1
for entity in self.get_spans(self.rev_tagset, c, all_tags3[d], batched_orig_token_lens[b][d], batched_sents[b][d][1:]):
preds[entity]=1
ordered_preds.append(preds)
c+=1
preds_in_order = [None for i in range(len(ordering))]
for i, ind in enumerate(ordering):
preds_in_order[ind] = ordered_preds[i]
return preds_in_order
def supersense_evaluate(self, test_batched_wn, test_batched_sents, test_batched_data, test_batched_mask, test_batched_labels, test_batched_transforms, test_batched_layered_labels1, test_batched_layered_labels2, test_batched_layered_labels3, test_batched_layered_labels4, dev_lens):
self.eval()
with torch.no_grad():
preds={}
golds={}
c=0
for b in range(len(test_batched_data)):
all_tags1=self.supersense_predict(test_batched_wn[b], test_batched_data[b], attention_mask=test_batched_mask[b], transforms=test_batched_transforms[b], lens=dev_lens[0][b])
for d in range(len(all_tags1)):
# remove [CLS] from the gold labels
for entity in self.get_spans(self.rev_supersense_tagset, c, test_batched_layered_labels1[b][d][1:], dev_lens[0][b][d], test_batched_sents[b][d][1:]):
golds[entity]=1
# predicted tags already have [CLS] removed
for entity in self.get_spans(self.rev_supersense_tagset, c, all_tags1[d], dev_lens[0][b][d], test_batched_sents[b][d][1:]):
preds[entity]=1
c+=1
F1=sequence_eval.check_span_f1_two_dicts_subcat(golds, preds)
return F1
def evaluate(self, test_batched_sents, test_batched_data, test_batched_mask, test_batched_labels, test_batched_transforms, test_batched_layered_labels1, test_batched_layered_labels2, test_batched_layered_labels3, test_batched_layered_labels4, dev_lens):
    """Evaluate input data (with labels) for layered sequence labeling.

    Gold spans come from all four label layers (LitBank has at most 4 layers);
    predictions cover only three layers, since the 4th layer holds only ~0.1%
    of tags. Returns span-level F1 from sequence_eval.
    """
    self.eval()
    with torch.no_grad():
        gold_spans = {}
        pred_spans = {}
        doc_idx = 0
        gold_layers = (test_batched_layered_labels1, test_batched_layered_labels2,
                       test_batched_layered_labels3, test_batched_layered_labels4)
        for b in range(len(test_batched_data)):
            predictions = self.predict(test_batched_data[b],
                                       attention_mask=test_batched_mask[b],
                                       transforms=test_batched_transforms[b],
                                       lens=dev_lens[0][b])
            for d in range(len(predictions[0])):
                length = dev_lens[0][b][d]
                words = test_batched_sents[b][d][1:]
                # gold labels still carry [CLS]; strip it before span extraction
                for layer in gold_layers:
                    for span in self.get_spans(self.rev_tagset, doc_idx,
                                               layer[b][d][1:], length, words):
                        gold_spans[span] = 1
                # predicted tags already have [CLS] removed
                for tags in predictions:
                    for span in self.get_spans(self.rev_tagset, doc_idx,
                                               tags[d], length, words):
                        pred_spans[span] = 1
                doc_idx += 1
        return sequence_eval.check_span_f1_two_dicts_subcat(gold_spans, pred_spans)
def evaluateFlat(self, dev_batched_data, dev_batched_mask, dev_batched_labels, dev_batched_transforms, metric, tagset):
    """Evaluate flat (single-layer) sequence labeling on pre-batched dev data.

    For every position whose gold label is not -100 (the ignore value used for
    subword/special positions), collects the argmax prediction and the gold
    label, then scores them with `metric`.

    Fix: dropped the dead `ordered_preds` accumulator, which converted every
    logits row to a numpy array and was never read (apparently copied from
    tagFlat) — pure wasted time and memory during evaluation.

    :param dev_batched_data: list of input-id tensors, one per batch
    :param dev_batched_mask: matching attention-mask tensors
    :param dev_batched_labels: gold label tensors; -100 marks ignored positions
    :param dev_batched_transforms: wordpiece->token transform tensors
    :param metric: callable(golds, preds, tagset) returning the final score
    :param tagset: label set; len(tagset) is the number of classes
    :return: whatever `metric` returns
    """
    num_labels = len(tagset)
    self.eval()
    with torch.no_grad():
        all_preds = []
        all_golds = []
        for b in range(len(dev_batched_data)):
            logits = self.forwardFlatSequence(dev_batched_data[b], token_type_ids=None, attention_mask=dev_batched_mask[b], transforms=dev_batched_transforms[b])
            logits = logits.cpu()
            size = dev_batched_labels[b].shape
            logits = logits.view(-1, size[1], num_labels)
            for row in range(size[0]):
                for col in range(size[1]):
                    if dev_batched_labels[b][row][col] != -100:
                        # NOTE(review): np.argmax delegates to the tensor's own
                        # argmax method here, so `pred` appears to be a torch
                        # tensor and .cpu() is valid — confirm across versions
                        pred = np.argmax(logits[row][col])
                        all_preds.append(pred.cpu().numpy())
                        all_golds.append(dev_batched_labels[b][row][col].cpu().numpy())
        return metric(all_golds, all_preds, tagset)
def tagFlat(self, batched_sents, batched_data, batched_mask, batched_transforms, batched_orig_token_lens, ordering):
    """Tag pre-batched sentences with a single (flat) label layer.

    Produces, for every sentence, a list of (token, predicted_label_id) pairs
    covering the tokens between [CLS] and [SEP], and returns the lists in the
    original (pre-batching) sentence order given by `ordering`.
    """
    flat_preds = []
    with torch.no_grad():
        for b in range(len(batched_data)):
            shape = batched_transforms[b].shape
            batch_size, seq_len = shape[0], shape[1]
            logits = self.forwardFlatSequence(batched_data[b], token_type_ids=None,
                                              attention_mask=batched_mask[b],
                                              transforms=batched_transforms[b])
            logits = logits.view(-1, seq_len, self.num_labels_flat)
            logits = logits.cpu()
            best = np.argmax(logits, axis=2)
            for s in range(batch_size):
                tokens = batched_sents[b][s]
                # skip position 0 ([CLS]) and the final position ([SEP])
                flat_preds.append([(tokens[i], int(best[s][i]))
                                   for i in range(1, len(tokens) - 1)])
    # undo the length-sorting done at batching time
    reordered = [None] * len(ordering)
    for pos, original_index in enumerate(ordering):
        reordered[original_index] = flat_preds[pos]
    return reordered
def get_spans(self, rev_tagset, doc_idx, tags, length, sentence):
    """Decode BIO tag ids into entity spans.

    :param rev_tagset: mapping from tag id to tag string (e.g. "B-PER", "I-PER", "O")
    :param doc_idx: index identifying the sentence/document these tags belong to
    :param tags: sequence of tag ids; positions beyond length-2 are padding
    :param length: original length including the [CLS]/[SEP] positions, so the
        usable portion is tags[:length - 2]
    :param sentence: tokens for the sentence (accepted for signature
        compatibility; not used in span extraction)
    :return: dict keyed by (doc_idx, category, start, end) with value 1, where
        end is exclusive
    """
    trimmed = tags[:length - 2]
    spans = {}
    for start, raw in enumerate(trimmed):
        label = rev_tagset[int(raw)]
        if not label.startswith("B-"):
            continue
        category = label.split("-")[1]
        end = start + 1
        # extend through I- tags of the same category
        while end < len(trimmed):
            nxt = rev_tagset[int(trimmed[end])]
            if nxt.startswith("B") or nxt.startswith("O"):
                break
            if nxt.split("-")[1] != category:
                break
            end += 1
        spans[(doc_idx, category, start, end)] = 1
    return spans
def compress(self, labels, rev_tagset):
    """Compress layered label rows by dropping columns consumed by I- tags.

    Starting with every column kept, each output row contains the current row's
    values at the still-kept columns; a column survives into the next row only
    if its label in the current row is -100 (masked) or not an "I-" tag.

    :param labels: list of rows of label ids
    :param rev_tagset: mapping from label id to tag string
    :return: list of progressively narrower rows
    """
    compressed = []
    active = list(range(len(labels[0])))
    for row in labels:
        compressed.append([row[col] for col in active])
        active = [col for col in active
                  if row[col] == -100 or not rev_tagset[row[col]].startswith("I-")]
    return compressed
def get_index(self, all_labels, rev_tagset):
    """Build row-normalized indicator matrices grouping I- tags with their opener.

    For each label sequence, every label that is -100 or not an "I-" tag opens a
    new one-hot row; an "I-" label instead sets its position in the most recent
    row. Each row is finally normalized to sum to one, yielding an averaging
    matrix over each tag group.

    Note: assumes the first label of every sequence is not an "I-" tag
    (otherwise there is no open row to extend).

    :param all_labels: list of label-id sequences
    :param rev_tagset: mapping from label id to tag string
    :return: list (per sequence) of lists of normalized rows
    """
    indices = []
    for labels in all_labels:
        n = len(labels)
        index = []
        for pos, label in enumerate(labels):
            opens_group = (label == -100 or not rev_tagset[label].startswith("I-"))
            if opens_group:
                row = list(np.zeros(n))
                row[pos] = 1
                index.append(row)
            else:
                index[-1][pos] = 1
        indices.append(index)
    # normalize every row so its entries sum to 1
    for index in indices:
        for i, row in enumerate(index):
            index[i] = list(row / np.sum(row))
    return indices
| 25,230 | 8,188 | 23 |
fe0de2a6c96468f1168498a7abcb94c810f8ff96 | 3,199 | py | Python | text2chem/parser_pipeline.py | CederGroupHub/text2chem | e0ca69a6d88a639bd4ab33e8b6c85f7b87229c77 | [
"MIT"
] | 1 | 2021-11-09T06:06:43.000Z | 2021-11-09T06:06:43.000Z | text2chem/parser_pipeline.py | CederGroupHub/text2chem | e0ca69a6d88a639bd4ab33e8b6c85f7b87229c77 | [
"MIT"
] | 1 | 2021-10-15T12:51:47.000Z | 2021-10-19T22:25:33.000Z | text2chem/parser_pipeline.py | CederGroupHub/text2chem | e0ca69a6d88a639bd4ab33e8b6c85f7b87229c77 | [
"MIT"
] | null | null | null | from text2chem.chemical_structure import ChemicalStructure
from text2chem.core.default_processing import DefaultProcessing
from text2chem.chemical_data import list_of_elements, name2element, diatomic_molecules
| 35.153846 | 102 | 0.634261 | from text2chem.chemical_structure import ChemicalStructure
from text2chem.core.default_processing import DefaultProcessing
from text2chem.chemical_data import list_of_elements, name2element, diatomic_molecules
class ParserPipeline:
def __init__(self, options, regex_parser, preprocessings, postprocessings):
self._options = options
self._regex_parser = regex_parser
self._default_processing = DefaultProcessing(regex_parser)
self._preprocessings = preprocessings
self._postprocessings = postprocessings
def parse(self, material_string):
"""
:param material_string:
:return: chemical structure (see chemical_structure.py)
"""
output_structure = ChemicalStructure(material_string)
if not material_string:
return output_structure
"""
element-like material string
"""
if material_string in list_of_elements:
return output_structure.element_structure(material_string)
if material_string in name2element:
return output_structure.element_structure(name2element[material_string])
"""
material string is diatomic molecule
"""
if material_string in diatomic_molecules:
return output_structure.diatomic_molecule_structure(material_string[0])
"""
preprocessing steps
"""
for p in self._preprocessings:
material_string, output_structure = p(self._regex_parser).process_string(material_string,
output_structure)
"""
default functionality: extraction of data from chemical formula
"""
material_string, output_structure = self._default_processing.process_string(material_string,
output_structure)
"""
postprocessing steps
"""
for p in self._postprocessings:
output_structure = p(self._regex_parser).process_data(output_structure)
output_structure.combine_formula()
return output_structure
class ParserPipelineBuilder:
    """Fluent builder assembling a ParserPipeline from pre/post-processing
    steps, a regex parser class, and an optional file reader."""

    def __init__(self):
        self._materialParser = None
        self._file_reader = None
        self._regex_parser = None
        self._preprocessings = []
        self._postprocessings = []

    def add_preprocessing(self, preprocessing):
        """Register a preprocessing step class; returns self for chaining."""
        self._preprocessings.append(preprocessing)
        return self

    def add_postprocessing(self, postprocessing):
        """Register a postprocessing step class; returns self for chaining."""
        self._postprocessings.append(postprocessing)
        return self

    def set_file_reader(self, file_reader):
        """Store the file reader; returns self for chaining."""
        self._file_reader = file_reader
        return self

    def set_regex_parser(self, regex_parser):
        """Instantiate and store the regex parser class; returns self."""
        self._regex_parser = regex_parser()
        return self

    def build(self, options=None):
        """Assemble and return the configured ParserPipeline."""
        return ParserPipeline(options,
                              self._regex_parser,
                              self._preprocessings,
                              self._postprocessings)
| 1,091 | 1,688 | 208 |